1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
4 * Copyright 2015 Mozilla Foundation
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
 */
19 #include "wasm/WasmIonCompile.h"
21 #include "mozilla/DebugOnly.h"
22 #include "mozilla/MathAlgorithms.h"
26 #include "jit/ABIArgGenerator.h"
27 #include "jit/CodeGenerator.h"
28 #include "jit/CompileInfo.h"
30 #include "jit/IonOptimizationLevels.h"
32 #include "jit/ShuffleAnalysis.h"
33 #include "js/ScalarType.h" // js::Scalar::Type
34 #include "wasm/WasmBaselineCompile.h"
35 #include "wasm/WasmBuiltinModule.h"
36 #include "wasm/WasmBuiltins.h"
37 #include "wasm/WasmCodegenTypes.h"
38 #include "wasm/WasmGC.h"
39 #include "wasm/WasmGcObject.h"
40 #include "wasm/WasmGenerator.h"
41 #include "wasm/WasmOpIter.h"
42 #include "wasm/WasmSignalHandlers.h"
43 #include "wasm/WasmStubs.h"
44 #include "wasm/WasmValidate.h"
// Pull the JIT and wasm namespaces plus a couple of mozilla helpers into
// scope for this translation unit (safe here: this is a .cpp, not a header).
47 using namespace js::jit
;
48 using namespace js::wasm
;
50 using mozilla::IsPowerOfTwo
;
52 using mozilla::Nothing
;
// Vectors of MIR blocks/definitions with an inline capacity of 8 entries,
// allocated with SystemAllocPolicy (no JSContext required).
57 using BlockVector
= Vector
<MBasicBlock
*, 8, SystemAllocPolicy
>;
58 using DefVector
= Vector
<MDefinition
*, 8, SystemAllocPolicy
>;
60 // To compile try-catch blocks, we extend the IonCompilePolicy's ControlItem
61 // from being just an MBasicBlock* to a Control structure collecting additional
// information needed to build landing pads (see the Control struct below).
63 using ControlInstructionVector
=
64 Vector
<MControlInstruction
*, 8, SystemAllocPolicy
>;
68 // For a try-catch ControlItem, when its block's LabelKind is Try, this
69 // collects branches to later bind and create the try's landing pad.
70 ControlInstructionVector tryPadPatches
;
// Default control item: no block selected yet.
72 Control() : block(nullptr) {}
// Wrap an already-created block in a control item.
74 explicit Control(MBasicBlock
* block
) : block(block
) {}
// Repoint this control item at another block; used while incrementally
// building the landing-pad block sequence (see SMDOC below).
77 void setBlock(MBasicBlock
* newBlock
) { block
= newBlock
; }
80 // [SMDOC] WebAssembly Exception Handling in Ion
81 // =======================================================
83 // ## Throwing instructions
85 // Wasm exceptions can be thrown by either a throw instruction (local throw),
88 // ## The "catching try control"
90 // We know we are in try-code if there is a surrounding ControlItem with
91 // LabelKind::Try. The innermost such control is called the
92 // "catching try control".
94 // ## Throws without a catching try control
96 // Such throws are implemented with an instance call that triggers the exception
97 // unwinding runtime. The exception unwinding runtime will not return to the
100 // ## "landing pad" and "pre-pad" blocks
102 // When an exception is thrown, the unwinder will search for the nearest
103 // enclosing try block and redirect control flow to it. The code that executes
104 // before any catch blocks is called the 'landing pad'. The 'landing pad' is
106 // 1. Consume the pending exception state from
107 // Instance::pendingException(Tag)
108 // 2. Branch to the correct catch block, or else rethrow
110 // There is one landing pad for each try block. The immediate predecessors of
111 // the landing pad are called 'pre-pad' blocks. There is one pre-pad block per
112 // throwing instruction.
114 // ## Creating pre-pad blocks
116 // There are two possible sorts of pre-pad blocks, depending on whether we
117 // are branching after a local throw instruction, or after a wasm call:
119 // - If we encounter a local throw, we create the exception and tag objects,
120 // store them to Instance::pendingException(Tag), and then jump to the
123 // - If we encounter a wasm call, we construct a MWasmCallCatchable which is a
124 // control instruction with either a branch to a fallthrough block or
125 // to a pre-pad block.
127 // The pre-pad block for a wasm call is empty except for a jump to the
128 // landing pad. It only exists to avoid critical edges which when split would
129 // violate the invariants of MWasmCallCatchable. The pending exception state
130 // is taken care of by the unwinder.
132 // Each pre-pad ends with a pending jump to the landing pad. The pending jumps
133 // to the landing pad are tracked in `tryPadPatches`. These are called
136 // ## Creating the landing pad
138 // When we exit try-code, we check if tryPadPatches has captured any control
139 // instructions (pad patches). If not, we don't compile any catches and we mark
140 // the rest as dead code.
142 // If there are pre-pad blocks, we join them to create a landing pad (or just
143 // "pad"). The pad's last two slots are the caught exception, and the
144 // exception's tag object.
146 // There are three different forms of try-catch/catch_all Wasm instructions,
147 // which result in different forms of landing pads.
149 // 1. A catchless try, so a Wasm instruction of the form "try ... end".
150 // - In this case, we end the pad by rethrowing the caught exception.
152 // 2. A single catch_all after a try.
153 // - If the first catch after a try is a catch_all, then there won't be
154 // any more catches, but we need the exception and its tag object, in
155 // case the code in a catch_all contains "rethrow" instructions.
156 // - The Wasm instruction "rethrow", gets the exception and tag object to
157 // rethrow from the last two slots of the landing pad which, due to
158 // validation, is the l'th surrounding ControlItem.
159 // - We immediately GoTo to a new block after the pad and pop both the
160 // exception and tag object, as we don't need them anymore in this case.
162 // 3. Otherwise, there are one or more catch code blocks following.
163 // - In this case, we construct the landing pad by creating a sequence
164 // of compare and branch blocks that compare the pending exception tag
165 // object to the tag object of the current tagged catch block. This is
166 // done incrementally as we visit each tagged catch block in the bytecode
167 // stream. At every step, we update the ControlItem's block to point to
168 // the next block to be created in the landing pad sequence. The final
169 // block will either be a rethrow, if there is no catch_all, or else a
170 // jump to a catch_all block.
// Compile-time policy for OpIter: declares the types the iterator keeps on
// its value stack and control stack while walking wasm bytecode.
172 struct IonCompilePolicy
{
173 // We store SSA definitions in the value stack.
174 using Value
= MDefinition
*;
175 using ValueVector
= DefVector
;
177 // We store loop headers and then/else blocks in the control flow stack.
178 // In the case of try-catch control blocks, we collect additional information
179 // regarding the possible paths from throws and calls to a landing pad, as
180 // well as information on the landing pad's handlers (its catches).
181 using ControlItem
= Control
;
// Bytecode iterator specialized with the Ion policy above.
184 using IonOpIter
= OpIter
<IonCompilePolicy
>;
// Forward declaration; the class is defined further below.
186 class FunctionCompiler
;
188 // CallCompileState describes a call that is being compiled.
// It accumulates ABI and argument state while FunctionCompiler lowers the
// call's operands; FunctionCompiler is the only intended user (see friend
// declaration below).
190 class CallCompileState
{
191 // A generator object that is passed each argument as it is compiled.
192 WasmABIArgGenerator abi_
;
194 // Accumulates the register arguments while compiling arguments.
195 MWasmCallBase::Args regArgs_
;
197 // Reserved argument for passing Instance* to builtin instance method calls.
200 // The stack area in which the callee will write stack return values, or
201 // nullptr if no stack results.
202 MWasmStackResultArea
* stackResultArea_
= nullptr;
204 // Indicates that the call is a return/tail call.
205 bool returnCall
= false;
207 // Only FunctionCompiler should be directly manipulating CallCompileState.
208 friend class FunctionCompiler
;
211 // Encapsulates the compilation of a single function in an asm.js module. The
212 // function compiler handles the creation and final backend compilation of the
214 class FunctionCompiler
{
215 struct ControlFlowPatch
{
216 MControlInstruction
* ins
;
218 ControlFlowPatch(MControlInstruction
* ins
, uint32_t index
)
219 : ins(ins
), index(index
) {}
222 using ControlFlowPatchVector
= Vector
<ControlFlowPatch
, 0, SystemAllocPolicy
>;
223 using ControlFlowPatchVectorVector
=
224 Vector
<ControlFlowPatchVector
, 0, SystemAllocPolicy
>;
226 const ModuleEnvironment
& moduleEnv_
;
228 const FuncCompileInput
& func_
;
229 const ValTypeVector
& locals_
;
230 size_t lastReadCallSite_
;
232 TempAllocator
& alloc_
;
234 const CompileInfo
& info_
;
235 MIRGenerator
& mirGen_
;
237 MBasicBlock
* curBlock_
;
238 uint32_t maxStackArgBytes_
;
241 uint32_t blockDepth_
;
242 ControlFlowPatchVectorVector blockPatches_
;
244 // Instance pointer argument to the current function.
245 MWasmParameter
* instancePointer_
;
246 MWasmParameter
* stackResultPointer_
;
248 // Reference to masm.tryNotes_
249 wasm::TryNoteVector
& tryNotes_
;
252 FunctionCompiler(const ModuleEnvironment
& moduleEnv
, Decoder
& decoder
,
253 const FuncCompileInput
& func
, const ValTypeVector
& locals
,
254 MIRGenerator
& mirGen
, TryNoteVector
& tryNotes
)
255 : moduleEnv_(moduleEnv
),
256 iter_(moduleEnv
, decoder
),
259 lastReadCallSite_(0),
260 alloc_(mirGen
.alloc()),
261 graph_(mirGen
.graph()),
262 info_(mirGen
.outerInfo()),
265 maxStackArgBytes_(0),
268 instancePointer_(nullptr),
269 stackResultPointer_(nullptr),
270 tryNotes_(tryNotes
) {}
// Read-only view of the module-wide compilation environment.
272 const ModuleEnvironment
& moduleEnv() const { return moduleEnv_
; }
// The bytecode iterator driving this function's compilation.
274 IonOpIter
& iter() { return iter_
; }
// MIR temp allocator (borrowed from the MIRGenerator at construction).
275 TempAllocator
& alloc() const { return alloc_
; }
// Index of the function being compiled, within the module's function space.
276 // FIXME(1401675): Replace with BlockType.
277 uint32_t funcIndex() const { return func_
.index
; }
278 const FuncType
& funcType() const {
279 return *moduleEnv_
.funcs
[func_
.index
].type
;
// Bytecode offset of the instruction currently being visited by iter_.
282 BytecodeOffset
bytecodeOffset() const { return iter_
.bytecodeOffset(); }
283 BytecodeOffset
bytecodeIfNotAsmJS() const {
284 return moduleEnv_
.isAsmJS() ? BytecodeOffset() : iter_
.bytecodeOffset();
287 [[nodiscard
]] bool init() {
288 // Prepare the entry block for MIR generation:
290 const ArgTypeVector
args(funcType());
292 if (!mirGen_
.ensureBallast()) {
295 if (!newBlock(/* prev */ nullptr, &curBlock_
)) {
299 for (WasmABIArgIter
i(args
); !i
.done(); i
++) {
300 MWasmParameter
* ins
= MWasmParameter::New(alloc(), *i
, i
.mirType());
302 if (args
.isSyntheticStackResultPointerArg(i
.index())) {
303 MOZ_ASSERT(stackResultPointer_
== nullptr);
304 stackResultPointer_
= ins
;
306 curBlock_
->initSlot(info().localSlot(args
.naturalIndex(i
.index())),
309 if (!mirGen_
.ensureBallast()) {
314 // Set up a parameter that receives the hidden instance pointer argument.
316 MWasmParameter::New(alloc(), ABIArg(InstanceReg
), MIRType::Pointer
);
317 curBlock_
->add(instancePointer_
);
318 if (!mirGen_
.ensureBallast()) {
322 for (size_t i
= args
.lengthWithoutStackResults(); i
< locals_
.length();
324 ValType slotValType
= locals_
[i
];
325 #ifndef ENABLE_WASM_SIMD
326 if (slotValType
== ValType::V128
) {
327 return iter().fail("Ion has no SIMD support yet");
330 MDefinition
* zero
= constantZeroOfValType(slotValType
);
331 curBlock_
->initSlot(info().localSlot(i
), zero
);
332 if (!mirGen_
.ensureBallast()) {
341 mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_
);
343 MOZ_ASSERT(loopDepth_
== 0);
344 MOZ_ASSERT(blockDepth_
== 0);
346 for (ControlFlowPatchVector
& patches
: blockPatches_
) {
347 MOZ_ASSERT(patches
.empty());
350 MOZ_ASSERT(inDeadCode());
351 MOZ_ASSERT(done(), "all bytes must be consumed");
352 MOZ_ASSERT(func_
.callSiteLineNums
.length() == lastReadCallSite_
);
355 /************************* Read-only interface (after local scope setup) */
// Accessors for the MIR generator, the MIR graph being built, and the
// per-compilation info captured at construction time.
357 MIRGenerator
& mirGen() const { return mirGen_
; }
358 MIRGraph
& mirGraph() const { return graph_
; }
359 const CompileInfo
& info() const { return info_
; }
361 MDefinition
* getLocalDef(unsigned slot
) {
365 return curBlock_
->getSlot(info().localSlot(slot
));
// Declared local types for this function; the leading slots correspond to
// the parameters (init() starts zero-initializing only past the args).
368 const ValTypeVector
& locals() const { return locals_
; }
370 /*********************************************************** Constants ***/
372 MDefinition
* constantF32(float f
) {
376 auto* cst
= MWasmFloatConstant::NewFloat32(alloc(), f
);
380 // Hide all other overloads, to guarantee no implicit argument conversion.
381 template <typename T
>
382 MDefinition
* constantF32(T
) = delete;
384 MDefinition
* constantF64(double d
) {
388 auto* cst
= MWasmFloatConstant::NewDouble(alloc(), d
);
392 template <typename T
>
393 MDefinition
* constantF64(T
) = delete;
395 MDefinition
* constantI32(int32_t i
) {
399 MConstant
* constant
=
400 MConstant::New(alloc(), Int32Value(i
), MIRType::Int32
);
401 curBlock_
->add(constant
);
404 template <typename T
>
405 MDefinition
* constantI32(T
) = delete;
407 MDefinition
* constantI64(int64_t i
) {
411 MConstant
* constant
= MConstant::NewInt64(alloc(), i
);
412 curBlock_
->add(constant
);
415 template <typename T
>
416 MDefinition
* constantI64(T
) = delete;
418 // Produce an MConstant of the machine's target int type (Int32 or Int64).
419 MDefinition
* constantTargetWord(intptr_t n
) {
420 return targetIs64Bit() ? constantI64(int64_t(n
)) : constantI32(int32_t(n
));
422 template <typename T
>
423 MDefinition
* constantTargetWord(T
) = delete;
425 #ifdef ENABLE_WASM_SIMD
426 MDefinition
* constantV128(V128 v
) {
430 MWasmFloatConstant
* constant
= MWasmFloatConstant::NewSimd128(
431 alloc(), SimdConstant::CreateSimd128((int8_t*)v
.bytes
));
432 curBlock_
->add(constant
);
435 template <typename T
>
436 MDefinition
* constantV128(T
) = delete;
439 MDefinition
* constantNullRef() {
443 // MConstant has a lot of baggage so we don't use that here.
444 MWasmNullConstant
* constant
= MWasmNullConstant::New(alloc());
445 curBlock_
->add(constant
);
449 // Produce a zero constant for the specified ValType.
450 MDefinition
* constantZeroOfValType(ValType valType
) {
451 switch (valType
.kind()) {
453 return constantI32(0);
455 return constantI64(int64_t(0));
456 #ifdef ENABLE_WASM_SIMD
458 return constantV128(V128(0));
461 return constantF32(0.0f
);
463 return constantF64(0.0);
465 return constantNullRef();
471 /***************************** Code generation (after local scope setup) */
477 MWasmFence
* ins
= MWasmFence::New(alloc());
482 MDefinition
* unary(MDefinition
* op
) {
486 T
* ins
= T::New(alloc(), op
);
492 MDefinition
* unary(MDefinition
* op
, MIRType type
) {
496 T
* ins
= T::New(alloc(), op
, type
);
502 MDefinition
* binary(MDefinition
* lhs
, MDefinition
* rhs
) {
506 T
* ins
= T::New(alloc(), lhs
, rhs
);
512 MDefinition
* binary(MDefinition
* lhs
, MDefinition
* rhs
, MIRType type
) {
516 T
* ins
= T::New(alloc(), lhs
, rhs
, type
);
522 MDefinition
* binary(MDefinition
* lhs
, MDefinition
* rhs
, MIRType type
,
523 MWasmBinaryBitwise::SubOpcode subOpc
) {
527 T
* ins
= T::New(alloc(), lhs
, rhs
, type
, subOpc
);
532 MDefinition
* ursh(MDefinition
* lhs
, MDefinition
* rhs
, MIRType type
) {
536 auto* ins
= MUrsh::NewWasm(alloc(), lhs
, rhs
, type
);
541 MDefinition
* add(MDefinition
* lhs
, MDefinition
* rhs
, MIRType type
) {
545 auto* ins
= MAdd::NewWasm(alloc(), lhs
, rhs
, type
);
550 bool mustPreserveNaN(MIRType type
) {
551 return IsFloatingPointType(type
) && !moduleEnv().isAsmJS();
554 MDefinition
* sub(MDefinition
* lhs
, MDefinition
* rhs
, MIRType type
) {
559 // wasm can't fold x - 0.0 because of NaN with custom payloads.
560 MSub
* ins
= MSub::NewWasm(alloc(), lhs
, rhs
, type
, mustPreserveNaN(type
));
565 MDefinition
* nearbyInt(MDefinition
* input
, RoundingMode roundingMode
) {
570 auto* ins
= MNearbyInt::New(alloc(), input
, input
->type(), roundingMode
);
575 MDefinition
* minMax(MDefinition
* lhs
, MDefinition
* rhs
, MIRType type
,
581 if (mustPreserveNaN(type
)) {
582 // Convert signaling NaN to quiet NaNs.
583 MDefinition
* zero
= constantZeroOfValType(ValType::fromMIRType(type
));
584 lhs
= sub(lhs
, zero
, type
);
585 rhs
= sub(rhs
, zero
, type
);
588 MMinMax
* ins
= MMinMax::NewWasm(alloc(), lhs
, rhs
, type
, isMax
);
593 MDefinition
* mul(MDefinition
* lhs
, MDefinition
* rhs
, MIRType type
,
599 // wasm can't fold x * 1.0 because of NaN with custom payloads.
601 MMul::NewWasm(alloc(), lhs
, rhs
, type
, mode
, mustPreserveNaN(type
));
606 MDefinition
* div(MDefinition
* lhs
, MDefinition
* rhs
, MIRType type
,
611 bool trapOnError
= !moduleEnv().isAsmJS();
612 if (!unsignd
&& type
== MIRType::Int32
) {
613 // Enforce the signedness of the operation by coercing the operands
614 // to signed. Otherwise, operands that "look" unsigned to Ion but
615 // are not unsigned to Baldr (eg, unsigned right shifts) may lead to
616 // the operation being executed unsigned. Applies to mod() as well.
618 // Do this for Int32 only since Int64 is not subject to the same
621 // Note the offsets passed to MWasmBuiltinTruncateToInt32 are wrong here,
622 // but it doesn't matter: they're not codegen'd to calls since inputs
623 // already are int32.
624 auto* lhs2
= createTruncateToInt32(lhs
);
625 curBlock_
->add(lhs2
);
627 auto* rhs2
= createTruncateToInt32(rhs
);
628 curBlock_
->add(rhs2
);
632 // For x86 and arm we implement i64 div via c++ builtin.
633 // A call to c++ builtin requires instance pointer.
634 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
635 if (type
== MIRType::Int64
) {
637 MWasmBuiltinDivI64::New(alloc(), lhs
, rhs
, instancePointer_
, unsignd
,
638 trapOnError
, bytecodeOffset());
644 auto* ins
= MDiv::New(alloc(), lhs
, rhs
, type
, unsignd
, trapOnError
,
645 bytecodeOffset(), mustPreserveNaN(type
));
650 MInstruction
* createTruncateToInt32(MDefinition
* op
) {
651 if (op
->type() == MIRType::Double
|| op
->type() == MIRType::Float32
) {
652 return MWasmBuiltinTruncateToInt32::New(alloc(), op
, instancePointer_
);
655 return MTruncateToInt32::New(alloc(), op
);
658 MDefinition
* mod(MDefinition
* lhs
, MDefinition
* rhs
, MIRType type
,
663 bool trapOnError
= !moduleEnv().isAsmJS();
664 if (!unsignd
&& type
== MIRType::Int32
) {
665 // See block comment in div().
666 auto* lhs2
= createTruncateToInt32(lhs
);
667 curBlock_
->add(lhs2
);
669 auto* rhs2
= createTruncateToInt32(rhs
);
670 curBlock_
->add(rhs2
);
674 // For x86 and arm we implement i64 mod via c++ builtin.
675 // A call to c++ builtin requires instance pointer.
676 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
677 if (type
== MIRType::Int64
) {
679 MWasmBuiltinModI64::New(alloc(), lhs
, rhs
, instancePointer_
, unsignd
,
680 trapOnError
, bytecodeOffset());
686 // Should be handled separately because we call BuiltinThunk for this case
687 // and so, need to add the dependency from instancePointer.
688 if (type
== MIRType::Double
) {
689 auto* ins
= MWasmBuiltinModD::New(alloc(), lhs
, rhs
, instancePointer_
,
690 type
, bytecodeOffset());
695 auto* ins
= MMod::New(alloc(), lhs
, rhs
, type
, unsignd
, trapOnError
,
701 MDefinition
* bitnot(MDefinition
* op
) {
705 auto* ins
= MBitNot::New(alloc(), op
);
710 MDefinition
* select(MDefinition
* trueExpr
, MDefinition
* falseExpr
,
711 MDefinition
* condExpr
) {
715 auto* ins
= MWasmSelect::New(alloc(), trueExpr
, falseExpr
, condExpr
);
720 MDefinition
* extendI32(MDefinition
* op
, bool isUnsigned
) {
724 auto* ins
= MExtendInt32ToInt64::New(alloc(), op
, isUnsigned
);
729 MDefinition
* signExtend(MDefinition
* op
, uint32_t srcSize
,
730 uint32_t targetSize
) {
735 switch (targetSize
) {
737 MSignExtendInt32::Mode mode
;
740 mode
= MSignExtendInt32::Byte
;
743 mode
= MSignExtendInt32::Half
;
746 MOZ_CRASH("Bad sign extension");
748 ins
= MSignExtendInt32::New(alloc(), op
, mode
);
752 MSignExtendInt64::Mode mode
;
755 mode
= MSignExtendInt64::Byte
;
758 mode
= MSignExtendInt64::Half
;
761 mode
= MSignExtendInt64::Word
;
764 MOZ_CRASH("Bad sign extension");
766 ins
= MSignExtendInt64::New(alloc(), op
, mode
);
770 MOZ_CRASH("Bad sign extension");
777 MDefinition
* convertI64ToFloatingPoint(MDefinition
* op
, MIRType type
,
782 #if defined(JS_CODEGEN_ARM)
783 auto* ins
= MBuiltinInt64ToFloatingPoint::New(
784 alloc(), op
, instancePointer_
, type
, bytecodeOffset(), isUnsigned
);
786 auto* ins
= MInt64ToFloatingPoint::New(alloc(), op
, type
, bytecodeOffset(),
793 MDefinition
* rotate(MDefinition
* input
, MDefinition
* count
, MIRType type
,
798 auto* ins
= MRotate::New(alloc(), input
, count
, type
, left
);
804 MDefinition
* truncate(MDefinition
* op
, TruncFlags flags
) {
808 auto* ins
= T::New(alloc(), op
, flags
, bytecodeOffset());
813 #if defined(JS_CODEGEN_ARM)
814 MDefinition
* truncateWithInstance(MDefinition
* op
, TruncFlags flags
) {
818 auto* ins
= MWasmBuiltinTruncateToInt64::New(alloc(), op
, instancePointer_
,
819 flags
, bytecodeOffset());
825 MDefinition
* compare(MDefinition
* lhs
, MDefinition
* rhs
, JSOp op
,
826 MCompare::CompareType type
) {
830 auto* ins
= MCompare::NewWasm(alloc(), lhs
, rhs
, op
, type
);
835 void assign(unsigned slot
, MDefinition
* def
) {
839 curBlock_
->setSlot(info().localSlot(slot
), def
);
842 MDefinition
* compareIsNull(MDefinition
* ref
, JSOp compareOp
) {
843 MDefinition
* nullVal
= constantNullRef();
847 return compare(ref
, nullVal
, compareOp
, MCompare::Compare_WasmAnyRef
);
850 [[nodiscard
]] bool refAsNonNull(MDefinition
* ref
) {
855 auto* ins
= MWasmTrapIfNull::New(
856 alloc(), ref
, wasm::Trap::NullPointerDereference
, bytecodeOffset());
862 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
863 [[nodiscard
]] bool brOnNull(uint32_t relativeDepth
, const DefVector
& values
,
864 const ResultType
& type
, MDefinition
* condition
) {
869 MBasicBlock
* fallthroughBlock
= nullptr;
870 if (!newBlock(curBlock_
, &fallthroughBlock
)) {
874 MDefinition
* check
= compareIsNull(condition
, JSOp::Eq
);
878 MTest
* test
= MTest::New(alloc(), check
, nullptr, fallthroughBlock
);
880 !addControlFlowPatch(test
, relativeDepth
, MTest::TrueBranchIndex
)) {
884 if (!pushDefs(values
)) {
888 curBlock_
->end(test
);
889 curBlock_
= fallthroughBlock
;
893 [[nodiscard
]] bool brOnNonNull(uint32_t relativeDepth
,
894 const DefVector
& values
,
895 const ResultType
& type
,
896 MDefinition
* condition
) {
901 MBasicBlock
* fallthroughBlock
= nullptr;
902 if (!newBlock(curBlock_
, &fallthroughBlock
)) {
906 MDefinition
* check
= compareIsNull(condition
, JSOp::Ne
);
910 MTest
* test
= MTest::New(alloc(), check
, nullptr, fallthroughBlock
);
912 !addControlFlowPatch(test
, relativeDepth
, MTest::TrueBranchIndex
)) {
916 if (!pushDefs(values
)) {
920 curBlock_
->end(test
);
921 curBlock_
= fallthroughBlock
;
925 #endif // ENABLE_WASM_FUNCTION_REFERENCES
927 #ifdef ENABLE_WASM_GC
928 MDefinition
* refI31(MDefinition
* input
) {
929 auto* ins
= MWasmNewI31Ref::New(alloc(), input
);
934 MDefinition
* i31Get(MDefinition
* input
, FieldWideningOp wideningOp
) {
935 auto* ins
= MWasmI31RefGet::New(alloc(), input
, wideningOp
);
939 #endif // ENABLE_WASM_GC
941 #ifdef ENABLE_WASM_SIMD
942 // About Wasm SIMD as supported by Ion:
944 // The expectation is that Ion will only ever support SIMD on x86 and x64,
945 // since ARMv7 will cease to be a tier-1 platform soon, and MIPS64 will never
948 // The division of the operations into MIR nodes reflects that expectation,
949 // and is a good fit for x86/x64. Should the expectation change we'll
950 // possibly want to re-architect the SIMD support to be a little more general.
952 // Most SIMD operations map directly to a single MIR node that ultimately ends
953 // up being expanded in the macroassembler.
955 // Some SIMD operations that do have a complete macroassembler expansion are
956 // open-coded into multiple MIR nodes here; in some cases that's just
957 // convenience, in other cases it may also allow them to benefit from Ion
958 // optimizations. The reason for the expansions will be documented by a
961 // (v128,v128) -> v128 effect-free binary operations
962 MDefinition
* binarySimd128(MDefinition
* lhs
, MDefinition
* rhs
,
963 bool commutative
, SimdOp op
) {
968 MOZ_ASSERT(lhs
->type() == MIRType::Simd128
&&
969 rhs
->type() == MIRType::Simd128
);
971 auto* ins
= MWasmBinarySimd128::New(alloc(), lhs
, rhs
, commutative
, op
);
976 // (v128,i32) -> v128 effect-free shift operations
977 MDefinition
* shiftSimd128(MDefinition
* lhs
, MDefinition
* rhs
, SimdOp op
) {
982 MOZ_ASSERT(lhs
->type() == MIRType::Simd128
&&
983 rhs
->type() == MIRType::Int32
);
986 if (MacroAssembler::MustMaskShiftCountSimd128(op
, &maskBits
)) {
987 MDefinition
* mask
= constantI32(maskBits
);
988 auto* rhs2
= MBitAnd::New(alloc(), rhs
, mask
, MIRType::Int32
);
989 curBlock_
->add(rhs2
);
993 auto* ins
= MWasmShiftSimd128::New(alloc(), lhs
, rhs
, op
);
998 // (v128,scalar,imm) -> v128
999 MDefinition
* replaceLaneSimd128(MDefinition
* lhs
, MDefinition
* rhs
,
1000 uint32_t laneIndex
, SimdOp op
) {
1005 MOZ_ASSERT(lhs
->type() == MIRType::Simd128
);
1007 auto* ins
= MWasmReplaceLaneSimd128::New(alloc(), lhs
, rhs
, laneIndex
, op
);
1008 curBlock_
->add(ins
);
1012 // (scalar) -> v128 effect-free unary operations
1013 MDefinition
* scalarToSimd128(MDefinition
* src
, SimdOp op
) {
1018 auto* ins
= MWasmScalarToSimd128::New(alloc(), src
, op
);
1019 curBlock_
->add(ins
);
1023 // (v128) -> v128 effect-free unary operations
1024 MDefinition
* unarySimd128(MDefinition
* src
, SimdOp op
) {
1029 MOZ_ASSERT(src
->type() == MIRType::Simd128
);
1030 auto* ins
= MWasmUnarySimd128::New(alloc(), src
, op
);
1031 curBlock_
->add(ins
);
1035 // (v128, imm) -> scalar effect-free unary operations
1036 MDefinition
* reduceSimd128(MDefinition
* src
, SimdOp op
, ValType outType
,
1042 MOZ_ASSERT(src
->type() == MIRType::Simd128
);
1044 MWasmReduceSimd128::New(alloc(), src
, op
, outType
.toMIRType(), imm
);
1045 curBlock_
->add(ins
);
1049 // (v128, v128, v128) -> v128 effect-free operations
1050 MDefinition
* ternarySimd128(MDefinition
* v0
, MDefinition
* v1
, MDefinition
* v2
,
1056 MOZ_ASSERT(v0
->type() == MIRType::Simd128
&&
1057 v1
->type() == MIRType::Simd128
&&
1058 v2
->type() == MIRType::Simd128
);
1060 auto* ins
= MWasmTernarySimd128::New(alloc(), v0
, v1
, v2
, op
);
1061 curBlock_
->add(ins
);
1065 // (v128, v128, imm_v128) -> v128 effect-free operations
1066 MDefinition
* shuffleSimd128(MDefinition
* v1
, MDefinition
* v2
, V128 control
) {
1071 MOZ_ASSERT(v1
->type() == MIRType::Simd128
);
1072 MOZ_ASSERT(v2
->type() == MIRType::Simd128
);
1073 auto* ins
= BuildWasmShuffleSimd128(
1074 alloc(), reinterpret_cast<int8_t*>(control
.bytes
), v1
, v2
);
1075 curBlock_
->add(ins
);
1079 // Also see below for SIMD memory references
1081 #endif // ENABLE_WASM_SIMD
1083 /************************************************ Linear memory accesses */
1085 // For detailed information about memory accesses, see "Linear memory
1086 // addresses and bounds checking" in WasmMemory.cpp.
1089 // If the platform does not have a HeapReg, load the memory base from
1091 MDefinition
* maybeLoadMemoryBase(uint32_t memoryIndex
) {
1092 #ifdef WASM_HAS_HEAPREG
1093 if (memoryIndex
== 0) {
1097 return memoryBase(memoryIndex
);
1101 // A value holding the memory base, whether that's HeapReg or some other
1103 MDefinition
* memoryBase(uint32_t memoryIndex
) {
1104 AliasSet aliases
= !moduleEnv_
.memories
[memoryIndex
].canMovingGrow()
1106 : AliasSet::Load(AliasSet::WasmHeapMeta
);
1107 #ifdef WASM_HAS_HEAPREG
1108 if (memoryIndex
== 0) {
1109 MWasmHeapReg
* base
= MWasmHeapReg::New(alloc(), aliases
);
1110 curBlock_
->add(base
);
1116 ? Instance::offsetOfMemory0Base()
1117 : (Instance::offsetInData(
1118 moduleEnv_
.offsetOfMemoryInstanceData(memoryIndex
) +
1119 offsetof(MemoryInstanceData
, base
)));
1120 MWasmLoadInstance
* base
= MWasmLoadInstance::New(
1121 alloc(), instancePointer_
, offset
, MIRType::Pointer
, aliases
);
1122 curBlock_
->add(base
);
1127 // If the bounds checking strategy requires it, load the bounds check limit
1128 // from the instance.
1129 MWasmLoadInstance
* maybeLoadBoundsCheckLimit(uint32_t memoryIndex
,
1131 MOZ_ASSERT(type
== MIRType::Int32
|| type
== MIRType::Int64
);
1132 if (moduleEnv_
.hugeMemoryEnabled(memoryIndex
)) {
1137 ? Instance::offsetOfMemory0BoundsCheckLimit()
1138 : (Instance::offsetInData(
1139 moduleEnv_
.offsetOfMemoryInstanceData(memoryIndex
) +
1140 offsetof(MemoryInstanceData
, boundsCheckLimit
)));
1141 AliasSet aliases
= !moduleEnv_
.memories
[memoryIndex
].canMovingGrow()
1143 : AliasSet::Load(AliasSet::WasmHeapMeta
);
1144 auto* load
= MWasmLoadInstance::New(alloc(), instancePointer_
, offset
, type
,
1146 curBlock_
->add(load
);
1150 // Return true if the access requires an alignment check. If so, sets
1151 // *mustAdd to true if the offset must be added to the pointer before
1153 bool needAlignmentCheck(MemoryAccessDesc
* access
, MDefinition
* base
,
1155 MOZ_ASSERT(!*mustAdd
);
1157 // asm.js accesses are always aligned and need no checks.
1158 if (moduleEnv_
.isAsmJS() || !access
->isAtomic()) {
1162 // If the EA is known and aligned it will need no checks.
1163 if (base
->isConstant()) {
1164 // We only care about the low bits, so overflow is OK, as is chopping off
1165 // the high bits of an i64 pointer.
1167 if (isMem64(access
->memoryIndex())) {
1168 ptr
= uint32_t(base
->toConstant()->toInt64());
1170 ptr
= base
->toConstant()->toInt32();
1172 if (((ptr
+ access
->offset64()) & (access
->byteSize() - 1)) == 0) {
1177 // If the offset is aligned then the EA is just the pointer, for
1178 // the purposes of this check.
1179 *mustAdd
= (access
->offset64() & (access
->byteSize() - 1)) != 0;
1183 // Fold a constant base into the offset and make the base 0, provided the
1184 // offset stays below the guard limit. The reason for folding the base into
1185 // the offset rather than vice versa is that a small offset can be ignored
1186 // by both explicit bounds checking and bounds check elimination.
1187 void foldConstantPointer(MemoryAccessDesc
* access
, MDefinition
** base
) {
1188 uint32_t offsetGuardLimit
= GetMaxOffsetGuardLimit(
1189 moduleEnv_
.hugeMemoryEnabled(access
->memoryIndex()));
1191 if ((*base
)->isConstant()) {
1192 uint64_t basePtr
= 0;
1193 if (isMem64(access
->memoryIndex())) {
1194 basePtr
= uint64_t((*base
)->toConstant()->toInt64());
1196 basePtr
= uint64_t(int64_t((*base
)->toConstant()->toInt32()));
1199 uint64_t offset
= access
->offset64();
1201 if (offset
< offsetGuardLimit
&& basePtr
< offsetGuardLimit
- offset
) {
1202 offset
+= uint32_t(basePtr
);
1203 access
->setOffset32(uint32_t(offset
));
1204 *base
= isMem64(access
->memoryIndex()) ? constantI64(int64_t(0))
1210 // If the offset must be added because it is large or because the true EA must
1211 // be checked, compute the effective address, trapping on overflow.
1212 void maybeComputeEffectiveAddress(MemoryAccessDesc
* access
,
1213 MDefinition
** base
, bool mustAddOffset
) {
1214 uint32_t offsetGuardLimit
= GetMaxOffsetGuardLimit(
1215 moduleEnv_
.hugeMemoryEnabled(access
->memoryIndex()));
1217 if (access
->offset64() >= offsetGuardLimit
||
1218 access
->offset64() > UINT32_MAX
|| mustAddOffset
||
1219 !JitOptions
.wasmFoldOffsets
) {
1220 *base
= computeEffectiveAddress(*base
, access
);
1224 MWasmLoadInstance
* needBoundsCheck(uint32_t memoryIndex
) {
1226 // For 32-bit base pointers:
1228 // If the bounds check uses the full 64 bits of the bounds check limit, then
1229 // the base pointer must be zero-extended to 64 bits before checking and
1230 // wrapped back to 32-bits after Spectre masking. (And it's important that
1231 // the value we end up with has flowed through the Spectre mask.)
1233 // If the memory's max size is known to be smaller than 64K pages exactly,
1234 // we can use a 32-bit check and avoid extension and wrapping.
1235 static_assert(0x100000000 % PageSize
== 0);
1236 bool mem32LimitIs64Bits
=
1237 isMem32(memoryIndex
) &&
1238 !moduleEnv_
.memories
[memoryIndex
].boundsCheckLimitIs32Bits() &&
1239 MaxMemoryPages(moduleEnv_
.memories
[memoryIndex
].indexType()) >=
1240 Pages(0x100000000 / PageSize
);
1242 // On 32-bit platforms we have no more than 2GB memory and the limit for a
1243 // 32-bit base pointer is never a 64-bit value.
1244 bool mem32LimitIs64Bits
= false;
1246 return maybeLoadBoundsCheckLimit(memoryIndex
,
1247 mem32LimitIs64Bits
|| isMem64(memoryIndex
)
1252 void performBoundsCheck(uint32_t memoryIndex
, MDefinition
** base
,
1253 MWasmLoadInstance
* boundsCheckLimit
) {
1254 // At the outset, actualBase could be the result of pretty much any integer
1255 // operation, or it could be the load of an integer constant. If its type
1256 // is i32, we may assume the value has a canonical representation for the
1257 // platform, see doc block in MacroAssembler.h.
1258 MDefinition
* actualBase
= *base
;
1260 // Extend an i32 index value to perform a 64-bit bounds check if the memory
1261 // can be 4GB or larger.
1262 bool extendAndWrapIndex
=
1263 isMem32(memoryIndex
) && boundsCheckLimit
->type() == MIRType::Int64
;
1264 if (extendAndWrapIndex
) {
1265 auto* extended
= MWasmExtendU32Index::New(alloc(), actualBase
);
1266 curBlock_
->add(extended
);
1267 actualBase
= extended
;
1270 auto target
= memoryIndex
== 0 ? MWasmBoundsCheck::Memory0
1271 : MWasmBoundsCheck::Unknown
;
1272 auto* ins
= MWasmBoundsCheck::New(alloc(), actualBase
, boundsCheckLimit
,
1273 bytecodeOffset(), target
);
1274 curBlock_
->add(ins
);
1277 // If we're masking, then we update *base to create a dependency chain
1278 // through the masked index. But we will first need to wrap the index
1279 // value if it was extended above.
1280 if (JitOptions
.spectreIndexMasking
) {
1281 if (extendAndWrapIndex
) {
1282 auto* wrapped
= MWasmWrapU32Index::New(alloc(), actualBase
);
1283 curBlock_
->add(wrapped
);
1284 actualBase
= wrapped
;
1290 // Perform all necessary checking before a wasm heap access, based on the
1291 // attributes of the access and base pointer.
1293 // For 64-bit indices on platforms that are limited to indices that fit into
1294 // 32 bits (all 32-bit platforms and mips64), this returns a bounds-checked
1295 // `base` that has type Int32. Lowering code depends on this and will assert
1296 // that the base has this type. See the end of this function.
1298 void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc
* access
,
1299 MDefinition
** base
) {
1300 MOZ_ASSERT(!inDeadCode());
1301 MOZ_ASSERT(!moduleEnv_
.isAsmJS());
1303 // Attempt to fold an offset into a constant base pointer so as to simplify
1304 // the addressing expression. This may update *base.
1305 foldConstantPointer(access
, base
);
1307 // Determine whether an alignment check is needed and whether the offset
1308 // must be checked too.
1309 bool mustAddOffsetForAlignmentCheck
= false;
1310 bool alignmentCheck
=
1311 needAlignmentCheck(access
, *base
, &mustAddOffsetForAlignmentCheck
);
1313 // If bounds checking or alignment checking requires it, compute the
1314 // effective address: add the offset into the pointer and trap on overflow.
1315 // This may update *base.
1316 maybeComputeEffectiveAddress(access
, base
, mustAddOffsetForAlignmentCheck
);
1318 // Emit the alignment check if necessary; it traps if it fails.
1319 if (alignmentCheck
) {
1320 curBlock_
->add(MWasmAlignmentCheck::New(
1321 alloc(), *base
, access
->byteSize(), bytecodeOffset()));
1324 // Emit the bounds check if necessary; it traps if it fails. This may
1326 MWasmLoadInstance
* boundsCheckLimit
=
1327 needBoundsCheck(access
->memoryIndex());
1328 if (boundsCheckLimit
) {
1329 performBoundsCheck(access
->memoryIndex(), base
, boundsCheckLimit
);
1333 if (isMem64(access
->memoryIndex())) {
1334 // We must have had an explicit bounds check (or one was elided if it was
1335 // proved redundant), and on 32-bit systems the index will for sure fit in
1336 // 32 bits: the max memory is 2GB. So chop the index down to 32-bit to
1337 // simplify the back-end.
1338 MOZ_ASSERT((*base
)->type() == MIRType::Int64
);
1339 MOZ_ASSERT(!moduleEnv_
.hugeMemoryEnabled(access
->memoryIndex()));
1340 auto* chopped
= MWasmWrapU32Index::New(alloc(), *base
);
1341 MOZ_ASSERT(chopped
->type() == MIRType::Int32
);
1342 curBlock_
->add(chopped
);
1348 bool isSmallerAccessForI64(ValType result
, const MemoryAccessDesc
* access
) {
1349 if (result
== ValType::I64
&& access
->byteSize() <= 4) {
1350 // These smaller accesses should all be zero-extending.
1351 MOZ_ASSERT(!isSignedIntType(access
->type()));
1358 bool isMem32(uint32_t memoryIndex
) {
1359 return moduleEnv_
.memories
[memoryIndex
].indexType() == IndexType::I32
;
1361 bool isMem64(uint32_t memoryIndex
) {
1362 return moduleEnv_
.memories
[memoryIndex
].indexType() == IndexType::I64
;
1364 bool hugeMemoryEnabled(uint32_t memoryIndex
) {
1365 return moduleEnv_
.hugeMemoryEnabled(memoryIndex
);
1368 // Add the offset into the pointer to yield the EA; trap on overflow.
1369 MDefinition
* computeEffectiveAddress(MDefinition
* base
,
1370 MemoryAccessDesc
* access
) {
1374 uint64_t offset
= access
->offset64();
1378 auto* ins
= MWasmAddOffset::New(alloc(), base
, offset
, bytecodeOffset());
1379 curBlock_
->add(ins
);
1380 access
->clearOffset();
1384 MDefinition
* load(MDefinition
* base
, MemoryAccessDesc
* access
,
1390 MDefinition
* memoryBase
= maybeLoadMemoryBase(access
->memoryIndex());
1391 MInstruction
* load
= nullptr;
1392 if (moduleEnv_
.isAsmJS()) {
1393 MOZ_ASSERT(access
->offset64() == 0);
1394 MWasmLoadInstance
* boundsCheckLimit
=
1395 maybeLoadBoundsCheckLimit(access
->memoryIndex(), MIRType::Int32
);
1396 load
= MAsmJSLoadHeap::New(alloc(), memoryBase
, base
, boundsCheckLimit
,
1399 checkOffsetAndAlignmentAndBounds(access
, &base
);
1401 MOZ_ASSERT(base
->type() == MIRType::Int32
);
1403 load
= MWasmLoad::New(alloc(), memoryBase
, base
, *access
,
1404 result
.toMIRType());
1409 curBlock_
->add(load
);
1413 void store(MDefinition
* base
, MemoryAccessDesc
* access
, MDefinition
* v
) {
1418 MDefinition
* memoryBase
= maybeLoadMemoryBase(access
->memoryIndex());
1419 MInstruction
* store
= nullptr;
1420 if (moduleEnv_
.isAsmJS()) {
1421 MOZ_ASSERT(access
->offset64() == 0);
1422 MWasmLoadInstance
* boundsCheckLimit
=
1423 maybeLoadBoundsCheckLimit(access
->memoryIndex(), MIRType::Int32
);
1424 store
= MAsmJSStoreHeap::New(alloc(), memoryBase
, base
, boundsCheckLimit
,
1427 checkOffsetAndAlignmentAndBounds(access
, &base
);
1429 MOZ_ASSERT(base
->type() == MIRType::Int32
);
1431 store
= MWasmStore::New(alloc(), memoryBase
, base
, *access
, v
);
1436 curBlock_
->add(store
);
1439 MDefinition
* atomicCompareExchangeHeap(MDefinition
* base
,
1440 MemoryAccessDesc
* access
,
1441 ValType result
, MDefinition
* oldv
,
1442 MDefinition
* newv
) {
1447 checkOffsetAndAlignmentAndBounds(access
, &base
);
1449 MOZ_ASSERT(base
->type() == MIRType::Int32
);
1452 if (isSmallerAccessForI64(result
, access
)) {
1454 MWrapInt64ToInt32::New(alloc(), oldv
, /*bottomHalf=*/true);
1455 curBlock_
->add(cvtOldv
);
1459 MWrapInt64ToInt32::New(alloc(), newv
, /*bottomHalf=*/true);
1460 curBlock_
->add(cvtNewv
);
1464 MDefinition
* memoryBase
= maybeLoadMemoryBase(access
->memoryIndex());
1465 MInstruction
* cas
= MWasmCompareExchangeHeap::New(
1466 alloc(), bytecodeOffset(), memoryBase
, base
, *access
, oldv
, newv
,
1471 curBlock_
->add(cas
);
1473 if (isSmallerAccessForI64(result
, access
)) {
1474 cas
= MExtendInt32ToInt64::New(alloc(), cas
, true);
1475 curBlock_
->add(cas
);
1481 MDefinition
* atomicExchangeHeap(MDefinition
* base
, MemoryAccessDesc
* access
,
1482 ValType result
, MDefinition
* value
) {
1487 checkOffsetAndAlignmentAndBounds(access
, &base
);
1489 MOZ_ASSERT(base
->type() == MIRType::Int32
);
1492 if (isSmallerAccessForI64(result
, access
)) {
1494 MWrapInt64ToInt32::New(alloc(), value
, /*bottomHalf=*/true);
1495 curBlock_
->add(cvtValue
);
1499 MDefinition
* memoryBase
= maybeLoadMemoryBase(access
->memoryIndex());
1500 MInstruction
* xchg
=
1501 MWasmAtomicExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase
,
1502 base
, *access
, value
, instancePointer_
);
1506 curBlock_
->add(xchg
);
1508 if (isSmallerAccessForI64(result
, access
)) {
1509 xchg
= MExtendInt32ToInt64::New(alloc(), xchg
, true);
1510 curBlock_
->add(xchg
);
1516 MDefinition
* atomicBinopHeap(AtomicOp op
, MDefinition
* base
,
1517 MemoryAccessDesc
* access
, ValType result
,
1518 MDefinition
* value
) {
1523 checkOffsetAndAlignmentAndBounds(access
, &base
);
1525 MOZ_ASSERT(base
->type() == MIRType::Int32
);
1528 if (isSmallerAccessForI64(result
, access
)) {
1530 MWrapInt64ToInt32::New(alloc(), value
, /*bottomHalf=*/true);
1531 curBlock_
->add(cvtValue
);
1535 MDefinition
* memoryBase
= maybeLoadMemoryBase(access
->memoryIndex());
1536 MInstruction
* binop
=
1537 MWasmAtomicBinopHeap::New(alloc(), bytecodeOffset(), op
, memoryBase
,
1538 base
, *access
, value
, instancePointer_
);
1542 curBlock_
->add(binop
);
1544 if (isSmallerAccessForI64(result
, access
)) {
1545 binop
= MExtendInt32ToInt64::New(alloc(), binop
, true);
1546 curBlock_
->add(binop
);
1552 #ifdef ENABLE_WASM_SIMD
1553 MDefinition
* loadSplatSimd128(Scalar::Type viewType
,
1554 const LinearMemoryAddress
<MDefinition
*>& addr
,
1555 wasm::SimdOp splatOp
) {
1560 MemoryAccessDesc
access(addr
.memoryIndex
, viewType
, addr
.align
, addr
.offset
,
1561 bytecodeIfNotAsmJS(),
1562 hugeMemoryEnabled(addr
.memoryIndex
));
1564 // Generate better code (on x86)
1565 // If AVX2 is enabled, more broadcast operators are available.
1566 if (viewType
== Scalar::Float64
1567 # if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
1568 || (js::jit::CPUInfo::IsAVX2Present() &&
1569 (viewType
== Scalar::Uint8
|| viewType
== Scalar::Uint16
||
1570 viewType
== Scalar::Float32
))
1573 access
.setSplatSimd128Load();
1574 return load(addr
.base
, &access
, ValType::V128
);
1577 ValType resultType
= ValType::I32
;
1578 if (viewType
== Scalar::Float32
) {
1579 resultType
= ValType::F32
;
1580 splatOp
= wasm::SimdOp::F32x4Splat
;
1582 auto* scalar
= load(addr
.base
, &access
, resultType
);
1583 if (!inDeadCode() && !scalar
) {
1586 return scalarToSimd128(scalar
, splatOp
);
1589 MDefinition
* loadExtendSimd128(const LinearMemoryAddress
<MDefinition
*>& addr
,
1595 // Generate better code (on x86) by loading as a double with an
1596 // operation that sign extends directly.
1597 MemoryAccessDesc
access(addr
.memoryIndex
, Scalar::Float64
, addr
.align
,
1598 addr
.offset
, bytecodeIfNotAsmJS(),
1599 hugeMemoryEnabled(addr
.memoryIndex
));
1600 access
.setWidenSimd128Load(op
);
1601 return load(addr
.base
, &access
, ValType::V128
);
1604 MDefinition
* loadZeroSimd128(Scalar::Type viewType
, size_t numBytes
,
1605 const LinearMemoryAddress
<MDefinition
*>& addr
) {
1610 MemoryAccessDesc
access(addr
.memoryIndex
, viewType
, addr
.align
, addr
.offset
,
1611 bytecodeIfNotAsmJS(),
1612 hugeMemoryEnabled(addr
.memoryIndex
));
1613 access
.setZeroExtendSimd128Load();
1614 return load(addr
.base
, &access
, ValType::V128
);
1617 MDefinition
* loadLaneSimd128(uint32_t laneSize
,
1618 const LinearMemoryAddress
<MDefinition
*>& addr
,
1619 uint32_t laneIndex
, MDefinition
* src
) {
1624 MemoryAccessDesc
access(addr
.memoryIndex
, Scalar::Simd128
, addr
.align
,
1625 addr
.offset
, bytecodeIfNotAsmJS(),
1626 hugeMemoryEnabled(addr
.memoryIndex
));
1627 MDefinition
* memoryBase
= maybeLoadMemoryBase(access
.memoryIndex());
1628 MDefinition
* base
= addr
.base
;
1629 MOZ_ASSERT(!moduleEnv_
.isAsmJS());
1630 checkOffsetAndAlignmentAndBounds(&access
, &base
);
1632 MOZ_ASSERT(base
->type() == MIRType::Int32
);
1634 MInstruction
* load
= MWasmLoadLaneSimd128::New(
1635 alloc(), memoryBase
, base
, access
, laneSize
, laneIndex
, src
);
1639 curBlock_
->add(load
);
1643 void storeLaneSimd128(uint32_t laneSize
,
1644 const LinearMemoryAddress
<MDefinition
*>& addr
,
1645 uint32_t laneIndex
, MDefinition
* src
) {
1649 MemoryAccessDesc
access(addr
.memoryIndex
, Scalar::Simd128
, addr
.align
,
1650 addr
.offset
, bytecodeIfNotAsmJS(),
1651 hugeMemoryEnabled(addr
.memoryIndex
));
1652 MDefinition
* memoryBase
= maybeLoadMemoryBase(access
.memoryIndex());
1653 MDefinition
* base
= addr
.base
;
1654 MOZ_ASSERT(!moduleEnv_
.isAsmJS());
1655 checkOffsetAndAlignmentAndBounds(&access
, &base
);
1657 MOZ_ASSERT(base
->type() == MIRType::Int32
);
1659 MInstruction
* store
= MWasmStoreLaneSimd128::New(
1660 alloc(), memoryBase
, base
, access
, laneSize
, laneIndex
, src
);
1664 curBlock_
->add(store
);
1666 #endif // ENABLE_WASM_SIMD
1668 /************************************************ Global variable accesses */
1670 MDefinition
* loadGlobalVar(unsigned instanceDataOffset
, bool isConst
,
1671 bool isIndirect
, MIRType type
) {
1678 // Pull a pointer to the value out of Instance::globalArea, then
1679 // load from that pointer. Note that the pointer is immutable
1680 // even though the value it points at may change, hence the use of
1681 // |true| for the first node's |isConst| value, irrespective of
1682 // the |isConst| formal parameter to this method. The latter
1683 // applies to the denoted value as a whole.
1684 auto* cellPtr
= MWasmLoadInstanceDataField::New(
1685 alloc(), MIRType::Pointer
, instanceDataOffset
,
1686 /*isConst=*/true, instancePointer_
);
1687 curBlock_
->add(cellPtr
);
1688 load
= MWasmLoadGlobalCell::New(alloc(), type
, cellPtr
);
1690 // Pull the value directly out of Instance::globalArea.
1691 load
= MWasmLoadInstanceDataField::New(alloc(), type
, instanceDataOffset
,
1692 isConst
, instancePointer_
);
1694 curBlock_
->add(load
);
1698 [[nodiscard
]] bool storeGlobalVar(uint32_t lineOrBytecode
,
1699 uint32_t instanceDataOffset
,
1700 bool isIndirect
, MDefinition
* v
) {
1706 // Pull a pointer to the value out of Instance::globalArea, then
1707 // store through that pointer.
1708 auto* valueAddr
= MWasmLoadInstanceDataField::New(
1709 alloc(), MIRType::Pointer
, instanceDataOffset
,
1710 /*isConst=*/true, instancePointer_
);
1711 curBlock_
->add(valueAddr
);
1713 // Handle a store to a ref-typed field specially
1714 if (v
->type() == MIRType::WasmAnyRef
) {
1715 // Load the previous value for the post-write barrier
1717 MWasmLoadGlobalCell::New(alloc(), MIRType::WasmAnyRef
, valueAddr
);
1718 curBlock_
->add(prevValue
);
1720 // Store the new value
1722 MWasmStoreRef::New(alloc(), instancePointer_
, valueAddr
,
1723 /*valueOffset=*/0, v
, AliasSet::WasmGlobalCell
,
1724 WasmPreBarrierKind::Normal
);
1725 curBlock_
->add(store
);
1727 // Call the post-write barrier
1728 return postBarrierPrecise(lineOrBytecode
, valueAddr
, prevValue
);
1731 auto* store
= MWasmStoreGlobalCell::New(alloc(), v
, valueAddr
);
1732 curBlock_
->add(store
);
1735 // Or else store the value directly in Instance::globalArea.
1737 // Handle a store to a ref-typed field specially
1738 if (v
->type() == MIRType::WasmAnyRef
) {
1739 // Compute the address of the ref-typed global
1740 auto* valueAddr
= MWasmDerivedPointer::New(
1741 alloc(), instancePointer_
,
1742 wasm::Instance::offsetInData(instanceDataOffset
));
1743 curBlock_
->add(valueAddr
);
1745 // Load the previous value for the post-write barrier
1747 MWasmLoadGlobalCell::New(alloc(), MIRType::WasmAnyRef
, valueAddr
);
1748 curBlock_
->add(prevValue
);
1750 // Store the new value
1752 MWasmStoreRef::New(alloc(), instancePointer_
, valueAddr
,
1753 /*valueOffset=*/0, v
, AliasSet::WasmInstanceData
,
1754 WasmPreBarrierKind::Normal
);
1755 curBlock_
->add(store
);
1757 // Call the post-write barrier
1758 return postBarrierPrecise(lineOrBytecode
, valueAddr
, prevValue
);
1761 auto* store
= MWasmStoreInstanceDataField::New(alloc(), instanceDataOffset
,
1762 v
, instancePointer_
);
1763 curBlock_
->add(store
);
1767 MDefinition
* loadTableField(uint32_t tableIndex
, unsigned fieldOffset
,
1769 uint32_t instanceDataOffset
= wasm::Instance::offsetInData(
1770 moduleEnv_
.offsetOfTableInstanceData(tableIndex
) + fieldOffset
);
1772 MWasmLoadInstance::New(alloc(), instancePointer_
, instanceDataOffset
,
1773 type
, AliasSet::Load(AliasSet::WasmTableMeta
));
1774 curBlock_
->add(load
);
1778 MDefinition
* loadTableLength(uint32_t tableIndex
) {
1779 return loadTableField(tableIndex
, offsetof(TableInstanceData
, length
),
1783 MDefinition
* loadTableElements(uint32_t tableIndex
) {
1784 return loadTableField(tableIndex
, offsetof(TableInstanceData
, elements
),
1788 MDefinition
* tableGetAnyRef(uint32_t tableIndex
, MDefinition
* index
) {
1789 // Load the table length and perform a bounds check with spectre index
1791 auto* length
= loadTableLength(tableIndex
);
1792 auto* check
= MWasmBoundsCheck::New(
1793 alloc(), index
, length
, bytecodeOffset(), MWasmBoundsCheck::Unknown
);
1794 curBlock_
->add(check
);
1795 if (JitOptions
.spectreIndexMasking
) {
1799 // Load the table elements and load the element
1800 auto* elements
= loadTableElements(tableIndex
);
1801 auto* element
= MWasmLoadTableElement::New(alloc(), elements
, index
);
1802 curBlock_
->add(element
);
1806 [[nodiscard
]] bool tableSetAnyRef(uint32_t tableIndex
, MDefinition
* index
,
1808 uint32_t lineOrBytecode
) {
1809 // Load the table length and perform a bounds check with spectre index
1811 auto* length
= loadTableLength(tableIndex
);
1812 auto* check
= MWasmBoundsCheck::New(
1813 alloc(), index
, length
, bytecodeOffset(), MWasmBoundsCheck::Unknown
);
1814 curBlock_
->add(check
);
1815 if (JitOptions
.spectreIndexMasking
) {
1819 // Load the table elements
1820 auto* elements
= loadTableElements(tableIndex
);
1822 // Load the previous value
1823 auto* prevValue
= MWasmLoadTableElement::New(alloc(), elements
, index
);
1824 curBlock_
->add(prevValue
);
1826 // Compute the value's location for the post barrier
1828 MWasmDerivedIndexPointer::New(alloc(), elements
, index
, ScalePointer
);
1829 curBlock_
->add(loc
);
1831 // Store the new value
1832 auto* store
= MWasmStoreRef::New(
1833 alloc(), instancePointer_
, loc
, /*valueOffset=*/0, value
,
1834 AliasSet::WasmTableElement
, WasmPreBarrierKind::Normal
);
1835 curBlock_
->add(store
);
1837 // Perform the post barrier
1838 return postBarrierPrecise(lineOrBytecode
, loc
, prevValue
);
1841 void addInterruptCheck() {
1846 MWasmInterruptCheck::New(alloc(), instancePointer_
, bytecodeOffset()));
1849 // Perform a post-write barrier to update the generational store buffer. This
1850 // version will remove a previous store buffer entry if it is no longer
1852 [[nodiscard
]] bool postBarrierPrecise(uint32_t lineOrBytecode
,
1853 MDefinition
* valueAddr
,
1854 MDefinition
* value
) {
1855 return emitInstanceCall2(lineOrBytecode
, SASigPostBarrierPrecise
, valueAddr
,
1859 // Perform a post-write barrier to update the generational store buffer. This
1860 // version will remove a previous store buffer entry if it is no longer
1862 [[nodiscard
]] bool postBarrierPreciseWithOffset(uint32_t lineOrBytecode
,
1863 MDefinition
* valueBase
,
1864 uint32_t valueOffset
,
1865 MDefinition
* value
) {
1866 MDefinition
* valueOffsetDef
= constantI32(int32_t(valueOffset
));
1867 if (!valueOffsetDef
) {
1870 return emitInstanceCall3(lineOrBytecode
, SASigPostBarrierPreciseWithOffset
,
1871 valueBase
, valueOffsetDef
, value
);
1874 // Perform a post-write barrier to update the generational store buffer. This
1875 // version is the most efficient and only requires the address to store the
1876 // value and the new value. It does not remove a previous store buffer entry
1877 // if it is no longer needed, you must use a precise post-write barrier for
1879 [[nodiscard
]] bool postBarrier(uint32_t lineOrBytecode
, MDefinition
* object
,
1880 MDefinition
* valueBase
, uint32_t valueOffset
,
1881 MDefinition
* newValue
) {
1882 auto* barrier
= MWasmPostWriteBarrier::New(
1883 alloc(), instancePointer_
, object
, valueBase
, valueOffset
, newValue
);
1887 curBlock_
->add(barrier
);
1891 /***************************************************************** Calls */
1893 // The IonMonkey backend maintains a single stack offset (from the stack
1894 // pointer to the base of the frame) by adding the total amount of spill
1895 // space required plus the maximum stack required for argument passing.
1896 // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
1897 // manually accumulate, for the entire function, the maximum required stack
1898 // space for argument passing. (This is passed to the CodeGenerator via
1899 // MIRGenerator::maxWasmStackArgBytes.) This is just be the maximum of the
1900 // stack space required for each individual call (as determined by the call
1903 // Operations that modify a CallCompileState.
1905 [[nodiscard
]] bool passInstance(MIRType instanceType
,
1906 CallCompileState
* args
) {
1911 // Should only pass an instance once. And it must be a non-GC pointer.
1912 MOZ_ASSERT(args
->instanceArg_
== ABIArg());
1913 MOZ_ASSERT(instanceType
== MIRType::Pointer
);
1914 args
->instanceArg_
= args
->abi_
.next(MIRType::Pointer
);
1918 // Do not call this directly. Call one of the passArg() variants instead.
1919 [[nodiscard
]] bool passArgWorker(MDefinition
* argDef
, MIRType type
,
1920 CallCompileState
* call
) {
1921 ABIArg arg
= call
->abi_
.next(type
);
1922 switch (arg
.kind()) {
1923 #ifdef JS_CODEGEN_REGISTER_PAIR
1924 case ABIArg::GPR_PAIR
: {
1926 MWrapInt64ToInt32::New(alloc(), argDef
, /* bottomHalf = */ true);
1927 curBlock_
->add(mirLow
);
1929 MWrapInt64ToInt32::New(alloc(), argDef
, /* bottomHalf = */ false);
1930 curBlock_
->add(mirHigh
);
1931 return call
->regArgs_
.append(
1932 MWasmCallBase::Arg(AnyRegister(arg
.gpr64().low
), mirLow
)) &&
1933 call
->regArgs_
.append(
1934 MWasmCallBase::Arg(AnyRegister(arg
.gpr64().high
), mirHigh
));
1939 return call
->regArgs_
.append(MWasmCallBase::Arg(arg
.reg(), argDef
));
1940 case ABIArg::Stack
: {
1942 MWasmStackArg::New(alloc(), arg
.offsetFromArgBase(), argDef
);
1943 curBlock_
->add(mir
);
1946 case ABIArg::Uninitialized
:
1947 MOZ_ASSERT_UNREACHABLE("Uninitialized ABIArg kind");
1949 MOZ_CRASH("Unknown ABIArg kind.");
1952 template <typename SpanT
>
1953 [[nodiscard
]] bool passArgs(const DefVector
& argDefs
, SpanT types
,
1954 CallCompileState
* call
) {
1955 MOZ_ASSERT(argDefs
.length() == types
.size());
1956 for (uint32_t i
= 0; i
< argDefs
.length(); i
++) {
1957 MDefinition
* def
= argDefs
[i
];
1958 ValType type
= types
[i
];
1959 if (!passArg(def
, type
, call
)) {
1966 [[nodiscard
]] bool passArg(MDefinition
* argDef
, MIRType type
,
1967 CallCompileState
* call
) {
1971 return passArgWorker(argDef
, type
, call
);
1974 [[nodiscard
]] bool passArg(MDefinition
* argDef
, ValType type
,
1975 CallCompileState
* call
) {
1979 return passArgWorker(argDef
, type
.toMIRType(), call
);
1982 void markReturnCall(CallCompileState
* call
) { call
->returnCall
= true; }
1984 // If the call returns results on the stack, prepare a stack area to receive
1985 // them, and pass the address of the stack area to the callee as an additional
1987 [[nodiscard
]] bool passStackResultAreaCallArg(const ResultType
& resultType
,
1988 CallCompileState
* call
) {
1992 ABIResultIter
iter(resultType
);
1993 while (!iter
.done() && iter
.cur().inRegister()) {
1997 // No stack results.
2001 auto* stackResultArea
= MWasmStackResultArea::New(alloc());
2002 if (!stackResultArea
) {
2005 if (!stackResultArea
->init(alloc(), iter
.remaining())) {
2008 for (uint32_t base
= iter
.index(); !iter
.done(); iter
.next()) {
2009 MWasmStackResultArea::StackResult
loc(iter
.cur().stackOffset(),
2010 iter
.cur().type().toMIRType());
2011 stackResultArea
->initResult(iter
.index() - base
, loc
);
2013 curBlock_
->add(stackResultArea
);
2014 MDefinition
* def
= call
->returnCall
? (MDefinition
*)stackResultPointer_
2015 : (MDefinition
*)stackResultArea
;
2016 if (!passArg(def
, MIRType::Pointer
, call
)) {
2019 call
->stackResultArea_
= stackResultArea
;
2023 [[nodiscard
]] bool finishCall(CallCompileState
* call
) {
2028 if (!call
->regArgs_
.append(
2029 MWasmCallBase::Arg(AnyRegister(InstanceReg
), instancePointer_
))) {
2033 uint32_t stackBytes
= call
->abi_
.stackBytesConsumedSoFar();
2035 maxStackArgBytes_
= std::max(maxStackArgBytes_
, stackBytes
);
2039 // Wrappers for creating various kinds of calls.
2041 [[nodiscard
]] bool collectUnaryCallResult(MIRType type
,
2042 MDefinition
** result
) {
2045 case MIRType::Int32
:
2046 def
= MWasmRegisterResult::New(alloc(), MIRType::Int32
, ReturnReg
);
2048 case MIRType::Int64
:
2049 def
= MWasmRegister64Result::New(alloc(), ReturnReg64
);
2051 case MIRType::Float32
:
2052 def
= MWasmFloatRegisterResult::New(alloc(), type
, ReturnFloat32Reg
);
2054 case MIRType::Double
:
2055 def
= MWasmFloatRegisterResult::New(alloc(), type
, ReturnDoubleReg
);
2057 #ifdef ENABLE_WASM_SIMD
2058 case MIRType::Simd128
:
2059 def
= MWasmFloatRegisterResult::New(alloc(), type
, ReturnSimd128Reg
);
2062 case MIRType::WasmAnyRef
:
2063 def
= MWasmRegisterResult::New(alloc(), MIRType::WasmAnyRef
, ReturnReg
);
2066 MOZ_CRASH("unexpected MIRType result for builtin call");
2073 curBlock_
->add(def
);
2079 [[nodiscard
]] bool collectCallResults(const ResultType
& type
,
2080 MWasmStackResultArea
* stackResultArea
,
2081 DefVector
* results
) {
2082 if (!results
->reserve(type
.length())) {
2086 // The result iterator goes in the order in which results would be popped
2087 // off; we want the order in which they would be pushed.
2088 ABIResultIter
iter(type
);
2089 uint32_t stackResultCount
= 0;
2090 while (!iter
.done()) {
2091 if (iter
.cur().onStack()) {
2097 for (iter
.switchToPrev(); !iter
.done(); iter
.prev()) {
2098 if (!mirGen().ensureBallast()) {
2101 const ABIResult
& result
= iter
.cur();
2103 if (result
.inRegister()) {
2104 switch (result
.type().kind()) {
2105 case wasm::ValType::I32
:
2107 MWasmRegisterResult::New(alloc(), MIRType::Int32
, result
.gpr());
2109 case wasm::ValType::I64
:
2110 def
= MWasmRegister64Result::New(alloc(), result
.gpr64());
2112 case wasm::ValType::F32
:
2113 def
= MWasmFloatRegisterResult::New(alloc(), MIRType::Float32
,
2116 case wasm::ValType::F64
:
2117 def
= MWasmFloatRegisterResult::New(alloc(), MIRType::Double
,
2120 case wasm::ValType::Ref
:
2121 def
= MWasmRegisterResult::New(alloc(), MIRType::WasmAnyRef
,
2124 case wasm::ValType::V128
:
2125 #ifdef ENABLE_WASM_SIMD
2126 def
= MWasmFloatRegisterResult::New(alloc(), MIRType::Simd128
,
2129 return this->iter().fail("Ion has no SIMD support yet");
2133 MOZ_ASSERT(stackResultArea
);
2134 MOZ_ASSERT(stackResultCount
);
2135 uint32_t idx
= --stackResultCount
;
2136 def
= MWasmStackResult::New(alloc(), stackResultArea
, idx
);
2142 curBlock_
->add(def
);
2143 results
->infallibleAppend(def
);
2146 MOZ_ASSERT(results
->length() == type
.length());
2151 [[nodiscard
]] bool catchableCall(const CallSiteDesc
& desc
,
2152 const CalleeDesc
& callee
,
2153 const MWasmCallBase::Args
& args
,
2154 const ArgTypeVector
& argTypes
,
2155 MDefinition
* indexOrRef
= nullptr) {
2156 MWasmCallTryDesc tryDesc
;
2157 if (!beginTryCall(&tryDesc
)) {
2162 if (tryDesc
.inTry
) {
2163 ins
= MWasmCallCatchable::New(alloc(), desc
, callee
, args
,
2164 StackArgAreaSizeUnaligned(argTypes
),
2165 tryDesc
, indexOrRef
);
2167 ins
= MWasmCallUncatchable::New(alloc(), desc
, callee
, args
,
2168 StackArgAreaSizeUnaligned(argTypes
),
2174 curBlock_
->add(ins
);
2176 return finishTryCall(&tryDesc
);
2179 [[nodiscard
]] bool callDirect(const FuncType
& funcType
, uint32_t funcIndex
,
2180 uint32_t lineOrBytecode
,
2181 const CallCompileState
& call
,
2182 DefVector
* results
) {
2183 MOZ_ASSERT(!inDeadCode());
2185 CallSiteDesc
desc(lineOrBytecode
, CallSiteDesc::Func
);
2186 ResultType resultType
= ResultType::Vector(funcType
.results());
2187 auto callee
= CalleeDesc::function(funcIndex
);
2188 ArgTypeVector
args(funcType
);
2190 if (!catchableCall(desc
, callee
, call
.regArgs_
, args
)) {
2193 return collectCallResults(resultType
, call
.stackResultArea_
, results
);
2196 [[nodiscard
]] bool returnCallDirect(const FuncType
& funcType
,
2198 uint32_t lineOrBytecode
,
2199 const CallCompileState
& call
,
2200 DefVector
* results
) {
2201 MOZ_ASSERT(!inDeadCode());
2203 CallSiteDesc
desc(lineOrBytecode
, CallSiteDesc::ReturnFunc
);
2204 auto callee
= CalleeDesc::function(funcIndex
);
2205 ArgTypeVector
args(funcType
);
2207 auto ins
= MWasmReturnCall::New(alloc(), desc
, callee
, call
.regArgs_
,
2208 StackArgAreaSizeUnaligned(args
), nullptr);
2212 curBlock_
->end(ins
);
2213 curBlock_
= nullptr;
2217 [[nodiscard
]] bool returnCallImport(unsigned globalDataOffset
,
2218 uint32_t lineOrBytecode
,
2219 const CallCompileState
& call
,
2220 const FuncType
& funcType
,
2221 DefVector
* results
) {
2222 MOZ_ASSERT(!inDeadCode());
2224 CallSiteDesc
desc(lineOrBytecode
, CallSiteDesc::Import
);
2225 auto callee
= CalleeDesc::import(globalDataOffset
);
2226 ArgTypeVector
args(funcType
);
2228 auto* ins
= MWasmReturnCall::New(alloc(), desc
, callee
, call
.regArgs_
,
2229 StackArgAreaSizeUnaligned(args
), nullptr);
2233 curBlock_
->end(ins
);
2234 curBlock_
= nullptr;
2238 [[nodiscard
]] bool returnCallIndirect(uint32_t funcTypeIndex
,
2239 uint32_t tableIndex
, MDefinition
* index
,
2240 uint32_t lineOrBytecode
,
2241 const CallCompileState
& call
,
2242 DefVector
* results
) {
2243 MOZ_ASSERT(!inDeadCode());
2245 const FuncType
& funcType
= (*moduleEnv_
.types
)[funcTypeIndex
].funcType();
2246 CallIndirectId callIndirectId
=
2247 CallIndirectId::forFuncType(moduleEnv_
, funcTypeIndex
);
2250 MOZ_ASSERT(callIndirectId
.kind() != CallIndirectIdKind::AsmJS
);
2251 const TableDesc
& table
= moduleEnv_
.tables
[tableIndex
];
2253 CalleeDesc::wasmTable(moduleEnv_
, table
, tableIndex
, callIndirectId
);
2255 CallSiteDesc
desc(lineOrBytecode
, CallSiteDesc::Indirect
);
2256 ArgTypeVector
args(funcType
);
2258 auto* ins
= MWasmReturnCall::New(alloc(), desc
, callee
, call
.regArgs_
,
2259 StackArgAreaSizeUnaligned(args
), index
);
2263 curBlock_
->end(ins
);
2264 curBlock_
= nullptr;
2268 [[nodiscard
]] bool callIndirect(uint32_t funcTypeIndex
, uint32_t tableIndex
,
2269 MDefinition
* index
, uint32_t lineOrBytecode
,
2270 const CallCompileState
& call
,
2271 DefVector
* results
) {
2272 MOZ_ASSERT(!inDeadCode());
2274 const FuncType
& funcType
= (*moduleEnv_
.types
)[funcTypeIndex
].funcType();
2275 CallIndirectId callIndirectId
=
2276 CallIndirectId::forFuncType(moduleEnv_
, funcTypeIndex
);
2279 if (moduleEnv_
.isAsmJS()) {
2280 MOZ_ASSERT(tableIndex
== 0);
2281 MOZ_ASSERT(callIndirectId
.kind() == CallIndirectIdKind::AsmJS
);
2282 uint32_t tableIndex
= moduleEnv_
.asmJSSigToTableIndex
[funcTypeIndex
];
2283 const TableDesc
& table
= moduleEnv_
.tables
[tableIndex
];
2284 MOZ_ASSERT(IsPowerOfTwo(table
.initialLength
));
2286 MDefinition
* mask
= constantI32(int32_t(table
.initialLength
- 1));
2287 MBitAnd
* maskedIndex
= MBitAnd::New(alloc(), index
, mask
, MIRType::Int32
);
2288 curBlock_
->add(maskedIndex
);
2290 index
= maskedIndex
;
2291 callee
= CalleeDesc::asmJSTable(moduleEnv_
, tableIndex
);
2293 MOZ_ASSERT(callIndirectId
.kind() != CallIndirectIdKind::AsmJS
);
2294 const TableDesc
& table
= moduleEnv_
.tables
[tableIndex
];
2296 CalleeDesc::wasmTable(moduleEnv_
, table
, tableIndex
, callIndirectId
);
2299 CallSiteDesc
desc(lineOrBytecode
, CallSiteDesc::Indirect
);
2300 ArgTypeVector
args(funcType
);
2301 ResultType resultType
= ResultType::Vector(funcType
.results());
2303 if (!catchableCall(desc
, callee
, call
.regArgs_
, args
, index
)) {
2306 return collectCallResults(resultType
, call
.stackResultArea_
, results
);
2309 [[nodiscard
]] bool callImport(unsigned instanceDataOffset
,
2310 uint32_t lineOrBytecode
,
2311 const CallCompileState
& call
,
2312 const FuncType
& funcType
, DefVector
* results
) {
2313 MOZ_ASSERT(!inDeadCode());
2315 CallSiteDesc
desc(lineOrBytecode
, CallSiteDesc::Import
);
2316 auto callee
= CalleeDesc::import(instanceDataOffset
);
2317 ArgTypeVector
args(funcType
);
2318 ResultType resultType
= ResultType::Vector(funcType
.results());
2320 if (!catchableCall(desc
, callee
, call
.regArgs_
, args
)) {
2323 return collectCallResults(resultType
, call
.stackResultArea_
, results
);
2326 [[nodiscard
]] bool builtinCall(const SymbolicAddressSignature
& builtin
,
2327 uint32_t lineOrBytecode
,
2328 const CallCompileState
& call
,
2329 MDefinition
** def
) {
2335 MOZ_ASSERT(builtin
.failureMode
== FailureMode::Infallible
);
2337 CallSiteDesc
desc(lineOrBytecode
, CallSiteDesc::Symbolic
);
2338 auto callee
= CalleeDesc::builtin(builtin
.identity
);
2339 auto* ins
= MWasmCallUncatchable::New(alloc(), desc
, callee
, call
.regArgs_
,
2340 StackArgAreaSizeUnaligned(builtin
));
2345 curBlock_
->add(ins
);
2347 return collectUnaryCallResult(builtin
.retType
, def
);
2350 [[nodiscard
]] bool builtinInstanceMethodCall(
2351 const SymbolicAddressSignature
& builtin
, uint32_t lineOrBytecode
,
2352 const CallCompileState
& call
, MDefinition
** def
= nullptr) {
2353 MOZ_ASSERT_IF(!def
, builtin
.retType
== MIRType::None
);
2361 CallSiteDesc
desc(lineOrBytecode
, CallSiteDesc::Symbolic
);
2362 auto* ins
= MWasmCallUncatchable::NewBuiltinInstanceMethodCall(
2363 alloc(), desc
, builtin
.identity
, builtin
.failureMode
, call
.instanceArg_
,
2364 call
.regArgs_
, StackArgAreaSizeUnaligned(builtin
));
2369 curBlock_
->add(ins
);
2371 return def
? collectUnaryCallResult(builtin
.retType
, def
) : true;
2374 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
2375 [[nodiscard
]] bool callRef(const FuncType
& funcType
, MDefinition
* ref
,
2376 uint32_t lineOrBytecode
,
2377 const CallCompileState
& call
, DefVector
* results
) {
2378 MOZ_ASSERT(!inDeadCode());
2380 CalleeDesc callee
= CalleeDesc::wasmFuncRef();
2382 CallSiteDesc
desc(lineOrBytecode
, CallSiteDesc::FuncRef
);
2383 ArgTypeVector
args(funcType
);
2384 ResultType resultType
= ResultType::Vector(funcType
.results());
2386 if (!catchableCall(desc
, callee
, call
.regArgs_
, args
, ref
)) {
2389 return collectCallResults(resultType
, call
.stackResultArea_
, results
);
2392 # ifdef ENABLE_WASM_TAIL_CALLS
2393 [[nodiscard
]] bool returnCallRef(const FuncType
& funcType
, MDefinition
* ref
,
2394 uint32_t lineOrBytecode
,
2395 const CallCompileState
& call
,
2396 DefVector
* results
) {
2397 MOZ_ASSERT(!inDeadCode());
2399 CalleeDesc callee
= CalleeDesc::wasmFuncRef();
2401 CallSiteDesc
desc(lineOrBytecode
, CallSiteDesc::FuncRef
);
2402 ArgTypeVector
args(funcType
);
2404 auto* ins
= MWasmReturnCall::New(alloc(), desc
, callee
, call
.regArgs_
,
2405 StackArgAreaSizeUnaligned(args
), ref
);
2409 curBlock_
->end(ins
);
2410 curBlock_
= nullptr;
2414 # endif // ENABLE_WASM_TAIL_CALLS
2416 #endif // ENABLE_WASM_FUNCTION_REFERENCES
2418 /*********************************************** Control flow generation */
2420 inline bool inDeadCode() const { return curBlock_
== nullptr; }
2422 [[nodiscard
]] bool returnValues(const DefVector
& values
) {
2427 if (values
.empty()) {
2428 curBlock_
->end(MWasmReturnVoid::New(alloc(), instancePointer_
));
2430 ResultType resultType
= ResultType::Vector(funcType().results());
2431 ABIResultIter
iter(resultType
);
2432 // Switch to iterate in FIFO order instead of the default LIFO.
2433 while (!iter
.done()) {
2436 iter
.switchToPrev();
2437 for (uint32_t i
= 0; !iter
.done(); iter
.prev(), i
++) {
2438 if (!mirGen().ensureBallast()) {
2441 const ABIResult
& result
= iter
.cur();
2442 if (result
.onStack()) {
2443 MOZ_ASSERT(iter
.remaining() > 1);
2444 if (result
.type().isRefRepr()) {
2445 auto* store
= MWasmStoreRef::New(
2446 alloc(), instancePointer_
, stackResultPointer_
,
2447 result
.stackOffset(), values
[i
], AliasSet::WasmStackResult
,
2448 WasmPreBarrierKind::None
);
2449 curBlock_
->add(store
);
2451 auto* store
= MWasmStoreStackResult::New(
2452 alloc(), stackResultPointer_
, result
.stackOffset(), values
[i
]);
2453 curBlock_
->add(store
);
2456 MOZ_ASSERT(iter
.remaining() == 1);
2457 MOZ_ASSERT(i
+ 1 == values
.length());
2459 MWasmReturn::New(alloc(), values
[i
], instancePointer_
));
2463 curBlock_
= nullptr;
2467 void unreachableTrap() {
2473 MWasmTrap::New(alloc(), wasm::Trap::Unreachable
, bytecodeOffset());
2474 curBlock_
->end(ins
);
2475 curBlock_
= nullptr;
2479 static uint32_t numPushed(MBasicBlock
* block
) {
2480 return block
->stackDepth() - block
->info().firstStackSlot();
2484 [[nodiscard
]] bool pushDefs(const DefVector
& defs
) {
2488 MOZ_ASSERT(numPushed(curBlock_
) == 0);
2489 if (!curBlock_
->ensureHasSlots(defs
.length())) {
2492 for (MDefinition
* def
: defs
) {
2493 MOZ_ASSERT(def
->type() != MIRType::None
);
2494 curBlock_
->push(def
);
2499 [[nodiscard
]] bool popPushedDefs(DefVector
* defs
) {
2500 size_t n
= numPushed(curBlock_
);
2501 if (!defs
->resizeUninitialized(n
)) {
2504 for (; n
> 0; n
--) {
2505 MDefinition
* def
= curBlock_
->pop();
2506 MOZ_ASSERT(def
->type() != MIRType::Value
);
2507 (*defs
)[n
- 1] = def
;
2513 [[nodiscard
]] bool addJoinPredecessor(const DefVector
& defs
,
2514 MBasicBlock
** joinPred
) {
2515 *joinPred
= curBlock_
;
2519 return pushDefs(defs
);
2523 [[nodiscard
]] bool branchAndStartThen(MDefinition
* cond
,
2524 MBasicBlock
** elseBlock
) {
2526 *elseBlock
= nullptr;
2528 MBasicBlock
* thenBlock
;
2529 if (!newBlock(curBlock_
, &thenBlock
)) {
2532 if (!newBlock(curBlock_
, elseBlock
)) {
2536 curBlock_
->end(MTest::New(alloc(), cond
, thenBlock
, *elseBlock
));
2538 curBlock_
= thenBlock
;
2539 mirGraph().moveBlockToEnd(curBlock_
);
2542 return startBlock();
2545 [[nodiscard
]] bool switchToElse(MBasicBlock
* elseBlock
,
2546 MBasicBlock
** thenJoinPred
) {
2548 if (!finishBlock(&values
)) {
2553 *thenJoinPred
= nullptr;
2555 if (!addJoinPredecessor(values
, thenJoinPred
)) {
2559 curBlock_
= elseBlock
;
2560 mirGraph().moveBlockToEnd(curBlock_
);
2563 return startBlock();
2566 [[nodiscard
]] bool joinIfElse(MBasicBlock
* thenJoinPred
, DefVector
* defs
) {
2568 if (!finishBlock(&values
)) {
2572 if (!thenJoinPred
&& inDeadCode()) {
2576 MBasicBlock
* elseJoinPred
;
2577 if (!addJoinPredecessor(values
, &elseJoinPred
)) {
2581 mozilla::Array
<MBasicBlock
*, 2> blocks
;
2582 size_t numJoinPreds
= 0;
2584 blocks
[numJoinPreds
++] = thenJoinPred
;
2587 blocks
[numJoinPreds
++] = elseJoinPred
;
2590 if (numJoinPreds
== 0) {
2595 if (!goToNewBlock(blocks
[0], &join
)) {
2598 for (size_t i
= 1; i
< numJoinPreds
; ++i
) {
2599 if (!goToExistingBlock(blocks
[i
], join
)) {
2605 return popPushedDefs(defs
);
2608 [[nodiscard
]] bool startBlock() {
2609 MOZ_ASSERT_IF(blockDepth_
< blockPatches_
.length(),
2610 blockPatches_
[blockDepth_
].empty());
2615 [[nodiscard
]] bool finishBlock(DefVector
* defs
) {
2616 MOZ_ASSERT(blockDepth_
);
2617 uint32_t topLabel
= --blockDepth_
;
2618 return bindBranches(topLabel
, defs
);
2621 [[nodiscard
]] bool startLoop(MBasicBlock
** loopHeader
, size_t paramCount
) {
2622 *loopHeader
= nullptr;
2631 // Create the loop header.
2632 MOZ_ASSERT(curBlock_
->loopDepth() == loopDepth_
- 1);
2633 *loopHeader
= MBasicBlock::New(mirGraph(), info(), curBlock_
,
2634 MBasicBlock::PENDING_LOOP_HEADER
);
2639 (*loopHeader
)->setLoopDepth(loopDepth_
);
2640 mirGraph().addBlock(*loopHeader
);
2641 curBlock_
->end(MGoto::New(alloc(), *loopHeader
));
2643 DefVector loopParams
;
2644 if (!iter().getResults(paramCount
, &loopParams
)) {
2647 for (size_t i
= 0; i
< paramCount
; i
++) {
2648 MPhi
* phi
= MPhi::New(alloc(), loopParams
[i
]->type());
2652 if (!phi
->reserveLength(2)) {
2655 (*loopHeader
)->addPhi(phi
);
2656 phi
->addInput(loopParams
[i
]);
2657 loopParams
[i
] = phi
;
2659 iter().setResults(paramCount
, loopParams
);
2662 if (!goToNewBlock(*loopHeader
, &body
)) {
2670 void fixupRedundantPhis(MBasicBlock
* b
) {
2671 for (size_t i
= 0, depth
= b
->stackDepth(); i
< depth
; i
++) {
2672 MDefinition
* def
= b
->getSlot(i
);
2673 if (def
->isUnused()) {
2674 b
->setSlot(i
, def
->toPhi()->getOperand(0));
2679 [[nodiscard
]] bool setLoopBackedge(MBasicBlock
* loopEntry
,
2680 MBasicBlock
* loopBody
,
2681 MBasicBlock
* backedge
, size_t paramCount
) {
2682 if (!loopEntry
->setBackedgeWasm(backedge
, paramCount
)) {
2686 // Flag all redundant phis as unused.
2687 for (MPhiIterator phi
= loopEntry
->phisBegin(); phi
!= loopEntry
->phisEnd();
2689 MOZ_ASSERT(phi
->numOperands() == 2);
2690 if (phi
->getOperand(0) == phi
->getOperand(1)) {
2695 // Fix up phis stored in the slots Vector of pending blocks.
2696 for (ControlFlowPatchVector
& patches
: blockPatches_
) {
2697 for (ControlFlowPatch
& p
: patches
) {
2698 MBasicBlock
* block
= p
.ins
->block();
2699 if (block
->loopDepth() >= loopEntry
->loopDepth()) {
2700 fixupRedundantPhis(block
);
2705 // The loop body, if any, might be referencing recycled phis too.
2707 fixupRedundantPhis(loopBody
);
2710 // Pending jumps to an enclosing try-catch may reference the recycled phis.
2711 // We have to search above all enclosing try blocks, as a delegate may move
2713 for (uint32_t depth
= 0; depth
< iter().controlStackDepth(); depth
++) {
2714 LabelKind kind
= iter().controlKind(depth
);
2715 if (kind
!= LabelKind::Try
&& kind
!= LabelKind::Body
) {
2718 Control
& control
= iter().controlItem(depth
);
2719 for (MControlInstruction
* patch
: control
.tryPadPatches
) {
2720 MBasicBlock
* block
= patch
->block();
2721 if (block
->loopDepth() >= loopEntry
->loopDepth()) {
2722 fixupRedundantPhis(block
);
2727 // Discard redundant phis and add to the free list.
2728 for (MPhiIterator phi
= loopEntry
->phisBegin();
2729 phi
!= loopEntry
->phisEnd();) {
2730 MPhi
* entryDef
= *phi
++;
2731 if (!entryDef
->isUnused()) {
2735 entryDef
->justReplaceAllUsesWith(entryDef
->getOperand(0));
2736 loopEntry
->discardPhi(entryDef
);
2737 mirGraph().addPhiToFreeList(entryDef
);
2744 [[nodiscard
]] bool closeLoop(MBasicBlock
* loopHeader
,
2745 DefVector
* loopResults
) {
2746 MOZ_ASSERT(blockDepth_
>= 1);
2747 MOZ_ASSERT(loopDepth_
);
2749 uint32_t headerLabel
= blockDepth_
- 1;
2752 MOZ_ASSERT(inDeadCode());
2753 MOZ_ASSERT(headerLabel
>= blockPatches_
.length() ||
2754 blockPatches_
[headerLabel
].empty());
2760 // Op::Loop doesn't have an implicit backedge so temporarily set
2761 // aside the end of the loop body to bind backedges.
2762 MBasicBlock
* loopBody
= curBlock_
;
2763 curBlock_
= nullptr;
2765 // As explained in bug 1253544, Ion apparently has an invariant that
2766 // there is only one backedge to loop headers. To handle wasm's ability
2767 // to have multiple backedges to the same loop header, we bind all those
2768 // branches as forward jumps to a single backward jump. This is
2769 // unfortunate but the optimizer is able to fold these into single jumps
2771 DefVector backedgeValues
;
2772 if (!bindBranches(headerLabel
, &backedgeValues
)) {
2776 MOZ_ASSERT(loopHeader
->loopDepth() == loopDepth_
);
2779 // We're on the loop backedge block, created by bindBranches.
2780 for (size_t i
= 0, n
= numPushed(curBlock_
); i
!= n
; i
++) {
2784 if (!pushDefs(backedgeValues
)) {
2788 MOZ_ASSERT(curBlock_
->loopDepth() == loopDepth_
);
2789 curBlock_
->end(MGoto::New(alloc(), loopHeader
));
2790 if (!setLoopBackedge(loopHeader
, loopBody
, curBlock_
,
2791 backedgeValues
.length())) {
2796 curBlock_
= loopBody
;
2800 // If the loop depth still at the inner loop body, correct it.
2801 if (curBlock_
&& curBlock_
->loopDepth() != loopDepth_
) {
2803 if (!goToNewBlock(curBlock_
, &out
)) {
2810 return inDeadCode() || popPushedDefs(loopResults
);
2813 [[nodiscard
]] bool addControlFlowPatch(MControlInstruction
* ins
,
2814 uint32_t relative
, uint32_t index
) {
2815 MOZ_ASSERT(relative
< blockDepth_
);
2816 uint32_t absolute
= blockDepth_
- 1 - relative
;
2818 if (absolute
>= blockPatches_
.length() &&
2819 !blockPatches_
.resize(absolute
+ 1)) {
2823 return blockPatches_
[absolute
].append(ControlFlowPatch(ins
, index
));
2826 [[nodiscard
]] bool br(uint32_t relativeDepth
, const DefVector
& values
) {
2831 MGoto
* jump
= MGoto::New(alloc());
2832 if (!addControlFlowPatch(jump
, relativeDepth
, MGoto::TargetIndex
)) {
2836 if (!pushDefs(values
)) {
2840 curBlock_
->end(jump
);
2841 curBlock_
= nullptr;
2845 [[nodiscard
]] bool brIf(uint32_t relativeDepth
, const DefVector
& values
,
2846 MDefinition
* condition
) {
2851 MBasicBlock
* joinBlock
= nullptr;
2852 if (!newBlock(curBlock_
, &joinBlock
)) {
2856 MTest
* test
= MTest::New(alloc(), condition
, nullptr, joinBlock
);
2857 if (!addControlFlowPatch(test
, relativeDepth
, MTest::TrueBranchIndex
)) {
2861 if (!pushDefs(values
)) {
2865 curBlock_
->end(test
);
2866 curBlock_
= joinBlock
;
2870 [[nodiscard
]] bool brTable(MDefinition
* operand
, uint32_t defaultDepth
,
2871 const Uint32Vector
& depths
,
2872 const DefVector
& values
) {
2877 size_t numCases
= depths
.length();
2878 MOZ_ASSERT(numCases
<= INT32_MAX
);
2879 MOZ_ASSERT(numCases
);
2881 MTableSwitch
* table
=
2882 MTableSwitch::New(alloc(), operand
, 0, int32_t(numCases
- 1));
2884 size_t defaultIndex
;
2885 if (!table
->addDefault(nullptr, &defaultIndex
)) {
2888 if (!addControlFlowPatch(table
, defaultDepth
, defaultIndex
)) {
2892 using IndexToCaseMap
=
2893 HashMap
<uint32_t, uint32_t, DefaultHasher
<uint32_t>, SystemAllocPolicy
>;
2895 IndexToCaseMap indexToCase
;
2896 if (!indexToCase
.put(defaultDepth
, defaultIndex
)) {
2900 for (size_t i
= 0; i
< numCases
; i
++) {
2901 if (!mirGen_
.ensureBallast()) {
2905 uint32_t depth
= depths
[i
];
2908 IndexToCaseMap::AddPtr p
= indexToCase
.lookupForAdd(depth
);
2910 if (!table
->addSuccessor(nullptr, &caseIndex
)) {
2913 if (!addControlFlowPatch(table
, depth
, caseIndex
)) {
2916 if (!indexToCase
.add(p
, depth
, caseIndex
)) {
2920 caseIndex
= p
->value();
2923 if (!table
->addCase(caseIndex
)) {
2928 if (!pushDefs(values
)) {
2932 curBlock_
->end(table
);
2933 curBlock_
= nullptr;
2938 /********************************************************** Exceptions ***/
2940 bool inTryBlock(uint32_t* relativeDepth
) {
2941 return iter().controlFindInnermost(LabelKind::Try
, relativeDepth
);
2945 uint32_t relativeDepth
;
2946 return inTryBlock(&relativeDepth
);
2949 MDefinition
* loadTag(uint32_t tagIndex
) {
2950 MWasmLoadInstanceDataField
* tag
= MWasmLoadInstanceDataField::New(
2951 alloc(), MIRType::WasmAnyRef
,
2952 moduleEnv_
.offsetOfTagInstanceData(tagIndex
), true, instancePointer_
);
2953 curBlock_
->add(tag
);
2957 void loadPendingExceptionState(MInstruction
** exception
, MInstruction
** tag
) {
2958 *exception
= MWasmLoadInstance::New(
2959 alloc(), instancePointer_
, wasm::Instance::offsetOfPendingException(),
2960 MIRType::WasmAnyRef
, AliasSet::Load(AliasSet::WasmPendingException
));
2961 curBlock_
->add(*exception
);
2963 *tag
= MWasmLoadInstance::New(
2964 alloc(), instancePointer_
,
2965 wasm::Instance::offsetOfPendingExceptionTag(), MIRType::WasmAnyRef
,
2966 AliasSet::Load(AliasSet::WasmPendingException
));
2967 curBlock_
->add(*tag
);
2970 [[nodiscard
]] bool setPendingExceptionState(MDefinition
* exception
,
2972 // Set the pending exception object
2973 auto* exceptionAddr
= MWasmDerivedPointer::New(
2974 alloc(), instancePointer_
, Instance::offsetOfPendingException());
2975 curBlock_
->add(exceptionAddr
);
2976 auto* setException
= MWasmStoreRef::New(
2977 alloc(), instancePointer_
, exceptionAddr
, /*valueOffset=*/0, exception
,
2978 AliasSet::WasmPendingException
, WasmPreBarrierKind::Normal
);
2979 curBlock_
->add(setException
);
2980 if (!postBarrierPrecise(/*lineOrBytecode=*/0, exceptionAddr
, exception
)) {
2984 // Set the pending exception tag object
2985 auto* exceptionTagAddr
= MWasmDerivedPointer::New(
2986 alloc(), instancePointer_
, Instance::offsetOfPendingExceptionTag());
2987 curBlock_
->add(exceptionTagAddr
);
2988 auto* setExceptionTag
= MWasmStoreRef::New(
2989 alloc(), instancePointer_
, exceptionTagAddr
, /*valueOffset=*/0, tag
,
2990 AliasSet::WasmPendingException
, WasmPreBarrierKind::Normal
);
2991 curBlock_
->add(setExceptionTag
);
2992 return postBarrierPrecise(/*lineOrBytecode=*/0, exceptionTagAddr
, tag
);
2995 [[nodiscard
]] bool addPadPatch(MControlInstruction
* ins
,
2996 size_t relativeTryDepth
) {
2997 Control
& tryControl
= iter().controlItem(relativeTryDepth
);
2998 ControlInstructionVector
& padPatches
= tryControl
.tryPadPatches
;
2999 return padPatches
.emplaceBack(ins
);
3002 [[nodiscard
]] bool endWithPadPatch(uint32_t relativeTryDepth
) {
3003 MGoto
* jumpToLandingPad
= MGoto::New(alloc());
3004 curBlock_
->end(jumpToLandingPad
);
3005 return addPadPatch(jumpToLandingPad
, relativeTryDepth
);
3008 [[nodiscard
]] bool delegatePadPatches(const ControlInstructionVector
& patches
,
3009 uint32_t relativeDepth
) {
3010 if (patches
.empty()) {
3014 // Find where we are delegating the pad patches to.
3015 uint32_t targetRelativeDepth
;
3016 if (!iter().controlFindInnermostFrom(LabelKind::Try
, relativeDepth
,
3017 &targetRelativeDepth
)) {
3018 MOZ_ASSERT(relativeDepth
<= blockDepth_
- 1);
3019 targetRelativeDepth
= blockDepth_
- 1;
3021 // Append the delegate's pad patches to the target's.
3022 for (MControlInstruction
* ins
: patches
) {
3023 if (!addPadPatch(ins
, targetRelativeDepth
)) {
3030 [[nodiscard
]] bool beginTryCall(MWasmCallTryDesc
* call
) {
3031 call
->inTry
= inTryBlock(&call
->relativeTryDepth
);
3035 // Allocate a try note
3036 if (!tryNotes_
.append(wasm::TryNote())) {
3039 call
->tryNoteIndex
= tryNotes_
.length() - 1;
3040 // Allocate blocks for fallthrough and exceptions
3041 return newBlock(curBlock_
, &call
->fallthroughBlock
) &&
3042 newBlock(curBlock_
, &call
->prePadBlock
);
3045 [[nodiscard
]] bool finishTryCall(MWasmCallTryDesc
* call
) {
3050 // Switch to the prePadBlock
3051 MBasicBlock
* callBlock
= curBlock_
;
3052 curBlock_
= call
->prePadBlock
;
3054 // Mark this as the landing pad for the call
3056 MWasmCallLandingPrePad::New(alloc(), callBlock
, call
->tryNoteIndex
));
3058 // End with a pending jump to the landing pad
3059 if (!endWithPadPatch(call
->relativeTryDepth
)) {
3063 // Compilation continues in the fallthroughBlock.
3064 curBlock_
= call
->fallthroughBlock
;
3068 // Create a landing pad for a try block if there are any throwing
3070 [[nodiscard
]] bool createTryLandingPadIfNeeded(Control
& control
,
3071 MBasicBlock
** landingPad
) {
3072 // If there are no pad-patches for this try control, it means there are no
3073 // instructions in the try code that could throw an exception. In this
3074 // case, all the catches are dead code, and the try code ends up equivalent
3075 // to a plain wasm block.
3076 ControlInstructionVector
& patches
= control
.tryPadPatches
;
3077 if (patches
.empty()) {
3078 *landingPad
= nullptr;
3082 // Otherwise, if there are (pad-) branches from places in the try code that
3083 // may throw an exception, bind these branches to a new landing pad
3084 // block. This is done similarly to what is done in bindBranches.
3085 MControlInstruction
* ins
= patches
[0];
3086 MBasicBlock
* pred
= ins
->block();
3087 if (!newBlock(pred
, landingPad
)) {
3090 ins
->replaceSuccessor(0, *landingPad
);
3091 for (size_t i
= 1; i
< patches
.length(); i
++) {
3093 pred
= ins
->block();
3094 if (!(*landingPad
)->addPredecessor(alloc(), pred
)) {
3097 ins
->replaceSuccessor(0, *landingPad
);
3100 // Set up the slots in the landing pad block.
3101 if (!setupLandingPadSlots(*landingPad
)) {
3105 // Clear the now bound pad patches.
3110 // Consume the pending exception state from instance, and set up the slots
3111 // of the landing pad with the exception state.
3112 [[nodiscard
]] bool setupLandingPadSlots(MBasicBlock
* landingPad
) {
3113 MBasicBlock
* prevBlock
= curBlock_
;
3114 curBlock_
= landingPad
;
3116 // Load the pending exception and tag
3117 MInstruction
* exception
;
3119 loadPendingExceptionState(&exception
, &tag
);
3121 // Clear the pending exception and tag
3122 auto* null
= constantNullRef();
3123 if (!setPendingExceptionState(null
, null
)) {
3127 // Push the exception and its tag on the stack to make them available
3128 // to the landing pad blocks.
3129 if (!landingPad
->ensureHasSlots(2)) {
3132 landingPad
->push(exception
);
3133 landingPad
->push(tag
);
3135 curBlock_
= prevBlock
;
3139 [[nodiscard
]] bool startTry(MBasicBlock
** curBlock
) {
3140 *curBlock
= curBlock_
;
3141 return startBlock();
3144 [[nodiscard
]] bool joinTryOrCatchBlock(Control
& control
) {
3145 // If the try or catch block ended with dead code, there is no need to
3146 // do any control flow join.
3151 // This is a split path which we'll need to join later, using a control
3153 MOZ_ASSERT(!curBlock_
->hasLastIns());
3154 MGoto
* jump
= MGoto::New(alloc());
3155 if (!addControlFlowPatch(jump
, 0, MGoto::TargetIndex
)) {
3159 // Finish the current block with the control flow patch instruction.
3160 curBlock_
->end(jump
);
3164 // Finish the previous block (either a try or catch block) and then setup a
3166 [[nodiscard
]] bool switchToCatch(Control
& control
, const LabelKind
& fromKind
,
3167 uint32_t tagIndex
) {
3168 // If there is no control block, then either:
3169 // - the entry of the try block is dead code, or
3170 // - there is no landing pad for the try-catch.
3171 // In either case, any catch will be dead code.
3172 if (!control
.block
) {
3173 MOZ_ASSERT(inDeadCode());
3177 // Join the previous try or catch block with a patch to the future join of
3178 // the whole try-catch block.
3179 if (!joinTryOrCatchBlock(control
)) {
3183 // If we are switching from the try block, create the landing pad. This is
3184 // guaranteed to happen once and only once before processing catch blocks.
3185 if (fromKind
== LabelKind::Try
) {
3186 MBasicBlock
* padBlock
= nullptr;
3187 if (!createTryLandingPadIfNeeded(control
, &padBlock
)) {
3190 // Set the control block for this try-catch to the landing pad.
3191 control
.block
= padBlock
;
3194 // If there is no landing pad, then this and following catches are dead
3196 if (!control
.block
) {
3197 curBlock_
= nullptr;
3201 // Switch to the landing pad.
3202 curBlock_
= control
.block
;
3204 // Handle a catch_all by immediately jumping to a new block. We require a
3205 // new block (as opposed to just emitting the catch_all code in the current
3206 // block) because rethrow requires the exception/tag to be present in the
3207 // landing pad's slots, while the catch_all block must not have the
3208 // exception/tag in slots.
3209 if (tagIndex
== CatchAllIndex
) {
3210 MBasicBlock
* catchAllBlock
= nullptr;
3211 if (!goToNewBlock(curBlock_
, &catchAllBlock
)) {
3214 // Compilation will continue in the catch_all block.
3215 curBlock_
= catchAllBlock
;
3216 // Remove the tag and exception slots from the block, they are no
3217 // longer necessary.
3223 // Handle a tagged catch by doing a compare and branch on the tag index,
3224 // jumping to a catch block if they match, or else to a fallthrough block
3225 // to continue the landing pad.
3226 MBasicBlock
* catchBlock
= nullptr;
3227 MBasicBlock
* fallthroughBlock
= nullptr;
3228 if (!newBlock(curBlock_
, &catchBlock
) ||
3229 !newBlock(curBlock_
, &fallthroughBlock
)) {
3233 // Get the exception and its tag from the slots we pushed when adding
3234 // control flow patches.
3235 MDefinition
* exceptionTag
= curBlock_
->pop();
3236 MDefinition
* exception
= curBlock_
->pop();
3238 // Branch to the catch block if the exception's tag matches this catch
3240 MDefinition
* catchTag
= loadTag(tagIndex
);
3241 MDefinition
* matchesCatchTag
=
3242 compare(exceptionTag
, catchTag
, JSOp::Eq
, MCompare::Compare_WasmAnyRef
);
3244 MTest::New(alloc(), matchesCatchTag
, catchBlock
, fallthroughBlock
));
3246 // The landing pad will continue in the fallthrough block
3247 control
.block
= fallthroughBlock
;
3249 // Set up the catch block by extracting the values from the exception
3251 curBlock_
= catchBlock
;
3253 // Remove the tag and exception slots from the block, they are no
3254 // longer necessary.
3258 // Extract the exception values for the catch block
3260 if (!loadExceptionValues(exception
, tagIndex
, &values
)) {
3263 iter().setResults(values
.length(), values
);
3267 [[nodiscard
]] bool loadExceptionValues(MDefinition
* exception
,
3268 uint32_t tagIndex
, DefVector
* values
) {
3269 SharedTagType tagType
= moduleEnv().tags
[tagIndex
].type
;
3270 const ValTypeVector
& params
= tagType
->argTypes();
3271 const TagOffsetVector
& offsets
= tagType
->argOffsets();
3273 // Get the data pointer from the exception object
3274 auto* data
= MWasmLoadField::New(
3275 alloc(), exception
, WasmExceptionObject::offsetOfData(),
3276 MIRType::Pointer
, MWideningOp::None
, AliasSet::Load(AliasSet::Any
));
3280 curBlock_
->add(data
);
3282 // Presize the values vector to the number of params
3283 if (!values
->reserve(params
.length())) {
3287 // Load each value from the data pointer
3288 for (size_t i
= 0; i
< params
.length(); i
++) {
3289 if (!mirGen_
.ensureBallast()) {
3292 auto* load
= MWasmLoadFieldKA::New(
3293 alloc(), exception
, data
, offsets
[i
], params
[i
].toMIRType(),
3294 MWideningOp::None
, AliasSet::Load(AliasSet::Any
));
3295 if (!load
|| !values
->append(load
)) {
3298 curBlock_
->add(load
);
3303 [[nodiscard
]] bool finishTryCatch(LabelKind kind
, Control
& control
,
3306 case LabelKind::Try
: {
3307 // This is a catchless try, we must delegate all throwing instructions
3308 // to the nearest enclosing try block if one exists, or else to the
3309 // body block which will handle it in emitBodyDelegateThrowPad. We
3310 // specify a relativeDepth of '1' to delegate outside of the still
3311 // active try block.
3312 uint32_t relativeDepth
= 1;
3313 if (!delegatePadPatches(control
.tryPadPatches
, relativeDepth
)) {
3318 case LabelKind::Catch
: {
3319 // This is a try without a catch_all, we must have a rethrow at the end
3320 // of the landing pad (if any).
3321 MBasicBlock
* padBlock
= control
.block
;
3323 MBasicBlock
* prevBlock
= curBlock_
;
3324 curBlock_
= padBlock
;
3325 MDefinition
* tag
= curBlock_
->pop();
3326 MDefinition
* exception
= curBlock_
->pop();
3327 if (!throwFrom(exception
, tag
)) {
3330 curBlock_
= prevBlock
;
3334 case LabelKind::CatchAll
:
3335 // This is a try with a catch_all, and requires no special handling.
3341 // Finish the block, joining the try and catch blocks
3342 return finishBlock(defs
);
3345 [[nodiscard
]] bool emitBodyDelegateThrowPad(Control
& control
) {
3346 // Create a landing pad for any throwing instructions
3347 MBasicBlock
* padBlock
;
3348 if (!createTryLandingPadIfNeeded(control
, &padBlock
)) {
3352 // If no landing pad was necessary, then we don't need to do anything here
3357 // Switch to the landing pad and rethrow the exception
3358 MBasicBlock
* prevBlock
= curBlock_
;
3359 curBlock_
= padBlock
;
3360 MDefinition
* tag
= curBlock_
->pop();
3361 MDefinition
* exception
= curBlock_
->pop();
3362 if (!throwFrom(exception
, tag
)) {
3365 curBlock_
= prevBlock
;
3369 [[nodiscard
]] bool emitNewException(MDefinition
* tag
,
3370 MDefinition
** exception
) {
3371 return emitInstanceCall1(readBytecodeOffset(), SASigExceptionNew
, tag
,
3375 [[nodiscard
]] bool emitThrow(uint32_t tagIndex
, const DefVector
& argValues
) {
3379 uint32_t bytecodeOffset
= readBytecodeOffset();
3382 MDefinition
* tag
= loadTag(tagIndex
);
3387 // Allocate an exception object
3388 MDefinition
* exception
;
3389 if (!emitNewException(tag
, &exception
)) {
3393 // Load the data pointer from the object
3394 auto* data
= MWasmLoadField::New(
3395 alloc(), exception
, WasmExceptionObject::offsetOfData(),
3396 MIRType::Pointer
, MWideningOp::None
, AliasSet::Load(AliasSet::Any
));
3400 curBlock_
->add(data
);
3402 // Store the params into the data pointer
3403 SharedTagType tagType
= moduleEnv_
.tags
[tagIndex
].type
;
3404 for (size_t i
= 0; i
< tagType
->argOffsets().length(); i
++) {
3405 if (!mirGen_
.ensureBallast()) {
3408 ValType type
= tagType
->argTypes()[i
];
3409 uint32_t offset
= tagType
->argOffsets()[i
];
3411 if (!type
.isRefRepr()) {
3412 auto* store
= MWasmStoreFieldKA::New(alloc(), exception
, data
, offset
,
3413 argValues
[i
], MNarrowingOp::None
,
3414 AliasSet::Store(AliasSet::Any
));
3418 curBlock_
->add(store
);
3422 // Store the new value
3423 auto* store
= MWasmStoreFieldRefKA::New(
3424 alloc(), instancePointer_
, exception
, data
, offset
, argValues
[i
],
3425 AliasSet::Store(AliasSet::Any
), Nothing(), WasmPreBarrierKind::None
);
3429 curBlock_
->add(store
);
3431 // Call the post-write barrier
3432 if (!postBarrier(bytecodeOffset
, exception
, data
, offset
, argValues
[i
])) {
3437 // Throw the exception
3438 return throwFrom(exception
, tag
);
3441 [[nodiscard
]] bool throwFrom(MDefinition
* exn
, MDefinition
* tag
) {
3446 // Check if there is a local catching try control, and if so, then add a
3447 // pad-patch to its tryPadPatches.
3448 uint32_t relativeTryDepth
;
3449 if (inTryBlock(&relativeTryDepth
)) {
3450 // Set the pending exception state, the landing pad will read from this
3451 if (!setPendingExceptionState(exn
, tag
)) {
3455 // End with a pending jump to the landing pad
3456 if (!endWithPadPatch(relativeTryDepth
)) {
3459 curBlock_
= nullptr;
3463 // If there is no surrounding catching block, call an instance method to
3464 // throw the exception.
3465 if (!emitInstanceCall1(readBytecodeOffset(), SASigThrowException
, exn
)) {
3470 curBlock_
= nullptr;
3474 [[nodiscard
]] bool emitRethrow(uint32_t relativeDepth
) {
3479 Control
& control
= iter().controlItem(relativeDepth
);
3480 MBasicBlock
* pad
= control
.block
;
3482 MOZ_ASSERT(pad
->nslots() > 1);
3483 MOZ_ASSERT(iter().controlKind(relativeDepth
) == LabelKind::Catch
||
3484 iter().controlKind(relativeDepth
) == LabelKind::CatchAll
);
3486 // The exception will always be the last slot in the landing pad.
3487 size_t exnSlotPosition
= pad
->nslots() - 2;
3488 MDefinition
* tag
= pad
->getSlot(exnSlotPosition
+ 1);
3489 MDefinition
* exception
= pad
->getSlot(exnSlotPosition
);
3490 MOZ_ASSERT(exception
->type() == MIRType::WasmAnyRef
&&
3491 tag
->type() == MIRType::WasmAnyRef
);
3492 return throwFrom(exception
, tag
);
3495 /*********************************************** Instance call helpers ***/
3497 // Do not call this function directly -- it offers no protection against
3498 // mis-counting of arguments. Instead call one of
3499 // ::emitInstanceCall{0,1,2,3,4,5,6}.
3501 // Emits a call to the Instance function indicated by `callee`. This is
3502 // assumed to take an Instance pointer as its first argument. The remaining
3503 // args are taken from `args`, which is assumed to hold `numArgs` entries.
3504 // If `result` is non-null, the MDefinition* holding the return value is
3505 // written to `*result`.
3506 [[nodiscard
]] bool emitInstanceCallN(uint32_t lineOrBytecode
,
3507 const SymbolicAddressSignature
& callee
,
3508 MDefinition
** args
, size_t numArgs
,
3509 MDefinition
** result
= nullptr) {
3510 // Check that the first formal parameter is plausibly an Instance pointer.
3511 MOZ_ASSERT(callee
.numArgs
> 0);
3512 MOZ_ASSERT(callee
.argTypes
[0] == MIRType::Pointer
);
3513 // Check we agree on the number of args.
3514 MOZ_ASSERT(numArgs
+ 1 /* the instance pointer */ == callee
.numArgs
);
3515 // Check we agree on whether a value is returned.
3516 MOZ_ASSERT((result
== nullptr) == (callee
.retType
== MIRType::None
));
3518 // If we are in dead code, it can happen that some of the `args` entries
3519 // are nullptr, which will look like an OOM to the logic below. So exit
3520 // at this point. `passInstance`, `passArg`, `finishCall` and
3521 // `builtinInstanceMethodCall` all do nothing in dead code, so it's valid
3530 // Check all args for signs of OOMness before attempting to allocating any
3532 for (size_t i
= 0; i
< numArgs
; i
++) {
3541 // Finally, construct the call.
3542 CallCompileState ccsArgs
;
3543 if (!passInstance(callee
.argTypes
[0], &ccsArgs
)) {
3546 for (size_t i
= 0; i
< numArgs
; i
++) {
3547 if (!passArg(args
[i
], callee
.argTypes
[i
+ 1], &ccsArgs
)) {
3551 if (!finishCall(&ccsArgs
)) {
3554 return builtinInstanceMethodCall(callee
, lineOrBytecode
, ccsArgs
, result
);
// Zero-argument convenience wrapper: calls `callee(instance)` and, if
// `result` is non-null, stores the returned MDefinition* in `*result`.
3557 [[nodiscard
]] bool emitInstanceCall0(uint32_t lineOrBytecode
,
3558 const SymbolicAddressSignature
& callee
,
3559 MDefinition
** result
= nullptr) {
// Empty arg array — emitInstanceCallN only adds the instance pointer.
3560 MDefinition
* args
[0] = {};
3561 return emitInstanceCallN(lineOrBytecode
, callee
, args
, 0, result
);
3563 [[nodiscard
]] bool emitInstanceCall1(uint32_t lineOrBytecode
,
3564 const SymbolicAddressSignature
& callee
,
3566 MDefinition
** result
= nullptr) {
3567 MDefinition
* args
[1] = {arg1
};
3568 return emitInstanceCallN(lineOrBytecode
, callee
, args
, 1, result
);
3570 [[nodiscard
]] bool emitInstanceCall2(uint32_t lineOrBytecode
,
3571 const SymbolicAddressSignature
& callee
,
3572 MDefinition
* arg1
, MDefinition
* arg2
,
3573 MDefinition
** result
= nullptr) {
3574 MDefinition
* args
[2] = {arg1
, arg2
};
3575 return emitInstanceCallN(lineOrBytecode
, callee
, args
, 2, result
);
3577 [[nodiscard
]] bool emitInstanceCall3(uint32_t lineOrBytecode
,
3578 const SymbolicAddressSignature
& callee
,
3579 MDefinition
* arg1
, MDefinition
* arg2
,
3581 MDefinition
** result
= nullptr) {
3582 MDefinition
* args
[3] = {arg1
, arg2
, arg3
};
3583 return emitInstanceCallN(lineOrBytecode
, callee
, args
, 3, result
);
3585 [[nodiscard
]] bool emitInstanceCall4(uint32_t lineOrBytecode
,
3586 const SymbolicAddressSignature
& callee
,
3587 MDefinition
* arg1
, MDefinition
* arg2
,
3588 MDefinition
* arg3
, MDefinition
* arg4
,
3589 MDefinition
** result
= nullptr) {
3590 MDefinition
* args
[4] = {arg1
, arg2
, arg3
, arg4
};
3591 return emitInstanceCallN(lineOrBytecode
, callee
, args
, 4, result
);
3593 [[nodiscard
]] bool emitInstanceCall5(uint32_t lineOrBytecode
,
3594 const SymbolicAddressSignature
& callee
,
3595 MDefinition
* arg1
, MDefinition
* arg2
,
3596 MDefinition
* arg3
, MDefinition
* arg4
,
3598 MDefinition
** result
= nullptr) {
3599 MDefinition
* args
[5] = {arg1
, arg2
, arg3
, arg4
, arg5
};
3600 return emitInstanceCallN(lineOrBytecode
, callee
, args
, 5, result
);
3602 [[nodiscard
]] bool emitInstanceCall6(uint32_t lineOrBytecode
,
3603 const SymbolicAddressSignature
& callee
,
3604 MDefinition
* arg1
, MDefinition
* arg2
,
3605 MDefinition
* arg3
, MDefinition
* arg4
,
3606 MDefinition
* arg5
, MDefinition
* arg6
,
3607 MDefinition
** result
= nullptr) {
3608 MDefinition
* args
[6] = {arg1
, arg2
, arg3
, arg4
, arg5
, arg6
};
3609 return emitInstanceCallN(lineOrBytecode
, callee
, args
, 6, result
);
3612 /******************************** WasmGC: low level load/store helpers ***/
3614 // Given a (FieldType, FieldExtension) pair, produce the (MIRType,
3615 // MWideningOp) pair that will give the correct operation for reading the
3616 // value from memory.
3617 static void fieldLoadInfoToMIR(FieldType type
, FieldWideningOp wideningOp
,
3618 MIRType
* mirType
, MWideningOp
* mirWideningOp
) {
3619 switch (type
.kind()) {
3620 case FieldType::I8
: {
3621 switch (wideningOp
) {
3622 case FieldWideningOp::Signed
:
3623 *mirType
= MIRType::Int32
;
3624 *mirWideningOp
= MWideningOp::FromS8
;
3626 case FieldWideningOp::Unsigned
:
3627 *mirType
= MIRType::Int32
;
3628 *mirWideningOp
= MWideningOp::FromU8
;
3634 case FieldType::I16
: {
3635 switch (wideningOp
) {
3636 case FieldWideningOp::Signed
:
3637 *mirType
= MIRType::Int32
;
3638 *mirWideningOp
= MWideningOp::FromS16
;
3640 case FieldWideningOp::Unsigned
:
3641 *mirType
= MIRType::Int32
;
3642 *mirWideningOp
= MWideningOp::FromU16
;
3649 switch (wideningOp
) {
3650 case FieldWideningOp::None
:
3651 *mirType
= type
.toMIRType();
3652 *mirWideningOp
= MWideningOp::None
;
3661 // Given a FieldType, produce the MNarrowingOp required for writing the
// Maps a field's storage type to the MNarrowingOp required when storing it:
// i8/i16 fields need the 32-bit value narrowed to 8/16 bits; all other
// field types are stored at full width with no narrowing.
// NOTE(review): the FieldType::I8 case label and the default label appear
// to be elided in this extraction — verify against the original file.
3663 static MNarrowingOp
fieldStoreInfoToMIR(FieldType type
) {
3664 switch (type
.kind()) {
3666 return MNarrowingOp::To8
;
3667 case FieldType::I16
:
3668 return MNarrowingOp::To16
;
3670 return MNarrowingOp::None
;
3674 // Generate a write of `value` at address `base + offset`, where `offset` is
3675 // known at JIT time. If the written value is a reftype, the previous value
3676 // at `base + offset` will be retrieved and handed off to the post-write
3677 // barrier. `keepAlive` will be referenced by the instruction so as to hold
3678 // it live (from the GC's point of view).
3679 [[nodiscard
]] bool writeGcValueAtBasePlusOffset(
3680 uint32_t lineOrBytecode
, FieldType fieldType
, MDefinition
* keepAlive
,
3681 AliasSet::Flag aliasBitset
, MDefinition
* value
, MDefinition
* base
,
3682 uint32_t offset
, bool needsTrapInfo
, WasmPreBarrierKind preBarrierKind
) {
3683 MOZ_ASSERT(aliasBitset
!= 0);
3684 MOZ_ASSERT(keepAlive
->type() == MIRType::WasmAnyRef
);
3685 MOZ_ASSERT(fieldType
.widenToValType().toMIRType() == value
->type());
3686 MNarrowingOp narrowingOp
= fieldStoreInfoToMIR(fieldType
);
3688 if (!fieldType
.isRefRepr()) {
3689 MaybeTrapSiteInfo maybeTrap
;
3690 if (needsTrapInfo
) {
3691 maybeTrap
.emplace(getTrapSiteInfo());
3693 auto* store
= MWasmStoreFieldKA::New(
3694 alloc(), keepAlive
, base
, offset
, value
, narrowingOp
,
3695 AliasSet::Store(aliasBitset
), maybeTrap
);
3699 curBlock_
->add(store
);
3703 // Otherwise it's a ref store. Load the previous value so we can show it
3704 // to the post-write barrier.
3706 // Optimisation opportunity: for the case where this field write results
3707 // from struct.new, the old value is always zero. So we should synthesise
3708 // a suitable zero constant rather than reading it from the object. See
3709 // also bug 1799999.
3710 MOZ_ASSERT(narrowingOp
== MNarrowingOp::None
);
3711 MOZ_ASSERT(fieldType
.widenToValType() == fieldType
.valType());
3713 // Store the new value
3714 auto* store
= MWasmStoreFieldRefKA::New(
3715 alloc(), instancePointer_
, keepAlive
, base
, offset
, value
,
3716 AliasSet::Store(aliasBitset
), mozilla::Some(getTrapSiteInfo()),
3721 curBlock_
->add(store
);
3723 // Call the post-write barrier
3724 return postBarrier(lineOrBytecode
, keepAlive
, base
, offset
, value
);
3727 // Generate a write of `value` at address `base + index * scale`, where
3728 // `scale` is known at JIT-time. If the written value is a reftype, the
3729 // previous value at `base + index * scale` will be retrieved and handed off
3730 // to the post-write barrier. `keepAlive` will be referenced by the
3731 // instruction so as to hold it live (from the GC's point of view).
3732 [[nodiscard
]] bool writeGcValueAtBasePlusScaledIndex(
3733 uint32_t lineOrBytecode
, FieldType fieldType
, MDefinition
* keepAlive
,
3734 AliasSet::Flag aliasBitset
, MDefinition
* value
, MDefinition
* base
,
3735 uint32_t scale
, MDefinition
* index
, WasmPreBarrierKind preBarrierKind
) {
3736 MOZ_ASSERT(aliasBitset
!= 0);
3737 MOZ_ASSERT(keepAlive
->type() == MIRType::WasmAnyRef
);
3738 MOZ_ASSERT(fieldType
.widenToValType().toMIRType() == value
->type());
3739 MOZ_ASSERT(scale
== 1 || scale
== 2 || scale
== 4 || scale
== 8 ||
3742 // Currently there's no single MIR node that this can be translated into.
3743 // So compute the final address "manually", then store directly to that
3744 // address. See bug 1802287.
3745 MDefinition
* scaleDef
= constantTargetWord(intptr_t(scale
));
3749 MDefinition
* finalAddr
= computeBasePlusScaledIndex(base
, scaleDef
, index
);
3754 return writeGcValueAtBasePlusOffset(
3755 lineOrBytecode
, fieldType
, keepAlive
, aliasBitset
, value
, finalAddr
,
3757 /*needsTrapInfo=*/false, preBarrierKind
);
3760 // Generate a read from address `base + offset`, where `offset` is known at
3761 // JIT time. The loaded value will be widened as described by `fieldType`
3762 // and `fieldWideningOp`. `keepAlive` will be referenced by the instruction
3763 // so as to hold it live (from the GC's point of view).
3764 [[nodiscard
]] MDefinition
* readGcValueAtBasePlusOffset(
3765 FieldType fieldType
, FieldWideningOp fieldWideningOp
,
3766 MDefinition
* keepAlive
, AliasSet::Flag aliasBitset
, MDefinition
* base
,
3767 uint32_t offset
, bool needsTrapInfo
) {
3768 MOZ_ASSERT(aliasBitset
!= 0);
3769 MOZ_ASSERT(keepAlive
->type() == MIRType::WasmAnyRef
);
3771 MWideningOp mirWideningOp
;
3772 fieldLoadInfoToMIR(fieldType
, fieldWideningOp
, &mirType
, &mirWideningOp
);
3773 MaybeTrapSiteInfo maybeTrap
;
3774 if (needsTrapInfo
) {
3775 maybeTrap
.emplace(getTrapSiteInfo());
3777 auto* load
= MWasmLoadFieldKA::New(alloc(), keepAlive
, base
, offset
,
3778 mirType
, mirWideningOp
,
3779 AliasSet::Load(aliasBitset
), maybeTrap
);
3783 curBlock_
->add(load
);
3787 // Generate a read from address `base + index * scale`, where `scale` is
3788 // known at JIT-time. The loaded value will be widened as described by
3789 // `fieldType` and `fieldWideningOp`. `keepAlive` will be referenced by the
3790 // instruction so as to hold it live (from the GC's point of view).
3791 [[nodiscard
]] MDefinition
* readGcValueAtBasePlusScaledIndex(
3792 FieldType fieldType
, FieldWideningOp fieldWideningOp
,
3793 MDefinition
* keepAlive
, AliasSet::Flag aliasBitset
, MDefinition
* base
,
3794 uint32_t scale
, MDefinition
* index
) {
3795 MOZ_ASSERT(aliasBitset
!= 0);
3796 MOZ_ASSERT(keepAlive
->type() == MIRType::WasmAnyRef
);
3797 MOZ_ASSERT(scale
== 1 || scale
== 2 || scale
== 4 || scale
== 8 ||
3800 // Currently there's no single MIR node that this can be translated into.
3801 // So compute the final address "manually", then store directly to that
3802 // address. See bug 1802287.
3803 MDefinition
* scaleDef
= constantTargetWord(intptr_t(scale
));
3807 MDefinition
* finalAddr
= computeBasePlusScaledIndex(base
, scaleDef
, index
);
3813 MWideningOp mirWideningOp
;
3814 fieldLoadInfoToMIR(fieldType
, fieldWideningOp
, &mirType
, &mirWideningOp
);
3815 auto* load
= MWasmLoadFieldKA::New(alloc(), keepAlive
, finalAddr
,
3816 /*offset=*/0, mirType
, mirWideningOp
,
3817 AliasSet::Load(aliasBitset
),
3818 mozilla::Some(getTrapSiteInfo()));
3822 curBlock_
->add(load
);
3826 /************************************************ WasmGC: type helpers ***/
3828 // Returns an MDefinition holding the supertype vector for `typeIndex`.
3829 [[nodiscard
]] MDefinition
* loadSuperTypeVector(uint32_t typeIndex
) {
3830 uint32_t stvOffset
= moduleEnv().offsetOfSuperTypeVector(typeIndex
);
3833 MWasmLoadInstanceDataField::New(alloc(), MIRType::Pointer
, stvOffset
,
3834 /*isConst=*/true, instancePointer_
);
3838 curBlock_
->add(load
);
3842 [[nodiscard
]] MDefinition
* loadTypeDefInstanceData(uint32_t typeIndex
) {
3843 size_t offset
= Instance::offsetInData(
3844 moduleEnv_
.offsetOfTypeDefInstanceData(typeIndex
));
3845 auto* result
= MWasmDerivedPointer::New(alloc(), instancePointer_
, offset
);
3849 curBlock_
->add(result
);
3853 /********************************************** WasmGC: struct helpers ***/
3855 [[nodiscard
]] MDefinition
* createStructObject(uint32_t typeIndex
,
3857 const TypeDef
& typeDef
= (*moduleEnv().types
)[typeIndex
];
3858 gc::AllocKind allocKind
= WasmStructObject::allocKindForTypeDef(&typeDef
);
3860 WasmStructObject::requiresOutlineBytes(typeDef
.structType().size_
);
3862 // Allocate an uninitialized struct. This requires the type definition
3864 MDefinition
* typeDefData
= loadTypeDefInstanceData(typeIndex
);
3869 auto* structObject
=
3870 MWasmNewStructObject::New(alloc(), instancePointer_
, typeDefData
,
3871 isOutline
, zeroFields
, allocKind
);
3872 if (!structObject
) {
3875 curBlock_
->add(structObject
);
3877 return structObject
;
3880 // Helper function for EmitStruct{New,Set}: given a MIR pointer to a
3881 // WasmStructObject, a MIR pointer to a value, and a field descriptor,
3882 // generate MIR to write the value to the relevant field in the object.
3883 [[nodiscard
]] bool writeValueToStructField(
3884 uint32_t lineOrBytecode
, const StructField
& field
,
3885 MDefinition
* structObject
, MDefinition
* value
,
3886 WasmPreBarrierKind preBarrierKind
) {
3887 FieldType fieldType
= field
.type
;
3888 uint32_t fieldOffset
= field
.offset
;
3891 uint32_t areaOffset
;
3892 WasmStructObject::fieldOffsetToAreaAndOffset(fieldType
, fieldOffset
,
3893 &areaIsOutline
, &areaOffset
);
3895 // Make `base` point at the first byte of either the struct object as a
3896 // whole or of the out-of-line data area. And adjust `areaOffset`
3900 if (areaIsOutline
) {
3901 auto* load
= MWasmLoadField::New(
3902 alloc(), structObject
, WasmStructObject::offsetOfOutlineData(),
3903 MIRType::Pointer
, MWideningOp::None
,
3904 AliasSet::Load(AliasSet::WasmStructOutlineDataPointer
),
3905 mozilla::Some(getTrapSiteInfo()));
3909 curBlock_
->add(load
);
3911 needsTrapInfo
= false;
3913 base
= structObject
;
3914 needsTrapInfo
= true;
3915 areaOffset
+= WasmStructObject::offsetOfInlineData();
3917 // The transaction is to happen at `base + areaOffset`, so to speak.
3918 // After this point we must ignore `fieldOffset`.
3920 // The alias set denoting the field's location, although lacking a
3921 // Load-vs-Store indication at this point.
3922 AliasSet::Flag fieldAliasSet
= areaIsOutline
3923 ? AliasSet::WasmStructOutlineDataArea
3924 : AliasSet::WasmStructInlineDataArea
;
3926 return writeGcValueAtBasePlusOffset(lineOrBytecode
, fieldType
, structObject
,
3927 fieldAliasSet
, value
, base
, areaOffset
,
3928 needsTrapInfo
, preBarrierKind
);
3931 // Helper function for EmitStructGet: given a MIR pointer to a
3932 // WasmStructObject, a field descriptor and a field widening operation,
3933 // generate MIR to read the value from the relevant field in the object.
3934 [[nodiscard
]] MDefinition
* readValueFromStructField(
3935 const StructField
& field
, FieldWideningOp wideningOp
,
3936 MDefinition
* structObject
) {
3937 FieldType fieldType
= field
.type
;
3938 uint32_t fieldOffset
= field
.offset
;
3941 uint32_t areaOffset
;
3942 WasmStructObject::fieldOffsetToAreaAndOffset(fieldType
, fieldOffset
,
3943 &areaIsOutline
, &areaOffset
);
3945 // Make `base` point at the first byte of either the struct object as a
3946 // whole or of the out-of-line data area. And adjust `areaOffset`
3950 if (areaIsOutline
) {
3951 auto* loadOOLptr
= MWasmLoadField::New(
3952 alloc(), structObject
, WasmStructObject::offsetOfOutlineData(),
3953 MIRType::Pointer
, MWideningOp::None
,
3954 AliasSet::Load(AliasSet::WasmStructOutlineDataPointer
),
3955 mozilla::Some(getTrapSiteInfo()));
3959 curBlock_
->add(loadOOLptr
);
3961 needsTrapInfo
= false;
3963 base
= structObject
;
3964 needsTrapInfo
= true;
3965 areaOffset
+= WasmStructObject::offsetOfInlineData();
3967 // The transaction is to happen at `base + areaOffset`, so to speak.
3968 // After this point we must ignore `fieldOffset`.
3970 // The alias set denoting the field's location, although lacking a
3971 // Load-vs-Store indication at this point.
3972 AliasSet::Flag fieldAliasSet
= areaIsOutline
3973 ? AliasSet::WasmStructOutlineDataArea
3974 : AliasSet::WasmStructInlineDataArea
;
3976 return readGcValueAtBasePlusOffset(fieldType
, wideningOp
, structObject
,
3977 fieldAliasSet
, base
, areaOffset
,
3981 /********************************* WasmGC: address-arithmetic helpers ***/
3983 inline bool targetIs64Bit() const {
3991 // Generate MIR to unsigned widen `val` out to the target word size. If
3992 // `val` is already at the target word size, this is a no-op. The only
3993 // other allowed case is where `val` is Int32 and we're compiling for a
3994 // 64-bit target, in which case a widen is generated.
// Zero-extends `val` to the target word size. On a 64-bit target an Int32
// input gets an unsigned MExtendInt32ToInt64 added to the current block; an
// Int64 input is already word-sized. On a 32-bit target only Int32 is legal
// and needs no widening (per the asserts below).
3995 [[nodiscard
]] MDefinition
* unsignedWidenToTargetWord(MDefinition
* val
) {
3996 if (targetIs64Bit()) {
3997 if (val
->type() == MIRType::Int32
) {
// isUnsigned=true: this is a zero-extension, not sign-extension.
3998 auto* ext
= MExtendInt32ToInt64::New(alloc(), val
, /*isUnsigned=*/true);
4002 curBlock_
->add(ext
);
4005 MOZ_ASSERT(val
->type() == MIRType::Int64
);
4008 MOZ_ASSERT(val
->type() == MIRType::Int32
);
4012 // Compute `base + index * scale`, for both 32- and 64-bit targets. For the
4013 // convenience of callers, on a 64-bit target, `index` and `scale` can
4014 // (independently) be either Int32 or Int64; in the former case they will be
4015 // zero-extended before the multiplication, so that both the multiplication
4016 // and addition are done at the target word size.
4017 [[nodiscard
]] MDefinition
* computeBasePlusScaledIndex(MDefinition
* base
,
4019 MDefinition
* index
) {
4020 // On a 32-bit target, require:
4021 // base : Int32 (== TargetWordMIRType())
4022 // index, scale : Int32
4023 // Calculate base +32 (index *32 scale)
4025 // On a 64-bit target, require:
4026 // base : Int64 (== TargetWordMIRType())
4027 // index, scale: either Int32 or Int64 (any combination is OK)
4028 // Calculate base +64 (u-widen to 64(index)) *64 (u-widen to 64(scale))
4030 // Final result type is the same as that of `base`.
4032 MOZ_ASSERT(base
->type() == TargetWordMIRType());
4034 // Widen `index` if necessary, producing `indexW`.
4035 MDefinition
* indexW
= unsignedWidenToTargetWord(index
);
4039 // Widen `scale` if necessary, producing `scaleW`.
4040 MDefinition
* scaleW
= unsignedWidenToTargetWord(scale
);
4044 // Compute `scaledIndex = indexW * scaleW`.
4045 MIRType targetWordType
= TargetWordMIRType();
4046 bool targetIs64
= targetWordType
== MIRType::Int64
;
4048 MMul::NewWasm(alloc(), indexW
, scaleW
, targetWordType
,
4049 targetIs64
? MMul::Mode::Normal
: MMul::Mode::Integer
,
4050 /*mustPreserveNan=*/false);
4054 // Compute `result = base + scaledIndex`.
4055 curBlock_
->add(scaledIndex
);
4056 MAdd
* result
= MAdd::NewWasm(alloc(), base
, scaledIndex
, targetWordType
);
4060 curBlock_
->add(result
);
4064 /********************************************** WasmGC: array helpers ***/
4066 // Given `arrayObject`, the address of a WasmArrayObject, generate MIR to
4067 // return the contents of the WasmArrayObject::numElements_ field.
4068 // Adds trap site info for the null check.
4069 [[nodiscard
]] MDefinition
* getWasmArrayObjectNumElements(
4070 MDefinition
* arrayObject
) {
4071 MOZ_ASSERT(arrayObject
->type() == MIRType::WasmAnyRef
);
4073 auto* numElements
= MWasmLoadField::New(
4074 alloc(), arrayObject
, WasmArrayObject::offsetOfNumElements(),
4075 MIRType::Int32
, MWideningOp::None
,
4076 AliasSet::Load(AliasSet::WasmArrayNumElements
),
4077 mozilla::Some(getTrapSiteInfo()));
4081 curBlock_
->add(numElements
);
4086 // Given `arrayObject`, the address of a WasmArrayObject, generate MIR to
4087 // return the contents of the WasmArrayObject::data_ field.
4088 [[nodiscard
]] MDefinition
* getWasmArrayObjectData(MDefinition
* arrayObject
) {
4089 MOZ_ASSERT(arrayObject
->type() == MIRType::WasmAnyRef
);
4091 auto* data
= MWasmLoadField::New(
4092 alloc(), arrayObject
, WasmArrayObject::offsetOfData(),
4093 TargetWordMIRType(), MWideningOp::None
,
4094 AliasSet::Load(AliasSet::WasmArrayDataPointer
),
4095 mozilla::Some(getTrapSiteInfo()));
4099 curBlock_
->add(data
);
4104 // Given a JIT-time-known type index `typeIndex` and a run-time known number
4105 // of elements `numElements`, create MIR to call `Instance::arrayNew<true>`,
4106 // producing an array with the relevant type and size and initialized with
4107 // `typeIndex`s default value.
4108 [[nodiscard
]] MDefinition
* createDefaultInitializedArrayObject(
4109 uint32_t lineOrBytecode
, uint32_t typeIndex
, MDefinition
* numElements
) {
4110 // Get the type definition for the array as a whole.
4111 MDefinition
* typeDefData
= loadTypeDefInstanceData(typeIndex
);
4117 // arrayObject = Instance::arrayNew<true>(numElements, typeDefData)
4118 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated
4119 // by this call will trap.
4120 MDefinition
* arrayObject
;
4121 if (!emitInstanceCall2(lineOrBytecode
, SASigArrayNew_true
, numElements
,
4122 typeDefData
, &arrayObject
)) {
4129 [[nodiscard
]] MDefinition
* createUninitializedArrayObject(
4130 uint32_t lineOrBytecode
, uint32_t typeIndex
, MDefinition
* numElements
) {
4131 // Get the type definition for the array as a whole.
4132 MDefinition
* typeDefData
= loadTypeDefInstanceData(typeIndex
);
4138 // arrayObject = Instance::arrayNew<false>(numElements, typeDefData)
4139 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated
4140 // by this call will trap.
4141 MDefinition
* arrayObject
;
4142 if (!emitInstanceCall2(lineOrBytecode
, SASigArrayNew_false
, numElements
,
4143 typeDefData
, &arrayObject
)) {
4150 // This emits MIR to perform several actions common to array loads and
4151 // stores. Given `arrayObject`, that points to a WasmArrayObject, and an
4152 // index value `index`, it:
4154 // * Generates a trap if the array pointer is null
4155 // * Gets the size of the array
4156 // * Emits a bounds check of `index` against the array size
4157 // * Retrieves the OOL object pointer from the array
4158 // * Includes check for null via signal handler.
4160 // The returned value is for the OOL object pointer.
4161 [[nodiscard
]] MDefinition
* setupForArrayAccess(MDefinition
* arrayObject
,
4162 MDefinition
* index
) {
4163 MOZ_ASSERT(arrayObject
->type() == MIRType::WasmAnyRef
);
4164 MOZ_ASSERT(index
->type() == MIRType::Int32
);
4166 // Check for null is done in getWasmArrayObjectNumElements.
4168 // Get the size value for the array.
4169 MDefinition
* numElements
= getWasmArrayObjectNumElements(arrayObject
);
4174 // Create a bounds check.
4176 MWasmBoundsCheck::New(alloc(), index
, numElements
, bytecodeOffset(),
4177 MWasmBoundsCheck::Target::Unknown
);
4181 curBlock_
->add(boundsCheck
);
4183 // Get the address of the first byte of the (OOL) data area.
4184 return getWasmArrayObjectData(arrayObject
);
4187 [[nodiscard
]] bool fillArray(uint32_t lineOrBytecode
,
4188 const ArrayType
& arrayType
,
4189 MDefinition
* arrayObject
, MDefinition
* index
,
4190 MDefinition
* numElements
, MDefinition
* val
) {
4191 mozilla::DebugOnly
<MIRType
> valMIRType
= val
->type();
4192 FieldType valFieldType
= arrayType
.elementType_
;
4193 MOZ_ASSERT(valFieldType
.widenToValType().toMIRType() == valMIRType
);
4195 uint32_t elemSize
= valFieldType
.size();
4196 MOZ_ASSERT(elemSize
>= 1 && elemSize
<= 16);
4198 // Make `arrayBase` point at the first byte of the (OOL) data area.
4199 MDefinition
* arrayBase
= getWasmArrayObjectData(arrayObject
);
4205 // arrayBase : TargetWord
4207 // numElements : Int32
4208 // val : <any FieldType>
4209 // $elemSize = arrayType.elementType_.size(); 1, 2, 4, 8 or 16
4212 // <in current block>
4213 // fillBase : TargetWord = arrayBase + numElements * index
4214 // limit : TargetWord = fillBase + numElements * elemSize
4215 // if (limit == fillBase) goto after; // skip loop if trip count == 0
4217 // ptrPhi = phi(fillBase, ptrNext)
4219 // ptrNext = ptrPhi + $elemSize
4220 // if (ptrNext <u limit) goto loop;
4223 // We construct the loop "manually" rather than using
4224 // FunctionCompiler::{startLoop,closeLoop} as the latter have awareness of
4225 // the wasm view of loops, whereas the loop we're building here is not a
4227 // ==== Create the "loop" and "after" blocks ====
4228 MBasicBlock
* loopBlock
;
4229 if (!newBlock(curBlock_
, &loopBlock
, MBasicBlock::LOOP_HEADER
)) {
4232 MBasicBlock
* afterBlock
;
4233 if (!newBlock(loopBlock
, &afterBlock
)) {
4237 // ==== Fill in the remainder of the block preceding the loop ====
4238 MDefinition
* elemSizeDef
= constantTargetWord(intptr_t(elemSize
));
4243 MDefinition
* fillBase
=
4244 computeBasePlusScaledIndex(arrayBase
, elemSizeDef
, index
);
4248 MDefinition
* limit
=
4249 computeBasePlusScaledIndex(fillBase
, elemSizeDef
, numElements
);
4254 // Use JSOp::StrictEq, not ::Eq, so that the comparison (and eventually
4255 // the entire initialisation loop) will be folded out in the case where
4256 // the number of elements is zero. See MCompare::tryFoldEqualOperands.
4257 MDefinition
* limitEqualsBase
= compare(
4258 limit
, fillBase
, JSOp::StrictEq
,
4259 targetIs64Bit() ? MCompare::Compare_UInt64
: MCompare::Compare_UInt32
);
4260 if (!limitEqualsBase
) {
4263 MTest
* skipIfLimitEqualsBase
=
4264 MTest::New(alloc(), limitEqualsBase
, afterBlock
, loopBlock
);
4265 if (!skipIfLimitEqualsBase
) {
4268 curBlock_
->end(skipIfLimitEqualsBase
);
4269 if (!afterBlock
->addPredecessor(alloc(), curBlock_
)) {
4273 // ==== Fill in the loop block as best we can ====
4274 curBlock_
= loopBlock
;
4275 MPhi
* ptrPhi
= MPhi::New(alloc(), TargetWordMIRType());
4279 if (!ptrPhi
->reserveLength(2)) {
4282 ptrPhi
->addInput(fillBase
);
4283 curBlock_
->addPhi(ptrPhi
);
4284 curBlock_
->setLoopDepth(loopDepth_
+ 1);
4286 // Because we have the exact address to hand, use
4287 // `writeGcValueAtBasePlusOffset` rather than
4288 // `writeGcValueAtBasePlusScaledIndex` to do the store.
4289 if (!writeGcValueAtBasePlusOffset(
4290 lineOrBytecode
, valFieldType
, arrayObject
,
4291 AliasSet::WasmArrayDataArea
, val
, ptrPhi
, /*offset=*/0,
4292 /*needsTrapInfo=*/false, WasmPreBarrierKind::None
)) {
4297 MAdd::NewWasm(alloc(), ptrPhi
, elemSizeDef
, TargetWordMIRType());
4301 curBlock_
->add(ptrNext
);
4302 ptrPhi
->addInput(ptrNext
);
4304 MDefinition
* ptrNextLtuLimit
= compare(
4305 ptrNext
, limit
, JSOp::Lt
,
4306 targetIs64Bit() ? MCompare::Compare_UInt64
: MCompare::Compare_UInt32
);
4307 if (!ptrNextLtuLimit
) {
4310 auto* continueIfPtrNextLtuLimit
=
4311 MTest::New(alloc(), ptrNextLtuLimit
, loopBlock
, afterBlock
);
4312 if (!continueIfPtrNextLtuLimit
) {
4315 curBlock_
->end(continueIfPtrNextLtuLimit
);
4316 if (!loopBlock
->addPredecessor(alloc(), loopBlock
)) {
4319 // ==== Loop block completed ====
4321 curBlock_
= afterBlock
;
4325 // This routine generates all MIR required for `array.new`. The returned
4326 // value is for the newly created array.
4327 [[nodiscard
]] MDefinition
* createArrayNewCallAndLoop(uint32_t lineOrBytecode
,
4329 MDefinition
* numElements
,
4330 MDefinition
* fillValue
) {
4331 const ArrayType
& arrayType
= (*moduleEnv_
.types
)[typeIndex
].arrayType();
4333 // Create the array object, uninitialized.
4334 MDefinition
* arrayObject
=
4335 createUninitializedArrayObject(lineOrBytecode
, typeIndex
, numElements
);
4340 // Optimisation opportunity: if the fill value is zero, maybe we should
4341 // likewise skip over the initialisation loop entirely (and, if the zero
4342 // value is visible at JIT time, the loop will be removed). For the
4343 // reftyped case, that would be a big win since each iteration requires a
4344 // call to the post-write barrier routine.
4346 if (!fillArray(lineOrBytecode
, arrayType
, arrayObject
, constantI32(0),
4347 numElements
, fillValue
)) {
4354 [[nodiscard
]] bool createArrayFill(uint32_t lineOrBytecode
,
4356 MDefinition
* arrayObject
,
4357 MDefinition
* index
, MDefinition
* val
,
4358 MDefinition
* numElements
) {
4359 MOZ_ASSERT(arrayObject
->type() == MIRType::WasmAnyRef
);
4360 MOZ_ASSERT(index
->type() == MIRType::Int32
);
4361 MOZ_ASSERT(numElements
->type() == MIRType::Int32
);
4363 const ArrayType
& arrayType
= (*moduleEnv_
.types
)[typeIndex
].arrayType();
4365 // Check for null is done in getWasmArrayObjectNumElements.
4367 // Get the array's actual size.
4368 MDefinition
* actualNumElements
= getWasmArrayObjectNumElements(arrayObject
);
4369 if (!actualNumElements
) {
4373 // Create a bounds check.
4374 auto* boundsCheck
= MWasmBoundsCheckRange32::New(
4375 alloc(), index
, numElements
, actualNumElements
, bytecodeOffset());
4379 curBlock_
->add(boundsCheck
);
4381 return fillArray(lineOrBytecode
, arrayType
, arrayObject
, index
, numElements
,
4385 /*********************************************** WasmGC: other helpers ***/
4387 // Generate MIR that causes a trap of kind `trapKind` if `arg` is zero.
4388 // Currently `arg` may only be a MIRType::Int32, but that requirement could
4389 // be relaxed if needed in future.
// Emits a conditional trap of kind `trapKind` taken when `arg` (Int32) is
// zero: control branches to a fresh trap block ending in MWasmTrap, or falls
// through to a fresh join block, which becomes the new current block.
// NOTE(review): the error-return bodies of the `if (!newBlock(...))` checks
// appear to be elided in this extraction — verify against the original file.
4390 [[nodiscard
]] bool trapIfZero(wasm::Trap trapKind
, MDefinition
* arg
) {
4391 MOZ_ASSERT(arg
->type() == MIRType::Int32
);
4393 MBasicBlock
* trapBlock
= nullptr;
4394 if (!newBlock(curBlock_
, &trapBlock
)) {
4398 auto* trap
= MWasmTrap::New(alloc(), trapKind
, bytecodeOffset());
4402 trapBlock
->end(trap
);
4404 MBasicBlock
* joinBlock
= nullptr;
4405 if (!newBlock(curBlock_
, &joinBlock
)) {
// MTest: nonzero `arg` goes to joinBlock (no trap), zero goes to trapBlock.
4409 auto* test
= MTest::New(alloc(), arg
, joinBlock
, trapBlock
);
4413 curBlock_
->end(test
);
4414 curBlock_
= joinBlock
;
4418 [[nodiscard
]] MDefinition
* isRefSubtypeOf(MDefinition
* ref
,
4421 MInstruction
* isSubTypeOf
= nullptr;
4422 if (destType
.isTypeRef()) {
4423 uint32_t typeIndex
= moduleEnv_
.types
->indexOf(*destType
.typeDef());
4424 MDefinition
* superSTV
= loadSuperTypeVector(typeIndex
);
4425 isSubTypeOf
= MWasmRefIsSubtypeOfConcrete::New(alloc(), ref
, superSTV
,
4426 sourceType
, destType
);
4429 MWasmRefIsSubtypeOfAbstract::New(alloc(), ref
, sourceType
, destType
);
4431 MOZ_ASSERT(isSubTypeOf
);
4433 curBlock_
->add(isSubTypeOf
);
4437 // Generate MIR that attempts to downcast `ref` to `castToTypeDef`. If the
4438 // downcast fails, we trap. If it succeeds, then `ref` can be assumed to
4439 // have a type that is a subtype of (or the same as) `castToTypeDef` after
4441 [[nodiscard
]] bool refCast(MDefinition
* ref
, RefType sourceType
,
4443 MDefinition
* success
= isRefSubtypeOf(ref
, sourceType
, destType
);
4448 // Trap if `success` is zero. If it's nonzero, we have established that
4449 // `ref <: castToTypeDef`.
4450 return trapIfZero(wasm::Trap::BadCast
, success
);
4453 // Generate MIR that computes a boolean value indicating whether or not it
4454 // is possible to downcast `ref` to `destType`.
4455 [[nodiscard
]] MDefinition
* refTest(MDefinition
* ref
, RefType sourceType
,
4457 return isRefSubtypeOf(ref
, sourceType
, destType
);
4460 // Generates MIR for br_on_cast and br_on_cast_fail.
4461 [[nodiscard
]] bool brOnCastCommon(bool onSuccess
, uint32_t labelRelativeDepth
,
4462 RefType sourceType
, RefType destType
,
4463 const ResultType
& labelType
,
4464 const DefVector
& values
) {
4469 MBasicBlock
* fallthroughBlock
= nullptr;
4470 if (!newBlock(curBlock_
, &fallthroughBlock
)) {
4474 // `values` are the values in the top block-value on the stack. Since the
4475 // argument to `br_on_cast{_fail}` is at the top of the stack, it is the
4476 // last element in `values`.
4478 // For both br_on_cast and br_on_cast_fail, the OpIter validation routines
4479 // ensure that `values` is non-empty (by rejecting the case
4480 // `labelType->length() < 1`) and that the last value in `values` is
4482 MOZ_RELEASE_ASSERT(values
.length() > 0);
4483 MDefinition
* ref
= values
.back();
4484 MOZ_ASSERT(ref
->type() == MIRType::WasmAnyRef
);
4486 MDefinition
* success
= isRefSubtypeOf(ref
, sourceType
, destType
);
4493 test
= MTest::New(alloc(), success
, nullptr, fallthroughBlock
);
4494 if (!test
|| !addControlFlowPatch(test
, labelRelativeDepth
,
4495 MTest::TrueBranchIndex
)) {
4499 test
= MTest::New(alloc(), success
, fallthroughBlock
, nullptr);
4500 if (!test
|| !addControlFlowPatch(test
, labelRelativeDepth
,
4501 MTest::FalseBranchIndex
)) {
4506 if (!pushDefs(values
)) {
4510 curBlock_
->end(test
);
4511 curBlock_
= fallthroughBlock
;
4515 [[nodiscard
]] bool brOnNonStruct(const DefVector
& values
) {
4520 MBasicBlock
* fallthroughBlock
= nullptr;
4521 if (!newBlock(curBlock_
, &fallthroughBlock
)) {
4525 MOZ_ASSERT(values
.length() > 0);
4526 MOZ_ASSERT(values
.back()->type() == MIRType::WasmAnyRef
);
4528 MGoto
* jump
= MGoto::New(alloc(), fallthroughBlock
);
4532 if (!pushDefs(values
)) {
4536 curBlock_
->end(jump
);
4537 curBlock_
= fallthroughBlock
;
4541 /************************************************************ DECODING ***/
4543 // AsmJS adds a line number to `callSiteLineNums` for certain operations that
4544 // are represented by a JS call, such as math builtins. We use these line
4545 // numbers when calling builtins. This method will read from
4546 // `callSiteLineNums` when we are using AsmJS, or else return the current
4549 // This method MUST be called from opcodes that AsmJS will emit a call site
4550 // line number for, or else the arrays will get out of sync. Other opcodes
4551 // must use `readBytecodeOffset` below.
// If AsmJS recorded call-site line numbers for this function, consume and
// return the next one (advancing lastReadCallSite_); otherwise return the
// current opcode's bytecode offset. Callers must match the set of opcodes
// AsmJS records lines for, or the arrays desynchronize (see comment above).
4552 uint32_t readCallSiteLineOrBytecode() {
4553 if (!func_
.callSiteLineNums
.empty()) {
4554 return func_
.callSiteLineNums
[lastReadCallSite_
++];
4556 return iter_
.lastOpcodeOffset();
4559 // Return the current bytecode offset.
4560 uint32_t readBytecodeOffset() { return iter_
.lastOpcodeOffset(); }
4562 TrapSiteInfo
getTrapSiteInfo() {
4563 return TrapSiteInfo(wasm::BytecodeOffset(readBytecodeOffset()));
4567 bool done() const { return iter_
.done(); }
4570 /*************************************************************************/
4572 [[nodiscard
]] bool newBlock(MBasicBlock
* pred
, MBasicBlock
** block
,
4573 MBasicBlock::Kind kind
= MBasicBlock::NORMAL
) {
4574 *block
= MBasicBlock::New(mirGraph(), info(), pred
, kind
);
4578 mirGraph().addBlock(*block
);
4579 (*block
)->setLoopDepth(loopDepth_
);
4583 [[nodiscard
]] bool goToNewBlock(MBasicBlock
* pred
, MBasicBlock
** block
) {
4584 if (!newBlock(pred
, block
)) {
4587 pred
->end(MGoto::New(alloc(), *block
));
4591 [[nodiscard
]] bool goToExistingBlock(MBasicBlock
* prev
, MBasicBlock
* next
) {
4594 prev
->end(MGoto::New(alloc(), next
));
4595 return next
->addPredecessor(alloc(), prev
);
4598 [[nodiscard
]] bool bindBranches(uint32_t absolute
, DefVector
* defs
) {
4599 if (absolute
>= blockPatches_
.length() || blockPatches_
[absolute
].empty()) {
4600 return inDeadCode() || popPushedDefs(defs
);
4603 ControlFlowPatchVector
& patches
= blockPatches_
[absolute
];
4604 MControlInstruction
* ins
= patches
[0].ins
;
4605 MBasicBlock
* pred
= ins
->block();
4607 MBasicBlock
* join
= nullptr;
4608 if (!newBlock(pred
, &join
)) {
4613 ins
->replaceSuccessor(patches
[0].index
, join
);
4615 for (size_t i
= 1; i
< patches
.length(); i
++) {
4616 ins
= patches
[i
].ins
;
4618 pred
= ins
->block();
4619 if (!pred
->isMarked()) {
4620 if (!join
->addPredecessor(alloc(), pred
)) {
4626 ins
->replaceSuccessor(patches
[i
].index
, join
);
4629 MOZ_ASSERT_IF(curBlock_
, !curBlock_
->isMarked());
4630 for (uint32_t i
= 0; i
< join
->numPredecessors(); i
++) {
4631 join
->getPredecessor(i
)->unmark();
4634 if (curBlock_
&& !goToExistingBlock(curBlock_
, join
)) {
4640 if (!popPushedDefs(defs
)) {
4650 MDefinition
* FunctionCompiler::unary
<MToFloat32
>(MDefinition
* op
) {
4654 auto* ins
= MToFloat32::New(alloc(), op
, mustPreserveNaN(op
->type()));
4655 curBlock_
->add(ins
);
4660 MDefinition
* FunctionCompiler::unary
<MWasmBuiltinTruncateToInt32
>(
4665 auto* ins
= MWasmBuiltinTruncateToInt32::New(alloc(), op
, instancePointer_
,
4667 curBlock_
->add(ins
);
4672 MDefinition
* FunctionCompiler::unary
<MNot
>(MDefinition
* op
) {
4676 auto* ins
= MNot::NewInt32(alloc(), op
);
4677 curBlock_
->add(ins
);
4682 MDefinition
* FunctionCompiler::unary
<MAbs
>(MDefinition
* op
, MIRType type
) {
4686 auto* ins
= MAbs::NewWasm(alloc(), op
, type
);
4687 curBlock_
->add(ins
);
4691 } // end anonymous namespace
4693 static bool EmitI32Const(FunctionCompiler
& f
) {
4695 if (!f
.iter().readI32Const(&i32
)) {
4699 f
.iter().setResult(f
.constantI32(i32
));
4703 static bool EmitI64Const(FunctionCompiler
& f
) {
4705 if (!f
.iter().readI64Const(&i64
)) {
4709 f
.iter().setResult(f
.constantI64(i64
));
4713 static bool EmitF32Const(FunctionCompiler
& f
) {
4715 if (!f
.iter().readF32Const(&f32
)) {
4719 f
.iter().setResult(f
.constantF32(f32
));
4723 static bool EmitF64Const(FunctionCompiler
& f
) {
4725 if (!f
.iter().readF64Const(&f64
)) {
4729 f
.iter().setResult(f
.constantF64(f64
));
4733 static bool EmitBlock(FunctionCompiler
& f
) {
4735 return f
.iter().readBlock(¶ms
) && f
.startBlock();
4738 static bool EmitLoop(FunctionCompiler
& f
) {
4740 if (!f
.iter().readLoop(¶ms
)) {
4744 MBasicBlock
* loopHeader
;
4745 if (!f
.startLoop(&loopHeader
, params
.length())) {
4749 f
.addInterruptCheck();
4751 f
.iter().controlItem().setBlock(loopHeader
);
4755 static bool EmitIf(FunctionCompiler
& f
) {
4757 MDefinition
* condition
= nullptr;
4758 if (!f
.iter().readIf(¶ms
, &condition
)) {
4762 MBasicBlock
* elseBlock
;
4763 if (!f
.branchAndStartThen(condition
, &elseBlock
)) {
4767 f
.iter().controlItem().setBlock(elseBlock
);
4771 static bool EmitElse(FunctionCompiler
& f
) {
4772 ResultType paramType
;
4773 ResultType resultType
;
4774 DefVector thenValues
;
4775 if (!f
.iter().readElse(¶mType
, &resultType
, &thenValues
)) {
4779 if (!f
.pushDefs(thenValues
)) {
4783 Control
& control
= f
.iter().controlItem();
4784 return f
.switchToElse(control
.block
, &control
.block
);
4787 static bool EmitEnd(FunctionCompiler
& f
) {
4790 DefVector preJoinDefs
;
4791 DefVector resultsForEmptyElse
;
4792 if (!f
.iter().readEnd(&kind
, &type
, &preJoinDefs
, &resultsForEmptyElse
)) {
4796 Control
& control
= f
.iter().controlItem();
4797 MBasicBlock
* block
= control
.block
;
4799 if (!f
.pushDefs(preJoinDefs
)) {
4803 // Every label case is responsible to pop the control item at the appropriate
4804 // time for the label case
4805 DefVector postJoinDefs
;
4807 case LabelKind::Body
:
4808 if (!f
.emitBodyDelegateThrowPad(control
)) {
4811 if (!f
.finishBlock(&postJoinDefs
)) {
4814 if (!f
.returnValues(postJoinDefs
)) {
4818 MOZ_ASSERT(f
.iter().controlStackEmpty());
4819 return f
.iter().endFunction(f
.iter().end());
4820 case LabelKind::Block
:
4821 if (!f
.finishBlock(&postJoinDefs
)) {
4826 case LabelKind::Loop
:
4827 if (!f
.closeLoop(block
, &postJoinDefs
)) {
4832 case LabelKind::Then
: {
4833 // If we didn't see an Else, create a trivial else block so that we create
4834 // a diamond anyway, to preserve Ion invariants.
4835 if (!f
.switchToElse(block
, &block
)) {
4839 if (!f
.pushDefs(resultsForEmptyElse
)) {
4843 if (!f
.joinIfElse(block
, &postJoinDefs
)) {
4849 case LabelKind::Else
:
4850 if (!f
.joinIfElse(block
, &postJoinDefs
)) {
4855 case LabelKind::Try
:
4856 case LabelKind::Catch
:
4857 case LabelKind::CatchAll
:
4858 if (!f
.finishTryCatch(kind
, control
, &postJoinDefs
)) {
4865 MOZ_ASSERT_IF(!f
.inDeadCode(), postJoinDefs
.length() == type
.length());
4866 f
.iter().setResults(postJoinDefs
.length(), postJoinDefs
);
4871 static bool EmitBr(FunctionCompiler
& f
) {
4872 uint32_t relativeDepth
;
4875 if (!f
.iter().readBr(&relativeDepth
, &type
, &values
)) {
4879 return f
.br(relativeDepth
, values
);
4882 static bool EmitBrIf(FunctionCompiler
& f
) {
4883 uint32_t relativeDepth
;
4886 MDefinition
* condition
;
4887 if (!f
.iter().readBrIf(&relativeDepth
, &type
, &values
, &condition
)) {
4891 return f
.brIf(relativeDepth
, values
, condition
);
4894 static bool EmitBrTable(FunctionCompiler
& f
) {
4895 Uint32Vector depths
;
4896 uint32_t defaultDepth
;
4897 ResultType branchValueType
;
4898 DefVector branchValues
;
4900 if (!f
.iter().readBrTable(&depths
, &defaultDepth
, &branchValueType
,
4901 &branchValues
, &index
)) {
4905 // If all the targets are the same, or there are no targets, we can just
4906 // use a goto. This is not just an optimization: MaybeFoldConditionBlock
4907 // assumes that tables have more than one successor.
4908 bool allSameDepth
= true;
4909 for (uint32_t depth
: depths
) {
4910 if (depth
!= defaultDepth
) {
4911 allSameDepth
= false;
4917 return f
.br(defaultDepth
, branchValues
);
4920 return f
.brTable(index
, defaultDepth
, depths
, branchValues
);
4923 static bool EmitReturn(FunctionCompiler
& f
) {
4925 if (!f
.iter().readReturn(&values
)) {
4929 return f
.returnValues(values
);
4932 static bool EmitUnreachable(FunctionCompiler
& f
) {
4933 if (!f
.iter().readUnreachable()) {
4937 f
.unreachableTrap();
4941 static bool EmitTry(FunctionCompiler
& f
) {
4943 if (!f
.iter().readTry(¶ms
)) {
4947 MBasicBlock
* curBlock
= nullptr;
4948 if (!f
.startTry(&curBlock
)) {
4952 f
.iter().controlItem().setBlock(curBlock
);
4956 static bool EmitCatch(FunctionCompiler
& f
) {
4959 ResultType paramType
, resultType
;
4960 DefVector tryValues
;
4961 if (!f
.iter().readCatch(&kind
, &tagIndex
, ¶mType
, &resultType
,
4966 // Pushing the results of the previous block, to properly join control flow
4967 // after the try and after each handler, as well as potential control flow
4968 // patches from other instrunctions. This is similar to what is done for
4969 // if-then-else control flow and for most other control control flow joins.
4970 if (!f
.pushDefs(tryValues
)) {
4974 return f
.switchToCatch(f
.iter().controlItem(), kind
, tagIndex
);
4977 static bool EmitCatchAll(FunctionCompiler
& f
) {
4979 ResultType paramType
, resultType
;
4980 DefVector tryValues
;
4981 if (!f
.iter().readCatchAll(&kind
, ¶mType
, &resultType
, &tryValues
)) {
4985 // Pushing the results of the previous block, to properly join control flow
4986 // after the try and after each handler, as well as potential control flow
4987 // patches from other instrunctions.
4988 if (!f
.pushDefs(tryValues
)) {
4992 return f
.switchToCatch(f
.iter().controlItem(), kind
, CatchAllIndex
);
4995 static bool EmitDelegate(FunctionCompiler
& f
) {
4996 uint32_t relativeDepth
;
4997 ResultType resultType
;
4998 DefVector tryValues
;
4999 if (!f
.iter().readDelegate(&relativeDepth
, &resultType
, &tryValues
)) {
5003 Control
& control
= f
.iter().controlItem();
5004 MBasicBlock
* block
= control
.block
;
5006 // Unless the entire try-delegate is dead code, delegate any pad-patches from
5007 // this try to the next try-block above relativeDepth.
5009 ControlInstructionVector
& delegatePadPatches
= control
.tryPadPatches
;
5010 if (!f
.delegatePadPatches(delegatePadPatches
, relativeDepth
)) {
5014 f
.iter().popDelegate();
5016 // Push the results of the previous block, and join control flow with
5017 // potential control flow patches from other instrunctions in the try code.
5018 // This is similar to what is done for EmitEnd.
5019 if (!f
.pushDefs(tryValues
)) {
5022 DefVector postJoinDefs
;
5023 if (!f
.finishBlock(&postJoinDefs
)) {
5026 MOZ_ASSERT_IF(!f
.inDeadCode(), postJoinDefs
.length() == resultType
.length());
5027 f
.iter().setResults(postJoinDefs
.length(), postJoinDefs
);
5032 static bool EmitThrow(FunctionCompiler
& f
) {
5034 DefVector argValues
;
5035 if (!f
.iter().readThrow(&tagIndex
, &argValues
)) {
5039 return f
.emitThrow(tagIndex
, argValues
);
5042 static bool EmitRethrow(FunctionCompiler
& f
) {
5043 uint32_t relativeDepth
;
5044 if (!f
.iter().readRethrow(&relativeDepth
)) {
5048 return f
.emitRethrow(relativeDepth
);
5051 static bool EmitCallArgs(FunctionCompiler
& f
, const FuncType
& funcType
,
5052 const DefVector
& args
, CallCompileState
* call
) {
5053 for (size_t i
= 0, n
= funcType
.args().length(); i
< n
; ++i
) {
5054 if (!f
.mirGen().ensureBallast()) {
5057 if (!f
.passArg(args
[i
], funcType
.args()[i
], call
)) {
5062 ResultType resultType
= ResultType::Vector(funcType
.results());
5063 if (!f
.passStackResultAreaCallArg(resultType
, call
)) {
5067 return f
.finishCall(call
);
5070 static bool EmitCall(FunctionCompiler
& f
, bool asmJSFuncDef
) {
5071 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
5076 if (!f
.iter().readOldCallDirect(f
.moduleEnv().numFuncImports
, &funcIndex
,
5081 if (!f
.iter().readCall(&funcIndex
, &args
)) {
5086 if (f
.inDeadCode()) {
5090 const FuncType
& funcType
= *f
.moduleEnv().funcs
[funcIndex
].type
;
5092 CallCompileState call
;
5093 if (!EmitCallArgs(f
, funcType
, args
, &call
)) {
5098 if (f
.moduleEnv().funcIsImport(funcIndex
)) {
5099 uint32_t instanceDataOffset
=
5100 f
.moduleEnv().offsetOfFuncImportInstanceData(funcIndex
);
5101 if (!f
.callImport(instanceDataOffset
, lineOrBytecode
, call
, funcType
,
5106 if (!f
.callDirect(funcType
, funcIndex
, lineOrBytecode
, call
, &results
)) {
5111 f
.iter().setResults(results
.length(), results
);
5115 static bool EmitCallIndirect(FunctionCompiler
& f
, bool oldStyle
) {
5116 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
5118 uint32_t funcTypeIndex
;
5119 uint32_t tableIndex
;
5120 MDefinition
* callee
;
5124 if (!f
.iter().readOldCallIndirect(&funcTypeIndex
, &callee
, &args
)) {
5128 if (!f
.iter().readCallIndirect(&funcTypeIndex
, &tableIndex
, &callee
,
5134 if (f
.inDeadCode()) {
5138 const FuncType
& funcType
= (*f
.moduleEnv().types
)[funcTypeIndex
].funcType();
5140 CallCompileState call
;
5141 if (!EmitCallArgs(f
, funcType
, args
, &call
)) {
5146 if (!f
.callIndirect(funcTypeIndex
, tableIndex
, callee
, lineOrBytecode
, call
,
5151 f
.iter().setResults(results
.length(), results
);
#ifdef ENABLE_WASM_TAIL_CALLS
// `return_call`: tail call to a statically-known function; mirrors EmitCall
// but marks the call state as a return call.
static bool EmitReturnCall(FunctionCompiler& f) {
  uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();

  uint32_t funcIndex;
  DefVector args;
  if (!f.iter().readReturnCall(&funcIndex, &args)) {
    return false;
  }

  if (f.inDeadCode()) {
    return true;
  }

  const FuncType& funcType = *f.moduleEnv().funcs[funcIndex].type;

  CallCompileState call;
  f.markReturnCall(&call);
  if (!EmitCallArgs(f, funcType, args, &call)) {
    return false;
  }

  DefVector results;
  if (f.moduleEnv().funcIsImport(funcIndex)) {
    uint32_t globalDataOffset =
        f.moduleEnv().offsetOfFuncImportInstanceData(funcIndex);
    if (!f.returnCallImport(globalDataOffset, lineOrBytecode, call, funcType,
                            &results)) {
      return false;
    }
  } else {
    if (!f.returnCallDirect(funcType, funcIndex, lineOrBytecode, call,
                            &results)) {
      return false;
    }
  }
  return true;
}

// `return_call_indirect`: tail call through a table; mirrors
// EmitCallIndirect but marks the call state as a return call.
static bool EmitReturnCallIndirect(FunctionCompiler& f) {
  uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();

  uint32_t funcTypeIndex;
  uint32_t tableIndex;
  MDefinition* callee;
  DefVector args;
  if (!f.iter().readReturnCallIndirect(&funcTypeIndex, &tableIndex, &callee,
                                       &args)) {
    return false;
  }

  if (f.inDeadCode()) {
    return true;
  }

  const FuncType& funcType = (*f.moduleEnv().types)[funcTypeIndex].funcType();

  CallCompileState call;
  f.markReturnCall(&call);
  if (!EmitCallArgs(f, funcType, args, &call)) {
    return false;
  }

  DefVector results;
  return f.returnCallIndirect(funcTypeIndex, tableIndex, callee, lineOrBytecode,
                              call, &results);
}
#endif
#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_FUNCTION_REFERENCES)
// `return_call_ref`: tail call through a typed function reference.
static bool EmitReturnCallRef(FunctionCompiler& f) {
  uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();

  const FuncType* funcType;
  MDefinition* callee;
  DefVector args;
  if (!f.iter().readReturnCallRef(&funcType, &callee, &args)) {
    return false;
  }

  if (f.inDeadCode()) {
    return true;
  }

  CallCompileState call;
  f.markReturnCall(&call);
  if (!EmitCallArgs(f, *funcType, args, &call)) {
    return false;
  }

  DefVector results;
  return f.returnCallRef(*funcType, callee, lineOrBytecode, call, &results);
}
#endif
5251 static bool EmitGetLocal(FunctionCompiler
& f
) {
5253 if (!f
.iter().readGetLocal(f
.locals(), &id
)) {
5257 f
.iter().setResult(f
.getLocalDef(id
));
5261 static bool EmitSetLocal(FunctionCompiler
& f
) {
5264 if (!f
.iter().readSetLocal(f
.locals(), &id
, &value
)) {
5268 f
.assign(id
, value
);
5272 static bool EmitTeeLocal(FunctionCompiler
& f
) {
5275 if (!f
.iter().readTeeLocal(f
.locals(), &id
, &value
)) {
5279 f
.assign(id
, value
);
5283 static bool EmitGetGlobal(FunctionCompiler
& f
) {
5285 if (!f
.iter().readGetGlobal(&id
)) {
5289 const GlobalDesc
& global
= f
.moduleEnv().globals
[id
];
5290 if (!global
.isConstant()) {
5291 f
.iter().setResult(f
.loadGlobalVar(global
.offset(), !global
.isMutable(),
5292 global
.isIndirect(),
5293 global
.type().toMIRType()));
5297 LitVal value
= global
.constantValue();
5299 MDefinition
* result
;
5300 switch (value
.type().kind()) {
5302 result
= f
.constantI32(int32_t(value
.i32()));
5305 result
= f
.constantI64(int64_t(value
.i64()));
5308 result
= f
.constantF32(value
.f32());
5311 result
= f
.constantF64(value
.f64());
5314 #ifdef ENABLE_WASM_SIMD
5315 result
= f
.constantV128(value
.v128());
5318 return f
.iter().fail("Ion has no SIMD support yet");
5321 MOZ_ASSERT(value
.ref().isNull());
5322 result
= f
.constantNullRef();
5325 MOZ_CRASH("unexpected type in EmitGetGlobal");
5328 f
.iter().setResult(result
);
5332 static bool EmitSetGlobal(FunctionCompiler
& f
) {
5333 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
5337 if (!f
.iter().readSetGlobal(&id
, &value
)) {
5341 const GlobalDesc
& global
= f
.moduleEnv().globals
[id
];
5342 MOZ_ASSERT(global
.isMutable());
5343 return f
.storeGlobalVar(bytecodeOffset
, global
.offset(), global
.isIndirect(),
5347 static bool EmitTeeGlobal(FunctionCompiler
& f
) {
5348 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
5352 if (!f
.iter().readTeeGlobal(&id
, &value
)) {
5356 const GlobalDesc
& global
= f
.moduleEnv().globals
[id
];
5357 MOZ_ASSERT(global
.isMutable());
5359 return f
.storeGlobalVar(bytecodeOffset
, global
.offset(), global
.isIndirect(),
5363 template <typename MIRClass
>
5364 static bool EmitUnary(FunctionCompiler
& f
, ValType operandType
) {
5366 if (!f
.iter().readUnary(operandType
, &input
)) {
5370 f
.iter().setResult(f
.unary
<MIRClass
>(input
));
5374 template <typename MIRClass
>
5375 static bool EmitConversion(FunctionCompiler
& f
, ValType operandType
,
5376 ValType resultType
) {
5378 if (!f
.iter().readConversion(operandType
, resultType
, &input
)) {
5382 f
.iter().setResult(f
.unary
<MIRClass
>(input
));
5386 template <typename MIRClass
>
5387 static bool EmitUnaryWithType(FunctionCompiler
& f
, ValType operandType
,
5390 if (!f
.iter().readUnary(operandType
, &input
)) {
5394 f
.iter().setResult(f
.unary
<MIRClass
>(input
, mirType
));
5398 template <typename MIRClass
>
5399 static bool EmitConversionWithType(FunctionCompiler
& f
, ValType operandType
,
5400 ValType resultType
, MIRType mirType
) {
5402 if (!f
.iter().readConversion(operandType
, resultType
, &input
)) {
5406 f
.iter().setResult(f
.unary
<MIRClass
>(input
, mirType
));
5410 static bool EmitTruncate(FunctionCompiler
& f
, ValType operandType
,
5411 ValType resultType
, bool isUnsigned
,
5412 bool isSaturating
) {
5413 MDefinition
* input
= nullptr;
5414 if (!f
.iter().readConversion(operandType
, resultType
, &input
)) {
5418 TruncFlags flags
= 0;
5420 flags
|= TRUNC_UNSIGNED
;
5423 flags
|= TRUNC_SATURATING
;
5425 if (resultType
== ValType::I32
) {
5426 if (f
.moduleEnv().isAsmJS()) {
5427 if (input
&& (input
->type() == MIRType::Double
||
5428 input
->type() == MIRType::Float32
)) {
5429 f
.iter().setResult(f
.unary
<MWasmBuiltinTruncateToInt32
>(input
));
5431 f
.iter().setResult(f
.unary
<MTruncateToInt32
>(input
));
5434 f
.iter().setResult(f
.truncate
<MWasmTruncateToInt32
>(input
, flags
));
5437 MOZ_ASSERT(resultType
== ValType::I64
);
5438 MOZ_ASSERT(!f
.moduleEnv().isAsmJS());
5439 #if defined(JS_CODEGEN_ARM)
5440 f
.iter().setResult(f
.truncateWithInstance(input
, flags
));
5442 f
.iter().setResult(f
.truncate
<MWasmTruncateToInt64
>(input
, flags
));
5448 static bool EmitSignExtend(FunctionCompiler
& f
, uint32_t srcSize
,
5449 uint32_t targetSize
) {
5451 ValType type
= targetSize
== 4 ? ValType::I32
: ValType::I64
;
5452 if (!f
.iter().readConversion(type
, type
, &input
)) {
5456 f
.iter().setResult(f
.signExtend(input
, srcSize
, targetSize
));
5460 static bool EmitExtendI32(FunctionCompiler
& f
, bool isUnsigned
) {
5462 if (!f
.iter().readConversion(ValType::I32
, ValType::I64
, &input
)) {
5466 f
.iter().setResult(f
.extendI32(input
, isUnsigned
));
5470 static bool EmitConvertI64ToFloatingPoint(FunctionCompiler
& f
,
5471 ValType resultType
, MIRType mirType
,
5474 if (!f
.iter().readConversion(ValType::I64
, resultType
, &input
)) {
5478 f
.iter().setResult(f
.convertI64ToFloatingPoint(input
, mirType
, isUnsigned
));
5482 static bool EmitReinterpret(FunctionCompiler
& f
, ValType resultType
,
5483 ValType operandType
, MIRType mirType
) {
5485 if (!f
.iter().readConversion(operandType
, resultType
, &input
)) {
5489 f
.iter().setResult(f
.unary
<MWasmReinterpret
>(input
, mirType
));
5493 static bool EmitAdd(FunctionCompiler
& f
, ValType type
, MIRType mirType
) {
5496 if (!f
.iter().readBinary(type
, &lhs
, &rhs
)) {
5500 f
.iter().setResult(f
.add(lhs
, rhs
, mirType
));
5504 static bool EmitSub(FunctionCompiler
& f
, ValType type
, MIRType mirType
) {
5507 if (!f
.iter().readBinary(type
, &lhs
, &rhs
)) {
5511 f
.iter().setResult(f
.sub(lhs
, rhs
, mirType
));
5515 static bool EmitRotate(FunctionCompiler
& f
, ValType type
, bool isLeftRotation
) {
5518 if (!f
.iter().readBinary(type
, &lhs
, &rhs
)) {
5522 MDefinition
* result
= f
.rotate(lhs
, rhs
, type
.toMIRType(), isLeftRotation
);
5523 f
.iter().setResult(result
);
5527 static bool EmitBitNot(FunctionCompiler
& f
, ValType operandType
) {
5529 if (!f
.iter().readUnary(operandType
, &input
)) {
5533 f
.iter().setResult(f
.bitnot(input
));
5537 static bool EmitBitwiseAndOrXor(FunctionCompiler
& f
, ValType operandType
,
5539 MWasmBinaryBitwise::SubOpcode subOpc
) {
5542 if (!f
.iter().readBinary(operandType
, &lhs
, &rhs
)) {
5546 f
.iter().setResult(f
.binary
<MWasmBinaryBitwise
>(lhs
, rhs
, mirType
, subOpc
));
5550 template <typename MIRClass
>
5551 static bool EmitShift(FunctionCompiler
& f
, ValType operandType
,
5555 if (!f
.iter().readBinary(operandType
, &lhs
, &rhs
)) {
5559 f
.iter().setResult(f
.binary
<MIRClass
>(lhs
, rhs
, mirType
));
5563 static bool EmitUrsh(FunctionCompiler
& f
, ValType operandType
,
5567 if (!f
.iter().readBinary(operandType
, &lhs
, &rhs
)) {
5571 f
.iter().setResult(f
.ursh(lhs
, rhs
, mirType
));
5575 static bool EmitMul(FunctionCompiler
& f
, ValType operandType
, MIRType mirType
) {
5578 if (!f
.iter().readBinary(operandType
, &lhs
, &rhs
)) {
5583 f
.mul(lhs
, rhs
, mirType
,
5584 mirType
== MIRType::Int32
? MMul::Integer
: MMul::Normal
));
5588 static bool EmitDiv(FunctionCompiler
& f
, ValType operandType
, MIRType mirType
,
5592 if (!f
.iter().readBinary(operandType
, &lhs
, &rhs
)) {
5596 f
.iter().setResult(f
.div(lhs
, rhs
, mirType
, isUnsigned
));
5600 static bool EmitRem(FunctionCompiler
& f
, ValType operandType
, MIRType mirType
,
5604 if (!f
.iter().readBinary(operandType
, &lhs
, &rhs
)) {
5608 f
.iter().setResult(f
.mod(lhs
, rhs
, mirType
, isUnsigned
));
5612 static bool EmitMinMax(FunctionCompiler
& f
, ValType operandType
,
5613 MIRType mirType
, bool isMax
) {
5616 if (!f
.iter().readBinary(operandType
, &lhs
, &rhs
)) {
5620 f
.iter().setResult(f
.minMax(lhs
, rhs
, mirType
, isMax
));
5624 static bool EmitCopySign(FunctionCompiler
& f
, ValType operandType
) {
5627 if (!f
.iter().readBinary(operandType
, &lhs
, &rhs
)) {
5631 f
.iter().setResult(f
.binary
<MCopySign
>(lhs
, rhs
, operandType
.toMIRType()));
5635 static bool EmitComparison(FunctionCompiler
& f
, ValType operandType
,
5636 JSOp compareOp
, MCompare::CompareType compareType
) {
5639 if (!f
.iter().readComparison(operandType
, &lhs
, &rhs
)) {
5643 f
.iter().setResult(f
.compare(lhs
, rhs
, compareOp
, compareType
));
5647 static bool EmitSelect(FunctionCompiler
& f
, bool typed
) {
5649 MDefinition
* trueValue
;
5650 MDefinition
* falseValue
;
5651 MDefinition
* condition
;
5652 if (!f
.iter().readSelect(typed
, &type
, &trueValue
, &falseValue
, &condition
)) {
5656 f
.iter().setResult(f
.select(trueValue
, falseValue
, condition
));
5660 static bool EmitLoad(FunctionCompiler
& f
, ValType type
, Scalar::Type viewType
) {
5661 LinearMemoryAddress
<MDefinition
*> addr
;
5662 if (!f
.iter().readLoad(type
, Scalar::byteSize(viewType
), &addr
)) {
5666 MemoryAccessDesc
access(addr
.memoryIndex
, viewType
, addr
.align
, addr
.offset
,
5667 f
.bytecodeIfNotAsmJS(),
5668 f
.hugeMemoryEnabled(addr
.memoryIndex
));
5669 auto* ins
= f
.load(addr
.base
, &access
, type
);
5670 if (!f
.inDeadCode() && !ins
) {
5674 f
.iter().setResult(ins
);
5678 static bool EmitStore(FunctionCompiler
& f
, ValType resultType
,
5679 Scalar::Type viewType
) {
5680 LinearMemoryAddress
<MDefinition
*> addr
;
5682 if (!f
.iter().readStore(resultType
, Scalar::byteSize(viewType
), &addr
,
5687 MemoryAccessDesc
access(addr
.memoryIndex
, viewType
, addr
.align
, addr
.offset
,
5688 f
.bytecodeIfNotAsmJS(),
5689 f
.hugeMemoryEnabled(addr
.memoryIndex
));
5691 f
.store(addr
.base
, &access
, value
);
5695 static bool EmitTeeStore(FunctionCompiler
& f
, ValType resultType
,
5696 Scalar::Type viewType
) {
5697 LinearMemoryAddress
<MDefinition
*> addr
;
5699 if (!f
.iter().readTeeStore(resultType
, Scalar::byteSize(viewType
), &addr
,
5704 MOZ_ASSERT(f
.isMem32(addr
.memoryIndex
)); // asm.js opcode
5705 MemoryAccessDesc
access(addr
.memoryIndex
, viewType
, addr
.align
, addr
.offset
,
5706 f
.bytecodeIfNotAsmJS(),
5707 f
.hugeMemoryEnabled(addr
.memoryIndex
));
5709 f
.store(addr
.base
, &access
, value
);
5713 static bool EmitTeeStoreWithCoercion(FunctionCompiler
& f
, ValType resultType
,
5714 Scalar::Type viewType
) {
5715 LinearMemoryAddress
<MDefinition
*> addr
;
5717 if (!f
.iter().readTeeStore(resultType
, Scalar::byteSize(viewType
), &addr
,
5722 if (resultType
== ValType::F32
&& viewType
== Scalar::Float64
) {
5723 value
= f
.unary
<MToDouble
>(value
);
5724 } else if (resultType
== ValType::F64
&& viewType
== Scalar::Float32
) {
5725 value
= f
.unary
<MToFloat32
>(value
);
5727 MOZ_CRASH("unexpected coerced store");
5730 MOZ_ASSERT(f
.isMem32(addr
.memoryIndex
)); // asm.js opcode
5731 MemoryAccessDesc
access(addr
.memoryIndex
, viewType
, addr
.align
, addr
.offset
,
5732 f
.bytecodeIfNotAsmJS(),
5733 f
.hugeMemoryEnabled(addr
.memoryIndex
));
5735 f
.store(addr
.base
, &access
, value
);
5739 static bool TryInlineUnaryBuiltin(FunctionCompiler
& f
, SymbolicAddress callee
,
5740 MDefinition
* input
) {
5745 MOZ_ASSERT(IsFloatingPointType(input
->type()));
5748 if (!IsRoundingFunction(callee
, &mode
)) {
5752 if (!MNearbyInt::HasAssemblerSupport(mode
)) {
5756 f
.iter().setResult(f
.nearbyInt(input
, mode
));
5760 static bool EmitUnaryMathBuiltinCall(FunctionCompiler
& f
,
5761 const SymbolicAddressSignature
& callee
) {
5762 MOZ_ASSERT(callee
.numArgs
== 1);
5764 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
5767 if (!f
.iter().readUnary(ValType::fromMIRType(callee
.argTypes
[0]), &input
)) {
5771 if (TryInlineUnaryBuiltin(f
, callee
.identity
, input
)) {
5775 CallCompileState call
;
5776 if (!f
.passArg(input
, callee
.argTypes
[0], &call
)) {
5780 if (!f
.finishCall(&call
)) {
5785 if (!f
.builtinCall(callee
, lineOrBytecode
, call
, &def
)) {
5789 f
.iter().setResult(def
);
5793 static bool EmitBinaryMathBuiltinCall(FunctionCompiler
& f
,
5794 const SymbolicAddressSignature
& callee
) {
5795 MOZ_ASSERT(callee
.numArgs
== 2);
5796 MOZ_ASSERT(callee
.argTypes
[0] == callee
.argTypes
[1]);
5798 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
5800 CallCompileState call
;
5803 // This call to readBinary assumes both operands have the same type.
5804 if (!f
.iter().readBinary(ValType::fromMIRType(callee
.argTypes
[0]), &lhs
,
5809 if (!f
.passArg(lhs
, callee
.argTypes
[0], &call
)) {
5813 if (!f
.passArg(rhs
, callee
.argTypes
[1], &call
)) {
5817 if (!f
.finishCall(&call
)) {
5822 if (!f
.builtinCall(callee
, lineOrBytecode
, call
, &def
)) {
5826 f
.iter().setResult(def
);
5830 static bool EmitMemoryGrow(FunctionCompiler
& f
) {
5831 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
5834 uint32_t memoryIndex
;
5835 if (!f
.iter().readMemoryGrow(&memoryIndex
, &delta
)) {
5839 if (f
.inDeadCode()) {
5843 MDefinition
* memoryIndexValue
= f
.constantI32(int32_t(memoryIndex
));
5844 if (!memoryIndexValue
) {
5848 const SymbolicAddressSignature
& callee
=
5849 f
.isMem32(memoryIndex
) ? SASigMemoryGrowM32
: SASigMemoryGrowM64
;
5852 if (!f
.emitInstanceCall2(bytecodeOffset
, callee
, delta
, memoryIndexValue
,
5857 f
.iter().setResult(ret
);
5861 static bool EmitMemorySize(FunctionCompiler
& f
) {
5862 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
5864 uint32_t memoryIndex
;
5865 if (!f
.iter().readMemorySize(&memoryIndex
)) {
5869 if (f
.inDeadCode()) {
5873 MDefinition
* memoryIndexValue
= f
.constantI32(int32_t(memoryIndex
));
5874 if (!memoryIndexValue
) {
5878 const SymbolicAddressSignature
& callee
=
5879 f
.isMem32(memoryIndex
) ? SASigMemorySizeM32
: SASigMemorySizeM64
;
5882 if (!f
.emitInstanceCall1(bytecodeOffset
, callee
, memoryIndexValue
, &ret
)) {
5886 f
.iter().setResult(ret
);
5890 static bool EmitAtomicCmpXchg(FunctionCompiler
& f
, ValType type
,
5891 Scalar::Type viewType
) {
5892 LinearMemoryAddress
<MDefinition
*> addr
;
5893 MDefinition
* oldValue
;
5894 MDefinition
* newValue
;
5895 if (!f
.iter().readAtomicCmpXchg(&addr
, type
, byteSize(viewType
), &oldValue
,
5900 MemoryAccessDesc
access(
5901 addr
.memoryIndex
, viewType
, addr
.align
, addr
.offset
, f
.bytecodeOffset(),
5902 f
.hugeMemoryEnabled(addr
.memoryIndex
), Synchronization::Full());
5904 f
.atomicCompareExchangeHeap(addr
.base
, &access
, type
, oldValue
, newValue
);
5905 if (!f
.inDeadCode() && !ins
) {
5909 f
.iter().setResult(ins
);
5913 static bool EmitAtomicLoad(FunctionCompiler
& f
, ValType type
,
5914 Scalar::Type viewType
) {
5915 LinearMemoryAddress
<MDefinition
*> addr
;
5916 if (!f
.iter().readAtomicLoad(&addr
, type
, byteSize(viewType
))) {
5920 MemoryAccessDesc
access(
5921 addr
.memoryIndex
, viewType
, addr
.align
, addr
.offset
, f
.bytecodeOffset(),
5922 f
.hugeMemoryEnabled(addr
.memoryIndex
), Synchronization::Load());
5923 auto* ins
= f
.load(addr
.base
, &access
, type
);
5924 if (!f
.inDeadCode() && !ins
) {
5928 f
.iter().setResult(ins
);
5932 static bool EmitAtomicRMW(FunctionCompiler
& f
, ValType type
,
5933 Scalar::Type viewType
, jit::AtomicOp op
) {
5934 LinearMemoryAddress
<MDefinition
*> addr
;
5936 if (!f
.iter().readAtomicRMW(&addr
, type
, byteSize(viewType
), &value
)) {
5940 MemoryAccessDesc
access(
5941 addr
.memoryIndex
, viewType
, addr
.align
, addr
.offset
, f
.bytecodeOffset(),
5942 f
.hugeMemoryEnabled(addr
.memoryIndex
), Synchronization::Full());
5943 auto* ins
= f
.atomicBinopHeap(op
, addr
.base
, &access
, type
, value
);
5944 if (!f
.inDeadCode() && !ins
) {
5948 f
.iter().setResult(ins
);
5952 static bool EmitAtomicStore(FunctionCompiler
& f
, ValType type
,
5953 Scalar::Type viewType
) {
5954 LinearMemoryAddress
<MDefinition
*> addr
;
5956 if (!f
.iter().readAtomicStore(&addr
, type
, byteSize(viewType
), &value
)) {
5960 MemoryAccessDesc
access(
5961 addr
.memoryIndex
, viewType
, addr
.align
, addr
.offset
, f
.bytecodeOffset(),
5962 f
.hugeMemoryEnabled(addr
.memoryIndex
), Synchronization::Store());
5963 f
.store(addr
.base
, &access
, value
);
5967 static bool EmitWait(FunctionCompiler
& f
, ValType type
, uint32_t byteSize
) {
5968 MOZ_ASSERT(type
== ValType::I32
|| type
== ValType::I64
);
5969 MOZ_ASSERT(type
.size() == byteSize
);
5971 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
5973 LinearMemoryAddress
<MDefinition
*> addr
;
5974 MDefinition
* expected
;
5975 MDefinition
* timeout
;
5976 if (!f
.iter().readWait(&addr
, type
, byteSize
, &expected
, &timeout
)) {
5980 if (f
.inDeadCode()) {
5984 MemoryAccessDesc
access(addr
.memoryIndex
,
5985 type
== ValType::I32
? Scalar::Int32
: Scalar::Int64
,
5986 addr
.align
, addr
.offset
, f
.bytecodeOffset(),
5987 f
.hugeMemoryEnabled(addr
.memoryIndex
));
5988 MDefinition
* ptr
= f
.computeEffectiveAddress(addr
.base
, &access
);
5993 MDefinition
* memoryIndex
= f
.constantI32(int32_t(addr
.memoryIndex
));
5998 const SymbolicAddressSignature
& callee
=
5999 f
.isMem32(addr
.memoryIndex
)
6000 ? (type
== ValType::I32
? SASigWaitI32M32
: SASigWaitI64M32
)
6001 : (type
== ValType::I32
? SASigWaitI32M64
: SASigWaitI64M64
);
6004 if (!f
.emitInstanceCall4(bytecodeOffset
, callee
, ptr
, expected
, timeout
,
6005 memoryIndex
, &ret
)) {
6009 f
.iter().setResult(ret
);
6013 static bool EmitFence(FunctionCompiler
& f
) {
6014 if (!f
.iter().readFence()) {
6022 static bool EmitWake(FunctionCompiler
& f
) {
6023 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
6025 LinearMemoryAddress
<MDefinition
*> addr
;
6027 if (!f
.iter().readWake(&addr
, &count
)) {
6031 if (f
.inDeadCode()) {
6035 MemoryAccessDesc
access(addr
.memoryIndex
, Scalar::Int32
, addr
.align
,
6036 addr
.offset
, f
.bytecodeOffset(),
6037 f
.hugeMemoryEnabled(addr
.memoryIndex
));
6038 MDefinition
* ptr
= f
.computeEffectiveAddress(addr
.base
, &access
);
6043 MDefinition
* memoryIndex
= f
.constantI32(int32_t(addr
.memoryIndex
));
6048 const SymbolicAddressSignature
& callee
=
6049 f
.isMem32(addr
.memoryIndex
) ? SASigWakeM32
: SASigWakeM64
;
6052 if (!f
.emitInstanceCall3(bytecodeOffset
, callee
, ptr
, count
, memoryIndex
,
6057 f
.iter().setResult(ret
);
6061 static bool EmitAtomicXchg(FunctionCompiler
& f
, ValType type
,
6062 Scalar::Type viewType
) {
6063 LinearMemoryAddress
<MDefinition
*> addr
;
6065 if (!f
.iter().readAtomicRMW(&addr
, type
, byteSize(viewType
), &value
)) {
6069 MemoryAccessDesc
access(
6070 addr
.memoryIndex
, viewType
, addr
.align
, addr
.offset
, f
.bytecodeOffset(),
6071 f
.hugeMemoryEnabled(addr
.memoryIndex
), Synchronization::Full());
6072 MDefinition
* ins
= f
.atomicExchangeHeap(addr
.base
, &access
, type
, value
);
6073 if (!f
.inDeadCode() && !ins
) {
6077 f
.iter().setResult(ins
);
6081 static bool EmitMemCopyCall(FunctionCompiler
& f
, uint32_t dstMemIndex
,
6082 uint32_t srcMemIndex
, MDefinition
* dst
,
6083 MDefinition
* src
, MDefinition
* len
) {
6084 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
6086 if (dstMemIndex
== srcMemIndex
) {
6087 const SymbolicAddressSignature
& callee
=
6088 (f
.moduleEnv().usesSharedMemory(dstMemIndex
)
6089 ? (f
.isMem32(dstMemIndex
) ? SASigMemCopySharedM32
6090 : SASigMemCopySharedM64
)
6091 : (f
.isMem32(dstMemIndex
) ? SASigMemCopyM32
: SASigMemCopyM64
));
6092 MDefinition
* memoryBase
= f
.memoryBase(dstMemIndex
);
6096 return f
.emitInstanceCall4(bytecodeOffset
, callee
, dst
, src
, len
,
6100 IndexType dstIndexType
= f
.moduleEnv().memories
[dstMemIndex
].indexType();
6101 IndexType srcIndexType
= f
.moduleEnv().memories
[srcMemIndex
].indexType();
6103 if (dstIndexType
== IndexType::I32
) {
6104 dst
= f
.extendI32(dst
, /*isUnsigned=*/true);
6109 if (srcIndexType
== IndexType::I32
) {
6110 src
= f
.extendI32(src
, /*isUnsigned=*/true);
6115 if (dstIndexType
== IndexType::I32
|| srcIndexType
== IndexType::I32
) {
6116 len
= f
.extendI32(len
, /*isUnsigned=*/true);
6122 MDefinition
* dstMemIndexValue
= f
.constantI32(int32_t(dstMemIndex
));
6123 if (!dstMemIndexValue
) {
6127 MDefinition
* srcMemIndexValue
= f
.constantI32(int32_t(srcMemIndex
));
6128 if (!srcMemIndexValue
) {
6132 return f
.emitInstanceCall5(bytecodeOffset
, SASigMemCopyAny
, dst
, src
, len
,
6133 dstMemIndexValue
, srcMemIndexValue
);
6136 static bool EmitMemCopyInline(FunctionCompiler
& f
, uint32_t memoryIndex
,
6137 MDefinition
* dst
, MDefinition
* src
,
6139 MOZ_ASSERT(length
!= 0 && length
<= MaxInlineMemoryCopyLength
);
6141 // Compute the number of copies of each width we will need to do
6142 size_t remainder
= length
;
6143 #ifdef ENABLE_WASM_SIMD
6144 size_t numCopies16
= 0;
6145 if (MacroAssembler::SupportsFastUnalignedFPAccesses()) {
6146 numCopies16
= remainder
/ sizeof(V128
);
6147 remainder
%= sizeof(V128
);
6151 size_t numCopies8
= remainder
/ sizeof(uint64_t);
6152 remainder
%= sizeof(uint64_t);
6154 size_t numCopies4
= remainder
/ sizeof(uint32_t);
6155 remainder
%= sizeof(uint32_t);
6156 size_t numCopies2
= remainder
/ sizeof(uint16_t);
6157 remainder
%= sizeof(uint16_t);
6158 size_t numCopies1
= remainder
;
6160 // Load all source bytes from low to high using the widest transfer width we
6161 // can for the system. We will trap without writing anything if any source
6162 // byte is out-of-bounds.
6164 DefVector loadedValues
;
6166 #ifdef ENABLE_WASM_SIMD
6167 for (uint32_t i
= 0; i
< numCopies16
; i
++) {
6168 MemoryAccessDesc
access(memoryIndex
, Scalar::Simd128
, 1, offset
,
6170 f
.hugeMemoryEnabled(memoryIndex
));
6171 auto* load
= f
.load(src
, &access
, ValType::V128
);
6172 if (!load
|| !loadedValues
.append(load
)) {
6176 offset
+= sizeof(V128
);
6181 for (uint32_t i
= 0; i
< numCopies8
; i
++) {
6182 MemoryAccessDesc
access(memoryIndex
, Scalar::Int64
, 1, offset
,
6184 f
.hugeMemoryEnabled(memoryIndex
));
6185 auto* load
= f
.load(src
, &access
, ValType::I64
);
6186 if (!load
|| !loadedValues
.append(load
)) {
6190 offset
+= sizeof(uint64_t);
6194 for (uint32_t i
= 0; i
< numCopies4
; i
++) {
6195 MemoryAccessDesc
access(memoryIndex
, Scalar::Uint32
, 1, offset
,
6197 f
.hugeMemoryEnabled(memoryIndex
));
6198 auto* load
= f
.load(src
, &access
, ValType::I32
);
6199 if (!load
|| !loadedValues
.append(load
)) {
6203 offset
+= sizeof(uint32_t);
6207 MemoryAccessDesc
access(memoryIndex
, Scalar::Uint16
, 1, offset
,
6209 f
.hugeMemoryEnabled(memoryIndex
));
6210 auto* load
= f
.load(src
, &access
, ValType::I32
);
6211 if (!load
|| !loadedValues
.append(load
)) {
6215 offset
+= sizeof(uint16_t);
6219 MemoryAccessDesc
access(memoryIndex
, Scalar::Uint8
, 1, offset
,
6221 f
.hugeMemoryEnabled(memoryIndex
));
6222 auto* load
= f
.load(src
, &access
, ValType::I32
);
6223 if (!load
|| !loadedValues
.append(load
)) {
6228 // Store all source bytes to the destination from high to low. We will trap
6229 // without writing anything on the first store if any dest byte is
6234 offset
-= sizeof(uint8_t);
6236 MemoryAccessDesc
access(memoryIndex
, Scalar::Uint8
, 1, offset
,
6238 f
.hugeMemoryEnabled(memoryIndex
));
6239 auto* value
= loadedValues
.popCopy();
6240 f
.store(dst
, &access
, value
);
6244 offset
-= sizeof(uint16_t);
6246 MemoryAccessDesc
access(memoryIndex
, Scalar::Uint16
, 1, offset
,
6248 f
.hugeMemoryEnabled(memoryIndex
));
6249 auto* value
= loadedValues
.popCopy();
6250 f
.store(dst
, &access
, value
);
6253 for (uint32_t i
= 0; i
< numCopies4
; i
++) {
6254 offset
-= sizeof(uint32_t);
6256 MemoryAccessDesc
access(memoryIndex
, Scalar::Uint32
, 1, offset
,
6258 f
.hugeMemoryEnabled(memoryIndex
));
6259 auto* value
= loadedValues
.popCopy();
6260 f
.store(dst
, &access
, value
);
6264 for (uint32_t i
= 0; i
< numCopies8
; i
++) {
6265 offset
-= sizeof(uint64_t);
6267 MemoryAccessDesc
access(memoryIndex
, Scalar::Int64
, 1, offset
,
6269 f
.hugeMemoryEnabled(memoryIndex
));
6270 auto* value
= loadedValues
.popCopy();
6271 f
.store(dst
, &access
, value
);
6275 #ifdef ENABLE_WASM_SIMD
6276 for (uint32_t i
= 0; i
< numCopies16
; i
++) {
6277 offset
-= sizeof(V128
);
6279 MemoryAccessDesc
access(memoryIndex
, Scalar::Simd128
, 1, offset
,
6281 f
.hugeMemoryEnabled(memoryIndex
));
6282 auto* value
= loadedValues
.popCopy();
6283 f
.store(dst
, &access
, value
);
6290 static bool EmitMemCopy(FunctionCompiler
& f
) {
6291 MDefinition
*dst
, *src
, *len
;
6292 uint32_t dstMemIndex
;
6293 uint32_t srcMemIndex
;
6294 if (!f
.iter().readMemOrTableCopy(true, &dstMemIndex
, &dst
, &srcMemIndex
, &src
,
6299 if (f
.inDeadCode()) {
6303 if (dstMemIndex
== srcMemIndex
&& len
->isConstant()) {
6304 uint64_t length
= f
.isMem32(dstMemIndex
) ? len
->toConstant()->toInt32()
6305 : len
->toConstant()->toInt64();
6306 static_assert(MaxInlineMemoryCopyLength
<= UINT32_MAX
);
6307 if (length
!= 0 && length
<= MaxInlineMemoryCopyLength
) {
6308 return EmitMemCopyInline(f
, dstMemIndex
, dst
, src
, uint32_t(length
));
6312 return EmitMemCopyCall(f
, dstMemIndex
, srcMemIndex
, dst
, src
, len
);
6315 static bool EmitTableCopy(FunctionCompiler
& f
) {
6316 MDefinition
*dst
, *src
, *len
;
6317 uint32_t dstTableIndex
;
6318 uint32_t srcTableIndex
;
6319 if (!f
.iter().readMemOrTableCopy(false, &dstTableIndex
, &dst
, &srcTableIndex
,
6324 if (f
.inDeadCode()) {
6328 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
6329 MDefinition
* dti
= f
.constantI32(int32_t(dstTableIndex
));
6330 MDefinition
* sti
= f
.constantI32(int32_t(srcTableIndex
));
6332 return f
.emitInstanceCall5(bytecodeOffset
, SASigTableCopy
, dst
, src
, len
, dti
,
6336 static bool EmitDataOrElemDrop(FunctionCompiler
& f
, bool isData
) {
6337 uint32_t segIndexVal
= 0;
6338 if (!f
.iter().readDataOrElemDrop(isData
, &segIndexVal
)) {
6342 if (f
.inDeadCode()) {
6346 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
6348 MDefinition
* segIndex
= f
.constantI32(int32_t(segIndexVal
));
6350 const SymbolicAddressSignature
& callee
=
6351 isData
? SASigDataDrop
: SASigElemDrop
;
6352 return f
.emitInstanceCall1(bytecodeOffset
, callee
, segIndex
);
6355 static bool EmitMemFillCall(FunctionCompiler
& f
, uint32_t memoryIndex
,
6356 MDefinition
* start
, MDefinition
* val
,
6358 MDefinition
* memoryBase
= f
.memoryBase(memoryIndex
);
6360 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
6361 const SymbolicAddressSignature
& callee
=
6362 (f
.moduleEnv().usesSharedMemory(memoryIndex
)
6363 ? (f
.isMem32(memoryIndex
) ? SASigMemFillSharedM32
6364 : SASigMemFillSharedM64
)
6365 : (f
.isMem32(memoryIndex
) ? SASigMemFillM32
: SASigMemFillM64
));
6366 return f
.emitInstanceCall4(bytecodeOffset
, callee
, start
, val
, len
,
6370 static bool EmitMemFillInline(FunctionCompiler
& f
, uint32_t memoryIndex
,
6371 MDefinition
* start
, MDefinition
* val
,
6373 MOZ_ASSERT(length
!= 0 && length
<= MaxInlineMemoryFillLength
);
6374 uint32_t value
= val
->toConstant()->toInt32();
6376 // Compute the number of copies of each width we will need to do
6377 size_t remainder
= length
;
6378 #ifdef ENABLE_WASM_SIMD
6379 size_t numCopies16
= 0;
6380 if (MacroAssembler::SupportsFastUnalignedFPAccesses()) {
6381 numCopies16
= remainder
/ sizeof(V128
);
6382 remainder
%= sizeof(V128
);
6386 size_t numCopies8
= remainder
/ sizeof(uint64_t);
6387 remainder
%= sizeof(uint64_t);
6389 size_t numCopies4
= remainder
/ sizeof(uint32_t);
6390 remainder
%= sizeof(uint32_t);
6391 size_t numCopies2
= remainder
/ sizeof(uint16_t);
6392 remainder
%= sizeof(uint16_t);
6393 size_t numCopies1
= remainder
;
6395 // Generate splatted definitions for wider fills as needed
6396 #ifdef ENABLE_WASM_SIMD
6397 MDefinition
* val16
= numCopies16
? f
.constantV128(V128(value
)) : nullptr;
6401 numCopies8
? f
.constantI64(int64_t(SplatByteToUInt
<uint64_t>(value
, 8)))
6405 numCopies4
? f
.constantI32(int32_t(SplatByteToUInt
<uint32_t>(value
, 4)))
6408 numCopies2
? f
.constantI32(int32_t(SplatByteToUInt
<uint32_t>(value
, 2)))
6411 // Store the fill value to the destination from high to low. We will trap
6412 // without writing anything on the first store if any dest byte is
6414 size_t offset
= length
;
6417 offset
-= sizeof(uint8_t);
6419 MemoryAccessDesc
access(memoryIndex
, Scalar::Uint8
, 1, offset
,
6421 f
.hugeMemoryEnabled(memoryIndex
));
6422 f
.store(start
, &access
, val
);
6426 offset
-= sizeof(uint16_t);
6428 MemoryAccessDesc
access(memoryIndex
, Scalar::Uint16
, 1, offset
,
6430 f
.hugeMemoryEnabled(memoryIndex
));
6431 f
.store(start
, &access
, val2
);
6434 for (uint32_t i
= 0; i
< numCopies4
; i
++) {
6435 offset
-= sizeof(uint32_t);
6437 MemoryAccessDesc
access(memoryIndex
, Scalar::Uint32
, 1, offset
,
6439 f
.hugeMemoryEnabled(memoryIndex
));
6440 f
.store(start
, &access
, val4
);
6444 for (uint32_t i
= 0; i
< numCopies8
; i
++) {
6445 offset
-= sizeof(uint64_t);
6447 MemoryAccessDesc
access(memoryIndex
, Scalar::Int64
, 1, offset
,
6449 f
.hugeMemoryEnabled(memoryIndex
));
6450 f
.store(start
, &access
, val8
);
6454 #ifdef ENABLE_WASM_SIMD
6455 for (uint32_t i
= 0; i
< numCopies16
; i
++) {
6456 offset
-= sizeof(V128
);
6458 MemoryAccessDesc
access(memoryIndex
, Scalar::Simd128
, 1, offset
,
6460 f
.hugeMemoryEnabled(memoryIndex
));
6461 f
.store(start
, &access
, val16
);
6468 static bool EmitMemFill(FunctionCompiler
& f
) {
6469 uint32_t memoryIndex
;
6470 MDefinition
*start
, *val
, *len
;
6471 if (!f
.iter().readMemFill(&memoryIndex
, &start
, &val
, &len
)) {
6475 if (f
.inDeadCode()) {
6479 if (len
->isConstant() && val
->isConstant()) {
6480 uint64_t length
= f
.isMem32(memoryIndex
) ? len
->toConstant()->toInt32()
6481 : len
->toConstant()->toInt64();
6482 static_assert(MaxInlineMemoryFillLength
<= UINT32_MAX
);
6483 if (length
!= 0 && length
<= MaxInlineMemoryFillLength
) {
6484 return EmitMemFillInline(f
, memoryIndex
, start
, val
, uint32_t(length
));
6488 return EmitMemFillCall(f
, memoryIndex
, start
, val
, len
);
6491 static bool EmitMemOrTableInit(FunctionCompiler
& f
, bool isMem
) {
6492 uint32_t segIndexVal
= 0, dstMemOrTableIndex
= 0;
6493 MDefinition
*dstOff
, *srcOff
, *len
;
6494 if (!f
.iter().readMemOrTableInit(isMem
, &segIndexVal
, &dstMemOrTableIndex
,
6495 &dstOff
, &srcOff
, &len
)) {
6499 if (f
.inDeadCode()) {
6503 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
6504 const SymbolicAddressSignature
& callee
=
6506 ? (f
.isMem32(dstMemOrTableIndex
) ? SASigMemInitM32
: SASigMemInitM64
)
6509 MDefinition
* segIndex
= f
.constantI32(int32_t(segIndexVal
));
6514 MDefinition
* dti
= f
.constantI32(int32_t(dstMemOrTableIndex
));
6519 return f
.emitInstanceCall5(bytecodeOffset
, callee
, dstOff
, srcOff
, len
,
6523 // Note, table.{get,grow,set} on table(funcref) are currently rejected by the
6526 static bool EmitTableFill(FunctionCompiler
& f
) {
6527 uint32_t tableIndex
;
6528 MDefinition
*start
, *val
, *len
;
6529 if (!f
.iter().readTableFill(&tableIndex
, &start
, &val
, &len
)) {
6533 if (f
.inDeadCode()) {
6537 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
6539 MDefinition
* tableIndexArg
= f
.constantI32(int32_t(tableIndex
));
6540 if (!tableIndexArg
) {
6544 return f
.emitInstanceCall4(bytecodeOffset
, SASigTableFill
, start
, val
, len
,
6548 #if ENABLE_WASM_MEMORY_CONTROL
6549 static bool EmitMemDiscard(FunctionCompiler
& f
) {
6550 uint32_t memoryIndex
;
6551 MDefinition
*start
, *len
;
6552 if (!f
.iter().readMemDiscard(&memoryIndex
, &start
, &len
)) {
6556 if (f
.inDeadCode()) {
6560 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
6562 MDefinition
* memoryBase
= f
.memoryBase(memoryIndex
);
6563 bool isMem32
= f
.isMem32(memoryIndex
);
6565 const SymbolicAddressSignature
& callee
=
6566 (f
.moduleEnv().usesSharedMemory(memoryIndex
)
6567 ? (isMem32
? SASigMemDiscardSharedM32
: SASigMemDiscardSharedM64
)
6568 : (isMem32
? SASigMemDiscardM32
: SASigMemDiscardM64
));
6569 return f
.emitInstanceCall3(bytecodeOffset
, callee
, start
, len
, memoryBase
);
6573 static bool EmitTableGet(FunctionCompiler
& f
) {
6574 uint32_t tableIndex
;
6576 if (!f
.iter().readTableGet(&tableIndex
, &index
)) {
6580 if (f
.inDeadCode()) {
6584 const TableDesc
& table
= f
.moduleEnv().tables
[tableIndex
];
6585 if (table
.elemType
.tableRepr() == TableRepr::Ref
) {
6586 MDefinition
* ret
= f
.tableGetAnyRef(tableIndex
, index
);
6590 f
.iter().setResult(ret
);
6594 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
6596 MDefinition
* tableIndexArg
= f
.constantI32(int32_t(tableIndex
));
6597 if (!tableIndexArg
) {
6601 // The return value here is either null, denoting an error, or a short-lived
6602 // pointer to a location containing a possibly-null ref.
6604 if (!f
.emitInstanceCall2(bytecodeOffset
, SASigTableGet
, index
, tableIndexArg
,
6609 f
.iter().setResult(ret
);
6613 static bool EmitTableGrow(FunctionCompiler
& f
) {
6614 uint32_t tableIndex
;
6615 MDefinition
* initValue
;
6617 if (!f
.iter().readTableGrow(&tableIndex
, &initValue
, &delta
)) {
6621 if (f
.inDeadCode()) {
6625 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
6627 MDefinition
* tableIndexArg
= f
.constantI32(int32_t(tableIndex
));
6628 if (!tableIndexArg
) {
6633 if (!f
.emitInstanceCall3(bytecodeOffset
, SASigTableGrow
, initValue
, delta
,
6634 tableIndexArg
, &ret
)) {
6638 f
.iter().setResult(ret
);
6642 static bool EmitTableSet(FunctionCompiler
& f
) {
6643 uint32_t tableIndex
;
6646 if (!f
.iter().readTableSet(&tableIndex
, &index
, &value
)) {
6650 if (f
.inDeadCode()) {
6654 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
6656 const TableDesc
& table
= f
.moduleEnv().tables
[tableIndex
];
6657 if (table
.elemType
.tableRepr() == TableRepr::Ref
) {
6658 return f
.tableSetAnyRef(tableIndex
, index
, value
, bytecodeOffset
);
6661 MDefinition
* tableIndexArg
= f
.constantI32(int32_t(tableIndex
));
6662 if (!tableIndexArg
) {
6666 return f
.emitInstanceCall3(bytecodeOffset
, SASigTableSet
, index
, value
,
6670 static bool EmitTableSize(FunctionCompiler
& f
) {
6671 uint32_t tableIndex
;
6672 if (!f
.iter().readTableSize(&tableIndex
)) {
6676 if (f
.inDeadCode()) {
6680 MDefinition
* length
= f
.loadTableLength(tableIndex
);
6685 f
.iter().setResult(length
);
6689 static bool EmitRefFunc(FunctionCompiler
& f
) {
6691 if (!f
.iter().readRefFunc(&funcIndex
)) {
6695 if (f
.inDeadCode()) {
6699 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
6701 MDefinition
* funcIndexArg
= f
.constantI32(int32_t(funcIndex
));
6702 if (!funcIndexArg
) {
6706 // The return value here is either null, denoting an error, or a short-lived
6707 // pointer to a location containing a possibly-null ref.
6709 if (!f
.emitInstanceCall1(bytecodeOffset
, SASigRefFunc
, funcIndexArg
, &ret
)) {
6713 f
.iter().setResult(ret
);
6717 static bool EmitRefNull(FunctionCompiler
& f
) {
6719 if (!f
.iter().readRefNull(&type
)) {
6723 if (f
.inDeadCode()) {
6727 MDefinition
* nullVal
= f
.constantNullRef();
6731 f
.iter().setResult(nullVal
);
6735 static bool EmitRefIsNull(FunctionCompiler
& f
) {
6737 if (!f
.iter().readRefIsNull(&input
)) {
6741 if (f
.inDeadCode()) {
6745 MDefinition
* nullVal
= f
.constantNullRef();
6750 f
.compare(input
, nullVal
, JSOp::Eq
, MCompare::Compare_WasmAnyRef
));
6754 #ifdef ENABLE_WASM_SIMD
6755 static bool EmitConstSimd128(FunctionCompiler
& f
) {
6757 if (!f
.iter().readV128Const(&v128
)) {
6761 f
.iter().setResult(f
.constantV128(v128
));
6765 static bool EmitBinarySimd128(FunctionCompiler
& f
, bool commutative
,
6769 if (!f
.iter().readBinary(ValType::V128
, &lhs
, &rhs
)) {
6773 f
.iter().setResult(f
.binarySimd128(lhs
, rhs
, commutative
, op
));
6777 static bool EmitTernarySimd128(FunctionCompiler
& f
, wasm::SimdOp op
) {
6781 if (!f
.iter().readTernary(ValType::V128
, &v0
, &v1
, &v2
)) {
6785 f
.iter().setResult(f
.ternarySimd128(v0
, v1
, v2
, op
));
6789 static bool EmitShiftSimd128(FunctionCompiler
& f
, SimdOp op
) {
6792 if (!f
.iter().readVectorShift(&lhs
, &rhs
)) {
6796 f
.iter().setResult(f
.shiftSimd128(lhs
, rhs
, op
));
6800 static bool EmitSplatSimd128(FunctionCompiler
& f
, ValType inType
, SimdOp op
) {
6802 if (!f
.iter().readConversion(inType
, ValType::V128
, &src
)) {
6806 f
.iter().setResult(f
.scalarToSimd128(src
, op
));
6810 static bool EmitUnarySimd128(FunctionCompiler
& f
, SimdOp op
) {
6812 if (!f
.iter().readUnary(ValType::V128
, &src
)) {
6816 f
.iter().setResult(f
.unarySimd128(src
, op
));
6820 static bool EmitReduceSimd128(FunctionCompiler
& f
, SimdOp op
) {
6822 if (!f
.iter().readConversion(ValType::V128
, ValType::I32
, &src
)) {
6826 f
.iter().setResult(f
.reduceSimd128(src
, op
, ValType::I32
));
6830 static bool EmitExtractLaneSimd128(FunctionCompiler
& f
, ValType outType
,
6831 uint32_t laneLimit
, SimdOp op
) {
6834 if (!f
.iter().readExtractLane(outType
, laneLimit
, &laneIndex
, &src
)) {
6838 f
.iter().setResult(f
.reduceSimd128(src
, op
, outType
, laneIndex
));
6842 static bool EmitReplaceLaneSimd128(FunctionCompiler
& f
, ValType laneType
,
6843 uint32_t laneLimit
, SimdOp op
) {
6847 if (!f
.iter().readReplaceLane(laneType
, laneLimit
, &laneIndex
, &lhs
, &rhs
)) {
6851 f
.iter().setResult(f
.replaceLaneSimd128(lhs
, rhs
, laneIndex
, op
));
6855 static bool EmitShuffleSimd128(FunctionCompiler
& f
) {
6859 if (!f
.iter().readVectorShuffle(&v1
, &v2
, &control
)) {
6863 f
.iter().setResult(f
.shuffleSimd128(v1
, v2
, control
));
6867 static bool EmitLoadSplatSimd128(FunctionCompiler
& f
, Scalar::Type viewType
,
6868 wasm::SimdOp splatOp
) {
6869 LinearMemoryAddress
<MDefinition
*> addr
;
6870 if (!f
.iter().readLoadSplat(Scalar::byteSize(viewType
), &addr
)) {
6874 f
.iter().setResult(f
.loadSplatSimd128(viewType
, addr
, splatOp
));
6878 static bool EmitLoadExtendSimd128(FunctionCompiler
& f
, wasm::SimdOp op
) {
6879 LinearMemoryAddress
<MDefinition
*> addr
;
6880 if (!f
.iter().readLoadExtend(&addr
)) {
6884 f
.iter().setResult(f
.loadExtendSimd128(addr
, op
));
6888 static bool EmitLoadZeroSimd128(FunctionCompiler
& f
, Scalar::Type viewType
,
6890 LinearMemoryAddress
<MDefinition
*> addr
;
6891 if (!f
.iter().readLoadSplat(numBytes
, &addr
)) {
6895 f
.iter().setResult(f
.loadZeroSimd128(viewType
, numBytes
, addr
));
6899 static bool EmitLoadLaneSimd128(FunctionCompiler
& f
, uint32_t laneSize
) {
6902 LinearMemoryAddress
<MDefinition
*> addr
;
6903 if (!f
.iter().readLoadLane(laneSize
, &addr
, &laneIndex
, &src
)) {
6907 f
.iter().setResult(f
.loadLaneSimd128(laneSize
, addr
, laneIndex
, src
));
6911 static bool EmitStoreLaneSimd128(FunctionCompiler
& f
, uint32_t laneSize
) {
6914 LinearMemoryAddress
<MDefinition
*> addr
;
6915 if (!f
.iter().readStoreLane(laneSize
, &addr
, &laneIndex
, &src
)) {
6919 f
.storeLaneSimd128(laneSize
, addr
, laneIndex
, src
);
6923 #endif // ENABLE_WASM_SIMD
6925 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
6926 static bool EmitRefAsNonNull(FunctionCompiler
& f
) {
6928 if (!f
.iter().readRefAsNonNull(&ref
)) {
6932 return f
.refAsNonNull(ref
);
6935 static bool EmitBrOnNull(FunctionCompiler
& f
) {
6936 uint32_t relativeDepth
;
6939 MDefinition
* condition
;
6940 if (!f
.iter().readBrOnNull(&relativeDepth
, &type
, &values
, &condition
)) {
6944 return f
.brOnNull(relativeDepth
, values
, type
, condition
);
6947 static bool EmitBrOnNonNull(FunctionCompiler
& f
) {
6948 uint32_t relativeDepth
;
6951 MDefinition
* condition
;
6952 if (!f
.iter().readBrOnNonNull(&relativeDepth
, &type
, &values
, &condition
)) {
6956 return f
.brOnNonNull(relativeDepth
, values
, type
, condition
);
6959 static bool EmitCallRef(FunctionCompiler
& f
) {
6960 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
6962 const FuncType
* funcType
;
6963 MDefinition
* callee
;
6966 if (!f
.iter().readCallRef(&funcType
, &callee
, &args
)) {
6970 if (f
.inDeadCode()) {
6974 CallCompileState call
;
6975 if (!EmitCallArgs(f
, *funcType
, args
, &call
)) {
6980 if (!f
.callRef(*funcType
, callee
, lineOrBytecode
, call
, &results
)) {
6984 f
.iter().setResults(results
.length(), results
);
6988 #endif // ENABLE_WASM_FUNCTION_REFERENCES
6990 #ifdef ENABLE_WASM_GC
6992 static bool EmitStructNew(FunctionCompiler
& f
) {
6993 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
6997 if (!f
.iter().readStructNew(&typeIndex
, &args
)) {
7001 if (f
.inDeadCode()) {
7005 const TypeDef
& typeDef
= (*f
.moduleEnv().types
)[typeIndex
];
7006 const StructType
& structType
= typeDef
.structType();
7007 MOZ_ASSERT(args
.length() == structType
.fields_
.length());
7009 MDefinition
* structObject
= f
.createStructObject(typeIndex
, false);
7010 if (!structObject
) {
7014 // And fill in the fields.
7015 for (uint32_t fieldIndex
= 0; fieldIndex
< structType
.fields_
.length();
7017 if (!f
.mirGen().ensureBallast()) {
7020 const StructField
& field
= structType
.fields_
[fieldIndex
];
7021 if (!f
.writeValueToStructField(lineOrBytecode
, field
, structObject
,
7023 WasmPreBarrierKind::None
)) {
7028 f
.iter().setResult(structObject
);
7032 static bool EmitStructNewDefault(FunctionCompiler
& f
) {
7033 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
7036 if (!f
.iter().readStructNewDefault(&typeIndex
)) {
7040 if (f
.inDeadCode()) {
7044 const StructType
& structType
= (*f
.moduleEnv().types
)[typeIndex
].structType();
7046 // Allocate a default initialized struct. This requires the type definition
7048 MDefinition
* typeDefData
= f
.loadTypeDefInstanceData(typeIndex
);
7053 // Figure out whether we need an OOL storage area, and hence which routine
7055 SymbolicAddressSignature calleeSASig
=
7056 WasmStructObject::requiresOutlineBytes(structType
.size_
)
7057 ? SASigStructNewOOL_true
7058 : SASigStructNewIL_true
;
7060 // Create call: structObject = Instance::structNew{IL,OOL}<true>(typeDefData)
7061 MDefinition
* structObject
;
7062 if (!f
.emitInstanceCall1(lineOrBytecode
, calleeSASig
, typeDefData
,
7067 f
.iter().setResult(structObject
);
7071 static bool EmitStructSet(FunctionCompiler
& f
) {
7072 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
7075 uint32_t fieldIndex
;
7076 MDefinition
* structObject
;
7078 if (!f
.iter().readStructSet(&typeIndex
, &fieldIndex
, &structObject
, &value
)) {
7082 if (f
.inDeadCode()) {
7086 // Check for null is done at writeValueToStructField.
7088 // And fill in the field.
7089 const StructType
& structType
= (*f
.moduleEnv().types
)[typeIndex
].structType();
7090 const StructField
& field
= structType
.fields_
[fieldIndex
];
7091 return f
.writeValueToStructField(lineOrBytecode
, field
, structObject
, value
,
7092 WasmPreBarrierKind::Normal
);
7095 static bool EmitStructGet(FunctionCompiler
& f
, FieldWideningOp wideningOp
) {
7097 uint32_t fieldIndex
;
7098 MDefinition
* structObject
;
7099 if (!f
.iter().readStructGet(&typeIndex
, &fieldIndex
, wideningOp
,
7104 if (f
.inDeadCode()) {
7108 // Check for null is done at readValueFromStructField.
7110 // And fetch the data.
7111 const StructType
& structType
= (*f
.moduleEnv().types
)[typeIndex
].structType();
7112 const StructField
& field
= structType
.fields_
[fieldIndex
];
7114 f
.readValueFromStructField(field
, wideningOp
, structObject
);
7119 f
.iter().setResult(load
);
7123 static bool EmitArrayNew(FunctionCompiler
& f
) {
7124 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
7127 MDefinition
* numElements
;
7128 MDefinition
* fillValue
;
7129 if (!f
.iter().readArrayNew(&typeIndex
, &numElements
, &fillValue
)) {
7133 if (f
.inDeadCode()) {
7137 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
7138 // this helper will trap.
7139 MDefinition
* arrayObject
= f
.createArrayNewCallAndLoop(
7140 lineOrBytecode
, typeIndex
, numElements
, fillValue
);
7145 f
.iter().setResult(arrayObject
);
7149 static bool EmitArrayNewDefault(FunctionCompiler
& f
) {
7150 // This is almost identical to EmitArrayNew, except we skip the
7151 // initialisation loop.
7152 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
7155 MDefinition
* numElements
;
7156 if (!f
.iter().readArrayNewDefault(&typeIndex
, &numElements
)) {
7160 if (f
.inDeadCode()) {
7164 // Create the array object, default-initialized.
7165 MDefinition
* arrayObject
= f
.createDefaultInitializedArrayObject(
7166 lineOrBytecode
, typeIndex
, numElements
);
7171 f
.iter().setResult(arrayObject
);
7175 static bool EmitArrayNewFixed(FunctionCompiler
& f
) {
7176 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
7178 uint32_t typeIndex
, numElements
;
7181 if (!f
.iter().readArrayNewFixed(&typeIndex
, &numElements
, &values
)) {
7184 MOZ_ASSERT(values
.length() == numElements
);
7186 if (f
.inDeadCode()) {
7190 MDefinition
* numElementsDef
= f
.constantI32(int32_t(numElements
));
7191 if (!numElementsDef
) {
7195 // Create the array object, default-initialized.
7196 MDefinition
* arrayObject
= f
.createDefaultInitializedArrayObject(
7197 lineOrBytecode
, typeIndex
, numElementsDef
);
7202 // Make `base` point at the first byte of the (OOL) data area.
7203 MDefinition
* base
= f
.getWasmArrayObjectData(arrayObject
);
7208 // Write each element in turn.
7209 const ArrayType
& arrayType
= (*f
.moduleEnv().types
)[typeIndex
].arrayType();
7210 FieldType elemFieldType
= arrayType
.elementType_
;
7211 uint32_t elemSize
= elemFieldType
.size();
7213 // How do we know that the offset expression `i * elemSize` below remains
7214 // within 2^31 (signed-i32) range? In the worst case we will have 16-byte
7215 // values, and there can be at most MaxFunctionBytes expressions, if it were
7216 // theoretically possible to generate one expression per instruction byte.
7217 // Hence the max offset we can be expected to generate is
7218 // `16 * MaxFunctionBytes`.
7219 static_assert(16 /* sizeof v128 */ * MaxFunctionBytes
<=
7220 MaxArrayPayloadBytes
);
7221 MOZ_RELEASE_ASSERT(numElements
<= MaxFunctionBytes
);
7223 for (uint32_t i
= 0; i
< numElements
; i
++) {
7224 if (!f
.mirGen().ensureBallast()) {
7227 // `i * elemSize` is made safe by the assertions above.
7228 if (!f
.writeGcValueAtBasePlusOffset(
7229 lineOrBytecode
, elemFieldType
, arrayObject
,
7230 AliasSet::WasmArrayDataArea
, values
[numElements
- 1 - i
], base
,
7231 i
* elemSize
, false, WasmPreBarrierKind::None
)) {
7236 f
.iter().setResult(arrayObject
);
7240 static bool EmitArrayNewData(FunctionCompiler
& f
) {
7241 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
7243 uint32_t typeIndex
, segIndex
;
7244 MDefinition
* segByteOffset
;
7245 MDefinition
* numElements
;
7246 if (!f
.iter().readArrayNewData(&typeIndex
, &segIndex
, &segByteOffset
,
7251 if (f
.inDeadCode()) {
7255 // Get the type definition data for the array as a whole.
7256 MDefinition
* typeDefData
= f
.loadTypeDefInstanceData(typeIndex
);
7261 // Other values we need to pass to the instance call:
7262 MDefinition
* segIndexM
= f
.constantI32(int32_t(segIndex
));
7268 // arrayObject = Instance::arrayNewData(segByteOffset:u32, numElements:u32,
7269 // typeDefData:word, segIndex:u32)
7270 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
7271 // this call will trap.
7272 MDefinition
* arrayObject
;
7273 if (!f
.emitInstanceCall4(lineOrBytecode
, SASigArrayNewData
, segByteOffset
,
7274 numElements
, typeDefData
, segIndexM
, &arrayObject
)) {
7278 f
.iter().setResult(arrayObject
);
7282 static bool EmitArrayNewElem(FunctionCompiler
& f
) {
7283 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
7285 uint32_t typeIndex
, segIndex
;
7286 MDefinition
* segElemIndex
;
7287 MDefinition
* numElements
;
7288 if (!f
.iter().readArrayNewElem(&typeIndex
, &segIndex
, &segElemIndex
,
7293 if (f
.inDeadCode()) {
7297 // Get the type definition for the array as a whole.
7298 // Get the type definition data for the array as a whole.
7299 MDefinition
* typeDefData
= f
.loadTypeDefInstanceData(typeIndex
);
7304 // Other values we need to pass to the instance call:
7305 MDefinition
* segIndexM
= f
.constantI32(int32_t(segIndex
));
7311 // arrayObject = Instance::arrayNewElem(segElemIndex:u32, numElements:u32,
7312 // typeDefData:word, segIndex:u32)
7313 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
7314 // this call will trap.
7315 MDefinition
* arrayObject
;
7316 if (!f
.emitInstanceCall4(lineOrBytecode
, SASigArrayNewElem
, segElemIndex
,
7317 numElements
, typeDefData
, segIndexM
, &arrayObject
)) {
7321 f
.iter().setResult(arrayObject
);
7325 static bool EmitArrayInitData(FunctionCompiler
& f
) {
7326 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
7328 uint32_t typeIndex
, segIndex
;
7330 MDefinition
* arrayIndex
;
7331 MDefinition
* segOffset
;
7332 MDefinition
* length
;
7333 if (!f
.iter().readArrayInitData(&typeIndex
, &segIndex
, &array
, &arrayIndex
,
7334 &segOffset
, &length
)) {
7338 if (f
.inDeadCode()) {
7342 // Get the type definition data for the array as a whole.
7343 MDefinition
* typeDefData
= f
.loadTypeDefInstanceData(typeIndex
);
7348 // Other values we need to pass to the instance call:
7349 MDefinition
* segIndexM
= f
.constantI32(int32_t(segIndex
));
7355 // Instance::arrayInitData(array:word, index:u32, segByteOffset:u32,
7356 // numElements:u32, typeDefData:word, segIndex:u32) If the requested size
7357 // exceeds MaxArrayPayloadBytes, the MIR generated by this call will trap.
7358 return f
.emitInstanceCall6(lineOrBytecode
, SASigArrayInitData
, array
,
7359 arrayIndex
, segOffset
, length
, typeDefData
,
7363 static bool EmitArrayInitElem(FunctionCompiler
& f
) {
7364 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
7366 uint32_t typeIndex
, segIndex
;
7368 MDefinition
* arrayIndex
;
7369 MDefinition
* segOffset
;
7370 MDefinition
* length
;
7371 if (!f
.iter().readArrayInitElem(&typeIndex
, &segIndex
, &array
, &arrayIndex
,
7372 &segOffset
, &length
)) {
7376 if (f
.inDeadCode()) {
7380 // Get the type definition data for the array as a whole.
7381 MDefinition
* typeDefData
= f
.loadTypeDefInstanceData(typeIndex
);
7386 // Other values we need to pass to the instance call:
7387 MDefinition
* segIndexM
= f
.constantI32(int32_t(segIndex
));
7393 // Instance::arrayInitElem(array:word, index:u32, segByteOffset:u32,
7394 // numElements:u32, typeDefData:word, segIndex:u32) If the requested size
7395 // exceeds MaxArrayPayloadBytes, the MIR generated by this call will trap.
7396 return f
.emitInstanceCall6(lineOrBytecode
, SASigArrayInitElem
, array
,
7397 arrayIndex
, segOffset
, length
, typeDefData
,
7401 static bool EmitArraySet(FunctionCompiler
& f
) {
7402 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
7407 MDefinition
* arrayObject
;
7408 if (!f
.iter().readArraySet(&typeIndex
, &value
, &index
, &arrayObject
)) {
7412 if (f
.inDeadCode()) {
7416 // Check for null is done at setupForArrayAccess.
7418 // Create the object null check and the array bounds check and get the OOL
7420 MDefinition
* base
= f
.setupForArrayAccess(arrayObject
, index
);
7425 // And do the store.
7426 const ArrayType
& arrayType
= (*f
.moduleEnv().types
)[typeIndex
].arrayType();
7427 FieldType elemFieldType
= arrayType
.elementType_
;
7428 uint32_t elemSize
= elemFieldType
.size();
7429 MOZ_ASSERT(elemSize
>= 1 && elemSize
<= 16);
7431 return f
.writeGcValueAtBasePlusScaledIndex(
7432 lineOrBytecode
, elemFieldType
, arrayObject
, AliasSet::WasmArrayDataArea
,
7433 value
, base
, elemSize
, index
, WasmPreBarrierKind::Normal
);
7436 static bool EmitArrayGet(FunctionCompiler
& f
, FieldWideningOp wideningOp
) {
7439 MDefinition
* arrayObject
;
7440 if (!f
.iter().readArrayGet(&typeIndex
, wideningOp
, &index
, &arrayObject
)) {
7444 if (f
.inDeadCode()) {
7448 // Check for null is done at setupForArrayAccess.
7450 // Create the object null check and the array bounds check and get the OOL
7452 MDefinition
* base
= f
.setupForArrayAccess(arrayObject
, index
);
7458 const ArrayType
& arrayType
= (*f
.moduleEnv().types
)[typeIndex
].arrayType();
7459 FieldType elemFieldType
= arrayType
.elementType_
;
7460 uint32_t elemSize
= elemFieldType
.size();
7461 MOZ_ASSERT(elemSize
>= 1 && elemSize
<= 16);
7463 MDefinition
* load
= f
.readGcValueAtBasePlusScaledIndex(
7464 elemFieldType
, wideningOp
, arrayObject
, AliasSet::WasmArrayDataArea
, base
,
7470 f
.iter().setResult(load
);
7474 static bool EmitArrayLen(FunctionCompiler
& f
) {
7475 MDefinition
* arrayObject
;
7476 if (!f
.iter().readArrayLen(&arrayObject
)) {
7480 if (f
.inDeadCode()) {
7484 // Check for null is done at getWasmArrayObjectNumElements.
7486 // Get the size value for the array
7487 MDefinition
* numElements
= f
.getWasmArrayObjectNumElements(arrayObject
);
7492 f
.iter().setResult(numElements
);
7496 static bool EmitArrayCopy(FunctionCompiler
& f
) {
7497 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
7500 bool elemsAreRefTyped
;
7501 MDefinition
* dstArrayObject
;
7502 MDefinition
* dstArrayIndex
;
7503 MDefinition
* srcArrayObject
;
7504 MDefinition
* srcArrayIndex
;
7505 MDefinition
* numElements
;
7506 if (!f
.iter().readArrayCopy(&elemSize
, &elemsAreRefTyped
, &dstArrayObject
,
7507 &dstArrayIndex
, &srcArrayObject
, &srcArrayIndex
,
7512 if (f
.inDeadCode()) {
7516 MOZ_ASSERT_IF(elemsAreRefTyped
,
7517 size_t(elemSize
) == MIRTypeToSize(TargetWordMIRType()));
7518 MOZ_ASSERT_IF(!elemsAreRefTyped
, elemSize
== 1 || elemSize
== 2 ||
7519 elemSize
== 4 || elemSize
== 8 ||
7522 // A negative element size is used to inform Instance::arrayCopy that the
7523 // values are reftyped. This avoids having to pass it an extra boolean
7525 MDefinition
* elemSizeDef
=
7526 f
.constantI32(elemsAreRefTyped
? -elemSize
: elemSize
);
7532 // Instance::arrayCopy(dstArrayObject:word, dstArrayIndex:u32,
7533 // srcArrayObject:word, srcArrayIndex:u32,
7535 // (elemsAreRefTyped ? -elemSize : elemSize):u32))
7536 return f
.emitInstanceCall6(lineOrBytecode
, SASigArrayCopy
, dstArrayObject
,
7537 dstArrayIndex
, srcArrayObject
, srcArrayIndex
,
7538 numElements
, elemSizeDef
);
7541 static bool EmitArrayFill(FunctionCompiler
& f
) {
7542 uint32_t lineOrBytecode
= f
.readCallSiteLineOrBytecode();
7548 MDefinition
* numElements
;
7549 if (!f
.iter().readArrayFill(&typeIndex
, &array
, &index
, &val
, &numElements
)) {
7553 if (f
.inDeadCode()) {
7557 return f
.createArrayFill(lineOrBytecode
, typeIndex
, array
, index
, val
,
7561 static bool EmitRefI31(FunctionCompiler
& f
) {
7563 if (!f
.iter().readConversion(
7564 ValType::I32
, ValType(RefType::i31().asNonNullable()), &input
)) {
7568 if (f
.inDeadCode()) {
7572 MDefinition
* output
= f
.refI31(input
);
7576 f
.iter().setResult(output
);
7580 static bool EmitI31Get(FunctionCompiler
& f
, FieldWideningOp wideningOp
) {
7581 MOZ_ASSERT(wideningOp
!= FieldWideningOp::None
);
7584 if (!f
.iter().readConversion(ValType(RefType::i31()), ValType::I32
, &input
)) {
7588 if (f
.inDeadCode()) {
7592 if (!f
.refAsNonNull(input
)) {
7595 MDefinition
* output
= f
.i31Get(input
, wideningOp
);
7599 f
.iter().setResult(output
);
7603 static bool EmitRefTest(FunctionCompiler
& f
, bool nullable
) {
7607 if (!f
.iter().readRefTest(nullable
, &sourceType
, &destType
, &ref
)) {
7611 if (f
.inDeadCode()) {
7615 MDefinition
* success
= f
.refTest(ref
, sourceType
, destType
);
7620 f
.iter().setResult(success
);
7624 static bool EmitRefCast(FunctionCompiler
& f
, bool nullable
) {
7628 if (!f
.iter().readRefCast(nullable
, &sourceType
, &destType
, &ref
)) {
7632 if (f
.inDeadCode()) {
7636 if (!f
.refCast(ref
, sourceType
, destType
)) {
7640 f
.iter().setResult(ref
);
7644 static bool EmitBrOnCast(FunctionCompiler
& f
, bool onSuccess
) {
7645 uint32_t labelRelativeDepth
;
7648 ResultType labelType
;
7650 if (!f
.iter().readBrOnCast(onSuccess
, &labelRelativeDepth
, &sourceType
,
7651 &destType
, &labelType
, &values
)) {
7655 return f
.brOnCastCommon(onSuccess
, labelRelativeDepth
, sourceType
, destType
,
7659 static bool EmitAnyConvertExtern(FunctionCompiler
& f
) {
7660 // any.convert_extern is a no-op because anyref and extern share the same
7663 if (!f
.iter().readRefConversion(RefType::extern_(), RefType::any(), &ref
)) {
7667 f
.iter().setResult(ref
);
7671 static bool EmitExternConvertAny(FunctionCompiler
& f
) {
7672 // extern.convert_any is a no-op because anyref and extern share the same
7675 if (!f
.iter().readRefConversion(RefType::any(), RefType::extern_(), &ref
)) {
7679 f
.iter().setResult(ref
);
7683 #endif // ENABLE_WASM_GC
7685 static bool EmitCallBuiltinModuleFunc(FunctionCompiler
& f
) {
7686 // It's almost possible to use FunctionCompiler::emitInstanceCallN here.
7687 // Unfortunately not currently possible though, since ::emitInstanceCallN
7688 // expects an array of arguments along with a size, and that's not what is
7689 // available here. It would be possible if we were prepared to copy
7690 // `builtinModuleFunc->params` into a fixed-sized (16 element?) array, add
7691 // `memoryBase`, and make the call.
7692 const BuiltinModuleFunc
* builtinModuleFunc
;
7695 if (!f
.iter().readCallBuiltinModuleFunc(&builtinModuleFunc
, ¶ms
)) {
7699 uint32_t bytecodeOffset
= f
.readBytecodeOffset();
7700 const SymbolicAddressSignature
& callee
= builtinModuleFunc
->signature
;
7702 CallCompileState args
;
7703 if (!f
.passInstance(callee
.argTypes
[0], &args
)) {
7707 if (!f
.passArgs(params
, builtinModuleFunc
->params
, &args
)) {
7711 if (builtinModuleFunc
->usesMemory
) {
7712 MDefinition
* memoryBase
= f
.memoryBase(0);
7713 if (!f
.passArg(memoryBase
, MIRType::Pointer
, &args
)) {
7718 if (!f
.finishCall(&args
)) {
7722 bool hasResult
= builtinModuleFunc
->result
.isSome();
7723 MDefinition
* result
= nullptr;
7724 MDefinition
** resultOutParam
= hasResult
? &result
: nullptr;
7725 if (!f
.builtinInstanceMethodCall(callee
, bytecodeOffset
, args
,
7731 f
.iter().setResult(result
);
7736 static bool EmitBodyExprs(FunctionCompiler
& f
) {
7737 if (!f
.iter().startFunction(f
.funcIndex(), f
.locals())) {
7742 if (!(c)) return false; \
7746 if (!f
.mirGen().ensureBallast()) {
7751 if (!f
.iter().readOp(&op
)) {
7756 case uint16_t(Op::End
):
7760 if (f
.iter().controlStackEmpty()) {
7766 case uint16_t(Op::Unreachable
):
7767 CHECK(EmitUnreachable(f
));
7768 case uint16_t(Op::Nop
):
7769 CHECK(f
.iter().readNop());
7770 case uint16_t(Op::Block
):
7771 CHECK(EmitBlock(f
));
7772 case uint16_t(Op::Loop
):
7774 case uint16_t(Op::If
):
7776 case uint16_t(Op::Else
):
7778 case uint16_t(Op::Try
):
7779 if (!f
.moduleEnv().exceptionsEnabled()) {
7780 return f
.iter().unrecognizedOpcode(&op
);
7783 case uint16_t(Op::Catch
):
7784 if (!f
.moduleEnv().exceptionsEnabled()) {
7785 return f
.iter().unrecognizedOpcode(&op
);
7787 CHECK(EmitCatch(f
));
7788 case uint16_t(Op::CatchAll
):
7789 if (!f
.moduleEnv().exceptionsEnabled()) {
7790 return f
.iter().unrecognizedOpcode(&op
);
7792 CHECK(EmitCatchAll(f
));
7793 case uint16_t(Op::Delegate
):
7794 if (!f
.moduleEnv().exceptionsEnabled()) {
7795 return f
.iter().unrecognizedOpcode(&op
);
7797 if (!EmitDelegate(f
)) {
7801 case uint16_t(Op::Throw
):
7802 if (!f
.moduleEnv().exceptionsEnabled()) {
7803 return f
.iter().unrecognizedOpcode(&op
);
7805 CHECK(EmitThrow(f
));
7806 case uint16_t(Op::Rethrow
):
7807 if (!f
.moduleEnv().exceptionsEnabled()) {
7808 return f
.iter().unrecognizedOpcode(&op
);
7810 CHECK(EmitRethrow(f
));
7811 case uint16_t(Op::Br
):
7813 case uint16_t(Op::BrIf
):
7815 case uint16_t(Op::BrTable
):
7816 CHECK(EmitBrTable(f
));
7817 case uint16_t(Op::Return
):
7818 CHECK(EmitReturn(f
));
7821 case uint16_t(Op::Call
):
7822 CHECK(EmitCall(f
, /* asmJSFuncDef = */ false));
7823 case uint16_t(Op::CallIndirect
):
7824 CHECK(EmitCallIndirect(f
, /* oldStyle = */ false));
7826 // Parametric operators
7827 case uint16_t(Op::Drop
):
7828 CHECK(f
.iter().readDrop());
7829 case uint16_t(Op::SelectNumeric
):
7830 CHECK(EmitSelect(f
, /*typed*/ false));
7831 case uint16_t(Op::SelectTyped
):
7832 CHECK(EmitSelect(f
, /*typed*/ true));
7834 // Locals and globals
7835 case uint16_t(Op::LocalGet
):
7836 CHECK(EmitGetLocal(f
));
7837 case uint16_t(Op::LocalSet
):
7838 CHECK(EmitSetLocal(f
));
7839 case uint16_t(Op::LocalTee
):
7840 CHECK(EmitTeeLocal(f
));
7841 case uint16_t(Op::GlobalGet
):
7842 CHECK(EmitGetGlobal(f
));
7843 case uint16_t(Op::GlobalSet
):
7844 CHECK(EmitSetGlobal(f
));
7845 case uint16_t(Op::TableGet
):
7846 CHECK(EmitTableGet(f
));
7847 case uint16_t(Op::TableSet
):
7848 CHECK(EmitTableSet(f
));
7850 // Memory-related operators
7851 case uint16_t(Op::I32Load
):
7852 CHECK(EmitLoad(f
, ValType::I32
, Scalar::Int32
));
7853 case uint16_t(Op::I64Load
):
7854 CHECK(EmitLoad(f
, ValType::I64
, Scalar::Int64
));
7855 case uint16_t(Op::F32Load
):
7856 CHECK(EmitLoad(f
, ValType::F32
, Scalar::Float32
));
7857 case uint16_t(Op::F64Load
):
7858 CHECK(EmitLoad(f
, ValType::F64
, Scalar::Float64
));
7859 case uint16_t(Op::I32Load8S
):
7860 CHECK(EmitLoad(f
, ValType::I32
, Scalar::Int8
));
7861 case uint16_t(Op::I32Load8U
):
7862 CHECK(EmitLoad(f
, ValType::I32
, Scalar::Uint8
));
7863 case uint16_t(Op::I32Load16S
):
7864 CHECK(EmitLoad(f
, ValType::I32
, Scalar::Int16
));
7865 case uint16_t(Op::I32Load16U
):
7866 CHECK(EmitLoad(f
, ValType::I32
, Scalar::Uint16
));
7867 case uint16_t(Op::I64Load8S
):
7868 CHECK(EmitLoad(f
, ValType::I64
, Scalar::Int8
));
7869 case uint16_t(Op::I64Load8U
):
7870 CHECK(EmitLoad(f
, ValType::I64
, Scalar::Uint8
));
7871 case uint16_t(Op::I64Load16S
):
7872 CHECK(EmitLoad(f
, ValType::I64
, Scalar::Int16
));
7873 case uint16_t(Op::I64Load16U
):
7874 CHECK(EmitLoad(f
, ValType::I64
, Scalar::Uint16
));
7875 case uint16_t(Op::I64Load32S
):
7876 CHECK(EmitLoad(f
, ValType::I64
, Scalar::Int32
));
7877 case uint16_t(Op::I64Load32U
):
7878 CHECK(EmitLoad(f
, ValType::I64
, Scalar::Uint32
));
7879 case uint16_t(Op::I32Store
):
7880 CHECK(EmitStore(f
, ValType::I32
, Scalar::Int32
));
7881 case uint16_t(Op::I64Store
):
7882 CHECK(EmitStore(f
, ValType::I64
, Scalar::Int64
));
7883 case uint16_t(Op::F32Store
):
7884 CHECK(EmitStore(f
, ValType::F32
, Scalar::Float32
));
7885 case uint16_t(Op::F64Store
):
7886 CHECK(EmitStore(f
, ValType::F64
, Scalar::Float64
));
7887 case uint16_t(Op::I32Store8
):
7888 CHECK(EmitStore(f
, ValType::I32
, Scalar::Int8
));
7889 case uint16_t(Op::I32Store16
):
7890 CHECK(EmitStore(f
, ValType::I32
, Scalar::Int16
));
7891 case uint16_t(Op::I64Store8
):
7892 CHECK(EmitStore(f
, ValType::I64
, Scalar::Int8
));
7893 case uint16_t(Op::I64Store16
):
7894 CHECK(EmitStore(f
, ValType::I64
, Scalar::Int16
));
7895 case uint16_t(Op::I64Store32
):
7896 CHECK(EmitStore(f
, ValType::I64
, Scalar::Int32
));
7897 case uint16_t(Op::MemorySize
):
7898 CHECK(EmitMemorySize(f
));
7899 case uint16_t(Op::MemoryGrow
):
7900 CHECK(EmitMemoryGrow(f
));
7903 case uint16_t(Op::I32Const
):
7904 CHECK(EmitI32Const(f
));
7905 case uint16_t(Op::I64Const
):
7906 CHECK(EmitI64Const(f
));
7907 case uint16_t(Op::F32Const
):
7908 CHECK(EmitF32Const(f
));
7909 case uint16_t(Op::F64Const
):
7910 CHECK(EmitF64Const(f
));
7912 // Comparison operators
7913 case uint16_t(Op::I32Eqz
):
7914 CHECK(EmitConversion
<MNot
>(f
, ValType::I32
, ValType::I32
));
7915 case uint16_t(Op::I32Eq
):
7917 EmitComparison(f
, ValType::I32
, JSOp::Eq
, MCompare::Compare_Int32
));
7918 case uint16_t(Op::I32Ne
):
7920 EmitComparison(f
, ValType::I32
, JSOp::Ne
, MCompare::Compare_Int32
));
7921 case uint16_t(Op::I32LtS
):
7923 EmitComparison(f
, ValType::I32
, JSOp::Lt
, MCompare::Compare_Int32
));
7924 case uint16_t(Op::I32LtU
):
7925 CHECK(EmitComparison(f
, ValType::I32
, JSOp::Lt
,
7926 MCompare::Compare_UInt32
));
7927 case uint16_t(Op::I32GtS
):
7929 EmitComparison(f
, ValType::I32
, JSOp::Gt
, MCompare::Compare_Int32
));
7930 case uint16_t(Op::I32GtU
):
7931 CHECK(EmitComparison(f
, ValType::I32
, JSOp::Gt
,
7932 MCompare::Compare_UInt32
));
7933 case uint16_t(Op::I32LeS
):
7935 EmitComparison(f
, ValType::I32
, JSOp::Le
, MCompare::Compare_Int32
));
7936 case uint16_t(Op::I32LeU
):
7937 CHECK(EmitComparison(f
, ValType::I32
, JSOp::Le
,
7938 MCompare::Compare_UInt32
));
7939 case uint16_t(Op::I32GeS
):
7941 EmitComparison(f
, ValType::I32
, JSOp::Ge
, MCompare::Compare_Int32
));
7942 case uint16_t(Op::I32GeU
):
7943 CHECK(EmitComparison(f
, ValType::I32
, JSOp::Ge
,
7944 MCompare::Compare_UInt32
));
7945 case uint16_t(Op::I64Eqz
):
7946 CHECK(EmitConversion
<MNot
>(f
, ValType::I64
, ValType::I32
));
7947 case uint16_t(Op::I64Eq
):
7949 EmitComparison(f
, ValType::I64
, JSOp::Eq
, MCompare::Compare_Int64
));
7950 case uint16_t(Op::I64Ne
):
7952 EmitComparison(f
, ValType::I64
, JSOp::Ne
, MCompare::Compare_Int64
));
7953 case uint16_t(Op::I64LtS
):
7955 EmitComparison(f
, ValType::I64
, JSOp::Lt
, MCompare::Compare_Int64
));
7956 case uint16_t(Op::I64LtU
):
7957 CHECK(EmitComparison(f
, ValType::I64
, JSOp::Lt
,
7958 MCompare::Compare_UInt64
));
7959 case uint16_t(Op::I64GtS
):
7961 EmitComparison(f
, ValType::I64
, JSOp::Gt
, MCompare::Compare_Int64
));
7962 case uint16_t(Op::I64GtU
):
7963 CHECK(EmitComparison(f
, ValType::I64
, JSOp::Gt
,
7964 MCompare::Compare_UInt64
));
7965 case uint16_t(Op::I64LeS
):
7967 EmitComparison(f
, ValType::I64
, JSOp::Le
, MCompare::Compare_Int64
));
7968 case uint16_t(Op::I64LeU
):
7969 CHECK(EmitComparison(f
, ValType::I64
, JSOp::Le
,
7970 MCompare::Compare_UInt64
));
7971 case uint16_t(Op::I64GeS
):
7973 EmitComparison(f
, ValType::I64
, JSOp::Ge
, MCompare::Compare_Int64
));
7974 case uint16_t(Op::I64GeU
):
7975 CHECK(EmitComparison(f
, ValType::I64
, JSOp::Ge
,
7976 MCompare::Compare_UInt64
));
7977 case uint16_t(Op::F32Eq
):
7978 CHECK(EmitComparison(f
, ValType::F32
, JSOp::Eq
,
7979 MCompare::Compare_Float32
));
7980 case uint16_t(Op::F32Ne
):
7981 CHECK(EmitComparison(f
, ValType::F32
, JSOp::Ne
,
7982 MCompare::Compare_Float32
));
7983 case uint16_t(Op::F32Lt
):
7984 CHECK(EmitComparison(f
, ValType::F32
, JSOp::Lt
,
7985 MCompare::Compare_Float32
));
7986 case uint16_t(Op::F32Gt
):
7987 CHECK(EmitComparison(f
, ValType::F32
, JSOp::Gt
,
7988 MCompare::Compare_Float32
));
7989 case uint16_t(Op::F32Le
):
7990 CHECK(EmitComparison(f
, ValType::F32
, JSOp::Le
,
7991 MCompare::Compare_Float32
));
7992 case uint16_t(Op::F32Ge
):
7993 CHECK(EmitComparison(f
, ValType::F32
, JSOp::Ge
,
7994 MCompare::Compare_Float32
));
7995 case uint16_t(Op::F64Eq
):
7996 CHECK(EmitComparison(f
, ValType::F64
, JSOp::Eq
,
7997 MCompare::Compare_Double
));
7998 case uint16_t(Op::F64Ne
):
7999 CHECK(EmitComparison(f
, ValType::F64
, JSOp::Ne
,
8000 MCompare::Compare_Double
));
8001 case uint16_t(Op::F64Lt
):
8002 CHECK(EmitComparison(f
, ValType::F64
, JSOp::Lt
,
8003 MCompare::Compare_Double
));
8004 case uint16_t(Op::F64Gt
):
8005 CHECK(EmitComparison(f
, ValType::F64
, JSOp::Gt
,
8006 MCompare::Compare_Double
));
8007 case uint16_t(Op::F64Le
):
8008 CHECK(EmitComparison(f
, ValType::F64
, JSOp::Le
,
8009 MCompare::Compare_Double
));
8010 case uint16_t(Op::F64Ge
):
8011 CHECK(EmitComparison(f
, ValType::F64
, JSOp::Ge
,
8012 MCompare::Compare_Double
));
8014 // Numeric operators
8015 case uint16_t(Op::I32Clz
):
8016 CHECK(EmitUnaryWithType
<MClz
>(f
, ValType::I32
, MIRType::Int32
));
8017 case uint16_t(Op::I32Ctz
):
8018 CHECK(EmitUnaryWithType
<MCtz
>(f
, ValType::I32
, MIRType::Int32
));
8019 case uint16_t(Op::I32Popcnt
):
8020 CHECK(EmitUnaryWithType
<MPopcnt
>(f
, ValType::I32
, MIRType::Int32
));
8021 case uint16_t(Op::I32Add
):
8022 CHECK(EmitAdd(f
, ValType::I32
, MIRType::Int32
));
8023 case uint16_t(Op::I32Sub
):
8024 CHECK(EmitSub(f
, ValType::I32
, MIRType::Int32
));
8025 case uint16_t(Op::I32Mul
):
8026 CHECK(EmitMul(f
, ValType::I32
, MIRType::Int32
));
8027 case uint16_t(Op::I32DivS
):
8028 case uint16_t(Op::I32DivU
):
8030 EmitDiv(f
, ValType::I32
, MIRType::Int32
, Op(op
.b0
) == Op::I32DivU
));
8031 case uint16_t(Op::I32RemS
):
8032 case uint16_t(Op::I32RemU
):
8034 EmitRem(f
, ValType::I32
, MIRType::Int32
, Op(op
.b0
) == Op::I32RemU
));
8035 case uint16_t(Op::I32And
):
8036 CHECK(EmitBitwiseAndOrXor(f
, ValType::I32
, MIRType::Int32
,
8037 MWasmBinaryBitwise::SubOpcode::And
));
8038 case uint16_t(Op::I32Or
):
8039 CHECK(EmitBitwiseAndOrXor(f
, ValType::I32
, MIRType::Int32
,
8040 MWasmBinaryBitwise::SubOpcode::Or
));
8041 case uint16_t(Op::I32Xor
):
8042 CHECK(EmitBitwiseAndOrXor(f
, ValType::I32
, MIRType::Int32
,
8043 MWasmBinaryBitwise::SubOpcode::Xor
));
8044 case uint16_t(Op::I32Shl
):
8045 CHECK(EmitShift
<MLsh
>(f
, ValType::I32
, MIRType::Int32
));
8046 case uint16_t(Op::I32ShrS
):
8047 CHECK(EmitShift
<MRsh
>(f
, ValType::I32
, MIRType::Int32
));
8048 case uint16_t(Op::I32ShrU
):
8049 CHECK(EmitUrsh(f
, ValType::I32
, MIRType::Int32
));
8050 case uint16_t(Op::I32Rotl
):
8051 case uint16_t(Op::I32Rotr
):
8052 CHECK(EmitRotate(f
, ValType::I32
, Op(op
.b0
) == Op::I32Rotl
));
8053 case uint16_t(Op::I64Clz
):
8054 CHECK(EmitUnaryWithType
<MClz
>(f
, ValType::I64
, MIRType::Int64
));
8055 case uint16_t(Op::I64Ctz
):
8056 CHECK(EmitUnaryWithType
<MCtz
>(f
, ValType::I64
, MIRType::Int64
));
8057 case uint16_t(Op::I64Popcnt
):
8058 CHECK(EmitUnaryWithType
<MPopcnt
>(f
, ValType::I64
, MIRType::Int64
));
8059 case uint16_t(Op::I64Add
):
8060 CHECK(EmitAdd(f
, ValType::I64
, MIRType::Int64
));
8061 case uint16_t(Op::I64Sub
):
8062 CHECK(EmitSub(f
, ValType::I64
, MIRType::Int64
));
8063 case uint16_t(Op::I64Mul
):
8064 CHECK(EmitMul(f
, ValType::I64
, MIRType::Int64
));
8065 case uint16_t(Op::I64DivS
):
8066 case uint16_t(Op::I64DivU
):
8068 EmitDiv(f
, ValType::I64
, MIRType::Int64
, Op(op
.b0
) == Op::I64DivU
));
8069 case uint16_t(Op::I64RemS
):
8070 case uint16_t(Op::I64RemU
):
8072 EmitRem(f
, ValType::I64
, MIRType::Int64
, Op(op
.b0
) == Op::I64RemU
));
8073 case uint16_t(Op::I64And
):
8074 CHECK(EmitBitwiseAndOrXor(f
, ValType::I64
, MIRType::Int64
,
8075 MWasmBinaryBitwise::SubOpcode::And
));
8076 case uint16_t(Op::I64Or
):
8077 CHECK(EmitBitwiseAndOrXor(f
, ValType::I64
, MIRType::Int64
,
8078 MWasmBinaryBitwise::SubOpcode::Or
));
8079 case uint16_t(Op::I64Xor
):
8080 CHECK(EmitBitwiseAndOrXor(f
, ValType::I64
, MIRType::Int64
,
8081 MWasmBinaryBitwise::SubOpcode::Xor
));
8082 case uint16_t(Op::I64Shl
):
8083 CHECK(EmitShift
<MLsh
>(f
, ValType::I64
, MIRType::Int64
));
8084 case uint16_t(Op::I64ShrS
):
8085 CHECK(EmitShift
<MRsh
>(f
, ValType::I64
, MIRType::Int64
));
8086 case uint16_t(Op::I64ShrU
):
8087 CHECK(EmitUrsh(f
, ValType::I64
, MIRType::Int64
));
8088 case uint16_t(Op::I64Rotl
):
8089 case uint16_t(Op::I64Rotr
):
8090 CHECK(EmitRotate(f
, ValType::I64
, Op(op
.b0
) == Op::I64Rotl
));
8091 case uint16_t(Op::F32Abs
):
8092 CHECK(EmitUnaryWithType
<MAbs
>(f
, ValType::F32
, MIRType::Float32
));
8093 case uint16_t(Op::F32Neg
):
8094 CHECK(EmitUnaryWithType
<MWasmNeg
>(f
, ValType::F32
, MIRType::Float32
));
8095 case uint16_t(Op::F32Ceil
):
8096 CHECK(EmitUnaryMathBuiltinCall(f
, SASigCeilF
));
8097 case uint16_t(Op::F32Floor
):
8098 CHECK(EmitUnaryMathBuiltinCall(f
, SASigFloorF
));
8099 case uint16_t(Op::F32Trunc
):
8100 CHECK(EmitUnaryMathBuiltinCall(f
, SASigTruncF
));
8101 case uint16_t(Op::F32Nearest
):
8102 CHECK(EmitUnaryMathBuiltinCall(f
, SASigNearbyIntF
));
8103 case uint16_t(Op::F32Sqrt
):
8104 CHECK(EmitUnaryWithType
<MSqrt
>(f
, ValType::F32
, MIRType::Float32
));
8105 case uint16_t(Op::F32Add
):
8106 CHECK(EmitAdd(f
, ValType::F32
, MIRType::Float32
));
8107 case uint16_t(Op::F32Sub
):
8108 CHECK(EmitSub(f
, ValType::F32
, MIRType::Float32
));
8109 case uint16_t(Op::F32Mul
):
8110 CHECK(EmitMul(f
, ValType::F32
, MIRType::Float32
));
8111 case uint16_t(Op::F32Div
):
8112 CHECK(EmitDiv(f
, ValType::F32
, MIRType::Float32
,
8113 /* isUnsigned = */ false));
8114 case uint16_t(Op::F32Min
):
8115 case uint16_t(Op::F32Max
):
8116 CHECK(EmitMinMax(f
, ValType::F32
, MIRType::Float32
,
8117 Op(op
.b0
) == Op::F32Max
));
8118 case uint16_t(Op::F32CopySign
):
8119 CHECK(EmitCopySign(f
, ValType::F32
));
8120 case uint16_t(Op::F64Abs
):
8121 CHECK(EmitUnaryWithType
<MAbs
>(f
, ValType::F64
, MIRType::Double
));
8122 case uint16_t(Op::F64Neg
):
8123 CHECK(EmitUnaryWithType
<MWasmNeg
>(f
, ValType::F64
, MIRType::Double
));
8124 case uint16_t(Op::F64Ceil
):
8125 CHECK(EmitUnaryMathBuiltinCall(f
, SASigCeilD
));
8126 case uint16_t(Op::F64Floor
):
8127 CHECK(EmitUnaryMathBuiltinCall(f
, SASigFloorD
));
8128 case uint16_t(Op::F64Trunc
):
8129 CHECK(EmitUnaryMathBuiltinCall(f
, SASigTruncD
));
8130 case uint16_t(Op::F64Nearest
):
8131 CHECK(EmitUnaryMathBuiltinCall(f
, SASigNearbyIntD
));
8132 case uint16_t(Op::F64Sqrt
):
8133 CHECK(EmitUnaryWithType
<MSqrt
>(f
, ValType::F64
, MIRType::Double
));
8134 case uint16_t(Op::F64Add
):
8135 CHECK(EmitAdd(f
, ValType::F64
, MIRType::Double
));
8136 case uint16_t(Op::F64Sub
):
8137 CHECK(EmitSub(f
, ValType::F64
, MIRType::Double
));
8138 case uint16_t(Op::F64Mul
):
8139 CHECK(EmitMul(f
, ValType::F64
, MIRType::Double
));
8140 case uint16_t(Op::F64Div
):
8141 CHECK(EmitDiv(f
, ValType::F64
, MIRType::Double
,
8142 /* isUnsigned = */ false));
8143 case uint16_t(Op::F64Min
):
8144 case uint16_t(Op::F64Max
):
8145 CHECK(EmitMinMax(f
, ValType::F64
, MIRType::Double
,
8146 Op(op
.b0
) == Op::F64Max
));
8147 case uint16_t(Op::F64CopySign
):
8148 CHECK(EmitCopySign(f
, ValType::F64
));
8151 case uint16_t(Op::I32WrapI64
):
8152 CHECK(EmitConversion
<MWrapInt64ToInt32
>(f
, ValType::I64
, ValType::I32
));
8153 case uint16_t(Op::I32TruncF32S
):
8154 case uint16_t(Op::I32TruncF32U
):
8155 CHECK(EmitTruncate(f
, ValType::F32
, ValType::I32
,
8156 Op(op
.b0
) == Op::I32TruncF32U
, false));
8157 case uint16_t(Op::I32TruncF64S
):
8158 case uint16_t(Op::I32TruncF64U
):
8159 CHECK(EmitTruncate(f
, ValType::F64
, ValType::I32
,
8160 Op(op
.b0
) == Op::I32TruncF64U
, false));
8161 case uint16_t(Op::I64ExtendI32S
):
8162 case uint16_t(Op::I64ExtendI32U
):
8163 CHECK(EmitExtendI32(f
, Op(op
.b0
) == Op::I64ExtendI32U
));
8164 case uint16_t(Op::I64TruncF32S
):
8165 case uint16_t(Op::I64TruncF32U
):
8166 CHECK(EmitTruncate(f
, ValType::F32
, ValType::I64
,
8167 Op(op
.b0
) == Op::I64TruncF32U
, false));
8168 case uint16_t(Op::I64TruncF64S
):
8169 case uint16_t(Op::I64TruncF64U
):
8170 CHECK(EmitTruncate(f
, ValType::F64
, ValType::I64
,
8171 Op(op
.b0
) == Op::I64TruncF64U
, false));
8172 case uint16_t(Op::F32ConvertI32S
):
8173 CHECK(EmitConversion
<MToFloat32
>(f
, ValType::I32
, ValType::F32
));
8174 case uint16_t(Op::F32ConvertI32U
):
8175 CHECK(EmitConversion
<MWasmUnsignedToFloat32
>(f
, ValType::I32
,
8177 case uint16_t(Op::F32ConvertI64S
):
8178 case uint16_t(Op::F32ConvertI64U
):
8179 CHECK(EmitConvertI64ToFloatingPoint(f
, ValType::F32
, MIRType::Float32
,
8180 Op(op
.b0
) == Op::F32ConvertI64U
));
8181 case uint16_t(Op::F32DemoteF64
):
8182 CHECK(EmitConversion
<MToFloat32
>(f
, ValType::F64
, ValType::F32
));
8183 case uint16_t(Op::F64ConvertI32S
):
8184 CHECK(EmitConversion
<MToDouble
>(f
, ValType::I32
, ValType::F64
));
8185 case uint16_t(Op::F64ConvertI32U
):
8186 CHECK(EmitConversion
<MWasmUnsignedToDouble
>(f
, ValType::I32
,
8188 case uint16_t(Op::F64ConvertI64S
):
8189 case uint16_t(Op::F64ConvertI64U
):
8190 CHECK(EmitConvertI64ToFloatingPoint(f
, ValType::F64
, MIRType::Double
,
8191 Op(op
.b0
) == Op::F64ConvertI64U
));
8192 case uint16_t(Op::F64PromoteF32
):
8193 CHECK(EmitConversion
<MToDouble
>(f
, ValType::F32
, ValType::F64
));
8195 // Reinterpretations
8196 case uint16_t(Op::I32ReinterpretF32
):
8197 CHECK(EmitReinterpret(f
, ValType::I32
, ValType::F32
, MIRType::Int32
));
8198 case uint16_t(Op::I64ReinterpretF64
):
8199 CHECK(EmitReinterpret(f
, ValType::I64
, ValType::F64
, MIRType::Int64
));
8200 case uint16_t(Op::F32ReinterpretI32
):
8201 CHECK(EmitReinterpret(f
, ValType::F32
, ValType::I32
, MIRType::Float32
));
8202 case uint16_t(Op::F64ReinterpretI64
):
8203 CHECK(EmitReinterpret(f
, ValType::F64
, ValType::I64
, MIRType::Double
));
8205 #ifdef ENABLE_WASM_GC
8206 case uint16_t(Op::RefEq
):
8207 if (!f
.moduleEnv().gcEnabled()) {
8208 return f
.iter().unrecognizedOpcode(&op
);
8210 CHECK(EmitComparison(f
, RefType::eq(), JSOp::Eq
,
8211 MCompare::Compare_WasmAnyRef
));
8213 case uint16_t(Op::RefFunc
):
8214 CHECK(EmitRefFunc(f
));
8215 case uint16_t(Op::RefNull
):
8216 CHECK(EmitRefNull(f
));
8217 case uint16_t(Op::RefIsNull
):
8218 CHECK(EmitRefIsNull(f
));
8221 case uint16_t(Op::I32Extend8S
):
8222 CHECK(EmitSignExtend(f
, 1, 4));
8223 case uint16_t(Op::I32Extend16S
):
8224 CHECK(EmitSignExtend(f
, 2, 4));
8225 case uint16_t(Op::I64Extend8S
):
8226 CHECK(EmitSignExtend(f
, 1, 8));
8227 case uint16_t(Op::I64Extend16S
):
8228 CHECK(EmitSignExtend(f
, 2, 8));
8229 case uint16_t(Op::I64Extend32S
):
8230 CHECK(EmitSignExtend(f
, 4, 8));
8232 #ifdef ENABLE_WASM_TAIL_CALLS
8233 case uint16_t(Op::ReturnCall
): {
8234 if (!f
.moduleEnv().tailCallsEnabled()) {
8235 return f
.iter().unrecognizedOpcode(&op
);
8237 CHECK(EmitReturnCall(f
));
8239 case uint16_t(Op::ReturnCallIndirect
): {
8240 if (!f
.moduleEnv().tailCallsEnabled()) {
8241 return f
.iter().unrecognizedOpcode(&op
);
8243 CHECK(EmitReturnCallIndirect(f
));
8247 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
8248 case uint16_t(Op::RefAsNonNull
):
8249 if (!f
.moduleEnv().functionReferencesEnabled()) {
8250 return f
.iter().unrecognizedOpcode(&op
);
8252 CHECK(EmitRefAsNonNull(f
));
8253 case uint16_t(Op::BrOnNull
): {
8254 if (!f
.moduleEnv().functionReferencesEnabled()) {
8255 return f
.iter().unrecognizedOpcode(&op
);
8257 CHECK(EmitBrOnNull(f
));
8259 case uint16_t(Op::BrOnNonNull
): {
8260 if (!f
.moduleEnv().functionReferencesEnabled()) {
8261 return f
.iter().unrecognizedOpcode(&op
);
8263 CHECK(EmitBrOnNonNull(f
));
8265 case uint16_t(Op::CallRef
): {
8266 if (!f
.moduleEnv().functionReferencesEnabled()) {
8267 return f
.iter().unrecognizedOpcode(&op
);
8269 CHECK(EmitCallRef(f
));
8273 #if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_FUNCTION_REFERENCES)
8274 case uint16_t(Op::ReturnCallRef
): {
8275 if (!f
.moduleEnv().functionReferencesEnabled() ||
8276 !f
.moduleEnv().tailCallsEnabled()) {
8277 return f
.iter().unrecognizedOpcode(&op
);
8279 CHECK(EmitReturnCallRef(f
));
8284 #ifdef ENABLE_WASM_GC
8285 case uint16_t(Op::GcPrefix
): {
8286 if (!f
.moduleEnv().gcEnabled()) {
8287 return f
.iter().unrecognizedOpcode(&op
);
8290 case uint32_t(GcOp::StructNew
):
8291 CHECK(EmitStructNew(f
));
8292 case uint32_t(GcOp::StructNewDefault
):
8293 CHECK(EmitStructNewDefault(f
));
8294 case uint32_t(GcOp::StructSet
):
8295 CHECK(EmitStructSet(f
));
8296 case uint32_t(GcOp::StructGet
):
8297 CHECK(EmitStructGet(f
, FieldWideningOp::None
));
8298 case uint32_t(GcOp::StructGetS
):
8299 CHECK(EmitStructGet(f
, FieldWideningOp::Signed
));
8300 case uint32_t(GcOp::StructGetU
):
8301 CHECK(EmitStructGet(f
, FieldWideningOp::Unsigned
));
8302 case uint32_t(GcOp::ArrayNew
):
8303 CHECK(EmitArrayNew(f
));
8304 case uint32_t(GcOp::ArrayNewDefault
):
8305 CHECK(EmitArrayNewDefault(f
));
8306 case uint32_t(GcOp::ArrayNewFixed
):
8307 CHECK(EmitArrayNewFixed(f
));
8308 case uint32_t(GcOp::ArrayNewData
):
8309 CHECK(EmitArrayNewData(f
));
8310 case uint32_t(GcOp::ArrayNewElem
):
8311 CHECK(EmitArrayNewElem(f
));
8312 case uint32_t(GcOp::ArrayInitData
):
8313 CHECK(EmitArrayInitData(f
));
8314 case uint32_t(GcOp::ArrayInitElem
):
8315 CHECK(EmitArrayInitElem(f
));
8316 case uint32_t(GcOp::ArraySet
):
8317 CHECK(EmitArraySet(f
));
8318 case uint32_t(GcOp::ArrayGet
):
8319 CHECK(EmitArrayGet(f
, FieldWideningOp::None
));
8320 case uint32_t(GcOp::ArrayGetS
):
8321 CHECK(EmitArrayGet(f
, FieldWideningOp::Signed
));
8322 case uint32_t(GcOp::ArrayGetU
):
8323 CHECK(EmitArrayGet(f
, FieldWideningOp::Unsigned
));
8324 case uint32_t(GcOp::ArrayLen
):
8325 CHECK(EmitArrayLen(f
));
8326 case uint32_t(GcOp::ArrayCopy
):
8327 CHECK(EmitArrayCopy(f
));
8328 case uint32_t(GcOp::ArrayFill
):
8329 CHECK(EmitArrayFill(f
));
8330 case uint32_t(GcOp::RefI31
):
8331 CHECK(EmitRefI31(f
));
8332 case uint32_t(GcOp::I31GetS
):
8333 CHECK(EmitI31Get(f
, FieldWideningOp::Signed
));
8334 case uint32_t(GcOp::I31GetU
):
8335 CHECK(EmitI31Get(f
, FieldWideningOp::Unsigned
));
8336 case uint32_t(GcOp::BrOnCast
):
8337 CHECK(EmitBrOnCast(f
, /*onSuccess=*/true));
8338 case uint32_t(GcOp::BrOnCastFail
):
8339 CHECK(EmitBrOnCast(f
, /*onSuccess=*/false));
8340 case uint32_t(GcOp::RefTest
):
8341 CHECK(EmitRefTest(f
, /*nullable=*/false));
8342 case uint32_t(GcOp::RefTestNull
):
8343 CHECK(EmitRefTest(f
, /*nullable=*/true));
8344 case uint32_t(GcOp::RefCast
):
8345 CHECK(EmitRefCast(f
, /*nullable=*/false));
8346 case uint32_t(GcOp::RefCastNull
):
8347 CHECK(EmitRefCast(f
, /*nullable=*/true));
8348 case uint16_t(GcOp::AnyConvertExtern
):
8349 CHECK(EmitAnyConvertExtern(f
));
8350 case uint16_t(GcOp::ExternConvertAny
):
8351 CHECK(EmitExternConvertAny(f
));
8353 return f
.iter().unrecognizedOpcode(&op
);
8360 #ifdef ENABLE_WASM_SIMD
8361 case uint16_t(Op::SimdPrefix
): {
8362 if (!f
.moduleEnv().simdAvailable()) {
8363 return f
.iter().unrecognizedOpcode(&op
);
8366 case uint32_t(SimdOp::V128Const
):
8367 CHECK(EmitConstSimd128(f
));
8368 case uint32_t(SimdOp::V128Load
):
8369 CHECK(EmitLoad(f
, ValType::V128
, Scalar::Simd128
));
8370 case uint32_t(SimdOp::V128Store
):
8371 CHECK(EmitStore(f
, ValType::V128
, Scalar::Simd128
));
8372 case uint32_t(SimdOp::V128And
):
8373 case uint32_t(SimdOp::V128Or
):
8374 case uint32_t(SimdOp::V128Xor
):
8375 case uint32_t(SimdOp::I8x16AvgrU
):
8376 case uint32_t(SimdOp::I16x8AvgrU
):
8377 case uint32_t(SimdOp::I8x16Add
):
8378 case uint32_t(SimdOp::I8x16AddSatS
):
8379 case uint32_t(SimdOp::I8x16AddSatU
):
8380 case uint32_t(SimdOp::I8x16MinS
):
8381 case uint32_t(SimdOp::I8x16MinU
):
8382 case uint32_t(SimdOp::I8x16MaxS
):
8383 case uint32_t(SimdOp::I8x16MaxU
):
8384 case uint32_t(SimdOp::I16x8Add
):
8385 case uint32_t(SimdOp::I16x8AddSatS
):
8386 case uint32_t(SimdOp::I16x8AddSatU
):
8387 case uint32_t(SimdOp::I16x8Mul
):
8388 case uint32_t(SimdOp::I16x8MinS
):
8389 case uint32_t(SimdOp::I16x8MinU
):
8390 case uint32_t(SimdOp::I16x8MaxS
):
8391 case uint32_t(SimdOp::I16x8MaxU
):
8392 case uint32_t(SimdOp::I32x4Add
):
8393 case uint32_t(SimdOp::I32x4Mul
):
8394 case uint32_t(SimdOp::I32x4MinS
):
8395 case uint32_t(SimdOp::I32x4MinU
):
8396 case uint32_t(SimdOp::I32x4MaxS
):
8397 case uint32_t(SimdOp::I32x4MaxU
):
8398 case uint32_t(SimdOp::I64x2Add
):
8399 case uint32_t(SimdOp::I64x2Mul
):
8400 case uint32_t(SimdOp::F32x4Add
):
8401 case uint32_t(SimdOp::F32x4Mul
):
8402 case uint32_t(SimdOp::F32x4Min
):
8403 case uint32_t(SimdOp::F32x4Max
):
8404 case uint32_t(SimdOp::F64x2Add
):
8405 case uint32_t(SimdOp::F64x2Mul
):
8406 case uint32_t(SimdOp::F64x2Min
):
8407 case uint32_t(SimdOp::F64x2Max
):
8408 case uint32_t(SimdOp::I8x16Eq
):
8409 case uint32_t(SimdOp::I8x16Ne
):
8410 case uint32_t(SimdOp::I16x8Eq
):
8411 case uint32_t(SimdOp::I16x8Ne
):
8412 case uint32_t(SimdOp::I32x4Eq
):
8413 case uint32_t(SimdOp::I32x4Ne
):
8414 case uint32_t(SimdOp::I64x2Eq
):
8415 case uint32_t(SimdOp::I64x2Ne
):
8416 case uint32_t(SimdOp::F32x4Eq
):
8417 case uint32_t(SimdOp::F32x4Ne
):
8418 case uint32_t(SimdOp::F64x2Eq
):
8419 case uint32_t(SimdOp::F64x2Ne
):
8420 case uint32_t(SimdOp::I32x4DotI16x8S
):
8421 case uint32_t(SimdOp::I16x8ExtmulLowI8x16S
):
8422 case uint32_t(SimdOp::I16x8ExtmulHighI8x16S
):
8423 case uint32_t(SimdOp::I16x8ExtmulLowI8x16U
):
8424 case uint32_t(SimdOp::I16x8ExtmulHighI8x16U
):
8425 case uint32_t(SimdOp::I32x4ExtmulLowI16x8S
):
8426 case uint32_t(SimdOp::I32x4ExtmulHighI16x8S
):
8427 case uint32_t(SimdOp::I32x4ExtmulLowI16x8U
):
8428 case uint32_t(SimdOp::I32x4ExtmulHighI16x8U
):
8429 case uint32_t(SimdOp::I64x2ExtmulLowI32x4S
):
8430 case uint32_t(SimdOp::I64x2ExtmulHighI32x4S
):
8431 case uint32_t(SimdOp::I64x2ExtmulLowI32x4U
):
8432 case uint32_t(SimdOp::I64x2ExtmulHighI32x4U
):
8433 case uint32_t(SimdOp::I16x8Q15MulrSatS
):
8434 CHECK(EmitBinarySimd128(f
, /* commutative= */ true, SimdOp(op
.b1
)));
8435 case uint32_t(SimdOp::V128AndNot
):
8436 case uint32_t(SimdOp::I8x16Sub
):
8437 case uint32_t(SimdOp::I8x16SubSatS
):
8438 case uint32_t(SimdOp::I8x16SubSatU
):
8439 case uint32_t(SimdOp::I16x8Sub
):
8440 case uint32_t(SimdOp::I16x8SubSatS
):
8441 case uint32_t(SimdOp::I16x8SubSatU
):
8442 case uint32_t(SimdOp::I32x4Sub
):
8443 case uint32_t(SimdOp::I64x2Sub
):
8444 case uint32_t(SimdOp::F32x4Sub
):
8445 case uint32_t(SimdOp::F32x4Div
):
8446 case uint32_t(SimdOp::F64x2Sub
):
8447 case uint32_t(SimdOp::F64x2Div
):
8448 case uint32_t(SimdOp::I8x16NarrowI16x8S
):
8449 case uint32_t(SimdOp::I8x16NarrowI16x8U
):
8450 case uint32_t(SimdOp::I16x8NarrowI32x4S
):
8451 case uint32_t(SimdOp::I16x8NarrowI32x4U
):
8452 case uint32_t(SimdOp::I8x16LtS
):
8453 case uint32_t(SimdOp::I8x16LtU
):
8454 case uint32_t(SimdOp::I8x16GtS
):
8455 case uint32_t(SimdOp::I8x16GtU
):
8456 case uint32_t(SimdOp::I8x16LeS
):
8457 case uint32_t(SimdOp::I8x16LeU
):
8458 case uint32_t(SimdOp::I8x16GeS
):
8459 case uint32_t(SimdOp::I8x16GeU
):
8460 case uint32_t(SimdOp::I16x8LtS
):
8461 case uint32_t(SimdOp::I16x8LtU
):
8462 case uint32_t(SimdOp::I16x8GtS
):
8463 case uint32_t(SimdOp::I16x8GtU
):
8464 case uint32_t(SimdOp::I16x8LeS
):
8465 case uint32_t(SimdOp::I16x8LeU
):
8466 case uint32_t(SimdOp::I16x8GeS
):
8467 case uint32_t(SimdOp::I16x8GeU
):
8468 case uint32_t(SimdOp::I32x4LtS
):
8469 case uint32_t(SimdOp::I32x4LtU
):
8470 case uint32_t(SimdOp::I32x4GtS
):
8471 case uint32_t(SimdOp::I32x4GtU
):
8472 case uint32_t(SimdOp::I32x4LeS
):
8473 case uint32_t(SimdOp::I32x4LeU
):
8474 case uint32_t(SimdOp::I32x4GeS
):
8475 case uint32_t(SimdOp::I32x4GeU
):
8476 case uint32_t(SimdOp::I64x2LtS
):
8477 case uint32_t(SimdOp::I64x2GtS
):
8478 case uint32_t(SimdOp::I64x2LeS
):
8479 case uint32_t(SimdOp::I64x2GeS
):
8480 case uint32_t(SimdOp::F32x4Lt
):
8481 case uint32_t(SimdOp::F32x4Gt
):
8482 case uint32_t(SimdOp::F32x4Le
):
8483 case uint32_t(SimdOp::F32x4Ge
):
8484 case uint32_t(SimdOp::F64x2Lt
):
8485 case uint32_t(SimdOp::F64x2Gt
):
8486 case uint32_t(SimdOp::F64x2Le
):
8487 case uint32_t(SimdOp::F64x2Ge
):
8488 case uint32_t(SimdOp::I8x16Swizzle
):
8489 case uint32_t(SimdOp::F32x4PMax
):
8490 case uint32_t(SimdOp::F32x4PMin
):
8491 case uint32_t(SimdOp::F64x2PMax
):
8492 case uint32_t(SimdOp::F64x2PMin
):
8494 EmitBinarySimd128(f
, /* commutative= */ false, SimdOp(op
.b1
)));
8495 case uint32_t(SimdOp::I8x16Splat
):
8496 case uint32_t(SimdOp::I16x8Splat
):
8497 case uint32_t(SimdOp::I32x4Splat
):
8498 CHECK(EmitSplatSimd128(f
, ValType::I32
, SimdOp(op
.b1
)));
8499 case uint32_t(SimdOp::I64x2Splat
):
8500 CHECK(EmitSplatSimd128(f
, ValType::I64
, SimdOp(op
.b1
)));
8501 case uint32_t(SimdOp::F32x4Splat
):
8502 CHECK(EmitSplatSimd128(f
, ValType::F32
, SimdOp(op
.b1
)));
8503 case uint32_t(SimdOp::F64x2Splat
):
8504 CHECK(EmitSplatSimd128(f
, ValType::F64
, SimdOp(op
.b1
)));
8505 case uint32_t(SimdOp::I8x16Neg
):
8506 case uint32_t(SimdOp::I16x8Neg
):
8507 case uint32_t(SimdOp::I16x8ExtendLowI8x16S
):
8508 case uint32_t(SimdOp::I16x8ExtendHighI8x16S
):
8509 case uint32_t(SimdOp::I16x8ExtendLowI8x16U
):
8510 case uint32_t(SimdOp::I16x8ExtendHighI8x16U
):
8511 case uint32_t(SimdOp::I32x4Neg
):
8512 case uint32_t(SimdOp::I32x4ExtendLowI16x8S
):
8513 case uint32_t(SimdOp::I32x4ExtendHighI16x8S
):
8514 case uint32_t(SimdOp::I32x4ExtendLowI16x8U
):
8515 case uint32_t(SimdOp::I32x4ExtendHighI16x8U
):
8516 case uint32_t(SimdOp::I32x4TruncSatF32x4S
):
8517 case uint32_t(SimdOp::I32x4TruncSatF32x4U
):
8518 case uint32_t(SimdOp::I64x2Neg
):
8519 case uint32_t(SimdOp::I64x2ExtendLowI32x4S
):
8520 case uint32_t(SimdOp::I64x2ExtendHighI32x4S
):
8521 case uint32_t(SimdOp::I64x2ExtendLowI32x4U
):
8522 case uint32_t(SimdOp::I64x2ExtendHighI32x4U
):
8523 case uint32_t(SimdOp::F32x4Abs
):
8524 case uint32_t(SimdOp::F32x4Neg
):
8525 case uint32_t(SimdOp::F32x4Sqrt
):
8526 case uint32_t(SimdOp::F32x4ConvertI32x4S
):
8527 case uint32_t(SimdOp::F32x4ConvertI32x4U
):
8528 case uint32_t(SimdOp::F64x2Abs
):
8529 case uint32_t(SimdOp::F64x2Neg
):
8530 case uint32_t(SimdOp::F64x2Sqrt
):
8531 case uint32_t(SimdOp::V128Not
):
8532 case uint32_t(SimdOp::I8x16Popcnt
):
8533 case uint32_t(SimdOp::I8x16Abs
):
8534 case uint32_t(SimdOp::I16x8Abs
):
8535 case uint32_t(SimdOp::I32x4Abs
):
8536 case uint32_t(SimdOp::I64x2Abs
):
8537 case uint32_t(SimdOp::F32x4Ceil
):
8538 case uint32_t(SimdOp::F32x4Floor
):
8539 case uint32_t(SimdOp::F32x4Trunc
):
8540 case uint32_t(SimdOp::F32x4Nearest
):
8541 case uint32_t(SimdOp::F64x2Ceil
):
8542 case uint32_t(SimdOp::F64x2Floor
):
8543 case uint32_t(SimdOp::F64x2Trunc
):
8544 case uint32_t(SimdOp::F64x2Nearest
):
8545 case uint32_t(SimdOp::F32x4DemoteF64x2Zero
):
8546 case uint32_t(SimdOp::F64x2PromoteLowF32x4
):
8547 case uint32_t(SimdOp::F64x2ConvertLowI32x4S
):
8548 case uint32_t(SimdOp::F64x2ConvertLowI32x4U
):
8549 case uint32_t(SimdOp::I32x4TruncSatF64x2SZero
):
8550 case uint32_t(SimdOp::I32x4TruncSatF64x2UZero
):
8551 case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16S
):
8552 case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16U
):
8553 case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8S
):
8554 case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8U
):
8555 CHECK(EmitUnarySimd128(f
, SimdOp(op
.b1
)));
8556 case uint32_t(SimdOp::V128AnyTrue
):
8557 case uint32_t(SimdOp::I8x16AllTrue
):
8558 case uint32_t(SimdOp::I16x8AllTrue
):
8559 case uint32_t(SimdOp::I32x4AllTrue
):
8560 case uint32_t(SimdOp::I64x2AllTrue
):
8561 case uint32_t(SimdOp::I8x16Bitmask
):
8562 case uint32_t(SimdOp::I16x8Bitmask
):
8563 case uint32_t(SimdOp::I32x4Bitmask
):
8564 case uint32_t(SimdOp::I64x2Bitmask
):
8565 CHECK(EmitReduceSimd128(f
, SimdOp(op
.b1
)));
8566 case uint32_t(SimdOp::I8x16Shl
):
8567 case uint32_t(SimdOp::I8x16ShrS
):
8568 case uint32_t(SimdOp::I8x16ShrU
):
8569 case uint32_t(SimdOp::I16x8Shl
):
8570 case uint32_t(SimdOp::I16x8ShrS
):
8571 case uint32_t(SimdOp::I16x8ShrU
):
8572 case uint32_t(SimdOp::I32x4Shl
):
8573 case uint32_t(SimdOp::I32x4ShrS
):
8574 case uint32_t(SimdOp::I32x4ShrU
):
8575 case uint32_t(SimdOp::I64x2Shl
):
8576 case uint32_t(SimdOp::I64x2ShrS
):
8577 case uint32_t(SimdOp::I64x2ShrU
):
8578 CHECK(EmitShiftSimd128(f
, SimdOp(op
.b1
)));
8579 case uint32_t(SimdOp::I8x16ExtractLaneS
):
8580 case uint32_t(SimdOp::I8x16ExtractLaneU
):
8581 CHECK(EmitExtractLaneSimd128(f
, ValType::I32
, 16, SimdOp(op
.b1
)));
8582 case uint32_t(SimdOp::I16x8ExtractLaneS
):
8583 case uint32_t(SimdOp::I16x8ExtractLaneU
):
8584 CHECK(EmitExtractLaneSimd128(f
, ValType::I32
, 8, SimdOp(op
.b1
)));
8585 case uint32_t(SimdOp::I32x4ExtractLane
):
8586 CHECK(EmitExtractLaneSimd128(f
, ValType::I32
, 4, SimdOp(op
.b1
)));
8587 case uint32_t(SimdOp::I64x2ExtractLane
):
8588 CHECK(EmitExtractLaneSimd128(f
, ValType::I64
, 2, SimdOp(op
.b1
)));
8589 case uint32_t(SimdOp::F32x4ExtractLane
):
8590 CHECK(EmitExtractLaneSimd128(f
, ValType::F32
, 4, SimdOp(op
.b1
)));
8591 case uint32_t(SimdOp::F64x2ExtractLane
):
8592 CHECK(EmitExtractLaneSimd128(f
, ValType::F64
, 2, SimdOp(op
.b1
)));
8593 case uint32_t(SimdOp::I8x16ReplaceLane
):
8594 CHECK(EmitReplaceLaneSimd128(f
, ValType::I32
, 16, SimdOp(op
.b1
)));
8595 case uint32_t(SimdOp::I16x8ReplaceLane
):
8596 CHECK(EmitReplaceLaneSimd128(f
, ValType::I32
, 8, SimdOp(op
.b1
)));
8597 case uint32_t(SimdOp::I32x4ReplaceLane
):
8598 CHECK(EmitReplaceLaneSimd128(f
, ValType::I32
, 4, SimdOp(op
.b1
)));
8599 case uint32_t(SimdOp::I64x2ReplaceLane
):
8600 CHECK(EmitReplaceLaneSimd128(f
, ValType::I64
, 2, SimdOp(op
.b1
)));
8601 case uint32_t(SimdOp::F32x4ReplaceLane
):
8602 CHECK(EmitReplaceLaneSimd128(f
, ValType::F32
, 4, SimdOp(op
.b1
)));
8603 case uint32_t(SimdOp::F64x2ReplaceLane
):
8604 CHECK(EmitReplaceLaneSimd128(f
, ValType::F64
, 2, SimdOp(op
.b1
)));
8605 case uint32_t(SimdOp::V128Bitselect
):
8606 CHECK(EmitTernarySimd128(f
, SimdOp(op
.b1
)));
8607 case uint32_t(SimdOp::I8x16Shuffle
):
8608 CHECK(EmitShuffleSimd128(f
));
8609 case uint32_t(SimdOp::V128Load8Splat
):
8610 CHECK(EmitLoadSplatSimd128(f
, Scalar::Uint8
, SimdOp::I8x16Splat
));
8611 case uint32_t(SimdOp::V128Load16Splat
):
8612 CHECK(EmitLoadSplatSimd128(f
, Scalar::Uint16
, SimdOp::I16x8Splat
));
8613 case uint32_t(SimdOp::V128Load32Splat
):
8614 CHECK(EmitLoadSplatSimd128(f
, Scalar::Float32
, SimdOp::I32x4Splat
));
8615 case uint32_t(SimdOp::V128Load64Splat
):
8616 CHECK(EmitLoadSplatSimd128(f
, Scalar::Float64
, SimdOp::I64x2Splat
));
8617 case uint32_t(SimdOp::V128Load8x8S
):
8618 case uint32_t(SimdOp::V128Load8x8U
):
8619 case uint32_t(SimdOp::V128Load16x4S
):
8620 case uint32_t(SimdOp::V128Load16x4U
):
8621 case uint32_t(SimdOp::V128Load32x2S
):
8622 case uint32_t(SimdOp::V128Load32x2U
):
8623 CHECK(EmitLoadExtendSimd128(f
, SimdOp(op
.b1
)));
8624 case uint32_t(SimdOp::V128Load32Zero
):
8625 CHECK(EmitLoadZeroSimd128(f
, Scalar::Float32
, 4));
8626 case uint32_t(SimdOp::V128Load64Zero
):
8627 CHECK(EmitLoadZeroSimd128(f
, Scalar::Float64
, 8));
8628 case uint32_t(SimdOp::V128Load8Lane
):
8629 CHECK(EmitLoadLaneSimd128(f
, 1));
8630 case uint32_t(SimdOp::V128Load16Lane
):
8631 CHECK(EmitLoadLaneSimd128(f
, 2));
8632 case uint32_t(SimdOp::V128Load32Lane
):
8633 CHECK(EmitLoadLaneSimd128(f
, 4));
8634 case uint32_t(SimdOp::V128Load64Lane
):
8635 CHECK(EmitLoadLaneSimd128(f
, 8));
8636 case uint32_t(SimdOp::V128Store8Lane
):
8637 CHECK(EmitStoreLaneSimd128(f
, 1));
8638 case uint32_t(SimdOp::V128Store16Lane
):
8639 CHECK(EmitStoreLaneSimd128(f
, 2));
8640 case uint32_t(SimdOp::V128Store32Lane
):
8641 CHECK(EmitStoreLaneSimd128(f
, 4));
8642 case uint32_t(SimdOp::V128Store64Lane
):
8643 CHECK(EmitStoreLaneSimd128(f
, 8));
8644 # ifdef ENABLE_WASM_RELAXED_SIMD
8645 case uint32_t(SimdOp::F32x4RelaxedMadd
):
8646 case uint32_t(SimdOp::F32x4RelaxedNmadd
):
8647 case uint32_t(SimdOp::F64x2RelaxedMadd
):
8648 case uint32_t(SimdOp::F64x2RelaxedNmadd
):
8649 case uint32_t(SimdOp::I8x16RelaxedLaneSelect
):
8650 case uint32_t(SimdOp::I16x8RelaxedLaneSelect
):
8651 case uint32_t(SimdOp::I32x4RelaxedLaneSelect
):
8652 case uint32_t(SimdOp::I64x2RelaxedLaneSelect
):
8653 case uint32_t(SimdOp::I32x4DotI8x16I7x16AddS
): {
8654 if (!f
.moduleEnv().v128RelaxedEnabled()) {
8655 return f
.iter().unrecognizedOpcode(&op
);
8657 CHECK(EmitTernarySimd128(f
, SimdOp(op
.b1
)));
8659 case uint32_t(SimdOp::F32x4RelaxedMin
):
8660 case uint32_t(SimdOp::F32x4RelaxedMax
):
8661 case uint32_t(SimdOp::F64x2RelaxedMin
):
8662 case uint32_t(SimdOp::F64x2RelaxedMax
):
8663 case uint32_t(SimdOp::I16x8RelaxedQ15MulrS
): {
8664 if (!f
.moduleEnv().v128RelaxedEnabled()) {
8665 return f
.iter().unrecognizedOpcode(&op
);
8667 CHECK(EmitBinarySimd128(f
, /* commutative= */ true, SimdOp(op
.b1
)));
8669 case uint32_t(SimdOp::I32x4RelaxedTruncF32x4S
):
8670 case uint32_t(SimdOp::I32x4RelaxedTruncF32x4U
):
8671 case uint32_t(SimdOp::I32x4RelaxedTruncF64x2SZero
):
8672 case uint32_t(SimdOp::I32x4RelaxedTruncF64x2UZero
): {
8673 if (!f
.moduleEnv().v128RelaxedEnabled()) {
8674 return f
.iter().unrecognizedOpcode(&op
);
8676 CHECK(EmitUnarySimd128(f
, SimdOp(op
.b1
)));
8678 case uint32_t(SimdOp::I8x16RelaxedSwizzle
):
8679 case uint32_t(SimdOp::I16x8DotI8x16I7x16S
): {
8680 if (!f
.moduleEnv().v128RelaxedEnabled()) {
8681 return f
.iter().unrecognizedOpcode(&op
);
8684 EmitBinarySimd128(f
, /* commutative= */ false, SimdOp(op
.b1
)));
8689 return f
.iter().unrecognizedOpcode(&op
);
8695 // Miscellaneous operations
8696 case uint16_t(Op::MiscPrefix
): {
8698 case uint32_t(MiscOp::I32TruncSatF32S
):
8699 case uint32_t(MiscOp::I32TruncSatF32U
):
8700 CHECK(EmitTruncate(f
, ValType::F32
, ValType::I32
,
8701 MiscOp(op
.b1
) == MiscOp::I32TruncSatF32U
, true));
8702 case uint32_t(MiscOp::I32TruncSatF64S
):
8703 case uint32_t(MiscOp::I32TruncSatF64U
):
8704 CHECK(EmitTruncate(f
, ValType::F64
, ValType::I32
,
8705 MiscOp(op
.b1
) == MiscOp::I32TruncSatF64U
, true));
8706 case uint32_t(MiscOp::I64TruncSatF32S
):
8707 case uint32_t(MiscOp::I64TruncSatF32U
):
8708 CHECK(EmitTruncate(f
, ValType::F32
, ValType::I64
,
8709 MiscOp(op
.b1
) == MiscOp::I64TruncSatF32U
, true));
8710 case uint32_t(MiscOp::I64TruncSatF64S
):
8711 case uint32_t(MiscOp::I64TruncSatF64U
):
8712 CHECK(EmitTruncate(f
, ValType::F64
, ValType::I64
,
8713 MiscOp(op
.b1
) == MiscOp::I64TruncSatF64U
, true));
8714 case uint32_t(MiscOp::MemoryCopy
):
8715 CHECK(EmitMemCopy(f
));
8716 case uint32_t(MiscOp::DataDrop
):
8717 CHECK(EmitDataOrElemDrop(f
, /*isData=*/true));
8718 case uint32_t(MiscOp::MemoryFill
):
8719 CHECK(EmitMemFill(f
));
8720 case uint32_t(MiscOp::MemoryInit
):
8721 CHECK(EmitMemOrTableInit(f
, /*isMem=*/true));
8722 case uint32_t(MiscOp::TableCopy
):
8723 CHECK(EmitTableCopy(f
));
8724 case uint32_t(MiscOp::ElemDrop
):
8725 CHECK(EmitDataOrElemDrop(f
, /*isData=*/false));
8726 case uint32_t(MiscOp::TableInit
):
8727 CHECK(EmitMemOrTableInit(f
, /*isMem=*/false));
8728 case uint32_t(MiscOp::TableFill
):
8729 CHECK(EmitTableFill(f
));
8730 #if ENABLE_WASM_MEMORY_CONTROL
8731 case uint32_t(MiscOp::MemoryDiscard
): {
8732 if (!f
.moduleEnv().memoryControlEnabled()) {
8733 return f
.iter().unrecognizedOpcode(&op
);
8735 CHECK(EmitMemDiscard(f
));
8738 case uint32_t(MiscOp::TableGrow
):
8739 CHECK(EmitTableGrow(f
));
8740 case uint32_t(MiscOp::TableSize
):
8741 CHECK(EmitTableSize(f
));
8743 return f
.iter().unrecognizedOpcode(&op
);
8748 // Thread operations
8749 case uint16_t(Op::ThreadPrefix
): {
8750 // Though thread ops can be used on nonshared memories, we make them
8751 // unavailable if shared memory has been disabled in the prefs, for
8752 // maximum predictability and safety and consistency with JS.
8753 if (f
.moduleEnv().sharedMemoryEnabled() == Shareable::False
) {
8754 return f
.iter().unrecognizedOpcode(&op
);
8757 case uint32_t(ThreadOp::Wake
):
8760 case uint32_t(ThreadOp::I32Wait
):
8761 CHECK(EmitWait(f
, ValType::I32
, 4));
8762 case uint32_t(ThreadOp::I64Wait
):
8763 CHECK(EmitWait(f
, ValType::I64
, 8));
8764 case uint32_t(ThreadOp::Fence
):
8765 CHECK(EmitFence(f
));
8767 case uint32_t(ThreadOp::I32AtomicLoad
):
8768 CHECK(EmitAtomicLoad(f
, ValType::I32
, Scalar::Int32
));
8769 case uint32_t(ThreadOp::I64AtomicLoad
):
8770 CHECK(EmitAtomicLoad(f
, ValType::I64
, Scalar::Int64
));
8771 case uint32_t(ThreadOp::I32AtomicLoad8U
):
8772 CHECK(EmitAtomicLoad(f
, ValType::I32
, Scalar::Uint8
));
8773 case uint32_t(ThreadOp::I32AtomicLoad16U
):
8774 CHECK(EmitAtomicLoad(f
, ValType::I32
, Scalar::Uint16
));
8775 case uint32_t(ThreadOp::I64AtomicLoad8U
):
8776 CHECK(EmitAtomicLoad(f
, ValType::I64
, Scalar::Uint8
));
8777 case uint32_t(ThreadOp::I64AtomicLoad16U
):
8778 CHECK(EmitAtomicLoad(f
, ValType::I64
, Scalar::Uint16
));
8779 case uint32_t(ThreadOp::I64AtomicLoad32U
):
8780 CHECK(EmitAtomicLoad(f
, ValType::I64
, Scalar::Uint32
));
8782 case uint32_t(ThreadOp::I32AtomicStore
):
8783 CHECK(EmitAtomicStore(f
, ValType::I32
, Scalar::Int32
));
8784 case uint32_t(ThreadOp::I64AtomicStore
):
8785 CHECK(EmitAtomicStore(f
, ValType::I64
, Scalar::Int64
));
8786 case uint32_t(ThreadOp::I32AtomicStore8U
):
8787 CHECK(EmitAtomicStore(f
, ValType::I32
, Scalar::Uint8
));
8788 case uint32_t(ThreadOp::I32AtomicStore16U
):
8789 CHECK(EmitAtomicStore(f
, ValType::I32
, Scalar::Uint16
));
8790 case uint32_t(ThreadOp::I64AtomicStore8U
):
8791 CHECK(EmitAtomicStore(f
, ValType::I64
, Scalar::Uint8
));
8792 case uint32_t(ThreadOp::I64AtomicStore16U
):
8793 CHECK(EmitAtomicStore(f
, ValType::I64
, Scalar::Uint16
));
8794 case uint32_t(ThreadOp::I64AtomicStore32U
):
8795 CHECK(EmitAtomicStore(f
, ValType::I64
, Scalar::Uint32
));
8797 case uint32_t(ThreadOp::I32AtomicAdd
):
8798 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Int32
,
8800 case uint32_t(ThreadOp::I64AtomicAdd
):
8801 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Int64
,
8803 case uint32_t(ThreadOp::I32AtomicAdd8U
):
8804 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Uint8
,
8806 case uint32_t(ThreadOp::I32AtomicAdd16U
):
8807 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Uint16
,
8809 case uint32_t(ThreadOp::I64AtomicAdd8U
):
8810 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint8
,
8812 case uint32_t(ThreadOp::I64AtomicAdd16U
):
8813 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint16
,
8815 case uint32_t(ThreadOp::I64AtomicAdd32U
):
8816 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint32
,
8819 case uint32_t(ThreadOp::I32AtomicSub
):
8820 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Int32
,
8822 case uint32_t(ThreadOp::I64AtomicSub
):
8823 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Int64
,
8825 case uint32_t(ThreadOp::I32AtomicSub8U
):
8826 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Uint8
,
8828 case uint32_t(ThreadOp::I32AtomicSub16U
):
8829 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Uint16
,
8831 case uint32_t(ThreadOp::I64AtomicSub8U
):
8832 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint8
,
8834 case uint32_t(ThreadOp::I64AtomicSub16U
):
8835 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint16
,
8837 case uint32_t(ThreadOp::I64AtomicSub32U
):
8838 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint32
,
8841 case uint32_t(ThreadOp::I32AtomicAnd
):
8842 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Int32
,
8844 case uint32_t(ThreadOp::I64AtomicAnd
):
8845 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Int64
,
8847 case uint32_t(ThreadOp::I32AtomicAnd8U
):
8848 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Uint8
,
8850 case uint32_t(ThreadOp::I32AtomicAnd16U
):
8851 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Uint16
,
8853 case uint32_t(ThreadOp::I64AtomicAnd8U
):
8854 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint8
,
8856 case uint32_t(ThreadOp::I64AtomicAnd16U
):
8857 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint16
,
8859 case uint32_t(ThreadOp::I64AtomicAnd32U
):
8860 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint32
,
8863 case uint32_t(ThreadOp::I32AtomicOr
):
8865 EmitAtomicRMW(f
, ValType::I32
, Scalar::Int32
, AtomicFetchOrOp
));
8866 case uint32_t(ThreadOp::I64AtomicOr
):
8868 EmitAtomicRMW(f
, ValType::I64
, Scalar::Int64
, AtomicFetchOrOp
));
8869 case uint32_t(ThreadOp::I32AtomicOr8U
):
8871 EmitAtomicRMW(f
, ValType::I32
, Scalar::Uint8
, AtomicFetchOrOp
));
8872 case uint32_t(ThreadOp::I32AtomicOr16U
):
8873 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Uint16
,
8875 case uint32_t(ThreadOp::I64AtomicOr8U
):
8877 EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint8
, AtomicFetchOrOp
));
8878 case uint32_t(ThreadOp::I64AtomicOr16U
):
8879 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint16
,
8881 case uint32_t(ThreadOp::I64AtomicOr32U
):
8882 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint32
,
8885 case uint32_t(ThreadOp::I32AtomicXor
):
8886 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Int32
,
8888 case uint32_t(ThreadOp::I64AtomicXor
):
8889 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Int64
,
8891 case uint32_t(ThreadOp::I32AtomicXor8U
):
8892 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Uint8
,
8894 case uint32_t(ThreadOp::I32AtomicXor16U
):
8895 CHECK(EmitAtomicRMW(f
, ValType::I32
, Scalar::Uint16
,
8897 case uint32_t(ThreadOp::I64AtomicXor8U
):
8898 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint8
,
8900 case uint32_t(ThreadOp::I64AtomicXor16U
):
8901 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint16
,
8903 case uint32_t(ThreadOp::I64AtomicXor32U
):
8904 CHECK(EmitAtomicRMW(f
, ValType::I64
, Scalar::Uint32
,
8907 case uint32_t(ThreadOp::I32AtomicXchg
):
8908 CHECK(EmitAtomicXchg(f
, ValType::I32
, Scalar::Int32
));
8909 case uint32_t(ThreadOp::I64AtomicXchg
):
8910 CHECK(EmitAtomicXchg(f
, ValType::I64
, Scalar::Int64
));
8911 case uint32_t(ThreadOp::I32AtomicXchg8U
):
8912 CHECK(EmitAtomicXchg(f
, ValType::I32
, Scalar::Uint8
));
8913 case uint32_t(ThreadOp::I32AtomicXchg16U
):
8914 CHECK(EmitAtomicXchg(f
, ValType::I32
, Scalar::Uint16
));
8915 case uint32_t(ThreadOp::I64AtomicXchg8U
):
8916 CHECK(EmitAtomicXchg(f
, ValType::I64
, Scalar::Uint8
));
8917 case uint32_t(ThreadOp::I64AtomicXchg16U
):
8918 CHECK(EmitAtomicXchg(f
, ValType::I64
, Scalar::Uint16
));
8919 case uint32_t(ThreadOp::I64AtomicXchg32U
):
8920 CHECK(EmitAtomicXchg(f
, ValType::I64
, Scalar::Uint32
));
8922 case uint32_t(ThreadOp::I32AtomicCmpXchg
):
8923 CHECK(EmitAtomicCmpXchg(f
, ValType::I32
, Scalar::Int32
));
8924 case uint32_t(ThreadOp::I64AtomicCmpXchg
):
8925 CHECK(EmitAtomicCmpXchg(f
, ValType::I64
, Scalar::Int64
));
8926 case uint32_t(ThreadOp::I32AtomicCmpXchg8U
):
8927 CHECK(EmitAtomicCmpXchg(f
, ValType::I32
, Scalar::Uint8
));
8928 case uint32_t(ThreadOp::I32AtomicCmpXchg16U
):
8929 CHECK(EmitAtomicCmpXchg(f
, ValType::I32
, Scalar::Uint16
));
8930 case uint32_t(ThreadOp::I64AtomicCmpXchg8U
):
8931 CHECK(EmitAtomicCmpXchg(f
, ValType::I64
, Scalar::Uint8
));
8932 case uint32_t(ThreadOp::I64AtomicCmpXchg16U
):
8933 CHECK(EmitAtomicCmpXchg(f
, ValType::I64
, Scalar::Uint16
));
8934 case uint32_t(ThreadOp::I64AtomicCmpXchg32U
):
8935 CHECK(EmitAtomicCmpXchg(f
, ValType::I64
, Scalar::Uint32
));
8938 return f
.iter().unrecognizedOpcode(&op
);
8943 // asm.js-specific operators
8944 case uint16_t(Op::MozPrefix
): {
8945 if (op
.b1
== uint32_t(MozOp::CallBuiltinModuleFunc
)) {
8946 if (!f
.moduleEnv().isBuiltinModule()) {
8947 return f
.iter().unrecognizedOpcode(&op
);
8949 CHECK(EmitCallBuiltinModuleFunc(f
));
8952 if (!f
.moduleEnv().isAsmJS()) {
8953 return f
.iter().unrecognizedOpcode(&op
);
8956 case uint32_t(MozOp::TeeGlobal
):
8957 CHECK(EmitTeeGlobal(f
));
8958 case uint32_t(MozOp::I32Min
):
8959 case uint32_t(MozOp::I32Max
):
8960 CHECK(EmitMinMax(f
, ValType::I32
, MIRType::Int32
,
8961 MozOp(op
.b1
) == MozOp::I32Max
));
8962 case uint32_t(MozOp::I32Neg
):
8963 CHECK(EmitUnaryWithType
<MWasmNeg
>(f
, ValType::I32
, MIRType::Int32
));
8964 case uint32_t(MozOp::I32BitNot
):
8965 CHECK(EmitBitNot(f
, ValType::I32
));
8966 case uint32_t(MozOp::I32Abs
):
8967 CHECK(EmitUnaryWithType
<MAbs
>(f
, ValType::I32
, MIRType::Int32
));
8968 case uint32_t(MozOp::F32TeeStoreF64
):
8969 CHECK(EmitTeeStoreWithCoercion(f
, ValType::F32
, Scalar::Float64
));
8970 case uint32_t(MozOp::F64TeeStoreF32
):
8971 CHECK(EmitTeeStoreWithCoercion(f
, ValType::F64
, Scalar::Float32
));
8972 case uint32_t(MozOp::I32TeeStore8
):
8973 CHECK(EmitTeeStore(f
, ValType::I32
, Scalar::Int8
));
8974 case uint32_t(MozOp::I32TeeStore16
):
8975 CHECK(EmitTeeStore(f
, ValType::I32
, Scalar::Int16
));
8976 case uint32_t(MozOp::I64TeeStore8
):
8977 CHECK(EmitTeeStore(f
, ValType::I64
, Scalar::Int8
));
8978 case uint32_t(MozOp::I64TeeStore16
):
8979 CHECK(EmitTeeStore(f
, ValType::I64
, Scalar::Int16
));
8980 case uint32_t(MozOp::I64TeeStore32
):
8981 CHECK(EmitTeeStore(f
, ValType::I64
, Scalar::Int32
));
8982 case uint32_t(MozOp::I32TeeStore
):
8983 CHECK(EmitTeeStore(f
, ValType::I32
, Scalar::Int32
));
8984 case uint32_t(MozOp::I64TeeStore
):
8985 CHECK(EmitTeeStore(f
, ValType::I64
, Scalar::Int64
));
8986 case uint32_t(MozOp::F32TeeStore
):
8987 CHECK(EmitTeeStore(f
, ValType::F32
, Scalar::Float32
));
8988 case uint32_t(MozOp::F64TeeStore
):
8989 CHECK(EmitTeeStore(f
, ValType::F64
, Scalar::Float64
));
8990 case uint32_t(MozOp::F64Mod
):
8991 CHECK(EmitRem(f
, ValType::F64
, MIRType::Double
,
8992 /* isUnsigned = */ false));
8993 case uint32_t(MozOp::F64SinNative
):
8994 CHECK(EmitUnaryMathBuiltinCall(f
, SASigSinNativeD
));
8995 case uint32_t(MozOp::F64SinFdlibm
):
8996 CHECK(EmitUnaryMathBuiltinCall(f
, SASigSinFdlibmD
));
8997 case uint32_t(MozOp::F64CosNative
):
8998 CHECK(EmitUnaryMathBuiltinCall(f
, SASigCosNativeD
));
8999 case uint32_t(MozOp::F64CosFdlibm
):
9000 CHECK(EmitUnaryMathBuiltinCall(f
, SASigCosFdlibmD
));
9001 case uint32_t(MozOp::F64TanNative
):
9002 CHECK(EmitUnaryMathBuiltinCall(f
, SASigTanNativeD
));
9003 case uint32_t(MozOp::F64TanFdlibm
):
9004 CHECK(EmitUnaryMathBuiltinCall(f
, SASigTanFdlibmD
));
9005 case uint32_t(MozOp::F64Asin
):
9006 CHECK(EmitUnaryMathBuiltinCall(f
, SASigASinD
));
9007 case uint32_t(MozOp::F64Acos
):
9008 CHECK(EmitUnaryMathBuiltinCall(f
, SASigACosD
));
9009 case uint32_t(MozOp::F64Atan
):
9010 CHECK(EmitUnaryMathBuiltinCall(f
, SASigATanD
));
9011 case uint32_t(MozOp::F64Exp
):
9012 CHECK(EmitUnaryMathBuiltinCall(f
, SASigExpD
));
9013 case uint32_t(MozOp::F64Log
):
9014 CHECK(EmitUnaryMathBuiltinCall(f
, SASigLogD
));
9015 case uint32_t(MozOp::F64Pow
):
9016 CHECK(EmitBinaryMathBuiltinCall(f
, SASigPowD
));
9017 case uint32_t(MozOp::F64Atan2
):
9018 CHECK(EmitBinaryMathBuiltinCall(f
, SASigATan2D
));
9019 case uint32_t(MozOp::OldCallDirect
):
9020 CHECK(EmitCall(f
, /* asmJSFuncDef = */ true));
9021 case uint32_t(MozOp::OldCallIndirect
):
9022 CHECK(EmitCallIndirect(f
, /* oldStyle = */ true));
9025 return f
.iter().unrecognizedOpcode(&op
);
9031 return f
.iter().unrecognizedOpcode(&op
);
9035 MOZ_CRASH("unreachable");
9040 bool wasm::IonCompileFunctions(const ModuleEnvironment
& moduleEnv
,
9041 const CompilerEnvironment
& compilerEnv
,
9043 const FuncCompileInputVector
& inputs
,
9044 CompiledCode
* code
, UniqueChars
* error
) {
9045 MOZ_ASSERT(compilerEnv
.tier() == Tier::Optimized
);
9046 MOZ_ASSERT(compilerEnv
.debug() == DebugEnabled::False
);
9048 TempAllocator
alloc(&lifo
);
9049 JitContext jitContext
;
9050 MOZ_ASSERT(IsCompilingWasm());
9051 WasmMacroAssembler
masm(alloc
, moduleEnv
);
9052 #if defined(JS_CODEGEN_ARM64)
9053 masm
.SetStackPointer64(PseudoStackPointer64
);
9056 // Swap in already-allocated empty vectors to avoid malloc/free.
9057 MOZ_ASSERT(code
->empty());
9058 if (!code
->swap(masm
)) {
9062 // Create a description of the stack layout created by GenerateTrapExit().
9063 RegisterOffsets trapExitLayout
;
9064 size_t trapExitLayoutNumWords
;
9065 GenerateTrapExitRegisterOffsets(&trapExitLayout
, &trapExitLayoutNumWords
);
9067 for (const FuncCompileInput
& func
: inputs
) {
9068 JitSpewCont(JitSpew_Codegen
, "\n");
9069 JitSpew(JitSpew_Codegen
,
9070 "# ================================"
9071 "==================================");
9072 JitSpew(JitSpew_Codegen
, "# ==");
9073 JitSpew(JitSpew_Codegen
,
9074 "# wasm::IonCompileFunctions: starting on function index %d",
9077 Decoder
d(func
.begin
, func
.end
, func
.lineOrBytecode
, error
);
9079 // Build the local types vector.
9081 const FuncType
& funcType
= *moduleEnv
.funcs
[func
.index
].type
;
9082 ValTypeVector locals
;
9083 if (!locals
.appendAll(funcType
.args())) {
9086 if (!DecodeLocalEntries(d
, *moduleEnv
.types
, moduleEnv
.features
, &locals
)) {
9090 // Set up for Ion compilation.
9092 const JitCompileOptions options
;
9093 MIRGraph
graph(&alloc
);
9094 CompileInfo
compileInfo(locals
.length());
9095 MIRGenerator
mir(nullptr, options
, &alloc
, &graph
, &compileInfo
,
9096 IonOptimizations
.get(OptimizationLevel::Wasm
));
9097 if (moduleEnv
.numMemories() > 0) {
9098 if (moduleEnv
.memories
[0].indexType() == IndexType::I32
) {
9099 mir
.initMinWasmMemory0Length(moduleEnv
.memories
[0].initialLength32());
9101 mir
.initMinWasmMemory0Length(moduleEnv
.memories
[0].initialLength64());
9107 FunctionCompiler
f(moduleEnv
, d
, func
, locals
, mir
, masm
.tryNotes());
9112 if (!f
.startBlock()) {
9116 if (!EmitBodyExprs(f
)) {
9123 // Compile MIR graph
9125 jit::SpewBeginWasmFunction(&mir
, func
.index
);
9126 jit::AutoSpewEndFunction
spewEndFunction(&mir
);
9128 if (!OptimizeMIR(&mir
)) {
9132 LIRGraph
* lir
= GenerateLIR(&mir
);
9137 size_t unwindInfoBefore
= masm
.codeRangeUnwindInfos().length();
9139 CodeGenerator
codegen(&mir
, lir
, &masm
);
9141 BytecodeOffset
prologueTrapOffset(func
.lineOrBytecode
);
9142 FuncOffsets offsets
;
9143 ArgTypeVector
args(funcType
);
9144 if (!codegen
.generateWasm(CallIndirectId::forFunc(moduleEnv
, func
.index
),
9145 prologueTrapOffset
, args
, trapExitLayout
,
9146 trapExitLayoutNumWords
, &offsets
,
9147 &code
->stackMaps
, &d
)) {
9151 bool hasUnwindInfo
=
9152 unwindInfoBefore
!= masm
.codeRangeUnwindInfos().length();
9153 if (!code
->codeRanges
.emplaceBack(func
.index
, func
.lineOrBytecode
,
9154 offsets
, hasUnwindInfo
)) {
9159 JitSpew(JitSpew_Codegen
,
9160 "# wasm::IonCompileFunctions: completed function index %d",
9162 JitSpew(JitSpew_Codegen
, "# ==");
9163 JitSpew(JitSpew_Codegen
,
9164 "# ================================"
9165 "==================================");
9166 JitSpewCont(JitSpew_Codegen
, "\n");
9174 return code
->swap(masm
);
9177 bool js::wasm::IonPlatformSupport() {
9178 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
9179 defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS64) || \
9180 defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_LOONG64) || \
9181 defined(JS_CODEGEN_RISCV64)