/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2015 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmIonCompile.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"

#include <algorithm>

#include "jit/ABIArgGenerator.h"
#include "jit/CodeGenerator.h"
#include "jit/CompileInfo.h"
#include "jit/Ion.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/MIR.h"
#include "jit/ShuffleAnalysis.h"
#include "js/ScalarType.h"  // js::Scalar::Type
#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmBuiltinModule.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmGcObject.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmOpIter.h"
#include "wasm/WasmSignalHandlers.h"
#include "wasm/WasmStubs.h"
#include "wasm/WasmValidate.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;

using mozilla::IsPowerOfTwo;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;

namespace {

using BlockVector = Vector<MBasicBlock*, 8, SystemAllocPolicy>;
using DefVector = Vector<MDefinition*, 8, SystemAllocPolicy>;

// To compile try-catch blocks, we extend the IonCompilePolicy's ControlItem
// from being just an MBasicBlock* to a Control structure collecting additional
// information.
using ControlInstructionVector =
    Vector<MControlInstruction*, 8, SystemAllocPolicy>;

struct TryControl {
  // Branches to bind to the try's landing pad.
  ControlInstructionVector landingPadPatches;
  // For `try_table`, the list of tagged catches and labels to branch to.
  TryTableCatchVector catches;
  // Whether this try is in the body and should catch any thrown exception.
  bool inBody;

  TryControl() : inBody(false) {}

  // Reset the try control for when it is cached in FunctionCompiler.
  void reset() {
    landingPadPatches.clearAndFree();
    catches.clearAndFree();
    inBody = false;
  }
};
using UniqueTryControl = UniquePtr<TryControl>;
using VectorUniqueTryControl = Vector<UniqueTryControl, 2, SystemAllocPolicy>;

struct Control {
  MBasicBlock* block;
  UniqueTryControl tryControl;

  Control() : block(nullptr), tryControl(nullptr) {}
  Control(Control&&) = default;
  Control(const Control&) = delete;
};

// [SMDOC] WebAssembly Exception Handling in Ion
// =======================================================
//
// ## Throwing instructions
//
// Wasm exceptions can be thrown by either a throw instruction (local throw),
// or by a wasm call.
//
// ## The "catching try control"
//
// We know we are in try-code if there is a surrounding ControlItem with
// LabelKind::Try. The innermost such control is called the
// "catching try control".
//
// ## Throws without a catching try control
//
// Such throws are implemented with an instance call that triggers the
// exception unwinding runtime. The exception unwinding runtime will not return
// to the function.
//
// ## "landing pad" and "pre-pad" blocks
//
// When an exception is thrown, the unwinder will search for the nearest
// enclosing try block and redirect control flow to it. The code that executes
// before any catch blocks is called the 'landing pad'. The 'landing pad' is
// responsible for:
//   1. Consuming the pending exception state from
//      Instance::pendingException(Tag)
//   2. Branching to the correct catch block, or else rethrowing
//
// There is one landing pad for each try block. The immediate predecessors of
// the landing pad are called 'pre-pad' blocks. There is one pre-pad block per
// throwing instruction.
//
// ## Creating pre-pad blocks
//
// There are two possible sorts of pre-pad blocks, depending on whether we
// are branching after a local throw instruction, or after a wasm call:
//
// - If we encounter a local throw, we create the exception and tag objects,
//   store them to Instance::pendingException(Tag), and then jump to the
//   landing pad.
//
// - If we encounter a wasm call, we construct a MWasmCallCatchable which is a
//   control instruction with either a branch to a fallthrough block or
//   to a pre-pad block.
//
// The pre-pad block for a wasm call is empty except for a jump to the
// landing pad. It only exists to avoid critical edges which, when split, would
// violate the invariants of MWasmCallCatchable. The pending exception state
// is taken care of by the unwinder.
//
// Each pre-pad ends with a pending jump to the landing pad. The pending jumps
// to the landing pad are tracked in `tryPadPatches`. These are called
// "pad patches".
//
// ## Creating the landing pad
//
// When we exit try-code, we check if tryPadPatches has captured any control
// instructions (pad patches). If not, we don't compile any catches and we mark
// the rest as dead code.
//
// If there are pre-pad blocks, we join them to create a landing pad (or just
// "pad"). The pad's last two slots are the caught exception, and the
// exception's tag object.
//
// There are three different forms of try-catch/catch_all Wasm instructions,
// which result in different forms of landing pad.
//
// 1. A catchless try, so a Wasm instruction of the form "try ... end".
//    - In this case, we end the pad by rethrowing the caught exception.
//
// 2. A single catch_all after a try.
//    - If the first catch after a try is a catch_all, then there won't be
//      any more catches, but we need the exception and its tag object, in
//      case the code in a catch_all contains "rethrow" instructions.
//    - The Wasm instruction "rethrow" gets the exception and tag object to
//      rethrow from the last two slots of the landing pad which, due to
//      validation, is the l'th surrounding ControlItem.
//    - We immediately GoTo to a new block after the pad and pop both the
//      exception and tag object, as we don't need them anymore in this case.
//
// 3. Otherwise, there are one or more catch code blocks following.
//    - In this case, we construct the landing pad by creating a sequence
//      of compare and branch blocks that compare the pending exception tag
//      object to the tag object of the current tagged catch block. This is
//      done incrementally as we visit each tagged catch block in the bytecode
//      stream. At every step, we update the ControlItem's block to point to
//      the next block to be created in the landing pad sequence. The final
//      block will either be a rethrow, if there is no catch_all, or else a
//      jump to a catch_all block.
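//
// As an illustrative (non-normative) sketch, the three forms correspond to
// Wasm text along these lines, where $e is an exception tag:
//
//   (try (do (call $mayThrow)))                           ;; form 1: catchless
//   (try (do (call $mayThrow)) (catch_all (rethrow 0)))   ;; form 2: catch_all
//   (try (do (call $mayThrow)) (catch $e ...) (catch_all ...))  ;; form 3
//
// In all three forms the call to $mayThrow is compiled as an
// MWasmCallCatchable whose pre-pad block jumps to the try's landing pad.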

struct IonCompilePolicy {
  // We store SSA definitions in the value stack.
  using Value = MDefinition*;
  using ValueVector = DefVector;

  // We store loop headers and then/else blocks in the control flow stack.
  // In the case of try-catch control blocks, we collect additional information
  // regarding the possible paths from throws and calls to a landing pad, as
  // well as information on the landing pad's handlers (its catches).
  using ControlItem = Control;
};

using IonOpIter = OpIter<IonCompilePolicy>;

class FunctionCompiler;

// CallCompileState describes a call that is being compiled.
class CallCompileState {
  // A generator object that is passed each argument as it is compiled.
  WasmABIArgGenerator abi_;

  // Accumulates the register arguments while compiling arguments.
  MWasmCallBase::Args regArgs_;

  // Reserved argument for passing Instance* to builtin instance method calls.
  ABIArg instanceArg_;

  // The stack area in which the callee will write stack return values, or
  // nullptr if no stack results.
  MWasmStackResultArea* stackResultArea_ = nullptr;

  // Indicates that the call is a return/tail call.
  bool returnCall = false;

  // Only FunctionCompiler should be directly manipulating CallCompileState.
  friend class FunctionCompiler;
};

// Encapsulates the compilation of a single function in an asm.js module. The
// function compiler handles the creation and final backend compilation of the
// MIR graph.
class FunctionCompiler {
  struct ControlFlowPatch {
    MControlInstruction* ins;
    uint32_t index;
    ControlFlowPatch(MControlInstruction* ins, uint32_t index)
        : ins(ins), index(index) {}
  };

  using ControlFlowPatchVector = Vector<ControlFlowPatch, 0, SystemAllocPolicy>;
  using ControlFlowPatchVectorVector =
      Vector<ControlFlowPatchVector, 0, SystemAllocPolicy>;

  const ModuleEnvironment& moduleEnv_;
  IonOpIter iter_;
  const FuncCompileInput& func_;
  const ValTypeVector& locals_;
  size_t lastReadCallSite_;

  TempAllocator& alloc_;
  MIRGraph& graph_;
  const CompileInfo& info_;
  MIRGenerator& mirGen_;

  MBasicBlock* curBlock_;
  uint32_t maxStackArgBytes_;

  uint32_t loopDepth_;
  uint32_t blockDepth_;
  ControlFlowPatchVectorVector blockPatches_;
  // Control flow patches created by `delegate` instructions that target the
  // outermost label of this function. These will be bound to a pad that will
  // do a rethrow in `emitBodyDelegateThrowPad`.
  ControlInstructionVector bodyDelegatePadPatches_;

  // Instance pointer argument to the current function.
  MWasmParameter* instancePointer_;
  MWasmParameter* stackResultPointer_;

  // Reference to masm.tryNotes_
  wasm::TryNoteVector& tryNotes_;

  // Cache of TryControl to minimize heap allocations
  VectorUniqueTryControl tryControlCache_;

 public:
  FunctionCompiler(const ModuleEnvironment& moduleEnv, Decoder& decoder,
                   const FuncCompileInput& func, const ValTypeVector& locals,
                   MIRGenerator& mirGen, TryNoteVector& tryNotes)
      : moduleEnv_(moduleEnv),
        iter_(moduleEnv, decoder),
        func_(func),
        locals_(locals),
        lastReadCallSite_(0),
        alloc_(mirGen.alloc()),
        graph_(mirGen.graph()),
        info_(mirGen.outerInfo()),
        mirGen_(mirGen),
        curBlock_(nullptr),
        maxStackArgBytes_(0),
        loopDepth_(0),
        blockDepth_(0),
        instancePointer_(nullptr),
        stackResultPointer_(nullptr),
        tryNotes_(tryNotes) {}

  const ModuleEnvironment& moduleEnv() const { return moduleEnv_; }

  IonOpIter& iter() { return iter_; }
  TempAllocator& alloc() const { return alloc_; }
  // FIXME(1401675): Replace with BlockType.
  uint32_t funcIndex() const { return func_.index; }
  const FuncType& funcType() const {
    return *moduleEnv_.funcs[func_.index].type;
  }

  BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
  BytecodeOffset bytecodeIfNotAsmJS() const {
    return moduleEnv_.isAsmJS() ? BytecodeOffset() : iter_.bytecodeOffset();
  }

  // Try to get a free TryControl from the cache, or allocate a new one.
  [[nodiscard]] UniqueTryControl newTryControl() {
    if (tryControlCache_.empty()) {
      return UniqueTryControl(js_new<TryControl>());
    }
    UniqueTryControl tryControl = std::move(tryControlCache_.back());
    tryControlCache_.popBack();
    return tryControl;
  }

  // Release the TryControl to the cache.
  void freeTryControl(UniqueTryControl&& tryControl) {
    // Ensure that it's in a consistent state
    tryControl->reset();
    // Ignore any OOM, as we'll fail later
    (void)tryControlCache_.append(std::move(tryControl));
  }

  [[nodiscard]] bool init() {
    // Prepare the entry block for MIR generation:

    const ArgTypeVector args(funcType());

    if (!mirGen_.ensureBallast()) {
      return false;
    }
    if (!newBlock(/* prev */ nullptr, &curBlock_)) {
      return false;
    }

    for (WasmABIArgIter i(args); !i.done(); i++) {
      MWasmParameter* ins = MWasmParameter::New(alloc(), *i, i.mirType());
      curBlock_->add(ins);
      if (args.isSyntheticStackResultPointerArg(i.index())) {
        MOZ_ASSERT(stackResultPointer_ == nullptr);
        stackResultPointer_ = ins;
      } else {
        curBlock_->initSlot(info().localSlot(args.naturalIndex(i.index())),
                            ins);
      }
      if (!mirGen_.ensureBallast()) {
        return false;
      }
    }

    // Set up a parameter that receives the hidden instance pointer argument.
    instancePointer_ =
        MWasmParameter::New(alloc(), ABIArg(InstanceReg), MIRType::Pointer);
    curBlock_->add(instancePointer_);
    if (!mirGen_.ensureBallast()) {
      return false;
    }

    for (size_t i = args.lengthWithoutStackResults(); i < locals_.length();
         i++) {
      ValType slotValType = locals_[i];
#ifndef ENABLE_WASM_SIMD
      if (slotValType == ValType::V128) {
        return iter().fail("Ion has no SIMD support yet");
      }
#endif
      MDefinition* zero = constantZeroOfValType(slotValType);
      curBlock_->initSlot(info().localSlot(i), zero);
      if (!mirGen_.ensureBallast()) {
        return false;
      }
    }

    return true;
  }
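
  // As a rough illustration (not normative), for a function with signature
  // (func (param i32 f64) (local i64)), init() builds an entry block along
  // these lines:
  //
  //   param0 = MWasmParameter i32      ; local slot 0
  //   param1 = MWasmParameter f64      ; local slot 1
  //   inst   = MWasmParameter pointer  ; hidden instance pointer (InstanceReg)
  //   zero   = MConstant int64 0       ; local slot 2, zero-initialized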

  void finish() {
    mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_);

    MOZ_ASSERT(loopDepth_ == 0);
    MOZ_ASSERT(blockDepth_ == 0);
#ifdef DEBUG
    for (ControlFlowPatchVector& patches : blockPatches_) {
      MOZ_ASSERT(patches.empty());
    }
#endif
    MOZ_ASSERT(inDeadCode());
    MOZ_ASSERT(done(), "all bytes must be consumed");
    MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
  }

  /************************* Read-only interface (after local scope setup) */

  MIRGenerator& mirGen() const { return mirGen_; }
  MIRGraph& mirGraph() const { return graph_; }
  const CompileInfo& info() const { return info_; }

  MDefinition* getLocalDef(unsigned slot) {
    if (inDeadCode()) {
      return nullptr;
    }
    return curBlock_->getSlot(info().localSlot(slot));
  }

  const ValTypeVector& locals() const { return locals_; }

  /*********************************************************** Constants ***/

  MDefinition* constantF32(float f) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* cst = MWasmFloatConstant::NewFloat32(alloc(), f);
    curBlock_->add(cst);
    return cst;
  }
  // Hide all other overloads, to guarantee no implicit argument conversion.
  template <typename T>
  MDefinition* constantF32(T) = delete;

  MDefinition* constantF64(double d) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* cst = MWasmFloatConstant::NewDouble(alloc(), d);
    curBlock_->add(cst);
    return cst;
  }
  template <typename T>
  MDefinition* constantF64(T) = delete;

  MDefinition* constantI32(int32_t i) {
    if (inDeadCode()) {
      return nullptr;
    }
    MConstant* constant =
        MConstant::New(alloc(), Int32Value(i), MIRType::Int32);
    curBlock_->add(constant);
    return constant;
  }
  template <typename T>
  MDefinition* constantI32(T) = delete;

  MDefinition* constantI64(int64_t i) {
    if (inDeadCode()) {
      return nullptr;
    }
    MConstant* constant = MConstant::NewInt64(alloc(), i);
    curBlock_->add(constant);
    return constant;
  }
  template <typename T>
  MDefinition* constantI64(T) = delete;

  // Produce an MConstant of the machine's target int type (Int32 or Int64).
  MDefinition* constantTargetWord(intptr_t n) {
    return targetIs64Bit() ? constantI64(int64_t(n)) : constantI32(int32_t(n));
  }
  template <typename T>
  MDefinition* constantTargetWord(T) = delete;

#ifdef ENABLE_WASM_SIMD
  MDefinition* constantV128(V128 v) {
    if (inDeadCode()) {
      return nullptr;
    }
    MWasmFloatConstant* constant = MWasmFloatConstant::NewSimd128(
        alloc(), SimdConstant::CreateSimd128((int8_t*)v.bytes));
    curBlock_->add(constant);
    return constant;
  }
  template <typename T>
  MDefinition* constantV128(T) = delete;
#endif

  MDefinition* constantNullRef() {
    if (inDeadCode()) {
      return nullptr;
    }
    // MConstant has a lot of baggage so we don't use that here.
    MWasmNullConstant* constant = MWasmNullConstant::New(alloc());
    curBlock_->add(constant);
    return constant;
  }

  // Produce a zero constant for the specified ValType.
  MDefinition* constantZeroOfValType(ValType valType) {
    switch (valType.kind()) {
      case ValType::I32:
        return constantI32(0);
      case ValType::I64:
        return constantI64(int64_t(0));
#ifdef ENABLE_WASM_SIMD
      case ValType::V128:
        return constantV128(V128(0));
#endif
      case ValType::F32:
        return constantF32(0.0f);
      case ValType::F64:
        return constantF64(0.0);
      case ValType::Ref:
        return constantNullRef();
      default:
        MOZ_CRASH();
    }
  }

  /***************************** Code generation (after local scope setup) */

  void fence() {
    if (inDeadCode()) {
      return;
    }
    MWasmFence* ins = MWasmFence::New(alloc());
    curBlock_->add(ins);
  }

  template <class T>
  MDefinition* unary(MDefinition* op) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), op);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* unary(MDefinition* op, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), op, type);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* binary(MDefinition* lhs, MDefinition* rhs) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), lhs, rhs);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), lhs, rhs, type);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type,
                      MWasmBinaryBitwise::SubOpcode subOpc) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), lhs, rhs, type, subOpc);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* ursh(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MUrsh::NewWasm(alloc(), lhs, rhs, type);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* add(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MAdd::NewWasm(alloc(), lhs, rhs, type);
    curBlock_->add(ins);
    return ins;
  }

  bool mustPreserveNaN(MIRType type) {
    return IsFloatingPointType(type) && !moduleEnv().isAsmJS();
  }

  MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }

    // wasm can't fold x - 0.0 because of NaN with custom payloads.
    MSub* ins = MSub::NewWasm(alloc(), lhs, rhs, type, mustPreserveNaN(type));
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* nearbyInt(MDefinition* input, RoundingMode roundingMode) {
    if (inDeadCode()) {
      return nullptr;
    }

    auto* ins = MNearbyInt::New(alloc(), input, input->type(), roundingMode);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type,
                      bool isMax) {
    if (inDeadCode()) {
      return nullptr;
    }

    if (mustPreserveNaN(type)) {
      // Convert signaling NaN to quiet NaNs.
      MDefinition* zero = constantZeroOfValType(ValType::fromMIRType(type));
      lhs = sub(lhs, zero, type);
      rhs = sub(rhs, zero, type);
    }

    MMinMax* ins = MMinMax::NewWasm(alloc(), lhs, rhs, type, isMax);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type,
                   MMul::Mode mode) {
    if (inDeadCode()) {
      return nullptr;
    }

    // wasm can't fold x * 1.0 because of NaN with custom payloads.
    auto* ins =
        MMul::NewWasm(alloc(), lhs, rhs, type, mode, mustPreserveNaN(type));
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type,
                   bool unsignd) {
    if (inDeadCode()) {
      return nullptr;
    }
    bool trapOnError = !moduleEnv().isAsmJS();
    if (!unsignd && type == MIRType::Int32) {
      // Enforce the signedness of the operation by coercing the operands
      // to signed. Otherwise, operands that "look" unsigned to Ion but
      // are not unsigned to Baldr (eg, unsigned right shifts) may lead to
      // the operation being executed unsigned. Applies to mod() as well.
      //
      // Do this for Int32 only since Int64 is not subject to the same
      // issues.
      //
      // Note the offsets passed to MWasmBuiltinTruncateToInt32 are wrong here,
      // but it doesn't matter: they're not codegen'd to calls since inputs
      // already are int32.
      auto* lhs2 = createTruncateToInt32(lhs);
      curBlock_->add(lhs2);
      lhs = lhs2;
      auto* rhs2 = createTruncateToInt32(rhs);
      curBlock_->add(rhs2);
      rhs = rhs2;
    }

    // For x86 and arm we implement i64 div via c++ builtin.
    // A call to c++ builtin requires instance pointer.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
    if (type == MIRType::Int64) {
      auto* ins =
          MWasmBuiltinDivI64::New(alloc(), lhs, rhs, instancePointer_, unsignd,
                                  trapOnError, bytecodeOffset());
      curBlock_->add(ins);
      return ins;
    }
#endif

    auto* ins = MDiv::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
                          bytecodeOffset(), mustPreserveNaN(type));
    curBlock_->add(ins);
    return ins;
  }
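
  // An illustrative sketch (an inference from the comment in div(), not
  // normative): in a sequence such as
  //   (i32.div_s (i32.shr_u (local.get 0) (i32.const 1)) (i32.const -2))
  // the left operand was produced by an unsigned shift and so may "look"
  // unsigned to Ion; the explicit truncation above re-establishes a signed
  // Int32 view of both operands before MDiv is created, so the division is
  // compiled as signed, as i32.div_s requires.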

  MInstruction* createTruncateToInt32(MDefinition* op) {
    if (op->type() == MIRType::Double || op->type() == MIRType::Float32) {
      return MWasmBuiltinTruncateToInt32::New(alloc(), op, instancePointer_);
    }

    return MTruncateToInt32::New(alloc(), op);
  }

  MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type,
                   bool unsignd) {
    if (inDeadCode()) {
      return nullptr;
    }
    bool trapOnError = !moduleEnv().isAsmJS();
    if (!unsignd && type == MIRType::Int32) {
      // See block comment in div().
      auto* lhs2 = createTruncateToInt32(lhs);
      curBlock_->add(lhs2);
      lhs = lhs2;
      auto* rhs2 = createTruncateToInt32(rhs);
      curBlock_->add(rhs2);
      rhs = rhs2;
    }

    // For x86 and arm we implement i64 mod via c++ builtin.
    // A call to c++ builtin requires instance pointer.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
    if (type == MIRType::Int64) {
      auto* ins =
          MWasmBuiltinModI64::New(alloc(), lhs, rhs, instancePointer_, unsignd,
                                  trapOnError, bytecodeOffset());
      curBlock_->add(ins);
      return ins;
    }
#endif

    // Should be handled separately because we call BuiltinThunk for this case
    // and so, need to add the dependency from instancePointer.
    if (type == MIRType::Double) {
      auto* ins = MWasmBuiltinModD::New(alloc(), lhs, rhs, instancePointer_,
                                        type, bytecodeOffset());
      curBlock_->add(ins);
      return ins;
    }

    auto* ins = MMod::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
                          bytecodeOffset());
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* bitnot(MDefinition* op) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MBitNot::New(alloc(), op);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* select(MDefinition* trueExpr, MDefinition* falseExpr,
                      MDefinition* condExpr) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MWasmSelect::New(alloc(), trueExpr, falseExpr, condExpr);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* extendI32(MDefinition* op, bool isUnsigned) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MExtendInt32ToInt64::New(alloc(), op, isUnsigned);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* signExtend(MDefinition* op, uint32_t srcSize,
                          uint32_t targetSize) {
    if (inDeadCode()) {
      return nullptr;
    }
    MInstruction* ins;
    switch (targetSize) {
      case 4: {
        MSignExtendInt32::Mode mode;
        switch (srcSize) {
          case 1:
            mode = MSignExtendInt32::Byte;
            break;
          case 2:
            mode = MSignExtendInt32::Half;
            break;
          default:
            MOZ_CRASH("Bad sign extension");
        }
        ins = MSignExtendInt32::New(alloc(), op, mode);
        break;
      }
      case 8: {
        MSignExtendInt64::Mode mode;
        switch (srcSize) {
          case 1:
            mode = MSignExtendInt64::Byte;
            break;
          case 2:
            mode = MSignExtendInt64::Half;
            break;
          case 4:
            mode = MSignExtendInt64::Word;
            break;
          default:
            MOZ_CRASH("Bad sign extension");
        }
        ins = MSignExtendInt64::New(alloc(), op, mode);
        break;
      }
      default: {
        MOZ_CRASH("Bad sign extension");
      }
    }
    curBlock_->add(ins);
    return ins;
  }
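
  // For reference, a sketch of the mapping implied by the switch above (not
  // an exhaustive table): i32.extend8_s uses (srcSize=1, targetSize=4),
  // i32.extend16_s (2, 4), i64.extend8_s (1, 8), i64.extend16_s (2, 8), and
  // i64.extend32_s (4, 8).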

  MDefinition* convertI64ToFloatingPoint(MDefinition* op, MIRType type,
                                         bool isUnsigned) {
    if (inDeadCode()) {
      return nullptr;
    }
#if defined(JS_CODEGEN_ARM)
    auto* ins = MBuiltinInt64ToFloatingPoint::New(
        alloc(), op, instancePointer_, type, bytecodeOffset(), isUnsigned);
#else
    auto* ins = MInt64ToFloatingPoint::New(alloc(), op, type, bytecodeOffset(),
                                           isUnsigned);
#endif
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* rotate(MDefinition* input, MDefinition* count, MIRType type,
                      bool left) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MRotate::New(alloc(), input, count, type, left);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* truncate(MDefinition* op, TruncFlags flags) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = T::New(alloc(), op, flags, bytecodeOffset());
    curBlock_->add(ins);
    return ins;
  }

#if defined(JS_CODEGEN_ARM)
  MDefinition* truncateWithInstance(MDefinition* op, TruncFlags flags) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MWasmBuiltinTruncateToInt64::New(alloc(), op, instancePointer_,
                                                 flags, bytecodeOffset());
    curBlock_->add(ins);
    return ins;
  }
#endif

  MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op,
                       MCompare::CompareType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MCompare::NewWasm(alloc(), lhs, rhs, op, type);
    curBlock_->add(ins);
    return ins;
  }

  void assign(unsigned slot, MDefinition* def) {
    if (inDeadCode()) {
      return;
    }
    curBlock_->setSlot(info().localSlot(slot), def);
  }

  MDefinition* compareIsNull(MDefinition* ref, JSOp compareOp) {
    MDefinition* nullVal = constantNullRef();
    if (!nullVal) {
      return nullptr;
    }
    return compare(ref, nullVal, compareOp, MCompare::Compare_WasmAnyRef);
  }

  [[nodiscard]] bool refAsNonNull(MDefinition* ref) {
    if (inDeadCode()) {
      return true;
    }

    auto* ins = MWasmTrapIfNull::New(
        alloc(), ref, wasm::Trap::NullPointerDereference, bytecodeOffset());

    curBlock_->add(ins);
    return true;
  }

#ifdef ENABLE_WASM_FUNCTION_REFERENCES
  [[nodiscard]] bool brOnNull(uint32_t relativeDepth, const DefVector& values,
                              const ResultType& type, MDefinition* condition) {
    if (inDeadCode()) {
      return true;
    }

    MBasicBlock* fallthroughBlock = nullptr;
    if (!newBlock(curBlock_, &fallthroughBlock)) {
      return false;
    }

    MDefinition* check = compareIsNull(condition, JSOp::Eq);
    if (!check) {
      return false;
    }
    MTest* test = MTest::New(alloc(), check, nullptr, fallthroughBlock);
    if (!test ||
        !addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
      return false;
    }

    if (!pushDefs(values)) {
      return false;
    }

    curBlock_->end(test);
    curBlock_ = fallthroughBlock;
    return true;
  }

  [[nodiscard]] bool brOnNonNull(uint32_t relativeDepth,
                                 const DefVector& values,
                                 const ResultType& type,
                                 MDefinition* condition) {
    if (inDeadCode()) {
      return true;
    }

    MBasicBlock* fallthroughBlock = nullptr;
    if (!newBlock(curBlock_, &fallthroughBlock)) {
      return false;
    }

    MDefinition* check = compareIsNull(condition, JSOp::Ne);
    if (!check) {
      return false;
    }
    MTest* test = MTest::New(alloc(), check, nullptr, fallthroughBlock);
    if (!test ||
        !addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
      return false;
    }

    if (!pushDefs(values)) {
      return false;
    }

    curBlock_->end(test);
    curBlock_ = fallthroughBlock;
    return true;
  }

#endif  // ENABLE_WASM_FUNCTION_REFERENCES

#ifdef ENABLE_WASM_GC
  MDefinition* refI31(MDefinition* input) {
    auto* ins = MWasmNewI31Ref::New(alloc(), input);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* i31Get(MDefinition* input, FieldWideningOp wideningOp) {
    auto* ins = MWasmI31RefGet::New(alloc(), input, wideningOp);
    curBlock_->add(ins);
    return ins;
  }
#endif  // ENABLE_WASM_GC

#ifdef ENABLE_WASM_SIMD
  // About Wasm SIMD as supported by Ion:
  //
  // The expectation is that Ion will only ever support SIMD on x86 and x64,
  // since ARMv7 will cease to be a tier-1 platform soon, and MIPS64 will never
  // implement SIMD.
  //
  // The division of the operations into MIR nodes reflects that expectation,
  // and is a good fit for x86/x64. Should the expectation change we'll
  // possibly want to re-architect the SIMD support to be a little more
  // general.
  //
  // Most SIMD operations map directly to a single MIR node that ultimately
  // ends up being expanded in the macroassembler.
  //
  // Some SIMD operations that do have a complete macroassembler expansion are
  // open-coded into multiple MIR nodes here; in some cases that's just
  // convenience, in other cases it may also allow them to benefit from Ion
  // optimizations. The reason for the expansions will be documented by a
  // comment.
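  //
  // As a hedged illustration of the two styles (names below refer to helpers
  // in this file): a simple operation like i32x4.add becomes a single
  // MWasmBinarySimd128 via binarySimd128(), whereas a shift such as i8x16.shl
  // may first mask the shift count with an MBitAnd (see shiftSimd128() below)
  // before emitting MWasmShiftSimd128.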

  // (v128,v128) -> v128 effect-free binary operations
  MDefinition* binarySimd128(MDefinition* lhs, MDefinition* rhs,
                             bool commutative, SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
               rhs->type() == MIRType::Simd128);

    auto* ins = MWasmBinarySimd128::New(alloc(), lhs, rhs, commutative, op);
    curBlock_->add(ins);
    return ins;
  }

  // (v128,i32) -> v128 effect-free shift operations
  MDefinition* shiftSimd128(MDefinition* lhs, MDefinition* rhs, SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
               rhs->type() == MIRType::Int32);

    int32_t maskBits;
    if (MacroAssembler::MustMaskShiftCountSimd128(op, &maskBits)) {
      MDefinition* mask = constantI32(maskBits);
      auto* rhs2 = MBitAnd::New(alloc(), rhs, mask, MIRType::Int32);
      curBlock_->add(rhs2);
      rhs = rhs2;
    }

    auto* ins = MWasmShiftSimd128::New(alloc(), lhs, rhs, op);
    curBlock_->add(ins);
    return ins;
  }

  // (v128,scalar,imm) -> v128
  MDefinition* replaceLaneSimd128(MDefinition* lhs, MDefinition* rhs,
                                  uint32_t laneIndex, SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(lhs->type() == MIRType::Simd128);

    auto* ins = MWasmReplaceLaneSimd128::New(alloc(), lhs, rhs, laneIndex, op);
    curBlock_->add(ins);
    return ins;
  }

  // (scalar) -> v128 effect-free unary operations
  MDefinition* scalarToSimd128(MDefinition* src, SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    auto* ins = MWasmScalarToSimd128::New(alloc(), src, op);
    curBlock_->add(ins);
    return ins;
  }

  // (v128) -> v128 effect-free unary operations
  MDefinition* unarySimd128(MDefinition* src, SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(src->type() == MIRType::Simd128);
    auto* ins = MWasmUnarySimd128::New(alloc(), src, op);
    curBlock_->add(ins);
    return ins;
  }

  // (v128, imm) -> scalar effect-free unary operations
  MDefinition* reduceSimd128(MDefinition* src, SimdOp op, ValType outType,
                             uint32_t imm = 0) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(src->type() == MIRType::Simd128);
    auto* ins =
        MWasmReduceSimd128::New(alloc(), src, op, outType.toMIRType(), imm);
    curBlock_->add(ins);
    return ins;
  }

  // (v128, v128, v128) -> v128 effect-free operations
  MDefinition* ternarySimd128(MDefinition* v0, MDefinition* v1,
                              MDefinition* v2, SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(v0->type() == MIRType::Simd128 &&
               v1->type() == MIRType::Simd128 &&
               v2->type() == MIRType::Simd128);

    auto* ins = MWasmTernarySimd128::New(alloc(), v0, v1, v2, op);
    curBlock_->add(ins);
    return ins;
  }

  // (v128, v128, imm_v128) -> v128 effect-free operations
  MDefinition* shuffleSimd128(MDefinition* v1, MDefinition* v2, V128 control) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(v1->type() == MIRType::Simd128);
    MOZ_ASSERT(v2->type() == MIRType::Simd128);
    auto* ins = BuildWasmShuffleSimd128(
        alloc(), reinterpret_cast<int8_t*>(control.bytes), v1, v2);
    curBlock_->add(ins);
    return ins;
  }

  // Also see below for SIMD memory references

#endif  // ENABLE_WASM_SIMD

  /************************************************ Linear memory accesses */

  // For detailed information about memory accesses, see "Linear memory
  // addresses and bounds checking" in WasmMemory.cpp.

 private:
  // If the platform does not have a HeapReg, load the memory base from
  // instance.
  MDefinition* maybeLoadMemoryBase(uint32_t memoryIndex) {
#ifdef WASM_HAS_HEAPREG
    if (memoryIndex == 0) {
      return nullptr;
    }
#endif
    return memoryBase(memoryIndex);
  }

 public:
  // A value holding the memory base, whether that's HeapReg or some other
  // register.
  MDefinition* memoryBase(uint32_t memoryIndex) {
    AliasSet aliases = !moduleEnv_.memories[memoryIndex].canMovingGrow()
                           ? AliasSet::None()
                           : AliasSet::Load(AliasSet::WasmHeapMeta);
#ifdef WASM_HAS_HEAPREG
    if (memoryIndex == 0) {
      MWasmHeapReg* base = MWasmHeapReg::New(alloc(), aliases);
      curBlock_->add(base);
      return base;
    }
#endif
    uint32_t offset =
        memoryIndex == 0
            ? Instance::offsetOfMemory0Base()
            : (Instance::offsetInData(
                  moduleEnv_.offsetOfMemoryInstanceData(memoryIndex) +
                  offsetof(MemoryInstanceData, base)));
    MWasmLoadInstance* base = MWasmLoadInstance::New(
        alloc(), instancePointer_, offset, MIRType::Pointer, aliases);
    curBlock_->add(base);
    return base;
  }

 private:
  // If the bounds checking strategy requires it, load the bounds check limit
  // from the instance.
  MWasmLoadInstance* maybeLoadBoundsCheckLimit(uint32_t memoryIndex,
                                               MIRType type) {
    MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
    if (moduleEnv_.hugeMemoryEnabled(memoryIndex)) {
      return nullptr;
    }
    uint32_t offset =
        memoryIndex == 0
            ? Instance::offsetOfMemory0BoundsCheckLimit()
            : (Instance::offsetInData(
                  moduleEnv_.offsetOfMemoryInstanceData(memoryIndex) +
                  offsetof(MemoryInstanceData, boundsCheckLimit)));
    AliasSet aliases = !moduleEnv_.memories[memoryIndex].canMovingGrow()
                           ? AliasSet::None()
                           : AliasSet::Load(AliasSet::WasmHeapMeta);
    auto* load = MWasmLoadInstance::New(alloc(), instancePointer_, offset,
                                        type, aliases);
    curBlock_->add(load);
    return load;
  }

  // Return true if the access requires an alignment check. If so, sets
  // *mustAdd to true if the offset must be added to the pointer before
  // checking.
  bool needAlignmentCheck(MemoryAccessDesc* access, MDefinition* base,
                          bool* mustAdd) {
    MOZ_ASSERT(!*mustAdd);

    // asm.js accesses are always aligned and need no checks.
    if (moduleEnv_.isAsmJS() || !access->isAtomic()) {
      return false;
    }

    // If the EA is known and aligned it will need no checks.
    if (base->isConstant()) {
      // We only care about the low bits, so overflow is OK, as is chopping off
      // the high bits of an i64 pointer.
      uint32_t ptr = 0;
      if (isMem64(access->memoryIndex())) {
        ptr = uint32_t(base->toConstant()->toInt64());
      } else {
        ptr = base->toConstant()->toInt32();
      }
      if (((ptr + access->offset64()) & (access->byteSize() - 1)) == 0) {
        return false;
      }
    }

    // If the offset is aligned then the EA is just the pointer, for
    // the purposes of this check.
    *mustAdd = (access->offset64() & (access->byteSize() - 1)) != 0;
    return true;
  }
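
  // A worked example (illustrative numbers only): for an atomic i32 access
  // with byteSize() == 4, a constant base of 8 and an offset of 4 give an EA
  // of 12, which is 4-aligned, so no check is emitted. With a base of 8 and
  // an offset of 2, the EA is unaligned; the offset itself is also unaligned,
  // so *mustAdd is set and the check is performed on base + offset.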

  // Fold a constant base into the offset and make the base 0, provided the
  // offset stays below the guard limit. The reason for folding the base into
  // the offset rather than vice versa is that a small offset can be ignored
  // by both explicit bounds checking and bounds check elimination.
  void foldConstantPointer(MemoryAccessDesc* access, MDefinition** base) {
    uint32_t offsetGuardLimit = GetMaxOffsetGuardLimit(
        moduleEnv_.hugeMemoryEnabled(access->memoryIndex()));

    if ((*base)->isConstant()) {
      uint64_t basePtr = 0;
      if (isMem64(access->memoryIndex())) {
        basePtr = uint64_t((*base)->toConstant()->toInt64());
      } else {
        basePtr = uint64_t(int64_t((*base)->toConstant()->toInt32()));
      }

      uint64_t offset = access->offset64();

      if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
        offset += uint32_t(basePtr);
        access->setOffset32(uint32_t(offset));
        *base = isMem64(access->memoryIndex()) ? constantI64(int64_t(0))
                                               : constantI32(0);
      }
    }
  }
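
  // For instance (numbers are illustrative, not normative): with a guard
  // limit of 64 KiB, a constant base of 256 and an access offset of 16 are
  // folded into offset 272 with a zero base, so later bounds checking only
  // has to consider the (now zero) dynamic index plus a small offset.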

  // If the offset must be added because it is large or because the true EA
  // must be checked, compute the effective address, trapping on overflow.
  void maybeComputeEffectiveAddress(MemoryAccessDesc* access,
                                    MDefinition** base, bool mustAddOffset) {
    uint32_t offsetGuardLimit = GetMaxOffsetGuardLimit(
        moduleEnv_.hugeMemoryEnabled(access->memoryIndex()));

    if (access->offset64() >= offsetGuardLimit ||
        access->offset64() > UINT32_MAX || mustAddOffset ||
        !JitOptions.wasmFoldOffsets) {
      *base = computeEffectiveAddress(*base, access);
    }
  }

  MWasmLoadInstance* needBoundsCheck(uint32_t memoryIndex) {
#ifdef JS_64BIT
    // For 32-bit base pointers:
    //
    // If the bounds check uses the full 64 bits of the bounds check limit,
    // then the base pointer must be zero-extended to 64 bits before checking
    // and wrapped back to 32-bits after Spectre masking. (And it's important
    // that the value we end up with has flowed through the Spectre mask.)
    //
    // If the memory's max size is known to be smaller than 64K pages exactly,
    // we can use a 32-bit check and avoid extension and wrapping.
    static_assert(0x100000000 % PageSize == 0);
    bool mem32LimitIs64Bits =
        isMem32(memoryIndex) &&
        !moduleEnv_.memories[memoryIndex].boundsCheckLimitIs32Bits() &&
        MaxMemoryPages(moduleEnv_.memories[memoryIndex].indexType()) >=
            Pages(0x100000000 / PageSize);
#else
    // On 32-bit platforms we have no more than 2GB memory and the limit for a
    // 32-bit base pointer is never a 64-bit value.
    bool mem32LimitIs64Bits = false;
#endif
    return maybeLoadBoundsCheckLimit(memoryIndex,
                                     mem32LimitIs64Bits || isMem64(memoryIndex)
                                         ? MIRType::Int64
                                         : MIRType::Int32);
  }
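
  // To make the 64-bit-limit case concrete (an assumption-based sketch, not
  // normative): with 64 KiB pages, a 32-bit memory whose maximum is the full
  // 65536 pages can have a bounds check limit of 2^32, which does not fit in
  // a uint32_t, so the limit is loaded as Int64 and the index is
  // zero-extended before the check.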

  void performBoundsCheck(uint32_t memoryIndex, MDefinition** base,
                          MWasmLoadInstance* boundsCheckLimit) {
    // At the outset, actualBase could be the result of pretty much any integer
    // operation, or it could be the load of an integer constant. If its type
    // is i32, we may assume the value has a canonical representation for the
    // platform, see doc block in MacroAssembler.h.
    MDefinition* actualBase = *base;

    // Extend an i32 index value to perform a 64-bit bounds check if the memory
    // can be 4GB or larger.
    bool extendAndWrapIndex =
        isMem32(memoryIndex) && boundsCheckLimit->type() == MIRType::Int64;
    if (extendAndWrapIndex) {
      auto* extended = MWasmExtendU32Index::New(alloc(), actualBase);
      curBlock_->add(extended);
      actualBase = extended;
    }

    auto target = memoryIndex == 0 ? MWasmBoundsCheck::Memory0
                                   : MWasmBoundsCheck::Unknown;
    auto* ins = MWasmBoundsCheck::New(alloc(), actualBase, boundsCheckLimit,
                                      bytecodeOffset(), target);
    curBlock_->add(ins);
    actualBase = ins;

    // If we're masking, then we update *base to create a dependency chain
    // through the masked index. But we will first need to wrap the index
    // value if it was extended above.
    if (JitOptions.spectreIndexMasking) {
      if (extendAndWrapIndex) {
        auto* wrapped = MWasmWrapU32Index::New(alloc(), actualBase);
        curBlock_->add(wrapped);
        actualBase = wrapped;
      }
      *base = actualBase;
    }
  }

  // Perform all necessary checking before a wasm heap access, based on the
  // attributes of the access and base pointer.
  //
  // For 64-bit indices on platforms that are limited to indices that fit into
  // 32 bits (all 32-bit platforms and mips64), this returns a bounds-checked
  // `base` that has type Int32. Lowering code depends on this and will assert
  // that the base has this type. See the end of this function.

  void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
                                        MDefinition** base) {
    MOZ_ASSERT(!inDeadCode());
    MOZ_ASSERT(!moduleEnv_.isAsmJS());

    // Attempt to fold an offset into a constant base pointer so as to simplify
    // the addressing expression. This may update *base.
    foldConstantPointer(access, base);

    // Determine whether an alignment check is needed and whether the offset
    // must be checked too.
    bool mustAddOffsetForAlignmentCheck = false;
    bool alignmentCheck =
        needAlignmentCheck(access, *base, &mustAddOffsetForAlignmentCheck);

    // If bounds checking or alignment checking requires it, compute the
    // effective address: add the offset into the pointer and trap on overflow.
    // This may update *base.
    maybeComputeEffectiveAddress(access, base, mustAddOffsetForAlignmentCheck);

    // Emit the alignment check if necessary; it traps if it fails.
    if (alignmentCheck) {
      curBlock_->add(MWasmAlignmentCheck::New(
          alloc(), *base, access->byteSize(), bytecodeOffset()));
    }

    // Emit the bounds check if necessary; it traps if it fails. This may
    // update *base.
    MWasmLoadInstance* boundsCheckLimit =
        needBoundsCheck(access->memoryIndex());
    if (boundsCheckLimit) {
      performBoundsCheck(access->memoryIndex(), base, boundsCheckLimit);
    }

#ifndef JS_64BIT
    if (isMem64(access->memoryIndex())) {
      // We must have had an explicit bounds check (or one was elided if it
      // was proved redundant), and on 32-bit systems the index will for sure
      // fit in 32 bits: the max memory is 2GB. So chop the index down to
      // 32-bit to simplify the back-end.
      MOZ_ASSERT((*base)->type() == MIRType::Int64);
      MOZ_ASSERT(!moduleEnv_.hugeMemoryEnabled(access->memoryIndex()));
      auto* chopped = MWasmWrapU32Index::New(alloc(), *base);
      MOZ_ASSERT(chopped->type() == MIRType::Int32);
      curBlock_->add(chopped);
      *base = chopped;
    }
#endif
  }

  bool isSmallerAccessForI64(ValType result, const MemoryAccessDesc* access) {
    if (result == ValType::I64 && access->byteSize() <= 4) {
      // These smaller accesses should all be zero-extending.
      MOZ_ASSERT(!isSignedIntType(access->type()));
      return true;
    }
    return false;
  }

 public:
  bool isMem32(uint32_t memoryIndex) {
    return moduleEnv_.memories[memoryIndex].indexType() == IndexType::I32;
  }
  bool isMem64(uint32_t memoryIndex) {
    return moduleEnv_.memories[memoryIndex].indexType() == IndexType::I64;
  }
  bool hugeMemoryEnabled(uint32_t memoryIndex) {
    return moduleEnv_.hugeMemoryEnabled(memoryIndex);
  }

  // Add the offset into the pointer to yield the EA; trap on overflow.
  MDefinition* computeEffectiveAddress(MDefinition* base,
                                       MemoryAccessDesc* access) {
    if (inDeadCode()) {
      return nullptr;
    }
    uint64_t offset = access->offset64();
    if (offset == 0) {
      return base;
    }
    auto* ins = MWasmAddOffset::New(alloc(), base, offset, bytecodeOffset());
    curBlock_->add(ins);
    access->clearOffset();
    return ins;
  }

  MDefinition* load(MDefinition* base, MemoryAccessDesc* access,
                    ValType result) {
    if (inDeadCode()) {
      return nullptr;
    }

    MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
    MInstruction* load = nullptr;
    if (moduleEnv_.isAsmJS()) {
      MOZ_ASSERT(access->offset64() == 0);
      MWasmLoadInstance* boundsCheckLimit =
          maybeLoadBoundsCheckLimit(access->memoryIndex(), MIRType::Int32);
      load = MAsmJSLoadHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
                                 access->type());
    } else {
      checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
      MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
      load = MWasmLoad::New(alloc(), memoryBase, base, *access,
                            result.toMIRType());
    }
    if (!load) {
      return nullptr;
    }
    curBlock_->add(load);
    return load;
  }

  void store(MDefinition* base, MemoryAccessDesc* access, MDefinition* v) {
    if (inDeadCode()) {
      return;
    }

    MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
    MInstruction* store = nullptr;
    if (moduleEnv_.isAsmJS()) {
      MOZ_ASSERT(access->offset64() == 0);
      MWasmLoadInstance* boundsCheckLimit =
          maybeLoadBoundsCheckLimit(access->memoryIndex(), MIRType::Int32);
      store = MAsmJSStoreHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
                                   access->type(), v);
    } else {
      checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
      MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
      store = MWasmStore::New(alloc(), memoryBase, base, *access, v);
    }
    if (!store) {
      return;
    }
    curBlock_->add(store);
  }

  MDefinition* atomicCompareExchangeHeap(MDefinition* base,
                                         MemoryAccessDesc* access,
                                         ValType result, MDefinition* oldv,
                                         MDefinition* newv) {
    if (inDeadCode()) {
      return nullptr;
    }

    checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
    MOZ_ASSERT(base->type() == MIRType::Int32);
#endif

    if (isSmallerAccessForI64(result, access)) {
      auto* cvtOldv =
          MWrapInt64ToInt32::New(alloc(), oldv, /*bottomHalf=*/true);
      curBlock_->add(cvtOldv);
      oldv = cvtOldv;

      auto* cvtNewv =
          MWrapInt64ToInt32::New(alloc(), newv, /*bottomHalf=*/true);
      curBlock_->add(cvtNewv);
      newv = cvtNewv;
    }

    MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
    MInstruction* cas = MWasmCompareExchangeHeap::New(
        alloc(), bytecodeOffset(), memoryBase, base, *access, oldv, newv,
        instancePointer_);
    if (!cas) {
      return nullptr;
    }
    curBlock_->add(cas);

    if (isSmallerAccessForI64(result, access)) {
      cas = MExtendInt32ToInt64::New(alloc(), cas, true);
      curBlock_->add(cas);
    }

    return cas;
  }

  MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc* access,
                                  ValType result, MDefinition* value) {
    if (inDeadCode()) {
      return nullptr;
    }

    checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
    MOZ_ASSERT(base->type() == MIRType::Int32);
#endif

    if (isSmallerAccessForI64(result, access)) {
      auto* cvtValue =
          MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
      curBlock_->add(cvtValue);
      value = cvtValue;
    }

    MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
    MInstruction* xchg =
        MWasmAtomicExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
                                     base, *access, value, instancePointer_);
    if (!xchg) {
      return nullptr;
    }
    curBlock_->add(xchg);

    if (isSmallerAccessForI64(result, access)) {
      xchg = MExtendInt32ToInt64::New(alloc(), xchg, true);
      curBlock_->add(xchg);
    }

    return xchg;
  }

  MDefinition* atomicBinopHeap(AtomicOp op, MDefinition* base,
                               MemoryAccessDesc* access, ValType result,
                               MDefinition* value) {
    if (inDeadCode()) {
      return nullptr;
    }

    checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
    MOZ_ASSERT(base->type() == MIRType::Int32);
#endif

    if (isSmallerAccessForI64(result, access)) {
      auto* cvtValue =
          MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
      curBlock_->add(cvtValue);
      value = cvtValue;
    }

    MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
    MInstruction* binop =
        MWasmAtomicBinopHeap::New(alloc(), bytecodeOffset(), op, memoryBase,
                                  base, *access, value, instancePointer_);
    if (!binop) {
      return nullptr;
    }
    curBlock_->add(binop);

    if (isSmallerAccessForI64(result, access)) {
      binop = MExtendInt32ToInt64::New(alloc(), binop, true);
      curBlock_->add(binop);
    }

    return binop;
  }

#ifdef ENABLE_WASM_SIMD
  MDefinition* loadSplatSimd128(Scalar::Type viewType,
                                const LinearMemoryAddress<MDefinition*>& addr,
                                wasm::SimdOp splatOp) {
    if (inDeadCode()) {
      return nullptr;
    }

    MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align,
                            addr.offset, bytecodeIfNotAsmJS(),
                            hugeMemoryEnabled(addr.memoryIndex));

    // Generate better code (on x86)
    // If AVX2 is enabled, more broadcast operators are available.
    if (viewType == Scalar::Float64
#  if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
        || (js::jit::CPUInfo::IsAVX2Present() &&
            (viewType == Scalar::Uint8 || viewType == Scalar::Uint16 ||
             viewType == Scalar::Float32))
#  endif
    ) {
      access.setSplatSimd128Load();
      return load(addr.base, &access, ValType::V128);
    }

    ValType resultType = ValType::I32;
    if (viewType == Scalar::Float32) {
      resultType = ValType::F32;
      splatOp = wasm::SimdOp::F32x4Splat;
    }
    auto* scalar = load(addr.base, &access, resultType);
    if (!inDeadCode() && !scalar) {
      return nullptr;
    }
    return scalarToSimd128(scalar, splatOp);
  }

  MDefinition* loadExtendSimd128(const LinearMemoryAddress<MDefinition*>& addr,
                                 wasm::SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    // Generate better code (on x86) by loading as a double with an
    // operation that sign extends directly.
    MemoryAccessDesc access(addr.memoryIndex, Scalar::Float64, addr.align,
                            addr.offset, bytecodeIfNotAsmJS(),
                            hugeMemoryEnabled(addr.memoryIndex));
    access.setWidenSimd128Load(op);
    return load(addr.base, &access, ValType::V128);
  }

  MDefinition* loadZeroSimd128(Scalar::Type viewType, size_t numBytes,
                               const LinearMemoryAddress<MDefinition*>& addr) {
    if (inDeadCode()) {
      return nullptr;
    }

    MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align,
                            addr.offset, bytecodeIfNotAsmJS(),
                            hugeMemoryEnabled(addr.memoryIndex));
    access.setZeroExtendSimd128Load();
    return load(addr.base, &access, ValType::V128);
  }

  MDefinition* loadLaneSimd128(uint32_t laneSize,
                               const LinearMemoryAddress<MDefinition*>& addr,
                               uint32_t laneIndex, MDefinition* src) {
    if (inDeadCode()) {
      return nullptr;
    }

    MemoryAccessDesc access(addr.memoryIndex, Scalar::Simd128, addr.align,
                            addr.offset, bytecodeIfNotAsmJS(),
                            hugeMemoryEnabled(addr.memoryIndex));
    MDefinition* memoryBase = maybeLoadMemoryBase(access.memoryIndex());
    MDefinition* base = addr.base;
    MOZ_ASSERT(!moduleEnv_.isAsmJS());
    checkOffsetAndAlignmentAndBounds(&access, &base);
#  ifndef JS_64BIT
    MOZ_ASSERT(base->type() == MIRType::Int32);
#  endif
    MInstruction* load = MWasmLoadLaneSimd128::New(
        alloc(), memoryBase, base, access, laneSize, laneIndex, src);
    if (!load) {
      return nullptr;
    }
    curBlock_->add(load);
    return load;
  }

  void storeLaneSimd128(uint32_t laneSize,
                        const LinearMemoryAddress<MDefinition*>& addr,
                        uint32_t laneIndex, MDefinition* src) {
    if (inDeadCode()) {
      return;
    }
    MemoryAccessDesc access(addr.memoryIndex, Scalar::Simd128, addr.align,
                            addr.offset, bytecodeIfNotAsmJS(),
                            hugeMemoryEnabled(addr.memoryIndex));
    MDefinition* memoryBase = maybeLoadMemoryBase(access.memoryIndex());
    MDefinition* base = addr.base;
    MOZ_ASSERT(!moduleEnv_.isAsmJS());
    checkOffsetAndAlignmentAndBounds(&access, &base);
#  ifndef JS_64BIT
    MOZ_ASSERT(base->type() == MIRType::Int32);
#  endif
    MInstruction* store = MWasmStoreLaneSimd128::New(
        alloc(), memoryBase, base, access, laneSize, laneIndex, src);
    if (!store) {
      return;
    }
    curBlock_->add(store);
  }
#endif  // ENABLE_WASM_SIMD

  /********************************************** Global variable accesses */

  MDefinition* loadGlobalVar(unsigned instanceDataOffset, bool isConst,
                             bool isIndirect, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }

    MInstruction* load;
    if (isIndirect) {
      // Pull a pointer to the value out of Instance::globalArea, then
      // load from that pointer. Note that the pointer is immutable
      // even though the value it points at may change, hence the use of
      // |true| for the first node's |isConst| value, irrespective of
      // the |isConst| formal parameter to this method. The latter
      // applies to the denoted value as a whole.
      auto* cellPtr = MWasmLoadInstanceDataField::New(
          alloc(), MIRType::Pointer, instanceDataOffset,
          /*isConst=*/true, instancePointer_);
      curBlock_->add(cellPtr);
      load = MWasmLoadGlobalCell::New(alloc(), type, cellPtr);
    } else {
      // Pull the value directly out of Instance::globalArea.
      load = MWasmLoadInstanceDataField::New(alloc(), type, instanceDataOffset,
                                             isConst, instancePointer_);
    }
    curBlock_->add(load);
    return load;
  }

  [[nodiscard]] bool storeGlobalVar(uint32_t lineOrBytecode,
                                    uint32_t instanceDataOffset,
                                    bool isIndirect, MDefinition* v) {
    if (inDeadCode()) {
      return true;
    }

    if (isIndirect) {
      // Pull a pointer to the value out of Instance::globalArea, then
      // store through that pointer.
      auto* valueAddr = MWasmLoadInstanceDataField::New(
          alloc(), MIRType::Pointer, instanceDataOffset,
          /*isConst=*/true, instancePointer_);
      curBlock_->add(valueAddr);

      // Handle a store to a ref-typed field specially
      if (v->type() == MIRType::WasmAnyRef) {
        // Load the previous value for the post-write barrier
        auto* prevValue =
            MWasmLoadGlobalCell::New(alloc(), MIRType::WasmAnyRef, valueAddr);
        curBlock_->add(prevValue);

        // Store the new value
        auto* store =
            MWasmStoreRef::New(alloc(), instancePointer_, valueAddr,
                               /*valueOffset=*/0, v, AliasSet::WasmGlobalCell,
                               WasmPreBarrierKind::Normal);
        curBlock_->add(store);

        // Call the post-write barrier
        return postBarrierPrecise(lineOrBytecode, valueAddr, prevValue);
      }

      auto* store = MWasmStoreGlobalCell::New(alloc(), v, valueAddr);
      curBlock_->add(store);
      return true;
    }
    // Or else store the value directly in Instance::globalArea.

    // Handle a store to a ref-typed field specially
    if (v->type() == MIRType::WasmAnyRef) {
      // Compute the address of the ref-typed global
      auto* valueAddr = MWasmDerivedPointer::New(
          alloc(), instancePointer_,
          wasm::Instance::offsetInData(instanceDataOffset));
      curBlock_->add(valueAddr);

      // Load the previous value for the post-write barrier
      auto* prevValue =
          MWasmLoadGlobalCell::New(alloc(), MIRType::WasmAnyRef, valueAddr);
      curBlock_->add(prevValue);

      // Store the new value
      auto* store =
          MWasmStoreRef::New(alloc(), instancePointer_, valueAddr,
                             /*valueOffset=*/0, v, AliasSet::WasmInstanceData,
                             WasmPreBarrierKind::Normal);
      curBlock_->add(store);

      // Call the post-write barrier
      return postBarrierPrecise(lineOrBytecode, valueAddr, prevValue);
    }

    auto* store = MWasmStoreInstanceDataField::New(alloc(), instanceDataOffset,
                                                   v, instancePointer_);
    curBlock_->add(store);
    return true;
  }
1807 MDefinition* loadTableField(uint32_t tableIndex, unsigned fieldOffset,
1808 MIRType type) {
1809 uint32_t instanceDataOffset = wasm::Instance::offsetInData(
1810 moduleEnv_.offsetOfTableInstanceData(tableIndex) + fieldOffset);
1811 auto* load =
1812 MWasmLoadInstance::New(alloc(), instancePointer_, instanceDataOffset,
1813 type, AliasSet::Load(AliasSet::WasmTableMeta));
1814 curBlock_->add(load);
1815 return load;
1818 MDefinition* loadTableLength(uint32_t tableIndex) {
1819 return loadTableField(tableIndex, offsetof(TableInstanceData, length),
1820 MIRType::Int32);
1823 MDefinition* loadTableElements(uint32_t tableIndex) {
1824 return loadTableField(tableIndex, offsetof(TableInstanceData, elements),
1825 MIRType::Pointer);
1828 MDefinition* tableGetAnyRef(uint32_t tableIndex, MDefinition* index) {
1829 // Load the table length and perform a bounds check with spectre index
1830 // masking
1831 auto* length = loadTableLength(tableIndex);
1832 auto* check = MWasmBoundsCheck::New(
1833 alloc(), index, length, bytecodeOffset(), MWasmBoundsCheck::Unknown);
1834 curBlock_->add(check);
1835 if (JitOptions.spectreIndexMasking) {
1836 index = check;
1839 // Load the table elements pointer, then load the element
1840 auto* elements = loadTableElements(tableIndex);
1841 auto* element = MWasmLoadTableElement::New(alloc(), elements, index);
1842 curBlock_->add(element);
1843 return element;
1846 [[nodiscard]] bool tableSetAnyRef(uint32_t tableIndex, MDefinition* index,
1847 MDefinition* value,
1848 uint32_t lineOrBytecode) {
1849 // Load the table length and perform a bounds check with spectre index
1850 // masking
1851 auto* length = loadTableLength(tableIndex);
1852 auto* check = MWasmBoundsCheck::New(
1853 alloc(), index, length, bytecodeOffset(), MWasmBoundsCheck::Unknown);
1854 curBlock_->add(check);
1855 if (JitOptions.spectreIndexMasking) {
1856 index = check;
1859 // Load the table elements
1860 auto* elements = loadTableElements(tableIndex);
1862 // Load the previous value
1863 auto* prevValue = MWasmLoadTableElement::New(alloc(), elements, index);
1864 curBlock_->add(prevValue);
1866 // Compute the value's location for the post barrier
1867 auto* loc =
1868 MWasmDerivedIndexPointer::New(alloc(), elements, index, ScalePointer);
1869 curBlock_->add(loc);
1871 // Store the new value
1872 auto* store = MWasmStoreRef::New(
1873 alloc(), instancePointer_, loc, /*valueOffset=*/0, value,
1874 AliasSet::WasmTableElement, WasmPreBarrierKind::Normal);
1875 curBlock_->add(store);
1877 // Perform the post barrier
1878 return postBarrierPrecise(lineOrBytecode, loc, prevValue);
1881 void addInterruptCheck() {
1882 if (inDeadCode()) {
1883 return;
1885 curBlock_->add(
1886 MWasmInterruptCheck::New(alloc(), instancePointer_, bytecodeOffset()));
1889 // Perform a post-write barrier to update the generational store buffer. This
1890 // version will remove a previous store buffer entry if it is no longer
1891 // needed.
1892 [[nodiscard]] bool postBarrierPrecise(uint32_t lineOrBytecode,
1893 MDefinition* valueAddr,
1894 MDefinition* value) {
1895 return emitInstanceCall2(lineOrBytecode, SASigPostBarrierPrecise, valueAddr,
1896 value);
1899 // Perform a post-write barrier to update the generational store buffer. This
1900 // version will remove a previous store buffer entry if it is no longer
1901 // needed.
1902 [[nodiscard]] bool postBarrierPreciseWithOffset(uint32_t lineOrBytecode,
1903 MDefinition* valueBase,
1904 uint32_t valueOffset,
1905 MDefinition* value) {
1906 MDefinition* valueOffsetDef = constantI32(int32_t(valueOffset));
1907 if (!valueOffsetDef) {
1908 return false;
1910 return emitInstanceCall3(lineOrBytecode, SASigPostBarrierPreciseWithOffset,
1911 valueBase, valueOffsetDef, value);
1914 // Perform a post-write barrier to update the generational store buffer. This
1915 // version is the most efficient and only requires the address to store the
1916 // value and the new value. It does not remove a previous store buffer entry
1917 // if it is no longer needed; you must use a precise post-write barrier for
1918 // that.
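// (Illustrative note, not part of the original comment:) in this file the
// precise variants above are used when an existing cell is overwritten, as in
// storeGlobalVar() and tableSetAnyRef(), while emitThrow() further below uses
// this cheaper variant when filling in the fields of a freshly allocated
// exception object.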
1919 [[nodiscard]] bool postBarrier(uint32_t lineOrBytecode, MDefinition* object,
1920 MDefinition* valueBase, uint32_t valueOffset,
1921 MDefinition* newValue) {
1922 auto* barrier = MWasmPostWriteBarrier::New(
1923 alloc(), instancePointer_, object, valueBase, valueOffset, newValue);
1924 if (!barrier) {
1925 return false;
1927 curBlock_->add(barrier);
1928 return true;
1931 /***************************************************************** Calls */
1933 // The IonMonkey backend maintains a single stack offset (from the stack
1934 // pointer to the base of the frame) by adding the total amount of spill
1935 // space required plus the maximum stack required for argument passing.
1936 // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
1937 // manually accumulate, for the entire function, the maximum required stack
1938 // space for argument passing. (This is passed to the CodeGenerator via
1939 // MIRGenerator::maxWasmStackArgBytes.) This is just the maximum of the
1940 // stack space required for each individual call (as determined by the call
1941 // ABI).
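// A minimal illustration with hypothetical numbers: if a function body makes
// two calls whose outgoing arguments need 16 and 32 bytes of stack space
// respectively, finishCall() below records
//
//   maxStackArgBytes_ = max(max(0, 16), 32) = 32
//
// and that single figure is what reaches MIRGenerator::maxWasmStackArgBytes.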
1943 // Operations that modify a CallCompileState.
1945 [[nodiscard]] bool passInstance(MIRType instanceType,
1946 CallCompileState* args) {
1947 if (inDeadCode()) {
1948 return true;
1951 // Should only pass an instance once. And it must be a non-GC pointer.
1952 MOZ_ASSERT(args->instanceArg_ == ABIArg());
1953 MOZ_ASSERT(instanceType == MIRType::Pointer);
1954 args->instanceArg_ = args->abi_.next(MIRType::Pointer);
1955 return true;
1958 // Do not call this directly. Call one of the passArg() variants instead.
1959 [[nodiscard]] bool passArgWorker(MDefinition* argDef, MIRType type,
1960 CallCompileState* call) {
1961 ABIArg arg = call->abi_.next(type);
1962 switch (arg.kind()) {
1963 #ifdef JS_CODEGEN_REGISTER_PAIR
1964 case ABIArg::GPR_PAIR: {
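// On targets that pass a 64-bit argument in a pair of 32-bit registers,
// split the value into its low and high halves and assign one register of
// the pair to each half.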
1965 auto mirLow =
1966 MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ true);
1967 curBlock_->add(mirLow);
1968 auto mirHigh =
1969 MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ false);
1970 curBlock_->add(mirHigh);
1971 return call->regArgs_.append(
1972 MWasmCallBase::Arg(AnyRegister(arg.gpr64().low), mirLow)) &&
1973 call->regArgs_.append(
1974 MWasmCallBase::Arg(AnyRegister(arg.gpr64().high), mirHigh));
1976 #endif
1977 case ABIArg::GPR:
1978 case ABIArg::FPU:
1979 return call->regArgs_.append(MWasmCallBase::Arg(arg.reg(), argDef));
1980 case ABIArg::Stack: {
1981 auto* mir =
1982 MWasmStackArg::New(alloc(), arg.offsetFromArgBase(), argDef);
1983 curBlock_->add(mir);
1984 return true;
1986 case ABIArg::Uninitialized:
1987 MOZ_ASSERT_UNREACHABLE("Uninitialized ABIArg kind");
1989 MOZ_CRASH("Unknown ABIArg kind.");
1992 template <typename SpanT>
1993 [[nodiscard]] bool passArgs(const DefVector& argDefs, SpanT types,
1994 CallCompileState* call) {
1995 MOZ_ASSERT(argDefs.length() == types.size());
1996 for (uint32_t i = 0; i < argDefs.length(); i++) {
1997 MDefinition* def = argDefs[i];
1998 ValType type = types[i];
1999 if (!passArg(def, type, call)) {
2000 return false;
2003 return true;
2006 [[nodiscard]] bool passArg(MDefinition* argDef, MIRType type,
2007 CallCompileState* call) {
2008 if (inDeadCode()) {
2009 return true;
2011 return passArgWorker(argDef, type, call);
2014 [[nodiscard]] bool passArg(MDefinition* argDef, ValType type,
2015 CallCompileState* call) {
2016 if (inDeadCode()) {
2017 return true;
2019 return passArgWorker(argDef, type.toMIRType(), call);
2022 void markReturnCall(CallCompileState* call) { call->returnCall = true; }
2024 // If the call returns results on the stack, prepare a stack area to receive
2025 // them, and pass the address of the stack area to the callee as an additional
2026 // argument.
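// Illustrative example (hypothetical signature): for a callee returning three
// i64 values, the loop below first skips whatever results the ABI assigns to
// registers; each remaining result gets a StackResult slot in a fresh
// MWasmStackResultArea, and the area's address (or, for a return call, the
// caller's incoming stack result pointer) is passed as one extra pointer
// argument.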
2027 [[nodiscard]] bool passStackResultAreaCallArg(const ResultType& resultType,
2028 CallCompileState* call) {
2029 if (inDeadCode()) {
2030 return true;
2032 ABIResultIter iter(resultType);
2033 while (!iter.done() && iter.cur().inRegister()) {
2034 iter.next();
2036 if (iter.done()) {
2037 // No stack results.
2038 return true;
2041 auto* stackResultArea = MWasmStackResultArea::New(alloc());
2042 if (!stackResultArea) {
2043 return false;
2045 if (!stackResultArea->init(alloc(), iter.remaining())) {
2046 return false;
2048 for (uint32_t base = iter.index(); !iter.done(); iter.next()) {
2049 MWasmStackResultArea::StackResult loc(iter.cur().stackOffset(),
2050 iter.cur().type().toMIRType());
2051 stackResultArea->initResult(iter.index() - base, loc);
2053 curBlock_->add(stackResultArea);
2054 MDefinition* def = call->returnCall ? (MDefinition*)stackResultPointer_
2055 : (MDefinition*)stackResultArea;
2056 if (!passArg(def, MIRType::Pointer, call)) {
2057 return false;
2059 call->stackResultArea_ = stackResultArea;
2060 return true;
2063 [[nodiscard]] bool finishCall(CallCompileState* call) {
2064 if (inDeadCode()) {
2065 return true;
2068 if (!call->regArgs_.append(
2069 MWasmCallBase::Arg(AnyRegister(InstanceReg), instancePointer_))) {
2070 return false;
2073 uint32_t stackBytes = call->abi_.stackBytesConsumedSoFar();
2075 maxStackArgBytes_ = std::max(maxStackArgBytes_, stackBytes);
2076 return true;
2079 // Wrappers for creating various kinds of calls.
2081 [[nodiscard]] bool collectUnaryCallResult(MIRType type,
2082 MDefinition** result) {
2083 MInstruction* def;
2084 switch (type) {
2085 case MIRType::Int32:
2086 def = MWasmRegisterResult::New(alloc(), MIRType::Int32, ReturnReg);
2087 break;
2088 case MIRType::Int64:
2089 def = MWasmRegister64Result::New(alloc(), ReturnReg64);
2090 break;
2091 case MIRType::Float32:
2092 def = MWasmFloatRegisterResult::New(alloc(), type, ReturnFloat32Reg);
2093 break;
2094 case MIRType::Double:
2095 def = MWasmFloatRegisterResult::New(alloc(), type, ReturnDoubleReg);
2096 break;
2097 #ifdef ENABLE_WASM_SIMD
2098 case MIRType::Simd128:
2099 def = MWasmFloatRegisterResult::New(alloc(), type, ReturnSimd128Reg);
2100 break;
2101 #endif
2102 case MIRType::WasmAnyRef:
2103 def = MWasmRegisterResult::New(alloc(), MIRType::WasmAnyRef, ReturnReg);
2104 break;
2105 default:
2106 MOZ_CRASH("unexpected MIRType result for builtin call");
2109 if (!def) {
2110 return false;
2113 curBlock_->add(def);
2114 *result = def;
2116 return true;
2119 [[nodiscard]] bool collectCallResults(const ResultType& type,
2120 MWasmStackResultArea* stackResultArea,
2121 DefVector* results) {
2122 if (!results->reserve(type.length())) {
2123 return false;
2126 // The result iterator goes in the order in which results would be popped
2127 // off; we want the order in which they would be pushed.
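// For example, with results (r0, r1, r2) the iterator's forward order is the
// pop order (r2 first), so after counting the stack results we walk it in
// reverse (switchToPrev/prev) to append r0, r1, r2, the push order.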
2128 ABIResultIter iter(type);
2129 uint32_t stackResultCount = 0;
2130 while (!iter.done()) {
2131 if (iter.cur().onStack()) {
2132 stackResultCount++;
2134 iter.next();
2137 for (iter.switchToPrev(); !iter.done(); iter.prev()) {
2138 if (!mirGen().ensureBallast()) {
2139 return false;
2141 const ABIResult& result = iter.cur();
2142 MInstruction* def;
2143 if (result.inRegister()) {
2144 switch (result.type().kind()) {
2145 case wasm::ValType::I32:
2146 def =
2147 MWasmRegisterResult::New(alloc(), MIRType::Int32, result.gpr());
2148 break;
2149 case wasm::ValType::I64:
2150 def = MWasmRegister64Result::New(alloc(), result.gpr64());
2151 break;
2152 case wasm::ValType::F32:
2153 def = MWasmFloatRegisterResult::New(alloc(), MIRType::Float32,
2154 result.fpr());
2155 break;
2156 case wasm::ValType::F64:
2157 def = MWasmFloatRegisterResult::New(alloc(), MIRType::Double,
2158 result.fpr());
2159 break;
2160 case wasm::ValType::Ref:
2161 def = MWasmRegisterResult::New(alloc(), MIRType::WasmAnyRef,
2162 result.gpr());
2163 break;
2164 case wasm::ValType::V128:
2165 #ifdef ENABLE_WASM_SIMD
2166 def = MWasmFloatRegisterResult::New(alloc(), MIRType::Simd128,
2167 result.fpr());
2168 #else
2169 return this->iter().fail("Ion has no SIMD support yet");
2170 #endif
2172 } else {
2173 MOZ_ASSERT(stackResultArea);
2174 MOZ_ASSERT(stackResultCount);
2175 uint32_t idx = --stackResultCount;
2176 def = MWasmStackResult::New(alloc(), stackResultArea, idx);
2179 if (!def) {
2180 return false;
2182 curBlock_->add(def);
2183 results->infallibleAppend(def);
2186 MOZ_ASSERT(results->length() == type.length());
2188 return true;
2191 [[nodiscard]] bool catchableCall(const CallSiteDesc& desc,
2192 const CalleeDesc& callee,
2193 const MWasmCallBase::Args& args,
2194 const ArgTypeVector& argTypes,
2195 MDefinition* indexOrRef = nullptr) {
2196 MWasmCallTryDesc tryDesc;
2197 if (!beginTryCall(&tryDesc)) {
2198 return false;
2201 MInstruction* ins;
2202 if (tryDesc.inTry) {
2203 ins = MWasmCallCatchable::New(alloc(), desc, callee, args,
2204 StackArgAreaSizeUnaligned(argTypes),
2205 tryDesc, indexOrRef);
2206 } else {
2207 ins = MWasmCallUncatchable::New(alloc(), desc, callee, args,
2208 StackArgAreaSizeUnaligned(argTypes),
2209 indexOrRef);
2211 if (!ins) {
2212 return false;
2214 curBlock_->add(ins);
2216 return finishTryCall(&tryDesc);
2219 [[nodiscard]] bool callDirect(const FuncType& funcType, uint32_t funcIndex,
2220 uint32_t lineOrBytecode,
2221 const CallCompileState& call,
2222 DefVector* results) {
2223 MOZ_ASSERT(!inDeadCode());
2225 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Func);
2226 ResultType resultType = ResultType::Vector(funcType.results());
2227 auto callee = CalleeDesc::function(funcIndex);
2228 ArgTypeVector args(funcType);
2230 if (!catchableCall(desc, callee, call.regArgs_, args)) {
2231 return false;
2233 return collectCallResults(resultType, call.stackResultArea_, results);
2236 [[nodiscard]] bool returnCallDirect(const FuncType& funcType,
2237 uint32_t funcIndex,
2238 uint32_t lineOrBytecode,
2239 const CallCompileState& call,
2240 DefVector* results) {
2241 MOZ_ASSERT(!inDeadCode());
2243 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::ReturnFunc);
2244 auto callee = CalleeDesc::function(funcIndex);
2245 ArgTypeVector args(funcType);
2247 auto ins = MWasmReturnCall::New(alloc(), desc, callee, call.regArgs_,
2248 StackArgAreaSizeUnaligned(args), nullptr);
2249 if (!ins) {
2250 return false;
2252 curBlock_->end(ins);
2253 curBlock_ = nullptr;
2254 return true;
2257 [[nodiscard]] bool returnCallImport(unsigned globalDataOffset,
2258 uint32_t lineOrBytecode,
2259 const CallCompileState& call,
2260 const FuncType& funcType,
2261 DefVector* results) {
2262 MOZ_ASSERT(!inDeadCode());
2264 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Import);
2265 auto callee = CalleeDesc::import(globalDataOffset);
2266 ArgTypeVector args(funcType);
2268 auto* ins = MWasmReturnCall::New(alloc(), desc, callee, call.regArgs_,
2269 StackArgAreaSizeUnaligned(args), nullptr);
2270 if (!ins) {
2271 return false;
2273 curBlock_->end(ins);
2274 curBlock_ = nullptr;
2275 return true;
2278 [[nodiscard]] bool returnCallIndirect(uint32_t funcTypeIndex,
2279 uint32_t tableIndex, MDefinition* index,
2280 uint32_t lineOrBytecode,
2281 const CallCompileState& call,
2282 DefVector* results) {
2283 MOZ_ASSERT(!inDeadCode());
2285 const FuncType& funcType = (*moduleEnv_.types)[funcTypeIndex].funcType();
2286 CallIndirectId callIndirectId =
2287 CallIndirectId::forFuncType(moduleEnv_, funcTypeIndex);
2289 CalleeDesc callee;
2290 MOZ_ASSERT(callIndirectId.kind() != CallIndirectIdKind::AsmJS);
2291 const TableDesc& table = moduleEnv_.tables[tableIndex];
2292 callee =
2293 CalleeDesc::wasmTable(moduleEnv_, table, tableIndex, callIndirectId);
2295 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Indirect);
2296 ArgTypeVector args(funcType);
2298 auto* ins = MWasmReturnCall::New(alloc(), desc, callee, call.regArgs_,
2299 StackArgAreaSizeUnaligned(args), index);
2300 if (!ins) {
2301 return false;
2303 curBlock_->end(ins);
2304 curBlock_ = nullptr;
2305 return true;
2308 [[nodiscard]] bool callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
2309 MDefinition* index, uint32_t lineOrBytecode,
2310 const CallCompileState& call,
2311 DefVector* results) {
2312 MOZ_ASSERT(!inDeadCode());
2314 const FuncType& funcType = (*moduleEnv_.types)[funcTypeIndex].funcType();
2315 CallIndirectId callIndirectId =
2316 CallIndirectId::forFuncType(moduleEnv_, funcTypeIndex);
2318 CalleeDesc callee;
2319 if (moduleEnv_.isAsmJS()) {
2320 MOZ_ASSERT(tableIndex == 0);
2321 MOZ_ASSERT(callIndirectId.kind() == CallIndirectIdKind::AsmJS);
2322 uint32_t tableIndex = moduleEnv_.asmJSSigToTableIndex[funcTypeIndex];
2323 const TableDesc& table = moduleEnv_.tables[tableIndex];
2324 MOZ_ASSERT(IsPowerOfTwo(table.initialLength));
2326 MDefinition* mask = constantI32(int32_t(table.initialLength - 1));
2327 MBitAnd* maskedIndex = MBitAnd::New(alloc(), index, mask, MIRType::Int32);
2328 curBlock_->add(maskedIndex);
2330 index = maskedIndex;
2331 callee = CalleeDesc::asmJSTable(moduleEnv_, tableIndex);
2332 } else {
2333 MOZ_ASSERT(callIndirectId.kind() != CallIndirectIdKind::AsmJS);
2334 const TableDesc& table = moduleEnv_.tables[tableIndex];
2335 callee =
2336 CalleeDesc::wasmTable(moduleEnv_, table, tableIndex, callIndirectId);
2339 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Indirect);
2340 ArgTypeVector args(funcType);
2341 ResultType resultType = ResultType::Vector(funcType.results());
2343 if (!catchableCall(desc, callee, call.regArgs_, args, index)) {
2344 return false;
2346 return collectCallResults(resultType, call.stackResultArea_, results);
2349 [[nodiscard]] bool callImport(unsigned instanceDataOffset,
2350 uint32_t lineOrBytecode,
2351 const CallCompileState& call,
2352 const FuncType& funcType, DefVector* results) {
2353 MOZ_ASSERT(!inDeadCode());
2355 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Import);
2356 auto callee = CalleeDesc::import(instanceDataOffset);
2357 ArgTypeVector args(funcType);
2358 ResultType resultType = ResultType::Vector(funcType.results());
2360 if (!catchableCall(desc, callee, call.regArgs_, args)) {
2361 return false;
2363 return collectCallResults(resultType, call.stackResultArea_, results);
2366 [[nodiscard]] bool builtinCall(const SymbolicAddressSignature& builtin,
2367 uint32_t lineOrBytecode,
2368 const CallCompileState& call,
2369 MDefinition** def) {
2370 if (inDeadCode()) {
2371 *def = nullptr;
2372 return true;
2375 MOZ_ASSERT(builtin.failureMode == FailureMode::Infallible);
2377 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
2378 auto callee = CalleeDesc::builtin(builtin.identity);
2379 auto* ins = MWasmCallUncatchable::New(alloc(), desc, callee, call.regArgs_,
2380 StackArgAreaSizeUnaligned(builtin));
2381 if (!ins) {
2382 return false;
2385 curBlock_->add(ins);
2387 return collectUnaryCallResult(builtin.retType, def);
2390 [[nodiscard]] bool builtinInstanceMethodCall(
2391 const SymbolicAddressSignature& builtin, uint32_t lineOrBytecode,
2392 const CallCompileState& call, MDefinition** def = nullptr) {
2393 MOZ_ASSERT_IF(!def, builtin.retType == MIRType::None);
2394 if (inDeadCode()) {
2395 if (def) {
2396 *def = nullptr;
2398 return true;
2401 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
2402 MInstruction* ins;
2403 ins = MWasmCallUncatchable::NewBuiltinInstanceMethodCall(
2404 alloc(), desc, builtin.identity, builtin.failureMode, call.instanceArg_,
2405 call.regArgs_, StackArgAreaSizeUnaligned(builtin));
2406 if (!ins) {
2407 return false;
2409 curBlock_->add(ins);
2411 if (!def) {
2412 return true;
2414 return collectUnaryCallResult(builtin.retType, def);
2417 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
2418 [[nodiscard]] bool callRef(const FuncType& funcType, MDefinition* ref,
2419 uint32_t lineOrBytecode,
2420 const CallCompileState& call, DefVector* results) {
2421 MOZ_ASSERT(!inDeadCode());
2423 CalleeDesc callee = CalleeDesc::wasmFuncRef();
2425 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::FuncRef);
2426 ArgTypeVector args(funcType);
2427 ResultType resultType = ResultType::Vector(funcType.results());
2429 if (!catchableCall(desc, callee, call.regArgs_, args, ref)) {
2430 return false;
2432 return collectCallResults(resultType, call.stackResultArea_, results);
2435 # ifdef ENABLE_WASM_TAIL_CALLS
2436 [[nodiscard]] bool returnCallRef(const FuncType& funcType, MDefinition* ref,
2437 uint32_t lineOrBytecode,
2438 const CallCompileState& call,
2439 DefVector* results) {
2440 MOZ_ASSERT(!inDeadCode());
2442 CalleeDesc callee = CalleeDesc::wasmFuncRef();
2444 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::FuncRef);
2445 ArgTypeVector args(funcType);
2447 auto* ins = MWasmReturnCall::New(alloc(), desc, callee, call.regArgs_,
2448 StackArgAreaSizeUnaligned(args), ref);
2449 if (!ins) {
2450 return false;
2452 curBlock_->end(ins);
2453 curBlock_ = nullptr;
2454 return true;
2457 # endif // ENABLE_WASM_TAIL_CALLS
2459 #endif // ENABLE_WASM_FUNCTION_REFERENCES
2461 /*********************************************** Control flow generation */
2463 inline bool inDeadCode() const { return curBlock_ == nullptr; }
2465 [[nodiscard]] bool returnValues(const DefVector& values) {
2466 if (inDeadCode()) {
2467 return true;
2470 if (values.empty()) {
2471 curBlock_->end(MWasmReturnVoid::New(alloc(), instancePointer_));
2472 } else {
2473 ResultType resultType = ResultType::Vector(funcType().results());
2474 ABIResultIter iter(resultType);
2475 // Switch to iterate in FIFO order instead of the default LIFO.
2476 while (!iter.done()) {
2477 iter.next();
2479 iter.switchToPrev();
2480 for (uint32_t i = 0; !iter.done(); iter.prev(), i++) {
2481 if (!mirGen().ensureBallast()) {
2482 return false;
2484 const ABIResult& result = iter.cur();
2485 if (result.onStack()) {
2486 MOZ_ASSERT(iter.remaining() > 1);
2487 if (result.type().isRefRepr()) {
2488 auto* store = MWasmStoreRef::New(
2489 alloc(), instancePointer_, stackResultPointer_,
2490 result.stackOffset(), values[i], AliasSet::WasmStackResult,
2491 WasmPreBarrierKind::None);
2492 curBlock_->add(store);
2493 } else {
2494 auto* store = MWasmStoreStackResult::New(
2495 alloc(), stackResultPointer_, result.stackOffset(), values[i]);
2496 curBlock_->add(store);
2498 } else {
2499 MOZ_ASSERT(iter.remaining() == 1);
2500 MOZ_ASSERT(i + 1 == values.length());
2501 curBlock_->end(
2502 MWasmReturn::New(alloc(), values[i], instancePointer_));
2506 curBlock_ = nullptr;
2507 return true;
2510 void unreachableTrap() {
2511 if (inDeadCode()) {
2512 return;
2515 auto* ins =
2516 MWasmTrap::New(alloc(), wasm::Trap::Unreachable, bytecodeOffset());
2517 curBlock_->end(ins);
2518 curBlock_ = nullptr;
2521 private:
2522 static uint32_t numPushed(MBasicBlock* block) {
2523 return block->stackDepth() - block->info().firstStackSlot();
2526 public:
2527 [[nodiscard]] bool pushDefs(const DefVector& defs) {
2528 if (inDeadCode()) {
2529 return true;
2531 MOZ_ASSERT(numPushed(curBlock_) == 0);
2532 if (!curBlock_->ensureHasSlots(defs.length())) {
2533 return false;
2535 for (MDefinition* def : defs) {
2536 MOZ_ASSERT(def->type() != MIRType::None);
2537 curBlock_->push(def);
2539 return true;
2542 [[nodiscard]] bool popPushedDefs(DefVector* defs) {
2543 size_t n = numPushed(curBlock_);
2544 if (!defs->resizeUninitialized(n)) {
2545 return false;
2547 for (; n > 0; n--) {
2548 MDefinition* def = curBlock_->pop();
2549 MOZ_ASSERT(def->type() != MIRType::Value);
2550 (*defs)[n - 1] = def;
2552 return true;
2555 private:
2556 [[nodiscard]] bool addJoinPredecessor(const DefVector& defs,
2557 MBasicBlock** joinPred) {
2558 *joinPred = curBlock_;
2559 if (inDeadCode()) {
2560 return true;
2562 return pushDefs(defs);
2565 public:
2566 [[nodiscard]] bool branchAndStartThen(MDefinition* cond,
2567 MBasicBlock** elseBlock) {
2568 if (inDeadCode()) {
2569 *elseBlock = nullptr;
2570 } else {
2571 MBasicBlock* thenBlock;
2572 if (!newBlock(curBlock_, &thenBlock)) {
2573 return false;
2575 if (!newBlock(curBlock_, elseBlock)) {
2576 return false;
2579 curBlock_->end(MTest::New(alloc(), cond, thenBlock, *elseBlock));
2581 curBlock_ = thenBlock;
2582 mirGraph().moveBlockToEnd(curBlock_);
2585 return startBlock();
2588 [[nodiscard]] bool switchToElse(MBasicBlock* elseBlock,
2589 MBasicBlock** thenJoinPred) {
2590 DefVector values;
2591 if (!finishBlock(&values)) {
2592 return false;
2595 if (!elseBlock) {
2596 *thenJoinPred = nullptr;
2597 } else {
2598 if (!addJoinPredecessor(values, thenJoinPred)) {
2599 return false;
2602 curBlock_ = elseBlock;
2603 mirGraph().moveBlockToEnd(curBlock_);
2606 return startBlock();
2609 [[nodiscard]] bool joinIfElse(MBasicBlock* thenJoinPred, DefVector* defs) {
2610 DefVector values;
2611 if (!finishBlock(&values)) {
2612 return false;
2615 if (!thenJoinPred && inDeadCode()) {
2616 return true;
2619 MBasicBlock* elseJoinPred;
2620 if (!addJoinPredecessor(values, &elseJoinPred)) {
2621 return false;
2624 mozilla::Array<MBasicBlock*, 2> blocks;
2625 size_t numJoinPreds = 0;
2626 if (thenJoinPred) {
2627 blocks[numJoinPreds++] = thenJoinPred;
2629 if (elseJoinPred) {
2630 blocks[numJoinPreds++] = elseJoinPred;
2633 if (numJoinPreds == 0) {
2634 return true;
2637 MBasicBlock* join;
2638 if (!goToNewBlock(blocks[0], &join)) {
2639 return false;
2641 for (size_t i = 1; i < numJoinPreds; ++i) {
2642 if (!goToExistingBlock(blocks[i], join)) {
2643 return false;
2647 curBlock_ = join;
2648 return popPushedDefs(defs);
2651 [[nodiscard]] bool startBlock() {
2652 MOZ_ASSERT_IF(blockDepth_ < blockPatches_.length(),
2653 blockPatches_[blockDepth_].empty());
2654 blockDepth_++;
2655 return true;
2658 [[nodiscard]] bool finishBlock(DefVector* defs) {
2659 MOZ_ASSERT(blockDepth_);
2660 uint32_t topLabel = --blockDepth_;
2661 return bindBranches(topLabel, defs);
2664 [[nodiscard]] bool startLoop(MBasicBlock** loopHeader, size_t paramCount) {
2665 *loopHeader = nullptr;
2667 blockDepth_++;
2668 loopDepth_++;
2670 if (inDeadCode()) {
2671 return true;
2674 // Create the loop header.
2675 MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_ - 1);
2676 *loopHeader = MBasicBlock::New(mirGraph(), info(), curBlock_,
2677 MBasicBlock::PENDING_LOOP_HEADER);
2678 if (!*loopHeader) {
2679 return false;
2682 (*loopHeader)->setLoopDepth(loopDepth_);
2683 mirGraph().addBlock(*loopHeader);
2684 curBlock_->end(MGoto::New(alloc(), *loopHeader));
2686 DefVector loopParams;
2687 if (!iter().getResults(paramCount, &loopParams)) {
2688 return false;
2690 for (size_t i = 0; i < paramCount; i++) {
2691 MPhi* phi = MPhi::New(alloc(), loopParams[i]->type());
2692 if (!phi) {
2693 return false;
2695 if (!phi->reserveLength(2)) {
2696 return false;
2698 (*loopHeader)->addPhi(phi);
2699 phi->addInput(loopParams[i]);
2700 loopParams[i] = phi;
2702 iter().setResults(paramCount, loopParams);
2704 MBasicBlock* body;
2705 if (!goToNewBlock(*loopHeader, &body)) {
2706 return false;
2708 curBlock_ = body;
2709 return true;
2712 private:
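// Rewrite any block slot that refers to a phi flagged as unused by
// setLoopBackedge so that it refers to that phi's first operand instead.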
2713 void fixupRedundantPhis(MBasicBlock* b) {
2714 for (size_t i = 0, depth = b->stackDepth(); i < depth; i++) {
2715 MDefinition* def = b->getSlot(i);
2716 if (def->isUnused()) {
2717 b->setSlot(i, def->toPhi()->getOperand(0));
2722 [[nodiscard]] bool setLoopBackedge(MBasicBlock* loopEntry,
2723 MBasicBlock* loopBody,
2724 MBasicBlock* backedge, size_t paramCount) {
2725 if (!loopEntry->setBackedgeWasm(backedge, paramCount)) {
2726 return false;
2729 // Flag all redundant phis as unused.
2730 for (MPhiIterator phi = loopEntry->phisBegin(); phi != loopEntry->phisEnd();
2731 phi++) {
2732 MOZ_ASSERT(phi->numOperands() == 2);
2733 if (phi->getOperand(0) == phi->getOperand(1)) {
2734 phi->setUnused();
2738 // Fix up phis stored in the slots Vector of pending blocks.
2739 for (ControlFlowPatchVector& patches : blockPatches_) {
2740 for (ControlFlowPatch& p : patches) {
2741 MBasicBlock* block = p.ins->block();
2742 if (block->loopDepth() >= loopEntry->loopDepth()) {
2743 fixupRedundantPhis(block);
2748 // The loop body, if any, might be referencing recycled phis too.
2749 if (loopBody) {
2750 fixupRedundantPhis(loopBody);
2753 // Pending jumps to an enclosing try-catch may reference the recycled phis.
2754 // We have to search above all enclosing try blocks, as a delegate may move
2755 // patches around.
2756 for (uint32_t depth = 0; depth < iter().controlStackDepth(); depth++) {
2757 LabelKind kind = iter().controlKind(depth);
2758 if (kind != LabelKind::Try && kind != LabelKind::Body) {
2759 continue;
2761 Control& control = iter().controlItem(depth);
2762 if (!control.tryControl) {
2763 continue;
2765 for (MControlInstruction* patch : control.tryControl->landingPadPatches) {
2766 MBasicBlock* block = patch->block();
2767 if (block->loopDepth() >= loopEntry->loopDepth()) {
2768 fixupRedundantPhis(block);
2772 for (MControlInstruction* patch : bodyDelegatePadPatches_) {
2773 MBasicBlock* block = patch->block();
2774 if (block->loopDepth() >= loopEntry->loopDepth()) {
2775 fixupRedundantPhis(block);
2779 // Discard redundant phis and add to the free list.
2780 for (MPhiIterator phi = loopEntry->phisBegin();
2781 phi != loopEntry->phisEnd();) {
2782 MPhi* entryDef = *phi++;
2783 if (!entryDef->isUnused()) {
2784 continue;
2787 entryDef->justReplaceAllUsesWith(entryDef->getOperand(0));
2788 loopEntry->discardPhi(entryDef);
2789 mirGraph().addPhiToFreeList(entryDef);
2792 return true;
2795 public:
2796 [[nodiscard]] bool closeLoop(MBasicBlock* loopHeader,
2797 DefVector* loopResults) {
2798 MOZ_ASSERT(blockDepth_ >= 1);
2799 MOZ_ASSERT(loopDepth_);
2801 uint32_t headerLabel = blockDepth_ - 1;
2803 if (!loopHeader) {
2804 MOZ_ASSERT(inDeadCode());
2805 MOZ_ASSERT(headerLabel >= blockPatches_.length() ||
2806 blockPatches_[headerLabel].empty());
2807 blockDepth_--;
2808 loopDepth_--;
2809 return true;
2812 // Op::Loop doesn't have an implicit backedge so temporarily set
2813 // aside the end of the loop body to bind backedges.
2814 MBasicBlock* loopBody = curBlock_;
2815 curBlock_ = nullptr;
2817 // As explained in bug 1253544, Ion apparently has an invariant that
2818 // there is only one backedge to loop headers. To handle wasm's ability
2819 // to have multiple backedges to the same loop header, we bind all those
2820 // branches as forward jumps to a single backward jump. This is
2821 // unfortunate but the optimizer is able to fold these into single jumps
2822 // to backedges.
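// An illustrative (hypothetical) loop with two branches back to its header,
// assuming $f returns an i32 condition:
//
//   (loop $l
//     (br_if $l (call $f))  ;; backedge 1
//     (br $l))              ;; backedge 2
//
// bindBranches below binds both branches as forward jumps into the single
// block that ends with the real backedge.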
2823 DefVector backedgeValues;
2824 if (!bindBranches(headerLabel, &backedgeValues)) {
2825 return false;
2828 MOZ_ASSERT(loopHeader->loopDepth() == loopDepth_);
2830 if (curBlock_) {
2831 // We're on the loop backedge block, created by bindBranches.
2832 for (size_t i = 0, n = numPushed(curBlock_); i != n; i++) {
2833 curBlock_->pop();
2836 if (!pushDefs(backedgeValues)) {
2837 return false;
2840 MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_);
2841 curBlock_->end(MGoto::New(alloc(), loopHeader));
2842 if (!setLoopBackedge(loopHeader, loopBody, curBlock_,
2843 backedgeValues.length())) {
2844 return false;
2848 curBlock_ = loopBody;
2850 loopDepth_--;
2852 // If the block's loop depth is still that of the inner loop body, correct it.
2853 if (curBlock_ && curBlock_->loopDepth() != loopDepth_) {
2854 MBasicBlock* out;
2855 if (!goToNewBlock(curBlock_, &out)) {
2856 return false;
2858 curBlock_ = out;
2861 blockDepth_ -= 1;
2862 return inDeadCode() || popPushedDefs(loopResults);
2865 [[nodiscard]] bool addControlFlowPatch(MControlInstruction* ins,
2866 uint32_t relative, uint32_t index) {
2867 MOZ_ASSERT(relative < blockDepth_);
2868 uint32_t absolute = blockDepth_ - 1 - relative;
2870 if (absolute >= blockPatches_.length() &&
2871 !blockPatches_.resize(absolute + 1)) {
2872 return false;
2875 return blockPatches_[absolute].append(ControlFlowPatch(ins, index));
2878 [[nodiscard]] bool br(uint32_t relativeDepth, const DefVector& values) {
2879 if (inDeadCode()) {
2880 return true;
2883 MGoto* jump = MGoto::New(alloc());
2884 if (!addControlFlowPatch(jump, relativeDepth, MGoto::TargetIndex)) {
2885 return false;
2888 if (!pushDefs(values)) {
2889 return false;
2892 curBlock_->end(jump);
2893 curBlock_ = nullptr;
2894 return true;
2897 [[nodiscard]] bool brIf(uint32_t relativeDepth, const DefVector& values,
2898 MDefinition* condition) {
2899 if (inDeadCode()) {
2900 return true;
2903 MBasicBlock* joinBlock = nullptr;
2904 if (!newBlock(curBlock_, &joinBlock)) {
2905 return false;
2908 MTest* test = MTest::New(alloc(), condition, nullptr, joinBlock);
2909 if (!addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
2910 return false;
2913 if (!pushDefs(values)) {
2914 return false;
2917 curBlock_->end(test);
2918 curBlock_ = joinBlock;
2919 return true;
2922 [[nodiscard]] bool brTable(MDefinition* operand, uint32_t defaultDepth,
2923 const Uint32Vector& depths,
2924 const DefVector& values) {
2925 if (inDeadCode()) {
2926 return true;
2929 size_t numCases = depths.length();
2930 MOZ_ASSERT(numCases <= INT32_MAX);
2931 MOZ_ASSERT(numCases);
2933 MTableSwitch* table =
2934 MTableSwitch::New(alloc(), operand, 0, int32_t(numCases - 1));
2936 size_t defaultIndex;
2937 if (!table->addDefault(nullptr, &defaultIndex)) {
2938 return false;
2940 if (!addControlFlowPatch(table, defaultDepth, defaultIndex)) {
2941 return false;
2944 using IndexToCaseMap =
2945 HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;
2947 IndexToCaseMap indexToCase;
2948 if (!indexToCase.put(defaultDepth, defaultIndex)) {
2949 return false;
2952 for (size_t i = 0; i < numCases; i++) {
2953 if (!mirGen_.ensureBallast()) {
2954 return false;
2957 uint32_t depth = depths[i];
2959 size_t caseIndex;
2960 IndexToCaseMap::AddPtr p = indexToCase.lookupForAdd(depth);
2961 if (!p) {
2962 if (!table->addSuccessor(nullptr, &caseIndex)) {
2963 return false;
2965 if (!addControlFlowPatch(table, depth, caseIndex)) {
2966 return false;
2968 if (!indexToCase.add(p, depth, caseIndex)) {
2969 return false;
2971 } else {
2972 caseIndex = p->value();
2975 if (!table->addCase(caseIndex)) {
2976 return false;
2980 if (!pushDefs(values)) {
2981 return false;
2984 curBlock_->end(table);
2985 curBlock_ = nullptr;
2987 return true;
2990 /********************************************************** Exceptions ***/
2992 bool inTryBlockFrom(uint32_t fromRelativeDepth, uint32_t* relativeDepth) {
2993 return iter().controlFindInnermostFrom(
2994 [](LabelKind kind, const Control& control) {
2995 return control.tryControl != nullptr && control.tryControl->inBody;
2997 fromRelativeDepth, relativeDepth);
3000 bool inTryBlock(uint32_t* relativeDepth) {
3001 return inTryBlockFrom(0, relativeDepth);
3004 bool inTryCode() {
3005 uint32_t relativeDepth;
3006 return inTryBlock(&relativeDepth);
3009 MDefinition* loadTag(uint32_t tagIndex) {
3010 MWasmLoadInstanceDataField* tag = MWasmLoadInstanceDataField::New(
3011 alloc(), MIRType::WasmAnyRef,
3012 moduleEnv_.offsetOfTagInstanceData(tagIndex), true, instancePointer_);
3013 curBlock_->add(tag);
3014 return tag;
3017 void loadPendingExceptionState(MInstruction** exception, MInstruction** tag) {
3018 *exception = MWasmLoadInstance::New(
3019 alloc(), instancePointer_, wasm::Instance::offsetOfPendingException(),
3020 MIRType::WasmAnyRef, AliasSet::Load(AliasSet::WasmPendingException));
3021 curBlock_->add(*exception);
3023 *tag = MWasmLoadInstance::New(
3024 alloc(), instancePointer_,
3025 wasm::Instance::offsetOfPendingExceptionTag(), MIRType::WasmAnyRef,
3026 AliasSet::Load(AliasSet::WasmPendingException));
3027 curBlock_->add(*tag);
3030 [[nodiscard]] bool setPendingExceptionState(MDefinition* exception,
3031 MDefinition* tag) {
3032 // Set the pending exception object
3033 auto* exceptionAddr = MWasmDerivedPointer::New(
3034 alloc(), instancePointer_, Instance::offsetOfPendingException());
3035 curBlock_->add(exceptionAddr);
3036 auto* setException = MWasmStoreRef::New(
3037 alloc(), instancePointer_, exceptionAddr, /*valueOffset=*/0, exception,
3038 AliasSet::WasmPendingException, WasmPreBarrierKind::Normal);
3039 curBlock_->add(setException);
3040 if (!postBarrierPrecise(/*lineOrBytecode=*/0, exceptionAddr, exception)) {
3041 return false;
3044 // Set the pending exception tag object
3045 auto* exceptionTagAddr = MWasmDerivedPointer::New(
3046 alloc(), instancePointer_, Instance::offsetOfPendingExceptionTag());
3047 curBlock_->add(exceptionTagAddr);
3048 auto* setExceptionTag = MWasmStoreRef::New(
3049 alloc(), instancePointer_, exceptionTagAddr, /*valueOffset=*/0, tag,
3050 AliasSet::WasmPendingException, WasmPreBarrierKind::Normal);
3051 curBlock_->add(setExceptionTag);
3052 return postBarrierPrecise(/*lineOrBytecode=*/0, exceptionTagAddr, tag);
3055 [[nodiscard]] bool addPadPatch(MControlInstruction* ins,
3056 size_t relativeTryDepth) {
3057 Control& control = iter().controlItem(relativeTryDepth);
3058 return control.tryControl->landingPadPatches.emplaceBack(ins);
3061 [[nodiscard]] bool endWithPadPatch(uint32_t relativeTryDepth) {
3062 MGoto* jumpToLandingPad = MGoto::New(alloc());
3063 curBlock_->end(jumpToLandingPad);
3064 return addPadPatch(jumpToLandingPad, relativeTryDepth);
3067 [[nodiscard]] bool delegatePadPatches(const ControlInstructionVector& patches,
3068 uint32_t relativeDepth) {
3069 if (patches.empty()) {
3070 return true;
3073 // Find where we are delegating the pad patches to.
3074 ControlInstructionVector* targetPatches;
3075 uint32_t targetRelativeDepth;
3076 if (inTryBlockFrom(relativeDepth, &targetRelativeDepth)) {
3077 targetPatches = &iter()
3078 .controlItem(targetRelativeDepth)
3079 .tryControl->landingPadPatches;
3080 } else {
3081 MOZ_ASSERT(relativeDepth <= blockDepth_ - 1);
3082 targetPatches = &bodyDelegatePadPatches_;
3085 // Append the delegate's pad patches to the target's.
3086 for (MControlInstruction* ins : patches) {
3087 if (!targetPatches->emplaceBack(ins)) {
3088 return false;
3091 return true;
3094 [[nodiscard]] bool beginTryCall(MWasmCallTryDesc* call) {
3095 call->inTry = inTryBlock(&call->relativeTryDepth);
3096 if (!call->inTry) {
3097 return true;
3099 // Allocate a try note
3100 if (!tryNotes_.append(wasm::TryNote())) {
3101 return false;
3103 call->tryNoteIndex = tryNotes_.length() - 1;
3104 // Allocate blocks for fallthrough and exceptions
3105 return newBlock(curBlock_, &call->fallthroughBlock) &&
3106 newBlock(curBlock_, &call->prePadBlock);
3109 [[nodiscard]] bool finishTryCall(MWasmCallTryDesc* call) {
3110 if (!call->inTry) {
3111 return true;
3114 // Switch to the prePadBlock
3115 MBasicBlock* callBlock = curBlock_;
3116 curBlock_ = call->prePadBlock;
3118 // Mark this as the landing pad for the call
3119 curBlock_->add(
3120 MWasmCallLandingPrePad::New(alloc(), callBlock, call->tryNoteIndex));
3122 // End with a pending jump to the landing pad
3123 if (!endWithPadPatch(call->relativeTryDepth)) {
3124 return false;
3127 // Compilation continues in the fallthroughBlock.
3128 curBlock_ = call->fallthroughBlock;
3129 return true;
3132 // Create a landing pad for a try block if there are any throwing
3133 // instructions. This is also used for the implicit rethrow landing pad used
3134 // for delegate instructions that target the outermost label.
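// Illustrative (hypothetical) example: for
//
//   (try (do (call $f)) (catch $tag ...))
//
// the call inside the try is compiled via beginTryCall/finishTryCall, which
// leaves one pending pad patch; this function then binds that patch (and any
// others) to a fresh landing pad block whose slots are populated by
// setupLandingPadSlots below.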
3135 [[nodiscard]] bool createTryLandingPadIfNeeded(
3136 ControlInstructionVector& landingPadPatches, MBasicBlock** landingPad) {
3137 // If there are no pad-patches for this try control, it means there are no
3138 // instructions in the try code that could throw an exception. In this
3139 // case, all the catches are dead code, and the try code ends up equivalent
3140 // to a plain wasm block.
3141 if (landingPadPatches.empty()) {
3142 *landingPad = nullptr;
3143 return true;
3146 // Otherwise, if there are (pad-) branches from places in the try code that
3147 // may throw an exception, bind these branches to a new landing pad
3148 // block. This is done similarly to what is done in bindBranches.
3149 MControlInstruction* ins = landingPadPatches[0];
3150 MBasicBlock* pred = ins->block();
3151 if (!newBlock(pred, landingPad)) {
3152 return false;
3154 ins->replaceSuccessor(0, *landingPad);
3155 for (size_t i = 1; i < landingPadPatches.length(); i++) {
3156 ins = landingPadPatches[i];
3157 pred = ins->block();
3158 if (!(*landingPad)->addPredecessor(alloc(), pred)) {
3159 return false;
3161 ins->replaceSuccessor(0, *landingPad);
3164 // Set up the slots in the landing pad block.
3165 if (!setupLandingPadSlots(landingPad)) {
3166 return false;
3169 // Clear the now bound pad patches.
3170 landingPadPatches.clear();
3171 return true;
3174 [[nodiscard]] bool createTryTableLandingPad(TryControl* tryControl) {
3175 MBasicBlock* landingPad;
3176 if (!createTryLandingPadIfNeeded(tryControl->landingPadPatches,
3177 &landingPad)) {
3178 return false;
3181 // If no landing pad was created, no exceptions can possibly be thrown
3182 // and we don't need to do anything here.
3183 if (!landingPad) {
3184 return true;
3187 MBasicBlock* originalBlock = curBlock_;
3188 curBlock_ = landingPad;
3190 bool hadCatchAll = false;
3191 for (const TryTableCatch& tryTableCatch : tryControl->catches) {
3192 MOZ_ASSERT(numPushed(curBlock_) == 2);
3194 // Handle a catch_all by jumping to the target block
3195 if (tryTableCatch.tagIndex == CatchAllIndex) {
3196 // Get the exception from the slots we pushed when adding
3197 // control flow patches.
3198 curBlock_->pop();
3199 MDefinition* exception = curBlock_->pop();
3201 // Capture the exnref value if we need to
3202 DefVector values;
3203 if (tryTableCatch.captureExnRef && !values.append(exception)) {
3204 return false;
3207 // Branch to the catch_all code
3208 if (!br(tryTableCatch.labelRelativeDepth, values)) {
3209 return false;
3212 // Break from the loop and skip the implicit rethrow that's needed
3213 // if we didn't have a catch_all
3214 hadCatchAll = true;
3215 break;
3218 // Handle a tagged catch by doing a compare and branch on the tag index,
3219 // jumping to a catch block if they match, or else to a fallthrough block
3220 // to continue the landing pad.
3221 MBasicBlock* catchBlock = nullptr;
3222 MBasicBlock* fallthroughBlock = nullptr;
3223 if (!newBlock(curBlock_, &catchBlock) ||
3224 !newBlock(curBlock_, &fallthroughBlock)) {
3225 return false;
3228 // Get the exception and its tag from the slots we pushed when adding
3229 // control flow patches.
3230 MDefinition* exceptionTag = curBlock_->pop();
3231 curBlock_->pop();
3233 // Branch to the catch block if the exception's tag matches this catch
3234 // block's tag.
3235 MDefinition* catchTag = loadTag(tryTableCatch.tagIndex);
3236 MDefinition* matchesCatchTag = compare(exceptionTag, catchTag, JSOp::Eq,
3237 MCompare::Compare_WasmAnyRef);
3238 curBlock_->end(
3239 MTest::New(alloc(), matchesCatchTag, catchBlock, fallthroughBlock));
3241 // Set up the catch block by extracting the values from the exception
3242 // object.
3243 curBlock_ = catchBlock;
3245 // Remove the tag and exception slots from the block; they are no
3246 // longer necessary.
3247 curBlock_->pop();
3248 MDefinition* exception = curBlock_->pop();
3249 MOZ_ASSERT(numPushed(curBlock_) == 0);
3251 // Extract the exception values for the catch block
3252 DefVector values;
3253 if (!loadExceptionValues(exception, tryTableCatch.tagIndex, &values)) {
3254 return false;
3256 if (tryTableCatch.captureExnRef && !values.append(exception)) {
3257 return false;
3260 if (!br(tryTableCatch.labelRelativeDepth, values)) {
3261 return false;
3264 curBlock_ = fallthroughBlock;
3267 // If there was no catch_all, we must rethrow this exception.
3268 if (!hadCatchAll) {
3269 MOZ_ASSERT(numPushed(curBlock_) == 2);
3270 MDefinition* tag = curBlock_->pop();
3271 MDefinition* exception = curBlock_->pop();
3272 MOZ_ASSERT(numPushed(curBlock_) == 0);
3274 if (!throwFrom(exception, tag)) {
3275 return false;
3279 curBlock_ = originalBlock;
3280 return true;
3283 // Consume the pending exception state from instance, and set up the slots
3284 // of the landing pad with the exception state.
3285 [[nodiscard]] bool setupLandingPadSlots(MBasicBlock** landingPad) {
3286 MBasicBlock* prevBlock = curBlock_;
3287 curBlock_ = *landingPad;
3289 // Load the pending exception and tag
3290 MInstruction* exception;
3291 MInstruction* tag;
3292 loadPendingExceptionState(&exception, &tag);
3294 // Clear the pending exception and tag
3295 auto* null = constantNullRef();
3296 if (!setPendingExceptionState(null, null)) {
3297 return false;
3300 // Push the exception and its tag on the stack to make them available
3301 // to the landing pad blocks.
3302 if (!curBlock_->ensureHasSlots(2)) {
3303 return false;
3305 curBlock_->push(exception);
3306 curBlock_->push(tag);
3307 *landingPad = curBlock_;
3309 curBlock_ = prevBlock;
3310 return true;
3313 [[nodiscard]] bool startTry() {
3314 Control& control = iter().controlItem();
3315 control.block = curBlock_;
3316 control.tryControl = newTryControl();
3317 if (!control.tryControl) {
3318 return false;
3320 control.tryControl->inBody = true;
3321 return startBlock();
3324 [[nodiscard]] bool startTryTable(TryTableCatchVector&& catches) {
3325 Control& control = iter().controlItem();
3326 control.block = curBlock_;
3327 control.tryControl = newTryControl();
3328 if (!control.tryControl) {
3329 return false;
3331 control.tryControl->inBody = true;
3332 control.tryControl->catches = std::move(catches);
3333 return startBlock();
3336 [[nodiscard]] bool joinTryOrCatchBlock(Control& control) {
3337 // If the try or catch block ended with dead code, there is no need to
3338 // do any control flow join.
3339 if (inDeadCode()) {
3340 return true;
3343 // This is a split path which we'll need to join later, using a control
3344 // flow patch.
3345 MOZ_ASSERT(!curBlock_->hasLastIns());
3346 MGoto* jump = MGoto::New(alloc());
3347 if (!addControlFlowPatch(jump, 0, MGoto::TargetIndex)) {
3348 return false;
3351 // Finish the current block with the control flow patch instruction.
3352 curBlock_->end(jump);
3353 return true;
3356 // Finish the previous block (either a try or catch block) and then setup a
3357 // new catch block.
3358 [[nodiscard]] bool switchToCatch(Control& control, LabelKind fromKind,
3359 uint32_t tagIndex) {
3360 // Mark this control node as being no longer in the body of the try
3361 control.tryControl->inBody = false;
3363 // If there is no control block, then either:
3364 // - the entry of the try block is dead code, or
3365 // - there is no landing pad for the try-catch.
3366 // In either case, any catch will be dead code.
3367 if (!control.block) {
3368 MOZ_ASSERT(inDeadCode());
3369 return true;
3372 // Join the previous try or catch block with a patch to the future join of
3373 // the whole try-catch block.
3374 if (!joinTryOrCatchBlock(control)) {
3375 return false;
3378 // If we are switching from the try block, create the landing pad. This is
3379 // guaranteed to happen once and only once before processing catch blocks.
3380 if (fromKind == LabelKind::Try) {
3381 MBasicBlock* padBlock = nullptr;
3382 if (!createTryLandingPadIfNeeded(control.tryControl->landingPadPatches,
3383 &padBlock)) {
3384 return false;
3386 // Set the control block for this try-catch to the landing pad.
3387 control.block = padBlock;
3390 // If there is no landing pad, then this and following catches are dead
3391 // code.
3392 if (!control.block) {
3393 curBlock_ = nullptr;
3394 return true;
3397 // Switch to the landing pad.
3398 curBlock_ = control.block;
3400 // Handle a catch_all by immediately jumping to a new block. We require a
3401 // new block (as opposed to just emitting the catch_all code in the current
3402 // block) because rethrow requires the exception/tag to be present in the
3403 // landing pad's slots, while the catch_all block must not have the
3404 // exception/tag in slots.
3405 if (tagIndex == CatchAllIndex) {
3406 MBasicBlock* catchAllBlock = nullptr;
3407 if (!goToNewBlock(curBlock_, &catchAllBlock)) {
3408 return false;
3410 // Compilation will continue in the catch_all block.
3411 curBlock_ = catchAllBlock;
3412 // Remove the tag and exception slots from the block; they are no
3413 // longer necessary.
3414 curBlock_->pop();
3415 curBlock_->pop();
3416 return true;
3419 // Handle a tagged catch by doing a compare and branch on the tag index,
3420 // jumping to a catch block if they match, or else to a fallthrough block
3421 // to continue the landing pad.
3422 MBasicBlock* catchBlock = nullptr;
3423 MBasicBlock* fallthroughBlock = nullptr;
3424 if (!newBlock(curBlock_, &catchBlock) ||
3425 !newBlock(curBlock_, &fallthroughBlock)) {
3426 return false;
3429 // Get the exception and its tag from the slots we pushed when adding
3430 // control flow patches.
3431 MDefinition* exceptionTag = curBlock_->pop();
3432 MDefinition* exception = curBlock_->pop();
3434 // Branch to the catch block if the exception's tag matches this catch
3435 // block's tag.
3436 MDefinition* catchTag = loadTag(tagIndex);
3437 MDefinition* matchesCatchTag =
3438 compare(exceptionTag, catchTag, JSOp::Eq, MCompare::Compare_WasmAnyRef);
3439 curBlock_->end(
3440 MTest::New(alloc(), matchesCatchTag, catchBlock, fallthroughBlock));
3442 // The landing pad will continue in the fallthrough block
3443 control.block = fallthroughBlock;
3445 // Set up the catch block by extracting the values from the exception
3446 // object.
3447 curBlock_ = catchBlock;
3449 // Remove the tag and exception slots from the block; they are no
3450 // longer necessary.
3451 curBlock_->pop();
3452 exception = curBlock_->pop();
3454 // Extract the exception values for the catch block
3455 DefVector values;
3456 if (!loadExceptionValues(exception, tagIndex, &values)) {
3457 return false;
3459 iter().setResults(values.length(), values);
3460 return true;
3463 [[nodiscard]] bool loadExceptionValues(MDefinition* exception,
3464 uint32_t tagIndex, DefVector* values) {
3465 SharedTagType tagType = moduleEnv().tags[tagIndex].type;
3466 const ValTypeVector& params = tagType->argTypes();
3467 const TagOffsetVector& offsets = tagType->argOffsets();
3469 // Get the data pointer from the exception object
3470 auto* data = MWasmLoadField::New(
3471 alloc(), exception, WasmExceptionObject::offsetOfData(),
3472 MIRType::Pointer, MWideningOp::None, AliasSet::Load(AliasSet::Any));
3473 if (!data) {
3474 return false;
3476 curBlock_->add(data);
3478 // Presize the values vector to the number of params
3479 if (!values->reserve(params.length())) {
3480 return false;
3483 // Load each value from the data pointer
3484 for (size_t i = 0; i < params.length(); i++) {
3485 if (!mirGen_.ensureBallast()) {
3486 return false;
3488 auto* load = MWasmLoadFieldKA::New(
3489 alloc(), exception, data, offsets[i], params[i].toMIRType(),
3490 MWideningOp::None, AliasSet::Load(AliasSet::Any));
3491 if (!load || !values->append(load)) {
3492 return false;
3494 curBlock_->add(load);
3496 return true;
3499 [[nodiscard]] bool finishTryCatch(LabelKind kind, Control& control,
3500 DefVector* defs) {
3501 switch (kind) {
3502 case LabelKind::Try: {
3503 // This is a catchless try; we must delegate all throwing instructions
3504 // to the nearest enclosing try block if one exists, or else to the
3505 // body block which will handle it in emitBodyDelegateThrowPad. We
3506 // specify a relativeDepth of '1' to delegate outside of the still
3507 // active try block.
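// Illustrative (hypothetical) nesting:
//
//   (try
//     (do
//       (try (do (call $f))))  ;; inner, catchless try
//     (catch $tag ...))        ;; outer try handles $f's exceptions
//
// The inner try's pending pad patches are handed to the outer try's
// landingPadPatches via delegatePadPatches below.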
3508 uint32_t relativeDepth = 1;
3509 if (!delegatePadPatches(control.tryControl->landingPadPatches,
3510 relativeDepth)) {
3511 return false;
3513 break;
3515 case LabelKind::Catch: {
3516 MOZ_ASSERT(!control.tryControl->inBody);
3517 // This is a try without a catch_all; we must have a rethrow at the end
3518 // of the landing pad (if any).
3519 MBasicBlock* padBlock = control.block;
3520 if (padBlock) {
3521 MBasicBlock* prevBlock = curBlock_;
3522 curBlock_ = padBlock;
3523 MDefinition* tag = curBlock_->pop();
3524 MDefinition* exception = curBlock_->pop();
3525 if (!throwFrom(exception, tag)) {
3526 return false;
3528 curBlock_ = prevBlock;
3530 break;
3532 case LabelKind::CatchAll: {
3533 MOZ_ASSERT(!control.tryControl->inBody);
3534 // This is a try with a catch_all, and requires no special handling.
3535 break;
3537 default:
3538 MOZ_CRASH();
3541 // Finish the block, joining the try and catch blocks
3542 return finishBlock(defs);
3545 [[nodiscard]] bool finishTryTable(Control& control, DefVector* defs) {
3546 // Mark this control as no longer in the body of the try
3547 control.tryControl->inBody = false;
3548 // Create a landing pad for all of the catches
3549 if (!createTryTableLandingPad(control.tryControl.get())) {
3550 return false;
3552 // Finish the block, joining the try and catch blocks
3553 return finishBlock(defs);
3556 [[nodiscard]] bool emitBodyDelegateThrowPad(Control& control) {
3557 // Create a landing pad for any throwing instructions
3558 MBasicBlock* padBlock;
3559 if (!createTryLandingPadIfNeeded(bodyDelegatePadPatches_, &padBlock)) {
3560 return false;
3563 // If no landing pad was necessary, then we don't need to do anything here
3564 if (!padBlock) {
3565 return true;
3568 // Switch to the landing pad and rethrow the exception
3569 MBasicBlock* prevBlock = curBlock_;
3570 curBlock_ = padBlock;
3571 MDefinition* tag = curBlock_->pop();
3572 MDefinition* exception = curBlock_->pop();
3573 if (!throwFrom(exception, tag)) {
3574 return false;
3576 curBlock_ = prevBlock;
3577 return true;
3580 [[nodiscard]] bool emitNewException(MDefinition* tag,
3581 MDefinition** exception) {
3582 return emitInstanceCall1(readBytecodeOffset(), SASigExceptionNew, tag,
3583 exception);
3586 [[nodiscard]] bool emitThrow(uint32_t tagIndex, const DefVector& argValues) {
3587 if (inDeadCode()) {
3588 return true;
3590 uint32_t bytecodeOffset = readBytecodeOffset();
3592 // Load the tag
3593 MDefinition* tag = loadTag(tagIndex);
3594 if (!tag) {
3595 return false;
3598 // Allocate an exception object
3599 MDefinition* exception;
3600 if (!emitNewException(tag, &exception)) {
3601 return false;
3604 // Load the data pointer from the object
3605 auto* data = MWasmLoadField::New(
3606 alloc(), exception, WasmExceptionObject::offsetOfData(),
3607 MIRType::Pointer, MWideningOp::None, AliasSet::Load(AliasSet::Any));
3608 if (!data) {
3609 return false;
3611 curBlock_->add(data);
3613 // Store the params into the data pointer
3614 SharedTagType tagType = moduleEnv_.tags[tagIndex].type;
3615 for (size_t i = 0; i < tagType->argOffsets().length(); i++) {
3616 if (!mirGen_.ensureBallast()) {
3617 return false;
3619 ValType type = tagType->argTypes()[i];
3620 uint32_t offset = tagType->argOffsets()[i];
3622 if (!type.isRefRepr()) {
3623 auto* store = MWasmStoreFieldKA::New(alloc(), exception, data, offset,
3624 argValues[i], MNarrowingOp::None,
3625 AliasSet::Store(AliasSet::Any));
3626 if (!store) {
3627 return false;
3629 curBlock_->add(store);
3630 continue;
3633 // Store the new value
3634 auto* store = MWasmStoreFieldRefKA::New(
3635 alloc(), instancePointer_, exception, data, offset, argValues[i],
3636 AliasSet::Store(AliasSet::Any), Nothing(), WasmPreBarrierKind::None);
3637 if (!store) {
3638 return false;
3640 curBlock_->add(store);
3642 // Call the post-write barrier
3643 if (!postBarrier(bytecodeOffset, exception, data, offset, argValues[i])) {
3644 return false;
3648 // Throw the exception
3649 return throwFrom(exception, tag);
3652 [[nodiscard]] bool emitThrowRef(MDefinition* exnRef) {
3653 if (inDeadCode()) {
3654 return true;
3657 // The exception must be non-null
3658 if (!refAsNonNull(exnRef)) {
3659 return false;
3662 // If there is no surrounding catching block, call an instance method to
3663 // throw the exception.
3664 if (!emitInstanceCall1(readBytecodeOffset(), SASigThrowException, exnRef)) {
3665 return false;
3667 unreachableTrap();
3669 curBlock_ = nullptr;
3670 return true;
3673 [[nodiscard]] bool throwFrom(MDefinition* exn, MDefinition* tag) {
3674 if (inDeadCode()) {
3675 return true;
3678 // Check if there is a local catching try control, and if so, then add a
3679 // pad-patch to its landingPadPatches.
3680 uint32_t relativeTryDepth;
3681 if (inTryBlock(&relativeTryDepth)) {
3682 // Set the pending exception state; the landing pad will read from this
3683 if (!setPendingExceptionState(exn, tag)) {
3684 return false;
3687 // End with a pending jump to the landing pad
3688 if (!endWithPadPatch(relativeTryDepth)) {
3689 return false;
3691 curBlock_ = nullptr;
3692 return true;
3695 // If there is no surrounding catching block, call an instance method to
3696 // throw the exception.
3697 if (!emitInstanceCall1(readBytecodeOffset(), SASigThrowException, exn)) {
3698 return false;
3700 unreachableTrap();
3702 curBlock_ = nullptr;
3703 return true;
3706 [[nodiscard]] bool emitRethrow(uint32_t relativeDepth) {
3707 if (inDeadCode()) {
3708 return true;
3711 Control& control = iter().controlItem(relativeDepth);
3712 MBasicBlock* pad = control.block;
3713 MOZ_ASSERT(pad);
3714 MOZ_ASSERT(pad->nslots() > 1);
3715 MOZ_ASSERT(iter().controlKind(relativeDepth) == LabelKind::Catch ||
3716 iter().controlKind(relativeDepth) == LabelKind::CatchAll);
3718 // The exception and its tag always occupy the last two slots of the landing pad.
3719 size_t exnSlotPosition = pad->nslots() - 2;
3720 MDefinition* tag = pad->getSlot(exnSlotPosition + 1);
3721 MDefinition* exception = pad->getSlot(exnSlotPosition);
3722 MOZ_ASSERT(exception->type() == MIRType::WasmAnyRef &&
3723 tag->type() == MIRType::WasmAnyRef);
3724 return throwFrom(exception, tag);
3727 /*********************************************** Instance call helpers ***/
3729 // Do not call this function directly -- it offers no protection against
3730 // mis-counting of arguments. Instead call one of
3731 // ::emitInstanceCall{0,1,2,3,4,5,6}.
3733 // Emits a call to the Instance function indicated by `callee`. This is
3734 // assumed to take an Instance pointer as its first argument. The remaining
3735 // args are taken from `args`, which is assumed to hold `numArgs` entries.
3736 // If `result` is non-null, the MDefinition* holding the return value is
3737 // written to `*result`.
3738 [[nodiscard]] bool emitInstanceCallN(uint32_t lineOrBytecode,
3739 const SymbolicAddressSignature& callee,
3740 MDefinition** args, size_t numArgs,
3741 MDefinition** result = nullptr) {
3742 // Check that the first formal parameter is plausibly an Instance pointer.
3743 MOZ_ASSERT(callee.numArgs > 0);
3744 MOZ_ASSERT(callee.argTypes[0] == MIRType::Pointer);
3745 // Check we agree on the number of args.
3746 MOZ_ASSERT(numArgs + 1 /* the instance pointer */ == callee.numArgs);
3747 // Check we agree on whether a value is returned.
3748 MOZ_ASSERT((result == nullptr) == (callee.retType == MIRType::None));
3750 // If we are in dead code, it can happen that some of the `args` entries
3751 // are nullptr, which will look like an OOM to the logic below. So exit
3752 // at this point. `passInstance`, `passArg`, `finishCall` and
3753 // `builtinInstanceMethodCall` all do nothing in dead code, so it's valid
3754 // to exit here.
3755 if (inDeadCode()) {
3756 if (result) {
3757 *result = nullptr;
3759 return true;
3762 // Check all args for signs of OOMness before attempting to allocate any
3763 // more memory.
3764 for (size_t i = 0; i < numArgs; i++) {
3765 if (!args[i]) {
3766 if (result) {
3767 *result = nullptr;
3769 return false;
3773 // Finally, construct the call.
3774 CallCompileState ccsArgs;
3775 if (!passInstance(callee.argTypes[0], &ccsArgs)) {
3776 return false;
3778 for (size_t i = 0; i < numArgs; i++) {
3779 if (!passArg(args[i], callee.argTypes[i + 1], &ccsArgs)) {
3780 return false;
3783 if (!finishCall(&ccsArgs)) {
3784 return false;
3786 return builtinInstanceMethodCall(callee, lineOrBytecode, ccsArgs, result);
3789 [[nodiscard]] bool emitInstanceCall0(uint32_t lineOrBytecode,
3790 const SymbolicAddressSignature& callee,
3791 MDefinition** result = nullptr) {
3792 MDefinition* args[0] = {};
3793 return emitInstanceCallN(lineOrBytecode, callee, args, 0, result);
3795 [[nodiscard]] bool emitInstanceCall1(uint32_t lineOrBytecode,
3796 const SymbolicAddressSignature& callee,
3797 MDefinition* arg1,
3798 MDefinition** result = nullptr) {
3799 MDefinition* args[1] = {arg1};
3800 return emitInstanceCallN(lineOrBytecode, callee, args, 1, result);
3802 [[nodiscard]] bool emitInstanceCall2(uint32_t lineOrBytecode,
3803 const SymbolicAddressSignature& callee,
3804 MDefinition* arg1, MDefinition* arg2,
3805 MDefinition** result = nullptr) {
3806 MDefinition* args[2] = {arg1, arg2};
3807 return emitInstanceCallN(lineOrBytecode, callee, args, 2, result);
3809 [[nodiscard]] bool emitInstanceCall3(uint32_t lineOrBytecode,
3810 const SymbolicAddressSignature& callee,
3811 MDefinition* arg1, MDefinition* arg2,
3812 MDefinition* arg3,
3813 MDefinition** result = nullptr) {
3814 MDefinition* args[3] = {arg1, arg2, arg3};
3815 return emitInstanceCallN(lineOrBytecode, callee, args, 3, result);
3817 [[nodiscard]] bool emitInstanceCall4(uint32_t lineOrBytecode,
3818 const SymbolicAddressSignature& callee,
3819 MDefinition* arg1, MDefinition* arg2,
3820 MDefinition* arg3, MDefinition* arg4,
3821 MDefinition** result = nullptr) {
3822 MDefinition* args[4] = {arg1, arg2, arg3, arg4};
3823 return emitInstanceCallN(lineOrBytecode, callee, args, 4, result);
3825 [[nodiscard]] bool emitInstanceCall5(uint32_t lineOrBytecode,
3826 const SymbolicAddressSignature& callee,
3827 MDefinition* arg1, MDefinition* arg2,
3828 MDefinition* arg3, MDefinition* arg4,
3829 MDefinition* arg5,
3830 MDefinition** result = nullptr) {
3831 MDefinition* args[5] = {arg1, arg2, arg3, arg4, arg5};
3832 return emitInstanceCallN(lineOrBytecode, callee, args, 5, result);
3834 [[nodiscard]] bool emitInstanceCall6(uint32_t lineOrBytecode,
3835 const SymbolicAddressSignature& callee,
3836 MDefinition* arg1, MDefinition* arg2,
3837 MDefinition* arg3, MDefinition* arg4,
3838 MDefinition* arg5, MDefinition* arg6,
3839 MDefinition** result = nullptr) {
3840 MDefinition* args[6] = {arg1, arg2, arg3, arg4, arg5, arg6};
3841 return emitInstanceCallN(lineOrBytecode, callee, args, 6, result);
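// Typical use of these helpers (a sketch; the array-allocation helpers
// further down make exactly this kind of call):
//
//   MDefinition* arrayObject;
//   if (!emitInstanceCall2(lineOrBytecode, SASigArrayNew_true, numElements,
//                          typeDefData, &arrayObject)) {
//     return false;
//   }
//
// The instance pointer is passed implicitly as the first argument, so the N
// in emitInstanceCallN counts only the explicit arguments.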
3844 /******************************** WasmGC: low level load/store helpers ***/
3846 // Given a (StorageType, FieldWideningOp) pair, produce the (MIRType,
3847 // MWideningOp) pair that will give the correct operation for reading the
3848 // value from memory.
3849 static void fieldLoadInfoToMIR(StorageType type, FieldWideningOp wideningOp,
3850 MIRType* mirType, MWideningOp* mirWideningOp) {
3851 switch (type.kind()) {
3852 case StorageType::I8: {
3853 switch (wideningOp) {
3854 case FieldWideningOp::Signed:
3855 *mirType = MIRType::Int32;
3856 *mirWideningOp = MWideningOp::FromS8;
3857 return;
3858 case FieldWideningOp::Unsigned:
3859 *mirType = MIRType::Int32;
3860 *mirWideningOp = MWideningOp::FromU8;
3861 return;
3862 default:
3863 MOZ_CRASH();
3866 case StorageType::I16: {
3867 switch (wideningOp) {
3868 case FieldWideningOp::Signed:
3869 *mirType = MIRType::Int32;
3870 *mirWideningOp = MWideningOp::FromS16;
3871 return;
3872 case FieldWideningOp::Unsigned:
3873 *mirType = MIRType::Int32;
3874 *mirWideningOp = MWideningOp::FromU16;
3875 return;
3876 default:
3877 MOZ_CRASH();
3880 default: {
3881 switch (wideningOp) {
3882 case FieldWideningOp::None:
3883 *mirType = type.toMIRType();
3884 *mirWideningOp = MWideningOp::None;
3885 return;
3886 default:
3887 MOZ_CRASH();
3893 // Given a StorageType, produce the MNarrowingOp required for writing the
3894 // value to memory.
3895 static MNarrowingOp fieldStoreInfoToMIR(StorageType type) {
3896 switch (type.kind()) {
3897 case StorageType::I8:
3898 return MNarrowingOp::To8;
3899 case StorageType::I16:
3900 return MNarrowingOp::To16;
3901 default:
3902 return MNarrowingOp::None;
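// Worked examples of the two mappings above (derived directly from the
// switches; listed here only for exposition):
//
//   load  I8,  FieldWideningOp::Signed   -> (MIRType::Int32, MWideningOp::FromS8)
//   load  I16, FieldWideningOp::Unsigned -> (MIRType::Int32, MWideningOp::FromU16)
//   load  I32, FieldWideningOp::None     -> (MIRType::Int32, MWideningOp::None)
//   store I8                             -> MNarrowingOp::To8
//   store I16                            -> MNarrowingOp::To16
//   store F64 (or any non-packed type)   -> MNarrowingOp::None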
3906 // Generate a write of `value` at address `base + offset`, where `offset` is
3907 // known at JIT time. If the written value is a reftype, the previous value
3908 // at `base + offset` will be retrieved and handed off to the post-write
3909 // barrier. `keepAlive` will be referenced by the instruction so as to hold
3910 // it live (from the GC's point of view).
3911 [[nodiscard]] bool writeGcValueAtBasePlusOffset(
3912 uint32_t lineOrBytecode, StorageType fieldType, MDefinition* keepAlive,
3913 AliasSet::Flag aliasBitset, MDefinition* value, MDefinition* base,
3914 uint32_t offset, bool needsTrapInfo, WasmPreBarrierKind preBarrierKind) {
3915 MOZ_ASSERT(aliasBitset != 0);
3916 MOZ_ASSERT(keepAlive->type() == MIRType::WasmAnyRef);
3917 MOZ_ASSERT(fieldType.widenToValType().toMIRType() == value->type());
3918 MNarrowingOp narrowingOp = fieldStoreInfoToMIR(fieldType);
3920 if (!fieldType.isRefRepr()) {
3921 MaybeTrapSiteInfo maybeTrap;
3922 if (needsTrapInfo) {
3923 maybeTrap.emplace(getTrapSiteInfo());
3925 auto* store = MWasmStoreFieldKA::New(
3926 alloc(), keepAlive, base, offset, value, narrowingOp,
3927 AliasSet::Store(aliasBitset), maybeTrap);
3928 if (!store) {
3929 return false;
3931 curBlock_->add(store);
3932 return true;
3935 // Otherwise it's a ref store. Load the previous value so we can show it
3936 // to the post-write barrier.
3938 // Optimisation opportunity: for the case where this field write results
3939 // from struct.new, the old value is always zero. So we should synthesise
3940 // a suitable zero constant rather than reading it from the object. See
3941 // also bug 1799999.
3942 MOZ_ASSERT(narrowingOp == MNarrowingOp::None);
3943 MOZ_ASSERT(fieldType.widenToValType() == fieldType.valType());
3945 // Store the new value
3946 auto* store = MWasmStoreFieldRefKA::New(
3947 alloc(), instancePointer_, keepAlive, base, offset, value,
3948 AliasSet::Store(aliasBitset), mozilla::Some(getTrapSiteInfo()),
3949 preBarrierKind);
3950 if (!store) {
3951 return false;
3953 curBlock_->add(store);
3955 // Call the post-write barrier
3956 return postBarrier(lineOrBytecode, keepAlive, base, offset, value);
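// Sketch of the MIR emitted for a ref-typed field store (illustrative only,
// constructor arguments abbreviated):
//
//   MWasmStoreFieldRefKA(instance, keepAlive, base, offset, value,
//                        Store(aliasBitset), Some(trapInfo), preBarrierKind)
//   postBarrier(lineOrBytecode, keepAlive, base, offset, value)
//
// versus a single MWasmStoreFieldKA (with the narrowing op computed above)
// for a non-ref store.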
3959 // Generate a write of `value` at address `base + index * scale`, where
3960 // `scale` is known at JIT-time. If the written value is a reftype, the
3961 // previous value at `base + index * scale` will be retrieved and handed off
3962 // to the post-write barrier. `keepAlive` will be referenced by the
3963 // instruction so as to hold it live (from the GC's point of view).
3964 [[nodiscard]] bool writeGcValueAtBasePlusScaledIndex(
3965 uint32_t lineOrBytecode, StorageType fieldType, MDefinition* keepAlive,
3966 AliasSet::Flag aliasBitset, MDefinition* value, MDefinition* base,
3967 uint32_t scale, MDefinition* index, WasmPreBarrierKind preBarrierKind) {
3968 MOZ_ASSERT(aliasBitset != 0);
3969 MOZ_ASSERT(keepAlive->type() == MIRType::WasmAnyRef);
3970 MOZ_ASSERT(fieldType.widenToValType().toMIRType() == value->type());
3971 MOZ_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8 ||
3972 scale == 16);
3974 // Currently there's no single MIR node that this can be translated into.
3975 // So compute the final address "manually", then store directly to that
3976 // address. See bug 1802287.
3977 MDefinition* scaleDef = constantTargetWord(intptr_t(scale));
3978 if (!scaleDef) {
3979 return false;
3981 MDefinition* finalAddr = computeBasePlusScaledIndex(base, scaleDef, index);
3982 if (!finalAddr) {
3983 return false;
3986 return writeGcValueAtBasePlusOffset(
3987 lineOrBytecode, fieldType, keepAlive, aliasBitset, value, finalAddr,
3988 /*offset=*/0,
3989 /*needsTrapInfo=*/false, preBarrierKind);
3992 // Generate a read from address `base + offset`, where `offset` is known at
3993 // JIT time. The loaded value will be widened as described by `fieldType`
3994 // and `fieldWideningOp`. `keepAlive` will be referenced by the instruction
3995 // so as to hold it live (from the GC's point of view).
3996 [[nodiscard]] MDefinition* readGcValueAtBasePlusOffset(
3997 StorageType fieldType, FieldWideningOp fieldWideningOp,
3998 MDefinition* keepAlive, AliasSet::Flag aliasBitset, MDefinition* base,
3999 uint32_t offset, bool needsTrapInfo) {
4000 MOZ_ASSERT(aliasBitset != 0);
4001 MOZ_ASSERT(keepAlive->type() == MIRType::WasmAnyRef);
4002 MIRType mirType;
4003 MWideningOp mirWideningOp;
4004 fieldLoadInfoToMIR(fieldType, fieldWideningOp, &mirType, &mirWideningOp);
4005 MaybeTrapSiteInfo maybeTrap;
4006 if (needsTrapInfo) {
4007 maybeTrap.emplace(getTrapSiteInfo());
4009 auto* load = MWasmLoadFieldKA::New(alloc(), keepAlive, base, offset,
4010 mirType, mirWideningOp,
4011 AliasSet::Load(aliasBitset), maybeTrap);
4012 if (!load) {
4013 return nullptr;
4015 curBlock_->add(load);
4016 return load;
4019 // Generate a read from address `base + index * scale`, where `scale` is
4020 // known at JIT-time. The loaded value will be widened as described by
4021 // `fieldType` and `fieldWideningOp`. `keepAlive` will be referenced by the
4022 // instruction so as to hold it live (from the GC's point of view).
4023 [[nodiscard]] MDefinition* readGcValueAtBasePlusScaledIndex(
4024 StorageType fieldType, FieldWideningOp fieldWideningOp,
4025 MDefinition* keepAlive, AliasSet::Flag aliasBitset, MDefinition* base,
4026 uint32_t scale, MDefinition* index) {
4027 MOZ_ASSERT(aliasBitset != 0);
4028 MOZ_ASSERT(keepAlive->type() == MIRType::WasmAnyRef);
4029 MOZ_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8 ||
4030 scale == 16);
4032 // Currently there's no single MIR node that this can be translated into.
4033 // So compute the final address "manually", then load directly from that
4034 // address. See bug 1802287.
4035 MDefinition* scaleDef = constantTargetWord(intptr_t(scale));
4036 if (!scaleDef) {
4037 return nullptr;
4039 MDefinition* finalAddr = computeBasePlusScaledIndex(base, scaleDef, index);
4040 if (!finalAddr) {
4041 return nullptr;
4044 MIRType mirType;
4045 MWideningOp mirWideningOp;
4046 fieldLoadInfoToMIR(fieldType, fieldWideningOp, &mirType, &mirWideningOp);
4047 auto* load = MWasmLoadFieldKA::New(alloc(), keepAlive, finalAddr,
4048 /*offset=*/0, mirType, mirWideningOp,
4049 AliasSet::Load(aliasBitset),
4050 mozilla::Some(getTrapSiteInfo()));
4051 if (!load) {
4052 return nullptr;
4054 curBlock_->add(load);
4055 return load;
4058 /************************************************ WasmGC: type helpers ***/
4060 // Returns an MDefinition holding the supertype vector for `typeIndex`.
4061 [[nodiscard]] MDefinition* loadSuperTypeVector(uint32_t typeIndex) {
4062 uint32_t stvOffset = moduleEnv().offsetOfSuperTypeVector(typeIndex);
4064 auto* load =
4065 MWasmLoadInstanceDataField::New(alloc(), MIRType::Pointer, stvOffset,
4066 /*isConst=*/true, instancePointer_);
4067 if (!load) {
4068 return nullptr;
4070 curBlock_->add(load);
4071 return load;
4074 [[nodiscard]] MDefinition* loadTypeDefInstanceData(uint32_t typeIndex) {
4075 size_t offset = Instance::offsetInData(
4076 moduleEnv_.offsetOfTypeDefInstanceData(typeIndex));
4077 auto* result = MWasmDerivedPointer::New(alloc(), instancePointer_, offset);
4078 if (!result) {
4079 return nullptr;
4081 curBlock_->add(result);
4082 return result;
4085 /********************************************** WasmGC: struct helpers ***/
4087 [[nodiscard]] MDefinition* createStructObject(uint32_t typeIndex,
4088 bool zeroFields) {
4089 const TypeDef& typeDef = (*moduleEnv().types)[typeIndex];
4090 gc::AllocKind allocKind = WasmStructObject::allocKindForTypeDef(&typeDef);
4091 bool isOutline =
4092 WasmStructObject::requiresOutlineBytes(typeDef.structType().size_);
4094 // Allocate an uninitialized struct. This requires the type definition
4095 // for the struct.
4096 MDefinition* typeDefData = loadTypeDefInstanceData(typeIndex);
4097 if (!typeDefData) {
4098 return nullptr;
4101 auto* structObject =
4102 MWasmNewStructObject::New(alloc(), instancePointer_, typeDefData,
4103 isOutline, zeroFields, allocKind);
4104 if (!structObject) {
4105 return nullptr;
4107 curBlock_->add(structObject);
4109 return structObject;
4112 // Helper function for EmitStruct{New,Set}: given a MIR pointer to a
4113 // WasmStructObject, a MIR pointer to a value, and a field descriptor,
4114 // generate MIR to write the value to the relevant field in the object.
4115 [[nodiscard]] bool writeValueToStructField(
4116 uint32_t lineOrBytecode, const StructField& field,
4117 MDefinition* structObject, MDefinition* value,
4118 WasmPreBarrierKind preBarrierKind) {
4119 StorageType fieldType = field.type;
4120 uint32_t fieldOffset = field.offset;
4122 bool areaIsOutline;
4123 uint32_t areaOffset;
4124 WasmStructObject::fieldOffsetToAreaAndOffset(fieldType, fieldOffset,
4125 &areaIsOutline, &areaOffset);
4127 // Make `base` point at the first byte of either the struct object as a
4128 // whole or of the out-of-line data area. And adjust `areaOffset`
4129 // accordingly.
4130 MDefinition* base;
4131 bool needsTrapInfo;
4132 if (areaIsOutline) {
4133 auto* load = MWasmLoadField::New(
4134 alloc(), structObject, WasmStructObject::offsetOfOutlineData(),
4135 MIRType::Pointer, MWideningOp::None,
4136 AliasSet::Load(AliasSet::WasmStructOutlineDataPointer),
4137 mozilla::Some(getTrapSiteInfo()));
4138 if (!load) {
4139 return false;
4141 curBlock_->add(load);
4142 base = load;
4143 needsTrapInfo = false;
4144 } else {
4145 base = structObject;
4146 needsTrapInfo = true;
4147 areaOffset += WasmStructObject::offsetOfInlineData();
4149 // The transaction is to happen at `base + areaOffset`, so to speak.
4150 // After this point we must ignore `fieldOffset`.
4152 // The alias set denoting the field's location, although lacking a
4153 // Load-vs-Store indication at this point.
4154 AliasSet::Flag fieldAliasSet = areaIsOutline
4155 ? AliasSet::WasmStructOutlineDataArea
4156 : AliasSet::WasmStructInlineDataArea;
4158 return writeGcValueAtBasePlusOffset(lineOrBytecode, fieldType, structObject,
4159 fieldAliasSet, value, base, areaOffset,
4160 needsTrapInfo, preBarrierKind);
4163 // Helper function for EmitStructGet: given a MIR pointer to a
4164 // WasmStructObject, a field descriptor and a field widening operation,
4165 // generate MIR to read the value from the relevant field in the object.
4166 [[nodiscard]] MDefinition* readValueFromStructField(
4167 const StructField& field, FieldWideningOp wideningOp,
4168 MDefinition* structObject) {
4169 StorageType fieldType = field.type;
4170 uint32_t fieldOffset = field.offset;
4172 bool areaIsOutline;
4173 uint32_t areaOffset;
4174 WasmStructObject::fieldOffsetToAreaAndOffset(fieldType, fieldOffset,
4175 &areaIsOutline, &areaOffset);
4177 // Make `base` point at the first byte of either the struct object as a
4178 // whole or of the out-of-line data area. And adjust `areaOffset`
4179 // accordingly.
4180 MDefinition* base;
4181 bool needsTrapInfo;
4182 if (areaIsOutline) {
4183 auto* loadOOLptr = MWasmLoadField::New(
4184 alloc(), structObject, WasmStructObject::offsetOfOutlineData(),
4185 MIRType::Pointer, MWideningOp::None,
4186 AliasSet::Load(AliasSet::WasmStructOutlineDataPointer),
4187 mozilla::Some(getTrapSiteInfo()));
4188 if (!loadOOLptr) {
4189 return nullptr;
4191 curBlock_->add(loadOOLptr);
4192 base = loadOOLptr;
4193 needsTrapInfo = false;
4194 } else {
4195 base = structObject;
4196 needsTrapInfo = true;
4197 areaOffset += WasmStructObject::offsetOfInlineData();
4199 // The transaction is to happen at `base + areaOffset`, so to speak.
4200 // After this point we must ignore `fieldOffset`.
4202 // The alias set denoting the field's location, although lacking a
4203 // Load-vs-Store indication at this point.
4204 AliasSet::Flag fieldAliasSet = areaIsOutline
4205 ? AliasSet::WasmStructOutlineDataArea
4206 : AliasSet::WasmStructInlineDataArea;
4208 return readGcValueAtBasePlusOffset(fieldType, wideningOp, structObject,
4209 fieldAliasSet, base, areaOffset,
4210 needsTrapInfo);
4213 /********************************* WasmGC: address-arithmetic helpers ***/
4215 inline bool targetIs64Bit() const {
4216 #ifdef JS_64BIT
4217 return true;
4218 #else
4219 return false;
4220 #endif
4223 // Generate MIR to unsigned widen `val` out to the target word size. If
4224 // `val` is already at the target word size, this is a no-op. The only
4225 // other allowed case is where `val` is Int32 and we're compiling for a
4226 // 64-bit target, in which case a widen is generated.
4227 [[nodiscard]] MDefinition* unsignedWidenToTargetWord(MDefinition* val) {
4228 if (targetIs64Bit()) {
4229 if (val->type() == MIRType::Int32) {
4230 auto* ext = MExtendInt32ToInt64::New(alloc(), val, /*isUnsigned=*/true);
4231 if (!ext) {
4232 return nullptr;
4234 curBlock_->add(ext);
4235 return ext;
4237 MOZ_ASSERT(val->type() == MIRType::Int64);
4238 return val;
4240 MOZ_ASSERT(val->type() == MIRType::Int32);
4241 return val;
4244 // Compute `base + index * scale`, for both 32- and 64-bit targets. For the
4245 // convenience of callers, on a 64-bit target, `index` and `scale` can
4246 // (independently) be either Int32 or Int64; in the former case they will be
4247 // zero-extended before the multiplication, so that both the multiplication
4248 // and addition are done at the target word size.
4249 [[nodiscard]] MDefinition* computeBasePlusScaledIndex(MDefinition* base,
4250 MDefinition* scale,
4251 MDefinition* index) {
4252 // On a 32-bit target, require:
4253 // base : Int32 (== TargetWordMIRType())
4254 // index, scale : Int32
4255 // Calculate base +32 (index *32 scale)
4257 // On a 64-bit target, require:
4258 // base : Int64 (== TargetWordMIRType())
4259 // index, scale: either Int32 or Int64 (any combination is OK)
4260 // Calculate base +64 (u-widen to 64(index)) *64 (u-widen to 64(scale))
4262 // Final result type is the same as that of `base`.
4264 MOZ_ASSERT(base->type() == TargetWordMIRType());
4266 // Widen `index` if necessary, producing `indexW`.
4267 MDefinition* indexW = unsignedWidenToTargetWord(index);
4268 if (!indexW) {
4269 return nullptr;
4271 // Widen `scale` if necessary, producing `scaleW`.
4272 MDefinition* scaleW = unsignedWidenToTargetWord(scale);
4273 if (!scaleW) {
4274 return nullptr;
4276 // Compute `scaledIndex = indexW * scaleW`.
4277 MIRType targetWordType = TargetWordMIRType();
4278 bool targetIs64 = targetWordType == MIRType::Int64;
4279 MMul* scaledIndex =
4280 MMul::NewWasm(alloc(), indexW, scaleW, targetWordType,
4281 targetIs64 ? MMul::Mode::Normal : MMul::Mode::Integer,
4282 /*mustPreserveNan=*/false);
4283 if (!scaledIndex) {
4284 return nullptr;
4286 // Compute `result = base + scaledIndex`.
4287 curBlock_->add(scaledIndex);
4288 MAdd* result = MAdd::NewWasm(alloc(), base, scaledIndex, targetWordType);
4289 if (!result) {
4290 return nullptr;
4292 curBlock_->add(result);
4293 return result;
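// Worked example (64-bit target; exposition only): with base : Int64,
// index : Int32, and scale = constantTargetWord(8) : Int64, the MIR emitted
// is roughly
//
//   indexW      = MExtendInt32ToInt64(index, /*isUnsigned=*/true)
//   scaledIndex = MMul(indexW, scale)      // 64-bit multiply; scale needs
//                                          // no widening here
//   result      = MAdd(base, scaledIndex)  // 64-bit add
//
// On a 32-bit target all three values are already Int32 and no widening is
// emitted.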
4296 /********************************************** WasmGC: array helpers ***/
4298 // Given `arrayObject`, the address of a WasmArrayObject, generate MIR to
4299 // return the contents of the WasmArrayObject::numElements_ field.
4300 // Adds trap site info for the null check.
4301 [[nodiscard]] MDefinition* getWasmArrayObjectNumElements(
4302 MDefinition* arrayObject) {
4303 MOZ_ASSERT(arrayObject->type() == MIRType::WasmAnyRef);
4305 auto* numElements = MWasmLoadField::New(
4306 alloc(), arrayObject, WasmArrayObject::offsetOfNumElements(),
4307 MIRType::Int32, MWideningOp::None,
4308 AliasSet::Load(AliasSet::WasmArrayNumElements),
4309 mozilla::Some(getTrapSiteInfo()));
4310 if (!numElements) {
4311 return nullptr;
4313 curBlock_->add(numElements);
4315 return numElements;
4318 // Given `arrayObject`, the address of a WasmArrayObject, generate MIR to
4319 // return the contents of the WasmArrayObject::data_ field.
4320 [[nodiscard]] MDefinition* getWasmArrayObjectData(MDefinition* arrayObject) {
4321 MOZ_ASSERT(arrayObject->type() == MIRType::WasmAnyRef);
4323 auto* data = MWasmLoadField::New(
4324 alloc(), arrayObject, WasmArrayObject::offsetOfData(),
4325 TargetWordMIRType(), MWideningOp::None,
4326 AliasSet::Load(AliasSet::WasmArrayDataPointer),
4327 mozilla::Some(getTrapSiteInfo()));
4328 if (!data) {
4329 return nullptr;
4331 curBlock_->add(data);
4333 return data;
4336 // Given a JIT-time-known type index `typeIndex` and a run-time known number
4337 // of elements `numElements`, create MIR to call `Instance::arrayNew<true>`,
4338 // producing an array with the relevant type and size and initialized with
4339 // the element type's default value.
4340 [[nodiscard]] MDefinition* createDefaultInitializedArrayObject(
4341 uint32_t lineOrBytecode, uint32_t typeIndex, MDefinition* numElements) {
4342 // Get the type definition for the array as a whole.
4343 MDefinition* typeDefData = loadTypeDefInstanceData(typeIndex);
4344 if (!typeDefData) {
4345 return nullptr;
4348 // Create call:
4349 // arrayObject = Instance::arrayNew<true>(numElements, typeDefData)
4350 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated
4351 // by this call will trap.
4352 MDefinition* arrayObject;
4353 if (!emitInstanceCall2(lineOrBytecode, SASigArrayNew_true, numElements,
4354 typeDefData, &arrayObject)) {
4355 return nullptr;
4358 return arrayObject;
4361 [[nodiscard]] MDefinition* createUninitializedArrayObject(
4362 uint32_t lineOrBytecode, uint32_t typeIndex, MDefinition* numElements) {
4363 // Get the type definition for the array as a whole.
4364 MDefinition* typeDefData = loadTypeDefInstanceData(typeIndex);
4365 if (!typeDefData) {
4366 return nullptr;
4369 // Create call:
4370 // arrayObject = Instance::arrayNew<false>(numElements, typeDefData)
4371 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated
4372 // by this call will trap.
4373 MDefinition* arrayObject;
4374 if (!emitInstanceCall2(lineOrBytecode, SASigArrayNew_false, numElements,
4375 typeDefData, &arrayObject)) {
4376 return nullptr;
4379 return arrayObject;
4382 // This emits MIR to perform several actions common to array loads and
4383 // stores. Given `arrayObject`, which points to a WasmArrayObject, and an
4384 // index value `index`, it:
4386 // * Generates a trap if the array pointer is null
4387 // * Gets the size of the array
4388 // * Emits a bounds check of `index` against the array size
4389 // * Retrieves the OOL object pointer from the array
4390 // * Includes check for null via signal handler.
4392 // The returned value is for the OOL object pointer.
4393 [[nodiscard]] MDefinition* setupForArrayAccess(MDefinition* arrayObject,
4394 MDefinition* index) {
4395 MOZ_ASSERT(arrayObject->type() == MIRType::WasmAnyRef);
4396 MOZ_ASSERT(index->type() == MIRType::Int32);
4398 // Check for null is done in getWasmArrayObjectNumElements.
4400 // Get the size value for the array.
4401 MDefinition* numElements = getWasmArrayObjectNumElements(arrayObject);
4402 if (!numElements) {
4403 return nullptr;
4406 // Create a bounds check.
4407 auto* boundsCheck =
4408 MWasmBoundsCheck::New(alloc(), index, numElements, bytecodeOffset(),
4409 MWasmBoundsCheck::Target::Unknown);
4410 if (!boundsCheck) {
4411 return nullptr;
4413 curBlock_->add(boundsCheck);
4415 // Get the address of the first byte of the (OOL) data area.
4416 return getWasmArrayObjectData(arrayObject);
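// Putting it together (a sketch of how an array element access is set up;
// exposition only):
//
//   numElements = MWasmLoadField(arrayObject, offsetOfNumElements(), ...)
//                 // traps via signal handler if arrayObject is null
//   MWasmBoundsCheck(index, numElements, ...)   // traps if index >= size
//   data        = MWasmLoadField(arrayObject, offsetOfData(), ...)
//
// The returned `data` pointer is then combined with the index by the callers
// of this helper.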
4419 [[nodiscard]] bool fillArray(uint32_t lineOrBytecode,
4420 const ArrayType& arrayType,
4421 MDefinition* arrayObject, MDefinition* index,
4422 MDefinition* numElements, MDefinition* val) {
4423 mozilla::DebugOnly<MIRType> valMIRType = val->type();
4424 StorageType elemType = arrayType.elementType_;
4425 MOZ_ASSERT(elemType.widenToValType().toMIRType() == valMIRType);
4427 uint32_t elemSize = elemType.size();
4428 MOZ_ASSERT(elemSize >= 1 && elemSize <= 16);
4430 // Make `arrayBase` point at the first byte of the (OOL) data area.
4431 MDefinition* arrayBase = getWasmArrayObjectData(arrayObject);
4432 if (!arrayBase) {
4433 return false;
4436 // We have:
4437 // arrayBase : TargetWord
4438 // index : Int32
4439 // numElements : Int32
4440 // val : <any StorageType>
4441 // $elemSize = arrayType.elementType_.size(); 1, 2, 4, 8 or 16
4443 // Generate MIR:
4444 // <in current block>
4445 // fillBase : TargetWord = arrayBase + index * elemSize
4446 // limit : TargetWord = fillBase + numElements * elemSize
4447 // if (limit == fillBase) goto after; // skip loop if trip count == 0
4448 // loop:
4449 // ptrPhi = phi(fillBase, ptrNext)
4450 // *ptrPhi = val
4451 // ptrNext = ptrPhi + $elemSize
4452 // if (ptrNext <u limit) goto loop;
4453 // after:
4455 // We construct the loop "manually" rather than using
4456 // FunctionCompiler::{startLoop,closeLoop} as the latter have awareness of
4457 // the wasm view of loops, whereas the loop we're building here is not a
4458 // wasm-level loop.
4459 // ==== Create the "loop" and "after" blocks ====
4460 MBasicBlock* loopBlock;
4461 if (!newBlock(curBlock_, &loopBlock, MBasicBlock::LOOP_HEADER)) {
4462 return false;
4464 MBasicBlock* afterBlock;
4465 if (!newBlock(loopBlock, &afterBlock)) {
4466 return false;
4469 // ==== Fill in the remainder of the block preceding the loop ====
4470 MDefinition* elemSizeDef = constantTargetWord(intptr_t(elemSize));
4471 if (!elemSizeDef) {
4472 return false;
4475 MDefinition* fillBase =
4476 computeBasePlusScaledIndex(arrayBase, elemSizeDef, index);
4477 if (!fillBase) {
4478 return false;
4480 MDefinition* limit =
4481 computeBasePlusScaledIndex(fillBase, elemSizeDef, numElements);
4482 if (!limit) {
4483 return false;
4486 // Use JSOp::StrictEq, not ::Eq, so that the comparison (and eventually
4487 // the entire initialisation loop) will be folded out in the case where
4488 // the number of elements is zero. See MCompare::tryFoldEqualOperands.
4489 MDefinition* limitEqualsBase = compare(
4490 limit, fillBase, JSOp::StrictEq,
4491 targetIs64Bit() ? MCompare::Compare_UInt64 : MCompare::Compare_UInt32);
4492 if (!limitEqualsBase) {
4493 return false;
4495 MTest* skipIfLimitEqualsBase =
4496 MTest::New(alloc(), limitEqualsBase, afterBlock, loopBlock);
4497 if (!skipIfLimitEqualsBase) {
4498 return false;
4500 curBlock_->end(skipIfLimitEqualsBase);
4501 if (!afterBlock->addPredecessor(alloc(), curBlock_)) {
4502 return false;
4505 // ==== Fill in the loop block as best we can ====
4506 curBlock_ = loopBlock;
4507 MPhi* ptrPhi = MPhi::New(alloc(), TargetWordMIRType());
4508 if (!ptrPhi) {
4509 return false;
4511 if (!ptrPhi->reserveLength(2)) {
4512 return false;
4514 ptrPhi->addInput(fillBase);
4515 curBlock_->addPhi(ptrPhi);
4516 curBlock_->setLoopDepth(loopDepth_ + 1);
4518 // Because we have the exact address to hand, use
4519 // `writeGcValueAtBasePlusOffset` rather than
4520 // `writeGcValueAtBasePlusScaledIndex` to do the store.
4521 if (!writeGcValueAtBasePlusOffset(
4522 lineOrBytecode, elemType, arrayObject, AliasSet::WasmArrayDataArea,
4523 val, ptrPhi, /*offset=*/0,
4524 /*needsTrapInfo=*/false, WasmPreBarrierKind::None)) {
4525 return false;
4528 auto* ptrNext =
4529 MAdd::NewWasm(alloc(), ptrPhi, elemSizeDef, TargetWordMIRType());
4530 if (!ptrNext) {
4531 return false;
4533 curBlock_->add(ptrNext);
4534 ptrPhi->addInput(ptrNext);
4536 MDefinition* ptrNextLtuLimit = compare(
4537 ptrNext, limit, JSOp::Lt,
4538 targetIs64Bit() ? MCompare::Compare_UInt64 : MCompare::Compare_UInt32);
4539 if (!ptrNextLtuLimit) {
4540 return false;
4542 auto* continueIfPtrNextLtuLimit =
4543 MTest::New(alloc(), ptrNextLtuLimit, loopBlock, afterBlock);
4544 if (!continueIfPtrNextLtuLimit) {
4545 return false;
4547 curBlock_->end(continueIfPtrNextLtuLimit);
4548 if (!loopBlock->addPredecessor(alloc(), loopBlock)) {
4549 return false;
4551 // ==== Loop block completed ====
4553 curBlock_ = afterBlock;
4554 return true;
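// For intuition, the loop built above behaves like the following C-style
// sketch (illustrative only; the real work is done by the MIR nodes above,
// and storeElement is a hypothetical stand-in for the
// writeGcValueAtBasePlusOffset call):
//
//   char* p     = arrayBase + (uintptr_t)index * elemSize;
//   char* limit = p + (uintptr_t)numElements * elemSize;
//   while (p != limit) {        // skipped entirely when numElements == 0
//     storeElement(p, val);
//     p += elemSize;
//   }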
4557 // This routine generates all MIR required for `array.new`. The returned
4558 // value is for the newly created array.
4559 [[nodiscard]] MDefinition* createArrayNewCallAndLoop(uint32_t lineOrBytecode,
4560 uint32_t typeIndex,
4561 MDefinition* numElements,
4562 MDefinition* fillValue) {
4563 const ArrayType& arrayType = (*moduleEnv_.types)[typeIndex].arrayType();
4565 // Create the array object, uninitialized.
4566 MDefinition* arrayObject =
4567 createUninitializedArrayObject(lineOrBytecode, typeIndex, numElements);
4568 if (!arrayObject) {
4569 return nullptr;
4572 // Optimisation opportunity: if the fill value is zero, maybe we should
4573 // likewise skip over the initialisation loop entirely (and, if the zero
4574 // value is visible at JIT time, the loop will be removed). For the
4575 // reftyped case, that would be a big win since each iteration requires a
4576 // call to the post-write barrier routine.
4578 if (!fillArray(lineOrBytecode, arrayType, arrayObject, constantI32(0),
4579 numElements, fillValue)) {
4580 return nullptr;
4583 return arrayObject;
4586 [[nodiscard]] bool createArrayFill(uint32_t lineOrBytecode,
4587 uint32_t typeIndex,
4588 MDefinition* arrayObject,
4589 MDefinition* index, MDefinition* val,
4590 MDefinition* numElements) {
4591 MOZ_ASSERT(arrayObject->type() == MIRType::WasmAnyRef);
4592 MOZ_ASSERT(index->type() == MIRType::Int32);
4593 MOZ_ASSERT(numElements->type() == MIRType::Int32);
4595 const ArrayType& arrayType = (*moduleEnv_.types)[typeIndex].arrayType();
4597 // Check for null is done in getWasmArrayObjectNumElements.
4599 // Get the array's actual size.
4600 MDefinition* actualNumElements = getWasmArrayObjectNumElements(arrayObject);
4601 if (!actualNumElements) {
4602 return false;
4605 // Create a bounds check.
4606 auto* boundsCheck = MWasmBoundsCheckRange32::New(
4607 alloc(), index, numElements, actualNumElements, bytecodeOffset());
4608 if (!boundsCheck) {
4609 return false;
4611 curBlock_->add(boundsCheck);
4613 return fillArray(lineOrBytecode, arrayType, arrayObject, index, numElements,
4614 val);
4617 /*********************************************** WasmGC: other helpers ***/
4619 // Generate MIR that causes a trap of kind `trapKind` if `arg` is zero.
4620 // Currently `arg` may only be a MIRType::Int32, but that requirement could
4621 // be relaxed if needed in future.
4622 [[nodiscard]] bool trapIfZero(wasm::Trap trapKind, MDefinition* arg) {
4623 MOZ_ASSERT(arg->type() == MIRType::Int32);
4625 MBasicBlock* trapBlock = nullptr;
4626 if (!newBlock(curBlock_, &trapBlock)) {
4627 return false;
4630 auto* trap = MWasmTrap::New(alloc(), trapKind, bytecodeOffset());
4631 if (!trap) {
4632 return false;
4634 trapBlock->end(trap);
4636 MBasicBlock* joinBlock = nullptr;
4637 if (!newBlock(curBlock_, &joinBlock)) {
4638 return false;
4641 auto* test = MTest::New(alloc(), arg, joinBlock, trapBlock);
4642 if (!test) {
4643 return false;
4645 curBlock_->end(test);
4646 curBlock_ = joinBlock;
4647 return true;
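// The control flow generated above is, schematically (exposition only):
//
//   curBlock:  test = MTest(arg, joinBlock, trapBlock)
//   trapBlock: MWasmTrap(trapKind)     // taken when arg == 0
//   joinBlock: ...                     // becomes the new curBlock_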
4650 [[nodiscard]] MDefinition* isRefSubtypeOf(MDefinition* ref,
4651 RefType sourceType,
4652 RefType destType) {
4653 MInstruction* isSubTypeOf = nullptr;
4654 if (destType.isTypeRef()) {
4655 uint32_t typeIndex = moduleEnv_.types->indexOf(*destType.typeDef());
4656 MDefinition* superSTV = loadSuperTypeVector(typeIndex);
4657 isSubTypeOf = MWasmRefIsSubtypeOfConcrete::New(alloc(), ref, superSTV,
4658 sourceType, destType);
4659 } else {
4660 isSubTypeOf =
4661 MWasmRefIsSubtypeOfAbstract::New(alloc(), ref, sourceType, destType);
4663 MOZ_ASSERT(isSubTypeOf);
4665 curBlock_->add(isSubTypeOf);
4666 return isSubTypeOf;
4669 // Generate MIR that attempts to downcast `ref` to `destType`. If the
4670 // downcast fails, we trap. If it succeeds, then `ref` can be assumed to
4671 // have a type that is a subtype of (or the same as) `destType` after
4672 // this point.
4673 [[nodiscard]] bool refCast(MDefinition* ref, RefType sourceType,
4674 RefType destType) {
4675 MDefinition* success = isRefSubtypeOf(ref, sourceType, destType);
4676 if (!success) {
4677 return false;
4680 // Trap if `success` is zero. If it's nonzero, we have established that
4681 // `ref <: destType`.
4682 return trapIfZero(wasm::Trap::BadCast, success);
4685 // Generate MIR that computes a boolean value indicating whether or not it
4686 // is possible to downcast `ref` to `destType`.
4687 [[nodiscard]] MDefinition* refTest(MDefinition* ref, RefType sourceType,
4688 RefType destType) {
4689 return isRefSubtypeOf(ref, sourceType, destType);
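// Both helpers bottom out in the same subtype check; they differ only in how
// the result is consumed (a sketch of the wasm-level behaviour):
//
//   ref.cast <t>  ==>  isRefSubtypeOf(...) then trapIfZero(Trap::BadCast, ...)
//   ref.test <t>  ==>  isRefSubtypeOf(...) used directly as an i32 result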
4692 // Generates MIR for br_on_cast and br_on_cast_fail.
4693 [[nodiscard]] bool brOnCastCommon(bool onSuccess, uint32_t labelRelativeDepth,
4694 RefType sourceType, RefType destType,
4695 const ResultType& labelType,
4696 const DefVector& values) {
4697 if (inDeadCode()) {
4698 return true;
4701 MBasicBlock* fallthroughBlock = nullptr;
4702 if (!newBlock(curBlock_, &fallthroughBlock)) {
4703 return false;
4706 // `values` are the values in the top block-value on the stack. Since the
4707 // argument to `br_on_cast{_fail}` is at the top of the stack, it is the
4708 // last element in `values`.
4710 // For both br_on_cast and br_on_cast_fail, the OpIter validation routines
4711 // ensure that `values` is non-empty (by rejecting the case
4712 // `labelType->length() < 1`) and that the last value in `values` is
4713 // reftyped.
4714 MOZ_RELEASE_ASSERT(values.length() > 0);
4715 MDefinition* ref = values.back();
4716 MOZ_ASSERT(ref->type() == MIRType::WasmAnyRef);
4718 MDefinition* success = isRefSubtypeOf(ref, sourceType, destType);
4719 if (!success) {
4720 return false;
4723 MTest* test;
4724 if (onSuccess) {
4725 test = MTest::New(alloc(), success, nullptr, fallthroughBlock);
4726 if (!test || !addControlFlowPatch(test, labelRelativeDepth,
4727 MTest::TrueBranchIndex)) {
4728 return false;
4730 } else {
4731 test = MTest::New(alloc(), success, fallthroughBlock, nullptr);
4732 if (!test || !addControlFlowPatch(test, labelRelativeDepth,
4733 MTest::FalseBranchIndex)) {
4734 return false;
4738 if (!pushDefs(values)) {
4739 return false;
4742 curBlock_->end(test);
4743 curBlock_ = fallthroughBlock;
4744 return true;
4747 [[nodiscard]] bool brOnNonStruct(const DefVector& values) {
4748 if (inDeadCode()) {
4749 return true;
4752 MBasicBlock* fallthroughBlock = nullptr;
4753 if (!newBlock(curBlock_, &fallthroughBlock)) {
4754 return false;
4757 MOZ_ASSERT(values.length() > 0);
4758 MOZ_ASSERT(values.back()->type() == MIRType::WasmAnyRef);
4760 MGoto* jump = MGoto::New(alloc(), fallthroughBlock);
4761 if (!jump) {
4762 return false;
4764 if (!pushDefs(values)) {
4765 return false;
4768 curBlock_->end(jump);
4769 curBlock_ = fallthroughBlock;
4770 return true;
4773 /************************************************************ DECODING ***/
4775 // AsmJS adds a line number to `callSiteLineNums` for certain operations that
4776 // are represented by a JS call, such as math builtins. We use these line
4777 // numbers when calling builtins. This method will read from
4778 // `callSiteLineNums` when we are using AsmJS, or else return the current
4779 // bytecode offset.
4781 // This method MUST be called from opcodes that AsmJS will emit a call site
4782 // line number for, or else the arrays will get out of sync. Other opcodes
4783 // must use `readBytecodeOffset` below.
4784 uint32_t readCallSiteLineOrBytecode() {
4785 if (!func_.callSiteLineNums.empty()) {
4786 return func_.callSiteLineNums[lastReadCallSite_++];
4788 return iter_.lastOpcodeOffset();
4791 // Return the current bytecode offset.
4792 uint32_t readBytecodeOffset() { return iter_.lastOpcodeOffset(); }
4794 TrapSiteInfo getTrapSiteInfo() {
4795 return TrapSiteInfo(wasm::BytecodeOffset(readBytecodeOffset()));
4798 #if DEBUG
4799 bool done() const { return iter_.done(); }
4800 #endif
4802 /*************************************************************************/
4803 private:
4804 [[nodiscard]] bool newBlock(MBasicBlock* pred, MBasicBlock** block,
4805 MBasicBlock::Kind kind = MBasicBlock::NORMAL) {
4806 *block = MBasicBlock::New(mirGraph(), info(), pred, kind);
4807 if (!*block) {
4808 return false;
4810 mirGraph().addBlock(*block);
4811 (*block)->setLoopDepth(loopDepth_);
4812 return true;
4815 [[nodiscard]] bool goToNewBlock(MBasicBlock* pred, MBasicBlock** block) {
4816 if (!newBlock(pred, block)) {
4817 return false;
4819 pred->end(MGoto::New(alloc(), *block));
4820 return true;
4823 [[nodiscard]] bool goToExistingBlock(MBasicBlock* prev, MBasicBlock* next) {
4824 MOZ_ASSERT(prev);
4825 MOZ_ASSERT(next);
4826 prev->end(MGoto::New(alloc(), next));
4827 return next->addPredecessor(alloc(), prev);
4830 [[nodiscard]] bool bindBranches(uint32_t absolute, DefVector* defs) {
4831 if (absolute >= blockPatches_.length() || blockPatches_[absolute].empty()) {
4832 return inDeadCode() || popPushedDefs(defs);
4835 ControlFlowPatchVector& patches = blockPatches_[absolute];
4836 MControlInstruction* ins = patches[0].ins;
4837 MBasicBlock* pred = ins->block();
4839 MBasicBlock* join = nullptr;
4840 if (!newBlock(pred, &join)) {
4841 return false;
4844 pred->mark();
4845 ins->replaceSuccessor(patches[0].index, join);
4847 for (size_t i = 1; i < patches.length(); i++) {
4848 ins = patches[i].ins;
4850 pred = ins->block();
4851 if (!pred->isMarked()) {
4852 if (!join->addPredecessor(alloc(), pred)) {
4853 return false;
4855 pred->mark();
4858 ins->replaceSuccessor(patches[i].index, join);
4861 MOZ_ASSERT_IF(curBlock_, !curBlock_->isMarked());
4862 for (uint32_t i = 0; i < join->numPredecessors(); i++) {
4863 join->getPredecessor(i)->unmark();
4866 if (curBlock_ && !goToExistingBlock(curBlock_, join)) {
4867 return false;
4870 curBlock_ = join;
4872 if (!popPushedDefs(defs)) {
4873 return false;
4876 patches.clear();
4877 return true;
4881 template <>
4882 MDefinition* FunctionCompiler::unary<MToFloat32>(MDefinition* op) {
4883 if (inDeadCode()) {
4884 return nullptr;
4886 auto* ins = MToFloat32::New(alloc(), op, mustPreserveNaN(op->type()));
4887 curBlock_->add(ins);
4888 return ins;
4891 template <>
4892 MDefinition* FunctionCompiler::unary<MWasmBuiltinTruncateToInt32>(
4893 MDefinition* op) {
4894 if (inDeadCode()) {
4895 return nullptr;
4897 auto* ins = MWasmBuiltinTruncateToInt32::New(alloc(), op, instancePointer_,
4898 bytecodeOffset());
4899 curBlock_->add(ins);
4900 return ins;
4903 template <>
4904 MDefinition* FunctionCompiler::unary<MNot>(MDefinition* op) {
4905 if (inDeadCode()) {
4906 return nullptr;
4908 auto* ins = MNot::NewInt32(alloc(), op);
4909 curBlock_->add(ins);
4910 return ins;
4913 template <>
4914 MDefinition* FunctionCompiler::unary<MAbs>(MDefinition* op, MIRType type) {
4915 if (inDeadCode()) {
4916 return nullptr;
4918 auto* ins = MAbs::NewWasm(alloc(), op, type);
4919 curBlock_->add(ins);
4920 return ins;
4923 } // end anonymous namespace
4925 static bool EmitI32Const(FunctionCompiler& f) {
4926 int32_t i32;
4927 if (!f.iter().readI32Const(&i32)) {
4928 return false;
4931 f.iter().setResult(f.constantI32(i32));
4932 return true;
4935 static bool EmitI64Const(FunctionCompiler& f) {
4936 int64_t i64;
4937 if (!f.iter().readI64Const(&i64)) {
4938 return false;
4941 f.iter().setResult(f.constantI64(i64));
4942 return true;
4945 static bool EmitF32Const(FunctionCompiler& f) {
4946 float f32;
4947 if (!f.iter().readF32Const(&f32)) {
4948 return false;
4951 f.iter().setResult(f.constantF32(f32));
4952 return true;
4955 static bool EmitF64Const(FunctionCompiler& f) {
4956 double f64;
4957 if (!f.iter().readF64Const(&f64)) {
4958 return false;
4961 f.iter().setResult(f.constantF64(f64));
4962 return true;
4965 static bool EmitBlock(FunctionCompiler& f) {
4966 ResultType params;
4967 return f.iter().readBlock(&params) && f.startBlock();
4970 static bool EmitLoop(FunctionCompiler& f) {
4971 ResultType params;
4972 if (!f.iter().readLoop(&params)) {
4973 return false;
4976 MBasicBlock* loopHeader;
4977 if (!f.startLoop(&loopHeader, params.length())) {
4978 return false;
4981 f.addInterruptCheck();
4983 f.iter().controlItem().block = loopHeader;
4984 return true;
4987 static bool EmitIf(FunctionCompiler& f) {
4988 ResultType params;
4989 MDefinition* condition = nullptr;
4990 if (!f.iter().readIf(&params, &condition)) {
4991 return false;
4994 MBasicBlock* elseBlock;
4995 if (!f.branchAndStartThen(condition, &elseBlock)) {
4996 return false;
4999 f.iter().controlItem().block = elseBlock;
5000 return true;
5003 static bool EmitElse(FunctionCompiler& f) {
5004 ResultType paramType;
5005 ResultType resultType;
5006 DefVector thenValues;
5007 if (!f.iter().readElse(&paramType, &resultType, &thenValues)) {
5008 return false;
5011 if (!f.pushDefs(thenValues)) {
5012 return false;
5015 Control& control = f.iter().controlItem();
5016 return f.switchToElse(control.block, &control.block);
5019 static bool EmitEnd(FunctionCompiler& f) {
5020 LabelKind kind;
5021 ResultType type;
5022 DefVector preJoinDefs;
5023 DefVector resultsForEmptyElse;
5024 if (!f.iter().readEnd(&kind, &type, &preJoinDefs, &resultsForEmptyElse)) {
5025 return false;
5028 Control& control = f.iter().controlItem();
5029 MBasicBlock* block = control.block;
5031 if (!f.pushDefs(preJoinDefs)) {
5032 return false;
5035 // Every label case is responsible for popping the control item at the
5036 // appropriate time for that label case.
5037 DefVector postJoinDefs;
5038 switch (kind) {
5039 case LabelKind::Body:
5040 MOZ_ASSERT(!control.tryControl);
5041 if (!f.emitBodyDelegateThrowPad(control)) {
5042 return false;
5044 if (!f.finishBlock(&postJoinDefs)) {
5045 return false;
5047 if (!f.returnValues(postJoinDefs)) {
5048 return false;
5050 f.iter().popEnd();
5051 MOZ_ASSERT(f.iter().controlStackEmpty());
5052 return f.iter().endFunction(f.iter().end());
5053 case LabelKind::Block:
5054 MOZ_ASSERT(!control.tryControl);
5055 if (!f.finishBlock(&postJoinDefs)) {
5056 return false;
5058 f.iter().popEnd();
5059 break;
5060 case LabelKind::Loop:
5061 MOZ_ASSERT(!control.tryControl);
5062 if (!f.closeLoop(block, &postJoinDefs)) {
5063 return false;
5065 f.iter().popEnd();
5066 break;
5067 case LabelKind::Then: {
5068 MOZ_ASSERT(!control.tryControl);
5069 // If we didn't see an Else, create a trivial else block so that we create
5070 // a diamond anyway, to preserve Ion invariants.
5071 if (!f.switchToElse(block, &block)) {
5072 return false;
5075 if (!f.pushDefs(resultsForEmptyElse)) {
5076 return false;
5079 if (!f.joinIfElse(block, &postJoinDefs)) {
5080 return false;
5082 f.iter().popEnd();
5083 break;
5085 case LabelKind::Else:
5086 MOZ_ASSERT(!control.tryControl);
5087 if (!f.joinIfElse(block, &postJoinDefs)) {
5088 return false;
5090 f.iter().popEnd();
5091 break;
5092 case LabelKind::Try:
5093 case LabelKind::Catch:
5094 case LabelKind::CatchAll:
5095 MOZ_ASSERT(control.tryControl);
5096 if (!f.finishTryCatch(kind, control, &postJoinDefs)) {
5097 return false;
5099 f.freeTryControl(std::move(control.tryControl));
5100 f.iter().popEnd();
5101 break;
5102 case LabelKind::TryTable:
5103 MOZ_ASSERT(control.tryControl);
5104 if (!f.finishTryTable(control, &postJoinDefs)) {
5105 return false;
5107 f.freeTryControl(std::move(control.tryControl));
5108 f.iter().popEnd();
5109 break;
5112 MOZ_ASSERT_IF(!f.inDeadCode(), postJoinDefs.length() == type.length());
5113 f.iter().setResults(postJoinDefs.length(), postJoinDefs);
5115 return true;
5118 static bool EmitBr(FunctionCompiler& f) {
5119 uint32_t relativeDepth;
5120 ResultType type;
5121 DefVector values;
5122 if (!f.iter().readBr(&relativeDepth, &type, &values)) {
5123 return false;
5126 return f.br(relativeDepth, values);
5129 static bool EmitBrIf(FunctionCompiler& f) {
5130 uint32_t relativeDepth;
5131 ResultType type;
5132 DefVector values;
5133 MDefinition* condition;
5134 if (!f.iter().readBrIf(&relativeDepth, &type, &values, &condition)) {
5135 return false;
5138 return f.brIf(relativeDepth, values, condition);
5141 static bool EmitBrTable(FunctionCompiler& f) {
5142 Uint32Vector depths;
5143 uint32_t defaultDepth;
5144 ResultType branchValueType;
5145 DefVector branchValues;
5146 MDefinition* index;
5147 if (!f.iter().readBrTable(&depths, &defaultDepth, &branchValueType,
5148 &branchValues, &index)) {
5149 return false;
5152 // If all the targets are the same, or there are no targets, we can just
5153 // use a goto. This is not just an optimization: MaybeFoldConditionBlock
5154 // assumes that tables have more than one successor.
5155 bool allSameDepth = true;
5156 for (uint32_t depth : depths) {
5157 if (depth != defaultDepth) {
5158 allSameDepth = false;
5159 break;
5163 if (allSameDepth) {
5164 return f.br(defaultDepth, branchValues);
5167 return f.brTable(index, defaultDepth, depths, branchValues);
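// Example of the fold above (exposition only): a table such as
//
//   br_table 3 3 3 3   ;; every target, including the default, is depth 3
//
// is emitted as a single unconditional branch to depth 3, which avoids
// building a multi-way branch at all and keeps MaybeFoldConditionBlock's
// assumption that real tables have more than one successor.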
5170 static bool EmitReturn(FunctionCompiler& f) {
5171 DefVector values;
5172 if (!f.iter().readReturn(&values)) {
5173 return false;
5176 return f.returnValues(values);
5179 static bool EmitUnreachable(FunctionCompiler& f) {
5180 if (!f.iter().readUnreachable()) {
5181 return false;
5184 f.unreachableTrap();
5185 return true;
5188 static bool EmitTry(FunctionCompiler& f) {
5189 ResultType params;
5190 if (!f.iter().readTry(&params)) {
5191 return false;
5194 return f.startTry();
5197 static bool EmitCatch(FunctionCompiler& f) {
5198 LabelKind kind;
5199 uint32_t tagIndex;
5200 ResultType paramType, resultType;
5201 DefVector tryValues;
5202 if (!f.iter().readCatch(&kind, &tagIndex, &paramType, &resultType,
5203 &tryValues)) {
5204 return false;
5207 // Push the results of the previous block, to properly join control flow
5208 // after the try and after each handler, as well as potential control flow
5209 // patches from other instructions. This is similar to what is done for
5210 // if-then-else control flow and for most other control flow joins.
5211 if (!f.pushDefs(tryValues)) {
5212 return false;
5215 return f.switchToCatch(f.iter().controlItem(), kind, tagIndex);
5218 static bool EmitCatchAll(FunctionCompiler& f) {
5219 LabelKind kind;
5220 ResultType paramType, resultType;
5221 DefVector tryValues;
5222 if (!f.iter().readCatchAll(&kind, &paramType, &resultType, &tryValues)) {
5223 return false;
5226 // Push the results of the previous block, to properly join control flow
5227 // after the try and after each handler, as well as potential control flow
5228 // patches from other instructions.
5229 if (!f.pushDefs(tryValues)) {
5230 return false;
5233 return f.switchToCatch(f.iter().controlItem(), kind, CatchAllIndex);
5236 static bool EmitTryTable(FunctionCompiler& f) {
5237 ResultType params;
5238 TryTableCatchVector catches;
5239 if (!f.iter().readTryTable(&params, &catches)) {
5240 return false;
5243 return f.startTryTable(std::move(catches));
5246 static bool EmitDelegate(FunctionCompiler& f) {
5247 uint32_t relativeDepth;
5248 ResultType resultType;
5249 DefVector tryValues;
5250 if (!f.iter().readDelegate(&relativeDepth, &resultType, &tryValues)) {
5251 return false;
5254 Control& control = f.iter().controlItem();
5255 MBasicBlock* block = control.block;
5256 MOZ_ASSERT(control.tryControl);
5258 // Unless the entire try-delegate is dead code, delegate any pad-patches from
5259 // this try to the next try-block above relativeDepth.
5260 if (block) {
5261 ControlInstructionVector& delegatePadPatches =
5262 control.tryControl->landingPadPatches;
5263 if (!f.delegatePadPatches(delegatePadPatches, relativeDepth)) {
5264 return false;
5267 f.freeTryControl(std::move(control.tryControl));
5268 f.iter().popDelegate();
5270 // Push the results of the previous block, and join control flow with
5271 // potential control flow patches from other instructions in the try code.
5272 // This is similar to what is done for EmitEnd.
5273 if (!f.pushDefs(tryValues)) {
5274 return false;
5276 DefVector postJoinDefs;
5277 if (!f.finishBlock(&postJoinDefs)) {
5278 return false;
5280 MOZ_ASSERT_IF(!f.inDeadCode(), postJoinDefs.length() == resultType.length());
5281 f.iter().setResults(postJoinDefs.length(), postJoinDefs);
5283 return true;
5286 static bool EmitThrow(FunctionCompiler& f) {
5287 uint32_t tagIndex;
5288 DefVector argValues;
5289 if (!f.iter().readThrow(&tagIndex, &argValues)) {
5290 return false;
5293 return f.emitThrow(tagIndex, argValues);
5296 static bool EmitThrowRef(FunctionCompiler& f) {
5297 MDefinition* exnRef;
5298 if (!f.iter().readThrowRef(&exnRef)) {
5299 return false;
5302 return f.emitThrowRef(exnRef);
5305 static bool EmitRethrow(FunctionCompiler& f) {
5306 uint32_t relativeDepth;
5307 if (!f.iter().readRethrow(&relativeDepth)) {
5308 return false;
5311 return f.emitRethrow(relativeDepth);
5314 static bool EmitCallArgs(FunctionCompiler& f, const FuncType& funcType,
5315 const DefVector& args, CallCompileState* call) {
5316 for (size_t i = 0, n = funcType.args().length(); i < n; ++i) {
5317 if (!f.mirGen().ensureBallast()) {
5318 return false;
5320 if (!f.passArg(args[i], funcType.args()[i], call)) {
5321 return false;
5325 ResultType resultType = ResultType::Vector(funcType.results());
5326 if (!f.passStackResultAreaCallArg(resultType, call)) {
5327 return false;
5330 return f.finishCall(call);
5333 static bool EmitCall(FunctionCompiler& f, bool asmJSFuncDef) {
5334 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
5336 uint32_t funcIndex;
5337 DefVector args;
5338 if (asmJSFuncDef) {
5339 if (!f.iter().readOldCallDirect(f.moduleEnv().numFuncImports, &funcIndex,
5340 &args)) {
5341 return false;
5343 } else {
5344 if (!f.iter().readCall(&funcIndex, &args)) {
5345 return false;
5349 if (f.inDeadCode()) {
5350 return true;
5353 const FuncType& funcType = *f.moduleEnv().funcs[funcIndex].type;
5355 CallCompileState call;
5356 if (!EmitCallArgs(f, funcType, args, &call)) {
5357 return false;
5360 DefVector results;
5361 if (f.moduleEnv().funcIsImport(funcIndex)) {
5362 uint32_t instanceDataOffset =
5363 f.moduleEnv().offsetOfFuncImportInstanceData(funcIndex);
5364 if (!f.callImport(instanceDataOffset, lineOrBytecode, call, funcType,
5365 &results)) {
5366 return false;
5368 } else {
5369 if (!f.callDirect(funcType, funcIndex, lineOrBytecode, call, &results)) {
5370 return false;
5374 f.iter().setResults(results.length(), results);
5375 return true;
5378 static bool EmitCallIndirect(FunctionCompiler& f, bool oldStyle) {
5379 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
5381 uint32_t funcTypeIndex;
5382 uint32_t tableIndex;
5383 MDefinition* callee;
5384 DefVector args;
5385 if (oldStyle) {
5386 tableIndex = 0;
5387 if (!f.iter().readOldCallIndirect(&funcTypeIndex, &callee, &args)) {
5388 return false;
5390 } else {
5391 if (!f.iter().readCallIndirect(&funcTypeIndex, &tableIndex, &callee,
5392 &args)) {
5393 return false;
5397 if (f.inDeadCode()) {
5398 return true;
5401 const FuncType& funcType = (*f.moduleEnv().types)[funcTypeIndex].funcType();
5403 CallCompileState call;
5404 if (!EmitCallArgs(f, funcType, args, &call)) {
5405 return false;
5408 DefVector results;
5409 if (!f.callIndirect(funcTypeIndex, tableIndex, callee, lineOrBytecode, call,
5410 &results)) {
5411 return false;
5414 f.iter().setResults(results.length(), results);
5415 return true;
5418 #ifdef ENABLE_WASM_TAIL_CALLS
5419 static bool EmitReturnCall(FunctionCompiler& f) {
5420 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
5422 uint32_t funcIndex;
5423 DefVector args;
5424 if (!f.iter().readReturnCall(&funcIndex, &args)) {
5425 return false;
5428 if (f.inDeadCode()) {
5429 return true;
5432 const FuncType& funcType = *f.moduleEnv().funcs[funcIndex].type;
5434 CallCompileState call;
5435 f.markReturnCall(&call);
5436 if (!EmitCallArgs(f, funcType, args, &call)) {
5437 return false;
5440 DefVector results;
5441 if (f.moduleEnv().funcIsImport(funcIndex)) {
5442 uint32_t globalDataOffset =
5443 f.moduleEnv().offsetOfFuncImportInstanceData(funcIndex);
5444 if (!f.returnCallImport(globalDataOffset, lineOrBytecode, call, funcType,
5445 &results)) {
5446 return false;
5448 } else {
5449 if (!f.returnCallDirect(funcType, funcIndex, lineOrBytecode, call,
5450 &results)) {
5451 return false;
5454 return true;
5457 static bool EmitReturnCallIndirect(FunctionCompiler& f) {
5458 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
5460 uint32_t funcTypeIndex;
5461 uint32_t tableIndex;
5462 MDefinition* callee;
5463 DefVector args;
5464 if (!f.iter().readReturnCallIndirect(&funcTypeIndex, &tableIndex, &callee,
5465 &args)) {
5466 return false;
5469 if (f.inDeadCode()) {
5470 return true;
5473 const FuncType& funcType = (*f.moduleEnv().types)[funcTypeIndex].funcType();
5475 CallCompileState call;
5476 f.markReturnCall(&call);
5477 if (!EmitCallArgs(f, funcType, args, &call)) {
5478 return false;
5481 DefVector results;
5482 return f.returnCallIndirect(funcTypeIndex, tableIndex, callee, lineOrBytecode,
5483 call, &results);
5485 #endif
5487 #if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_FUNCTION_REFERENCES)
5488 static bool EmitReturnCallRef(FunctionCompiler& f) {
5489 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
5491 const FuncType* funcType;
5492 MDefinition* callee;
5493 DefVector args;
5495 if (!f.iter().readReturnCallRef(&funcType, &callee, &args)) {
5496 return false;
5499 if (f.inDeadCode()) {
5500 return true;
5503 CallCompileState call;
5504 f.markReturnCall(&call);
5505 if (!EmitCallArgs(f, *funcType, args, &call)) {
5506 return false;
5509 DefVector results;
5510 return f.returnCallRef(*funcType, callee, lineOrBytecode, call, &results);
5512 #endif
5514 static bool EmitGetLocal(FunctionCompiler& f) {
5515 uint32_t id;
5516 if (!f.iter().readGetLocal(f.locals(), &id)) {
5517 return false;
5520 f.iter().setResult(f.getLocalDef(id));
5521 return true;
5524 static bool EmitSetLocal(FunctionCompiler& f) {
5525 uint32_t id;
5526 MDefinition* value;
5527 if (!f.iter().readSetLocal(f.locals(), &id, &value)) {
5528 return false;
5531 f.assign(id, value);
5532 return true;
5535 static bool EmitTeeLocal(FunctionCompiler& f) {
5536 uint32_t id;
5537 MDefinition* value;
5538 if (!f.iter().readTeeLocal(f.locals(), &id, &value)) {
5539 return false;
5542 f.assign(id, value);
5543 return true;
5546 static bool EmitGetGlobal(FunctionCompiler& f) {
5547 uint32_t id;
5548 if (!f.iter().readGetGlobal(&id)) {
5549 return false;
5552 const GlobalDesc& global = f.moduleEnv().globals[id];
5553 if (!global.isConstant()) {
5554 f.iter().setResult(f.loadGlobalVar(global.offset(), !global.isMutable(),
5555 global.isIndirect(),
5556 global.type().toMIRType()));
5557 return true;
5560 LitVal value = global.constantValue();
5562 MDefinition* result;
5563 switch (value.type().kind()) {
5564 case ValType::I32:
5565 result = f.constantI32(int32_t(value.i32()));
5566 break;
5567 case ValType::I64:
5568 result = f.constantI64(int64_t(value.i64()));
5569 break;
5570 case ValType::F32:
5571 result = f.constantF32(value.f32());
5572 break;
5573 case ValType::F64:
5574 result = f.constantF64(value.f64());
5575 break;
5576 case ValType::V128:
5577 #ifdef ENABLE_WASM_SIMD
5578 result = f.constantV128(value.v128());
5579 break;
5580 #else
5581 return f.iter().fail("Ion has no SIMD support yet");
5582 #endif
5583 case ValType::Ref:
5584 MOZ_ASSERT(value.ref().isNull());
5585 result = f.constantNullRef();
5586 break;
5587 default:
5588 MOZ_CRASH("unexpected type in EmitGetGlobal");
5591 f.iter().setResult(result);
5592 return true;
5595 static bool EmitSetGlobal(FunctionCompiler& f) {
5596 uint32_t bytecodeOffset = f.readBytecodeOffset();
5598 uint32_t id;
5599 MDefinition* value;
5600 if (!f.iter().readSetGlobal(&id, &value)) {
5601 return false;
5604 const GlobalDesc& global = f.moduleEnv().globals[id];
5605 MOZ_ASSERT(global.isMutable());
5606 return f.storeGlobalVar(bytecodeOffset, global.offset(), global.isIndirect(),
5607 value);
5610 static bool EmitTeeGlobal(FunctionCompiler& f) {
5611 uint32_t bytecodeOffset = f.readBytecodeOffset();
5613 uint32_t id;
5614 MDefinition* value;
5615 if (!f.iter().readTeeGlobal(&id, &value)) {
5616 return false;
5619 const GlobalDesc& global = f.moduleEnv().globals[id];
5620 MOZ_ASSERT(global.isMutable());
5622 return f.storeGlobalVar(bytecodeOffset, global.offset(), global.isIndirect(),
5623 value);
5626 template <typename MIRClass>
5627 static bool EmitUnary(FunctionCompiler& f, ValType operandType) {
5628 MDefinition* input;
5629 if (!f.iter().readUnary(operandType, &input)) {
5630 return false;
5633 f.iter().setResult(f.unary<MIRClass>(input));
5634 return true;
5637 template <typename MIRClass>
5638 static bool EmitConversion(FunctionCompiler& f, ValType operandType,
5639 ValType resultType) {
5640 MDefinition* input;
5641 if (!f.iter().readConversion(operandType, resultType, &input)) {
5642 return false;
5645 f.iter().setResult(f.unary<MIRClass>(input));
5646 return true;
5649 template <typename MIRClass>
5650 static bool EmitUnaryWithType(FunctionCompiler& f, ValType operandType,
5651 MIRType mirType) {
5652 MDefinition* input;
5653 if (!f.iter().readUnary(operandType, &input)) {
5654 return false;
5657 f.iter().setResult(f.unary<MIRClass>(input, mirType));
5658 return true;
5661 template <typename MIRClass>
5662 static bool EmitConversionWithType(FunctionCompiler& f, ValType operandType,
5663 ValType resultType, MIRType mirType) {
5664 MDefinition* input;
5665 if (!f.iter().readConversion(operandType, resultType, &input)) {
5666 return false;
5669 f.iter().setResult(f.unary<MIRClass>(input, mirType));
5670 return true;
5673 static bool EmitTruncate(FunctionCompiler& f, ValType operandType,
5674 ValType resultType, bool isUnsigned,
5675 bool isSaturating) {
5676 MDefinition* input = nullptr;
5677 if (!f.iter().readConversion(operandType, resultType, &input)) {
5678 return false;
5681 TruncFlags flags = 0;
5682 if (isUnsigned) {
5683 flags |= TRUNC_UNSIGNED;
5685 if (isSaturating) {
5686 flags |= TRUNC_SATURATING;
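  // Illustrative note (not in the original source): the four wasm truncation
  // families map onto these two flag bits, e.g. i32.trunc_f64_s sets neither
  // flag, i32.trunc_f64_u sets TRUNC_UNSIGNED, i32.trunc_sat_f64_s sets
  // TRUNC_SATURATING, and i32.trunc_sat_f64_u sets both. The non-saturating
  // forms trap on NaN or out-of-range inputs; the saturating forms clamp.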
5688 if (resultType == ValType::I32) {
5689 if (f.moduleEnv().isAsmJS()) {
5690 if (input && (input->type() == MIRType::Double ||
5691 input->type() == MIRType::Float32)) {
5692 f.iter().setResult(f.unary<MWasmBuiltinTruncateToInt32>(input));
5693 } else {
5694 f.iter().setResult(f.unary<MTruncateToInt32>(input));
5696 } else {
5697 f.iter().setResult(f.truncate<MWasmTruncateToInt32>(input, flags));
5699 } else {
5700 MOZ_ASSERT(resultType == ValType::I64);
5701 MOZ_ASSERT(!f.moduleEnv().isAsmJS());
5702 #if defined(JS_CODEGEN_ARM)
5703 f.iter().setResult(f.truncateWithInstance(input, flags));
5704 #else
5705 f.iter().setResult(f.truncate<MWasmTruncateToInt64>(input, flags));
5706 #endif
5708 return true;
5711 static bool EmitSignExtend(FunctionCompiler& f, uint32_t srcSize,
5712 uint32_t targetSize) {
5713 MDefinition* input;
5714 ValType type = targetSize == 4 ? ValType::I32 : ValType::I64;
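  // Illustrative note (not in the original source): srcSize and targetSize
  // are byte counts, so i32.extend8_s arrives here as (1, 4), i32.extend16_s
  // as (2, 4), and i64.extend8_s / i64.extend16_s / i64.extend32_s as
  // (1, 8), (2, 8) and (4, 8) respectively.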
5715 if (!f.iter().readConversion(type, type, &input)) {
5716 return false;
5719 f.iter().setResult(f.signExtend(input, srcSize, targetSize));
5720 return true;
5723 static bool EmitExtendI32(FunctionCompiler& f, bool isUnsigned) {
5724 MDefinition* input;
5725 if (!f.iter().readConversion(ValType::I32, ValType::I64, &input)) {
5726 return false;
5729 f.iter().setResult(f.extendI32(input, isUnsigned));
5730 return true;
5733 static bool EmitConvertI64ToFloatingPoint(FunctionCompiler& f,
5734 ValType resultType, MIRType mirType,
5735 bool isUnsigned) {
5736 MDefinition* input;
5737 if (!f.iter().readConversion(ValType::I64, resultType, &input)) {
5738 return false;
5741 f.iter().setResult(f.convertI64ToFloatingPoint(input, mirType, isUnsigned));
5742 return true;
5745 static bool EmitReinterpret(FunctionCompiler& f, ValType resultType,
5746 ValType operandType, MIRType mirType) {
5747 MDefinition* input;
5748 if (!f.iter().readConversion(operandType, resultType, &input)) {
5749 return false;
5752 f.iter().setResult(f.unary<MWasmReinterpret>(input, mirType));
5753 return true;
5756 static bool EmitAdd(FunctionCompiler& f, ValType type, MIRType mirType) {
5757 MDefinition* lhs;
5758 MDefinition* rhs;
5759 if (!f.iter().readBinary(type, &lhs, &rhs)) {
5760 return false;
5763 f.iter().setResult(f.add(lhs, rhs, mirType));
5764 return true;
5767 static bool EmitSub(FunctionCompiler& f, ValType type, MIRType mirType) {
5768 MDefinition* lhs;
5769 MDefinition* rhs;
5770 if (!f.iter().readBinary(type, &lhs, &rhs)) {
5771 return false;
5774 f.iter().setResult(f.sub(lhs, rhs, mirType));
5775 return true;
5778 static bool EmitRotate(FunctionCompiler& f, ValType type, bool isLeftRotation) {
5779 MDefinition* lhs;
5780 MDefinition* rhs;
5781 if (!f.iter().readBinary(type, &lhs, &rhs)) {
5782 return false;
5785 MDefinition* result = f.rotate(lhs, rhs, type.toMIRType(), isLeftRotation);
5786 f.iter().setResult(result);
5787 return true;
5790 static bool EmitBitNot(FunctionCompiler& f, ValType operandType) {
5791 MDefinition* input;
5792 if (!f.iter().readUnary(operandType, &input)) {
5793 return false;
5796 f.iter().setResult(f.bitnot(input));
5797 return true;
5800 static bool EmitBitwiseAndOrXor(FunctionCompiler& f, ValType operandType,
5801 MIRType mirType,
5802 MWasmBinaryBitwise::SubOpcode subOpc) {
5803 MDefinition* lhs;
5804 MDefinition* rhs;
5805 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5806 return false;
5809 f.iter().setResult(f.binary<MWasmBinaryBitwise>(lhs, rhs, mirType, subOpc));
5810 return true;
5813 template <typename MIRClass>
5814 static bool EmitShift(FunctionCompiler& f, ValType operandType,
5815 MIRType mirType) {
5816 MDefinition* lhs;
5817 MDefinition* rhs;
5818 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5819 return false;
5822 f.iter().setResult(f.binary<MIRClass>(lhs, rhs, mirType));
5823 return true;
5826 static bool EmitUrsh(FunctionCompiler& f, ValType operandType,
5827 MIRType mirType) {
5828 MDefinition* lhs;
5829 MDefinition* rhs;
5830 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5831 return false;
5834 f.iter().setResult(f.ursh(lhs, rhs, mirType));
5835 return true;
5838 static bool EmitMul(FunctionCompiler& f, ValType operandType, MIRType mirType) {
5839 MDefinition* lhs;
5840 MDefinition* rhs;
5841 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5842 return false;
5845 f.iter().setResult(
5846 f.mul(lhs, rhs, mirType,
5847 mirType == MIRType::Int32 ? MMul::Integer : MMul::Normal));
5848 return true;
5851 static bool EmitDiv(FunctionCompiler& f, ValType operandType, MIRType mirType,
5852 bool isUnsigned) {
5853 MDefinition* lhs;
5854 MDefinition* rhs;
5855 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5856 return false;
5859 f.iter().setResult(f.div(lhs, rhs, mirType, isUnsigned));
5860 return true;
5863 static bool EmitRem(FunctionCompiler& f, ValType operandType, MIRType mirType,
5864 bool isUnsigned) {
5865 MDefinition* lhs;
5866 MDefinition* rhs;
5867 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5868 return false;
5871 f.iter().setResult(f.mod(lhs, rhs, mirType, isUnsigned));
5872 return true;
5875 static bool EmitMinMax(FunctionCompiler& f, ValType operandType,
5876 MIRType mirType, bool isMax) {
5877 MDefinition* lhs;
5878 MDefinition* rhs;
5879 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5880 return false;
5883 f.iter().setResult(f.minMax(lhs, rhs, mirType, isMax));
5884 return true;
5887 static bool EmitCopySign(FunctionCompiler& f, ValType operandType) {
5888 MDefinition* lhs;
5889 MDefinition* rhs;
5890 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5891 return false;
5894 f.iter().setResult(f.binary<MCopySign>(lhs, rhs, operandType.toMIRType()));
5895 return true;
5898 static bool EmitComparison(FunctionCompiler& f, ValType operandType,
5899 JSOp compareOp, MCompare::CompareType compareType) {
5900 MDefinition* lhs;
5901 MDefinition* rhs;
5902 if (!f.iter().readComparison(operandType, &lhs, &rhs)) {
5903 return false;
5906 f.iter().setResult(f.compare(lhs, rhs, compareOp, compareType));
5907 return true;
5910 static bool EmitSelect(FunctionCompiler& f, bool typed) {
5911 StackType type;
5912 MDefinition* trueValue;
5913 MDefinition* falseValue;
5914 MDefinition* condition;
5915 if (!f.iter().readSelect(typed, &type, &trueValue, &falseValue, &condition)) {
5916 return false;
5919 f.iter().setResult(f.select(trueValue, falseValue, condition));
5920 return true;
5923 static bool EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType) {
5924 LinearMemoryAddress<MDefinition*> addr;
5925 if (!f.iter().readLoad(type, Scalar::byteSize(viewType), &addr)) {
5926 return false;
5929 MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
5930 f.bytecodeIfNotAsmJS(),
5931 f.hugeMemoryEnabled(addr.memoryIndex));
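  // Illustrative note (not in the original source): the access descriptor
  // carries what bounds-check and trap handling need: the memory index, the
  // scalar view type, alignment and constant offset, a trap bytecode offset
  // (omitted for asm.js, whose out-of-bounds accesses do not trap), and
  // whether this memory uses the huge-memory guard-region scheme that lets
  // most explicit bounds checks be elided.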
5932 auto* ins = f.load(addr.base, &access, type);
5933 if (!f.inDeadCode() && !ins) {
5934 return false;
5937 f.iter().setResult(ins);
5938 return true;
5941 static bool EmitStore(FunctionCompiler& f, ValType resultType,
5942 Scalar::Type viewType) {
5943 LinearMemoryAddress<MDefinition*> addr;
5944 MDefinition* value;
5945 if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr,
5946 &value)) {
5947 return false;
5950 MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
5951 f.bytecodeIfNotAsmJS(),
5952 f.hugeMemoryEnabled(addr.memoryIndex));
5954 f.store(addr.base, &access, value);
5955 return true;
5958 static bool EmitTeeStore(FunctionCompiler& f, ValType resultType,
5959 Scalar::Type viewType) {
5960 LinearMemoryAddress<MDefinition*> addr;
5961 MDefinition* value;
5962 if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr,
5963 &value)) {
5964 return false;
5967 MOZ_ASSERT(f.isMem32(addr.memoryIndex)); // asm.js opcode
5968 MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
5969 f.bytecodeIfNotAsmJS(),
5970 f.hugeMemoryEnabled(addr.memoryIndex));
5972 f.store(addr.base, &access, value);
5973 return true;
5976 static bool EmitTeeStoreWithCoercion(FunctionCompiler& f, ValType resultType,
5977 Scalar::Type viewType) {
5978 LinearMemoryAddress<MDefinition*> addr;
5979 MDefinition* value;
5980 if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr,
5981 &value)) {
5982 return false;
5985 if (resultType == ValType::F32 && viewType == Scalar::Float64) {
5986 value = f.unary<MToDouble>(value);
5987 } else if (resultType == ValType::F64 && viewType == Scalar::Float32) {
5988 value = f.unary<MToFloat32>(value);
5989 } else {
5990 MOZ_CRASH("unexpected coerced store");
5993 MOZ_ASSERT(f.isMem32(addr.memoryIndex)); // asm.js opcode
5994 MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
5995 f.bytecodeIfNotAsmJS(),
5996 f.hugeMemoryEnabled(addr.memoryIndex));
5998 f.store(addr.base, &access, value);
5999 return true;
6002 static bool TryInlineUnaryBuiltin(FunctionCompiler& f, SymbolicAddress callee,
6003 MDefinition* input) {
6004 if (!input) {
6005 return false;
6008 MOZ_ASSERT(IsFloatingPointType(input->type()));
6010 RoundingMode mode;
6011 if (!IsRoundingFunction(callee, &mode)) {
6012 return false;
6015 if (!MNearbyInt::HasAssemblerSupport(mode)) {
6016 return false;
6019 f.iter().setResult(f.nearbyInt(input, mode));
6020 return true;
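// Illustrative note (not in the original source): EmitUnaryMathBuiltinCall
// first tries TryInlineUnaryBuiltin above. If the callee is one of the
// rounding builtins (ceil/floor/trunc/nearest) and MNearbyInt has assembler
// support for that rounding mode on the target, the call collapses to a
// single MNearbyInt node; otherwise an out-of-line call to the C++ math
// routine is emitted.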
6023 static bool EmitUnaryMathBuiltinCall(FunctionCompiler& f,
6024 const SymbolicAddressSignature& callee) {
6025 MOZ_ASSERT(callee.numArgs == 1);
6027 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
6029 MDefinition* input;
6030 if (!f.iter().readUnary(ValType::fromMIRType(callee.argTypes[0]), &input)) {
6031 return false;
6034 if (TryInlineUnaryBuiltin(f, callee.identity, input)) {
6035 return true;
6038 CallCompileState call;
6039 if (!f.passArg(input, callee.argTypes[0], &call)) {
6040 return false;
6043 if (!f.finishCall(&call)) {
6044 return false;
6047 MDefinition* def;
6048 if (!f.builtinCall(callee, lineOrBytecode, call, &def)) {
6049 return false;
6052 f.iter().setResult(def);
6053 return true;
6056 static bool EmitBinaryMathBuiltinCall(FunctionCompiler& f,
6057 const SymbolicAddressSignature& callee) {
6058 MOZ_ASSERT(callee.numArgs == 2);
6059 MOZ_ASSERT(callee.argTypes[0] == callee.argTypes[1]);
6061 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
6063 CallCompileState call;
6064 MDefinition* lhs;
6065 MDefinition* rhs;
6066 // This call to readBinary assumes both operands have the same type.
6067 if (!f.iter().readBinary(ValType::fromMIRType(callee.argTypes[0]), &lhs,
6068 &rhs)) {
6069 return false;
6072 if (!f.passArg(lhs, callee.argTypes[0], &call)) {
6073 return false;
6076 if (!f.passArg(rhs, callee.argTypes[1], &call)) {
6077 return false;
6080 if (!f.finishCall(&call)) {
6081 return false;
6084 MDefinition* def;
6085 if (!f.builtinCall(callee, lineOrBytecode, call, &def)) {
6086 return false;
6089 f.iter().setResult(def);
6090 return true;
6093 static bool EmitMemoryGrow(FunctionCompiler& f) {
6094 uint32_t bytecodeOffset = f.readBytecodeOffset();
6096 MDefinition* delta;
6097 uint32_t memoryIndex;
6098 if (!f.iter().readMemoryGrow(&memoryIndex, &delta)) {
6099 return false;
6102 if (f.inDeadCode()) {
6103 return true;
6106 MDefinition* memoryIndexValue = f.constantI32(int32_t(memoryIndex));
6107 if (!memoryIndexValue) {
6108 return false;
6111 const SymbolicAddressSignature& callee =
6112 f.isMem32(memoryIndex) ? SASigMemoryGrowM32 : SASigMemoryGrowM64;
6114 MDefinition* ret;
6115 if (!f.emitInstanceCall2(bytecodeOffset, callee, delta, memoryIndexValue,
6116 &ret)) {
6117 return false;
6120 f.iter().setResult(ret);
6121 return true;
6124 static bool EmitMemorySize(FunctionCompiler& f) {
6125 uint32_t bytecodeOffset = f.readBytecodeOffset();
6127 uint32_t memoryIndex;
6128 if (!f.iter().readMemorySize(&memoryIndex)) {
6129 return false;
6132 if (f.inDeadCode()) {
6133 return true;
6136 MDefinition* memoryIndexValue = f.constantI32(int32_t(memoryIndex));
6137 if (!memoryIndexValue) {
6138 return false;
6141 const SymbolicAddressSignature& callee =
6142 f.isMem32(memoryIndex) ? SASigMemorySizeM32 : SASigMemorySizeM64;
6144 MDefinition* ret;
6145 if (!f.emitInstanceCall1(bytecodeOffset, callee, memoryIndexValue, &ret)) {
6146 return false;
6149 f.iter().setResult(ret);
6150 return true;
6153 static bool EmitAtomicCmpXchg(FunctionCompiler& f, ValType type,
6154 Scalar::Type viewType) {
6155 LinearMemoryAddress<MDefinition*> addr;
6156 MDefinition* oldValue;
6157 MDefinition* newValue;
6158 if (!f.iter().readAtomicCmpXchg(&addr, type, byteSize(viewType), &oldValue,
6159 &newValue)) {
6160 return false;
6163 MemoryAccessDesc access(
6164 addr.memoryIndex, viewType, addr.align, addr.offset, f.bytecodeOffset(),
6165 f.hugeMemoryEnabled(addr.memoryIndex), Synchronization::Full());
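  // Illustrative note (not in the original source): Synchronization::Full()
  // requests the full barrier pattern required for wasm atomic rmw/cmpxchg
  // (all wasm atomics are sequentially consistent); the plain atomic load
  // and store emitters below use Synchronization::Load() and
  // Synchronization::Store() respectively.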
6166 auto* ins =
6167 f.atomicCompareExchangeHeap(addr.base, &access, type, oldValue, newValue);
6168 if (!f.inDeadCode() && !ins) {
6169 return false;
6172 f.iter().setResult(ins);
6173 return true;
6176 static bool EmitAtomicLoad(FunctionCompiler& f, ValType type,
6177 Scalar::Type viewType) {
6178 LinearMemoryAddress<MDefinition*> addr;
6179 if (!f.iter().readAtomicLoad(&addr, type, byteSize(viewType))) {
6180 return false;
6183 MemoryAccessDesc access(
6184 addr.memoryIndex, viewType, addr.align, addr.offset, f.bytecodeOffset(),
6185 f.hugeMemoryEnabled(addr.memoryIndex), Synchronization::Load());
6186 auto* ins = f.load(addr.base, &access, type);
6187 if (!f.inDeadCode() && !ins) {
6188 return false;
6191 f.iter().setResult(ins);
6192 return true;
6195 static bool EmitAtomicRMW(FunctionCompiler& f, ValType type,
6196 Scalar::Type viewType, jit::AtomicOp op) {
6197 LinearMemoryAddress<MDefinition*> addr;
6198 MDefinition* value;
6199 if (!f.iter().readAtomicRMW(&addr, type, byteSize(viewType), &value)) {
6200 return false;
6203 MemoryAccessDesc access(
6204 addr.memoryIndex, viewType, addr.align, addr.offset, f.bytecodeOffset(),
6205 f.hugeMemoryEnabled(addr.memoryIndex), Synchronization::Full());
6206 auto* ins = f.atomicBinopHeap(op, addr.base, &access, type, value);
6207 if (!f.inDeadCode() && !ins) {
6208 return false;
6211 f.iter().setResult(ins);
6212 return true;
6215 static bool EmitAtomicStore(FunctionCompiler& f, ValType type,
6216 Scalar::Type viewType) {
6217 LinearMemoryAddress<MDefinition*> addr;
6218 MDefinition* value;
6219 if (!f.iter().readAtomicStore(&addr, type, byteSize(viewType), &value)) {
6220 return false;
6223 MemoryAccessDesc access(
6224 addr.memoryIndex, viewType, addr.align, addr.offset, f.bytecodeOffset(),
6225 f.hugeMemoryEnabled(addr.memoryIndex), Synchronization::Store());
6226 f.store(addr.base, &access, value);
6227 return true;
6230 static bool EmitWait(FunctionCompiler& f, ValType type, uint32_t byteSize) {
6231 MOZ_ASSERT(type == ValType::I32 || type == ValType::I64);
6232 MOZ_ASSERT(type.size() == byteSize);
6234 uint32_t bytecodeOffset = f.readBytecodeOffset();
6236 LinearMemoryAddress<MDefinition*> addr;
6237 MDefinition* expected;
6238 MDefinition* timeout;
6239 if (!f.iter().readWait(&addr, type, byteSize, &expected, &timeout)) {
6240 return false;
6243 if (f.inDeadCode()) {
6244 return true;
6247 MemoryAccessDesc access(addr.memoryIndex,
6248 type == ValType::I32 ? Scalar::Int32 : Scalar::Int64,
6249 addr.align, addr.offset, f.bytecodeOffset(),
6250 f.hugeMemoryEnabled(addr.memoryIndex));
6251 MDefinition* ptr = f.computeEffectiveAddress(addr.base, &access);
6252 if (!ptr) {
6253 return false;
6256 MDefinition* memoryIndex = f.constantI32(int32_t(addr.memoryIndex));
6257 if (!memoryIndex) {
6258 return false;
6261 const SymbolicAddressSignature& callee =
6262 f.isMem32(addr.memoryIndex)
6263 ? (type == ValType::I32 ? SASigWaitI32M32 : SASigWaitI64M32)
6264 : (type == ValType::I32 ? SASigWaitI32M64 : SASigWaitI64M64);
6266 MDefinition* ret;
6267 if (!f.emitInstanceCall4(bytecodeOffset, callee, ptr, expected, timeout,
6268 memoryIndex, &ret)) {
6269 return false;
6272 f.iter().setResult(ret);
6273 return true;
6276 static bool EmitFence(FunctionCompiler& f) {
6277 if (!f.iter().readFence()) {
6278 return false;
6281 f.fence();
6282 return true;
6285 static bool EmitWake(FunctionCompiler& f) {
6286 uint32_t bytecodeOffset = f.readBytecodeOffset();
6288 LinearMemoryAddress<MDefinition*> addr;
6289 MDefinition* count;
6290 if (!f.iter().readWake(&addr, &count)) {
6291 return false;
6294 if (f.inDeadCode()) {
6295 return true;
6298 MemoryAccessDesc access(addr.memoryIndex, Scalar::Int32, addr.align,
6299 addr.offset, f.bytecodeOffset(),
6300 f.hugeMemoryEnabled(addr.memoryIndex));
6301 MDefinition* ptr = f.computeEffectiveAddress(addr.base, &access);
6302 if (!ptr) {
6303 return false;
6306 MDefinition* memoryIndex = f.constantI32(int32_t(addr.memoryIndex));
6307 if (!memoryIndex) {
6308 return false;
6311 const SymbolicAddressSignature& callee =
6312 f.isMem32(addr.memoryIndex) ? SASigWakeM32 : SASigWakeM64;
6314 MDefinition* ret;
6315 if (!f.emitInstanceCall3(bytecodeOffset, callee, ptr, count, memoryIndex,
6316 &ret)) {
6317 return false;
6320 f.iter().setResult(ret);
6321 return true;
6324 static bool EmitAtomicXchg(FunctionCompiler& f, ValType type,
6325 Scalar::Type viewType) {
6326 LinearMemoryAddress<MDefinition*> addr;
6327 MDefinition* value;
6328 if (!f.iter().readAtomicRMW(&addr, type, byteSize(viewType), &value)) {
6329 return false;
6332 MemoryAccessDesc access(
6333 addr.memoryIndex, viewType, addr.align, addr.offset, f.bytecodeOffset(),
6334 f.hugeMemoryEnabled(addr.memoryIndex), Synchronization::Full());
6335 MDefinition* ins = f.atomicExchangeHeap(addr.base, &access, type, value);
6336 if (!f.inDeadCode() && !ins) {
6337 return false;
6340 f.iter().setResult(ins);
6341 return true;
6344 static bool EmitMemCopyCall(FunctionCompiler& f, uint32_t dstMemIndex,
6345 uint32_t srcMemIndex, MDefinition* dst,
6346 MDefinition* src, MDefinition* len) {
6347 uint32_t bytecodeOffset = f.readBytecodeOffset();
6349 if (dstMemIndex == srcMemIndex) {
6350 const SymbolicAddressSignature& callee =
6351 (f.moduleEnv().usesSharedMemory(dstMemIndex)
6352 ? (f.isMem32(dstMemIndex) ? SASigMemCopySharedM32
6353 : SASigMemCopySharedM64)
6354 : (f.isMem32(dstMemIndex) ? SASigMemCopyM32 : SASigMemCopyM64));
6355 MDefinition* memoryBase = f.memoryBase(dstMemIndex);
6356 if (!memoryBase) {
6357 return false;
6359 return f.emitInstanceCall4(bytecodeOffset, callee, dst, src, len,
6360 memoryBase);
6363 IndexType dstIndexType = f.moduleEnv().memories[dstMemIndex].indexType();
6364 IndexType srcIndexType = f.moduleEnv().memories[srcMemIndex].indexType();
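  // Illustrative note (not in the original source): a copy between two
  // different memories goes through the generic SASigMemCopyAny builtin,
  // which takes 64-bit offsets and length, so operands coming from a
  // 32-bit-indexed memory are zero-extended below before the call.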
6366 if (dstIndexType == IndexType::I32) {
6367 dst = f.extendI32(dst, /*isUnsigned=*/true);
6368 if (!dst) {
6369 return false;
6372 if (srcIndexType == IndexType::I32) {
6373 src = f.extendI32(src, /*isUnsigned=*/true);
6374 if (!src) {
6375 return false;
6378 if (dstIndexType == IndexType::I32 || srcIndexType == IndexType::I32) {
6379 len = f.extendI32(len, /*isUnsigned=*/true);
6380 if (!len) {
6381 return false;
6385 MDefinition* dstMemIndexValue = f.constantI32(int32_t(dstMemIndex));
6386 if (!dstMemIndexValue) {
6387 return false;
6390 MDefinition* srcMemIndexValue = f.constantI32(int32_t(srcMemIndex));
6391 if (!srcMemIndexValue) {
6392 return false;
6395 return f.emitInstanceCall5(bytecodeOffset, SASigMemCopyAny, dst, src, len,
6396 dstMemIndexValue, srcMemIndexValue);
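// Illustrative note (not in the original source): EmitMemCopyInline below
// replaces a memory.copy whose length is a small compile-time constant with
// straight-line loads and stores, avoiding the instance call made by
// EmitMemCopyCall. EmitMemCopy only takes this path when the source and
// destination are the same memory and the constant length is non-zero and at
// most MaxInlineMemoryCopyLength.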
6399 static bool EmitMemCopyInline(FunctionCompiler& f, uint32_t memoryIndex,
6400 MDefinition* dst, MDefinition* src,
6401 uint32_t length) {
6402 MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryCopyLength);
6404 // Compute the number of copies of each width we will need to do
6405 size_t remainder = length;
6406 #ifdef ENABLE_WASM_SIMD
6407 size_t numCopies16 = 0;
6408 if (MacroAssembler::SupportsFastUnalignedFPAccesses()) {
6409 numCopies16 = remainder / sizeof(V128);
6410 remainder %= sizeof(V128);
6412 #endif
6413 #ifdef JS_64BIT
6414 size_t numCopies8 = remainder / sizeof(uint64_t);
6415 remainder %= sizeof(uint64_t);
6416 #endif
6417 size_t numCopies4 = remainder / sizeof(uint32_t);
6418 remainder %= sizeof(uint32_t);
6419 size_t numCopies2 = remainder / sizeof(uint16_t);
6420 remainder %= sizeof(uint16_t);
6421 size_t numCopies1 = remainder;
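  // Illustrative example (not in the original source): with SIMD available
  // and a 64-bit target, a constant length of 31 decomposes as one 16-byte,
  // one 8-byte, one 4-byte, one 2-byte and one 1-byte transfer
  // (16 + 8 + 4 + 2 + 1 = 31), i.e. five loads followed by five stores in
  // place of an instance call.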
6423 // Load all source bytes from low to high using the widest transfer width we
6424 // can for the system. We will trap without writing anything if any source
6425 // byte is out-of-bounds.
6426 size_t offset = 0;
6427 DefVector loadedValues;
6429 #ifdef ENABLE_WASM_SIMD
6430 for (uint32_t i = 0; i < numCopies16; i++) {
6431 MemoryAccessDesc access(memoryIndex, Scalar::Simd128, 1, offset,
6432 f.bytecodeOffset(),
6433 f.hugeMemoryEnabled(memoryIndex));
6434 auto* load = f.load(src, &access, ValType::V128);
6435 if (!load || !loadedValues.append(load)) {
6436 return false;
6439 offset += sizeof(V128);
6441 #endif
6443 #ifdef JS_64BIT
6444 for (uint32_t i = 0; i < numCopies8; i++) {
6445 MemoryAccessDesc access(memoryIndex, Scalar::Int64, 1, offset,
6446 f.bytecodeOffset(),
6447 f.hugeMemoryEnabled(memoryIndex));
6448 auto* load = f.load(src, &access, ValType::I64);
6449 if (!load || !loadedValues.append(load)) {
6450 return false;
6453 offset += sizeof(uint64_t);
6455 #endif
6457 for (uint32_t i = 0; i < numCopies4; i++) {
6458 MemoryAccessDesc access(memoryIndex, Scalar::Uint32, 1, offset,
6459 f.bytecodeOffset(),
6460 f.hugeMemoryEnabled(memoryIndex));
6461 auto* load = f.load(src, &access, ValType::I32);
6462 if (!load || !loadedValues.append(load)) {
6463 return false;
6466 offset += sizeof(uint32_t);
6469 if (numCopies2) {
6470 MemoryAccessDesc access(memoryIndex, Scalar::Uint16, 1, offset,
6471 f.bytecodeOffset(),
6472 f.hugeMemoryEnabled(memoryIndex));
6473 auto* load = f.load(src, &access, ValType::I32);
6474 if (!load || !loadedValues.append(load)) {
6475 return false;
6478 offset += sizeof(uint16_t);
6481 if (numCopies1) {
6482 MemoryAccessDesc access(memoryIndex, Scalar::Uint8, 1, offset,
6483 f.bytecodeOffset(),
6484 f.hugeMemoryEnabled(memoryIndex));
6485 auto* load = f.load(src, &access, ValType::I32);
6486 if (!load || !loadedValues.append(load)) {
6487 return false;
6491 // Store all source bytes to the destination from high to low. We will trap
6492 // without writing anything on the first store if any dest byte is
6493 // out-of-bounds.
6494 offset = length;
6496 if (numCopies1) {
6497 offset -= sizeof(uint8_t);
6499 MemoryAccessDesc access(memoryIndex, Scalar::Uint8, 1, offset,
6500 f.bytecodeOffset(),
6501 f.hugeMemoryEnabled(memoryIndex));
6502 auto* value = loadedValues.popCopy();
6503 f.store(dst, &access, value);
6506 if (numCopies2) {
6507 offset -= sizeof(uint16_t);
6509 MemoryAccessDesc access(memoryIndex, Scalar::Uint16, 1, offset,
6510 f.bytecodeOffset(),
6511 f.hugeMemoryEnabled(memoryIndex));
6512 auto* value = loadedValues.popCopy();
6513 f.store(dst, &access, value);
6516 for (uint32_t i = 0; i < numCopies4; i++) {
6517 offset -= sizeof(uint32_t);
6519 MemoryAccessDesc access(memoryIndex, Scalar::Uint32, 1, offset,
6520 f.bytecodeOffset(),
6521 f.hugeMemoryEnabled(memoryIndex));
6522 auto* value = loadedValues.popCopy();
6523 f.store(dst, &access, value);
6526 #ifdef JS_64BIT
6527 for (uint32_t i = 0; i < numCopies8; i++) {
6528 offset -= sizeof(uint64_t);
6530 MemoryAccessDesc access(memoryIndex, Scalar::Int64, 1, offset,
6531 f.bytecodeOffset(),
6532 f.hugeMemoryEnabled(memoryIndex));
6533 auto* value = loadedValues.popCopy();
6534 f.store(dst, &access, value);
6536 #endif
6538 #ifdef ENABLE_WASM_SIMD
6539 for (uint32_t i = 0; i < numCopies16; i++) {
6540 offset -= sizeof(V128);
6542 MemoryAccessDesc access(memoryIndex, Scalar::Simd128, 1, offset,
6543 f.bytecodeOffset(),
6544 f.hugeMemoryEnabled(memoryIndex));
6545 auto* value = loadedValues.popCopy();
6546 f.store(dst, &access, value);
6548 #endif
6550 return true;
6553 static bool EmitMemCopy(FunctionCompiler& f) {
6554 MDefinition *dst, *src, *len;
6555 uint32_t dstMemIndex;
6556 uint32_t srcMemIndex;
6557 if (!f.iter().readMemOrTableCopy(true, &dstMemIndex, &dst, &srcMemIndex, &src,
6558 &len)) {
6559 return false;
6562 if (f.inDeadCode()) {
6563 return true;
6566 if (dstMemIndex == srcMemIndex && len->isConstant()) {
6567 uint64_t length = f.isMem32(dstMemIndex) ? len->toConstant()->toInt32()
6568 : len->toConstant()->toInt64();
6569 static_assert(MaxInlineMemoryCopyLength <= UINT32_MAX);
6570 if (length != 0 && length <= MaxInlineMemoryCopyLength) {
6571 return EmitMemCopyInline(f, dstMemIndex, dst, src, uint32_t(length));
6575 return EmitMemCopyCall(f, dstMemIndex, srcMemIndex, dst, src, len);
6578 static bool EmitTableCopy(FunctionCompiler& f) {
6579 MDefinition *dst, *src, *len;
6580 uint32_t dstTableIndex;
6581 uint32_t srcTableIndex;
6582 if (!f.iter().readMemOrTableCopy(false, &dstTableIndex, &dst, &srcTableIndex,
6583 &src, &len)) {
6584 return false;
6587 if (f.inDeadCode()) {
6588 return true;
6591 uint32_t bytecodeOffset = f.readBytecodeOffset();
6592 MDefinition* dti = f.constantI32(int32_t(dstTableIndex));
6593 MDefinition* sti = f.constantI32(int32_t(srcTableIndex));
6595 return f.emitInstanceCall5(bytecodeOffset, SASigTableCopy, dst, src, len, dti,
6596 sti);
6599 static bool EmitDataOrElemDrop(FunctionCompiler& f, bool isData) {
6600 uint32_t segIndexVal = 0;
6601 if (!f.iter().readDataOrElemDrop(isData, &segIndexVal)) {
6602 return false;
6605 if (f.inDeadCode()) {
6606 return true;
6609 uint32_t bytecodeOffset = f.readBytecodeOffset();
6611 MDefinition* segIndex = f.constantI32(int32_t(segIndexVal));
6613 const SymbolicAddressSignature& callee =
6614 isData ? SASigDataDrop : SASigElemDrop;
6615 return f.emitInstanceCall1(bytecodeOffset, callee, segIndex);
6618 static bool EmitMemFillCall(FunctionCompiler& f, uint32_t memoryIndex,
6619 MDefinition* start, MDefinition* val,
6620 MDefinition* len) {
6621 MDefinition* memoryBase = f.memoryBase(memoryIndex);
6623 uint32_t bytecodeOffset = f.readBytecodeOffset();
6624 const SymbolicAddressSignature& callee =
6625 (f.moduleEnv().usesSharedMemory(memoryIndex)
6626 ? (f.isMem32(memoryIndex) ? SASigMemFillSharedM32
6627 : SASigMemFillSharedM64)
6628 : (f.isMem32(memoryIndex) ? SASigMemFillM32 : SASigMemFillM64));
6629 return f.emitInstanceCall4(bytecodeOffset, callee, start, val, len,
6630 memoryBase);
6633 static bool EmitMemFillInline(FunctionCompiler& f, uint32_t memoryIndex,
6634 MDefinition* start, MDefinition* val,
6635 uint32_t length) {
6636 MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryFillLength);
6637 uint32_t value = val->toConstant()->toInt32();
6639 // Compute the number of copies of each width we will need to do
6640 size_t remainder = length;
6641 #ifdef ENABLE_WASM_SIMD
6642 size_t numCopies16 = 0;
6643 if (MacroAssembler::SupportsFastUnalignedFPAccesses()) {
6644 numCopies16 = remainder / sizeof(V128);
6645 remainder %= sizeof(V128);
6647 #endif
6648 #ifdef JS_64BIT
6649 size_t numCopies8 = remainder / sizeof(uint64_t);
6650 remainder %= sizeof(uint64_t);
6651 #endif
6652 size_t numCopies4 = remainder / sizeof(uint32_t);
6653 remainder %= sizeof(uint32_t);
6654 size_t numCopies2 = remainder / sizeof(uint16_t);
6655 remainder %= sizeof(uint16_t);
6656 size_t numCopies1 = remainder;
6658 // Generate splatted definitions for wider fills as needed
6659 #ifdef ENABLE_WASM_SIMD
6660 MDefinition* val16 = numCopies16 ? f.constantV128(V128(value)) : nullptr;
6661 #endif
6662 #ifdef JS_64BIT
6663 MDefinition* val8 =
6664 numCopies8 ? f.constantI64(int64_t(SplatByteToUInt<uint64_t>(value, 8)))
6665 : nullptr;
6666 #endif
6667 MDefinition* val4 =
6668 numCopies4 ? f.constantI32(int32_t(SplatByteToUInt<uint32_t>(value, 4)))
6669 : nullptr;
6670 MDefinition* val2 =
6671 numCopies2 ? f.constantI32(int32_t(SplatByteToUInt<uint32_t>(value, 2)))
6672 : nullptr;
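  // Illustrative example (not in the original source): SplatByteToUInt
  // replicates the low byte of the fill value across the wider integer, so
  // for a fill byte of 0xAB, val2 holds 0xABAB, val4 holds 0xABABABAB, and
  // val8 holds 0xABABABABABABABAB; val16 likewise splats the byte into all
  // sixteen lanes of the V128 constant.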
6674 // Store the fill value to the destination from high to low. We will trap
6675 // without writing anything on the first store if any dest byte is
6676 // out-of-bounds.
6677 size_t offset = length;
6679 if (numCopies1) {
6680 offset -= sizeof(uint8_t);
6682 MemoryAccessDesc access(memoryIndex, Scalar::Uint8, 1, offset,
6683 f.bytecodeOffset(),
6684 f.hugeMemoryEnabled(memoryIndex));
6685 f.store(start, &access, val);
6688 if (numCopies2) {
6689 offset -= sizeof(uint16_t);
6691 MemoryAccessDesc access(memoryIndex, Scalar::Uint16, 1, offset,
6692 f.bytecodeOffset(),
6693 f.hugeMemoryEnabled(memoryIndex));
6694 f.store(start, &access, val2);
6697 for (uint32_t i = 0; i < numCopies4; i++) {
6698 offset -= sizeof(uint32_t);
6700 MemoryAccessDesc access(memoryIndex, Scalar::Uint32, 1, offset,
6701 f.bytecodeOffset(),
6702 f.hugeMemoryEnabled(memoryIndex));
6703 f.store(start, &access, val4);
6706 #ifdef JS_64BIT
6707 for (uint32_t i = 0; i < numCopies8; i++) {
6708 offset -= sizeof(uint64_t);
6710 MemoryAccessDesc access(memoryIndex, Scalar::Int64, 1, offset,
6711 f.bytecodeOffset(),
6712 f.hugeMemoryEnabled(memoryIndex));
6713 f.store(start, &access, val8);
6715 #endif
6717 #ifdef ENABLE_WASM_SIMD
6718 for (uint32_t i = 0; i < numCopies16; i++) {
6719 offset -= sizeof(V128);
6721 MemoryAccessDesc access(memoryIndex, Scalar::Simd128, 1, offset,
6722 f.bytecodeOffset(),
6723 f.hugeMemoryEnabled(memoryIndex));
6724 f.store(start, &access, val16);
6726 #endif
6728 return true;
6731 static bool EmitMemFill(FunctionCompiler& f) {
6732 uint32_t memoryIndex;
6733 MDefinition *start, *val, *len;
6734 if (!f.iter().readMemFill(&memoryIndex, &start, &val, &len)) {
6735 return false;
6738 if (f.inDeadCode()) {
6739 return true;
6742 if (len->isConstant() && val->isConstant()) {
6743 uint64_t length = f.isMem32(memoryIndex) ? len->toConstant()->toInt32()
6744 : len->toConstant()->toInt64();
6745 static_assert(MaxInlineMemoryFillLength <= UINT32_MAX);
6746 if (length != 0 && length <= MaxInlineMemoryFillLength) {
6747 return EmitMemFillInline(f, memoryIndex, start, val, uint32_t(length));
6751 return EmitMemFillCall(f, memoryIndex, start, val, len);
6754 static bool EmitMemOrTableInit(FunctionCompiler& f, bool isMem) {
6755 uint32_t segIndexVal = 0, dstMemOrTableIndex = 0;
6756 MDefinition *dstOff, *srcOff, *len;
6757 if (!f.iter().readMemOrTableInit(isMem, &segIndexVal, &dstMemOrTableIndex,
6758 &dstOff, &srcOff, &len)) {
6759 return false;
6762 if (f.inDeadCode()) {
6763 return true;
6766 uint32_t bytecodeOffset = f.readBytecodeOffset();
6767 const SymbolicAddressSignature& callee =
6768 isMem
6769 ? (f.isMem32(dstMemOrTableIndex) ? SASigMemInitM32 : SASigMemInitM64)
6770 : SASigTableInit;
6772 MDefinition* segIndex = f.constantI32(int32_t(segIndexVal));
6773 if (!segIndex) {
6774 return false;
6777 MDefinition* dti = f.constantI32(int32_t(dstMemOrTableIndex));
6778 if (!dti) {
6779 return false;
6782 return f.emitInstanceCall5(bytecodeOffset, callee, dstOff, srcOff, len,
6783 segIndex, dti);
6786 // Note, table.{get,grow,set} on table(funcref) are currently rejected by the
6787 // verifier.
6789 static bool EmitTableFill(FunctionCompiler& f) {
6790 uint32_t tableIndex;
6791 MDefinition *start, *val, *len;
6792 if (!f.iter().readTableFill(&tableIndex, &start, &val, &len)) {
6793 return false;
6796 if (f.inDeadCode()) {
6797 return true;
6800 uint32_t bytecodeOffset = f.readBytecodeOffset();
6802 MDefinition* tableIndexArg = f.constantI32(int32_t(tableIndex));
6803 if (!tableIndexArg) {
6804 return false;
6807 return f.emitInstanceCall4(bytecodeOffset, SASigTableFill, start, val, len,
6808 tableIndexArg);
6811 #if ENABLE_WASM_MEMORY_CONTROL
6812 static bool EmitMemDiscard(FunctionCompiler& f) {
6813 uint32_t memoryIndex;
6814 MDefinition *start, *len;
6815 if (!f.iter().readMemDiscard(&memoryIndex, &start, &len)) {
6816 return false;
6819 if (f.inDeadCode()) {
6820 return true;
6823 uint32_t bytecodeOffset = f.readBytecodeOffset();
6825 MDefinition* memoryBase = f.memoryBase(memoryIndex);
6826 bool isMem32 = f.isMem32(memoryIndex);
6828 const SymbolicAddressSignature& callee =
6829 (f.moduleEnv().usesSharedMemory(memoryIndex)
6830 ? (isMem32 ? SASigMemDiscardSharedM32 : SASigMemDiscardSharedM64)
6831 : (isMem32 ? SASigMemDiscardM32 : SASigMemDiscardM64));
6832 return f.emitInstanceCall3(bytecodeOffset, callee, start, len, memoryBase);
6834 #endif
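// Illustrative note (not in the original source): in EmitTableGet below,
// tables represented as TableRepr::Ref are read with an inline,
// bounds-checked load via tableGetAnyRef; other representations fall back to
// the SASigTableGet instance call.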
6836 static bool EmitTableGet(FunctionCompiler& f) {
6837 uint32_t tableIndex;
6838 MDefinition* index;
6839 if (!f.iter().readTableGet(&tableIndex, &index)) {
6840 return false;
6843 if (f.inDeadCode()) {
6844 return true;
6847 const TableDesc& table = f.moduleEnv().tables[tableIndex];
6848 if (table.elemType.tableRepr() == TableRepr::Ref) {
6849 MDefinition* ret = f.tableGetAnyRef(tableIndex, index);
6850 if (!ret) {
6851 return false;
6853 f.iter().setResult(ret);
6854 return true;
6857 uint32_t bytecodeOffset = f.readBytecodeOffset();
6859 MDefinition* tableIndexArg = f.constantI32(int32_t(tableIndex));
6860 if (!tableIndexArg) {
6861 return false;
6864 // The return value here is either null, denoting an error, or a short-lived
6865 // pointer to a location containing a possibly-null ref.
6866 MDefinition* ret;
6867 if (!f.emitInstanceCall2(bytecodeOffset, SASigTableGet, index, tableIndexArg,
6868 &ret)) {
6869 return false;
6872 f.iter().setResult(ret);
6873 return true;
6876 static bool EmitTableGrow(FunctionCompiler& f) {
6877 uint32_t tableIndex;
6878 MDefinition* initValue;
6879 MDefinition* delta;
6880 if (!f.iter().readTableGrow(&tableIndex, &initValue, &delta)) {
6881 return false;
6884 if (f.inDeadCode()) {
6885 return true;
6888 uint32_t bytecodeOffset = f.readBytecodeOffset();
6890 MDefinition* tableIndexArg = f.constantI32(int32_t(tableIndex));
6891 if (!tableIndexArg) {
6892 return false;
6895 MDefinition* ret;
6896 if (!f.emitInstanceCall3(bytecodeOffset, SASigTableGrow, initValue, delta,
6897 tableIndexArg, &ret)) {
6898 return false;
6901 f.iter().setResult(ret);
6902 return true;
6905 static bool EmitTableSet(FunctionCompiler& f) {
6906 uint32_t tableIndex;
6907 MDefinition* index;
6908 MDefinition* value;
6909 if (!f.iter().readTableSet(&tableIndex, &index, &value)) {
6910 return false;
6913 if (f.inDeadCode()) {
6914 return true;
6917 uint32_t bytecodeOffset = f.readBytecodeOffset();
6919 const TableDesc& table = f.moduleEnv().tables[tableIndex];
6920 if (table.elemType.tableRepr() == TableRepr::Ref) {
6921 return f.tableSetAnyRef(tableIndex, index, value, bytecodeOffset);
6924 MDefinition* tableIndexArg = f.constantI32(int32_t(tableIndex));
6925 if (!tableIndexArg) {
6926 return false;
6929 return f.emitInstanceCall3(bytecodeOffset, SASigTableSet, index, value,
6930 tableIndexArg);
6933 static bool EmitTableSize(FunctionCompiler& f) {
6934 uint32_t tableIndex;
6935 if (!f.iter().readTableSize(&tableIndex)) {
6936 return false;
6939 if (f.inDeadCode()) {
6940 return true;
6943 MDefinition* length = f.loadTableLength(tableIndex);
6944 if (!length) {
6945 return false;
6948 f.iter().setResult(length);
6949 return true;
6952 static bool EmitRefFunc(FunctionCompiler& f) {
6953 uint32_t funcIndex;
6954 if (!f.iter().readRefFunc(&funcIndex)) {
6955 return false;
6958 if (f.inDeadCode()) {
6959 return true;
6962 uint32_t bytecodeOffset = f.readBytecodeOffset();
6964 MDefinition* funcIndexArg = f.constantI32(int32_t(funcIndex));
6965 if (!funcIndexArg) {
6966 return false;
6969 // The return value here is either null, denoting an error, or a short-lived
6970 // pointer to a location containing a possibly-null ref.
6971 MDefinition* ret;
6972 if (!f.emitInstanceCall1(bytecodeOffset, SASigRefFunc, funcIndexArg, &ret)) {
6973 return false;
6976 f.iter().setResult(ret);
6977 return true;
6980 static bool EmitRefNull(FunctionCompiler& f) {
6981 RefType type;
6982 if (!f.iter().readRefNull(&type)) {
6983 return false;
6986 if (f.inDeadCode()) {
6987 return true;
6990 MDefinition* nullVal = f.constantNullRef();
6991 if (!nullVal) {
6992 return false;
6994 f.iter().setResult(nullVal);
6995 return true;
6998 static bool EmitRefIsNull(FunctionCompiler& f) {
6999 MDefinition* input;
7000 if (!f.iter().readRefIsNull(&input)) {
7001 return false;
7004 if (f.inDeadCode()) {
7005 return true;
7008 MDefinition* nullVal = f.constantNullRef();
7009 if (!nullVal) {
7010 return false;
7012 f.iter().setResult(
7013 f.compare(input, nullVal, JSOp::Eq, MCompare::Compare_WasmAnyRef));
7014 return true;
7017 #ifdef ENABLE_WASM_SIMD
7018 static bool EmitConstSimd128(FunctionCompiler& f) {
7019 V128 v128;
7020 if (!f.iter().readV128Const(&v128)) {
7021 return false;
7024 f.iter().setResult(f.constantV128(v128));
7025 return true;
7028 static bool EmitBinarySimd128(FunctionCompiler& f, bool commutative,
7029 SimdOp op) {
7030 MDefinition* lhs;
7031 MDefinition* rhs;
7032 if (!f.iter().readBinary(ValType::V128, &lhs, &rhs)) {
7033 return false;
7036 f.iter().setResult(f.binarySimd128(lhs, rhs, commutative, op));
7037 return true;
7040 static bool EmitTernarySimd128(FunctionCompiler& f, wasm::SimdOp op) {
7041 MDefinition* v0;
7042 MDefinition* v1;
7043 MDefinition* v2;
7044 if (!f.iter().readTernary(ValType::V128, &v0, &v1, &v2)) {
7045 return false;
7048 f.iter().setResult(f.ternarySimd128(v0, v1, v2, op));
7049 return true;
7052 static bool EmitShiftSimd128(FunctionCompiler& f, SimdOp op) {
7053 MDefinition* lhs;
7054 MDefinition* rhs;
7055 if (!f.iter().readVectorShift(&lhs, &rhs)) {
7056 return false;
7059 f.iter().setResult(f.shiftSimd128(lhs, rhs, op));
7060 return true;
7063 static bool EmitSplatSimd128(FunctionCompiler& f, ValType inType, SimdOp op) {
7064 MDefinition* src;
7065 if (!f.iter().readConversion(inType, ValType::V128, &src)) {
7066 return false;
7069 f.iter().setResult(f.scalarToSimd128(src, op));
7070 return true;
7073 static bool EmitUnarySimd128(FunctionCompiler& f, SimdOp op) {
7074 MDefinition* src;
7075 if (!f.iter().readUnary(ValType::V128, &src)) {
7076 return false;
7079 f.iter().setResult(f.unarySimd128(src, op));
7080 return true;
7083 static bool EmitReduceSimd128(FunctionCompiler& f, SimdOp op) {
7084 MDefinition* src;
7085 if (!f.iter().readConversion(ValType::V128, ValType::I32, &src)) {
7086 return false;
7089 f.iter().setResult(f.reduceSimd128(src, op, ValType::I32));
7090 return true;
7093 static bool EmitExtractLaneSimd128(FunctionCompiler& f, ValType outType,
7094 uint32_t laneLimit, SimdOp op) {
7095 uint32_t laneIndex;
7096 MDefinition* src;
7097 if (!f.iter().readExtractLane(outType, laneLimit, &laneIndex, &src)) {
7098 return false;
7101 f.iter().setResult(f.reduceSimd128(src, op, outType, laneIndex));
7102 return true;
7105 static bool EmitReplaceLaneSimd128(FunctionCompiler& f, ValType laneType,
7106 uint32_t laneLimit, SimdOp op) {
7107 uint32_t laneIndex;
7108 MDefinition* lhs;
7109 MDefinition* rhs;
7110 if (!f.iter().readReplaceLane(laneType, laneLimit, &laneIndex, &lhs, &rhs)) {
7111 return false;
7114 f.iter().setResult(f.replaceLaneSimd128(lhs, rhs, laneIndex, op));
7115 return true;
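// Illustrative note (not in the original source): EmitShuffleSimd128 below
// handles i8x16.shuffle. The 16-byte immediate is carried as a V128 control
// mask, and later lowering (see jit/ShuffleAnalysis.h) tries to recognize
// common patterns such as rotates, interleaves and broadcasts so a cheaper
// machine sequence than a generic byte shuffle can be used.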
7118 static bool EmitShuffleSimd128(FunctionCompiler& f) {
7119 MDefinition* v1;
7120 MDefinition* v2;
7121 V128 control;
7122 if (!f.iter().readVectorShuffle(&v1, &v2, &control)) {
7123 return false;
7126 f.iter().setResult(f.shuffleSimd128(v1, v2, control));
7127 return true;
7130 static bool EmitLoadSplatSimd128(FunctionCompiler& f, Scalar::Type viewType,
7131 wasm::SimdOp splatOp) {
7132 LinearMemoryAddress<MDefinition*> addr;
7133 if (!f.iter().readLoadSplat(Scalar::byteSize(viewType), &addr)) {
7134 return false;
7137 f.iter().setResult(f.loadSplatSimd128(viewType, addr, splatOp));
7138 return true;
7141 static bool EmitLoadExtendSimd128(FunctionCompiler& f, wasm::SimdOp op) {
7142 LinearMemoryAddress<MDefinition*> addr;
7143 if (!f.iter().readLoadExtend(&addr)) {
7144 return false;
7147 f.iter().setResult(f.loadExtendSimd128(addr, op));
7148 return true;
7151 static bool EmitLoadZeroSimd128(FunctionCompiler& f, Scalar::Type viewType,
7152 size_t numBytes) {
7153 LinearMemoryAddress<MDefinition*> addr;
7154 if (!f.iter().readLoadSplat(numBytes, &addr)) {
7155 return false;
7158 f.iter().setResult(f.loadZeroSimd128(viewType, numBytes, addr));
7159 return true;
7162 static bool EmitLoadLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
7163 uint32_t laneIndex;
7164 MDefinition* src;
7165 LinearMemoryAddress<MDefinition*> addr;
7166 if (!f.iter().readLoadLane(laneSize, &addr, &laneIndex, &src)) {
7167 return false;
7170 f.iter().setResult(f.loadLaneSimd128(laneSize, addr, laneIndex, src));
7171 return true;
7174 static bool EmitStoreLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
7175 uint32_t laneIndex;
7176 MDefinition* src;
7177 LinearMemoryAddress<MDefinition*> addr;
7178 if (!f.iter().readStoreLane(laneSize, &addr, &laneIndex, &src)) {
7179 return false;
7182 f.storeLaneSimd128(laneSize, addr, laneIndex, src);
7183 return true;
7186 #endif // ENABLE_WASM_SIMD
7188 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
7189 static bool EmitRefAsNonNull(FunctionCompiler& f) {
7190 MDefinition* ref;
7191 if (!f.iter().readRefAsNonNull(&ref)) {
7192 return false;
7195 return f.refAsNonNull(ref);
7198 static bool EmitBrOnNull(FunctionCompiler& f) {
7199 uint32_t relativeDepth;
7200 ResultType type;
7201 DefVector values;
7202 MDefinition* condition;
7203 if (!f.iter().readBrOnNull(&relativeDepth, &type, &values, &condition)) {
7204 return false;
7207 return f.brOnNull(relativeDepth, values, type, condition);
7210 static bool EmitBrOnNonNull(FunctionCompiler& f) {
7211 uint32_t relativeDepth;
7212 ResultType type;
7213 DefVector values;
7214 MDefinition* condition;
7215 if (!f.iter().readBrOnNonNull(&relativeDepth, &type, &values, &condition)) {
7216 return false;
7219 return f.brOnNonNull(relativeDepth, values, type, condition);
7222 static bool EmitCallRef(FunctionCompiler& f) {
7223 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7225 const FuncType* funcType;
7226 MDefinition* callee;
7227 DefVector args;
7229 if (!f.iter().readCallRef(&funcType, &callee, &args)) {
7230 return false;
7233 if (f.inDeadCode()) {
7234 return true;
7237 CallCompileState call;
7238 if (!EmitCallArgs(f, *funcType, args, &call)) {
7239 return false;
7242 DefVector results;
7243 if (!f.callRef(*funcType, callee, lineOrBytecode, call, &results)) {
7244 return false;
7247 f.iter().setResults(results.length(), results);
7248 return true;
7251 #endif // ENABLE_WASM_FUNCTION_REFERENCES
7253 #ifdef ENABLE_WASM_GC
7255 static bool EmitStructNew(FunctionCompiler& f) {
7256 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7258 uint32_t typeIndex;
7259 DefVector args;
7260 if (!f.iter().readStructNew(&typeIndex, &args)) {
7261 return false;
7264 if (f.inDeadCode()) {
7265 return true;
7268 const TypeDef& typeDef = (*f.moduleEnv().types)[typeIndex];
7269 const StructType& structType = typeDef.structType();
7270 MOZ_ASSERT(args.length() == structType.fields_.length());
7272 MDefinition* structObject = f.createStructObject(typeIndex, false);
7273 if (!structObject) {
7274 return false;
7277 // And fill in the fields.
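  // Illustrative note (not in the original source): WasmPreBarrierKind::None
  // is safe here because the struct was allocated just above, so its fields
  // hold no prior values that incremental GC marking could need to observe;
  // ordinary struct.set goes through EmitStructSet, which uses
  // WasmPreBarrierKind::Normal.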
7278 for (uint32_t fieldIndex = 0; fieldIndex < structType.fields_.length();
7279 fieldIndex++) {
7280 if (!f.mirGen().ensureBallast()) {
7281 return false;
7283 const StructField& field = structType.fields_[fieldIndex];
7284 if (!f.writeValueToStructField(lineOrBytecode, field, structObject,
7285 args[fieldIndex],
7286 WasmPreBarrierKind::None)) {
7287 return false;
7291 f.iter().setResult(structObject);
7292 return true;
7295 static bool EmitStructNewDefault(FunctionCompiler& f) {
7296 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7298 uint32_t typeIndex;
7299 if (!f.iter().readStructNewDefault(&typeIndex)) {
7300 return false;
7303 if (f.inDeadCode()) {
7304 return true;
7307 const StructType& structType = (*f.moduleEnv().types)[typeIndex].structType();
7309 // Allocate a default initialized struct. This requires the type definition
7310 // for the struct.
7311 MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
7312 if (!typeDefData) {
7313 return false;
7316 // Figure out whether we need an OOL storage area, and hence which routine
7317 // to call.
7318 SymbolicAddressSignature calleeSASig =
7319 WasmStructObject::requiresOutlineBytes(structType.size_)
7320 ? SASigStructNewOOL_true
7321 : SASigStructNewIL_true;
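  // Illustrative note (not in the original source): "IL" is the in-line
  // allocator and "OOL" the out-of-line one. Small structs store their
  // fields directly inside the WasmStructObject; structs whose size exceeds
  // the inline area get a separate out-of-line storage block, and
  // requiresOutlineBytes() makes that size decision.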
7323 // Create call: structObject = Instance::structNew{IL,OOL}<true>(typeDefData)
7324 MDefinition* structObject;
7325 if (!f.emitInstanceCall1(lineOrBytecode, calleeSASig, typeDefData,
7326 &structObject)) {
7327 return false;
7330 f.iter().setResult(structObject);
7331 return true;
7334 static bool EmitStructSet(FunctionCompiler& f) {
7335 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7337 uint32_t typeIndex;
7338 uint32_t fieldIndex;
7339 MDefinition* structObject;
7340 MDefinition* value;
7341 if (!f.iter().readStructSet(&typeIndex, &fieldIndex, &structObject, &value)) {
7342 return false;
7345 if (f.inDeadCode()) {
7346 return true;
7349 // The null check is done in writeValueToStructField.
7351 // And fill in the field.
7352 const StructType& structType = (*f.moduleEnv().types)[typeIndex].structType();
7353 const StructField& field = structType.fields_[fieldIndex];
7354 return f.writeValueToStructField(lineOrBytecode, field, structObject, value,
7355 WasmPreBarrierKind::Normal);
7358 static bool EmitStructGet(FunctionCompiler& f, FieldWideningOp wideningOp) {
7359 uint32_t typeIndex;
7360 uint32_t fieldIndex;
7361 MDefinition* structObject;
7362 if (!f.iter().readStructGet(&typeIndex, &fieldIndex, wideningOp,
7363 &structObject)) {
7364 return false;
7367 if (f.inDeadCode()) {
7368 return true;
7371 // The null check is done in readValueFromStructField.
7373 // And fetch the data.
7374 const StructType& structType = (*f.moduleEnv().types)[typeIndex].structType();
7375 const StructField& field = structType.fields_[fieldIndex];
7376 MDefinition* load =
7377 f.readValueFromStructField(field, wideningOp, structObject);
7378 if (!load) {
7379 return false;
7382 f.iter().setResult(load);
7383 return true;
7386 static bool EmitArrayNew(FunctionCompiler& f) {
7387 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7389 uint32_t typeIndex;
7390 MDefinition* numElements;
7391 MDefinition* fillValue;
7392 if (!f.iter().readArrayNew(&typeIndex, &numElements, &fillValue)) {
7393 return false;
7396 if (f.inDeadCode()) {
7397 return true;
7400 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
7401 // this helper will trap.
7402 MDefinition* arrayObject = f.createArrayNewCallAndLoop(
7403 lineOrBytecode, typeIndex, numElements, fillValue);
7404 if (!arrayObject) {
7405 return false;
7408 f.iter().setResult(arrayObject);
7409 return true;
7412 static bool EmitArrayNewDefault(FunctionCompiler& f) {
7413 // This is almost identical to EmitArrayNew, except we skip the
7414 // initialisation loop.
7415 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7417 uint32_t typeIndex;
7418 MDefinition* numElements;
7419 if (!f.iter().readArrayNewDefault(&typeIndex, &numElements)) {
7420 return false;
7423 if (f.inDeadCode()) {
7424 return true;
7427 // Create the array object, default-initialized.
7428 MDefinition* arrayObject = f.createDefaultInitializedArrayObject(
7429 lineOrBytecode, typeIndex, numElements);
7430 if (!arrayObject) {
7431 return false;
7434 f.iter().setResult(arrayObject);
7435 return true;
7438 static bool EmitArrayNewFixed(FunctionCompiler& f) {
7439 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7441 uint32_t typeIndex, numElements;
7442 DefVector values;
7444 if (!f.iter().readArrayNewFixed(&typeIndex, &numElements, &values)) {
7445 return false;
7447 MOZ_ASSERT(values.length() == numElements);
7449 if (f.inDeadCode()) {
7450 return true;
7453 MDefinition* numElementsDef = f.constantI32(int32_t(numElements));
7454 if (!numElementsDef) {
7455 return false;
7458 // Create the array object, default-initialized.
7459 MDefinition* arrayObject = f.createDefaultInitializedArrayObject(
7460 lineOrBytecode, typeIndex, numElementsDef);
7461 if (!arrayObject) {
7462 return false;
7465 // Make `base` point at the first byte of the (OOL) data area.
7466 MDefinition* base = f.getWasmArrayObjectData(arrayObject);
7467 if (!base) {
7468 return false;
7471 // Write each element in turn.
7472 const ArrayType& arrayType = (*f.moduleEnv().types)[typeIndex].arrayType();
7473 StorageType elemType = arrayType.elementType_;
7474 uint32_t elemSize = elemType.size();
7476 // How do we know that the offset expression `i * elemSize` below remains
7477 // within 2^31 (signed-i32) range? In the worst case we will have 16-byte
7478 // values, and there can be at most MaxFunctionBytes expressions, if it were
7479 // theoretically possible to generate one expression per instruction byte.
7480 // Hence the max offset we can be expected to generate is
7481 // `16 * MaxFunctionBytes`.
7482 static_assert(16 /* sizeof v128 */ * MaxFunctionBytes <=
7483 MaxArrayPayloadBytes);
7484 MOZ_RELEASE_ASSERT(numElements <= MaxFunctionBytes);
7486 for (uint32_t i = 0; i < numElements; i++) {
7487 if (!f.mirGen().ensureBallast()) {
7488 return false;
7490 // `i * elemSize` is made safe by the assertions above.
7491 if (!f.writeGcValueAtBasePlusOffset(
7492 lineOrBytecode, elemType, arrayObject, AliasSet::WasmArrayDataArea,
7493 values[numElements - 1 - i], base, i * elemSize, false,
7494 WasmPreBarrierKind::None)) {
7495 return false;
7499 f.iter().setResult(arrayObject);
7500 return true;
7503 static bool EmitArrayNewData(FunctionCompiler& f) {
7504 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7506 uint32_t typeIndex, segIndex;
7507 MDefinition* segByteOffset;
7508 MDefinition* numElements;
7509 if (!f.iter().readArrayNewData(&typeIndex, &segIndex, &segByteOffset,
7510 &numElements)) {
7511 return false;
7514 if (f.inDeadCode()) {
7515 return true;
7518 // Get the type definition data for the array as a whole.
7519 MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
7520 if (!typeDefData) {
7521 return false;
7524 // Other values we need to pass to the instance call:
7525 MDefinition* segIndexM = f.constantI32(int32_t(segIndex));
7526 if (!segIndexM) {
7527 return false;
7530 // Create call:
7531 // arrayObject = Instance::arrayNewData(segByteOffset:u32, numElements:u32,
7532 // typeDefData:word, segIndex:u32)
7533 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
7534 // this call will trap.
7535 MDefinition* arrayObject;
7536 if (!f.emitInstanceCall4(lineOrBytecode, SASigArrayNewData, segByteOffset,
7537 numElements, typeDefData, segIndexM, &arrayObject)) {
7538 return false;
7541 f.iter().setResult(arrayObject);
7542 return true;
7545 static bool EmitArrayNewElem(FunctionCompiler& f) {
7546 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7548 uint32_t typeIndex, segIndex;
7549 MDefinition* segElemIndex;
7550 MDefinition* numElements;
7551 if (!f.iter().readArrayNewElem(&typeIndex, &segIndex, &segElemIndex,
7552 &numElements)) {
7553 return false;
7556 if (f.inDeadCode()) {
7557 return true;
7561 // Get the type definition data for the array as a whole.
7562 MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
7563 if (!typeDefData) {
7564 return false;
7567 // Other values we need to pass to the instance call:
7568 MDefinition* segIndexM = f.constantI32(int32_t(segIndex));
7569 if (!segIndexM) {
7570 return false;
7573 // Create call:
7574 // arrayObject = Instance::arrayNewElem(segElemIndex:u32, numElements:u32,
7575 // typeDefData:word, segIndex:u32)
7576 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
7577 // this call will trap.
7578 MDefinition* arrayObject;
7579 if (!f.emitInstanceCall4(lineOrBytecode, SASigArrayNewElem, segElemIndex,
7580 numElements, typeDefData, segIndexM, &arrayObject)) {
7581 return false;
7584 f.iter().setResult(arrayObject);
7585 return true;
7588 static bool EmitArrayInitData(FunctionCompiler& f) {
7589 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7591 uint32_t typeIndex, segIndex;
7592 MDefinition* array;
7593 MDefinition* arrayIndex;
7594 MDefinition* segOffset;
7595 MDefinition* length;
7596 if (!f.iter().readArrayInitData(&typeIndex, &segIndex, &array, &arrayIndex,
7597 &segOffset, &length)) {
7598 return false;
7601 if (f.inDeadCode()) {
7602 return true;
7605 // Get the type definition data for the array as a whole.
7606 MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
7607 if (!typeDefData) {
7608 return false;
7611 // Other values we need to pass to the instance call:
7612 MDefinition* segIndexM = f.constantI32(int32_t(segIndex));
7613 if (!segIndexM) {
7614 return false;
7617 // Create call:
7618 // Instance::arrayInitData(array:word, index:u32, segByteOffset:u32,
7619 // numElements:u32, typeDefData:word, segIndex:u32) If the requested size
7620 // exceeds MaxArrayPayloadBytes, the MIR generated by this call will trap.
7621 return f.emitInstanceCall6(lineOrBytecode, SASigArrayInitData, array,
7622 arrayIndex, segOffset, length, typeDefData,
7623 segIndexM);
7626 static bool EmitArrayInitElem(FunctionCompiler& f) {
7627 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7629 uint32_t typeIndex, segIndex;
7630 MDefinition* array;
7631 MDefinition* arrayIndex;
7632 MDefinition* segOffset;
7633 MDefinition* length;
7634 if (!f.iter().readArrayInitElem(&typeIndex, &segIndex, &array, &arrayIndex,
7635 &segOffset, &length)) {
7636 return false;
7639 if (f.inDeadCode()) {
7640 return true;
7643 // Get the type definition data for the array as a whole.
7644 MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
7645 if (!typeDefData) {
7646 return false;
7649 // Other values we need to pass to the instance call:
7650 MDefinition* segIndexM = f.constantI32(int32_t(segIndex));
7651 if (!segIndexM) {
7652 return false;
7655 // Create call:
7656 // Instance::arrayInitElem(array:word, index:u32, segByteOffset:u32,
7657 // numElements:u32, typeDefData:word, segIndex:u32) If the requested size
7658 // exceeds MaxArrayPayloadBytes, the MIR generated by this call will trap.
7659 return f.emitInstanceCall6(lineOrBytecode, SASigArrayInitElem, array,
7660 arrayIndex, segOffset, length, typeDefData,
7661 segIndexM);
7664 static bool EmitArraySet(FunctionCompiler& f) {
7665 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7667 uint32_t typeIndex;
7668 MDefinition* value;
7669 MDefinition* index;
7670 MDefinition* arrayObject;
7671 if (!f.iter().readArraySet(&typeIndex, &value, &index, &arrayObject)) {
7672 return false;
7675 if (f.inDeadCode()) {
7676 return true;
7679 // The null check is done in setupForArrayAccess.
7681 // Create the object null check and the array bounds check and get the OOL
7682 // data pointer.
7683 MDefinition* base = f.setupForArrayAccess(arrayObject, index);
7684 if (!base) {
7685 return false;
7688 // And do the store.
7689 const ArrayType& arrayType = (*f.moduleEnv().types)[typeIndex].arrayType();
7690 StorageType elemType = arrayType.elementType_;
7691 uint32_t elemSize = elemType.size();
7692 MOZ_ASSERT(elemSize >= 1 && elemSize <= 16);
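// Element sizes range from 1 byte (packed i8) up to 16 bytes (v128);
// reference-typed elements are pointer-sized and fall inside this range too.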
7694 return f.writeGcValueAtBasePlusScaledIndex(
7695 lineOrBytecode, elemType, arrayObject, AliasSet::WasmArrayDataArea, value,
7696 base, elemSize, index, WasmPreBarrierKind::Normal);
7699 static bool EmitArrayGet(FunctionCompiler& f, FieldWideningOp wideningOp) {
7700 uint32_t typeIndex;
7701 MDefinition* index;
7702 MDefinition* arrayObject;
7703 if (!f.iter().readArrayGet(&typeIndex, wideningOp, &index, &arrayObject)) {
7704 return false;
7707 if (f.inDeadCode()) {
7708 return true;
7711 // The null check is done in setupForArrayAccess.
7713 // Create the object null check and the array bounds check and get the OOL
7714 // data pointer.
7715 MDefinition* base = f.setupForArrayAccess(arrayObject, index);
7716 if (!base) {
7717 return false;
7720 // And do the load.
7721 const ArrayType& arrayType = (*f.moduleEnv().types)[typeIndex].arrayType();
7722 StorageType elemType = arrayType.elementType_;
7723 uint32_t elemSize = elemType.size();
7724 MOZ_ASSERT(elemSize >= 1 && elemSize <= 16);
7726 MDefinition* load = f.readGcValueAtBasePlusScaledIndex(
7727 elemType, wideningOp, arrayObject, AliasSet::WasmArrayDataArea, base,
7728 elemSize, index);
7729 if (!load) {
7730 return false;
7733 f.iter().setResult(load);
7734 return true;
7737 static bool EmitArrayLen(FunctionCompiler& f) {
7738 MDefinition* arrayObject;
7739 if (!f.iter().readArrayLen(&arrayObject)) {
7740 return false;
7743 if (f.inDeadCode()) {
7744 return true;
7747 // The null check is done in getWasmArrayObjectNumElements.
7749 // Get the number of elements in the array.
7750 MDefinition* numElements = f.getWasmArrayObjectNumElements(arrayObject);
7751 if (!numElements) {
7752 return false;
7755 f.iter().setResult(numElements);
7756 return true;
7759 static bool EmitArrayCopy(FunctionCompiler& f) {
7760 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7762 int32_t elemSize;
7763 bool elemsAreRefTyped;
7764 MDefinition* dstArrayObject;
7765 MDefinition* dstArrayIndex;
7766 MDefinition* srcArrayObject;
7767 MDefinition* srcArrayIndex;
7768 MDefinition* numElements;
7769 if (!f.iter().readArrayCopy(&elemSize, &elemsAreRefTyped, &dstArrayObject,
7770 &dstArrayIndex, &srcArrayObject, &srcArrayIndex,
7771 &numElements)) {
7772 return false;
7775 if (f.inDeadCode()) {
7776 return true;
7779 MOZ_ASSERT_IF(elemsAreRefTyped,
7780 size_t(elemSize) == MIRTypeToSize(TargetWordMIRType()));
7781 MOZ_ASSERT_IF(!elemsAreRefTyped, elemSize == 1 || elemSize == 2 ||
7782 elemSize == 4 || elemSize == 8 ||
7783 elemSize == 16);
7785 // A negative element size is used to inform Instance::arrayCopy that the
7786 // values are reftyped. This avoids having to pass it an extra boolean
7787 // argument.
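// On the receiving end this can be decoded along these lines (an illustrative
// sketch, not the actual Instance::arrayCopy code):
//   bool refTyped = signedElemSize < 0;
//   uint32_t size = refTyped ? -signedElemSize : signedElemSize;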
7788 MDefinition* elemSizeDef =
7789 f.constantI32(elemsAreRefTyped ? -elemSize : elemSize);
7790 if (!elemSizeDef) {
7791 return false;
7794 // Create call:
7795 // Instance::arrayCopy(dstArrayObject:word, dstArrayIndex:u32,
7796 // srcArrayObject:word, srcArrayIndex:u32,
7797 // numElements:u32,
7798 // (elemsAreRefTyped ? -elemSize : elemSize):u32)
7799 return f.emitInstanceCall6(lineOrBytecode, SASigArrayCopy, dstArrayObject,
7800 dstArrayIndex, srcArrayObject, srcArrayIndex,
7801 numElements, elemSizeDef);
7804 static bool EmitArrayFill(FunctionCompiler& f) {
7805 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7807 uint32_t typeIndex;
7808 MDefinition* array;
7809 MDefinition* index;
7810 MDefinition* val;
7811 MDefinition* numElements;
7812 if (!f.iter().readArrayFill(&typeIndex, &array, &index, &val, &numElements)) {
7813 return false;
7816 if (f.inDeadCode()) {
7817 return true;
7820 return f.createArrayFill(lineOrBytecode, typeIndex, array, index, val,
7821 numElements);
7824 static bool EmitRefI31(FunctionCompiler& f) {
7825 MDefinition* input;
7826 if (!f.iter().readConversion(
7827 ValType::I32, ValType(RefType::i31().asNonNullable()), &input)) {
7828 return false;
7831 if (f.inDeadCode()) {
7832 return true;
7835 MDefinition* output = f.refI31(input);
7836 if (!output) {
7837 return false;
7839 f.iter().setResult(output);
7840 return true;
7843 static bool EmitI31Get(FunctionCompiler& f, FieldWideningOp wideningOp) {
7844 MOZ_ASSERT(wideningOp != FieldWideningOp::None);
7846 MDefinition* input;
7847 if (!f.iter().readConversion(ValType(RefType::i31()), ValType::I32, &input)) {
7848 return false;
7851 if (f.inDeadCode()) {
7852 return true;
7855 if (!f.refAsNonNull(input)) {
7856 return false;
7858 MDefinition* output = f.i31Get(input, wideningOp);
7859 if (!output) {
7860 return false;
7862 f.iter().setResult(output);
7863 return true;
7866 static bool EmitRefTest(FunctionCompiler& f, bool nullable) {
7867 MDefinition* ref;
7868 RefType sourceType;
7869 RefType destType;
7870 if (!f.iter().readRefTest(nullable, &sourceType, &destType, &ref)) {
7871 return false;
7874 if (f.inDeadCode()) {
7875 return true;
7878 MDefinition* success = f.refTest(ref, sourceType, destType);
7879 if (!success) {
7880 return false;
7883 f.iter().setResult(success);
7884 return true;
7887 static bool EmitRefCast(FunctionCompiler& f, bool nullable) {
7888 MDefinition* ref;
7889 RefType sourceType;
7890 RefType destType;
7891 if (!f.iter().readRefCast(nullable, &sourceType, &destType, &ref)) {
7892 return false;
7895 if (f.inDeadCode()) {
7896 return true;
7899 if (!f.refCast(ref, sourceType, destType)) {
7900 return false;
7903 f.iter().setResult(ref);
7904 return true;
7907 static bool EmitBrOnCast(FunctionCompiler& f, bool onSuccess) {
7908 uint32_t labelRelativeDepth;
7909 RefType sourceType;
7910 RefType destType;
7911 ResultType labelType;
7912 DefVector values;
7913 if (!f.iter().readBrOnCast(onSuccess, &labelRelativeDepth, &sourceType,
7914 &destType, &labelType, &values)) {
7915 return false;
7918 return f.brOnCastCommon(onSuccess, labelRelativeDepth, sourceType, destType,
7919 labelType, values);
7922 static bool EmitAnyConvertExtern(FunctionCompiler& f) {
7923 // any.convert_extern is a no-op because anyref and externref share the
7924 // same representation.
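// Accordingly, no MIR is emitted for this instruction: the incoming reference
// is passed straight through and only its wasm-level type changes.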
7925 MDefinition* ref;
7926 if (!f.iter().readRefConversion(RefType::extern_(), RefType::any(), &ref)) {
7927 return false;
7930 f.iter().setResult(ref);
7931 return true;
7934 static bool EmitExternConvertAny(FunctionCompiler& f) {
7935 // extern.convert_any is a no-op because anyref and externref share the
7936 // same representation.
7937 MDefinition* ref;
7938 if (!f.iter().readRefConversion(RefType::any(), RefType::extern_(), &ref)) {
7939 return false;
7942 f.iter().setResult(ref);
7943 return true;
7946 #endif // ENABLE_WASM_GC
7948 static bool EmitCallBuiltinModuleFunc(FunctionCompiler& f) {
7949 // It's almost possible to use FunctionCompiler::emitInstanceCallN here,
7950 // but not quite: ::emitInstanceCallN expects an array of arguments along
7951 // with a size, and that's not what is available here. It would become
7952 // possible if we were prepared to copy `builtinModuleFunc->params` into
7953 // a fixed-size (16 element?) array, add `memoryBase`, and make the
7954 // call.
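// Instead, the call is assembled by hand below: passInstance supplies the
// implicit instance argument, passArgs forwards the user-visible parameters,
// passArg optionally appends the memory base, and finishCall plus
// builtinInstanceMethodCall emit the actual call.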
7955 const BuiltinModuleFunc* builtinModuleFunc;
7957 DefVector params;
7958 if (!f.iter().readCallBuiltinModuleFunc(&builtinModuleFunc, &params)) {
7959 return false;
7962 uint32_t bytecodeOffset = f.readBytecodeOffset();
7963 const SymbolicAddressSignature& callee = builtinModuleFunc->signature;
7965 CallCompileState args;
7966 if (!f.passInstance(callee.argTypes[0], &args)) {
7967 return false;
7970 if (!f.passArgs(params, builtinModuleFunc->params, &args)) {
7971 return false;
7974 if (builtinModuleFunc->usesMemory) {
7975 MDefinition* memoryBase = f.memoryBase(0);
7976 if (!f.passArg(memoryBase, MIRType::Pointer, &args)) {
7977 return false;
7981 if (!f.finishCall(&args)) {
7982 return false;
7985 bool hasResult = builtinModuleFunc->result.isSome();
7986 MDefinition* result = nullptr;
7987 MDefinition** resultOutParam = hasResult ? &result : nullptr;
7988 if (!f.builtinInstanceMethodCall(callee, bytecodeOffset, args,
7989 resultOutParam)) {
7990 return false;
7993 if (hasResult) {
7994 f.iter().setResult(result);
7996 return true;
7999 static bool EmitBodyExprs(FunctionCompiler& f) {
8000 if (!f.iter().startFunction(f.funcIndex(), f.locals())) {
8001 return false;
8004 #define CHECK(c) \
8005 if (!(c)) return false; \
8006 break
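// CHECK(c) runs the per-opcode emitter `c`: a false result propagates
// compilation failure out of EmitBodyExprs, while success breaks out of the
// enclosing switch so the loop below decodes the next opcode.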
8008 while (true) {
8009 if (!f.mirGen().ensureBallast()) {
8010 return false;
8013 OpBytes op;
8014 if (!f.iter().readOp(&op)) {
8015 return false;
8018 switch (op.b0) {
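// op.b0 holds the first opcode byte. Prefix opcodes (GcPrefix, SimdPrefix,
// MiscPrefix, ThreadPrefix, MozPrefix) carry a second-level opcode in op.b1,
// which the nested switches below dispatch on.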
8019 case uint16_t(Op::End):
8020 if (!EmitEnd(f)) {
8021 return false;
8023 if (f.iter().controlStackEmpty()) {
8024 return true;
8026 break;
8028 // Control opcodes
8029 case uint16_t(Op::Unreachable):
8030 CHECK(EmitUnreachable(f));
8031 case uint16_t(Op::Nop):
8032 CHECK(f.iter().readNop());
8033 case uint16_t(Op::Block):
8034 CHECK(EmitBlock(f));
8035 case uint16_t(Op::Loop):
8036 CHECK(EmitLoop(f));
8037 case uint16_t(Op::If):
8038 CHECK(EmitIf(f));
8039 case uint16_t(Op::Else):
8040 CHECK(EmitElse(f));
8041 case uint16_t(Op::Try):
8042 if (!f.moduleEnv().exceptionsEnabled()) {
8043 return f.iter().unrecognizedOpcode(&op);
8045 CHECK(EmitTry(f));
8046 case uint16_t(Op::Catch):
8047 if (!f.moduleEnv().exceptionsEnabled()) {
8048 return f.iter().unrecognizedOpcode(&op);
8050 CHECK(EmitCatch(f));
8051 case uint16_t(Op::CatchAll):
8052 if (!f.moduleEnv().exceptionsEnabled()) {
8053 return f.iter().unrecognizedOpcode(&op);
8055 CHECK(EmitCatchAll(f));
8056 case uint16_t(Op::Delegate):
8057 if (!f.moduleEnv().exceptionsEnabled()) {
8058 return f.iter().unrecognizedOpcode(&op);
8060 if (!EmitDelegate(f)) {
8061 return false;
8063 break;
8064 case uint16_t(Op::Throw):
8065 if (!f.moduleEnv().exceptionsEnabled()) {
8066 return f.iter().unrecognizedOpcode(&op);
8068 CHECK(EmitThrow(f));
8069 case uint16_t(Op::Rethrow):
8070 if (!f.moduleEnv().exceptionsEnabled()) {
8071 return f.iter().unrecognizedOpcode(&op);
8073 CHECK(EmitRethrow(f));
8074 case uint16_t(Op::ThrowRef):
8075 if (!f.moduleEnv().exnrefEnabled()) {
8076 return f.iter().unrecognizedOpcode(&op);
8078 CHECK(EmitThrowRef(f));
8079 case uint16_t(Op::TryTable):
8080 if (!f.moduleEnv().exnrefEnabled()) {
8081 return f.iter().unrecognizedOpcode(&op);
8083 CHECK(EmitTryTable(f));
8084 case uint16_t(Op::Br):
8085 CHECK(EmitBr(f));
8086 case uint16_t(Op::BrIf):
8087 CHECK(EmitBrIf(f));
8088 case uint16_t(Op::BrTable):
8089 CHECK(EmitBrTable(f));
8090 case uint16_t(Op::Return):
8091 CHECK(EmitReturn(f));
8093 // Calls
8094 case uint16_t(Op::Call):
8095 CHECK(EmitCall(f, /* asmJSFuncDef = */ false));
8096 case uint16_t(Op::CallIndirect):
8097 CHECK(EmitCallIndirect(f, /* oldStyle = */ false));
8099 // Parametric operators
8100 case uint16_t(Op::Drop):
8101 CHECK(f.iter().readDrop());
8102 case uint16_t(Op::SelectNumeric):
8103 CHECK(EmitSelect(f, /*typed*/ false));
8104 case uint16_t(Op::SelectTyped):
8105 CHECK(EmitSelect(f, /*typed*/ true));
8107 // Locals and globals
8108 case uint16_t(Op::LocalGet):
8109 CHECK(EmitGetLocal(f));
8110 case uint16_t(Op::LocalSet):
8111 CHECK(EmitSetLocal(f));
8112 case uint16_t(Op::LocalTee):
8113 CHECK(EmitTeeLocal(f));
8114 case uint16_t(Op::GlobalGet):
8115 CHECK(EmitGetGlobal(f));
8116 case uint16_t(Op::GlobalSet):
8117 CHECK(EmitSetGlobal(f));
8118 case uint16_t(Op::TableGet):
8119 CHECK(EmitTableGet(f));
8120 case uint16_t(Op::TableSet):
8121 CHECK(EmitTableSet(f));
8123 // Memory-related operators
8124 case uint16_t(Op::I32Load):
8125 CHECK(EmitLoad(f, ValType::I32, Scalar::Int32));
8126 case uint16_t(Op::I64Load):
8127 CHECK(EmitLoad(f, ValType::I64, Scalar::Int64));
8128 case uint16_t(Op::F32Load):
8129 CHECK(EmitLoad(f, ValType::F32, Scalar::Float32));
8130 case uint16_t(Op::F64Load):
8131 CHECK(EmitLoad(f, ValType::F64, Scalar::Float64));
8132 case uint16_t(Op::I32Load8S):
8133 CHECK(EmitLoad(f, ValType::I32, Scalar::Int8));
8134 case uint16_t(Op::I32Load8U):
8135 CHECK(EmitLoad(f, ValType::I32, Scalar::Uint8));
8136 case uint16_t(Op::I32Load16S):
8137 CHECK(EmitLoad(f, ValType::I32, Scalar::Int16));
8138 case uint16_t(Op::I32Load16U):
8139 CHECK(EmitLoad(f, ValType::I32, Scalar::Uint16));
8140 case uint16_t(Op::I64Load8S):
8141 CHECK(EmitLoad(f, ValType::I64, Scalar::Int8));
8142 case uint16_t(Op::I64Load8U):
8143 CHECK(EmitLoad(f, ValType::I64, Scalar::Uint8));
8144 case uint16_t(Op::I64Load16S):
8145 CHECK(EmitLoad(f, ValType::I64, Scalar::Int16));
8146 case uint16_t(Op::I64Load16U):
8147 CHECK(EmitLoad(f, ValType::I64, Scalar::Uint16));
8148 case uint16_t(Op::I64Load32S):
8149 CHECK(EmitLoad(f, ValType::I64, Scalar::Int32));
8150 case uint16_t(Op::I64Load32U):
8151 CHECK(EmitLoad(f, ValType::I64, Scalar::Uint32));
8152 case uint16_t(Op::I32Store):
8153 CHECK(EmitStore(f, ValType::I32, Scalar::Int32));
8154 case uint16_t(Op::I64Store):
8155 CHECK(EmitStore(f, ValType::I64, Scalar::Int64));
8156 case uint16_t(Op::F32Store):
8157 CHECK(EmitStore(f, ValType::F32, Scalar::Float32));
8158 case uint16_t(Op::F64Store):
8159 CHECK(EmitStore(f, ValType::F64, Scalar::Float64));
8160 case uint16_t(Op::I32Store8):
8161 CHECK(EmitStore(f, ValType::I32, Scalar::Int8));
8162 case uint16_t(Op::I32Store16):
8163 CHECK(EmitStore(f, ValType::I32, Scalar::Int16));
8164 case uint16_t(Op::I64Store8):
8165 CHECK(EmitStore(f, ValType::I64, Scalar::Int8));
8166 case uint16_t(Op::I64Store16):
8167 CHECK(EmitStore(f, ValType::I64, Scalar::Int16));
8168 case uint16_t(Op::I64Store32):
8169 CHECK(EmitStore(f, ValType::I64, Scalar::Int32));
8170 case uint16_t(Op::MemorySize):
8171 CHECK(EmitMemorySize(f));
8172 case uint16_t(Op::MemoryGrow):
8173 CHECK(EmitMemoryGrow(f));
8175 // Constants
8176 case uint16_t(Op::I32Const):
8177 CHECK(EmitI32Const(f));
8178 case uint16_t(Op::I64Const):
8179 CHECK(EmitI64Const(f));
8180 case uint16_t(Op::F32Const):
8181 CHECK(EmitF32Const(f));
8182 case uint16_t(Op::F64Const):
8183 CHECK(EmitF64Const(f));
8185 // Comparison operators
8186 case uint16_t(Op::I32Eqz):
8187 CHECK(EmitConversion<MNot>(f, ValType::I32, ValType::I32));
8188 case uint16_t(Op::I32Eq):
8189 CHECK(
8190 EmitComparison(f, ValType::I32, JSOp::Eq, MCompare::Compare_Int32));
8191 case uint16_t(Op::I32Ne):
8192 CHECK(
8193 EmitComparison(f, ValType::I32, JSOp::Ne, MCompare::Compare_Int32));
8194 case uint16_t(Op::I32LtS):
8195 CHECK(
8196 EmitComparison(f, ValType::I32, JSOp::Lt, MCompare::Compare_Int32));
8197 case uint16_t(Op::I32LtU):
8198 CHECK(EmitComparison(f, ValType::I32, JSOp::Lt,
8199 MCompare::Compare_UInt32));
8200 case uint16_t(Op::I32GtS):
8201 CHECK(
8202 EmitComparison(f, ValType::I32, JSOp::Gt, MCompare::Compare_Int32));
8203 case uint16_t(Op::I32GtU):
8204 CHECK(EmitComparison(f, ValType::I32, JSOp::Gt,
8205 MCompare::Compare_UInt32));
8206 case uint16_t(Op::I32LeS):
8207 CHECK(
8208 EmitComparison(f, ValType::I32, JSOp::Le, MCompare::Compare_Int32));
8209 case uint16_t(Op::I32LeU):
8210 CHECK(EmitComparison(f, ValType::I32, JSOp::Le,
8211 MCompare::Compare_UInt32));
8212 case uint16_t(Op::I32GeS):
8213 CHECK(
8214 EmitComparison(f, ValType::I32, JSOp::Ge, MCompare::Compare_Int32));
8215 case uint16_t(Op::I32GeU):
8216 CHECK(EmitComparison(f, ValType::I32, JSOp::Ge,
8217 MCompare::Compare_UInt32));
8218 case uint16_t(Op::I64Eqz):
8219 CHECK(EmitConversion<MNot>(f, ValType::I64, ValType::I32));
8220 case uint16_t(Op::I64Eq):
8221 CHECK(
8222 EmitComparison(f, ValType::I64, JSOp::Eq, MCompare::Compare_Int64));
8223 case uint16_t(Op::I64Ne):
8224 CHECK(
8225 EmitComparison(f, ValType::I64, JSOp::Ne, MCompare::Compare_Int64));
8226 case uint16_t(Op::I64LtS):
8227 CHECK(
8228 EmitComparison(f, ValType::I64, JSOp::Lt, MCompare::Compare_Int64));
8229 case uint16_t(Op::I64LtU):
8230 CHECK(EmitComparison(f, ValType::I64, JSOp::Lt,
8231 MCompare::Compare_UInt64));
8232 case uint16_t(Op::I64GtS):
8233 CHECK(
8234 EmitComparison(f, ValType::I64, JSOp::Gt, MCompare::Compare_Int64));
8235 case uint16_t(Op::I64GtU):
8236 CHECK(EmitComparison(f, ValType::I64, JSOp::Gt,
8237 MCompare::Compare_UInt64));
8238 case uint16_t(Op::I64LeS):
8239 CHECK(
8240 EmitComparison(f, ValType::I64, JSOp::Le, MCompare::Compare_Int64));
8241 case uint16_t(Op::I64LeU):
8242 CHECK(EmitComparison(f, ValType::I64, JSOp::Le,
8243 MCompare::Compare_UInt64));
8244 case uint16_t(Op::I64GeS):
8245 CHECK(
8246 EmitComparison(f, ValType::I64, JSOp::Ge, MCompare::Compare_Int64));
8247 case uint16_t(Op::I64GeU):
8248 CHECK(EmitComparison(f, ValType::I64, JSOp::Ge,
8249 MCompare::Compare_UInt64));
8250 case uint16_t(Op::F32Eq):
8251 CHECK(EmitComparison(f, ValType::F32, JSOp::Eq,
8252 MCompare::Compare_Float32));
8253 case uint16_t(Op::F32Ne):
8254 CHECK(EmitComparison(f, ValType::F32, JSOp::Ne,
8255 MCompare::Compare_Float32));
8256 case uint16_t(Op::F32Lt):
8257 CHECK(EmitComparison(f, ValType::F32, JSOp::Lt,
8258 MCompare::Compare_Float32));
8259 case uint16_t(Op::F32Gt):
8260 CHECK(EmitComparison(f, ValType::F32, JSOp::Gt,
8261 MCompare::Compare_Float32));
8262 case uint16_t(Op::F32Le):
8263 CHECK(EmitComparison(f, ValType::F32, JSOp::Le,
8264 MCompare::Compare_Float32));
8265 case uint16_t(Op::F32Ge):
8266 CHECK(EmitComparison(f, ValType::F32, JSOp::Ge,
8267 MCompare::Compare_Float32));
8268 case uint16_t(Op::F64Eq):
8269 CHECK(EmitComparison(f, ValType::F64, JSOp::Eq,
8270 MCompare::Compare_Double));
8271 case uint16_t(Op::F64Ne):
8272 CHECK(EmitComparison(f, ValType::F64, JSOp::Ne,
8273 MCompare::Compare_Double));
8274 case uint16_t(Op::F64Lt):
8275 CHECK(EmitComparison(f, ValType::F64, JSOp::Lt,
8276 MCompare::Compare_Double));
8277 case uint16_t(Op::F64Gt):
8278 CHECK(EmitComparison(f, ValType::F64, JSOp::Gt,
8279 MCompare::Compare_Double));
8280 case uint16_t(Op::F64Le):
8281 CHECK(EmitComparison(f, ValType::F64, JSOp::Le,
8282 MCompare::Compare_Double));
8283 case uint16_t(Op::F64Ge):
8284 CHECK(EmitComparison(f, ValType::F64, JSOp::Ge,
8285 MCompare::Compare_Double));
8287 // Numeric operators
8288 case uint16_t(Op::I32Clz):
8289 CHECK(EmitUnaryWithType<MClz>(f, ValType::I32, MIRType::Int32));
8290 case uint16_t(Op::I32Ctz):
8291 CHECK(EmitUnaryWithType<MCtz>(f, ValType::I32, MIRType::Int32));
8292 case uint16_t(Op::I32Popcnt):
8293 CHECK(EmitUnaryWithType<MPopcnt>(f, ValType::I32, MIRType::Int32));
8294 case uint16_t(Op::I32Add):
8295 CHECK(EmitAdd(f, ValType::I32, MIRType::Int32));
8296 case uint16_t(Op::I32Sub):
8297 CHECK(EmitSub(f, ValType::I32, MIRType::Int32));
8298 case uint16_t(Op::I32Mul):
8299 CHECK(EmitMul(f, ValType::I32, MIRType::Int32));
8300 case uint16_t(Op::I32DivS):
8301 case uint16_t(Op::I32DivU):
8302 CHECK(
8303 EmitDiv(f, ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32DivU));
8304 case uint16_t(Op::I32RemS):
8305 case uint16_t(Op::I32RemU):
8306 CHECK(
8307 EmitRem(f, ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32RemU));
8308 case uint16_t(Op::I32And):
8309 CHECK(EmitBitwiseAndOrXor(f, ValType::I32, MIRType::Int32,
8310 MWasmBinaryBitwise::SubOpcode::And));
8311 case uint16_t(Op::I32Or):
8312 CHECK(EmitBitwiseAndOrXor(f, ValType::I32, MIRType::Int32,
8313 MWasmBinaryBitwise::SubOpcode::Or));
8314 case uint16_t(Op::I32Xor):
8315 CHECK(EmitBitwiseAndOrXor(f, ValType::I32, MIRType::Int32,
8316 MWasmBinaryBitwise::SubOpcode::Xor));
8317 case uint16_t(Op::I32Shl):
8318 CHECK(EmitShift<MLsh>(f, ValType::I32, MIRType::Int32));
8319 case uint16_t(Op::I32ShrS):
8320 CHECK(EmitShift<MRsh>(f, ValType::I32, MIRType::Int32));
8321 case uint16_t(Op::I32ShrU):
8322 CHECK(EmitUrsh(f, ValType::I32, MIRType::Int32));
8323 case uint16_t(Op::I32Rotl):
8324 case uint16_t(Op::I32Rotr):
8325 CHECK(EmitRotate(f, ValType::I32, Op(op.b0) == Op::I32Rotl));
8326 case uint16_t(Op::I64Clz):
8327 CHECK(EmitUnaryWithType<MClz>(f, ValType::I64, MIRType::Int64));
8328 case uint16_t(Op::I64Ctz):
8329 CHECK(EmitUnaryWithType<MCtz>(f, ValType::I64, MIRType::Int64));
8330 case uint16_t(Op::I64Popcnt):
8331 CHECK(EmitUnaryWithType<MPopcnt>(f, ValType::I64, MIRType::Int64));
8332 case uint16_t(Op::I64Add):
8333 CHECK(EmitAdd(f, ValType::I64, MIRType::Int64));
8334 case uint16_t(Op::I64Sub):
8335 CHECK(EmitSub(f, ValType::I64, MIRType::Int64));
8336 case uint16_t(Op::I64Mul):
8337 CHECK(EmitMul(f, ValType::I64, MIRType::Int64));
8338 case uint16_t(Op::I64DivS):
8339 case uint16_t(Op::I64DivU):
8340 CHECK(
8341 EmitDiv(f, ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64DivU));
8342 case uint16_t(Op::I64RemS):
8343 case uint16_t(Op::I64RemU):
8344 CHECK(
8345 EmitRem(f, ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64RemU));
8346 case uint16_t(Op::I64And):
8347 CHECK(EmitBitwiseAndOrXor(f, ValType::I64, MIRType::Int64,
8348 MWasmBinaryBitwise::SubOpcode::And));
8349 case uint16_t(Op::I64Or):
8350 CHECK(EmitBitwiseAndOrXor(f, ValType::I64, MIRType::Int64,
8351 MWasmBinaryBitwise::SubOpcode::Or));
8352 case uint16_t(Op::I64Xor):
8353 CHECK(EmitBitwiseAndOrXor(f, ValType::I64, MIRType::Int64,
8354 MWasmBinaryBitwise::SubOpcode::Xor));
8355 case uint16_t(Op::I64Shl):
8356 CHECK(EmitShift<MLsh>(f, ValType::I64, MIRType::Int64));
8357 case uint16_t(Op::I64ShrS):
8358 CHECK(EmitShift<MRsh>(f, ValType::I64, MIRType::Int64));
8359 case uint16_t(Op::I64ShrU):
8360 CHECK(EmitUrsh(f, ValType::I64, MIRType::Int64));
8361 case uint16_t(Op::I64Rotl):
8362 case uint16_t(Op::I64Rotr):
8363 CHECK(EmitRotate(f, ValType::I64, Op(op.b0) == Op::I64Rotl));
8364 case uint16_t(Op::F32Abs):
8365 CHECK(EmitUnaryWithType<MAbs>(f, ValType::F32, MIRType::Float32));
8366 case uint16_t(Op::F32Neg):
8367 CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::F32, MIRType::Float32));
8368 case uint16_t(Op::F32Ceil):
8369 CHECK(EmitUnaryMathBuiltinCall(f, SASigCeilF));
8370 case uint16_t(Op::F32Floor):
8371 CHECK(EmitUnaryMathBuiltinCall(f, SASigFloorF));
8372 case uint16_t(Op::F32Trunc):
8373 CHECK(EmitUnaryMathBuiltinCall(f, SASigTruncF));
8374 case uint16_t(Op::F32Nearest):
8375 CHECK(EmitUnaryMathBuiltinCall(f, SASigNearbyIntF));
8376 case uint16_t(Op::F32Sqrt):
8377 CHECK(EmitUnaryWithType<MSqrt>(f, ValType::F32, MIRType::Float32));
8378 case uint16_t(Op::F32Add):
8379 CHECK(EmitAdd(f, ValType::F32, MIRType::Float32));
8380 case uint16_t(Op::F32Sub):
8381 CHECK(EmitSub(f, ValType::F32, MIRType::Float32));
8382 case uint16_t(Op::F32Mul):
8383 CHECK(EmitMul(f, ValType::F32, MIRType::Float32));
8384 case uint16_t(Op::F32Div):
8385 CHECK(EmitDiv(f, ValType::F32, MIRType::Float32,
8386 /* isUnsigned = */ false));
8387 case uint16_t(Op::F32Min):
8388 case uint16_t(Op::F32Max):
8389 CHECK(EmitMinMax(f, ValType::F32, MIRType::Float32,
8390 Op(op.b0) == Op::F32Max));
8391 case uint16_t(Op::F32CopySign):
8392 CHECK(EmitCopySign(f, ValType::F32));
8393 case uint16_t(Op::F64Abs):
8394 CHECK(EmitUnaryWithType<MAbs>(f, ValType::F64, MIRType::Double));
8395 case uint16_t(Op::F64Neg):
8396 CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::F64, MIRType::Double));
8397 case uint16_t(Op::F64Ceil):
8398 CHECK(EmitUnaryMathBuiltinCall(f, SASigCeilD));
8399 case uint16_t(Op::F64Floor):
8400 CHECK(EmitUnaryMathBuiltinCall(f, SASigFloorD));
8401 case uint16_t(Op::F64Trunc):
8402 CHECK(EmitUnaryMathBuiltinCall(f, SASigTruncD));
8403 case uint16_t(Op::F64Nearest):
8404 CHECK(EmitUnaryMathBuiltinCall(f, SASigNearbyIntD));
8405 case uint16_t(Op::F64Sqrt):
8406 CHECK(EmitUnaryWithType<MSqrt>(f, ValType::F64, MIRType::Double));
8407 case uint16_t(Op::F64Add):
8408 CHECK(EmitAdd(f, ValType::F64, MIRType::Double));
8409 case uint16_t(Op::F64Sub):
8410 CHECK(EmitSub(f, ValType::F64, MIRType::Double));
8411 case uint16_t(Op::F64Mul):
8412 CHECK(EmitMul(f, ValType::F64, MIRType::Double));
8413 case uint16_t(Op::F64Div):
8414 CHECK(EmitDiv(f, ValType::F64, MIRType::Double,
8415 /* isUnsigned = */ false));
8416 case uint16_t(Op::F64Min):
8417 case uint16_t(Op::F64Max):
8418 CHECK(EmitMinMax(f, ValType::F64, MIRType::Double,
8419 Op(op.b0) == Op::F64Max));
8420 case uint16_t(Op::F64CopySign):
8421 CHECK(EmitCopySign(f, ValType::F64));
8423 // Conversions
8424 case uint16_t(Op::I32WrapI64):
8425 CHECK(EmitConversion<MWrapInt64ToInt32>(f, ValType::I64, ValType::I32));
8426 case uint16_t(Op::I32TruncF32S):
8427 case uint16_t(Op::I32TruncF32U):
8428 CHECK(EmitTruncate(f, ValType::F32, ValType::I32,
8429 Op(op.b0) == Op::I32TruncF32U, false));
8430 case uint16_t(Op::I32TruncF64S):
8431 case uint16_t(Op::I32TruncF64U):
8432 CHECK(EmitTruncate(f, ValType::F64, ValType::I32,
8433 Op(op.b0) == Op::I32TruncF64U, false));
8434 case uint16_t(Op::I64ExtendI32S):
8435 case uint16_t(Op::I64ExtendI32U):
8436 CHECK(EmitExtendI32(f, Op(op.b0) == Op::I64ExtendI32U));
8437 case uint16_t(Op::I64TruncF32S):
8438 case uint16_t(Op::I64TruncF32U):
8439 CHECK(EmitTruncate(f, ValType::F32, ValType::I64,
8440 Op(op.b0) == Op::I64TruncF32U, false));
8441 case uint16_t(Op::I64TruncF64S):
8442 case uint16_t(Op::I64TruncF64U):
8443 CHECK(EmitTruncate(f, ValType::F64, ValType::I64,
8444 Op(op.b0) == Op::I64TruncF64U, false));
8445 case uint16_t(Op::F32ConvertI32S):
8446 CHECK(EmitConversion<MToFloat32>(f, ValType::I32, ValType::F32));
8447 case uint16_t(Op::F32ConvertI32U):
8448 CHECK(EmitConversion<MWasmUnsignedToFloat32>(f, ValType::I32,
8449 ValType::F32));
8450 case uint16_t(Op::F32ConvertI64S):
8451 case uint16_t(Op::F32ConvertI64U):
8452 CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F32, MIRType::Float32,
8453 Op(op.b0) == Op::F32ConvertI64U));
8454 case uint16_t(Op::F32DemoteF64):
8455 CHECK(EmitConversion<MToFloat32>(f, ValType::F64, ValType::F32));
8456 case uint16_t(Op::F64ConvertI32S):
8457 CHECK(EmitConversion<MToDouble>(f, ValType::I32, ValType::F64));
8458 case uint16_t(Op::F64ConvertI32U):
8459 CHECK(EmitConversion<MWasmUnsignedToDouble>(f, ValType::I32,
8460 ValType::F64));
8461 case uint16_t(Op::F64ConvertI64S):
8462 case uint16_t(Op::F64ConvertI64U):
8463 CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F64, MIRType::Double,
8464 Op(op.b0) == Op::F64ConvertI64U));
8465 case uint16_t(Op::F64PromoteF32):
8466 CHECK(EmitConversion<MToDouble>(f, ValType::F32, ValType::F64));
8468 // Reinterpretations
8469 case uint16_t(Op::I32ReinterpretF32):
8470 CHECK(EmitReinterpret(f, ValType::I32, ValType::F32, MIRType::Int32));
8471 case uint16_t(Op::I64ReinterpretF64):
8472 CHECK(EmitReinterpret(f, ValType::I64, ValType::F64, MIRType::Int64));
8473 case uint16_t(Op::F32ReinterpretI32):
8474 CHECK(EmitReinterpret(f, ValType::F32, ValType::I32, MIRType::Float32));
8475 case uint16_t(Op::F64ReinterpretI64):
8476 CHECK(EmitReinterpret(f, ValType::F64, ValType::I64, MIRType::Double));
8478 #ifdef ENABLE_WASM_GC
8479 case uint16_t(Op::RefEq):
8480 if (!f.moduleEnv().gcEnabled()) {
8481 return f.iter().unrecognizedOpcode(&op);
8483 CHECK(EmitComparison(f, RefType::eq(), JSOp::Eq,
8484 MCompare::Compare_WasmAnyRef));
8485 #endif
8486 case uint16_t(Op::RefFunc):
8487 CHECK(EmitRefFunc(f));
8488 case uint16_t(Op::RefNull):
8489 CHECK(EmitRefNull(f));
8490 case uint16_t(Op::RefIsNull):
8491 CHECK(EmitRefIsNull(f));
8493 // Sign extensions
8494 case uint16_t(Op::I32Extend8S):
8495 CHECK(EmitSignExtend(f, 1, 4));
8496 case uint16_t(Op::I32Extend16S):
8497 CHECK(EmitSignExtend(f, 2, 4));
8498 case uint16_t(Op::I64Extend8S):
8499 CHECK(EmitSignExtend(f, 1, 8));
8500 case uint16_t(Op::I64Extend16S):
8501 CHECK(EmitSignExtend(f, 2, 8));
8502 case uint16_t(Op::I64Extend32S):
8503 CHECK(EmitSignExtend(f, 4, 8));
8505 #ifdef ENABLE_WASM_TAIL_CALLS
8506 case uint16_t(Op::ReturnCall): {
8507 if (!f.moduleEnv().tailCallsEnabled()) {
8508 return f.iter().unrecognizedOpcode(&op);
8510 CHECK(EmitReturnCall(f));
8512 case uint16_t(Op::ReturnCallIndirect): {
8513 if (!f.moduleEnv().tailCallsEnabled()) {
8514 return f.iter().unrecognizedOpcode(&op);
8516 CHECK(EmitReturnCallIndirect(f));
8518 #endif
8520 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
8521 case uint16_t(Op::RefAsNonNull):
8522 if (!f.moduleEnv().functionReferencesEnabled()) {
8523 return f.iter().unrecognizedOpcode(&op);
8525 CHECK(EmitRefAsNonNull(f));
8526 case uint16_t(Op::BrOnNull): {
8527 if (!f.moduleEnv().functionReferencesEnabled()) {
8528 return f.iter().unrecognizedOpcode(&op);
8530 CHECK(EmitBrOnNull(f));
8532 case uint16_t(Op::BrOnNonNull): {
8533 if (!f.moduleEnv().functionReferencesEnabled()) {
8534 return f.iter().unrecognizedOpcode(&op);
8536 CHECK(EmitBrOnNonNull(f));
8538 case uint16_t(Op::CallRef): {
8539 if (!f.moduleEnv().functionReferencesEnabled()) {
8540 return f.iter().unrecognizedOpcode(&op);
8542 CHECK(EmitCallRef(f));
8544 #endif
8546 #if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_FUNCTION_REFERENCES)
8547 case uint16_t(Op::ReturnCallRef): {
8548 if (!f.moduleEnv().functionReferencesEnabled() ||
8549 !f.moduleEnv().tailCallsEnabled()) {
8550 return f.iter().unrecognizedOpcode(&op);
8552 CHECK(EmitReturnCallRef(f));
8554 #endif
8556 // GC operations
8557 #ifdef ENABLE_WASM_GC
8558 case uint16_t(Op::GcPrefix): {
8559 if (!f.moduleEnv().gcEnabled()) {
8560 return f.iter().unrecognizedOpcode(&op);
8562 switch (op.b1) {
8563 case uint32_t(GcOp::StructNew):
8564 CHECK(EmitStructNew(f));
8565 case uint32_t(GcOp::StructNewDefault):
8566 CHECK(EmitStructNewDefault(f));
8567 case uint32_t(GcOp::StructSet):
8568 CHECK(EmitStructSet(f));
8569 case uint32_t(GcOp::StructGet):
8570 CHECK(EmitStructGet(f, FieldWideningOp::None));
8571 case uint32_t(GcOp::StructGetS):
8572 CHECK(EmitStructGet(f, FieldWideningOp::Signed));
8573 case uint32_t(GcOp::StructGetU):
8574 CHECK(EmitStructGet(f, FieldWideningOp::Unsigned));
8575 case uint32_t(GcOp::ArrayNew):
8576 CHECK(EmitArrayNew(f));
8577 case uint32_t(GcOp::ArrayNewDefault):
8578 CHECK(EmitArrayNewDefault(f));
8579 case uint32_t(GcOp::ArrayNewFixed):
8580 CHECK(EmitArrayNewFixed(f));
8581 case uint32_t(GcOp::ArrayNewData):
8582 CHECK(EmitArrayNewData(f));
8583 case uint32_t(GcOp::ArrayNewElem):
8584 CHECK(EmitArrayNewElem(f));
8585 case uint32_t(GcOp::ArrayInitData):
8586 CHECK(EmitArrayInitData(f));
8587 case uint32_t(GcOp::ArrayInitElem):
8588 CHECK(EmitArrayInitElem(f));
8589 case uint32_t(GcOp::ArraySet):
8590 CHECK(EmitArraySet(f));
8591 case uint32_t(GcOp::ArrayGet):
8592 CHECK(EmitArrayGet(f, FieldWideningOp::None));
8593 case uint32_t(GcOp::ArrayGetS):
8594 CHECK(EmitArrayGet(f, FieldWideningOp::Signed));
8595 case uint32_t(GcOp::ArrayGetU):
8596 CHECK(EmitArrayGet(f, FieldWideningOp::Unsigned));
8597 case uint32_t(GcOp::ArrayLen):
8598 CHECK(EmitArrayLen(f));
8599 case uint32_t(GcOp::ArrayCopy):
8600 CHECK(EmitArrayCopy(f));
8601 case uint32_t(GcOp::ArrayFill):
8602 CHECK(EmitArrayFill(f));
8603 case uint32_t(GcOp::RefI31):
8604 CHECK(EmitRefI31(f));
8605 case uint32_t(GcOp::I31GetS):
8606 CHECK(EmitI31Get(f, FieldWideningOp::Signed));
8607 case uint32_t(GcOp::I31GetU):
8608 CHECK(EmitI31Get(f, FieldWideningOp::Unsigned));
8609 case uint32_t(GcOp::BrOnCast):
8610 CHECK(EmitBrOnCast(f, /*onSuccess=*/true));
8611 case uint32_t(GcOp::BrOnCastFail):
8612 CHECK(EmitBrOnCast(f, /*onSuccess=*/false));
8613 case uint32_t(GcOp::RefTest):
8614 CHECK(EmitRefTest(f, /*nullable=*/false));
8615 case uint32_t(GcOp::RefTestNull):
8616 CHECK(EmitRefTest(f, /*nullable=*/true));
8617 case uint32_t(GcOp::RefCast):
8618 CHECK(EmitRefCast(f, /*nullable=*/false));
8619 case uint32_t(GcOp::RefCastNull):
8620 CHECK(EmitRefCast(f, /*nullable=*/true));
8621 case uint32_t(GcOp::AnyConvertExtern):
8622 CHECK(EmitAnyConvertExtern(f));
8623 case uint32_t(GcOp::ExternConvertAny):
8624 CHECK(EmitExternConvertAny(f));
8625 default:
8626 return f.iter().unrecognizedOpcode(&op);
8627 } // switch (op.b1)
8628 break;
8630 #endif
8632 // SIMD operations
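// Most SIMD opcodes funnel into a small set of shared emitters
// (EmitBinarySimd128, EmitUnarySimd128, EmitShiftSimd128, and friends), with
// the specific operation selected by forwarding SimdOp(op.b1) to the emitter.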
8633 #ifdef ENABLE_WASM_SIMD
8634 case uint16_t(Op::SimdPrefix): {
8635 if (!f.moduleEnv().simdAvailable()) {
8636 return f.iter().unrecognizedOpcode(&op);
8638 switch (op.b1) {
8639 case uint32_t(SimdOp::V128Const):
8640 CHECK(EmitConstSimd128(f));
8641 case uint32_t(SimdOp::V128Load):
8642 CHECK(EmitLoad(f, ValType::V128, Scalar::Simd128));
8643 case uint32_t(SimdOp::V128Store):
8644 CHECK(EmitStore(f, ValType::V128, Scalar::Simd128));
8645 case uint32_t(SimdOp::V128And):
8646 case uint32_t(SimdOp::V128Or):
8647 case uint32_t(SimdOp::V128Xor):
8648 case uint32_t(SimdOp::I8x16AvgrU):
8649 case uint32_t(SimdOp::I16x8AvgrU):
8650 case uint32_t(SimdOp::I8x16Add):
8651 case uint32_t(SimdOp::I8x16AddSatS):
8652 case uint32_t(SimdOp::I8x16AddSatU):
8653 case uint32_t(SimdOp::I8x16MinS):
8654 case uint32_t(SimdOp::I8x16MinU):
8655 case uint32_t(SimdOp::I8x16MaxS):
8656 case uint32_t(SimdOp::I8x16MaxU):
8657 case uint32_t(SimdOp::I16x8Add):
8658 case uint32_t(SimdOp::I16x8AddSatS):
8659 case uint32_t(SimdOp::I16x8AddSatU):
8660 case uint32_t(SimdOp::I16x8Mul):
8661 case uint32_t(SimdOp::I16x8MinS):
8662 case uint32_t(SimdOp::I16x8MinU):
8663 case uint32_t(SimdOp::I16x8MaxS):
8664 case uint32_t(SimdOp::I16x8MaxU):
8665 case uint32_t(SimdOp::I32x4Add):
8666 case uint32_t(SimdOp::I32x4Mul):
8667 case uint32_t(SimdOp::I32x4MinS):
8668 case uint32_t(SimdOp::I32x4MinU):
8669 case uint32_t(SimdOp::I32x4MaxS):
8670 case uint32_t(SimdOp::I32x4MaxU):
8671 case uint32_t(SimdOp::I64x2Add):
8672 case uint32_t(SimdOp::I64x2Mul):
8673 case uint32_t(SimdOp::F32x4Add):
8674 case uint32_t(SimdOp::F32x4Mul):
8675 case uint32_t(SimdOp::F32x4Min):
8676 case uint32_t(SimdOp::F32x4Max):
8677 case uint32_t(SimdOp::F64x2Add):
8678 case uint32_t(SimdOp::F64x2Mul):
8679 case uint32_t(SimdOp::F64x2Min):
8680 case uint32_t(SimdOp::F64x2Max):
8681 case uint32_t(SimdOp::I8x16Eq):
8682 case uint32_t(SimdOp::I8x16Ne):
8683 case uint32_t(SimdOp::I16x8Eq):
8684 case uint32_t(SimdOp::I16x8Ne):
8685 case uint32_t(SimdOp::I32x4Eq):
8686 case uint32_t(SimdOp::I32x4Ne):
8687 case uint32_t(SimdOp::I64x2Eq):
8688 case uint32_t(SimdOp::I64x2Ne):
8689 case uint32_t(SimdOp::F32x4Eq):
8690 case uint32_t(SimdOp::F32x4Ne):
8691 case uint32_t(SimdOp::F64x2Eq):
8692 case uint32_t(SimdOp::F64x2Ne):
8693 case uint32_t(SimdOp::I32x4DotI16x8S):
8694 case uint32_t(SimdOp::I16x8ExtmulLowI8x16S):
8695 case uint32_t(SimdOp::I16x8ExtmulHighI8x16S):
8696 case uint32_t(SimdOp::I16x8ExtmulLowI8x16U):
8697 case uint32_t(SimdOp::I16x8ExtmulHighI8x16U):
8698 case uint32_t(SimdOp::I32x4ExtmulLowI16x8S):
8699 case uint32_t(SimdOp::I32x4ExtmulHighI16x8S):
8700 case uint32_t(SimdOp::I32x4ExtmulLowI16x8U):
8701 case uint32_t(SimdOp::I32x4ExtmulHighI16x8U):
8702 case uint32_t(SimdOp::I64x2ExtmulLowI32x4S):
8703 case uint32_t(SimdOp::I64x2ExtmulHighI32x4S):
8704 case uint32_t(SimdOp::I64x2ExtmulLowI32x4U):
8705 case uint32_t(SimdOp::I64x2ExtmulHighI32x4U):
8706 case uint32_t(SimdOp::I16x8Q15MulrSatS):
8707 CHECK(EmitBinarySimd128(f, /* commutative= */ true, SimdOp(op.b1)));
8708 case uint32_t(SimdOp::V128AndNot):
8709 case uint32_t(SimdOp::I8x16Sub):
8710 case uint32_t(SimdOp::I8x16SubSatS):
8711 case uint32_t(SimdOp::I8x16SubSatU):
8712 case uint32_t(SimdOp::I16x8Sub):
8713 case uint32_t(SimdOp::I16x8SubSatS):
8714 case uint32_t(SimdOp::I16x8SubSatU):
8715 case uint32_t(SimdOp::I32x4Sub):
8716 case uint32_t(SimdOp::I64x2Sub):
8717 case uint32_t(SimdOp::F32x4Sub):
8718 case uint32_t(SimdOp::F32x4Div):
8719 case uint32_t(SimdOp::F64x2Sub):
8720 case uint32_t(SimdOp::F64x2Div):
8721 case uint32_t(SimdOp::I8x16NarrowI16x8S):
8722 case uint32_t(SimdOp::I8x16NarrowI16x8U):
8723 case uint32_t(SimdOp::I16x8NarrowI32x4S):
8724 case uint32_t(SimdOp::I16x8NarrowI32x4U):
8725 case uint32_t(SimdOp::I8x16LtS):
8726 case uint32_t(SimdOp::I8x16LtU):
8727 case uint32_t(SimdOp::I8x16GtS):
8728 case uint32_t(SimdOp::I8x16GtU):
8729 case uint32_t(SimdOp::I8x16LeS):
8730 case uint32_t(SimdOp::I8x16LeU):
8731 case uint32_t(SimdOp::I8x16GeS):
8732 case uint32_t(SimdOp::I8x16GeU):
8733 case uint32_t(SimdOp::I16x8LtS):
8734 case uint32_t(SimdOp::I16x8LtU):
8735 case uint32_t(SimdOp::I16x8GtS):
8736 case uint32_t(SimdOp::I16x8GtU):
8737 case uint32_t(SimdOp::I16x8LeS):
8738 case uint32_t(SimdOp::I16x8LeU):
8739 case uint32_t(SimdOp::I16x8GeS):
8740 case uint32_t(SimdOp::I16x8GeU):
8741 case uint32_t(SimdOp::I32x4LtS):
8742 case uint32_t(SimdOp::I32x4LtU):
8743 case uint32_t(SimdOp::I32x4GtS):
8744 case uint32_t(SimdOp::I32x4GtU):
8745 case uint32_t(SimdOp::I32x4LeS):
8746 case uint32_t(SimdOp::I32x4LeU):
8747 case uint32_t(SimdOp::I32x4GeS):
8748 case uint32_t(SimdOp::I32x4GeU):
8749 case uint32_t(SimdOp::I64x2LtS):
8750 case uint32_t(SimdOp::I64x2GtS):
8751 case uint32_t(SimdOp::I64x2LeS):
8752 case uint32_t(SimdOp::I64x2GeS):
8753 case uint32_t(SimdOp::F32x4Lt):
8754 case uint32_t(SimdOp::F32x4Gt):
8755 case uint32_t(SimdOp::F32x4Le):
8756 case uint32_t(SimdOp::F32x4Ge):
8757 case uint32_t(SimdOp::F64x2Lt):
8758 case uint32_t(SimdOp::F64x2Gt):
8759 case uint32_t(SimdOp::F64x2Le):
8760 case uint32_t(SimdOp::F64x2Ge):
8761 case uint32_t(SimdOp::I8x16Swizzle):
8762 case uint32_t(SimdOp::F32x4PMax):
8763 case uint32_t(SimdOp::F32x4PMin):
8764 case uint32_t(SimdOp::F64x2PMax):
8765 case uint32_t(SimdOp::F64x2PMin):
8766 CHECK(
8767 EmitBinarySimd128(f, /* commutative= */ false, SimdOp(op.b1)));
8768 case uint32_t(SimdOp::I8x16Splat):
8769 case uint32_t(SimdOp::I16x8Splat):
8770 case uint32_t(SimdOp::I32x4Splat):
8771 CHECK(EmitSplatSimd128(f, ValType::I32, SimdOp(op.b1)));
8772 case uint32_t(SimdOp::I64x2Splat):
8773 CHECK(EmitSplatSimd128(f, ValType::I64, SimdOp(op.b1)));
8774 case uint32_t(SimdOp::F32x4Splat):
8775 CHECK(EmitSplatSimd128(f, ValType::F32, SimdOp(op.b1)));
8776 case uint32_t(SimdOp::F64x2Splat):
8777 CHECK(EmitSplatSimd128(f, ValType::F64, SimdOp(op.b1)));
8778 case uint32_t(SimdOp::I8x16Neg):
8779 case uint32_t(SimdOp::I16x8Neg):
8780 case uint32_t(SimdOp::I16x8ExtendLowI8x16S):
8781 case uint32_t(SimdOp::I16x8ExtendHighI8x16S):
8782 case uint32_t(SimdOp::I16x8ExtendLowI8x16U):
8783 case uint32_t(SimdOp::I16x8ExtendHighI8x16U):
8784 case uint32_t(SimdOp::I32x4Neg):
8785 case uint32_t(SimdOp::I32x4ExtendLowI16x8S):
8786 case uint32_t(SimdOp::I32x4ExtendHighI16x8S):
8787 case uint32_t(SimdOp::I32x4ExtendLowI16x8U):
8788 case uint32_t(SimdOp::I32x4ExtendHighI16x8U):
8789 case uint32_t(SimdOp::I32x4TruncSatF32x4S):
8790 case uint32_t(SimdOp::I32x4TruncSatF32x4U):
8791 case uint32_t(SimdOp::I64x2Neg):
8792 case uint32_t(SimdOp::I64x2ExtendLowI32x4S):
8793 case uint32_t(SimdOp::I64x2ExtendHighI32x4S):
8794 case uint32_t(SimdOp::I64x2ExtendLowI32x4U):
8795 case uint32_t(SimdOp::I64x2ExtendHighI32x4U):
8796 case uint32_t(SimdOp::F32x4Abs):
8797 case uint32_t(SimdOp::F32x4Neg):
8798 case uint32_t(SimdOp::F32x4Sqrt):
8799 case uint32_t(SimdOp::F32x4ConvertI32x4S):
8800 case uint32_t(SimdOp::F32x4ConvertI32x4U):
8801 case uint32_t(SimdOp::F64x2Abs):
8802 case uint32_t(SimdOp::F64x2Neg):
8803 case uint32_t(SimdOp::F64x2Sqrt):
8804 case uint32_t(SimdOp::V128Not):
8805 case uint32_t(SimdOp::I8x16Popcnt):
8806 case uint32_t(SimdOp::I8x16Abs):
8807 case uint32_t(SimdOp::I16x8Abs):
8808 case uint32_t(SimdOp::I32x4Abs):
8809 case uint32_t(SimdOp::I64x2Abs):
8810 case uint32_t(SimdOp::F32x4Ceil):
8811 case uint32_t(SimdOp::F32x4Floor):
8812 case uint32_t(SimdOp::F32x4Trunc):
8813 case uint32_t(SimdOp::F32x4Nearest):
8814 case uint32_t(SimdOp::F64x2Ceil):
8815 case uint32_t(SimdOp::F64x2Floor):
8816 case uint32_t(SimdOp::F64x2Trunc):
8817 case uint32_t(SimdOp::F64x2Nearest):
8818 case uint32_t(SimdOp::F32x4DemoteF64x2Zero):
8819 case uint32_t(SimdOp::F64x2PromoteLowF32x4):
8820 case uint32_t(SimdOp::F64x2ConvertLowI32x4S):
8821 case uint32_t(SimdOp::F64x2ConvertLowI32x4U):
8822 case uint32_t(SimdOp::I32x4TruncSatF64x2SZero):
8823 case uint32_t(SimdOp::I32x4TruncSatF64x2UZero):
8824 case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16S):
8825 case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16U):
8826 case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8S):
8827 case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8U):
8828 CHECK(EmitUnarySimd128(f, SimdOp(op.b1)));
8829 case uint32_t(SimdOp::V128AnyTrue):
8830 case uint32_t(SimdOp::I8x16AllTrue):
8831 case uint32_t(SimdOp::I16x8AllTrue):
8832 case uint32_t(SimdOp::I32x4AllTrue):
8833 case uint32_t(SimdOp::I64x2AllTrue):
8834 case uint32_t(SimdOp::I8x16Bitmask):
8835 case uint32_t(SimdOp::I16x8Bitmask):
8836 case uint32_t(SimdOp::I32x4Bitmask):
8837 case uint32_t(SimdOp::I64x2Bitmask):
8838 CHECK(EmitReduceSimd128(f, SimdOp(op.b1)));
8839 case uint32_t(SimdOp::I8x16Shl):
8840 case uint32_t(SimdOp::I8x16ShrS):
8841 case uint32_t(SimdOp::I8x16ShrU):
8842 case uint32_t(SimdOp::I16x8Shl):
8843 case uint32_t(SimdOp::I16x8ShrS):
8844 case uint32_t(SimdOp::I16x8ShrU):
8845 case uint32_t(SimdOp::I32x4Shl):
8846 case uint32_t(SimdOp::I32x4ShrS):
8847 case uint32_t(SimdOp::I32x4ShrU):
8848 case uint32_t(SimdOp::I64x2Shl):
8849 case uint32_t(SimdOp::I64x2ShrS):
8850 case uint32_t(SimdOp::I64x2ShrU):
8851 CHECK(EmitShiftSimd128(f, SimdOp(op.b1)));
8852 case uint32_t(SimdOp::I8x16ExtractLaneS):
8853 case uint32_t(SimdOp::I8x16ExtractLaneU):
8854 CHECK(EmitExtractLaneSimd128(f, ValType::I32, 16, SimdOp(op.b1)));
8855 case uint32_t(SimdOp::I16x8ExtractLaneS):
8856 case uint32_t(SimdOp::I16x8ExtractLaneU):
8857 CHECK(EmitExtractLaneSimd128(f, ValType::I32, 8, SimdOp(op.b1)));
8858 case uint32_t(SimdOp::I32x4ExtractLane):
8859 CHECK(EmitExtractLaneSimd128(f, ValType::I32, 4, SimdOp(op.b1)));
8860 case uint32_t(SimdOp::I64x2ExtractLane):
8861 CHECK(EmitExtractLaneSimd128(f, ValType::I64, 2, SimdOp(op.b1)));
8862 case uint32_t(SimdOp::F32x4ExtractLane):
8863 CHECK(EmitExtractLaneSimd128(f, ValType::F32, 4, SimdOp(op.b1)));
8864 case uint32_t(SimdOp::F64x2ExtractLane):
8865 CHECK(EmitExtractLaneSimd128(f, ValType::F64, 2, SimdOp(op.b1)));
8866 case uint32_t(SimdOp::I8x16ReplaceLane):
8867 CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 16, SimdOp(op.b1)));
8868 case uint32_t(SimdOp::I16x8ReplaceLane):
8869 CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 8, SimdOp(op.b1)));
8870 case uint32_t(SimdOp::I32x4ReplaceLane):
8871 CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 4, SimdOp(op.b1)));
8872 case uint32_t(SimdOp::I64x2ReplaceLane):
8873 CHECK(EmitReplaceLaneSimd128(f, ValType::I64, 2, SimdOp(op.b1)));
8874 case uint32_t(SimdOp::F32x4ReplaceLane):
8875 CHECK(EmitReplaceLaneSimd128(f, ValType::F32, 4, SimdOp(op.b1)));
8876 case uint32_t(SimdOp::F64x2ReplaceLane):
8877 CHECK(EmitReplaceLaneSimd128(f, ValType::F64, 2, SimdOp(op.b1)));
8878 case uint32_t(SimdOp::V128Bitselect):
8879 CHECK(EmitTernarySimd128(f, SimdOp(op.b1)));
8880 case uint32_t(SimdOp::I8x16Shuffle):
8881 CHECK(EmitShuffleSimd128(f));
8882 case uint32_t(SimdOp::V128Load8Splat):
8883 CHECK(EmitLoadSplatSimd128(f, Scalar::Uint8, SimdOp::I8x16Splat));
8884 case uint32_t(SimdOp::V128Load16Splat):
8885 CHECK(EmitLoadSplatSimd128(f, Scalar::Uint16, SimdOp::I16x8Splat));
8886 case uint32_t(SimdOp::V128Load32Splat):
8887 CHECK(EmitLoadSplatSimd128(f, Scalar::Float32, SimdOp::I32x4Splat));
8888 case uint32_t(SimdOp::V128Load64Splat):
8889 CHECK(EmitLoadSplatSimd128(f, Scalar::Float64, SimdOp::I64x2Splat));
8890 case uint32_t(SimdOp::V128Load8x8S):
8891 case uint32_t(SimdOp::V128Load8x8U):
8892 case uint32_t(SimdOp::V128Load16x4S):
8893 case uint32_t(SimdOp::V128Load16x4U):
8894 case uint32_t(SimdOp::V128Load32x2S):
8895 case uint32_t(SimdOp::V128Load32x2U):
8896 CHECK(EmitLoadExtendSimd128(f, SimdOp(op.b1)));
8897 case uint32_t(SimdOp::V128Load32Zero):
8898 CHECK(EmitLoadZeroSimd128(f, Scalar::Float32, 4));
8899 case uint32_t(SimdOp::V128Load64Zero):
8900 CHECK(EmitLoadZeroSimd128(f, Scalar::Float64, 8));
8901 case uint32_t(SimdOp::V128Load8Lane):
8902 CHECK(EmitLoadLaneSimd128(f, 1));
8903 case uint32_t(SimdOp::V128Load16Lane):
8904 CHECK(EmitLoadLaneSimd128(f, 2));
8905 case uint32_t(SimdOp::V128Load32Lane):
8906 CHECK(EmitLoadLaneSimd128(f, 4));
8907 case uint32_t(SimdOp::V128Load64Lane):
8908 CHECK(EmitLoadLaneSimd128(f, 8));
8909 case uint32_t(SimdOp::V128Store8Lane):
8910 CHECK(EmitStoreLaneSimd128(f, 1));
8911 case uint32_t(SimdOp::V128Store16Lane):
8912 CHECK(EmitStoreLaneSimd128(f, 2));
8913 case uint32_t(SimdOp::V128Store32Lane):
8914 CHECK(EmitStoreLaneSimd128(f, 4));
8915 case uint32_t(SimdOp::V128Store64Lane):
8916 CHECK(EmitStoreLaneSimd128(f, 8));
8917 # ifdef ENABLE_WASM_RELAXED_SIMD
8918 case uint32_t(SimdOp::F32x4RelaxedMadd):
8919 case uint32_t(SimdOp::F32x4RelaxedNmadd):
8920 case uint32_t(SimdOp::F64x2RelaxedMadd):
8921 case uint32_t(SimdOp::F64x2RelaxedNmadd):
8922 case uint32_t(SimdOp::I8x16RelaxedLaneSelect):
8923 case uint32_t(SimdOp::I16x8RelaxedLaneSelect):
8924 case uint32_t(SimdOp::I32x4RelaxedLaneSelect):
8925 case uint32_t(SimdOp::I64x2RelaxedLaneSelect):
8926 case uint32_t(SimdOp::I32x4DotI8x16I7x16AddS): {
8927 if (!f.moduleEnv().v128RelaxedEnabled()) {
8928 return f.iter().unrecognizedOpcode(&op);
8930 CHECK(EmitTernarySimd128(f, SimdOp(op.b1)));
8932 case uint32_t(SimdOp::F32x4RelaxedMin):
8933 case uint32_t(SimdOp::F32x4RelaxedMax):
8934 case uint32_t(SimdOp::F64x2RelaxedMin):
8935 case uint32_t(SimdOp::F64x2RelaxedMax):
8936 case uint32_t(SimdOp::I16x8RelaxedQ15MulrS): {
8937 if (!f.moduleEnv().v128RelaxedEnabled()) {
8938 return f.iter().unrecognizedOpcode(&op);
8940 CHECK(EmitBinarySimd128(f, /* commutative= */ true, SimdOp(op.b1)));
8942 case uint32_t(SimdOp::I32x4RelaxedTruncF32x4S):
8943 case uint32_t(SimdOp::I32x4RelaxedTruncF32x4U):
8944 case uint32_t(SimdOp::I32x4RelaxedTruncF64x2SZero):
8945 case uint32_t(SimdOp::I32x4RelaxedTruncF64x2UZero): {
8946 if (!f.moduleEnv().v128RelaxedEnabled()) {
8947 return f.iter().unrecognizedOpcode(&op);
8949 CHECK(EmitUnarySimd128(f, SimdOp(op.b1)));
8951 case uint32_t(SimdOp::I8x16RelaxedSwizzle):
8952 case uint32_t(SimdOp::I16x8DotI8x16I7x16S): {
8953 if (!f.moduleEnv().v128RelaxedEnabled()) {
8954 return f.iter().unrecognizedOpcode(&op);
8956 CHECK(
8957 EmitBinarySimd128(f, /* commutative= */ false, SimdOp(op.b1)));
8959 # endif
8961 default:
8962 return f.iter().unrecognizedOpcode(&op);
8963 } // switch (op.b1)
8964 break;
8966 #endif
8968 // Miscellaneous operations
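// The misc prefix groups the saturating (nontrapping) float-to-int
// conversions together with the bulk memory/table operations; both are
// dispatched on op.b1 below.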
8969 case uint16_t(Op::MiscPrefix): {
8970 switch (op.b1) {
8971 case uint32_t(MiscOp::I32TruncSatF32S):
8972 case uint32_t(MiscOp::I32TruncSatF32U):
8973 CHECK(EmitTruncate(f, ValType::F32, ValType::I32,
8974 MiscOp(op.b1) == MiscOp::I32TruncSatF32U, true));
8975 case uint32_t(MiscOp::I32TruncSatF64S):
8976 case uint32_t(MiscOp::I32TruncSatF64U):
8977 CHECK(EmitTruncate(f, ValType::F64, ValType::I32,
8978 MiscOp(op.b1) == MiscOp::I32TruncSatF64U, true));
8979 case uint32_t(MiscOp::I64TruncSatF32S):
8980 case uint32_t(MiscOp::I64TruncSatF32U):
8981 CHECK(EmitTruncate(f, ValType::F32, ValType::I64,
8982 MiscOp(op.b1) == MiscOp::I64TruncSatF32U, true));
8983 case uint32_t(MiscOp::I64TruncSatF64S):
8984 case uint32_t(MiscOp::I64TruncSatF64U):
8985 CHECK(EmitTruncate(f, ValType::F64, ValType::I64,
8986 MiscOp(op.b1) == MiscOp::I64TruncSatF64U, true));
8987 case uint32_t(MiscOp::MemoryCopy):
8988 CHECK(EmitMemCopy(f));
8989 case uint32_t(MiscOp::DataDrop):
8990 CHECK(EmitDataOrElemDrop(f, /*isData=*/true));
8991 case uint32_t(MiscOp::MemoryFill):
8992 CHECK(EmitMemFill(f));
8993 case uint32_t(MiscOp::MemoryInit):
8994 CHECK(EmitMemOrTableInit(f, /*isMem=*/true));
8995 case uint32_t(MiscOp::TableCopy):
8996 CHECK(EmitTableCopy(f));
8997 case uint32_t(MiscOp::ElemDrop):
8998 CHECK(EmitDataOrElemDrop(f, /*isData=*/false));
8999 case uint32_t(MiscOp::TableInit):
9000 CHECK(EmitMemOrTableInit(f, /*isMem=*/false));
9001 case uint32_t(MiscOp::TableFill):
9002 CHECK(EmitTableFill(f));
9003 #ifdef ENABLE_WASM_MEMORY_CONTROL
9004 case uint32_t(MiscOp::MemoryDiscard): {
9005 if (!f.moduleEnv().memoryControlEnabled()) {
9006 return f.iter().unrecognizedOpcode(&op);
9008 CHECK(EmitMemDiscard(f));
9010 #endif
9011 case uint32_t(MiscOp::TableGrow):
9012 CHECK(EmitTableGrow(f));
9013 case uint32_t(MiscOp::TableSize):
9014 CHECK(EmitTableSize(f));
9015 default:
9016 return f.iter().unrecognizedOpcode(&op);
9018 break;
9021 // Thread operations
9022 case uint16_t(Op::ThreadPrefix): {
9023 // Though thread ops can be used on nonshared memories, we make them
9024 // unavailable if shared memory has been disabled in the prefs, for
9025 // maximum predictability, safety, and consistency with JS.
9026 if (f.moduleEnv().sharedMemoryEnabled() == Shareable::False) {
9027 return f.iter().unrecognizedOpcode(&op);
9029 switch (op.b1) {
9030 case uint32_t(ThreadOp::Wake):
9031 CHECK(EmitWake(f));
9033 case uint32_t(ThreadOp::I32Wait):
9034 CHECK(EmitWait(f, ValType::I32, 4));
9035 case uint32_t(ThreadOp::I64Wait):
9036 CHECK(EmitWait(f, ValType::I64, 8));
9037 case uint32_t(ThreadOp::Fence):
9038 CHECK(EmitFence(f));
9040 case uint32_t(ThreadOp::I32AtomicLoad):
9041 CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Int32));
9042 case uint32_t(ThreadOp::I64AtomicLoad):
9043 CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Int64));
9044 case uint32_t(ThreadOp::I32AtomicLoad8U):
9045 CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Uint8));
9046 case uint32_t(ThreadOp::I32AtomicLoad16U):
9047 CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Uint16));
9048 case uint32_t(ThreadOp::I64AtomicLoad8U):
9049 CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint8));
9050 case uint32_t(ThreadOp::I64AtomicLoad16U):
9051 CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint16));
9052 case uint32_t(ThreadOp::I64AtomicLoad32U):
9053 CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint32));
9055 case uint32_t(ThreadOp::I32AtomicStore):
9056 CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Int32));
9057 case uint32_t(ThreadOp::I64AtomicStore):
9058 CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Int64));
9059 case uint32_t(ThreadOp::I32AtomicStore8U):
9060 CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Uint8));
9061 case uint32_t(ThreadOp::I32AtomicStore16U):
9062 CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Uint16));
9063 case uint32_t(ThreadOp::I64AtomicStore8U):
9064 CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint8));
9065 case uint32_t(ThreadOp::I64AtomicStore16U):
9066 CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint16));
9067 case uint32_t(ThreadOp::I64AtomicStore32U):
9068 CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint32));
9070 case uint32_t(ThreadOp::I32AtomicAdd):
9071 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
9072 AtomicFetchAddOp));
9073 case uint32_t(ThreadOp::I64AtomicAdd):
9074 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
9075 AtomicFetchAddOp));
9076 case uint32_t(ThreadOp::I32AtomicAdd8U):
9077 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
9078 AtomicFetchAddOp));
9079 case uint32_t(ThreadOp::I32AtomicAdd16U):
9080 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
9081 AtomicFetchAddOp));
9082 case uint32_t(ThreadOp::I64AtomicAdd8U):
9083 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
9084 AtomicFetchAddOp));
9085 case uint32_t(ThreadOp::I64AtomicAdd16U):
9086 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
9087 AtomicFetchAddOp));
9088 case uint32_t(ThreadOp::I64AtomicAdd32U):
9089 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
9090 AtomicFetchAddOp));
9092 case uint32_t(ThreadOp::I32AtomicSub):
9093 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
9094 AtomicFetchSubOp));
9095 case uint32_t(ThreadOp::I64AtomicSub):
9096 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
9097 AtomicFetchSubOp));
9098 case uint32_t(ThreadOp::I32AtomicSub8U):
9099 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
9100 AtomicFetchSubOp));
9101 case uint32_t(ThreadOp::I32AtomicSub16U):
9102 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
9103 AtomicFetchSubOp));
9104 case uint32_t(ThreadOp::I64AtomicSub8U):
9105 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
9106 AtomicFetchSubOp));
9107 case uint32_t(ThreadOp::I64AtomicSub16U):
9108 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
9109 AtomicFetchSubOp));
9110 case uint32_t(ThreadOp::I64AtomicSub32U):
9111 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
9112 AtomicFetchSubOp));
9114 case uint32_t(ThreadOp::I32AtomicAnd):
9115 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
9116 AtomicFetchAndOp));
9117 case uint32_t(ThreadOp::I64AtomicAnd):
9118 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
9119 AtomicFetchAndOp));
9120 case uint32_t(ThreadOp::I32AtomicAnd8U):
9121 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
9122 AtomicFetchAndOp));
9123 case uint32_t(ThreadOp::I32AtomicAnd16U):
9124 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
9125 AtomicFetchAndOp));
9126 case uint32_t(ThreadOp::I64AtomicAnd8U):
9127 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
9128 AtomicFetchAndOp));
9129 case uint32_t(ThreadOp::I64AtomicAnd16U):
9130 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
9131 AtomicFetchAndOp));
9132 case uint32_t(ThreadOp::I64AtomicAnd32U):
9133 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
9134 AtomicFetchAndOp));
9136 case uint32_t(ThreadOp::I32AtomicOr):
9137 CHECK(
9138 EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicFetchOrOp));
9139 case uint32_t(ThreadOp::I64AtomicOr):
9140 CHECK(
9141 EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicFetchOrOp));
9142 case uint32_t(ThreadOp::I32AtomicOr8U):
9143 CHECK(
9144 EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicFetchOrOp));
9145 case uint32_t(ThreadOp::I32AtomicOr16U):
9146 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
9147 AtomicFetchOrOp));
9148 case uint32_t(ThreadOp::I64AtomicOr8U):
9149 CHECK(
9150 EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicFetchOrOp));
9151 case uint32_t(ThreadOp::I64AtomicOr16U):
9152 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
9153 AtomicFetchOrOp));
9154 case uint32_t(ThreadOp::I64AtomicOr32U):
9155 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
9156 AtomicFetchOrOp));
9158 case uint32_t(ThreadOp::I32AtomicXor):
9159 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
9160 AtomicFetchXorOp));
9161 case uint32_t(ThreadOp::I64AtomicXor):
9162 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
9163 AtomicFetchXorOp));
9164 case uint32_t(ThreadOp::I32AtomicXor8U):
9165 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
9166 AtomicFetchXorOp));
9167 case uint32_t(ThreadOp::I32AtomicXor16U):
9168 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
9169 AtomicFetchXorOp));
9170 case uint32_t(ThreadOp::I64AtomicXor8U):
9171 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
9172 AtomicFetchXorOp));
9173 case uint32_t(ThreadOp::I64AtomicXor16U):
9174 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
9175 AtomicFetchXorOp));
9176 case uint32_t(ThreadOp::I64AtomicXor32U):
9177 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
9178 AtomicFetchXorOp));
9180 case uint32_t(ThreadOp::I32AtomicXchg):
9181 CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Int32));
9182 case uint32_t(ThreadOp::I64AtomicXchg):
9183 CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Int64));
9184 case uint32_t(ThreadOp::I32AtomicXchg8U):
9185 CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Uint8));
9186 case uint32_t(ThreadOp::I32AtomicXchg16U):
9187 CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Uint16));
9188 case uint32_t(ThreadOp::I64AtomicXchg8U):
9189 CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint8));
9190 case uint32_t(ThreadOp::I64AtomicXchg16U):
9191 CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint16));
9192 case uint32_t(ThreadOp::I64AtomicXchg32U):
9193 CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint32));
9195 case uint32_t(ThreadOp::I32AtomicCmpXchg):
9196 CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Int32));
9197 case uint32_t(ThreadOp::I64AtomicCmpXchg):
9198 CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Int64));
9199 case uint32_t(ThreadOp::I32AtomicCmpXchg8U):
9200 CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Uint8));
9201 case uint32_t(ThreadOp::I32AtomicCmpXchg16U):
9202 CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Uint16));
9203 case uint32_t(ThreadOp::I64AtomicCmpXchg8U):
9204 CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint8));
9205 case uint32_t(ThreadOp::I64AtomicCmpXchg16U):
9206 CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint16));
9207 case uint32_t(ThreadOp::I64AtomicCmpXchg32U):
9208 CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint32));
9210 default:
9211 return f.iter().unrecognizedOpcode(&op);
9213 break;
9216 // asm.js-specific operators
      case uint16_t(Op::MozPrefix): {
        if (op.b1 == uint32_t(MozOp::CallBuiltinModuleFunc)) {
          if (!f.moduleEnv().isBuiltinModule()) {
            return f.iter().unrecognizedOpcode(&op);
          }
          CHECK(EmitCallBuiltinModuleFunc(f));
        }

        if (!f.moduleEnv().isAsmJS()) {
          return f.iter().unrecognizedOpcode(&op);
        }
        switch (op.b1) {
          case uint32_t(MozOp::TeeGlobal):
            CHECK(EmitTeeGlobal(f));
          case uint32_t(MozOp::I32Min):
          case uint32_t(MozOp::I32Max):
            CHECK(EmitMinMax(f, ValType::I32, MIRType::Int32,
                             MozOp(op.b1) == MozOp::I32Max));
          case uint32_t(MozOp::I32Neg):
            CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::I32, MIRType::Int32));
          case uint32_t(MozOp::I32BitNot):
            CHECK(EmitBitNot(f, ValType::I32));
          case uint32_t(MozOp::I32Abs):
            CHECK(EmitUnaryWithType<MAbs>(f, ValType::I32, MIRType::Int32));
          case uint32_t(MozOp::F32TeeStoreF64):
            CHECK(EmitTeeStoreWithCoercion(f, ValType::F32, Scalar::Float64));
          case uint32_t(MozOp::F64TeeStoreF32):
            CHECK(EmitTeeStoreWithCoercion(f, ValType::F64, Scalar::Float32));
          case uint32_t(MozOp::I32TeeStore8):
            CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int8));
          case uint32_t(MozOp::I32TeeStore16):
            CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int16));
          case uint32_t(MozOp::I64TeeStore8):
            CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int8));
          case uint32_t(MozOp::I64TeeStore16):
            CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int16));
          case uint32_t(MozOp::I64TeeStore32):
            CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int32));
          case uint32_t(MozOp::I32TeeStore):
            CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int32));
          case uint32_t(MozOp::I64TeeStore):
            CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int64));
          case uint32_t(MozOp::F32TeeStore):
            CHECK(EmitTeeStore(f, ValType::F32, Scalar::Float32));
          case uint32_t(MozOp::F64TeeStore):
            CHECK(EmitTeeStore(f, ValType::F64, Scalar::Float64));
          case uint32_t(MozOp::F64Mod):
            CHECK(EmitRem(f, ValType::F64, MIRType::Double,
                          /* isUnsigned = */ false));
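          // The asm.js transcendental and power operations are lowered to
          // calls to the corresponding builtin math routines (the SASig*
          // signatures below).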
          case uint32_t(MozOp::F64SinNative):
            CHECK(EmitUnaryMathBuiltinCall(f, SASigSinNativeD));
          case uint32_t(MozOp::F64SinFdlibm):
            CHECK(EmitUnaryMathBuiltinCall(f, SASigSinFdlibmD));
          case uint32_t(MozOp::F64CosNative):
            CHECK(EmitUnaryMathBuiltinCall(f, SASigCosNativeD));
          case uint32_t(MozOp::F64CosFdlibm):
            CHECK(EmitUnaryMathBuiltinCall(f, SASigCosFdlibmD));
          case uint32_t(MozOp::F64TanNative):
            CHECK(EmitUnaryMathBuiltinCall(f, SASigTanNativeD));
          case uint32_t(MozOp::F64TanFdlibm):
            CHECK(EmitUnaryMathBuiltinCall(f, SASigTanFdlibmD));
          case uint32_t(MozOp::F64Asin):
            CHECK(EmitUnaryMathBuiltinCall(f, SASigASinD));
          case uint32_t(MozOp::F64Acos):
            CHECK(EmitUnaryMathBuiltinCall(f, SASigACosD));
          case uint32_t(MozOp::F64Atan):
            CHECK(EmitUnaryMathBuiltinCall(f, SASigATanD));
          case uint32_t(MozOp::F64Exp):
            CHECK(EmitUnaryMathBuiltinCall(f, SASigExpD));
          case uint32_t(MozOp::F64Log):
            CHECK(EmitUnaryMathBuiltinCall(f, SASigLogD));
          case uint32_t(MozOp::F64Pow):
            CHECK(EmitBinaryMathBuiltinCall(f, SASigPowD));
          case uint32_t(MozOp::F64Atan2):
            CHECK(EmitBinaryMathBuiltinCall(f, SASigATan2D));
          case uint32_t(MozOp::OldCallDirect):
            CHECK(EmitCall(f, /* asmJSFuncDef = */ true));
          case uint32_t(MozOp::OldCallIndirect):
            CHECK(EmitCallIndirect(f, /* oldStyle = */ true));

          default:
            return f.iter().unrecognizedOpcode(&op);
        }
        break;
      }

      default:
        return f.iter().unrecognizedOpcode(&op);
    }
  }

  MOZ_CRASH("unreachable");

#undef CHECK
}

bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
                               const CompilerEnvironment& compilerEnv,
                               LifoAlloc& lifo,
                               const FuncCompileInputVector& inputs,
                               CompiledCode* code, UniqueChars* error) {
  MOZ_ASSERT(compilerEnv.tier() == Tier::Optimized);
  MOZ_ASSERT(compilerEnv.debug() == DebugEnabled::False);

  TempAllocator alloc(&lifo);
  JitContext jitContext;
  MOZ_ASSERT(IsCompilingWasm());
  WasmMacroAssembler masm(alloc, moduleEnv);
#if defined(JS_CODEGEN_ARM64)
  masm.SetStackPointer64(PseudoStackPointer64);
#endif

  // Swap in already-allocated empty vectors to avoid malloc/free.
  MOZ_ASSERT(code->empty());
  if (!code->swap(masm)) {
    return false;
  }

  // Create a description of the stack layout created by GenerateTrapExit().
  RegisterOffsets trapExitLayout;
  size_t trapExitLayoutNumWords;
  GenerateTrapExitRegisterOffsets(&trapExitLayout, &trapExitLayoutNumWords);
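
  // Compile each requested function in turn; any failure (validation error or
  // OOM) abandons the whole batch.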
  for (const FuncCompileInput& func : inputs) {
    JitSpewCont(JitSpew_Codegen, "\n");
    JitSpew(JitSpew_Codegen,
            "# ================================"
            "==================================");
    JitSpew(JitSpew_Codegen, "# ==");
    JitSpew(JitSpew_Codegen,
            "# wasm::IonCompileFunctions: starting on function index %d",
            (int)func.index);

    Decoder d(func.begin, func.end, func.lineOrBytecode, error);

    // Build the local types vector.

    const FuncType& funcType = *moduleEnv.funcs[func.index].type;
    ValTypeVector locals;
    if (!locals.appendAll(funcType.args())) {
      return false;
    }
    if (!DecodeLocalEntries(d, *moduleEnv.types, moduleEnv.features, &locals)) {
      return false;
    }

    // Set up for Ion compilation.

    const JitCompileOptions options;
    MIRGraph graph(&alloc);
    CompileInfo compileInfo(locals.length());
    MIRGenerator mir(nullptr, options, &alloc, &graph, &compileInfo,
                     IonOptimizations.get(OptimizationLevel::Wasm));
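    // If the module has a memory, tell MIR the declared minimum size of
    // memory 0 (32- or 64-bit indexed); the optimizer can take this lower
    // bound into account when reasoning about memory accesses.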
    if (moduleEnv.numMemories() > 0) {
      if (moduleEnv.memories[0].indexType() == IndexType::I32) {
        mir.initMinWasmMemory0Length(moduleEnv.memories[0].initialLength32());
      } else {
        mir.initMinWasmMemory0Length(moduleEnv.memories[0].initialLength64());
      }
    }

    // Build MIR graph
    {
      FunctionCompiler f(moduleEnv, d, func, locals, mir, masm.tryNotes());
      if (!f.init()) {
        return false;
      }

      if (!f.startBlock()) {
        return false;
      }

      if (!EmitBodyExprs(f)) {
        return false;
      }

      f.finish();
    }

    // Compile MIR graph
    {
      jit::SpewBeginWasmFunction(&mir, func.index);
      jit::AutoSpewEndFunction spewEndFunction(&mir);

      if (!OptimizeMIR(&mir)) {
        return false;
      }

      LIRGraph* lir = GenerateLIR(&mir);
      if (!lir) {
        return false;
      }

      size_t unwindInfoBefore = masm.codeRangeUnwindInfos().length();

      CodeGenerator codegen(&mir, lir, &masm);

      BytecodeOffset prologueTrapOffset(func.lineOrBytecode);
      FuncOffsets offsets;
      ArgTypeVector args(funcType);
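      // generateWasm() emits the machine code for the function body
      // (including prologue and epilogue) and records its stack maps into
      // code->stackMaps.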
      if (!codegen.generateWasm(CallIndirectId::forFunc(moduleEnv, func.index),
                                prologueTrapOffset, args, trapExitLayout,
                                trapExitLayoutNumWords, &offsets,
                                &code->stackMaps, &d)) {
        return false;
      }

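      // Whether codegen produced unwind info for this function is detected by
      // comparing the unwind-info count before and after code generation.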
      bool hasUnwindInfo =
          unwindInfoBefore != masm.codeRangeUnwindInfos().length();
      if (!code->codeRanges.emplaceBack(func.index, func.lineOrBytecode,
                                        offsets, hasUnwindInfo)) {
        return false;
      }
    }

    JitSpew(JitSpew_Codegen,
            "# wasm::IonCompileFunctions: completed function index %d",
            (int)func.index);
    JitSpew(JitSpew_Codegen, "# ==");
    JitSpew(JitSpew_Codegen,
            "# ================================"
            "==================================");
    JitSpewCont(JitSpew_Codegen, "\n");
  }

  masm.finish();
  if (masm.oom()) {
    return false;
  }

  return code->swap(masm);
}
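
// Reports whether the Ion wasm compiler is available on this platform, i.e.
// whether an Ion code generator exists for the target architecture.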
bool js::wasm::IonPlatformSupport() {
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) ||      \
    defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS64) ||   \
    defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_LOONG64) || \
    defined(JS_CODEGEN_RISCV64)
  return true;
#else
  return false;
#endif
}