Backed out changeset d53c38086d1b (bug 1853454) for causing spidermonkey build bustage...
[gecko.git] / js / src / wasm / WasmBCClass.h
bloba92440a97e3726f29b1079026a1165ab98cd9502
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
4 * Copyright 2016 Mozilla Foundation
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 // This is an INTERNAL header for Wasm baseline compiler: the compiler object
20 // and its supporting types.
22 #ifndef wasm_wasm_baseline_object_h
23 #define wasm_wasm_baseline_object_h
25 #include "wasm/WasmBCDefs.h"
26 #include "wasm/WasmBCFrame.h"
27 #include "wasm/WasmBCRegDefs.h"
28 #include "wasm/WasmBCStk.h"
29 #include "wasm/WasmConstants.h"
31 namespace js {
32 namespace wasm {
34 // Container for a piece of out-of-line code, the slow path that supports an
35 // operation.
36 class OutOfLineCode;
38 // Part of the inter-bytecode state for the boolean-evaluation-for-control
39 // optimization.
40 struct BranchState;
42 // Representation of wasm local variables.
43 using Local = BaseStackFrame::Local;
45 // Bitset used for simple bounds check elimination. Capping this at 64 locals
46 // makes sense; even 32 locals would probably be OK in practice.
48 // For more information about BCE, see the block comment in WasmBCMemory.cpp.
49 using BCESet = uint64_t;
51 // Information stored in the control node for generating exception handling
52 // landing pads.
53 struct CatchInfo {
54 uint32_t tagIndex; // Index for the associated exception.
55 NonAssertingLabel label; // The entry label for the handler.
57 explicit CatchInfo(uint32_t tagIndex_) : tagIndex(tagIndex_) {}
60 using CatchInfoVector = Vector<CatchInfo, 1, SystemAllocPolicy>;
62 // Control node, representing labels and stack heights at join points.
63 struct Control {
64 NonAssertingLabel label; // The "exit" label
65 NonAssertingLabel otherLabel; // Used for the "else" branch of if-then-else
66 // and to allow delegate to jump to catches.
67 StackHeight stackHeight; // From BaseStackFrame
68 uint32_t stackSize; // Value stack height
69 BCESet bceSafeOnEntry; // Bounds check info flowing into the item
70 BCESet bceSafeOnExit; // Bounds check info flowing out of the item
71 bool deadOnArrival; // deadCode_ was set on entry to the region
72 bool deadThenBranch; // deadCode_ was set on exit from "then"
73 size_t tryNoteIndex; // For tracking try branch code ranges.
74 CatchInfoVector catchInfos; // Used for try-catch handlers.
76 Control()
77 : stackHeight(StackHeight::Invalid()),
78 stackSize(UINT32_MAX),
79 bceSafeOnEntry(0),
80 bceSafeOnExit(~BCESet(0)),
81 deadOnArrival(false),
82 deadThenBranch(false),
83 tryNoteIndex(0) {}
86 // A vector of Nothing values, used for reading opcodes.
87 class BaseNothingVector {
88 Nothing unused_;
90 public:
91 bool reserve(size_t size) { return true; }
92 bool resize(size_t length) { return true; }
93 Nothing& operator[](size_t) { return unused_; }
94 Nothing& back() { return unused_; }
95 size_t length() const { return 0; }
96 bool append(Nothing& nothing) { return true; }
97 void infallibleAppend(Nothing& nothing) {}
100 // The baseline compiler tracks values on a stack of its own -- it needs to scan
101 // that stack for spilling -- and thus has no need for the values maintained by
102 // the iterator.
103 struct BaseCompilePolicy {
104 using Value = Nothing;
105 using ValueVector = BaseNothingVector;
107 // The baseline compiler uses the iterator's control stack, attaching
108 // its own control information.
109 using ControlItem = Control;
112 using BaseOpIter = OpIter<BaseCompilePolicy>;
// Latent operation for boolean-evaluation-for-control optimization.
enum class LatentOp { None, Compare, Eqz };
// Encapsulate the checking needed for a memory access.
struct AccessCheck {
  // By default every check is performed: bounds, pointer alignment, and
  // offset alignment.
  AccessCheck()
      : omitBoundsCheck(false),
        omitAlignmentCheck(false),
        onlyPointerAlignment(false) {}

  // If `omitAlignmentCheck` is true then we need check neither the
  // pointer nor the offset. Otherwise, if `onlyPointerAlignment` is true
  // then we need check only the pointer. Otherwise, check the sum of
  // pointer and offset.

  bool omitBoundsCheck;
  bool omitAlignmentCheck;
  bool onlyPointerAlignment;
};
134 // Encapsulate all the information about a function call.
135 struct FunctionCall {
136 FunctionCall()
137 : restoreRegisterStateAndRealm(false),
138 usesSystemAbi(false),
139 #ifdef JS_CODEGEN_ARM
140 hardFP(true),
141 #endif
142 frameAlignAdjustment(0),
143 stackArgAreaSize(0) {
146 WasmABIArgGenerator abi;
147 bool restoreRegisterStateAndRealm;
148 bool usesSystemAbi;
149 #ifdef JS_CODEGEN_ARM
150 bool hardFP;
151 #endif
152 size_t frameAlignAdjustment;
153 size_t stackArgAreaSize;
enum class PreBarrierKind {
  // No pre-write barrier is required because the previous value is undefined.
  None,
  // Perform a pre-write barrier to mark the previous value if an incremental
  // GC is underway.
  Normal,
};
enum class PostBarrierKind {
  // Remove an existing store buffer entry if the new value does not require
  // one. This is required to preserve invariants with HeapPtr when used for
  // movable storage.
  Precise,
  // Add a store buffer entry if the new value requires it, but do not attempt
  // to remove a pre-existing entry.
  Imprecise,
};
174 //////////////////////////////////////////////////////////////////////////////
176 // Wasm baseline compiler proper.
178 // This is a struct and not a class because there is no real benefit to hiding
179 // anything, and because many static functions that are wrappers for masm
180 // methods need to reach into it and would otherwise have to be declared as
181 // friends.
183 // (Members generally have a '_' suffix but some don't because they are
184 // referenced everywhere and it would be tedious to spell that out.)
186 struct BaseCompiler final {
187 ///////////////////////////////////////////////////////////////////////////
189 // Private types
191 using LabelVector = Vector<NonAssertingLabel, 8, SystemAllocPolicy>;
193 ///////////////////////////////////////////////////////////////////////////
195 // Read-only and write-once members.
197 // Static compilation environment.
198 const ModuleEnvironment& moduleEnv_;
199 const CompilerEnvironment& compilerEnv_;
200 const FuncCompileInput& func_;
201 const ValTypeVector& locals_;
203 // Information about the locations of locals, this is set up during
204 // initialization and read-only after that.
205 BaseStackFrame::LocalVector localInfo_;
207 // On specific platforms we sometimes need to use specific registers.
208 const SpecificRegs specific_;
210 // SigD and SigF are single-entry parameter lists for f64 and f32, these are
211 // created during initialization.
212 ValTypeVector SigD_;
213 ValTypeVector SigF_;
215 // Where to go to to return, bound as compilation ends.
216 NonAssertingLabel returnLabel_;
218 // Prologue and epilogue offsets, initialized during prologue and epilogue
219 // generation and only used by the caller.
220 FuncOffsets offsets_;
222 // We call this address from the breakable point when the breakpoint handler
223 // is not null.
224 NonAssertingLabel debugTrapStub_;
225 uint32_t previousBreakablePoint_;
227 // BaselineCompileFunctions() "lends" us the StkVector to use in this
228 // BaseCompiler object, and that is installed in |stk_| in our constructor.
229 // This is so as to avoid having to malloc/free the vector's contents at
230 // each creation/destruction of a BaseCompiler object. It does however mean
231 // that we need to hold on to a reference to BaselineCompileFunctions()'s
232 // vector, so we can swap (give) its contents back when this BaseCompiler
233 // object is destroyed. This significantly reduces the heap turnover of the
234 // baseline compiler. See bug 1532592.
235 StkVector& stkSource_;
237 ///////////////////////////////////////////////////////////////////////////
239 // Output-only data structures.
241 // Bump allocator for temporary memory, used for the value stack and
242 // out-of-line code blobs. Bump-allocated memory is not freed until the end
243 // of the compilation.
244 TempAllocator::Fallible alloc_;
246 // Machine code emitter.
247 MacroAssembler& masm;
249 ///////////////////////////////////////////////////////////////////////////
251 // Compilation state.
253 // Decoder for this function, used for misc error reporting.
254 Decoder& decoder_;
256 // Opcode reader.
257 BaseOpIter iter_;
259 // Register allocator.
260 BaseRegAlloc ra;
262 // Stack frame abstraction.
263 BaseStackFrame fr;
265 // Latent out of line support code for some operations, code for these will be
266 // emitted at the end of compilation.
267 Vector<OutOfLineCode*, 8, SystemAllocPolicy> outOfLine_;
269 // Stack map state. This keeps track of live pointer slots and allows precise
270 // stack maps to be generated at safe points.
271 StackMapGenerator stackMapGenerator_;
273 // Wasm value stack. This maps values on the wasm stack to values in the
274 // running code and their locations.
276 // The value stack facilitates on-the-fly register allocation and the use of
277 // immediates in instructions. It tracks latent constants, latent references
278 // to locals, register contents, and values that have been flushed to the CPU
279 // stack.
281 // The stack can be flushed to the CPU stack using sync().
283 // The stack is a StkVector rather than a StkVector& since constantly
284 // dereferencing a StkVector& has been shown to add 0.5% or more to the
285 // compiler's dynamic instruction count.
286 StkVector stk_;
288 // Flag indicating that the compiler is currently in a dead code region.
289 bool deadCode_;
291 ///////////////////////////////////////////////////////////////////////////
293 // State for bounds check elimination.
295 // Locals that have been bounds checked and not updated since
296 BCESet bceSafe_;
298 ///////////////////////////////////////////////////////////////////////////
300 // State for boolean-evaluation-for-control.
302 // Latent operation for branch (seen next)
303 LatentOp latentOp_;
305 // Operand type, if latentOp_ is true
306 ValType latentType_;
308 // Comparison operator, if latentOp_ == Compare, int types
309 Assembler::Condition latentIntCmp_;
311 // Comparison operator, if latentOp_ == Compare, float types
312 Assembler::DoubleCondition latentDoubleCmp_;
314 ///////////////////////////////////////////////////////////////////////////
316 // Main compilation API.
318 // A client will create a compiler object, and then call init(),
319 // emitFunction(), and finish() in that order.
321 BaseCompiler(const ModuleEnvironment& moduleEnv,
322 const CompilerEnvironment& compilerEnv,
323 const FuncCompileInput& func, const ValTypeVector& locals,
324 const RegisterOffsets& trapExitLayout,
325 size_t trapExitLayoutNumWords, Decoder& decoder,
326 StkVector& stkSource, TempAllocator* alloc, MacroAssembler* masm,
327 StackMaps* stackMaps);
328 ~BaseCompiler();
330 [[nodiscard]] bool init();
331 [[nodiscard]] bool emitFunction();
332 [[nodiscard]] FuncOffsets finish();
334 //////////////////////////////////////////////////////////////////////////////
336 // Sundry accessor abstractions and convenience predicates.
338 // WasmBaselineObject-inl.h.
340 inline const FuncType& funcType() const;
341 inline bool usesMemory() const;
342 inline bool usesSharedMemory(uint32_t memoryIndex) const;
343 inline bool isMem32(uint32_t memoryIndex) const;
344 inline bool isMem64(uint32_t memoryIndex) const;
345 inline bool hugeMemoryEnabled(uint32_t memoryIndex) const;
346 inline uint32_t instanceOffsetOfMemoryBase(uint32_t memoryIndex) const;
347 inline uint32_t instanceOffsetOfBoundsCheckLimit(uint32_t memoryIndex) const;
349 // The casts are used by some of the ScratchRegister implementations.
350 operator MacroAssembler&() const { return masm; }
351 operator BaseRegAlloc&() { return ra; }
353 //////////////////////////////////////////////////////////////////////////////
355 // Locals.
357 // WasmBaselineObject-inl.h.
359 // Assert that the local at the given index has the given type, and return a
360 // reference to the Local.
361 inline const Local& localFromSlot(uint32_t slot, MIRType type);
363 //////////////////////////////////////////////////////////////////////////////
365 // Out of line code management.
367 [[nodiscard]] OutOfLineCode* addOutOfLineCode(OutOfLineCode* ool);
368 [[nodiscard]] bool generateOutOfLineCode();
370 /////////////////////////////////////////////////////////////////////////////
372 // Layering in the compiler (briefly).
374 // At the lowest layers are abstractions for registers (managed by the
375 // BaseRegAlloc and the wrappers below) and the stack frame (managed by the
376 // BaseStackFrame).
378 // The registers and frame are in turn used by the value abstraction, which is
379 // implemented by the Stk type and backed by the value stack. Values may be
380 // stored in registers, in the frame, or may be latent constants, and the
381 // value stack handles storage mostly transparently in its push and pop
382 // routines.
384 // In turn, the pop routines bring values into registers so that we can
385 // compute on them, and the push routines move values to the stack (where they
386 // may still reside in registers until the registers are needed or the value
387 // must be in memory).
389 // Routines for managing parameters and results (for blocks or calls) may also
390 // manipulate the stack directly.
392 // At the top are the code generators: methods that use the poppers and
393 // pushers and other utilities to move values into place, and that emit code
394 // to compute on those values or change control flow.
396 /////////////////////////////////////////////////////////////////////////////
398 // Register management. These are simply strongly-typed wrappers that
399 // delegate to the register allocator.
401 inline bool isAvailableI32(RegI32 r);
402 inline bool isAvailableI64(RegI64 r);
403 inline bool isAvailableRef(RegRef r);
404 inline bool isAvailablePtr(RegPtr r);
405 inline bool isAvailableF32(RegF32 r);
406 inline bool isAvailableF64(RegF64 r);
407 #ifdef ENABLE_WASM_SIMD
408 inline bool isAvailableV128(RegV128 r);
409 #endif
411 // Allocate any register
412 [[nodiscard]] inline RegI32 needI32();
413 [[nodiscard]] inline RegI64 needI64();
414 [[nodiscard]] inline RegRef needRef();
415 [[nodiscard]] inline RegPtr needPtr();
416 [[nodiscard]] inline RegF32 needF32();
417 [[nodiscard]] inline RegF64 needF64();
418 #ifdef ENABLE_WASM_SIMD
419 [[nodiscard]] inline RegV128 needV128();
420 #endif
422 // Allocate a specific register
423 inline void needI32(RegI32 specific);
424 inline void needI64(RegI64 specific);
425 inline void needRef(RegRef specific);
426 inline void needPtr(RegPtr specific);
427 inline void needF32(RegF32 specific);
428 inline void needF64(RegF64 specific);
429 #ifdef ENABLE_WASM_SIMD
430 inline void needV128(RegV128 specific);
431 #endif
433 template <typename RegType>
434 inline RegType need();
436 // Just a shorthand.
437 inline void need2xI32(RegI32 r0, RegI32 r1);
438 inline void need2xI64(RegI64 r0, RegI64 r1);
440 // Get a register but do not sync the stack to free one up. This will crash
441 // if no register is available.
442 inline void needI32NoSync(RegI32 r);
444 #if defined(JS_CODEGEN_ARM)
445 // Allocate a specific register pair (even-odd register numbers).
446 [[nodiscard]] inline RegI64 needI64Pair();
447 #endif
449 inline void freeAny(AnyReg r);
450 inline void freeI32(RegI32 r);
451 inline void freeI64(RegI64 r);
452 inline void freeRef(RegRef r);
453 inline void freePtr(RegPtr r);
454 inline void freeF32(RegF32 r);
455 inline void freeF64(RegF64 r);
456 #ifdef ENABLE_WASM_SIMD
457 inline void freeV128(RegV128 r);
458 #endif
460 template <typename RegType>
461 inline void free(RegType r);
463 // Free r if it is not invalid.
464 inline void maybeFree(RegI32 r);
465 inline void maybeFree(RegI64 r);
466 inline void maybeFree(RegF32 r);
467 inline void maybeFree(RegF64 r);
468 inline void maybeFree(RegRef r);
469 inline void maybeFree(RegPtr r);
470 #ifdef ENABLE_WASM_SIMD
471 inline void maybeFree(RegV128 r);
472 #endif
474 // On 64-bit systems, `except` must equal r and this is a no-op. On 32-bit
475 // systems, `except` must equal the high or low part of a pair and the other
476 // part of the pair is freed.
477 inline void freeI64Except(RegI64 r, RegI32 except);
479 // Return the 32-bit low part of the 64-bit register, do not free anything.
480 inline RegI32 fromI64(RegI64 r);
482 // If r is valid, return fromI64(r), otherwise an invalid RegI32.
483 inline RegI32 maybeFromI64(RegI64 r);
485 #ifdef JS_PUNBOX64
486 // On 64-bit systems, reinterpret r as 64-bit.
487 inline RegI64 fromI32(RegI32 r);
488 #endif
490 // Widen r to 64 bits; this may allocate another register to form a pair.
491 // Note this does not generate code for sign/zero extension.
492 inline RegI64 widenI32(RegI32 r);
494 // Narrow r to 32 bits; this may free part of a pair. Note this does not
495 // generate code to canonicalize the value on 64-bit systems.
496 inline RegI32 narrowI64(RegI64 r);
497 inline RegI32 narrowRef(RegRef r);
499 // Return the 32-bit low part of r.
500 inline RegI32 lowPart(RegI64 r);
502 // On 64-bit systems, return an invalid register. On 32-bit systems, return
503 // the low part of a pair.
504 inline RegI32 maybeHighPart(RegI64 r);
506 // On 64-bit systems, do nothing. On 32-bit systems, clear the high register.
507 inline void maybeClearHighPart(RegI64 r);
509 //////////////////////////////////////////////////////////////////////////////
511 // Values and value stack: Low-level methods for moving Stk values of specific
512 // kinds to registers.
514 inline void loadConstI32(const Stk& src, RegI32 dest);
515 inline void loadMemI32(const Stk& src, RegI32 dest);
516 inline void loadLocalI32(const Stk& src, RegI32 dest);
517 inline void loadRegisterI32(const Stk& src, RegI32 dest);
518 inline void loadConstI64(const Stk& src, RegI64 dest);
519 inline void loadMemI64(const Stk& src, RegI64 dest);
520 inline void loadLocalI64(const Stk& src, RegI64 dest);
521 inline void loadRegisterI64(const Stk& src, RegI64 dest);
522 inline void loadConstRef(const Stk& src, RegRef dest);
523 inline void loadMemRef(const Stk& src, RegRef dest);
524 inline void loadLocalRef(const Stk& src, RegRef dest);
525 inline void loadRegisterRef(const Stk& src, RegRef dest);
526 inline void loadConstF64(const Stk& src, RegF64 dest);
527 inline void loadMemF64(const Stk& src, RegF64 dest);
528 inline void loadLocalF64(const Stk& src, RegF64 dest);
529 inline void loadRegisterF64(const Stk& src, RegF64 dest);
530 inline void loadConstF32(const Stk& src, RegF32 dest);
531 inline void loadMemF32(const Stk& src, RegF32 dest);
532 inline void loadLocalF32(const Stk& src, RegF32 dest);
533 inline void loadRegisterF32(const Stk& src, RegF32 dest);
534 #ifdef ENABLE_WASM_SIMD
535 inline void loadConstV128(const Stk& src, RegV128 dest);
536 inline void loadMemV128(const Stk& src, RegV128 dest);
537 inline void loadLocalV128(const Stk& src, RegV128 dest);
538 inline void loadRegisterV128(const Stk& src, RegV128 dest);
539 #endif
541 //////////////////////////////////////////////////////////////////////////
543 // Values and value stack: Mid-level routines for moving Stk values of any
544 // kind to registers.
546 inline void loadI32(const Stk& src, RegI32 dest);
547 inline void loadI64(const Stk& src, RegI64 dest);
548 #if !defined(JS_PUNBOX64)
549 inline void loadI64Low(const Stk& src, RegI32 dest);
550 inline void loadI64High(const Stk& src, RegI32 dest);
551 #endif
552 inline void loadF64(const Stk& src, RegF64 dest);
553 inline void loadF32(const Stk& src, RegF32 dest);
554 #ifdef ENABLE_WASM_SIMD
555 inline void loadV128(const Stk& src, RegV128 dest);
556 #endif
557 inline void loadRef(const Stk& src, RegRef dest);
559 //////////////////////////////////////////////////////////////////////
561 // Value stack: stack management.
563 // Flush all local and register value stack elements to memory.
564 inline void sync();
566 // Save a register on the value stack temporarily.
567 void saveTempPtr(const RegPtr& r);
569 // Restore a temporarily saved register from the value stack.
570 void restoreTempPtr(const RegPtr& r);
572 // This is an optimization used to avoid calling sync for setLocal: if the
573 // local does not exist unresolved on the value stack then we can skip the
574 // sync.
575 inline bool hasLocal(uint32_t slot);
577 // Sync the local if necessary. (This currently syncs everything if a sync is
578 // needed at all.)
579 inline void syncLocal(uint32_t slot);
581 // Return the amount of execution stack consumed by the top numval
582 // values on the value stack.
583 inline size_t stackConsumed(size_t numval);
585 // Drop one value off the stack, possibly also moving the physical stack
586 // pointer.
587 inline void dropValue();
589 #ifdef DEBUG
590 // Check that we're not leaking registers by comparing the
591 // state of the stack + available registers with the set of
592 // all available registers.
594 // Call this between opcodes.
595 void performRegisterLeakCheck();
597 // This can be called at any point, really, but typically just after
598 // performRegisterLeakCheck().
599 void assertStackInvariants() const;
601 // Count the number of memory references on the value stack.
602 inline size_t countMemRefsOnStk();
604 // Print the stack to stderr.
605 void showStack(const char* who) const;
606 #endif
608 //////////////////////////////////////////////////////////////////////
610 // Value stack: pushers of values.
612 // Push a register onto the value stack.
613 inline void pushAny(AnyReg r);
614 inline void pushI32(RegI32 r);
615 inline void pushI64(RegI64 r);
616 inline void pushRef(RegRef r);
617 inline void pushPtr(RegPtr r);
618 inline void pushF64(RegF64 r);
619 inline void pushF32(RegF32 r);
620 #ifdef ENABLE_WASM_SIMD
621 inline void pushV128(RegV128 r);
622 #endif
624 // Template variation of the foregoing, for use by templated emitters.
625 template <typename RegType>
626 inline void push(RegType item);
628 // Push a constant value onto the stack. pushI32 can also take uint32_t, and
629 // pushI64 can take uint64_t; the semantics are the same. Appropriate sign
630 // extension for a 32-bit value on a 64-bit architecture happens when the
631 // value is popped, see the definition of moveImm32.
632 inline void pushI32(int32_t v);
633 inline void pushI64(int64_t v);
634 inline void pushRef(intptr_t v);
635 inline void pushPtr(intptr_t v);
636 inline void pushF64(double v);
637 inline void pushF32(float v);
638 #ifdef ENABLE_WASM_SIMD
639 inline void pushV128(V128 v);
640 #endif
641 inline void pushConstRef(intptr_t v);
643 // Push the local slot onto the stack. The slot will not be read here; it
644 // will be read when it is consumed, or when a side effect to the slot forces
645 // its value to be saved.
646 inline void pushLocalI32(uint32_t slot);
647 inline void pushLocalI64(uint32_t slot);
648 inline void pushLocalRef(uint32_t slot);
649 inline void pushLocalF64(uint32_t slot);
650 inline void pushLocalF32(uint32_t slot);
651 #ifdef ENABLE_WASM_SIMD
652 inline void pushLocalV128(uint32_t slot);
653 #endif
655 // Push an U32 as an I64, zero-extending it in the process
656 inline void pushU32AsI64(RegI32 rs);
658 //////////////////////////////////////////////////////////////////////
660 // Value stack: poppers and peekers of values.
662 // Pop some value off the stack.
663 inline AnyReg popAny();
664 inline AnyReg popAny(AnyReg specific);
666 // Call only from other popI32() variants. v must be the stack top. May pop
667 // the CPU stack.
668 inline void popI32(const Stk& v, RegI32 dest);
670 [[nodiscard]] inline RegI32 popI32();
671 inline RegI32 popI32(RegI32 specific);
673 #ifdef ENABLE_WASM_SIMD
674 // Call only from other popV128() variants. v must be the stack top. May pop
675 // the CPU stack.
676 inline void popV128(const Stk& v, RegV128 dest);
678 [[nodiscard]] inline RegV128 popV128();
679 inline RegV128 popV128(RegV128 specific);
680 #endif
682 // Call only from other popI64() variants. v must be the stack top. May pop
683 // the CPU stack.
684 inline void popI64(const Stk& v, RegI64 dest);
686 [[nodiscard]] inline RegI64 popI64();
687 inline RegI64 popI64(RegI64 specific);
689 // Call only from other popRef() variants. v must be the stack top. May pop
690 // the CPU stack.
691 inline void popRef(const Stk& v, RegRef dest);
693 inline RegRef popRef(RegRef specific);
694 [[nodiscard]] inline RegRef popRef();
696 // Call only from other popPtr() variants. v must be the stack top. May pop
697 // the CPU stack.
698 inline void popPtr(const Stk& v, RegPtr dest);
700 inline RegPtr popPtr(RegPtr specific);
701 [[nodiscard]] inline RegPtr popPtr();
703 // Call only from other popF64() variants. v must be the stack top. May pop
704 // the CPU stack.
705 inline void popF64(const Stk& v, RegF64 dest);
707 [[nodiscard]] inline RegF64 popF64();
708 inline RegF64 popF64(RegF64 specific);
710 // Call only from other popF32() variants. v must be the stack top. May pop
711 // the CPU stack.
712 inline void popF32(const Stk& v, RegF32 dest);
714 [[nodiscard]] inline RegF32 popF32();
715 inline RegF32 popF32(RegF32 specific);
717 // Templated variation of the foregoing, for use by templated emitters.
718 template <typename RegType>
719 inline RegType pop();
721 // Constant poppers will return true and pop the value if the stack top is a
722 // constant of the appropriate type; otherwise pop nothing and return false.
723 [[nodiscard]] inline bool hasConst() const;
724 [[nodiscard]] inline bool popConst(int32_t* c);
725 [[nodiscard]] inline bool popConst(int64_t* c);
726 [[nodiscard]] inline bool peekConst(int32_t* c);
727 [[nodiscard]] inline bool peekConst(int64_t* c);
728 [[nodiscard]] inline bool peek2xConst(int32_t* c0, int32_t* c1);
729 [[nodiscard]] inline bool popConstPositivePowerOfTwo(int32_t* c,
730 uint_fast8_t* power,
731 int32_t cutoff);
732 [[nodiscard]] inline bool popConstPositivePowerOfTwo(int64_t* c,
733 uint_fast8_t* power,
734 int64_t cutoff);
736 // Shorthand: Pop r1, then r0.
737 inline void pop2xI32(RegI32* r0, RegI32* r1);
738 inline void pop2xI64(RegI64* r0, RegI64* r1);
739 inline void pop2xF32(RegF32* r0, RegF32* r1);
740 inline void pop2xF64(RegF64* r0, RegF64* r1);
741 #ifdef ENABLE_WASM_SIMD
742 inline void pop2xV128(RegV128* r0, RegV128* r1);
743 #endif
744 inline void pop2xRef(RegRef* r0, RegRef* r1);
746 // Pop to a specific register
747 inline RegI32 popI32ToSpecific(RegI32 specific);
748 inline RegI64 popI64ToSpecific(RegI64 specific);
750 #ifdef JS_CODEGEN_ARM
751 // Pop an I64 as a valid register pair.
752 inline RegI64 popI64Pair();
753 #endif
755 // Pop an I64 but narrow it and return the narrowed part.
756 inline RegI32 popI64ToI32();
757 inline RegI32 popI64ToSpecificI32(RegI32 specific);
759 // Pop an I32 or I64 as an I64. The value is zero extended out to 64-bits.
760 inline RegI64 popIndexToInt64(IndexType indexType);
762 // Pop the stack until it has the desired size, but do not move the physical
763 // stack pointer.
764 inline void popValueStackTo(uint32_t stackSize);
766 // Pop the given number of elements off the value stack, but do not move
767 // the physical stack pointer.
768 inline void popValueStackBy(uint32_t items);
770 // Peek into the stack at relativeDepth from the top.
771 inline Stk& peek(uint32_t relativeDepth);
773 // Peek the reference value at the specified depth and load it into a
774 // register.
775 inline void peekRefAt(uint32_t depth, RegRef dest);
777 // Peek at the value on the top of the stack and return true if it is a Local
778 // of any type.
779 [[nodiscard]] inline bool peekLocal(uint32_t* local);
781 ////////////////////////////////////////////////////////////////////////////
783 // Block parameters and results.
785 // Blocks may have multiple parameters and multiple results. Blocks can also
786 // be the target of branches: the entry for loops, and the exit for
787 // non-loops.
789 // Passing multiple values to a non-branch target (i.e., the entry of a
790 // "block") falls out naturally: any items on the value stack can flow
791 // directly from one block to another.
793 // However, for branch targets, we need to allocate well-known locations for
794 // the branch values. The approach taken in the baseline compiler is to
795 // allocate registers to the top N values (currently N=1), and then stack
796 // locations for the rest.
// Types of result registers that interest us for result-manipulating
// functions.
enum class ResultRegKind {
  // General and floating result registers.
  All,

  // General result registers only.
  OnlyGPRs
};
// This is a flag ultimately intended for popBlockResults() that specifies how
// the CPU stack should be handled after the result values have been
// processed.
enum class ContinuationKind {
  // Adjust the stack for a fallthrough: do nothing.
  Fallthrough,

  // Adjust the stack for a jump: make the stack conform to the
  // expected stack at the target
  Jump
};
821 // TODO: It's definitely disputable whether the result register management is
822 // hot enough to warrant inlining at the outermost level.
824 inline void needResultRegisters(ResultType type, ResultRegKind which);
825 #ifdef JS_64BIT
826 inline void widenInt32ResultRegisters(ResultType type);
827 #endif
828 inline void freeResultRegisters(ResultType type, ResultRegKind which);
829 inline void needIntegerResultRegisters(ResultType type);
830 inline void freeIntegerResultRegisters(ResultType type);
831 inline void needResultRegisters(ResultType type);
832 inline void freeResultRegisters(ResultType type);
833 void assertResultRegistersAvailable(ResultType type);
834 inline void captureResultRegisters(ResultType type);
835 inline void captureCallResultRegisters(ResultType type);
837 void popRegisterResults(ABIResultIter& iter);
838 void popStackResults(ABIResultIter& iter, StackHeight stackBase);
840 void popBlockResults(ResultType type, StackHeight stackBase,
841 ContinuationKind kind);
843 // This function is similar to popBlockResults, but additionally handles the
844 // implicit exception pointer that is pushed to the value stack on entry to
845 // a catch handler by dropping it appropriately.
846 void popCatchResults(ResultType type, StackHeight stackBase);
848 Stk captureStackResult(const ABIResult& result, StackHeight resultsBase,
849 uint32_t stackResultBytes);
851 [[nodiscard]] bool pushResults(ResultType type, StackHeight resultsBase);
852 [[nodiscard]] bool pushBlockResults(ResultType type);
854 // A combination of popBlockResults + pushBlockResults, used when entering a
855 // block with a control-flow join (loops) or split (if) to shuffle the
856 // fallthrough block parameters into the locations expected by the
857 // continuation.
859 // This function should only be called when entering a block with a
860 // control-flow join at the entry, where there are no live temporaries in
861 // the current block.
862 [[nodiscard]] bool topBlockParams(ResultType type);
864 // A combination of popBlockResults + pushBlockResults, used before branches
865 // where we don't know the target (br_if / br_table). If and when the branch
866 // is taken, the stack results will be shuffled down into place. For br_if
867 // that has fallthrough, the parameters for the untaken branch flow through to
868 // the continuation.
869 [[nodiscard]] bool topBranchParams(ResultType type, StackHeight* height);
871 // Conditional branches with fallthrough are preceded by a topBranchParams, so
872 // we know that there are no stack results that need to be materialized. In
873 // that case, we can just shuffle the whole block down before popping the
874 // stack.
875 void shuffleStackResultsBeforeBranch(StackHeight srcHeight,
876 StackHeight destHeight, ResultType type);
878 // If in debug mode, adds LeaveFrame breakpoint.
879 bool insertLeaveFrame();
881 //////////////////////////////////////////////////////////////////////
883 // Stack maps
885 // Various methods for creating a stackmap. Stackmaps are indexed by the
886 // lowest address of the instruction immediately *after* the instruction of
887 // interest. In practice that means either: the return point of a call, the
888 // instruction immediately after a trap instruction (the "resume"
889 // instruction), or the instruction immediately following a no-op (when
890 // debugging is enabled).
892 // Create a vanilla stackmap.
893 [[nodiscard]] bool createStackMap(const char* who);
895 // Create a stackmap as vanilla, but for a custom assembler offset.
896 [[nodiscard]] bool createStackMap(const char* who,
897 CodeOffset assemblerOffset);
899 // Create a stack map as vanilla, and note the presence of a ref-typed
900 // DebugFrame on the stack.
901 [[nodiscard]] bool createStackMap(
902 const char* who, HasDebugFrameWithLiveRefs debugFrameWithLiveRefs);
904 // The most general stackmap construction.
905 [[nodiscard]] bool createStackMap(
906 const char* who, const ExitStubMapVector& extras,
907 uint32_t assemblerOffset,
908 HasDebugFrameWithLiveRefs debugFrameWithLiveRefs);
910 ////////////////////////////////////////////////////////////
912 // Control stack
914 inline void initControl(Control& item, ResultType params);
915 inline Control& controlItem();
916 inline Control& controlItem(uint32_t relativeDepth);
917 inline Control& controlOutermost();
918 inline LabelKind controlKind(uint32_t relativeDepth);
920 ////////////////////////////////////////////////////////////
922 // Debugger API
924 // Insert a breakpoint almost anywhere. This will create a call, with all the
925 // overhead that entails.
926 void insertBreakablePoint(CallSiteDesc::Kind kind);
928 // Insert code at the end of a function for breakpoint filtering.
929 void insertBreakpointStub();
931 // Debugger API used at the return point: shuffle register return values off
932 // to memory for the debugger to see; and get them back again.
933 void saveRegisterReturnValues(const ResultType& resultType);
934 void restoreRegisterReturnValues(const ResultType& resultType);
936 //////////////////////////////////////////////////////////////////////
938 // Function prologue and epilogue.
940 // Set up and tear down frame, execute prologue and epilogue.
941 [[nodiscard]] bool beginFunction();
942 [[nodiscard]] bool endFunction();
944 // Move return values to memory before returning, as appropriate
945 void popStackReturnValues(const ResultType& resultType);
947 //////////////////////////////////////////////////////////////////////
949 // Calls.
951 void beginCall(FunctionCall& call, UseABI useABI,
952 RestoreRegisterStateAndRealm restoreRegisterStateAndRealm);
953 void endCall(FunctionCall& call, size_t stackSpace);
954 void startCallArgs(size_t stackArgAreaSizeUnaligned, FunctionCall* call);
955 ABIArg reservePointerArgument(FunctionCall* call);
956 void passArg(ValType type, const Stk& arg, FunctionCall* call);
957 CodeOffset callDefinition(uint32_t funcIndex, const FunctionCall& call);
958 CodeOffset callSymbolic(SymbolicAddress callee, const FunctionCall& call);
960 // Precondition for the call*() methods: sync()
962 bool callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
963 const Stk& indexVal, const FunctionCall& call,
964 bool tailCall, CodeOffset* fastCallOffset,
965 CodeOffset* slowCallOffset);
966 CodeOffset callImport(unsigned instanceDataOffset, const FunctionCall& call);
967 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
968 void callRef(const Stk& calleeRef, const FunctionCall& call,
969 CodeOffset* fastCallOffset, CodeOffset* slowCallOffset);
970 # ifdef ENABLE_WASM_TAIL_CALLS
971 void returnCallRef(const Stk& calleeRef, const FunctionCall& call,
972 const FuncType* funcType);
973 # endif
974 #endif
975 CodeOffset builtinCall(SymbolicAddress builtin, const FunctionCall& call);
976 CodeOffset builtinInstanceMethodCall(const SymbolicAddressSignature& builtin,
977 const ABIArg& instanceArg,
978 const FunctionCall& call);
979 [[nodiscard]] bool pushCallResults(const FunctionCall& call, ResultType type,
980 const StackResultsLoc& loc);
982 // Helpers to pick up the returned value from the return register.
983 inline RegI32 captureReturnedI32();
984 inline RegI64 captureReturnedI64();
985 inline RegF32 captureReturnedF32(const FunctionCall& call);
986 inline RegF64 captureReturnedF64(const FunctionCall& call);
987 #ifdef ENABLE_WASM_SIMD
988 inline RegV128 captureReturnedV128(const FunctionCall& call);
989 #endif
990 inline RegRef captureReturnedRef();
992 //////////////////////////////////////////////////////////////////////
994 // Register-to-register moves. These emit nothing if src == dest.
996 inline void moveI32(RegI32 src, RegI32 dest);
997 inline void moveI64(RegI64 src, RegI64 dest);
998 inline void moveRef(RegRef src, RegRef dest);
999 inline void movePtr(RegPtr src, RegPtr dest);
1000 inline void moveF64(RegF64 src, RegF64 dest);
1001 inline void moveF32(RegF32 src, RegF32 dest);
1002 #ifdef ENABLE_WASM_SIMD
1003 inline void moveV128(RegV128 src, RegV128 dest);
1004 #endif
1006 template <typename RegType>
1007 inline void move(RegType src, RegType dest);
1009 //////////////////////////////////////////////////////////////////////
1011 // Immediate-to-register moves.
  // The compiler depends on moveImm32() clearing the high bits of a 64-bit
  // register on 64-bit systems, except on MIPS64 and LoongArch64 where the
  // high bits are sign-extended from the lower bits; see the doc block
  // "64-bit GPRs carrying 32-bit values" in MacroAssembler.h.
1018 inline void moveImm32(int32_t v, RegI32 dest);
1019 inline void moveImm64(int64_t v, RegI64 dest);
1020 inline void moveImmRef(intptr_t v, RegRef dest);
1022 //////////////////////////////////////////////////////////////////////
1024 // Sundry low-level code generators.
1026 // Check the interrupt flag, trap if it is set.
1027 [[nodiscard]] bool addInterruptCheck();
1029 // Check that the value is not zero, trap if it is.
1030 void checkDivideByZero(RegI32 rhs);
1031 void checkDivideByZero(RegI64 r);
1033 // Check that a signed division will not overflow, trap or flush-to-zero if it
1034 // will according to `zeroOnOverflow`.
1035 void checkDivideSignedOverflow(RegI32 rhs, RegI32 srcDest, Label* done,
1036 bool zeroOnOverflow);
1037 void checkDivideSignedOverflow(RegI64 rhs, RegI64 srcDest, Label* done,
1038 bool zeroOnOverflow);
1040 // Emit a jump table to be used by tableSwitch()
1041 void jumpTable(const LabelVector& labels, Label* theTable);
1043 // Emit a table switch, `theTable` is the jump table.
1044 void tableSwitch(Label* theTable, RegI32 switchValue, Label* dispatchCode);
1046 // Compare i64 and set an i32 boolean result according to the condition.
1047 inline void cmp64Set(Assembler::Condition cond, RegI64 lhs, RegI64 rhs,
1048 RegI32 dest);
1050 // Round floating to integer.
1051 [[nodiscard]] inline bool supportsRoundInstruction(RoundingMode mode);
1052 inline void roundF32(RoundingMode roundingMode, RegF32 f0);
1053 inline void roundF64(RoundingMode roundingMode, RegF64 f0);
1055 // These are just wrappers around assembler functions, but without
1056 // type-specific names, and using our register abstractions for better type
1057 // discipline.
1058 inline void branchTo(Assembler::DoubleCondition c, RegF64 lhs, RegF64 rhs,
1059 Label* l);
1060 inline void branchTo(Assembler::DoubleCondition c, RegF32 lhs, RegF32 rhs,
1061 Label* l);
1062 inline void branchTo(Assembler::Condition c, RegI32 lhs, RegI32 rhs,
1063 Label* l);
1064 inline void branchTo(Assembler::Condition c, RegI32 lhs, Imm32 rhs, Label* l);
1065 inline void branchTo(Assembler::Condition c, RegI64 lhs, RegI64 rhs,
1066 Label* l);
1067 inline void branchTo(Assembler::Condition c, RegI64 lhs, Imm64 rhs, Label* l);
1068 inline void branchTo(Assembler::Condition c, RegRef lhs, ImmWord rhs,
1069 Label* l);
1071 // Helpers for accessing Instance::baselineScratchWords_. Note that Word
1072 // and I64 versions of these routines access the same area and it is up to
1073 // the caller to use it in some way which makes sense.
1075 // Store/load `r`, a machine word, to/from the `index`th scratch storage
1076 // slot in the current Instance. `instancePtr` must point at the current
1077 // Instance; it will not be modified. For ::stashWord, `r` must not be the
1078 // same as `instancePtr`.
1079 void stashWord(RegPtr instancePtr, size_t index, RegPtr r);
1080 void unstashWord(RegPtr instancePtr, size_t index, RegPtr r);
1082 #ifdef JS_CODEGEN_X86
1083 // Store r in instance scratch storage after first loading the instance from
1084 // the frame into the regForInstance. regForInstance must be neither of the
1085 // registers in r.
1086 void stashI64(RegPtr regForInstance, RegI64 r);
1088 // Load r from the instance scratch storage after first loading the instance
1089 // from the frame into the regForInstance. regForInstance can be one of the
1090 // registers in r.
1091 void unstashI64(RegPtr regForInstance, RegI64 r);
1092 #endif
1094 //////////////////////////////////////////////////////////////////////
1096 // Code generators for actual operations.
1098 template <typename RegType, typename IntType>
1099 void quotientOrRemainder(RegType rs, RegType rsd, RegType reserved,
1100 IsUnsigned isUnsigned, ZeroOnOverflow zeroOnOverflow,
1101 bool isConst, IntType c,
1102 void (*operate)(MacroAssembler&, RegType, RegType,
1103 RegType, IsUnsigned));
1105 [[nodiscard]] bool truncateF32ToI32(RegF32 src, RegI32 dest,
1106 TruncFlags flags);
1107 [[nodiscard]] bool truncateF64ToI32(RegF64 src, RegI32 dest,
1108 TruncFlags flags);
1110 #ifndef RABALDR_FLOAT_TO_I64_CALLOUT
1111 [[nodiscard]] RegF64 needTempForFloatingToI64(TruncFlags flags);
1112 [[nodiscard]] bool truncateF32ToI64(RegF32 src, RegI64 dest, TruncFlags flags,
1113 RegF64 temp);
1114 [[nodiscard]] bool truncateF64ToI64(RegF64 src, RegI64 dest, TruncFlags flags,
1115 RegF64 temp);
1116 #endif // RABALDR_FLOAT_TO_I64_CALLOUT
1118 #ifndef RABALDR_I64_TO_FLOAT_CALLOUT
1119 [[nodiscard]] RegI32 needConvertI64ToFloatTemp(ValType to, bool isUnsigned);
1120 void convertI64ToF32(RegI64 src, bool isUnsigned, RegF32 dest, RegI32 temp);
1121 void convertI64ToF64(RegI64 src, bool isUnsigned, RegF64 dest, RegI32 temp);
1122 #endif // RABALDR_I64_TO_FLOAT_CALLOUT
1124 //////////////////////////////////////////////////////////////////////
1126 // Global variable access.
1128 Address addressOfGlobalVar(const GlobalDesc& global, RegPtr tmp);
1130 //////////////////////////////////////////////////////////////////////
1132 // Table access.
1134 Address addressOfTableField(uint32_t tableIndex, uint32_t fieldOffset,
1135 RegPtr instance);
1136 void loadTableLength(uint32_t tableIndex, RegPtr instance, RegI32 length);
1137 void loadTableElements(uint32_t tableIndex, RegPtr instance, RegPtr elements);
1139 //////////////////////////////////////////////////////////////////////
1141 // Heap access.
1143 void bceCheckLocal(MemoryAccessDesc* access, AccessCheck* check,
1144 uint32_t local);
1145 void bceLocalIsUpdated(uint32_t local);
1147 // Fold offsets into ptr and bounds check as necessary. The instance will be
1148 // valid in cases where it's needed.
1149 template <typename RegIndexType>
1150 void prepareMemoryAccess(MemoryAccessDesc* access, AccessCheck* check,
1151 RegPtr instance, RegIndexType ptr);
1153 void branchAddNoOverflow(uint64_t offset, RegI32 ptr, Label* ok);
1154 void branchTestLowZero(RegI32 ptr, Imm32 mask, Label* ok);
1155 void boundsCheck4GBOrLargerAccess(uint32_t memoryIndex, RegPtr instance,
1156 RegI32 ptr, Label* ok);
1157 void boundsCheckBelow4GBAccess(uint32_t memoryIndex, RegPtr instance,
1158 RegI32 ptr, Label* ok);
1160 void branchAddNoOverflow(uint64_t offset, RegI64 ptr, Label* ok);
1161 void branchTestLowZero(RegI64 ptr, Imm32 mask, Label* ok);
1162 void boundsCheck4GBOrLargerAccess(uint32_t memoryIndex, RegPtr instance,
1163 RegI64 ptr, Label* ok);
1164 void boundsCheckBelow4GBAccess(uint32_t memoryIndex, RegPtr instance,
1165 RegI64 ptr, Label* ok);
1167 // Some consumers depend on the returned Address not incorporating instance,
1168 // as instance may be the scratch register.
1169 template <typename RegIndexType>
1170 Address prepareAtomicMemoryAccess(MemoryAccessDesc* access,
1171 AccessCheck* check, RegPtr instance,
1172 RegIndexType ptr);
1174 template <typename RegIndexType>
1175 void computeEffectiveAddress(MemoryAccessDesc* access);
1177 [[nodiscard]] bool needInstanceForAccess(const MemoryAccessDesc* access,
1178 const AccessCheck& check);
1180 // ptr and dest may be the same iff dest is I32.
1181 // This may destroy ptr even if ptr and dest are not the same.
1182 void executeLoad(MemoryAccessDesc* access, AccessCheck* check,
1183 RegPtr instance, RegPtr memoryBase, RegI32 ptr, AnyReg dest,
1184 RegI32 temp);
1185 void load(MemoryAccessDesc* access, AccessCheck* check, RegPtr instance,
1186 RegPtr memoryBase, RegI32 ptr, AnyReg dest, RegI32 temp);
1187 #ifdef ENABLE_WASM_MEMORY64
1188 void load(MemoryAccessDesc* access, AccessCheck* check, RegPtr instance,
1189 RegPtr memoryBase, RegI64 ptr, AnyReg dest, RegI64 temp);
1190 #endif
1192 template <typename RegType>
1193 void doLoadCommon(MemoryAccessDesc* access, AccessCheck check, ValType type);
1195 void loadCommon(MemoryAccessDesc* access, AccessCheck check, ValType type);
1197 // ptr and src must not be the same register.
1198 // This may destroy ptr and src.
1199 void executeStore(MemoryAccessDesc* access, AccessCheck* check,
1200 RegPtr instance, RegPtr memoryBase, RegI32 ptr, AnyReg src,
1201 RegI32 temp);
1202 void store(MemoryAccessDesc* access, AccessCheck* check, RegPtr instance,
1203 RegPtr memoryBase, RegI32 ptr, AnyReg src, RegI32 temp);
1204 #ifdef ENABLE_WASM_MEMORY64
1205 void store(MemoryAccessDesc* access, AccessCheck* check, RegPtr instance,
1206 RegPtr memoryBase, RegI64 ptr, AnyReg src, RegI64 temp);
1207 #endif
1209 template <typename RegType>
1210 void doStoreCommon(MemoryAccessDesc* access, AccessCheck check,
1211 ValType resultType);
1213 void storeCommon(MemoryAccessDesc* access, AccessCheck check,
1214 ValType resultType);
1216 void atomicLoad(MemoryAccessDesc* access, ValType type);
1217 #if !defined(JS_64BIT)
1218 template <typename RegIndexType>
1219 void atomicLoad64(MemoryAccessDesc* desc);
1220 #endif
1222 void atomicStore(MemoryAccessDesc* access, ValType type);
1224 void atomicRMW(MemoryAccessDesc* access, ValType type, AtomicOp op);
1225 template <typename RegIndexType>
1226 void atomicRMW32(MemoryAccessDesc* access, ValType type, AtomicOp op);
1227 template <typename RegIndexType>
1228 void atomicRMW64(MemoryAccessDesc* access, ValType type, AtomicOp op);
1230 void atomicXchg(MemoryAccessDesc* access, ValType type);
1231 template <typename RegIndexType>
1232 void atomicXchg64(MemoryAccessDesc* access, WantResult wantResult);
1233 template <typename RegIndexType>
1234 void atomicXchg32(MemoryAccessDesc* access, ValType type);
1236 void atomicCmpXchg(MemoryAccessDesc* access, ValType type);
1237 template <typename RegIndexType>
1238 void atomicCmpXchg32(MemoryAccessDesc* access, ValType type);
1239 template <typename RegIndexType>
1240 void atomicCmpXchg64(MemoryAccessDesc* access, ValType type);
1242 template <typename RegType>
1243 RegType popConstMemoryAccess(MemoryAccessDesc* access, AccessCheck* check);
1244 template <typename RegType>
1245 RegType popMemoryAccess(MemoryAccessDesc* access, AccessCheck* check);
1247 void pushHeapBase(uint32_t memoryIndex);
1249 ////////////////////////////////////////////////////////////////////////////
1251 // Platform-specific popping and register targeting.
1253 // The simple popping methods pop values into targeted registers; the caller
1254 // can free registers using standard functions. These are always called
1255 // popXForY where X says something about types and Y something about the
1256 // operation being targeted.
1258 RegI32 needRotate64Temp();
1259 void popAndAllocateForDivAndRemI32(RegI32* r0, RegI32* r1, RegI32* reserved);
1260 void popAndAllocateForMulI64(RegI64* r0, RegI64* r1, RegI32* temp);
1261 #ifndef RABALDR_INT_DIV_I64_CALLOUT
1262 void popAndAllocateForDivAndRemI64(RegI64* r0, RegI64* r1, RegI64* reserved,
1263 IsRemainder isRemainder);
1264 #endif
1265 RegI32 popI32RhsForShift();
1266 RegI32 popI32RhsForShiftI64();
1267 RegI64 popI64RhsForShift();
1268 RegI32 popI32RhsForRotate();
1269 RegI64 popI64RhsForRotate();
1270 void popI32ForSignExtendI64(RegI64* r0);
1271 void popI64ForSignExtendI64(RegI64* r0);
1273 ////////////////////////////////////////////////////////////
1275 // Sundry helpers.
1277 // Retrieve the current bytecodeOffset.
1278 inline BytecodeOffset bytecodeOffset() const;
1280 // Generate a trap instruction for the current bytecodeOffset.
1281 inline void trap(Trap t) const;
1283 // Abstracted helper for throwing, used for throw, rethrow, and rethrowing
1284 // at the end of a series of catch blocks (if none matched the exception).
1285 [[nodiscard]] bool throwFrom(RegRef exn);
1287 // Load the specified tag object from the Instance.
1288 void loadTag(RegPtr instanceData, uint32_t tagIndex, RegRef tagDst);
1290 // Load the pending exception state from the Instance and then reset it.
1291 void consumePendingException(RegRef* exnDst, RegRef* tagDst);
1293 [[nodiscard]] bool startTryNote(size_t* tryNoteIndex);
1294 void finishTryNote(size_t tryNoteIndex);
1296 ////////////////////////////////////////////////////////////
1298 // Barriers support.
1300 // This emits a GC pre-write barrier. The pre-barrier is needed when we
1301 // replace a member field with a new value, and the previous field value
1302 // might have no other referents, and incremental GC is ongoing. The field
1303 // might belong to an object or be a stack slot or a register or a heap
1304 // allocated value.
1306 // let obj = { field: previousValue };
1307 // obj.field = newValue; // previousValue must be marked with a pre-barrier.
1309 // The `valueAddr` is the address of the location that we are about to
1310 // update. This function preserves that register.
1311 void emitPreBarrier(RegPtr valueAddr);
1313 // This emits a GC post-write barrier. The post-barrier is needed when we
1314 // replace a member field with a new value, the new value is in the nursery,
1315 // and the containing object is a tenured object. The field must then be
1316 // added to the store buffer so that the nursery can be correctly collected.
1317 // The field might belong to an object or be a stack slot or a register or a
1318 // heap allocated value.
1320 // For the difference between 'precise' and 'imprecise', look at the
1321 // documentation on PostBarrierKind.
1323 // `object` is a pointer to the object that contains the field. It is used, if
1324 // present, to skip adding a store buffer entry when the containing object is
1325 // in the nursery. This register is preserved by this function.
1326 // `valueAddr` is the address of the location that we are writing to. This
1327 // register is consumed by this function.
1328 // `prevValue` is the value that existed in the field before `value` was
1329 // stored. This register is consumed by this function.
1330 // `value` is the value that was stored in the field. This register is
1331 // preserved by this function.
1332 [[nodiscard]] bool emitPostBarrierImprecise(const Maybe<RegRef>& object,
1333 RegPtr valueAddr, RegRef value);
1334 [[nodiscard]] bool emitPostBarrierPrecise(const Maybe<RegRef>& object,
1335 RegPtr valueAddr, RegRef prevValue,
1336 RegRef value);
1338 // Emits a store to a JS object pointer at the address `valueAddr`, which is
1339 // inside the GC cell `object`.
1341 // Preserves `object` and `value`. Consumes `valueAddr`.
1342 [[nodiscard]] bool emitBarrieredStore(const Maybe<RegRef>& object,
1343 RegPtr valueAddr, RegRef value,
1344 PreBarrierKind preBarrierKind,
1345 PostBarrierKind postBarrierKind);
1347 // Emits a store of nullptr to a JS object pointer at the address valueAddr.
1348 // Preserves `valueAddr`.
1349 void emitBarrieredClear(RegPtr valueAddr);
1351 ////////////////////////////////////////////////////////////
1353 // Machinery for optimized conditional branches. See comments in the
1354 // implementation.
1356 void setLatentCompare(Assembler::Condition compareOp, ValType operandType);
1357 void setLatentCompare(Assembler::DoubleCondition compareOp,
1358 ValType operandType);
1359 void setLatentEqz(ValType operandType);
1360 bool hasLatentOp() const;
1361 void resetLatentOp();
  // Jump to the given branch, passing results, if the condition `cond`
  // matches between `lhs` and `rhs`.
1364 template <typename Cond, typename Lhs, typename Rhs>
1365 [[nodiscard]] bool jumpConditionalWithResults(BranchState* b, Cond cond,
1366 Lhs lhs, Rhs rhs);
1367 #ifdef ENABLE_WASM_GC
1368 // Jump to the given branch, passing results, if the WasmGcObject, `object`,
1369 // is a subtype of `destType`.
1370 [[nodiscard]] bool jumpConditionalWithResults(BranchState* b, RegRef object,
1371 RefType sourceType,
1372 RefType destType,
1373 bool onSuccess);
1374 #endif
1375 template <typename Cond>
1376 [[nodiscard]] bool sniffConditionalControlCmp(Cond compareOp,
1377 ValType operandType);
1378 [[nodiscard]] bool sniffConditionalControlEqz(ValType operandType);
1379 void emitBranchSetup(BranchState* b);
1380 [[nodiscard]] bool emitBranchPerform(BranchState* b);
1382 //////////////////////////////////////////////////////////////////////
1384 [[nodiscard]] bool emitBody();
1385 [[nodiscard]] bool emitBlock();
1386 [[nodiscard]] bool emitLoop();
1387 [[nodiscard]] bool emitIf();
1388 [[nodiscard]] bool emitElse();
1389 // Used for common setup for catch and catch_all.
1390 void emitCatchSetup(LabelKind kind, Control& tryCatch,
1391 const ResultType& resultType);
1392 // Helper function used to generate landing pad code for the special
1393 // case in which `delegate` jumps to a function's body block.
1394 [[nodiscard]] bool emitBodyDelegateThrowPad();
1396 [[nodiscard]] bool emitTry();
1397 [[nodiscard]] bool emitCatch();
1398 [[nodiscard]] bool emitCatchAll();
1399 [[nodiscard]] bool emitDelegate();
1400 [[nodiscard]] bool emitThrow();
1401 [[nodiscard]] bool emitRethrow();
1402 [[nodiscard]] bool emitEnd();
1403 [[nodiscard]] bool emitBr();
1404 [[nodiscard]] bool emitBrIf();
1405 [[nodiscard]] bool emitBrTable();
1406 [[nodiscard]] bool emitDrop();
1407 [[nodiscard]] bool emitReturn();
1409 // A flag passed to emitCallArgs, describing how the value stack is laid out.
1410 enum class CalleeOnStack {
1411 // After the arguments to the call, there is a callee pushed onto value
1412 // stack. This is only the case for callIndirect. To get the arguments to
1413 // the call, emitCallArgs has to reach one element deeper into the value
1414 // stack, to skip the callee.
1415 True,
1417 // No callee on the stack.
1418 False
1421 // The typename T for emitCallArgs can be one of the following:
1422 // NormalCallResults, TailCallResults, or NoCallResults.
1423 template <typename T>
1424 [[nodiscard]] bool emitCallArgs(const ValTypeVector& argTypes, T results,
1425 FunctionCall* baselineCall,
1426 CalleeOnStack calleeOnStack);
1428 [[nodiscard]] bool emitCall();
1429 [[nodiscard]] bool emitReturnCall();
1430 [[nodiscard]] bool emitCallIndirect();
1431 [[nodiscard]] bool emitReturnCallIndirect();
1432 [[nodiscard]] bool emitUnaryMathBuiltinCall(SymbolicAddress callee,
1433 ValType operandType);
1434 [[nodiscard]] bool emitGetLocal();
1435 [[nodiscard]] bool emitSetLocal();
1436 [[nodiscard]] bool emitTeeLocal();
1437 [[nodiscard]] bool emitGetGlobal();
1438 [[nodiscard]] bool emitSetGlobal();
1439 [[nodiscard]] RegPtr maybeLoadMemoryBaseForAccess(
1440 RegPtr instance, const MemoryAccessDesc* access);
1441 [[nodiscard]] RegPtr maybeLoadInstanceForAccess(
1442 const MemoryAccessDesc* access, const AccessCheck& check);
1443 [[nodiscard]] RegPtr maybeLoadInstanceForAccess(
1444 const MemoryAccessDesc* access, const AccessCheck& check,
1445 RegPtr specific);
1446 [[nodiscard]] bool emitLoad(ValType type, Scalar::Type viewType);
1447 [[nodiscard]] bool emitStore(ValType resultType, Scalar::Type viewType);
1448 [[nodiscard]] bool emitSelect(bool typed);
1450 template <bool isSetLocal>
1451 [[nodiscard]] bool emitSetOrTeeLocal(uint32_t slot);
1453 [[nodiscard]] bool endBlock(ResultType type);
1454 [[nodiscard]] bool endIfThen(ResultType type);
1455 [[nodiscard]] bool endIfThenElse(ResultType type);
1456 [[nodiscard]] bool endTryCatch(ResultType type);
1458 void doReturn(ContinuationKind kind);
1459 void pushReturnValueOfCall(const FunctionCall& call, MIRType type);
1461 [[nodiscard]] bool pushStackResultsForCall(const ResultType& type,
1462 RegPtr temp, StackResultsLoc* loc);
1463 void popStackResultsAfterCall(const StackResultsLoc& results,
1464 uint32_t stackArgBytes);
1466 void emitCompareI32(Assembler::Condition compareOp, ValType compareType);
1467 void emitCompareI64(Assembler::Condition compareOp, ValType compareType);
1468 void emitCompareF32(Assembler::DoubleCondition compareOp,
1469 ValType compareType);
1470 void emitCompareF64(Assembler::DoubleCondition compareOp,
1471 ValType compareType);
1472 void emitCompareRef(Assembler::Condition compareOp, ValType compareType);
1474 template <typename CompilerType>
1475 inline CompilerType& selectCompiler();
1477 template <typename SourceType, typename DestType>
1478 inline void emitUnop(void (*op)(MacroAssembler& masm, SourceType rs,
1479 DestType rd));
1481 template <typename SourceType, typename DestType, typename TempType>
1482 inline void emitUnop(void (*op)(MacroAssembler& masm, SourceType rs,
1483 DestType rd, TempType temp));
1485 template <typename SourceType, typename DestType, typename ImmType>
1486 inline void emitUnop(ImmType immediate, void (*op)(MacroAssembler&, ImmType,
1487 SourceType, DestType));
1489 template <typename CompilerType, typename RegType>
1490 inline void emitUnop(void (*op)(CompilerType& compiler, RegType rsd));
1492 template <typename RegType, typename TempType>
1493 inline void emitUnop(void (*op)(BaseCompiler& bc, RegType rsd, TempType rt),
1494 TempType (*getSpecializedTemp)(BaseCompiler& bc));
1496 template <typename CompilerType, typename RhsType, typename LhsDestType>
1497 inline void emitBinop(void (*op)(CompilerType& masm, RhsType src,
1498 LhsDestType srcDest));
1500 template <typename RhsDestType, typename LhsType>
1501 inline void emitBinop(void (*op)(MacroAssembler& masm, RhsDestType src,
1502 LhsType srcDest, RhsDestOp));
1504 template <typename RhsType, typename LhsDestType, typename TempType>
1505 inline void emitBinop(void (*)(MacroAssembler& masm, RhsType rs,
1506 LhsDestType rsd, TempType temp));
1508 template <typename RhsType, typename LhsDestType, typename TempType1,
1509 typename TempType2>
1510 inline void emitBinop(void (*)(MacroAssembler& masm, RhsType rs,
1511 LhsDestType rsd, TempType1 temp1,
1512 TempType2 temp2));
1514 template <typename RhsType, typename LhsDestType, typename ImmType>
1515 inline void emitBinop(ImmType immediate, void (*op)(MacroAssembler&, ImmType,
1516 RhsType, LhsDestType));
1518 template <typename RhsType, typename LhsDestType, typename ImmType,
1519 typename TempType1, typename TempType2>
1520 inline void emitBinop(ImmType immediate,
1521 void (*op)(MacroAssembler&, ImmType, RhsType,
1522 LhsDestType, TempType1 temp1,
1523 TempType2 temp2));
1525 template <typename CompilerType1, typename CompilerType2, typename RegType,
1526 typename ImmType>
1527 inline void emitBinop(void (*op)(CompilerType1& compiler1, RegType rs,
1528 RegType rd),
1529 void (*opConst)(CompilerType2& compiler2, ImmType c,
1530 RegType rd),
1531 RegType (BaseCompiler::*rhsPopper)() = nullptr);
1533 template <typename CompilerType, typename ValType>
1534 inline void emitTernary(void (*op)(CompilerType&, ValType src0, ValType src1,
1535 ValType srcDest));
1537 template <typename CompilerType, typename ValType>
1538 inline void emitTernary(void (*op)(CompilerType&, ValType src0, ValType src1,
1539 ValType srcDest, ValType temp));
1541 template <typename CompilerType, typename ValType>
1542 inline void emitTernaryResultLast(void (*op)(CompilerType&, ValType src0,
1543 ValType src1, ValType srcDest));
1545 template <typename R>
1546 [[nodiscard]] inline bool emitInstanceCallOp(
1547 const SymbolicAddressSignature& fn, R reader);
1549 template <typename A1, typename R>
1550 [[nodiscard]] inline bool emitInstanceCallOp(
1551 const SymbolicAddressSignature& fn, R reader);
1553 template <typename A1, typename A2, typename R>
1554 [[nodiscard]] inline bool emitInstanceCallOp(
1555 const SymbolicAddressSignature& fn, R reader);
1557 void emitMultiplyI64();
1558 void emitQuotientI32();
1559 void emitQuotientU32();
1560 void emitRemainderI32();
1561 void emitRemainderU32();
1562 #ifdef RABALDR_INT_DIV_I64_CALLOUT
1563 [[nodiscard]] bool emitDivOrModI64BuiltinCall(SymbolicAddress callee,
1564 ValType operandType);
1565 #else
1566 void emitQuotientI64();
1567 void emitQuotientU64();
1568 void emitRemainderI64();
1569 void emitRemainderU64();
1570 #endif
1571 void emitRotrI64();
1572 void emitRotlI64();
1573 void emitEqzI32();
1574 void emitEqzI64();
1575 template <TruncFlags flags>
1576 [[nodiscard]] bool emitTruncateF32ToI32();
1577 template <TruncFlags flags>
1578 [[nodiscard]] bool emitTruncateF64ToI32();
1579 #ifdef RABALDR_FLOAT_TO_I64_CALLOUT
1580 [[nodiscard]] bool emitConvertFloatingToInt64Callout(SymbolicAddress callee,
1581 ValType operandType,
1582 ValType resultType);
1583 #else
1584 template <TruncFlags flags>
1585 [[nodiscard]] bool emitTruncateF32ToI64();
1586 template <TruncFlags flags>
1587 [[nodiscard]] bool emitTruncateF64ToI64();
1588 #endif
1589 void emitExtendI64_8();
1590 void emitExtendI64_16();
1591 void emitExtendI64_32();
1592 void emitExtendI32ToI64();
1593 void emitExtendU32ToI64();
1594 #ifdef RABALDR_I64_TO_FLOAT_CALLOUT
1595 [[nodiscard]] bool emitConvertInt64ToFloatingCallout(SymbolicAddress callee,
1596 ValType operandType,
1597 ValType resultType);
1598 #else
1599 void emitConvertU64ToF32();
1600 void emitConvertU64ToF64();
1601 #endif
1602 void emitRound(RoundingMode roundingMode, ValType operandType);
1604 // Generate a call to the instance function denoted by `builtin`, passing as
1605 // args the top elements of the compiler's value stack and optionally an
1606 // Instance* too. The relationship between the top of stack and arg
1607 // ordering is as follows. If the value stack looks like this:
1609 // A <- least recently pushed
1610 // B
1611 // C <- most recently pushed
1613 // then the called function is expected to have signature [if an Instance*
1614 // is also to be passed]:
1616 // static Instance::foo(Instance*, A, B, C)
1618 // and the SymbolicAddressSignature::argTypes array will be
1620 // {_PTR, _A, _B, _C, _END} // _PTR is for the Instance*
1622 // (see WasmBuiltins.cpp). In short, the most recently pushed value is the
1623 // rightmost argument to the function.
1624 [[nodiscard]] bool emitInstanceCall(const SymbolicAddressSignature& builtin);
// Linear-memory management operations.
1626 [[nodiscard]] bool emitMemoryGrow();
1627 [[nodiscard]] bool emitMemorySize();
// Reference-type operations; the function-references group below is gated
// on its feature flag.
1629 [[nodiscard]] bool emitRefFunc();
1630 [[nodiscard]] bool emitRefNull();
1631 [[nodiscard]] bool emitRefIsNull();
1632 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
1633 [[nodiscard]] bool emitRefAsNonNull();
1634 [[nodiscard]] bool emitBrOnNull();
1635 [[nodiscard]] bool emitBrOnNonNull();
1636 [[nodiscard]] bool emitCallRef();
1637 [[nodiscard]] bool emitReturnCallRef();
1638 #endif
// Atomic memory operations. NOTE(review): emitWait/emitWake appear to be
// the opcode-level entry points while atomicWait/atomicWake take an
// already-prepared MemoryAccessDesc — confirm against the definitions.
1640 [[nodiscard]] bool emitAtomicCmpXchg(ValType type, Scalar::Type viewType);
1641 [[nodiscard]] bool emitAtomicLoad(ValType type, Scalar::Type viewType);
1642 [[nodiscard]] bool emitAtomicRMW(ValType type, Scalar::Type viewType,
1643 AtomicOp op);
1644 [[nodiscard]] bool emitAtomicStore(ValType type, Scalar::Type viewType);
1645 [[nodiscard]] bool emitWait(ValType type, uint32_t byteSize);
1646 [[nodiscard]] bool atomicWait(ValType type, MemoryAccessDesc* access);
1647 [[nodiscard]] bool emitWake();
1648 [[nodiscard]] bool atomicWake(MemoryAccessDesc* access);
1649 [[nodiscard]] bool emitFence();
1650 [[nodiscard]] bool emitAtomicXchg(ValType type, Scalar::Type viewType);
// Bulk-memory and table operations. The memCopyCall/memFillCall helpers
// take explicit memory indices and return a success flag; the
// *InlineM32() variants emit inline code instead (the M32 suffix suggests
// 32-bit memories only — confirm in the implementation).
1651 [[nodiscard]] bool emitMemInit();
1652 [[nodiscard]] bool emitMemCopy();
1653 [[nodiscard]] bool memCopyCall(uint32_t dstMemIndex, uint32_t srcMemIndex);
1654 void memCopyInlineM32();
1655 [[nodiscard]] bool emitTableCopy();
1656 [[nodiscard]] bool emitDataOrElemDrop(bool isData);
1657 [[nodiscard]] bool emitMemFill();
1658 [[nodiscard]] bool memFillCall(uint32_t memoryIndex);
1659 void memFillInlineM32();
1660 [[nodiscard]] bool emitTableInit();
1661 [[nodiscard]] bool emitTableFill();
1662 [[nodiscard]] bool emitMemDiscard();
1663 [[nodiscard]] bool emitTableGet();
1664 [[nodiscard]] bool emitTableGrow();
1665 [[nodiscard]] bool emitTableSet();
1666 [[nodiscard]] bool emitTableSize();
// Table helpers taking an explicit table index and pre-allocated registers.
1668 void emitTableBoundsCheck(uint32_t tableIndex, RegI32 index, RegPtr instance);
1669 [[nodiscard]] bool emitTableGetAnyRef(uint32_t tableIndex);
1670 [[nodiscard]] bool emitTableSetAnyRef(uint32_t tableIndex);
// Emitters for the GC proposal's struct/array/i31/cast instructions,
// compiled in only when GC support is enabled.
1672 #ifdef ENABLE_WASM_GC
1673 [[nodiscard]] bool emitStructNew();
1674 [[nodiscard]] bool emitStructNewDefault();
1675 [[nodiscard]] bool emitStructGet(FieldWideningOp wideningOp);
1676 [[nodiscard]] bool emitStructSet();
1677 [[nodiscard]] bool emitArrayNew();
1678 [[nodiscard]] bool emitArrayNewFixed();
1679 [[nodiscard]] bool emitArrayNewDefault();
1680 [[nodiscard]] bool emitArrayNewData();
1681 [[nodiscard]] bool emitArrayNewElem();
1682 [[nodiscard]] bool emitArrayInitData();
1683 [[nodiscard]] bool emitArrayInitElem();
1684 [[nodiscard]] bool emitArrayGet(FieldWideningOp wideningOp);
1685 [[nodiscard]] bool emitArraySet();
1686 [[nodiscard]] bool emitArrayLen();
1687 [[nodiscard]] bool emitArrayCopy();
1688 [[nodiscard]] bool emitArrayFill();
1689 [[nodiscard]] bool emitRefI31();
1690 [[nodiscard]] bool emitI31Get(FieldWideningOp wideningOp);
// Type-check / cast instructions; `nullable` selects the null-accepting
// variant of the instruction.
1691 [[nodiscard]] bool emitRefTest(bool nullable);
1692 [[nodiscard]] bool emitRefCast(bool nullable);
1693 [[nodiscard]] bool emitBrOnCastCommon(bool onSuccess,
1694 uint32_t labelRelativeDepth,
1695 const ResultType& labelType,
1696 RefType sourceType, RefType destType);
1697 [[nodiscard]] bool emitBrOnCast(bool onSuccess);
1698 [[nodiscard]] bool emitAnyConvertExtern();
1699 [[nodiscard]] bool emitExternConvertAny();
1701 // Utility classes/methods to add trap information related to
1702 // null pointer dereferences/accesses.
// These structs are policy types, used as the `NullCheckPolicy` template
// parameter of the GC accessor helpers in this class.
// NoNullCheck: both members are empty — no explicit null check and no trap
// metadata are emitted.
1703 struct NoNullCheck {
1704 static void emitNullCheck(BaseCompiler* bc, RegRef rp) {}
1705 static void emitTrapSite(BaseCompiler* bc, FaultingCodeOffset fco,
1706 TrapMachineInsn tmi) {}
// SignalNullCheck: members are defined out of line. NOTE(review):
// presumably records a trap site so a faulting access is caught via
// signal handling, per the name — confirm in the implementation file.
1708 struct SignalNullCheck {
1709 static void emitNullCheck(BaseCompiler* bc, RegRef rp);
1710 static void emitTrapSite(BaseCompiler* bc, FaultingCodeOffset fco,
1711 TrapMachineInsn tmi);
1714 // Load a pointer to the TypeDefInstanceData for a given type index
1715 RegPtr loadTypeDefInstanceData(uint32_t typeIndex);
1716 // Load a pointer to the SuperTypeVector for a given type index
1717 RegPtr loadSuperTypeVector(uint32_t typeIndex);
// Allocate a struct of type `typeIndex`; `ZeroFields` selects whether the
// fields are zero-initialized. Results come back through the out-params
// (`object`, plus `isOutlineStruct`/`outlineBase` describing any
// out-of-line storage area).
1719 template <bool ZeroFields>
1720 bool emitStructAlloc(uint32_t typeIndex, RegRef* object,
1721 bool* isOutlineStruct, RegPtr* outlineBase);
// Low-level GC object accessors. `NullCheckPolicy` is one of the
// NoNullCheck/SignalNullCheck policy structs declared in this class.
1723 template <typename NullCheckPolicy>
1724 RegPtr emitGcArrayGetData(RegRef rp);
1725 template <typename NullCheckPolicy>
1726 RegI32 emitGcArrayGetNumElements(RegRef rp);
1727 void emitGcArrayBoundsCheck(RegI32 index, RegI32 numElements);
1728 template <typename T, typename NullCheckPolicy>
1729 void emitGcGet(FieldType type, FieldWideningOp wideningOp, const T& src);
1730 template <typename T, typename NullCheckPolicy>
1731 void emitGcSetScalar(const T& dst, FieldType type, AnyReg value);
1733 // Common code for both old and new ref.test instructions.
1734 void emitRefTestCommon(RefType sourceType, RefType destType);
1735 // Common code for both old and new ref.cast instructions.
1736 void emitRefCastCommon(RefType sourceType, RefType destType);
1738 // Allocate registers and branch if the given wasm ref is a subtype of the
1739 // given heap type.
1740 void branchIfRefSubtype(RegRef ref, RefType sourceType, RefType destType,
1741 Label* label, bool onSuccess);
1743 // Write `value` to wasm struct `object`, at `areaBase + areaOffset`. The
1744 // caller must decide on the in- vs out-of-lineness before the call and set
1745 // the latter two accordingly; this routine does not take that into account.
1746 // The value in `object` is unmodified, but `areaBase` and `value` may get
1747 // trashed.
1748 template <typename NullCheckPolicy>
1749 [[nodiscard]] bool emitGcStructSet(RegRef object, RegPtr areaBase,
1750 uint32_t areaOffset, FieldType fieldType,
1751 AnyReg value,
1752 PreBarrierKind preBarrierKind);
// Array analogue of emitGcStructSet: store `value` at `index` within the
// array payload `data`, honoring the requested pre-barrier kind.
1754 [[nodiscard]] bool emitGcArraySet(RegRef object, RegPtr data, RegI32 index,
1755 const ArrayType& array, AnyReg value,
1756 PreBarrierKind preBarrierKind);
1757 #endif // ENABLE_WASM_GC
// SIMD operations (128-bit vectors), compiled in only when SIMD support is
// enabled; the relaxed-SIMD and x86/x64-specific entries are further gated.
1759 #ifdef ENABLE_WASM_SIMD
1760 void emitVectorAndNot();
1761 # ifdef ENABLE_WASM_RELAXED_SIMD
1762 void emitDotI8x16I7x16AddS();
1763 # endif
// Lower-level load/store helpers taking a prepared MemoryAccessDesc...
1765 void loadSplat(MemoryAccessDesc* access);
1766 void loadZero(MemoryAccessDesc* access);
1767 void loadExtend(MemoryAccessDesc* access, Scalar::Type viewType);
1768 void loadLane(MemoryAccessDesc* access, uint32_t laneIndex);
1769 void storeLane(MemoryAccessDesc* access, uint32_t laneIndex);
// ...and the corresponding opcode-level emitters.
1771 [[nodiscard]] bool emitLoadSplat(Scalar::Type viewType);
1772 [[nodiscard]] bool emitLoadZero(Scalar::Type viewType);
1773 [[nodiscard]] bool emitLoadExtend(Scalar::Type viewType);
1774 [[nodiscard]] bool emitLoadLane(uint32_t laneSize);
1775 [[nodiscard]] bool emitStoreLane(uint32_t laneSize);
1776 [[nodiscard]] bool emitVectorShuffle();
1777 [[nodiscard]] bool emitVectorLaneSelect();
1778 # if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
1779 [[nodiscard]] bool emitVectorShiftRightI64x2();
1780 # endif
1781 #endif
1782 [[nodiscard]] bool emitCallBuiltinModuleFunc();
1785 } // namespace wasm
1786 } // namespace js
1788 #endif // wasm_wasm_baseline_object_h