no bug - Bumping Firefox l10n changesets r=release a=l10n-bump DONTBUILD CLOSED TREE
[gecko.git] / js / src / wasm / WasmBCClass.h
blobee9cb4ce30fb22d6a6674ea2e48585f147d27815
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2016 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// This is an INTERNAL header for the Wasm baseline compiler: the compiler
// object and its supporting types.
22 #ifndef wasm_wasm_baseline_object_h
23 #define wasm_wasm_baseline_object_h
25 #include "wasm/WasmBCDefs.h"
26 #include "wasm/WasmBCFrame.h"
27 #include "wasm/WasmBCRegDefs.h"
28 #include "wasm/WasmBCStk.h"
29 #include "wasm/WasmConstants.h"
31 namespace js {
32 namespace wasm {
34 // Container for a piece of out-of-line code, the slow path that supports an
35 // operation.
36 class OutOfLineCode;
38 // Part of the inter-bytecode state for the boolean-evaluation-for-control
39 // optimization.
40 struct BranchState;
42 // Representation of wasm local variables.
43 using Local = BaseStackFrame::Local;
45 // Bitset used for simple bounds check elimination. Capping this at 64 locals
46 // makes sense; even 32 locals would probably be OK in practice.
48 // For more information about BCE, see the block comment in WasmBCMemory.cpp.
49 using BCESet = uint64_t;
51 // Information stored in the control node for generating exception handling
52 // landing pads.
53 struct CatchInfo {
54 uint32_t tagIndex; // Index for the associated exception.
55 NonAssertingLabel label; // The entry label for the handler.
57 explicit CatchInfo(uint32_t tagIndex_) : tagIndex(tagIndex_) {}
60 using CatchInfoVector = Vector<CatchInfo, 1, SystemAllocPolicy>;
62 // Control node, representing labels and stack heights at join points.
63 struct Control {
64 NonAssertingLabel label; // The "exit" label
65 NonAssertingLabel otherLabel; // Used for the "else" branch of if-then-else
66 // and to allow delegate to jump to catches.
67 StackHeight stackHeight; // From BaseStackFrame
68 uint32_t stackSize; // Value stack height
69 BCESet bceSafeOnEntry; // Bounds check info flowing into the item
70 BCESet bceSafeOnExit; // Bounds check info flowing out of the item
71 bool deadOnArrival; // deadCode_ was set on entry to the region
72 bool deadThenBranch; // deadCode_ was set on exit from "then"
73 size_t tryNoteIndex; // For tracking try branch code ranges.
74 CatchInfoVector catchInfos; // Used for try-catch handlers.
76 Control()
77 : stackHeight(StackHeight::Invalid()),
78 stackSize(UINT32_MAX),
79 bceSafeOnEntry(0),
80 bceSafeOnExit(~BCESet(0)),
81 deadOnArrival(false),
82 deadThenBranch(false),
83 tryNoteIndex(0) {}
85 Control(Control&&) = default;
86 Control(const Control&) = delete;
89 // A vector of Nothing values, used for reading opcodes.
90 class BaseNothingVector {
91 Nothing unused_;
93 public:
94 bool reserve(size_t size) { return true; }
95 bool resize(size_t length) { return true; }
96 Nothing& operator[](size_t) { return unused_; }
97 Nothing& back() { return unused_; }
98 size_t length() const { return 0; }
99 bool append(Nothing& nothing) { return true; }
100 void infallibleAppend(Nothing& nothing) {}
103 // The baseline compiler tracks values on a stack of its own -- it needs to scan
104 // that stack for spilling -- and thus has no need for the values maintained by
105 // the iterator.
106 struct BaseCompilePolicy {
107 using Value = Nothing;
108 using ValueVector = BaseNothingVector;
110 // The baseline compiler uses the iterator's control stack, attaching
111 // its own control information.
112 using ControlItem = Control;
115 using BaseOpIter = OpIter<BaseCompilePolicy>;
// Latent operation for the boolean-evaluation-for-control optimization.
enum class LatentOp { None, Compare, Eqz };
// Encapsulate the checking needed for a memory access. All checks are
// enabled by default; flags are cleared as analyses prove them unnecessary.
struct AccessCheck {
  AccessCheck()
      : omitBoundsCheck(false),
        omitAlignmentCheck(false),
        onlyPointerAlignment(false) {}

  // If `omitAlignmentCheck` is true then we need check neither the
  // pointer nor the offset.  Otherwise, if `onlyPointerAlignment` is true
  // then we need check only the pointer.  Otherwise, check the sum of
  // pointer and offset.

  bool omitBoundsCheck;
  bool omitAlignmentCheck;
  bool onlyPointerAlignment;
};
137 // Encapsulate all the information about a function call.
138 struct FunctionCall {
139 FunctionCall()
140 : restoreRegisterStateAndRealm(false),
141 usesSystemAbi(false),
142 #ifdef JS_CODEGEN_ARM
143 hardFP(true),
144 #endif
145 frameAlignAdjustment(0),
146 stackArgAreaSize(0) {
149 WasmABIArgGenerator abi;
150 bool restoreRegisterStateAndRealm;
151 bool usesSystemAbi;
152 #ifdef JS_CODEGEN_ARM
153 bool hardFP;
154 #endif
155 size_t frameAlignAdjustment;
156 size_t stackArgAreaSize;
enum class PreBarrierKind {
  // No pre-write barrier is required because the previous value is undefined.
  None,
  // Perform a pre-write barrier to mark the previous value if an incremental
  // GC is underway.
  Normal,
};
enum class PostBarrierKind {
  // Remove an existing store buffer entry if the new value does not require
  // one. This is required to preserve invariants with HeapPtr when used for
  // movable storage.
  Precise,
  // Add a store buffer entry if the new value requires it, but do not attempt
  // to remove a pre-existing entry.
  Imprecise,
};
//////////////////////////////////////////////////////////////////////////////
//
// Wasm baseline compiler proper.
//
// This is a struct and not a class because there is no real benefit to hiding
// anything, and because many static functions that are wrappers for masm
// methods need to reach into it and would otherwise have to be declared as
// friends.
//
// (Members generally have a '_' suffix but some don't because they are
// referenced everywhere and it would be tedious to spell that out.)
189 struct BaseCompiler final {
190 ///////////////////////////////////////////////////////////////////////////
192 // Private types
194 using LabelVector = Vector<NonAssertingLabel, 8, SystemAllocPolicy>;
196 ///////////////////////////////////////////////////////////////////////////
198 // Read-only and write-once members.
200 // Static compilation environment.
201 const ModuleEnvironment& moduleEnv_;
202 const CompilerEnvironment& compilerEnv_;
203 const FuncCompileInput& func_;
204 const ValTypeVector& locals_;
206 // Information about the locations of locals, this is set up during
207 // initialization and read-only after that.
208 BaseStackFrame::LocalVector localInfo_;
210 // On specific platforms we sometimes need to use specific registers.
211 const SpecificRegs specific_;
213 // SigD and SigF are single-entry parameter lists for f64 and f32, these are
214 // created during initialization.
215 ValTypeVector SigD_;
216 ValTypeVector SigF_;
218 // Where to go to to return, bound as compilation ends.
219 NonAssertingLabel returnLabel_;
221 // Prologue and epilogue offsets, initialized during prologue and epilogue
222 // generation and only used by the caller.
223 FuncOffsets offsets_;
225 // We call this address from the breakable point when the breakpoint handler
226 // is not null.
227 NonAssertingLabel debugTrapStub_;
228 uint32_t previousBreakablePoint_;
230 // BaselineCompileFunctions() "lends" us the StkVector to use in this
231 // BaseCompiler object, and that is installed in |stk_| in our constructor.
232 // This is so as to avoid having to malloc/free the vector's contents at
233 // each creation/destruction of a BaseCompiler object. It does however mean
234 // that we need to hold on to a reference to BaselineCompileFunctions()'s
235 // vector, so we can swap (give) its contents back when this BaseCompiler
236 // object is destroyed. This significantly reduces the heap turnover of the
237 // baseline compiler. See bug 1532592.
238 StkVector& stkSource_;
240 ///////////////////////////////////////////////////////////////////////////
242 // Output-only data structures.
244 // Bump allocator for temporary memory, used for the value stack and
245 // out-of-line code blobs. Bump-allocated memory is not freed until the end
246 // of the compilation.
247 TempAllocator::Fallible alloc_;
249 // Machine code emitter.
250 MacroAssembler& masm;
252 ///////////////////////////////////////////////////////////////////////////
254 // Compilation state.
256 // Decoder for this function, used for misc error reporting.
257 Decoder& decoder_;
259 // Opcode reader.
260 BaseOpIter iter_;
262 // Register allocator.
263 BaseRegAlloc ra;
265 // Stack frame abstraction.
266 BaseStackFrame fr;
268 // Latent out of line support code for some operations, code for these will be
269 // emitted at the end of compilation.
270 Vector<OutOfLineCode*, 8, SystemAllocPolicy> outOfLine_;
272 // Stack map state. This keeps track of live pointer slots and allows precise
273 // stack maps to be generated at safe points.
274 StackMapGenerator stackMapGenerator_;
276 // Wasm value stack. This maps values on the wasm stack to values in the
277 // running code and their locations.
279 // The value stack facilitates on-the-fly register allocation and the use of
280 // immediates in instructions. It tracks latent constants, latent references
281 // to locals, register contents, and values that have been flushed to the CPU
282 // stack.
284 // The stack can be flushed to the CPU stack using sync().
286 // The stack is a StkVector rather than a StkVector& since constantly
287 // dereferencing a StkVector& has been shown to add 0.5% or more to the
288 // compiler's dynamic instruction count.
289 StkVector stk_;
291 // Flag indicating that the compiler is currently in a dead code region.
292 bool deadCode_;
294 ///////////////////////////////////////////////////////////////////////////
296 // State for bounds check elimination.
298 // Locals that have been bounds checked and not updated since
299 BCESet bceSafe_;
301 ///////////////////////////////////////////////////////////////////////////
303 // State for boolean-evaluation-for-control.
305 // Latent operation for branch (seen next)
306 LatentOp latentOp_;
308 // Operand type, if latentOp_ is true
309 ValType latentType_;
311 // Comparison operator, if latentOp_ == Compare, int types
312 Assembler::Condition latentIntCmp_;
314 // Comparison operator, if latentOp_ == Compare, float types
315 Assembler::DoubleCondition latentDoubleCmp_;
317 ///////////////////////////////////////////////////////////////////////////
319 // Main compilation API.
321 // A client will create a compiler object, and then call init(),
322 // emitFunction(), and finish() in that order.
324 BaseCompiler(const ModuleEnvironment& moduleEnv,
325 const CompilerEnvironment& compilerEnv,
326 const FuncCompileInput& func, const ValTypeVector& locals,
327 const RegisterOffsets& trapExitLayout,
328 size_t trapExitLayoutNumWords, Decoder& decoder,
329 StkVector& stkSource, TempAllocator* alloc, MacroAssembler* masm,
330 StackMaps* stackMaps);
331 ~BaseCompiler();
333 [[nodiscard]] bool init();
334 [[nodiscard]] bool emitFunction();
335 [[nodiscard]] FuncOffsets finish();
337 //////////////////////////////////////////////////////////////////////////////
339 // Sundry accessor abstractions and convenience predicates.
341 // WasmBaselineObject-inl.h.
343 inline const FuncType& funcType() const;
344 inline bool usesMemory() const;
345 inline bool usesSharedMemory(uint32_t memoryIndex) const;
346 inline bool isMem32(uint32_t memoryIndex) const;
347 inline bool isMem64(uint32_t memoryIndex) const;
348 inline bool hugeMemoryEnabled(uint32_t memoryIndex) const;
349 inline uint32_t instanceOffsetOfMemoryBase(uint32_t memoryIndex) const;
350 inline uint32_t instanceOffsetOfBoundsCheckLimit(uint32_t memoryIndex) const;
352 // The casts are used by some of the ScratchRegister implementations.
353 operator MacroAssembler&() const { return masm; }
354 operator BaseRegAlloc&() { return ra; }
356 //////////////////////////////////////////////////////////////////////////////
358 // Locals.
360 // WasmBaselineObject-inl.h.
362 // Assert that the local at the given index has the given type, and return a
363 // reference to the Local.
364 inline const Local& localFromSlot(uint32_t slot, MIRType type);
366 //////////////////////////////////////////////////////////////////////////////
368 // Out of line code management.
370 [[nodiscard]] OutOfLineCode* addOutOfLineCode(OutOfLineCode* ool);
371 [[nodiscard]] bool generateOutOfLineCode();
373 /////////////////////////////////////////////////////////////////////////////
375 // Layering in the compiler (briefly).
377 // At the lowest layers are abstractions for registers (managed by the
378 // BaseRegAlloc and the wrappers below) and the stack frame (managed by the
379 // BaseStackFrame).
381 // The registers and frame are in turn used by the value abstraction, which is
382 // implemented by the Stk type and backed by the value stack. Values may be
383 // stored in registers, in the frame, or may be latent constants, and the
384 // value stack handles storage mostly transparently in its push and pop
385 // routines.
387 // In turn, the pop routines bring values into registers so that we can
388 // compute on them, and the push routines move values to the stack (where they
389 // may still reside in registers until the registers are needed or the value
390 // must be in memory).
392 // Routines for managing parameters and results (for blocks or calls) may also
393 // manipulate the stack directly.
395 // At the top are the code generators: methods that use the poppers and
396 // pushers and other utilities to move values into place, and that emit code
397 // to compute on those values or change control flow.
399 /////////////////////////////////////////////////////////////////////////////
401 // Register management. These are simply strongly-typed wrappers that
402 // delegate to the register allocator.
404 inline bool isAvailableI32(RegI32 r);
405 inline bool isAvailableI64(RegI64 r);
406 inline bool isAvailableRef(RegRef r);
407 inline bool isAvailablePtr(RegPtr r);
408 inline bool isAvailableF32(RegF32 r);
409 inline bool isAvailableF64(RegF64 r);
410 #ifdef ENABLE_WASM_SIMD
411 inline bool isAvailableV128(RegV128 r);
412 #endif
414 // Allocate any register
415 [[nodiscard]] inline RegI32 needI32();
416 [[nodiscard]] inline RegI64 needI64();
417 [[nodiscard]] inline RegRef needRef();
418 [[nodiscard]] inline RegPtr needPtr();
419 [[nodiscard]] inline RegF32 needF32();
420 [[nodiscard]] inline RegF64 needF64();
421 #ifdef ENABLE_WASM_SIMD
422 [[nodiscard]] inline RegV128 needV128();
423 #endif
425 // Allocate a specific register
426 inline void needI32(RegI32 specific);
427 inline void needI64(RegI64 specific);
428 inline void needRef(RegRef specific);
429 inline void needPtr(RegPtr specific);
430 inline void needF32(RegF32 specific);
431 inline void needF64(RegF64 specific);
432 #ifdef ENABLE_WASM_SIMD
433 inline void needV128(RegV128 specific);
434 #endif
436 template <typename RegType>
437 inline RegType need();
439 // Just a shorthand.
440 inline void need2xI32(RegI32 r0, RegI32 r1);
441 inline void need2xI64(RegI64 r0, RegI64 r1);
443 // Get a register but do not sync the stack to free one up. This will crash
444 // if no register is available.
445 inline void needI32NoSync(RegI32 r);
447 #if defined(JS_CODEGEN_ARM)
448 // Allocate a specific register pair (even-odd register numbers).
449 [[nodiscard]] inline RegI64 needI64Pair();
450 #endif
452 inline void freeAny(AnyReg r);
453 inline void freeI32(RegI32 r);
454 inline void freeI64(RegI64 r);
455 inline void freeRef(RegRef r);
456 inline void freePtr(RegPtr r);
457 inline void freeF32(RegF32 r);
458 inline void freeF64(RegF64 r);
459 #ifdef ENABLE_WASM_SIMD
460 inline void freeV128(RegV128 r);
461 #endif
463 template <typename RegType>
464 inline void free(RegType r);
466 // Free r if it is not invalid.
467 inline void maybeFree(RegI32 r);
468 inline void maybeFree(RegI64 r);
469 inline void maybeFree(RegF32 r);
470 inline void maybeFree(RegF64 r);
471 inline void maybeFree(RegRef r);
472 inline void maybeFree(RegPtr r);
473 #ifdef ENABLE_WASM_SIMD
474 inline void maybeFree(RegV128 r);
475 #endif
477 // On 64-bit systems, `except` must equal r and this is a no-op. On 32-bit
478 // systems, `except` must equal the high or low part of a pair and the other
479 // part of the pair is freed.
480 inline void freeI64Except(RegI64 r, RegI32 except);
482 // Return the 32-bit low part of the 64-bit register, do not free anything.
483 inline RegI32 fromI64(RegI64 r);
485 // If r is valid, return fromI64(r), otherwise an invalid RegI32.
486 inline RegI32 maybeFromI64(RegI64 r);
488 #ifdef JS_PUNBOX64
489 // On 64-bit systems, reinterpret r as 64-bit.
490 inline RegI64 fromI32(RegI32 r);
491 #endif
493 // Widen r to 64 bits; this may allocate another register to form a pair.
494 // Note this does not generate code for sign/zero extension.
495 inline RegI64 widenI32(RegI32 r);
497 // Narrow r to 32 bits; this may free part of a pair. Note this does not
498 // generate code to canonicalize the value on 64-bit systems.
499 inline RegI32 narrowI64(RegI64 r);
500 inline RegI32 narrowRef(RegRef r);
502 // Return the 32-bit low part of r.
503 inline RegI32 lowPart(RegI64 r);
505 // On 64-bit systems, return an invalid register. On 32-bit systems, return
506 // the low part of a pair.
507 inline RegI32 maybeHighPart(RegI64 r);
509 // On 64-bit systems, do nothing. On 32-bit systems, clear the high register.
510 inline void maybeClearHighPart(RegI64 r);
512 //////////////////////////////////////////////////////////////////////////////
514 // Values and value stack: Low-level methods for moving Stk values of specific
515 // kinds to registers.
517 inline void loadConstI32(const Stk& src, RegI32 dest);
518 inline void loadMemI32(const Stk& src, RegI32 dest);
519 inline void loadLocalI32(const Stk& src, RegI32 dest);
520 inline void loadRegisterI32(const Stk& src, RegI32 dest);
521 inline void loadConstI64(const Stk& src, RegI64 dest);
522 inline void loadMemI64(const Stk& src, RegI64 dest);
523 inline void loadLocalI64(const Stk& src, RegI64 dest);
524 inline void loadRegisterI64(const Stk& src, RegI64 dest);
525 inline void loadConstRef(const Stk& src, RegRef dest);
526 inline void loadMemRef(const Stk& src, RegRef dest);
527 inline void loadLocalRef(const Stk& src, RegRef dest);
528 inline void loadRegisterRef(const Stk& src, RegRef dest);
529 inline void loadConstF64(const Stk& src, RegF64 dest);
530 inline void loadMemF64(const Stk& src, RegF64 dest);
531 inline void loadLocalF64(const Stk& src, RegF64 dest);
532 inline void loadRegisterF64(const Stk& src, RegF64 dest);
533 inline void loadConstF32(const Stk& src, RegF32 dest);
534 inline void loadMemF32(const Stk& src, RegF32 dest);
535 inline void loadLocalF32(const Stk& src, RegF32 dest);
536 inline void loadRegisterF32(const Stk& src, RegF32 dest);
537 #ifdef ENABLE_WASM_SIMD
538 inline void loadConstV128(const Stk& src, RegV128 dest);
539 inline void loadMemV128(const Stk& src, RegV128 dest);
540 inline void loadLocalV128(const Stk& src, RegV128 dest);
541 inline void loadRegisterV128(const Stk& src, RegV128 dest);
542 #endif
544 //////////////////////////////////////////////////////////////////////////
546 // Values and value stack: Mid-level routines for moving Stk values of any
547 // kind to registers.
549 inline void loadI32(const Stk& src, RegI32 dest);
550 inline void loadI64(const Stk& src, RegI64 dest);
551 #if !defined(JS_PUNBOX64)
552 inline void loadI64Low(const Stk& src, RegI32 dest);
553 inline void loadI64High(const Stk& src, RegI32 dest);
554 #endif
555 inline void loadF64(const Stk& src, RegF64 dest);
556 inline void loadF32(const Stk& src, RegF32 dest);
557 #ifdef ENABLE_WASM_SIMD
558 inline void loadV128(const Stk& src, RegV128 dest);
559 #endif
560 inline void loadRef(const Stk& src, RegRef dest);
562 //////////////////////////////////////////////////////////////////////
564 // Value stack: stack management.
566 // Flush all local and register value stack elements to memory.
567 inline void sync();
569 // Save a register on the value stack temporarily.
570 void saveTempPtr(const RegPtr& r);
572 // Restore a temporarily saved register from the value stack.
573 void restoreTempPtr(const RegPtr& r);
575 // This is an optimization used to avoid calling sync for setLocal: if the
576 // local does not exist unresolved on the value stack then we can skip the
577 // sync.
578 inline bool hasLocal(uint32_t slot);
580 // Sync the local if necessary. (This currently syncs everything if a sync is
581 // needed at all.)
582 inline void syncLocal(uint32_t slot);
584 // Return the amount of execution stack consumed by the top numval
585 // values on the value stack.
586 inline size_t stackConsumed(size_t numval);
588 // Drop one value off the stack, possibly also moving the physical stack
589 // pointer.
590 inline void dropValue();
592 #ifdef DEBUG
593 // Check that we're not leaking registers by comparing the
594 // state of the stack + available registers with the set of
595 // all available registers.
597 // Call this between opcodes.
598 void performRegisterLeakCheck();
600 // This can be called at any point, really, but typically just after
601 // performRegisterLeakCheck().
602 void assertStackInvariants() const;
604 // Count the number of memory references on the value stack.
605 inline size_t countMemRefsOnStk();
607 // Print the stack to stderr.
608 void showStack(const char* who) const;
609 #endif
611 //////////////////////////////////////////////////////////////////////
613 // Value stack: pushers of values.
615 // Push a register onto the value stack.
616 inline void pushAny(AnyReg r);
617 inline void pushI32(RegI32 r);
618 inline void pushI64(RegI64 r);
619 inline void pushRef(RegRef r);
620 inline void pushPtr(RegPtr r);
621 inline void pushF64(RegF64 r);
622 inline void pushF32(RegF32 r);
623 #ifdef ENABLE_WASM_SIMD
624 inline void pushV128(RegV128 r);
625 #endif
627 // Template variation of the foregoing, for use by templated emitters.
628 template <typename RegType>
629 inline void push(RegType item);
631 // Push a constant value onto the stack. pushI32 can also take uint32_t, and
632 // pushI64 can take uint64_t; the semantics are the same. Appropriate sign
633 // extension for a 32-bit value on a 64-bit architecture happens when the
634 // value is popped, see the definition of moveImm32.
635 inline void pushI32(int32_t v);
636 inline void pushI64(int64_t v);
637 inline void pushRef(intptr_t v);
638 inline void pushPtr(intptr_t v);
639 inline void pushF64(double v);
640 inline void pushF32(float v);
641 #ifdef ENABLE_WASM_SIMD
642 inline void pushV128(V128 v);
643 #endif
644 inline void pushConstRef(intptr_t v);
646 // Push the local slot onto the stack. The slot will not be read here; it
647 // will be read when it is consumed, or when a side effect to the slot forces
648 // its value to be saved.
649 inline void pushLocalI32(uint32_t slot);
650 inline void pushLocalI64(uint32_t slot);
651 inline void pushLocalRef(uint32_t slot);
652 inline void pushLocalF64(uint32_t slot);
653 inline void pushLocalF32(uint32_t slot);
654 #ifdef ENABLE_WASM_SIMD
655 inline void pushLocalV128(uint32_t slot);
656 #endif
658 // Push an U32 as an I64, zero-extending it in the process
659 inline void pushU32AsI64(RegI32 rs);
661 //////////////////////////////////////////////////////////////////////
663 // Value stack: poppers and peekers of values.
665 // Pop some value off the stack.
666 inline AnyReg popAny();
667 inline AnyReg popAny(AnyReg specific);
669 // Call only from other popI32() variants. v must be the stack top. May pop
670 // the CPU stack.
671 inline void popI32(const Stk& v, RegI32 dest);
673 [[nodiscard]] inline RegI32 popI32();
674 inline RegI32 popI32(RegI32 specific);
676 #ifdef ENABLE_WASM_SIMD
677 // Call only from other popV128() variants. v must be the stack top. May pop
678 // the CPU stack.
679 inline void popV128(const Stk& v, RegV128 dest);
681 [[nodiscard]] inline RegV128 popV128();
682 inline RegV128 popV128(RegV128 specific);
683 #endif
685 // Call only from other popI64() variants. v must be the stack top. May pop
686 // the CPU stack.
687 inline void popI64(const Stk& v, RegI64 dest);
689 [[nodiscard]] inline RegI64 popI64();
690 inline RegI64 popI64(RegI64 specific);
692 // Call only from other popRef() variants. v must be the stack top. May pop
693 // the CPU stack.
694 inline void popRef(const Stk& v, RegRef dest);
696 inline RegRef popRef(RegRef specific);
697 [[nodiscard]] inline RegRef popRef();
699 // Call only from other popPtr() variants. v must be the stack top. May pop
700 // the CPU stack.
701 inline void popPtr(const Stk& v, RegPtr dest);
703 inline RegPtr popPtr(RegPtr specific);
704 [[nodiscard]] inline RegPtr popPtr();
706 // Call only from other popF64() variants. v must be the stack top. May pop
707 // the CPU stack.
708 inline void popF64(const Stk& v, RegF64 dest);
710 [[nodiscard]] inline RegF64 popF64();
711 inline RegF64 popF64(RegF64 specific);
713 // Call only from other popF32() variants. v must be the stack top. May pop
714 // the CPU stack.
715 inline void popF32(const Stk& v, RegF32 dest);
717 [[nodiscard]] inline RegF32 popF32();
718 inline RegF32 popF32(RegF32 specific);
720 // Templated variation of the foregoing, for use by templated emitters.
721 template <typename RegType>
722 inline RegType pop();
724 // Constant poppers will return true and pop the value if the stack top is a
725 // constant of the appropriate type; otherwise pop nothing and return false.
726 [[nodiscard]] inline bool hasConst() const;
727 [[nodiscard]] inline bool popConst(int32_t* c);
728 [[nodiscard]] inline bool popConst(int64_t* c);
729 [[nodiscard]] inline bool peekConst(int32_t* c);
730 [[nodiscard]] inline bool peekConst(int64_t* c);
731 [[nodiscard]] inline bool peek2xConst(int32_t* c0, int32_t* c1);
732 [[nodiscard]] inline bool popConstPositivePowerOfTwo(int32_t* c,
733 uint_fast8_t* power,
734 int32_t cutoff);
735 [[nodiscard]] inline bool popConstPositivePowerOfTwo(int64_t* c,
736 uint_fast8_t* power,
737 int64_t cutoff);
739 // Shorthand: Pop r1, then r0.
740 inline void pop2xI32(RegI32* r0, RegI32* r1);
741 inline void pop2xI64(RegI64* r0, RegI64* r1);
742 inline void pop2xF32(RegF32* r0, RegF32* r1);
743 inline void pop2xF64(RegF64* r0, RegF64* r1);
744 #ifdef ENABLE_WASM_SIMD
745 inline void pop2xV128(RegV128* r0, RegV128* r1);
746 #endif
747 inline void pop2xRef(RegRef* r0, RegRef* r1);
749 // Pop to a specific register
750 inline RegI32 popI32ToSpecific(RegI32 specific);
751 inline RegI64 popI64ToSpecific(RegI64 specific);
753 #ifdef JS_CODEGEN_ARM
754 // Pop an I64 as a valid register pair.
755 inline RegI64 popI64Pair();
756 #endif
758 // Pop an I64 but narrow it and return the narrowed part.
759 inline RegI32 popI64ToI32();
760 inline RegI32 popI64ToSpecificI32(RegI32 specific);
762 // Pop an I32 or I64 as an I64. The value is zero extended out to 64-bits.
763 inline RegI64 popIndexToInt64(IndexType indexType);
765 // Pop the stack until it has the desired size, but do not move the physical
766 // stack pointer.
767 inline void popValueStackTo(uint32_t stackSize);
769 // Pop the given number of elements off the value stack, but do not move
770 // the physical stack pointer.
771 inline void popValueStackBy(uint32_t items);
773 // Peek into the stack at relativeDepth from the top.
774 inline Stk& peek(uint32_t relativeDepth);
776 // Peek the reference value at the specified depth and load it into a
777 // register.
778 inline void peekRefAt(uint32_t depth, RegRef dest);
780 // Peek at the value on the top of the stack and return true if it is a Local
781 // of any type.
782 [[nodiscard]] inline bool peekLocal(uint32_t* local);
784 ////////////////////////////////////////////////////////////////////////////
786 // Block parameters and results.
788 // Blocks may have multiple parameters and multiple results. Blocks can also
789 // be the target of branches: the entry for loops, and the exit for
790 // non-loops.
792 // Passing multiple values to a non-branch target (i.e., the entry of a
793 // "block") falls out naturally: any items on the value stack can flow
794 // directly from one block to another.
796 // However, for branch targets, we need to allocate well-known locations for
797 // the branch values. The approach taken in the baseline compiler is to
798 // allocate registers to the top N values (currently N=1), and then stack
799 // locations for the rest.
802 // Types of result registers that interest us for result-manipulating
803 // functions.
804 enum class ResultRegKind {
805 // General and floating result registers.
806 All,
808 // General result registers only.
809 OnlyGPRs
812 // This is a flag ultimately intended for popBlockResults() that specifies how
813 // the CPU stack should be handled after the result values have been
814 // processed.
815 enum class ContinuationKind {
816 // Adjust the stack for a fallthrough: do nothing.
817 Fallthrough,
819 // Adjust the stack for a jump: make the stack conform to the
820 // expected stack at the target
821 Jump
824 // TODO: It's definitely disputable whether the result register management is
825 // hot enough to warrant inlining at the outermost level.
827 inline void needResultRegisters(ResultType type, ResultRegKind which);
828 #ifdef JS_64BIT
829 inline void widenInt32ResultRegisters(ResultType type);
830 #endif
831 inline void freeResultRegisters(ResultType type, ResultRegKind which);
832 inline void needIntegerResultRegisters(ResultType type);
833 inline void freeIntegerResultRegisters(ResultType type);
834 inline void needResultRegisters(ResultType type);
835 inline void freeResultRegisters(ResultType type);
836 void assertResultRegistersAvailable(ResultType type);
837 inline void captureResultRegisters(ResultType type);
838 inline void captureCallResultRegisters(ResultType type);
840 void popRegisterResults(ABIResultIter& iter);
841 void popStackResults(ABIResultIter& iter, StackHeight stackBase);
843 void popBlockResults(ResultType type, StackHeight stackBase,
844 ContinuationKind kind);
846 // This function is similar to popBlockResults, but additionally handles the
847 // implicit exception pointer that is pushed to the value stack on entry to
848 // a catch handler by dropping it appropriately.
849 void popCatchResults(ResultType type, StackHeight stackBase);
851 Stk captureStackResult(const ABIResult& result, StackHeight resultsBase,
852 uint32_t stackResultBytes);
854 [[nodiscard]] bool pushResults(ResultType type, StackHeight resultsBase);
855 [[nodiscard]] bool pushBlockResults(ResultType type);
857 // A combination of popBlockResults + pushBlockResults, used when entering a
858 // block with a control-flow join (loops) or split (if) to shuffle the
859 // fallthrough block parameters into the locations expected by the
860 // continuation.
862 // This function should only be called when entering a block with a
863 // control-flow join at the entry, where there are no live temporaries in
864 // the current block.
865 [[nodiscard]] bool topBlockParams(ResultType type);
867 // A combination of popBlockResults + pushBlockResults, used before branches
868 // where we don't know the target (br_if / br_table). If and when the branch
869 // is taken, the stack results will be shuffled down into place. For br_if
870 // that has fallthrough, the parameters for the untaken branch flow through to
871 // the continuation.
872 [[nodiscard]] bool topBranchParams(ResultType type, StackHeight* height);
874 // Conditional branches with fallthrough are preceded by a topBranchParams, so
875 // we know that there are no stack results that need to be materialized. In
876 // that case, we can just shuffle the whole block down before popping the
877 // stack.
878 void shuffleStackResultsBeforeBranch(StackHeight srcHeight,
879 StackHeight destHeight, ResultType type);
881 // If in debug mode, adds LeaveFrame breakpoint.
882 bool insertDebugCollapseFrame();
884 //////////////////////////////////////////////////////////////////////
886 // Stack maps
888 // Various methods for creating a stackmap. Stackmaps are indexed by the
889 // lowest address of the instruction immediately *after* the instruction of
890 // interest. In practice that means one of: the return point of a call, the
891 // instruction immediately after a trap instruction (the "resume"
892 // instruction), or the instruction immediately following a no-op (when
893 // debugging is enabled).
895 // Create a vanilla stackmap.
896 [[nodiscard]] bool createStackMap(const char* who);
898 // Create a stackmap as vanilla, but for a custom assembler offset.
899 [[nodiscard]] bool createStackMap(const char* who,
900 CodeOffset assemblerOffset);
902 // Create a stack map as vanilla, and note the presence of a ref-typed
903 // DebugFrame on the stack.
904 [[nodiscard]] bool createStackMap(
905 const char* who, HasDebugFrameWithLiveRefs debugFrameWithLiveRefs);
907 // The most general stackmap construction.
908 [[nodiscard]] bool createStackMap(
909 const char* who, const ExitStubMapVector& extras,
910 uint32_t assemblerOffset,
911 HasDebugFrameWithLiveRefs debugFrameWithLiveRefs);
913 ////////////////////////////////////////////////////////////
915 // Control stack
917 inline void initControl(Control& item, ResultType params);
918 inline Control& controlItem();
919 inline Control& controlItem(uint32_t relativeDepth);
920 inline Control& controlOutermost();
921 inline LabelKind controlKind(uint32_t relativeDepth);
923 ////////////////////////////////////////////////////////////
925 // Debugger API
927 // Insert a breakpoint almost anywhere. This will create a call, with all the
928 // overhead that entails.
929 void insertBreakablePoint(CallSiteDesc::Kind kind);
931 // Insert code at the end of a function for breakpoint filtering.
932 void insertBreakpointStub();
934 // Debugger API used at the return point: shuffle register return values off
935 // to memory for the debugger to see; and get them back again.
936 void saveRegisterReturnValues(const ResultType& resultType);
937 void restoreRegisterReturnValues(const ResultType& resultType);
939 //////////////////////////////////////////////////////////////////////
941 // Function prologue and epilogue.
943 // Set up and tear down frame, execute prologue and epilogue.
944 [[nodiscard]] bool beginFunction();
945 [[nodiscard]] bool endFunction();
947 // Move return values to memory before returning, as appropriate
948 void popStackReturnValues(const ResultType& resultType);
950 //////////////////////////////////////////////////////////////////////
952 // Calls.
954 void beginCall(FunctionCall& call, UseABI useABI,
955 RestoreRegisterStateAndRealm restoreRegisterStateAndRealm);
956 void endCall(FunctionCall& call, size_t stackSpace);
957 void startCallArgs(size_t stackArgAreaSizeUnaligned, FunctionCall* call);
958 ABIArg reservePointerArgument(FunctionCall* call);
959 void passArg(ValType type, const Stk& arg, FunctionCall* call);
960 CodeOffset callDefinition(uint32_t funcIndex, const FunctionCall& call);
961 CodeOffset callSymbolic(SymbolicAddress callee, const FunctionCall& call);
963 // Precondition for the call*() methods: sync()
965 bool callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
966 const Stk& indexVal, const FunctionCall& call,
967 bool tailCall, CodeOffset* fastCallOffset,
968 CodeOffset* slowCallOffset);
969 CodeOffset callImport(unsigned instanceDataOffset, const FunctionCall& call);
970 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
971 void callRef(const Stk& calleeRef, const FunctionCall& call,
972 CodeOffset* fastCallOffset, CodeOffset* slowCallOffset);
973 # ifdef ENABLE_WASM_TAIL_CALLS
974 void returnCallRef(const Stk& calleeRef, const FunctionCall& call,
975 const FuncType* funcType);
976 # endif
977 #endif
978 CodeOffset builtinCall(SymbolicAddress builtin, const FunctionCall& call);
979 CodeOffset builtinInstanceMethodCall(const SymbolicAddressSignature& builtin,
980 const ABIArg& instanceArg,
981 const FunctionCall& call);
982 [[nodiscard]] bool pushCallResults(const FunctionCall& call, ResultType type,
983 const StackResultsLoc& loc);
985 // Helpers to pick up the returned value from the return register.
986 inline RegI32 captureReturnedI32();
987 inline RegI64 captureReturnedI64();
988 inline RegF32 captureReturnedF32(const FunctionCall& call);
989 inline RegF64 captureReturnedF64(const FunctionCall& call);
990 #ifdef ENABLE_WASM_SIMD
991 inline RegV128 captureReturnedV128(const FunctionCall& call);
992 #endif
993 inline RegRef captureReturnedRef();
995 //////////////////////////////////////////////////////////////////////
997 // Register-to-register moves. These emit nothing if src == dest.
999 inline void moveI32(RegI32 src, RegI32 dest);
1000 inline void moveI64(RegI64 src, RegI64 dest);
1001 inline void moveRef(RegRef src, RegRef dest);
1002 inline void movePtr(RegPtr src, RegPtr dest);
1003 inline void moveF64(RegF64 src, RegF64 dest);
1004 inline void moveF32(RegF32 src, RegF32 dest);
1005 #ifdef ENABLE_WASM_SIMD
1006 inline void moveV128(RegV128 src, RegV128 dest);
1007 #endif
1009 template <typename RegType>
1010 inline void move(RegType src, RegType dest);
1012 //////////////////////////////////////////////////////////////////////
1014 // Immediate-to-register moves.
1016 // The compiler depends on moveImm32() clearing the high bits of a 64-bit
1017 // register on 64-bit systems except MIPS64 and LoongArch64 where high bits
1018 // are sign extended from lower bits, see doc block "64-bit GPRs carrying
1019 // 32-bit values" in MacroAssembler.h.
1021 inline void moveImm32(int32_t v, RegI32 dest);
1022 inline void moveImm64(int64_t v, RegI64 dest);
1023 inline void moveImmRef(intptr_t v, RegRef dest);
1025 //////////////////////////////////////////////////////////////////////
1027 // Sundry low-level code generators.
1029 // Check the interrupt flag, trap if it is set.
1030 [[nodiscard]] bool addInterruptCheck();
1032 // Check that the value is not zero, trap if it is.
1033 void checkDivideByZero(RegI32 rhs);
1034 void checkDivideByZero(RegI64 r);
1036 // Check that a signed division will not overflow, trap or flush-to-zero if it
1037 // will according to `zeroOnOverflow`.
1038 void checkDivideSignedOverflow(RegI32 rhs, RegI32 srcDest, Label* done,
1039 bool zeroOnOverflow);
1040 void checkDivideSignedOverflow(RegI64 rhs, RegI64 srcDest, Label* done,
1041 bool zeroOnOverflow);
1043 // Emit a jump table to be used by tableSwitch()
1044 void jumpTable(const LabelVector& labels, Label* theTable);
1046 // Emit a table switch, `theTable` is the jump table.
1047 void tableSwitch(Label* theTable, RegI32 switchValue, Label* dispatchCode);
1049 // Compare i64 and set an i32 boolean result according to the condition.
1050 inline void cmp64Set(Assembler::Condition cond, RegI64 lhs, RegI64 rhs,
1051 RegI32 dest);
1053 // Round floating to integer.
1054 [[nodiscard]] inline bool supportsRoundInstruction(RoundingMode mode);
1055 inline void roundF32(RoundingMode roundingMode, RegF32 f0);
1056 inline void roundF64(RoundingMode roundingMode, RegF64 f0);
1058 // These are just wrappers around assembler functions, but without
1059 // type-specific names, and using our register abstractions for better type
1060 // discipline.
1061 inline void branchTo(Assembler::DoubleCondition c, RegF64 lhs, RegF64 rhs,
1062 Label* l);
1063 inline void branchTo(Assembler::DoubleCondition c, RegF32 lhs, RegF32 rhs,
1064 Label* l);
1065 inline void branchTo(Assembler::Condition c, RegI32 lhs, RegI32 rhs,
1066 Label* l);
1067 inline void branchTo(Assembler::Condition c, RegI32 lhs, Imm32 rhs, Label* l);
1068 inline void branchTo(Assembler::Condition c, RegI64 lhs, RegI64 rhs,
1069 Label* l);
1070 inline void branchTo(Assembler::Condition c, RegI64 lhs, Imm64 rhs, Label* l);
1071 inline void branchTo(Assembler::Condition c, RegRef lhs, ImmWord rhs,
1072 Label* l);
1074 // Helpers for accessing Instance::baselineScratchWords_. Note that Word
1075 // and I64 versions of these routines access the same area and it is up to
1076 // the caller to use it in some way which makes sense.
1078 // Store/load `r`, a machine word, to/from the `index`th scratch storage
1079 // slot in the current Instance. `instancePtr` must point at the current
1080 // Instance; it will not be modified. For ::stashWord, `r` must not be the
1081 // same as `instancePtr`.
1082 void stashWord(RegPtr instancePtr, size_t index, RegPtr r);
1083 void unstashWord(RegPtr instancePtr, size_t index, RegPtr r);
1085 #ifdef JS_CODEGEN_X86
1086 // Store r in instance scratch storage after first loading the instance from
1087 // the frame into the regForInstance. regForInstance must be neither of the
1088 // registers in r.
1089 void stashI64(RegPtr regForInstance, RegI64 r);
1091 // Load r from the instance scratch storage after first loading the instance
1092 // from the frame into the regForInstance. regForInstance can be one of the
1093 // registers in r.
1094 void unstashI64(RegPtr regForInstance, RegI64 r);
1095 #endif
1097 //////////////////////////////////////////////////////////////////////
1099 // Code generators for actual operations.
1101 template <typename RegType, typename IntType>
1102 void quotientOrRemainder(RegType rs, RegType rsd, RegType reserved,
1103 IsUnsigned isUnsigned, ZeroOnOverflow zeroOnOverflow,
1104 bool isConst, IntType c,
1105 void (*operate)(MacroAssembler&, RegType, RegType,
1106 RegType, IsUnsigned));
1108 [[nodiscard]] bool truncateF32ToI32(RegF32 src, RegI32 dest,
1109 TruncFlags flags);
1110 [[nodiscard]] bool truncateF64ToI32(RegF64 src, RegI32 dest,
1111 TruncFlags flags);
1113 #ifndef RABALDR_FLOAT_TO_I64_CALLOUT
1114 [[nodiscard]] RegF64 needTempForFloatingToI64(TruncFlags flags);
1115 [[nodiscard]] bool truncateF32ToI64(RegF32 src, RegI64 dest, TruncFlags flags,
1116 RegF64 temp);
1117 [[nodiscard]] bool truncateF64ToI64(RegF64 src, RegI64 dest, TruncFlags flags,
1118 RegF64 temp);
1119 #endif // RABALDR_FLOAT_TO_I64_CALLOUT
1121 #ifndef RABALDR_I64_TO_FLOAT_CALLOUT
1122 [[nodiscard]] RegI32 needConvertI64ToFloatTemp(ValType to, bool isUnsigned);
1123 void convertI64ToF32(RegI64 src, bool isUnsigned, RegF32 dest, RegI32 temp);
1124 void convertI64ToF64(RegI64 src, bool isUnsigned, RegF64 dest, RegI32 temp);
1125 #endif // RABALDR_I64_TO_FLOAT_CALLOUT
1127 //////////////////////////////////////////////////////////////////////
1129 // Global variable access.
1131 Address addressOfGlobalVar(const GlobalDesc& global, RegPtr tmp);
1133 //////////////////////////////////////////////////////////////////////
1135 // Table access.
1137 Address addressOfTableField(uint32_t tableIndex, uint32_t fieldOffset,
1138 RegPtr instance);
1139 void loadTableLength(uint32_t tableIndex, RegPtr instance, RegI32 length);
1140 void loadTableElements(uint32_t tableIndex, RegPtr instance, RegPtr elements);
1142 //////////////////////////////////////////////////////////////////////
1144 // Heap access.
1146 void bceCheckLocal(MemoryAccessDesc* access, AccessCheck* check,
1147 uint32_t local);
1148 void bceLocalIsUpdated(uint32_t local);
1150 // Fold offsets into ptr and bounds check as necessary. The instance will be
1151 // valid in cases where it's needed.
1152 template <typename RegIndexType>
1153 void prepareMemoryAccess(MemoryAccessDesc* access, AccessCheck* check,
1154 RegPtr instance, RegIndexType ptr);
1156 void branchAddNoOverflow(uint64_t offset, RegI32 ptr, Label* ok);
1157 void branchTestLowZero(RegI32 ptr, Imm32 mask, Label* ok);
1158 void boundsCheck4GBOrLargerAccess(uint32_t memoryIndex, RegPtr instance,
1159 RegI32 ptr, Label* ok);
1160 void boundsCheckBelow4GBAccess(uint32_t memoryIndex, RegPtr instance,
1161 RegI32 ptr, Label* ok);
1163 void branchAddNoOverflow(uint64_t offset, RegI64 ptr, Label* ok);
1164 void branchTestLowZero(RegI64 ptr, Imm32 mask, Label* ok);
1165 void boundsCheck4GBOrLargerAccess(uint32_t memoryIndex, RegPtr instance,
1166 RegI64 ptr, Label* ok);
1167 void boundsCheckBelow4GBAccess(uint32_t memoryIndex, RegPtr instance,
1168 RegI64 ptr, Label* ok);
1170 // Some consumers depend on the returned Address not incorporating instance,
1171 // as instance may be the scratch register.
1172 template <typename RegIndexType>
1173 Address prepareAtomicMemoryAccess(MemoryAccessDesc* access,
1174 AccessCheck* check, RegPtr instance,
1175 RegIndexType ptr);
1177 template <typename RegIndexType>
1178 void computeEffectiveAddress(MemoryAccessDesc* access);
1180 [[nodiscard]] bool needInstanceForAccess(const MemoryAccessDesc* access,
1181 const AccessCheck& check);
1183 // ptr and dest may be the same iff dest is I32.
1184 // This may destroy ptr even if ptr and dest are not the same.
1185 void executeLoad(MemoryAccessDesc* access, AccessCheck* check,
1186 RegPtr instance, RegPtr memoryBase, RegI32 ptr, AnyReg dest,
1187 RegI32 temp);
1188 void load(MemoryAccessDesc* access, AccessCheck* check, RegPtr instance,
1189 RegPtr memoryBase, RegI32 ptr, AnyReg dest, RegI32 temp);
1190 #ifdef ENABLE_WASM_MEMORY64
1191 void load(MemoryAccessDesc* access, AccessCheck* check, RegPtr instance,
1192 RegPtr memoryBase, RegI64 ptr, AnyReg dest, RegI64 temp);
1193 #endif
1195 template <typename RegType>
1196 void doLoadCommon(MemoryAccessDesc* access, AccessCheck check, ValType type);
1198 void loadCommon(MemoryAccessDesc* access, AccessCheck check, ValType type);
1200 // ptr and src must not be the same register.
1201 // This may destroy ptr and src.
1202 void executeStore(MemoryAccessDesc* access, AccessCheck* check,
1203 RegPtr instance, RegPtr memoryBase, RegI32 ptr, AnyReg src,
1204 RegI32 temp);
1205 void store(MemoryAccessDesc* access, AccessCheck* check, RegPtr instance,
1206 RegPtr memoryBase, RegI32 ptr, AnyReg src, RegI32 temp);
1207 #ifdef ENABLE_WASM_MEMORY64
1208 void store(MemoryAccessDesc* access, AccessCheck* check, RegPtr instance,
1209 RegPtr memoryBase, RegI64 ptr, AnyReg src, RegI64 temp);
1210 #endif
1212 template <typename RegType>
1213 void doStoreCommon(MemoryAccessDesc* access, AccessCheck check,
1214 ValType resultType);
1216 void storeCommon(MemoryAccessDesc* access, AccessCheck check,
1217 ValType resultType);
1219 void atomicLoad(MemoryAccessDesc* access, ValType type);
1220 #if !defined(JS_64BIT)
1221 template <typename RegIndexType>
1222 void atomicLoad64(MemoryAccessDesc* desc);
1223 #endif
1225 void atomicStore(MemoryAccessDesc* access, ValType type);
1227 void atomicRMW(MemoryAccessDesc* access, ValType type, AtomicOp op);
1228 template <typename RegIndexType>
1229 void atomicRMW32(MemoryAccessDesc* access, ValType type, AtomicOp op);
1230 template <typename RegIndexType>
1231 void atomicRMW64(MemoryAccessDesc* access, ValType type, AtomicOp op);
1233 void atomicXchg(MemoryAccessDesc* access, ValType type);
1234 template <typename RegIndexType>
1235 void atomicXchg64(MemoryAccessDesc* access, WantResult wantResult);
1236 template <typename RegIndexType>
1237 void atomicXchg32(MemoryAccessDesc* access, ValType type);
1239 void atomicCmpXchg(MemoryAccessDesc* access, ValType type);
1240 template <typename RegIndexType>
1241 void atomicCmpXchg32(MemoryAccessDesc* access, ValType type);
1242 template <typename RegIndexType>
1243 void atomicCmpXchg64(MemoryAccessDesc* access, ValType type);
1245 template <typename RegType>
1246 RegType popConstMemoryAccess(MemoryAccessDesc* access, AccessCheck* check);
1247 template <typename RegType>
1248 RegType popMemoryAccess(MemoryAccessDesc* access, AccessCheck* check);
1250 void pushHeapBase(uint32_t memoryIndex);
1252 ////////////////////////////////////////////////////////////////////////////
1254 // Platform-specific popping and register targeting.
1256 // The simple popping methods pop values into targeted registers; the caller
1257 // can free registers using standard functions. These are always called
1258 // popXForY where X says something about types and Y something about the
1259 // operation being targeted.
1261 RegI32 needRotate64Temp();
1262 void popAndAllocateForDivAndRemI32(RegI32* r0, RegI32* r1, RegI32* reserved);
1263 void popAndAllocateForMulI64(RegI64* r0, RegI64* r1, RegI32* temp);
1264 #ifndef RABALDR_INT_DIV_I64_CALLOUT
1265 void popAndAllocateForDivAndRemI64(RegI64* r0, RegI64* r1, RegI64* reserved,
1266 IsRemainder isRemainder);
1267 #endif
1268 RegI32 popI32RhsForShift();
1269 RegI32 popI32RhsForShiftI64();
1270 RegI64 popI64RhsForShift();
1271 RegI32 popI32RhsForRotate();
1272 RegI64 popI64RhsForRotate();
1273 void popI32ForSignExtendI64(RegI64* r0);
1274 void popI64ForSignExtendI64(RegI64* r0);
1276 ////////////////////////////////////////////////////////////
1278 // Sundry helpers.
1280 // Retrieve the current bytecodeOffset.
1281 inline BytecodeOffset bytecodeOffset() const;
1283 // Generate a trap instruction for the current bytecodeOffset.
1284 inline void trap(Trap t) const;
1286 // Abstracted helper for throwing, used for throw, rethrow, and rethrowing
1287 // at the end of a series of catch blocks (if none matched the exception).
1288 [[nodiscard]] bool throwFrom(RegRef exn);
1290 // Load the specified tag object from the Instance.
1291 void loadTag(RegPtr instance, uint32_t tagIndex, RegRef tagDst);
1293 // Load the pending exception state from the Instance and then reset it.
1294 void consumePendingException(RegPtr instance, RegRef* exnDst, RegRef* tagDst);
1296 [[nodiscard]] bool startTryNote(size_t* tryNoteIndex);
1297 void finishTryNote(size_t tryNoteIndex);
1299 ////////////////////////////////////////////////////////////
1301 // Barriers support.
1303 // This emits a GC pre-write barrier. The pre-barrier is needed when we
1304 // replace a member field with a new value, and the previous field value
1305 // might have no other referents, and incremental GC is ongoing. The field
1306 // might belong to an object or be a stack slot or a register or a heap
1307 // allocated value.
1309 // let obj = { field: previousValue };
1310 // obj.field = newValue; // previousValue must be marked with a pre-barrier.
1312 // The `valueAddr` is the address of the location that we are about to
1313 // update. This function preserves that register.
1314 void emitPreBarrier(RegPtr valueAddr);
1316 // This emits a GC post-write barrier. The post-barrier is needed when we
1317 // replace a member field with a new value, the new value is in the nursery,
1318 // and the containing object is a tenured object. The field must then be
1319 // added to the store buffer so that the nursery can be correctly collected.
1320 // The field might belong to an object or be a stack slot or a register or a
1321 // heap allocated value.
1323 // For the difference between 'precise' and 'imprecise', look at the
1324 // documentation on PostBarrierKind.
1326 // `object` is a pointer to the object that contains the field. It is used, if
1327 // present, to skip adding a store buffer entry when the containing object is
1328 // in the nursery. This register is preserved by this function.
1329 // `valueAddr` is the address of the location that we are writing to. This
1330 // register is consumed by this function.
1331 // `prevValue` is the value that existed in the field before `value` was
1332 // stored. This register is consumed by this function.
1333 // `value` is the value that was stored in the field. This register is
1334 // preserved by this function.
1335 [[nodiscard]] bool emitPostBarrierImprecise(const Maybe<RegRef>& object,
1336 RegPtr valueAddr, RegRef value);
1337 [[nodiscard]] bool emitPostBarrierPrecise(const Maybe<RegRef>& object,
1338 RegPtr valueAddr, RegRef prevValue,
1339 RegRef value);
1341 // Emits a store to a JS object pointer at the address `valueAddr`, which is
1342 // inside the GC cell `object`.
1344 // Preserves `object` and `value`. Consumes `valueAddr`.
1345 [[nodiscard]] bool emitBarrieredStore(const Maybe<RegRef>& object,
1346 RegPtr valueAddr, RegRef value,
1347 PreBarrierKind preBarrierKind,
1348 PostBarrierKind postBarrierKind);
1350 // Emits a store of nullptr to a JS object pointer at the address valueAddr.
1351 // Preserves `valueAddr`.
1352 void emitBarrieredClear(RegPtr valueAddr);
1354 ////////////////////////////////////////////////////////////
1356 // Machinery for optimized conditional branches. See comments in the
1357 // implementation.
1359 void setLatentCompare(Assembler::Condition compareOp, ValType operandType);
1360 void setLatentCompare(Assembler::DoubleCondition compareOp,
1361 ValType operandType);
1362 void setLatentEqz(ValType operandType);
1363 bool hasLatentOp() const;
1364 void resetLatentOp();
1365 // Jump to the given branch, passing results, if the condition `cond`
1366 // holds between `lhs` and `rhs`.
1367 template <typename Cond, typename Lhs, typename Rhs>
1368 [[nodiscard]] bool jumpConditionalWithResults(BranchState* b, Cond cond,
1369 Lhs lhs, Rhs rhs);
1370 #ifdef ENABLE_WASM_GC
1371 // Jump to the given branch, passing results, if the WasmGcObject, `object`,
1372 // is a subtype of `destType`.
1373 [[nodiscard]] bool jumpConditionalWithResults(BranchState* b, RegRef object,
1374 RefType sourceType,
1375 RefType destType,
1376 bool onSuccess);
1377 #endif
1378 template <typename Cond>
1379 [[nodiscard]] bool sniffConditionalControlCmp(Cond compareOp,
1380 ValType operandType);
1381 [[nodiscard]] bool sniffConditionalControlEqz(ValType operandType);
1382 void emitBranchSetup(BranchState* b);
1383 [[nodiscard]] bool emitBranchPerform(BranchState* b);
1385 //////////////////////////////////////////////////////////////////////
1387 [[nodiscard]] bool emitBody();
1388 [[nodiscard]] bool emitBlock();
1389 [[nodiscard]] bool emitLoop();
1390 [[nodiscard]] bool emitIf();
1391 [[nodiscard]] bool emitElse();
1392 // Used for common setup for catch and catch_all.
1393 void emitCatchSetup(LabelKind kind, Control& tryCatch,
1394 const ResultType& resultType);
1395 // Helper function used to generate landing pad code for the special
1396 // case in which `delegate` jumps to a function's body block.
1397 [[nodiscard]] bool emitBodyDelegateThrowPad();
1399 [[nodiscard]] bool emitTry();
1400 [[nodiscard]] bool emitTryTable();
1401 [[nodiscard]] bool emitCatch();
1402 [[nodiscard]] bool emitCatchAll();
1403 [[nodiscard]] bool emitDelegate();
1404 [[nodiscard]] bool emitThrow();
1405 [[nodiscard]] bool emitThrowRef();
1406 [[nodiscard]] bool emitRethrow();
1407 [[nodiscard]] bool emitEnd();
1408 [[nodiscard]] bool emitBr();
1409 [[nodiscard]] bool emitBrIf();
1410 [[nodiscard]] bool emitBrTable();
1411 [[nodiscard]] bool emitDrop();
1412 [[nodiscard]] bool emitReturn();
1414 // A flag passed to emitCallArgs, describing how the value stack is laid out.
1415 enum class CalleeOnStack {
1416 // After the arguments to the call, there is a callee pushed onto value
1417 // stack. This is only the case for callIndirect. To get the arguments to
1418 // the call, emitCallArgs has to reach one element deeper into the value
1419 // stack, to skip the callee.
1420 True,
1422 // No callee on the stack.
1423 False
1426 // The typename T for emitCallArgs can be one of the following:
1427 // NormalCallResults, TailCallResults, or NoCallResults.
1428 template <typename T>
1429 [[nodiscard]] bool emitCallArgs(const ValTypeVector& argTypes, T results,
1430 FunctionCall* baselineCall,
1431 CalleeOnStack calleeOnStack);
1433 [[nodiscard]] bool emitCall();
1434 [[nodiscard]] bool emitReturnCall();
1435 [[nodiscard]] bool emitCallIndirect();
1436 [[nodiscard]] bool emitReturnCallIndirect();
1437 [[nodiscard]] bool emitUnaryMathBuiltinCall(SymbolicAddress callee,
1438 ValType operandType);
1439 [[nodiscard]] bool emitGetLocal();
1440 [[nodiscard]] bool emitSetLocal();
1441 [[nodiscard]] bool emitTeeLocal();
1442 [[nodiscard]] bool emitGetGlobal();
1443 [[nodiscard]] bool emitSetGlobal();
1444 [[nodiscard]] RegPtr maybeLoadMemoryBaseForAccess(
1445 RegPtr instance, const MemoryAccessDesc* access);
1446 [[nodiscard]] RegPtr maybeLoadInstanceForAccess(
1447 const MemoryAccessDesc* access, const AccessCheck& check);
1448 [[nodiscard]] RegPtr maybeLoadInstanceForAccess(
1449 const MemoryAccessDesc* access, const AccessCheck& check,
1450 RegPtr specific);
1451 [[nodiscard]] bool emitLoad(ValType type, Scalar::Type viewType);
1452 [[nodiscard]] bool emitStore(ValType resultType, Scalar::Type viewType);
1453 [[nodiscard]] bool emitSelect(bool typed);
1455 template <bool isSetLocal>
1456 [[nodiscard]] bool emitSetOrTeeLocal(uint32_t slot);
1458 [[nodiscard]] bool endBlock(ResultType type);
1459 [[nodiscard]] bool endIfThen(ResultType type);
1460 [[nodiscard]] bool endIfThenElse(ResultType type);
1461 [[nodiscard]] bool endTryCatch(ResultType type);
1462 [[nodiscard]] bool endTryTable(ResultType type);
1464 void doReturn(ContinuationKind kind);
1465 void pushReturnValueOfCall(const FunctionCall& call, MIRType type);
1467 [[nodiscard]] bool pushStackResultsForCall(const ResultType& type,
1468 RegPtr temp, StackResultsLoc* loc);
1469 void popStackResultsAfterCall(const StackResultsLoc& results,
1470 uint32_t stackArgBytes);
1472 void emitCompareI32(Assembler::Condition compareOp, ValType compareType);
1473 void emitCompareI64(Assembler::Condition compareOp, ValType compareType);
1474 void emitCompareF32(Assembler::DoubleCondition compareOp,
1475 ValType compareType);
1476 void emitCompareF64(Assembler::DoubleCondition compareOp,
1477 ValType compareType);
1478 void emitCompareRef(Assembler::Condition compareOp, ValType compareType);
1480 template <typename CompilerType>
1481 inline CompilerType& selectCompiler();
1483 template <typename SourceType, typename DestType>
1484 inline void emitUnop(void (*op)(MacroAssembler& masm, SourceType rs,
1485 DestType rd));
1487 template <typename SourceType, typename DestType, typename TempType>
1488 inline void emitUnop(void (*op)(MacroAssembler& masm, SourceType rs,
1489 DestType rd, TempType temp));
1491 template <typename SourceType, typename DestType, typename ImmType>
1492 inline void emitUnop(ImmType immediate, void (*op)(MacroAssembler&, ImmType,
1493 SourceType, DestType));
1495 template <typename CompilerType, typename RegType>
1496 inline void emitUnop(void (*op)(CompilerType& compiler, RegType rsd));
1498 template <typename RegType, typename TempType>
1499 inline void emitUnop(void (*op)(BaseCompiler& bc, RegType rsd, TempType rt),
1500 TempType (*getSpecializedTemp)(BaseCompiler& bc));
1502 template <typename CompilerType, typename RhsType, typename LhsDestType>
1503 inline void emitBinop(void (*op)(CompilerType& masm, RhsType src,
1504 LhsDestType srcDest));
1506 template <typename RhsDestType, typename LhsType>
1507 inline void emitBinop(void (*op)(MacroAssembler& masm, RhsDestType src,
1508 LhsType srcDest, RhsDestOp));
1510 template <typename RhsType, typename LhsDestType, typename TempType>
1511 inline void emitBinop(void (*)(MacroAssembler& masm, RhsType rs,
1512 LhsDestType rsd, TempType temp));
1514 template <typename RhsType, typename LhsDestType, typename TempType1,
1515 typename TempType2>
1516 inline void emitBinop(void (*)(MacroAssembler& masm, RhsType rs,
1517 LhsDestType rsd, TempType1 temp1,
1518 TempType2 temp2));
1520 template <typename RhsType, typename LhsDestType, typename ImmType>
1521 inline void emitBinop(ImmType immediate, void (*op)(MacroAssembler&, ImmType,
1522 RhsType, LhsDestType));
1524 template <typename RhsType, typename LhsDestType, typename ImmType,
1525 typename TempType1, typename TempType2>
1526 inline void emitBinop(ImmType immediate,
1527 void (*op)(MacroAssembler&, ImmType, RhsType,
1528 LhsDestType, TempType1 temp1,
1529 TempType2 temp2));
1531 template <typename CompilerType1, typename CompilerType2, typename RegType,
1532 typename ImmType>
1533 inline void emitBinop(void (*op)(CompilerType1& compiler1, RegType rs,
1534 RegType rd),
1535 void (*opConst)(CompilerType2& compiler2, ImmType c,
1536 RegType rd),
1537 RegType (BaseCompiler::*rhsPopper)() = nullptr);
1539 template <typename CompilerType, typename ValType>
1540 inline void emitTernary(void (*op)(CompilerType&, ValType src0, ValType src1,
1541 ValType srcDest));
1543 template <typename CompilerType, typename ValType>
1544 inline void emitTernary(void (*op)(CompilerType&, ValType src0, ValType src1,
1545 ValType srcDest, ValType temp));
1547 template <typename CompilerType, typename ValType>
1548 inline void emitTernaryResultLast(void (*op)(CompilerType&, ValType src0,
1549 ValType src1, ValType srcDest));
1551 template <typename R>
1552 [[nodiscard]] inline bool emitInstanceCallOp(
1553 const SymbolicAddressSignature& fn, R reader);
1555 template <typename A1, typename R>
1556 [[nodiscard]] inline bool emitInstanceCallOp(
1557 const SymbolicAddressSignature& fn, R reader);
1559 template <typename A1, typename A2, typename R>
1560 [[nodiscard]] inline bool emitInstanceCallOp(
1561 const SymbolicAddressSignature& fn, R reader);
1563 void emitMultiplyI64();
1564 void emitQuotientI32();
1565 void emitQuotientU32();
1566 void emitRemainderI32();
1567 void emitRemainderU32();
1568 #ifdef RABALDR_INT_DIV_I64_CALLOUT
1569 [[nodiscard]] bool emitDivOrModI64BuiltinCall(SymbolicAddress callee,
1570 ValType operandType);
1571 #else
1572 void emitQuotientI64();
1573 void emitQuotientU64();
1574 void emitRemainderI64();
1575 void emitRemainderU64();
1576 #endif
1577 void emitRotrI64();
1578 void emitRotlI64();
1579 void emitEqzI32();
1580 void emitEqzI64();
1581 template <TruncFlags flags>
1582 [[nodiscard]] bool emitTruncateF32ToI32();
1583 template <TruncFlags flags>
1584 [[nodiscard]] bool emitTruncateF64ToI32();
1585 #ifdef RABALDR_FLOAT_TO_I64_CALLOUT
1586 [[nodiscard]] bool emitConvertFloatingToInt64Callout(SymbolicAddress callee,
1587 ValType operandType,
1588 ValType resultType);
1589 #else
1590 template <TruncFlags flags>
1591 [[nodiscard]] bool emitTruncateF32ToI64();
1592 template <TruncFlags flags>
1593 [[nodiscard]] bool emitTruncateF64ToI64();
1594 #endif
1595 void emitExtendI64_8();
1596 void emitExtendI64_16();
1597 void emitExtendI64_32();
1598 void emitExtendI32ToI64();
1599 void emitExtendU32ToI64();
1600 #ifdef RABALDR_I64_TO_FLOAT_CALLOUT
1601 [[nodiscard]] bool emitConvertInt64ToFloatingCallout(SymbolicAddress callee,
1602 ValType operandType,
1603 ValType resultType);
1604 #else
1605 void emitConvertU64ToF32();
1606 void emitConvertU64ToF64();
1607 #endif
1608 void emitRound(RoundingMode roundingMode, ValType operandType);
1610 // Generate a call to the instance function denoted by `builtin`, passing as
1611 // args the top elements of the compiler's value stack and optionally an
1612 // Instance* too. The relationship between the top of stack and arg
1613 // ordering is as follows. If the value stack looks like this:
1615 // A <- least recently pushed
1616 // B
1617 // C <- most recently pushed
1619 // then the called function is expected to have signature [if an Instance*
1620 // is also to be passed]:
1622 // static Instance::foo(Instance*, A, B, C)
1624 // and the SymbolicAddressSignature::argTypes array will be
1626 // {_PTR, _A, _B, _C, _END} // _PTR is for the Instance*
1628 // (see WasmBuiltins.cpp). In short, the most recently pushed value is the
1629 // rightmost argument to the function.
1630 [[nodiscard]] bool emitInstanceCall(const SymbolicAddressSignature& builtin);
  // memory.grow / memory.size.
  [[nodiscard]] bool emitMemoryGrow();
  [[nodiscard]] bool emitMemorySize();

  // Reference operations: ref.func / ref.null / ref.is_null.
  [[nodiscard]] bool emitRefFunc();
  [[nodiscard]] bool emitRefNull();
  [[nodiscard]] bool emitRefIsNull();
#ifdef ENABLE_WASM_FUNCTION_REFERENCES
  // Typed function references proposal: null-sensitive branches and calls
  // through a function reference.
  [[nodiscard]] bool emitRefAsNonNull();
  [[nodiscard]] bool emitBrOnNull();
  [[nodiscard]] bool emitBrOnNonNull();
  [[nodiscard]] bool emitCallRef();
  [[nodiscard]] bool emitReturnCallRef();
#endif
  // Atomic operations (threads proposal).  `viewType` is the memory view
  // (access width); `type` is the wasm value type involved.
  [[nodiscard]] bool emitAtomicCmpXchg(ValType type, Scalar::Type viewType);
  [[nodiscard]] bool emitAtomicLoad(ValType type, Scalar::Type viewType);
  [[nodiscard]] bool emitAtomicRMW(ValType type, Scalar::Type viewType,
                                   AtomicOp op);
  [[nodiscard]] bool emitAtomicStore(ValType type, Scalar::Type viewType);
  // wait/notify: the emit* entry points and their lower-level helpers that
  // take an already-constructed MemoryAccessDesc.
  [[nodiscard]] bool emitWait(ValType type, uint32_t byteSize);
  [[nodiscard]] bool atomicWait(ValType type, MemoryAccessDesc* access);
  [[nodiscard]] bool emitWake();
  [[nodiscard]] bool atomicWake(MemoryAccessDesc* access);
  // atomic.fence.
  [[nodiscard]] bool emitFence();
  [[nodiscard]] bool emitAtomicXchg(ValType type, Scalar::Type viewType);
  // Bulk-memory proposal: memory.init / memory.copy and friends.
  [[nodiscard]] bool emitMemInit();
  [[nodiscard]] bool emitMemCopy();
  // Out-of-line (instance-call) path for memory.copy between the given
  // source and destination memories.
  [[nodiscard]] bool memCopyCall(uint32_t dstMemIndex, uint32_t srcMemIndex);
  // Inline path; the M32 suffix marks it as specific to 32-bit memories.
  void memCopyInlineM32();
  [[nodiscard]] bool emitTableCopy();
  // Shared emitter for data.drop (isData == true) and elem.drop
  // (isData == false).
  [[nodiscard]] bool emitDataOrElemDrop(bool isData);
  // memory.fill, with out-of-line call and inline (32-bit memory) paths.
  [[nodiscard]] bool emitMemFill();
  [[nodiscard]] bool memFillCall(uint32_t memoryIndex);
  void memFillInlineM32();
  [[nodiscard]] bool emitTableInit();
  [[nodiscard]] bool emitTableFill();
  [[nodiscard]] bool emitMemDiscard();
  // table.get / table.grow / table.set / table.size.
  [[nodiscard]] bool emitTableGet();
  [[nodiscard]] bool emitTableGrow();
  [[nodiscard]] bool emitTableSet();
  [[nodiscard]] bool emitTableSize();
  // Bounds-check `index` against table `tableIndex`; `instance` is expected
  // to hold the Instance pointer (confirm register contract at the
  // definition).
  void emitTableBoundsCheck(uint32_t tableIndex, RegI32 index, RegPtr instance);
  // table.get / table.set specialized for anyref-typed tables.
  [[nodiscard]] bool emitTableGetAnyRef(uint32_t tableIndex);
  [[nodiscard]] bool emitTableSetAnyRef(uint32_t tableIndex);
#ifdef ENABLE_WASM_GC
  // GC proposal: struct allocation and field access.
  [[nodiscard]] bool emitStructNew();
  [[nodiscard]] bool emitStructNewDefault();
  [[nodiscard]] bool emitStructGet(FieldWideningOp wideningOp);
  [[nodiscard]] bool emitStructSet();
  // GC proposal: array allocation, initialization and element access.
  [[nodiscard]] bool emitArrayNew();
  [[nodiscard]] bool emitArrayNewFixed();
  [[nodiscard]] bool emitArrayNewDefault();
  [[nodiscard]] bool emitArrayNewData();
  [[nodiscard]] bool emitArrayNewElem();
  [[nodiscard]] bool emitArrayInitData();
  [[nodiscard]] bool emitArrayInitElem();
  [[nodiscard]] bool emitArrayGet(FieldWideningOp wideningOp);
  [[nodiscard]] bool emitArraySet();
  [[nodiscard]] bool emitArrayLen();
  [[nodiscard]] bool emitArrayCopy();
  [[nodiscard]] bool emitArrayFill();
  // i31 references: ref.i31 and i31.get (widening op selects s/u form).
  [[nodiscard]] bool emitRefI31();
  [[nodiscard]] bool emitI31Get(FieldWideningOp wideningOp);
  // Downcast tests and casts; `nullable` selects the null-accepting variant.
  [[nodiscard]] bool emitRefTest(bool nullable);
  [[nodiscard]] bool emitRefCast(bool nullable);
  // Shared emitter for the br_on_cast family; `onSuccess` selects whether
  // the branch is taken when the cast succeeds or when it fails.
  [[nodiscard]] bool emitBrOnCastCommon(bool onSuccess,
                                        uint32_t labelRelativeDepth,
                                        const ResultType& labelType,
                                        RefType sourceType, RefType destType);
  [[nodiscard]] bool emitBrOnCast(bool onSuccess);
  // any.convert_extern / extern.convert_any.
  [[nodiscard]] bool emitAnyConvertExtern();
  [[nodiscard]] bool emitExternConvertAny();
1707 // Utility classes/methods to add trap information related to
1708 // null pointer dereferences/accesses.
1709 struct NoNullCheck {
1710 static void emitNullCheck(BaseCompiler* bc, RegRef rp) {}
1711 static void emitTrapSite(BaseCompiler* bc, FaultingCodeOffset fco,
1712 TrapMachineInsn tmi) {}
1714 struct SignalNullCheck {
1715 static void emitNullCheck(BaseCompiler* bc, RegRef rp);
1716 static void emitTrapSite(BaseCompiler* bc, FaultingCodeOffset fco,
1717 TrapMachineInsn tmi);
  // Load a pointer to the TypeDefInstanceData for a given type index
  RegPtr loadTypeDefInstanceData(uint32_t typeIndex);
  // Load a pointer to the SuperTypeVector for a given type index
  RegPtr loadSuperTypeVector(uint32_t typeIndex);

  // Emits allocation code for a GC struct. The struct may have an out-of-line
  // data area; if so, `isOutlineStruct` will be true and `outlineBase` will be
  // allocated and must be freed.
  template <bool ZeroFields>
  bool emitStructAlloc(uint32_t typeIndex, RegRef* object,
                       bool* isOutlineStruct, RegPtr* outlineBase);
  // Emits allocation code for a dynamically-sized GC array.
  template <bool ZeroFields>
  bool emitArrayAlloc(uint32_t typeIndex, RegRef object, RegI32 numElements,
                      uint32_t elemSize);
  // Emits allocation code for a fixed-size GC array.
  template <bool ZeroFields>
  bool emitArrayAllocFixed(uint32_t typeIndex, RegRef object,
                           uint32_t numElements, uint32_t elemSize);

  // Array accessors; NullCheckPolicy (NoNullCheck / SignalNullCheck)
  // determines how a null `rp` is handled.
  template <typename NullCheckPolicy>
  RegPtr emitGcArrayGetData(RegRef rp);
  template <typename NullCheckPolicy>
  RegI32 emitGcArrayGetNumElements(RegRef rp);
  // Bounds-check `index` against `numElements`.
  void emitGcArrayBoundsCheck(RegI32 index, RegI32 numElements);
  // Typed GC field/element load (with widening) and scalar store; `T` is the
  // address form of the source/destination operand.
  template <typename T, typename NullCheckPolicy>
  void emitGcGet(StorageType type, FieldWideningOp wideningOp, const T& src);
  template <typename T, typename NullCheckPolicy>
  void emitGcSetScalar(const T& dst, StorageType type, AnyReg value);
  // Common code for both old and new ref.test instructions.
  void emitRefTestCommon(RefType sourceType, RefType destType);
  // Common code for both old and new ref.cast instructions.
  void emitRefCastCommon(RefType sourceType, RefType destType);

  // Allocate registers and branch if the given wasm ref is a subtype of the
  // given heap type.
  void branchIfRefSubtype(RegRef ref, RefType sourceType, RefType destType,
                          Label* label, bool onSuccess);

  // Write `value` to wasm struct `object`, at `areaBase + areaOffset`. The
  // caller must decide on the in- vs out-of-lineness before the call and set
  // the latter two accordingly; this routine does not take that into account.
  // The value in `object` is unmodified, but `areaBase` and `value` may get
  // trashed.
  template <typename NullCheckPolicy>
  [[nodiscard]] bool emitGcStructSet(RegRef object, RegPtr areaBase,
                                     uint32_t areaOffset, StorageType type,
                                     AnyReg value,
                                     PreBarrierKind preBarrierKind);

  // Write `value` to element `index` of wasm array `object`, whose data area
  // starts at `data`; `preBarrierKind` controls pre-barrier emission.
  [[nodiscard]] bool emitGcArraySet(RegRef object, RegPtr data, RegI32 index,
                                    const ArrayType& array, AnyReg value,
                                    PreBarrierKind preBarrierKind);
#endif  // ENABLE_WASM_GC
#ifdef ENABLE_WASM_SIMD
  // Combined and-not vector operation.
  void emitVectorAndNot();
#  ifdef ENABLE_WASM_RELAXED_SIMD
  // Relaxed-SIMD dot product with accumulate (i8x16/i7x16 add-saturate).
  void emitDotI8x16I7x16AddS();
#  endif

  // SIMD load/store helpers taking an already-constructed MemoryAccessDesc.
  void loadSplat(MemoryAccessDesc* access);
  void loadZero(MemoryAccessDesc* access);
  void loadExtend(MemoryAccessDesc* access, Scalar::Type viewType);
  void loadLane(MemoryAccessDesc* access, uint32_t laneIndex);
  void storeLane(MemoryAccessDesc* access, uint32_t laneIndex);

  // Opcode-level SIMD memory and shuffle emitters.
  [[nodiscard]] bool emitLoadSplat(Scalar::Type viewType);
  [[nodiscard]] bool emitLoadZero(Scalar::Type viewType);
  [[nodiscard]] bool emitLoadExtend(Scalar::Type viewType);
  [[nodiscard]] bool emitLoadLane(uint32_t laneSize);
  [[nodiscard]] bool emitStoreLane(uint32_t laneSize);
  [[nodiscard]] bool emitVectorShuffle();
  [[nodiscard]] bool emitVectorLaneSelect();
#  if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
  // x86/x64-only path for i64x2 shift right (no direct instruction).
  [[nodiscard]] bool emitVectorShiftRightI64x2();
#  endif
#endif
  // Call a builtin module function -- see WasmBuiltins.cpp for the set of
  // builtins (presumably; confirm at the definition).
  [[nodiscard]] bool emitCallBuiltinModuleFunc();
1802 } // namespace wasm
1803 } // namespace js
1805 #endif // wasm_wasm_baseline_object_h