/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_MacroAssembler_h
#define jit_MacroAssembler_h

#include "mozilla/EndianUtils.h"
#include "mozilla/MacroForEach.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Maybe.h"
#include "mozilla/Variant.h"
#if defined(JS_CODEGEN_X86)
#  include "jit/x86/MacroAssembler-x86.h"
#elif defined(JS_CODEGEN_X64)
#  include "jit/x64/MacroAssembler-x64.h"
#elif defined(JS_CODEGEN_ARM)
#  include "jit/arm/MacroAssembler-arm.h"
#elif defined(JS_CODEGEN_ARM64)
#  include "jit/arm64/MacroAssembler-arm64.h"
#elif defined(JS_CODEGEN_MIPS32)
#  include "jit/mips32/MacroAssembler-mips32.h"
#elif defined(JS_CODEGEN_MIPS64)
#  include "jit/mips64/MacroAssembler-mips64.h"
#elif defined(JS_CODEGEN_LOONG64)
#  include "jit/loong64/MacroAssembler-loong64.h"
#elif defined(JS_CODEGEN_RISCV64)
#  include "jit/riscv64/MacroAssembler-riscv64.h"
#elif defined(JS_CODEGEN_WASM32)
#  include "jit/wasm32/MacroAssembler-wasm32.h"
#elif defined(JS_CODEGEN_NONE)
#  include "jit/none/MacroAssembler-none.h"
#else
#  error "Unknown architecture!"
#endif
#include "jit/ABIArgGenerator.h"
#include "jit/ABIFunctions.h"
#include "jit/AtomicOp.h"
#include "jit/IonTypes.h"
#include "jit/MoveResolver.h"
#include "jit/VMFunctions.h"
#include "js/ScalarType.h"  // js::Scalar::Type
#include "util/Memory.h"
#include "vm/FunctionFlags.h"
#include "vm/Opcodes.h"
#include "vm/RealmFuses.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmFrame.h"
// [SMDOC] MacroAssembler multi-platform overview
//
// * How to read/write MacroAssembler method declarations:
//
// The following macros are made to avoid #ifdef around each method
// declaration of the Macro Assembler, and they are also used as a hint on
// the location of the implementations of each method. For example, the
// following declaration
//
//   void Pop(FloatRegister t) DEFINED_ON(x86_shared, arm);
//
// suggests the MacroAssembler::Pop(FloatRegister) method is implemented in
// x86-shared/MacroAssembler-x86-shared.h, and also in arm/MacroAssembler-arm.h.
//
// - If there is no annotation, then there is only one generic definition in
//   MacroAssembler.cpp.
//
// - If the declaration is "inline", then the method definition(s) would be in
//   the "-inl.h" variant of the same file(s).
//
// The script check_macroassembler_style.py (which runs on every build) is
// used to verify that method definitions match the annotation on the method
// declarations. If there is any difference, then you either forgot to define
// the method in one of the macro assemblers, or you forgot to update the
// annotation of the macro assembler declaration.
//
// Some convenient short-cuts are used to avoid repeating the same list of
// architectures on each method declaration, such as PER_ARCH and
// PER_SHARED_ARCH.
//
// Functions that are architecture-agnostic and are the same for all
// architectures, but that must be defined inline *in this header* to avoid
// used-before-defined warnings/errors that would occur if the definitions
// were in MacroAssembler-inl.h, should use the OOL_IN_HEADER marker at the
// end of the declaration:
//
//   inline uint32_t framePushed() const OOL_IN_HEADER;
//
// Such functions should then be defined immediately after MacroAssembler's
// definition, for example:
//
//   //{{{ check_macroassembler_style
//   inline uint32_t
//   MacroAssembler::framePushed() const
//   {
//     return framePushed_;
//   }
//   ////}}} check_macroassembler_style
#define ALL_ARCH mips32, mips64, arm, arm64, x86, x64, loong64, riscv64, wasm32
#define ALL_SHARED_ARCH \
  arm, arm64, loong64, riscv64, x86_shared, mips_shared, wasm32
// * How this macro works:
//
// DEFINED_ON is a macro which checks whether, for the current architecture,
// the method is defined on the macro assembler or not.
//
// For each architecture, we have a macro named DEFINED_ON_arch. This macro is
// empty if this is not the current architecture. Otherwise it must be either
// set to "define" or "crash" (only used for the none target so far).
//
// The DEFINED_ON macro maps the list of architecture names given as arguments
// to a list of macro names. For example,
//
//   DEFINED_ON(arm, x86_shared)
//
// is expanded to
//
//   DEFINED_ON_none DEFINED_ON_arm DEFINED_ON_x86_shared
//
// which are later expanded on ARM, x86, x64 by DEFINED_ON_EXPAND_ARCH_RESULTS
// to
//
//   define
//
// or if the JIT is disabled or set to no architecture to
//
//   crash
//
// or to nothing, if the current architecture is not listed in the list of
// arguments of DEFINED_ON. Note, only one of the DEFINED_ON_arch macros
// contributes to the non-empty result, which is the macro of the current
// architecture if it is listed in the arguments of DEFINED_ON.
//
// This result is appended to DEFINED_ON_RESULT_ before expanding the macro,
// which results in either no annotation, a MOZ_CRASH(), or a "= delete"
// annotation on the method declaration.
#define DEFINED_ON_x86
#define DEFINED_ON_x64
#define DEFINED_ON_x86_shared
#define DEFINED_ON_arm
#define DEFINED_ON_arm64
#define DEFINED_ON_mips32
#define DEFINED_ON_mips64
#define DEFINED_ON_mips_shared
#define DEFINED_ON_loong64
#define DEFINED_ON_riscv64
#define DEFINED_ON_wasm32
#define DEFINED_ON_none
// Specialize for each architecture.
#if defined(JS_CODEGEN_X86)
#  undef DEFINED_ON_x86
#  define DEFINED_ON_x86 define
#  undef DEFINED_ON_x86_shared
#  define DEFINED_ON_x86_shared define
#elif defined(JS_CODEGEN_X64)
#  undef DEFINED_ON_x64
#  define DEFINED_ON_x64 define
#  undef DEFINED_ON_x86_shared
#  define DEFINED_ON_x86_shared define
#elif defined(JS_CODEGEN_ARM)
#  undef DEFINED_ON_arm
#  define DEFINED_ON_arm define
#elif defined(JS_CODEGEN_ARM64)
#  undef DEFINED_ON_arm64
#  define DEFINED_ON_arm64 define
#elif defined(JS_CODEGEN_MIPS32)
#  undef DEFINED_ON_mips32
#  define DEFINED_ON_mips32 define
#  undef DEFINED_ON_mips_shared
#  define DEFINED_ON_mips_shared define
#elif defined(JS_CODEGEN_MIPS64)
#  undef DEFINED_ON_mips64
#  define DEFINED_ON_mips64 define
#  undef DEFINED_ON_mips_shared
#  define DEFINED_ON_mips_shared define
#elif defined(JS_CODEGEN_LOONG64)
#  undef DEFINED_ON_loong64
#  define DEFINED_ON_loong64 define
#elif defined(JS_CODEGEN_RISCV64)
#  undef DEFINED_ON_riscv64
#  define DEFINED_ON_riscv64 define
#elif defined(JS_CODEGEN_WASM32)
#  undef DEFINED_ON_wasm32
#  define DEFINED_ON_wasm32 define
#elif defined(JS_CODEGEN_NONE)
#  undef DEFINED_ON_none
#  define DEFINED_ON_none crash
#else
#  error "Unknown architecture!"
#endif
#define DEFINED_ON_RESULT_crash \
  { MOZ_CRASH(); }
#define DEFINED_ON_RESULT_define
#define DEFINED_ON_RESULT_ = delete

#define DEFINED_ON_DISPATCH_RESULT_2(Macro, Result) Macro##Result
#define DEFINED_ON_DISPATCH_RESULT(...) \
  DEFINED_ON_DISPATCH_RESULT_2(DEFINED_ON_RESULT_, __VA_ARGS__)
// We need to let the evaluation of MOZ_FOR_EACH terminate.
#define DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult) \
  DEFINED_ON_DISPATCH_RESULT ParenResult
#define DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult) \
  DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult)
#define DEFINED_ON_EXPAND_ARCH_RESULTS(ParenResult) \
  DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult)

#define DEFINED_ON_FWDARCH(Arch) DEFINED_ON_##Arch
#define DEFINED_ON_MAP_ON_ARCHS(ArchList) \
  DEFINED_ON_EXPAND_ARCH_RESULTS(         \
      (MOZ_FOR_EACH(DEFINED_ON_FWDARCH, (), ArchList)))

#define DEFINED_ON(...) DEFINED_ON_MAP_ON_ARCHS((none, __VA_ARGS__))

#define PER_ARCH DEFINED_ON(ALL_ARCH)
#define PER_SHARED_ARCH DEFINED_ON(ALL_SHARED_ARCH)
#define OOL_IN_HEADER
class JSLinearString;

struct ExpandoAndGeneration;

class FixedLengthTypedArrayObject;

enum class NativeIteratorIndices : uint32_t;

class BytecodeOffset;
class MemoryAccessDesc;

struct ModuleEnvironment;

enum class FailureMode : uint8_t;
enum class SymbolicAddress;

// Defined in JitFrames.h
enum class ExitFrameType : uint8_t;

class AutoSaveLiveRegisters;
class TemplateNativeObject;
class TemplateObject;

enum class CheckUnsafeCallWithABI {
  // Require the callee to use AutoUnsafeCallWithABI.
  Check,

  // We pushed an exit frame so this callWithABI can safely GC and walk the
  // stack.
  DontCheckHasExitFrame,

  // Don't check this callWithABI uses AutoUnsafeCallWithABI, for instance
  // because we're calling a simple helper function (like malloc or js_free)
  // that we can't change and/or that we know won't GC.
  DontCheckOther,
};

// This is a global function made to create the DynFn type in a controlled
// environment which would check if the function signature has been registered
// as an ABI function signature.
template <typename Sig>
static inline DynFn DynamicFunction(Sig fun);

enum class CharEncoding { Latin1, TwoByte };

constexpr uint32_t WasmCallerInstanceOffsetBeforeCall =
    wasm::FrameWithInstances::callerInstanceOffsetWithoutFrame();
constexpr uint32_t WasmCalleeInstanceOffsetBeforeCall =
    wasm::FrameWithInstances::calleeInstanceOffsetWithoutFrame();
// Allocation sites may be passed to GC thing allocation methods either via a
// register (for baseline compilation) or an enum indicating one of the
// catch-all allocation sites (for optimized compilation).
struct AllocSiteInput
    : public mozilla::Variant<Register, gc::CatchAllAllocSite> {
  using Base = mozilla::Variant<Register, gc::CatchAllAllocSite>;
  AllocSiteInput() : Base(gc::CatchAllAllocSite::Unknown) {}
  explicit AllocSiteInput(gc::CatchAllAllocSite catchAll) : Base(catchAll) {}
  explicit AllocSiteInput(Register reg) : Base(reg) {}
};
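
// For illustration only (not part of the original header): a hedged sketch of
// the two ways an allocation site can be supplied, where `siteReg` is a
// hypothetical register holding the allocation-site pointer:
//
//   AllocSiteInput catchAll(gc::CatchAllAllocSite::Unknown);  // optimized code
//   AllocSiteInput fromBaseline(siteReg);                     // baseline code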
#ifdef ENABLE_WASM_TAIL_CALLS
// Instance slots (including ShadowStackArea) and arguments size information
// from two neighboring frames.
// Used in Wasm tail calls to remove the frame.
struct ReturnCallAdjustmentInfo {
  uint32_t newSlotsAndStackArgBytes;
  uint32_t oldSlotsAndStackArgBytes;

  ReturnCallAdjustmentInfo(uint32_t newSlotsAndStackArgBytes,
                           uint32_t oldSlotsAndStackArgBytes)
      : newSlotsAndStackArgBytes(newSlotsAndStackArgBytes),
        oldSlotsAndStackArgBytes(oldSlotsAndStackArgBytes) {}
};
#endif  // ENABLE_WASM_TAIL_CALLS
// [SMDOC] Code generation invariants (incomplete)
//
// ## 64-bit GPRs carrying 32-bit values
//
// At least at the end of every JS or Wasm operation (= SpiderMonkey bytecode
// or Wasm bytecode; this is necessarily a little vague), if a 64-bit GPR has a
// 32-bit value, then the upper 32 bits of the register may be predictable in
// accordance with platform-specific rules, as follows.
//
// - On x64 and arm64, the upper bits are zero
// - On mips64 and loongarch64 the upper bits are the sign extension of the
//   32-bit value
// - (On risc-v we have no rule, having no port yet. Sign extension is the most
//   likely rule, but "unpredictable" is an option.)
//
// In most cases no extra work needs to be done to maintain the invariant:
//
// - 32-bit operations on x64 and arm64 zero-extend the result to 64 bits.
//   These operations ignore the upper bits of the inputs.
// - 32-bit operations on mips64 sign-extend the result to 64 bits (even many
//   that are labeled as "unsigned", eg ADDU, though not all, eg LU).
//   Additionally, the inputs to many 32-bit operations must be properly
//   sign-extended to avoid "unpredictable" behavior, and our simulators check
//   that inputs conform.
// - (32-bit operations on risc-v and loongarch64 sign-extend, much as mips,
//   but appear to ignore the upper bits of the inputs.)
//
// The upshot of these invariants is, among other things, that:
//
// - No code needs to be generated when a 32-bit value is extended to 64 bits
//   or a 64-bit value is wrapped to 32 bits, if the upper bits are known to be
//   correct because they resulted from an operation that produced them in a
//   canonical form.
// - Literal loads must be careful to avoid instructions that might extend the
//   literal in the wrong way.
// - Code that produces values using intermediate values with non-canonical
//   extensions must extend according to platform conventions before the
//   values are used.
//
// All optimizations are necessarily platform-specific and should only be used
// in platform-specific code. We may add architectures in the future that do
// not follow the patterns of the few architectures we already have.
//
// Also see MacroAssembler::debugAssertCanonicalInt32().
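
// For illustration only (not part of the original comment): a hedged sketch of
// re-establishing the canonical form before relying on the upper bits,
// assuming `reg` is a register whose upper 32 bits are not known to be
// canonical:
//
//   masm.move32To64ZeroExtend(reg, Register64(reg));  // x64/arm64 convention
//   masm.move32To64SignExtend(reg, Register64(reg));  // mips64/loong64 convention
//
// Pick the variant matching the platform rule described above; both methods
// are declared later in this header.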
// The public entrypoint for emitting assembly. Note that a MacroAssembler can
// use cx->lifoAlloc, so take care not to interleave masm use with other
// lifoAlloc use if one will be destroyed before the other.
class MacroAssembler : public MacroAssemblerSpecific {
 private:
  // Information about the current JSRuntime. This is nullptr only for Wasm
  // compilations.
  CompileRuntime* maybeRuntime_ = nullptr;

  // Information about the current Realm. This is nullptr for Wasm compilations
  // and when compiling JitRuntime trampolines.
  CompileRealm* maybeRealm_ = nullptr;

  // Labels for handling exceptions and failures.
  NonAssertingLabel failureLabel_;
 protected:
  // Constructor is protected. Use one of the derived classes!
  explicit MacroAssembler(TempAllocator& alloc,
                          CompileRuntime* maybeRuntime = nullptr,
                          CompileRealm* maybeRealm = nullptr);

 public:
  MoveResolver& moveResolver() {
    // As an optimization, the MoveResolver is a persistent data structure
    // shared between visitors in the CodeGenerator. This assertion
    // checks that state is not leaking from visitor to visitor
    // via an unresolved addMove().
    MOZ_ASSERT(moveResolver_.hasNoPendingMoves());
    return moveResolver_;
  }

  size_t instructionsSize() const { return size(); }

  CompileRealm* realm() const {
    MOZ_ASSERT(maybeRealm_);
    return maybeRealm_;
  }
  CompileRuntime* runtime() const {
    MOZ_ASSERT(maybeRuntime_);
    return maybeRuntime_;
  }

#ifdef JS_HAS_HIDDEN_SP
  void Push(RegisterOrSP reg);
#endif

#ifdef ENABLE_WASM_SIMD
  // `op` should be a shift operation. Return true if a variable-width shift
  // operation on this architecture should pre-mask the shift count, and if
  // so, return the mask in `*mask`.
  static bool MustMaskShiftCountSimd128(wasm::SimdOp op, int32_t* mask);
#endif
  //{{{ check_macroassembler_decl_style

  // ===============================================================
  // MacroAssembler high-level usage.

  // Flushes the assembly buffer, on platforms that need it.
  void flush() PER_SHARED_ARCH;

  // Add a comment that is visible in the pretty printed assembly code.
  void comment(const char* msg) PER_SHARED_ARCH;

  // ===============================================================
  // Frame manipulation functions.

  inline uint32_t framePushed() const OOL_IN_HEADER;
  inline void setFramePushed(uint32_t framePushed) OOL_IN_HEADER;
  inline void adjustFrame(int32_t value) OOL_IN_HEADER;

  // Adjust the frame, to account for implicit modification of the stack
  // pointer, such that the callee can remove arguments on behalf of the
  // caller.
  inline void implicitPop(uint32_t bytes) OOL_IN_HEADER;

 private:
  // This field is used to statically (at compilation time) emulate a frame
  // pointer by keeping track of stack manipulations.
  //
  // It is maintained by all stack manipulation functions below.
  uint32_t framePushed_;
 public:
  // ===============================================================
  // Stack manipulation functions -- sets of registers.

  // Approximately speaking, the following routines must use the same memory
  // layout. Any inconsistencies will certainly lead to crashing in generated
  // code:
  //
  //   MacroAssembler::PushRegsInMaskSizeInBytes
  //   MacroAssembler::PushRegsInMask
  //   MacroAssembler::storeRegsInMask
  //   MacroAssembler::PopRegsInMask
  //   MacroAssembler::PopRegsInMaskIgnore
  //   FloatRegister::getRegisterDumpOffsetInBytes
  //   (no class) PushRegisterDump
  //   (union) RegisterContent
  //   JitRuntime::generateInvalidator
  //   JitRuntime::generateBailoutHandler
  //   JSJitFrameIter::machineState
  //
  // To be more exact, the invariants are:
  //
  // * The save area is conceptually viewed as starting at a highest address
  //   (really, at "highest address - 1") and working down to some lower
  //   address.
  //
  // * PushRegsInMask, storeRegsInMask and PopRegsInMask{Ignore} must use
  //   exactly the same memory layout, when starting from the abovementioned
  //   highest address.
  //
  // * PushRegsInMaskSizeInBytes must produce a value which is exactly equal
  //   to the change in the machine's stack pointer register as a result of
  //   calling PushRegsInMask or PopRegsInMask{Ignore}. This value must be at
  //   least uintptr_t-aligned on the target, and may be more aligned than
  //   that.
  //
  // * PushRegsInMaskSizeInBytes must produce a value which is greater than or
  //   equal to the amount of space used by storeRegsInMask.
  //
  // * Hence, regardless of whether the save area is created with
  //   storeRegsInMask or PushRegsInMask, it is guaranteed to fit inside an
  //   area of size calculated by PushRegsInMaskSizeInBytes.
  //
  // * For the `ignore` argument of PopRegsInMaskIgnore, equality checking
  //   for the floating point/SIMD registers is done on the basis of the
  //   underlying physical register, regardless of width. For example, if the
  //   to-restore set contains v17 (the SIMD register with encoding 17) and
  //   the ignore set contains d17 (the double register with encoding 17) then
  //   no part of the physical register with encoding 17 will be restored.
  //   (This is probably not true on arm32, since that has aliased float32
  //   registers; but none of our other targets do.)
  //
  // * {Push,store}RegsInMask/storeRegsInMask are further constrained as
  //   follows: when given the argument AllFloatRegisters, the resulting
  //   memory area must contain exactly all the SIMD/FP registers for the
  //   target at their widest width (that we care about). [We have no targets
  //   where the SIMD registers and FP register sets are disjoint.] They must
  //   be packed end-to-end with no holes, with the register with the lowest
  //   encoding number (0), as returned by FloatRegister::encoding(), at the
  //   abovementioned highest address, register 1 just below that, etc.
  //
  //   Furthermore the sizeof(RegisterContent) must equal the size of a SIMD
  //   register in the abovementioned array.
  //
  //   Furthermore the value returned by
  //   FloatRegister::getRegisterDumpOffsetInBytes must be a correct index
  //   into the abovementioned array. Given the constraints, the only correct
  //   value is `reg.encoding() * sizeof(RegisterContent)`.
  //
  // Note that some of the routines listed above are JS-only, and do not
  // support SIMD registers. They are otherwise part of the same equivalence
  // class. Register spilling for e.g. OOL VM calls is implemented using
  // PushRegsInMask, and recovered on bailout using machineState. This
  // requires the same layout to be used in machineState, and therefore in all
  // other code that can spill registers that are recovered on bailout.
  // Implementations of JitRuntime::generate{Invalidator,BailoutHandler}
  // should either call PushRegsInMask, or check carefully to be sure that
  // they generate the same layout.
  // The size of the area used by PushRegsInMask.
  static size_t PushRegsInMaskSizeInBytes(LiveRegisterSet set)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  void PushRegsInMask(LiveRegisterSet set)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);
  void PushRegsInMask(LiveGeneralRegisterSet set);

  // Like PushRegsInMask, but instead of pushing the registers, store them to
  // |dest|. |dest| should point to the end of the reserved space, so the
  // first register will be stored at |dest.offset - sizeof(register)|. It is
  // required that |dest.offset| is at least as large as the value computed by
  // PushRegsInMaskSizeInBytes for this |set|. In other words, |dest.base|
  // must point to either the lowest address in the save area, or some address
  // below that.
  void storeRegsInMask(LiveRegisterSet set, Address dest, Register scratch)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  void PopRegsInMask(LiveRegisterSet set);
  void PopRegsInMask(LiveGeneralRegisterSet set);
  void PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);
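
  // For illustration only (not part of the original header): a hedged sketch
  // of a balanced spill/restore pair using the routines above, where
  // `liveRegs` and `ignoreSet` are hypothetical sets chosen by the caller:
  //
  //   masm.PushRegsInMask(liveRegs);
  //   ... code that clobbers registers ...
  //   masm.PopRegsInMask(liveRegs);
  //
  // or, when a register in `liveRegs` now holds a result that must survive:
  //
  //   masm.PopRegsInMaskIgnore(liveRegs, ignoreSet);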
  // ===============================================================
  // Stack manipulation functions -- single registers/values.

  void Push(const Operand op) DEFINED_ON(x86_shared);
  void Push(Register reg) PER_SHARED_ARCH;
  void Push(Register reg1, Register reg2, Register reg3, Register reg4)
      DEFINED_ON(arm64);
  void Push(const Imm32 imm) PER_SHARED_ARCH;
  void Push(const ImmWord imm) PER_SHARED_ARCH;
  void Push(const ImmPtr imm) PER_SHARED_ARCH;
  void Push(const ImmGCPtr ptr) PER_SHARED_ARCH;
  void Push(FloatRegister reg) PER_SHARED_ARCH;
  void PushBoxed(FloatRegister reg) PER_ARCH;
  void PushFlags() DEFINED_ON(x86_shared);
  void Push(PropertyKey key, Register scratchReg);
  void Push(const Address& addr);
  void Push(TypedOrValueRegister v);
  void Push(const ConstantOrRegister& v);
  void Push(const ValueOperand& val);
  void Push(const Value& val);
  void Push(JSValueType type, Register reg);
  void Push(const Register64 reg);
  void PushEmptyRooted(VMFunctionData::RootType rootType);
  inline CodeOffset PushWithPatch(ImmWord word);
  inline CodeOffset PushWithPatch(ImmPtr imm);

  void Pop(const Operand op) DEFINED_ON(x86_shared);
  void Pop(Register reg) PER_SHARED_ARCH;
  void Pop(FloatRegister t) PER_SHARED_ARCH;
  void Pop(const ValueOperand& val) PER_SHARED_ARCH;
  void PopFlags() DEFINED_ON(x86_shared);
  void PopStackPtr()
      DEFINED_ON(arm, mips_shared, x86_shared, loong64, riscv64, wasm32);

  // Move the stack pointer based on the requested amount.
  void adjustStack(int amount);
  void freeStack(uint32_t amount);

  // Move the stack pointer to the specified position. It assumes the SP
  // register is not valid -- it uses FP to set the position.
  void freeStackTo(uint32_t framePushed)
      DEFINED_ON(x86_shared, arm, arm64, loong64, mips64, riscv64);

  // Warning: This method does not update the framePushed() counter.
  void freeStack(Register amount);
 private:
  // ===============================================================
  // Register allocation fields.
#ifdef DEBUG
  friend AutoRegisterScope;
  friend AutoFloatRegisterScope;
  // Used to track register scopes for debug builds.
  // Manipulated by the AutoGenericRegisterScope class.
  AllocatableRegisterSet debugTrackedRegisters_;
#endif  // DEBUG
 public:
  // ===============================================================
  // Simple call functions.

  // The returned CodeOffset is the assembler offset for the instruction
  // immediately following the call; that is, for the return point.
  CodeOffset call(Register reg) PER_SHARED_ARCH;
  CodeOffset call(Label* label) PER_SHARED_ARCH;

  void call(const Address& addr) PER_SHARED_ARCH;
  void call(ImmWord imm) PER_SHARED_ARCH;
  // Call a target native function, which is neither traceable nor movable.
  void call(ImmPtr imm) PER_SHARED_ARCH;
  CodeOffset call(wasm::SymbolicAddress imm) PER_SHARED_ARCH;
  inline CodeOffset call(const wasm::CallSiteDesc& desc,
                         wasm::SymbolicAddress imm);

  // Call a target JitCode, which must be traceable, and may be movable.
  void call(JitCode* c) PER_SHARED_ARCH;

  inline void call(TrampolinePtr code);

  inline CodeOffset call(const wasm::CallSiteDesc& desc, const Register reg);
  inline CodeOffset call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex);
  inline void call(const wasm::CallSiteDesc& desc, wasm::Trap trap);

  CodeOffset callWithPatch() PER_SHARED_ARCH;
  void patchCall(uint32_t callerOffset, uint32_t calleeOffset) PER_SHARED_ARCH;

  // Push the return address and make a call. On platforms where this function
  // is not defined, push the link register (pushReturnAddress) at the entry
  // point of the callee.
  void callAndPushReturnAddress(Register reg) DEFINED_ON(x86_shared);
  void callAndPushReturnAddress(Label* label) DEFINED_ON(x86_shared);

  // These do not adjust framePushed().
  void pushReturnAddress()
      DEFINED_ON(mips_shared, arm, arm64, loong64, riscv64, wasm32);
  void popReturnAddress()
      DEFINED_ON(mips_shared, arm, arm64, loong64, riscv64, wasm32);

  // Useful for dealing with two-valued returns.
  void moveRegPair(Register src0, Register src1, Register dst0, Register dst1,
                   MoveOp::Type type = MoveOp::GENERAL);

  void reserveVMFunctionOutParamSpace(const VMFunctionData& f);
  void loadVMFunctionOutParam(const VMFunctionData& f, const Address& addr);
658 // Patchable near/far jumps.
660 // "Far jumps" provide the ability to jump to any uint32_t offset from any
661 // other uint32_t offset without using a constant pool (thus returning a
662 // simple CodeOffset instead of a CodeOffsetJump).
663 CodeOffset
farJumpWithPatch() PER_SHARED_ARCH
;
664 void patchFarJump(CodeOffset farJump
, uint32_t targetOffset
) PER_SHARED_ARCH
;
666 // Emit a nop that can be patched to and from a nop and a call with int32
667 // relative displacement.
668 CodeOffset
nopPatchableToCall() PER_SHARED_ARCH
;
669 void nopPatchableToCall(const wasm::CallSiteDesc
& desc
);
670 static void patchNopToCall(uint8_t* callsite
,
671 uint8_t* target
) PER_SHARED_ARCH
;
672 static void patchCallToNop(uint8_t* callsite
) PER_SHARED_ARCH
;
674 // These methods are like movWithPatch/PatchDataWithValueCheck but allow
675 // using pc-relative addressing on certain platforms (RIP-relative LEA on x64,
676 // ADR instruction on arm64).
678 // Note: "Near" applies to ARM64 where the target must be within 1 MB (this is
679 // release-asserted).
680 CodeOffset
moveNearAddressWithPatch(Register dest
) PER_ARCH
;
681 static void patchNearAddressMove(CodeLocationLabel loc
,
682 CodeLocationLabel target
)
683 DEFINED_ON(x86
, x64
, arm
, arm64
, loong64
, riscv64
, wasm32
, mips_shared
);
  // ===============================================================
  // [SMDOC] JIT-to-C++ Function Calls (callWithABI)
  //
  // callWithABI is used to make a call using the standard C/C++ system ABI.
  //
  // callWithABI is a low level interface for making calls, as such every call
  // made with callWithABI should be organized with 6 steps: spilling live
  // registers, aligning the stack, listing arguments of the called function,
  // calling a function pointer, extracting the returned value and restoring
  // live registers.
  //
  // A more detailed example of the six stages:
  //
  // 1) Saving of registers that are live. This will vary depending on which
  //    SpiderMonkey compiler you are working on. Registers that shouldn't be
  //    restored can be excluded.
  //
  //      LiveRegisterSet volatileRegs(...);
  //      volatileRegs.take(scratch);
  //      masm.PushRegsInMask(volatileRegs);
  //
  // 2) Align the stack to perform the call with the correct stack alignment.
  //
  //    When the stack pointer alignment is unknown and cannot be corrected
  //    when generating the code, setupUnalignedABICall must be used to
  //    dynamically align the stack pointer to the expectation of the ABI.
  //    When the stack pointer is known at JIT compilation time, the stack can
  //    be fixed manually and setupAlignedABICall and setupWasmABICall can be
  //    used.
  //
  //    setupWasmABICall is a special case of setupAlignedABICall as
  //    SpiderMonkey's WebAssembly implementation mostly follows the system
  //    ABI, except for float/double arguments, which always use floating
  //    point registers, even if this is not supported by the system ABI.
  //
  //      masm.setupUnalignedABICall(scratch);
  //
  // 3) Passing arguments. Arguments are passed left-to-right.
  //
  //      masm.passABIArg(scratch);
  //      masm.passABIArg(FloatOp0, ABIType::Float64);
  //
  //    Note how float register arguments are annotated with ABIType::Float64.
  //
  //    Concerning stack-relative addresses, see the note on passABIArg.
  //
  // 4) Make the call:
  //
  //      using Fn = int32_t (*)(int32_t)
  //      masm.callWithABI<Fn, Callee>();
  //
  //    In the case where the call returns a double, that needs to be
  //    indicated to the callWithABI like this:
  //
  //      using Fn = double (*)(int32_t)
  //      masm.callWithABI<Fn, Callee>(ABIType::Float64);
  //
  //    There are overloads to allow calls to registers and addresses.
  //
  // 5) Take care of the result
  //
  //      masm.storeCallPointerResult(scratch1);
  //      masm.storeCallBoolResult(scratch1);
  //      masm.storeCallInt32Result(scratch1);
  //      masm.storeCallFloatResult(scratch1);
  //
  // 6) Restore the potentially clobbered volatile registers
  //
  //      masm.PopRegsInMask(volatileRegs);
  //
  //    If expecting a returned value, this call should use
  //    PopRegsInMaskIgnore to filter out the registers which are containing
  //    the returned value.
  //
  // Unless an exit frame is pushed prior to the setupABICall, the callee
  // should not GC. To ensure this is the case callWithABI is instrumented to
  // make sure that in the default case callees are annotated with an
  // AutoUnsafeCallWithABI on the stack.
  //
  // A callWithABI can opt out of checking, if for example it is known there
  // is an exit frame, or the callee is known not to GC.
  //
  // If your callee needs to be able to GC, consider using a VMFunction, or
  // create a fake exit frame, and instrument the TraceJitExitFrame
  // accordingly.
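  //
  // For illustration only: a hedged end-to-end sketch of the six steps above,
  // assuming a hypothetical helper `int32_t HelperFn(int32_t)` and
  // caller-chosen registers `input`, `output` and `scratch`:
  //
  //   LiveRegisterSet volatileRegs(...);          // 1) spill live registers
  //   volatileRegs.take(output);
  //   masm.PushRegsInMask(volatileRegs);
  //   masm.setupUnalignedABICall(scratch);        // 2) align the stack
  //   masm.passABIArg(input);                     // 3) pass arguments
  //   using Fn = int32_t (*)(int32_t);
  //   masm.callWithABI<Fn, HelperFn>();           // 4) make the call
  //   masm.storeCallInt32Result(output);          // 5) take care of the result
  //   masm.PopRegsInMask(volatileRegs);           // 6) restore volatile registers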
  // Setup a call to C/C++ code, given the assumption that the framePushed
  // accurately defines the state of the stack, and that the top of the stack
  // was properly aligned. Note that this only supports cdecl.
  //
  // As a rule of thumb, this can be used in CodeGenerator but not in CacheIR
  // or Baseline code (because the stack is not aligned to ABIStackAlignment).
  void setupAlignedABICall();

  // As setupAlignedABICall, but for WebAssembly native ABI calls, which pass
  // through a builtin thunk that uses the wasm ABI. All the wasm ABI calls
  // can be native, since we always know the stack alignment a priori.
  void setupWasmABICall();

  // Setup an ABI call for when the alignment is not known. This may need a
  // scratch register.
  void setupUnalignedABICall(Register scratch) PER_ARCH;

  // Like setupUnalignedABICall, but more efficient because it doesn't
  // push/pop the unaligned stack pointer. The caller is responsible for
  // restoring SP after the callWithABI, for example using the frame pointer
  // register.
  void setupUnalignedABICallDontSaveRestoreSP();
  // Arguments must be assigned to a C/C++ call in order. They are moved
  // in parallel immediately before performing the call. This process may
  // temporarily use more stack, in which case esp-relative addresses will be
  // automatically adjusted. It is extremely important that esp-relative
  // addresses are computed *after* setupABICall(). Furthermore, no
  // operations should be emitted while setting arguments.
  void passABIArg(const MoveOperand& from, ABIType type);
  inline void passABIArg(Register reg);
  inline void passABIArg(FloatRegister reg, ABIType type);

  inline void callWithABI(
      DynFn fun, ABIType result = ABIType::General,
      CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check);
  template <typename Sig, Sig fun>
  inline void callWithABI(
      ABIType result = ABIType::General,
      CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check);
  inline void callWithABI(Register fun, ABIType result = ABIType::General);
  inline void callWithABI(const Address& fun,
                          ABIType result = ABIType::General);

  CodeOffset callWithABI(wasm::BytecodeOffset offset, wasm::SymbolicAddress fun,
                         mozilla::Maybe<int32_t> instanceOffset,
                         ABIType result = ABIType::General);
  void callDebugWithABI(wasm::SymbolicAddress fun,
                        ABIType result = ABIType::General);
 private:
  // Reinitialize the variables which have to be cleared before making a call
  // with callWithABI.
  template <class ABIArgGeneratorT>
  void setupABICallHelper();

  // Reinitialize the variables which have to be cleared before making a call
  // with callWithABI with the native ABI.
  void setupNativeABICall();

  // Reserve the stack and resolve the arguments move.
  void callWithABIPre(uint32_t* stackAdjust,
                      bool callFromWasm = false) PER_ARCH;

  // Emits a call to a C/C++ function, resolving all argument moves.
  void callWithABINoProfiler(void* fun, ABIType result,
                             CheckUnsafeCallWithABI check);
  void callWithABINoProfiler(Register fun, ABIType result) PER_ARCH;
  void callWithABINoProfiler(const Address& fun, ABIType result) PER_ARCH;

  // Restore the stack to its state before the setup function call.
  void callWithABIPost(uint32_t stackAdjust, ABIType result,
                       bool callFromWasm = false) PER_ARCH;

  // Create the signature to be able to decode the arguments of a native
  // function, when calling a function within the simulator.
  inline void appendSignatureType(ABIType type);
  inline ABIFunctionType signature() const;

  // Private variables used to handle moves between registers given as
  // arguments to passABIArg and the list of ABI registers expected for the
  // signature of the function.
  MoveResolver moveResolver_;

  // Architecture specific implementation which specifies how registers &
  // stack offsets are used for calling a function.
  ABIArgGenerator abiArgs_;

#ifdef DEBUG
  // Flag used to assert that we use ABI functions in the right context.
  bool inCall_;
#endif

  // If set by setupUnalignedABICall then callWithABI will pop the stack
  // register which is on the stack.
  bool dynamicAlignment_;

#ifdef JS_SIMULATOR
  // The signature is used to accumulate all types of arguments which are used
  // by the caller. This is used by the simulators to decode the arguments
  // properly, and cast the function pointer to the right type.
  uint32_t signature_ = 0;
#endif
 public:
  // ===============================================================
  // Jit Frames.
  //
  // These functions are used to build the content of the Jit frames. See
  // CommonFrameLayout class, and all its derivatives. The content should be
  // pushed in the opposite order as the fields of the structures, such that
  // the structures can be used to interpret the content of the stack.

  // Call the Jit function, and push the return address (or let the callee
  // push the return address).
  //
  // These functions return the offset of the return address, in order to use
  // the return address to index the safepoints, which are used to list all
  // live registers.
  inline uint32_t callJitNoProfiler(Register callee);
  inline uint32_t callJit(Register callee);
  inline uint32_t callJit(JitCode* code);
  inline uint32_t callJit(TrampolinePtr code);
  inline uint32_t callJit(ImmPtr callee);

  // The frame descriptor is the second field of all Jit frames, pushed before
  // calling the Jit function. See CommonFrameLayout::descriptor_.
  inline void pushFrameDescriptor(FrameType type);
  inline void PushFrameDescriptor(FrameType type);

  // For JitFrameLayout, the descriptor also stores the number of arguments
  // passed by the caller. See MakeFrameDescriptorForJitCall.
  inline void pushFrameDescriptorForJitCall(FrameType type, uint32_t argc);
  inline void pushFrameDescriptorForJitCall(FrameType type, Register argc,
                                            Register scratch);
  inline void PushFrameDescriptorForJitCall(FrameType type, uint32_t argc);
  inline void PushFrameDescriptorForJitCall(FrameType type, Register argc,
                                            Register scratch);

  // Load the number of actual arguments from the frame's JitFrameLayout.
  inline void loadNumActualArgs(Register framePtr, Register dest);

  // Push the callee token of a JSFunction whose pointer is stored in the
  // |callee| register. The callee token is packed with a |constructing| flag
  // which corresponds to whether the JS function is called with "new" or
  // not.
  inline void PushCalleeToken(Register callee, bool constructing);

  // Unpack a callee token located at the |token| address, and return the
  // JSFunction pointer in the |dest| register.
  inline void loadFunctionFromCalleeToken(Address token, Register dest);

  // This function emulates a call by pushing an exit frame on the stack,
  // except that the fake-function is inlined within the body of the caller.
  //
  // This function assumes that the current frame is an IonJS frame.
  //
  // This function returns the offset of the /fake/ return address, in order
  // to use the return address to index the safepoints, which are used to list
  // all live registers.
  //
  // This function should be balanced with a call to adjustStack, to pop the
  // exit frame and emulate the return statement of the inlined function.
  inline uint32_t buildFakeExitFrame(Register scratch);

 private:
  // This function is used by buildFakeExitFrame to push a fake return address
  // on the stack. This fake return address should never be used for resuming
  // any execution, and can even be an invalid pointer into the instruction
  // stream, as long as it does not alias any other.
  uint32_t pushFakeReturnAddress(Register scratch) PER_SHARED_ARCH;
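
  // For illustration only: a hedged sketch of balancing buildFakeExitFrame
  // with adjustStack, where the adjustStack argument is a placeholder for the
  // number of bytes the fake exit frame occupies:
  //
  //   uint32_t fakeReturnOffset = masm.buildFakeExitFrame(scratch);
  //   ... body of the inlined fake-function ...
  //   masm.adjustStack(...);  // balanced pop of the fake exit frame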
 public:
  // ===============================================================
  // Exit frame footer.
  //
  // When calling outside the Jit we push an exit frame. To mark the stack
  // correctly, we have to push additional information, called the Exit frame
  // footer, which is used to identify how the stack is marked.
  //
  // See JitFrames.h, and TraceJitExitFrame in JitFrames.cpp.

  // Links the exit frame and pushes the ExitFooterFrame.
  inline void enterExitFrame(Register cxreg, Register scratch, VMFunctionId f);

  // Push an exit frame token to identify which fake exit frame this footer
  // corresponds to.
  inline void enterFakeExitFrame(Register cxreg, Register scratch,
                                 ExitFrameType type);

  // Push an exit frame token for a native call.
  inline void enterFakeExitFrameForNative(Register cxreg, Register scratch,
                                          bool isConstructing);

  // Pop ExitFrame footer in addition to the extra frame.
  inline void leaveExitFrame(size_t extraFrame = 0);

 private:
  // Save the top of the stack into JitActivation::packedExitFP of the
  // current thread, which should be the location of the latest exit frame.
  void linkExitFrame(Register cxreg, Register scratch);
 public:
  // ===============================================================
  // Move instructions

  inline void move64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void move64(Register64 src, Register64 dest) PER_ARCH;

  inline void moveFloat32ToGPR(FloatRegister src,
                               Register dest) PER_SHARED_ARCH;
  inline void moveGPRToFloat32(Register src,
                               FloatRegister dest) PER_SHARED_ARCH;

  inline void moveDoubleToGPR64(FloatRegister src, Register64 dest) PER_ARCH;
  inline void moveGPR64ToDouble(Register64 src, FloatRegister dest) PER_ARCH;

  inline void move8ZeroExtend(Register src, Register dest) PER_SHARED_ARCH;

  inline void move8SignExtend(Register src, Register dest) PER_SHARED_ARCH;
  inline void move16SignExtend(Register src, Register dest) PER_SHARED_ARCH;

  // move64To32 will clear the high bits of `dest` on 64-bit systems.
  inline void move64To32(Register64 src, Register dest) PER_ARCH;

  inline void move32To64ZeroExtend(Register src, Register64 dest) PER_ARCH;

  inline void move8To64SignExtend(Register src, Register64 dest) PER_ARCH;
  inline void move16To64SignExtend(Register src, Register64 dest) PER_ARCH;
  inline void move32To64SignExtend(Register src, Register64 dest) PER_ARCH;

  inline void move32SignExtendToPtr(Register src, Register dest) PER_ARCH;
  inline void move32ZeroExtendToPtr(Register src, Register dest) PER_ARCH;

  // Copy a constant, typed-register, or a ValueOperand into a ValueOperand
  // destination.
  inline void moveValue(const ConstantOrRegister& src,
                        const ValueOperand& dest);
  void moveValue(const TypedOrValueRegister& src,
                 const ValueOperand& dest) PER_ARCH;
  void moveValue(const ValueOperand& src, const ValueOperand& dest) PER_ARCH;
  void moveValue(const Value& src, const ValueOperand& dest) PER_ARCH;

  void movePropertyKey(PropertyKey key, Register dest);
  // ===============================================================
  // Load instructions

  inline void load32SignExtendToPtr(const Address& src, Register dest) PER_ARCH;

  inline void loadAbiReturnAddress(Register dest) PER_SHARED_ARCH;

  // ===============================================================
  // Copy instructions

  inline void copy64(const Address& src, const Address& dest, Register scratch);
  // ===============================================================
  // Logical instructions

  inline void not32(Register reg) PER_SHARED_ARCH;
  inline void notPtr(Register reg) PER_ARCH;

  inline void and32(Register src, Register dest) PER_SHARED_ARCH;
  inline void and32(Imm32 imm, Register dest) PER_SHARED_ARCH;
  inline void and32(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
  inline void and32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
  inline void and32(const Address& src, Register dest) PER_SHARED_ARCH;

  inline void andPtr(Register src, Register dest) PER_ARCH;
  inline void andPtr(Imm32 imm, Register dest) PER_ARCH;

  inline void and64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void or64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void xor64(Imm64 imm, Register64 dest) PER_ARCH;

  inline void or32(Register src, Register dest) PER_SHARED_ARCH;
  inline void or32(Imm32 imm, Register dest) PER_SHARED_ARCH;
  inline void or32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;

  inline void orPtr(Register src, Register dest) PER_ARCH;
  inline void orPtr(Imm32 imm, Register dest) PER_ARCH;

  inline void and64(Register64 src, Register64 dest) PER_ARCH;
  inline void or64(Register64 src, Register64 dest) PER_ARCH;
  inline void xor64(Register64 src, Register64 dest) PER_ARCH;

  inline void xor32(Register src, Register dest) PER_SHARED_ARCH;
  inline void xor32(Imm32 imm, Register dest) PER_SHARED_ARCH;
  inline void xor32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
  inline void xor32(const Address& src, Register dest) PER_SHARED_ARCH;

  inline void xorPtr(Register src, Register dest) PER_ARCH;
  inline void xorPtr(Imm32 imm, Register dest) PER_ARCH;

  inline void and64(const Operand& src, Register64 dest)
      DEFINED_ON(x64, mips64, loong64, riscv64);
  inline void or64(const Operand& src, Register64 dest)
      DEFINED_ON(x64, mips64, loong64, riscv64);
  inline void xor64(const Operand& src, Register64 dest)
      DEFINED_ON(x64, mips64, loong64, riscv64);
  // ===============================================================
  // Swap instructions

  // Swap the two lower bytes and sign extend the result to 32-bit.
  inline void byteSwap16SignExtend(Register reg) PER_SHARED_ARCH;

  // Swap the two lower bytes and zero extend the result to 32-bit.
  inline void byteSwap16ZeroExtend(Register reg) PER_SHARED_ARCH;

  // Swap all four bytes in a 32-bit integer.
  inline void byteSwap32(Register reg) PER_SHARED_ARCH;

  // Swap all eight bytes in a 64-bit integer.
  inline void byteSwap64(Register64 reg) PER_ARCH;
  // ===============================================================
  // Arithmetic functions

  // Condition flags aren't guaranteed to be set by these functions, for
  // example x86 will always set condition flags, but ARM64 won't do it unless
  // explicitly requested. Instead use branch(Add|Sub|Mul|Neg) to test for
  // condition flags after performing arithmetic operations.
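  //
  // For illustration only: a hedged sketch of testing for overflow without
  // relying on flags set by add32, assuming a branchAdd32 overload of the
  // form (Condition, Register, Register, Label*) and an `onOverflow` label
  // provided by the caller:
  //
  //   masm.branchAdd32(Assembler::Overflow, rhs, lhsDest, &onOverflow);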
  inline void add32(Register src, Register dest) PER_SHARED_ARCH;
  inline void add32(Imm32 imm, Register dest) PER_SHARED_ARCH;
  inline void add32(Imm32 imm, Register src, Register dest) PER_SHARED_ARCH;
  inline void add32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
  inline void add32(Imm32 imm, const AbsoluteAddress& dest)
      DEFINED_ON(x86_shared);

  inline void addPtr(Register src, Register dest) PER_ARCH;
  inline void addPtr(Register src1, Register src2, Register dest)
      DEFINED_ON(arm64);
  inline void addPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void addPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
  inline void addPtr(ImmWord imm, Register dest) PER_ARCH;
  inline void addPtr(ImmPtr imm, Register dest);
  inline void addPtr(Imm32 imm, const Address& dest)
      DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
  inline void addPtr(Imm32 imm, const AbsoluteAddress& dest)
      DEFINED_ON(x86, x64);
  inline void addPtr(const Address& src, Register dest)
      DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);

  inline void add64(Register64 src, Register64 dest) PER_ARCH;
  inline void add64(Imm32 imm, Register64 dest) PER_ARCH;
  inline void add64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void add64(const Operand& src, Register64 dest)
      DEFINED_ON(x64, mips64, loong64, riscv64);

  inline void addFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  // Compute dest=SP-imm where dest is a pointer register and not SP. The
  // offset returned from sub32FromStackPtrWithPatch() must be passed to
  // patchSub32FromStackPtr().
  inline CodeOffset sub32FromStackPtrWithPatch(Register dest) PER_ARCH;
  inline void patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) PER_ARCH;

  inline void addDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void addConstantDouble(double d, FloatRegister dest) DEFINED_ON(x86);

  inline void sub32(const Address& src, Register dest) PER_SHARED_ARCH;
  inline void sub32(Register src, Register dest) PER_SHARED_ARCH;
  inline void sub32(Imm32 imm, Register dest) PER_SHARED_ARCH;

  inline void subPtr(Register src, Register dest) PER_ARCH;
  inline void subPtr(Register src, const Address& dest)
      DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
  inline void subPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void subPtr(ImmWord imm, Register dest) DEFINED_ON(x64);
  inline void subPtr(const Address& addr, Register dest)
      DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);

  inline void sub64(Register64 src, Register64 dest) PER_ARCH;
  inline void sub64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void sub64(const Operand& src, Register64 dest)
      DEFINED_ON(x64, mips64, loong64, riscv64);

  inline void subFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  inline void subDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void mul32(Register rhs, Register srcDest) PER_SHARED_ARCH;
  inline void mul32(Imm32 imm, Register srcDest) PER_SHARED_ARCH;

  inline void mul32(Register src1, Register src2, Register dest, Label* onOver)
      DEFINED_ON(arm64);

  // Return the high word of the unsigned multiplication into |dest|.
  inline void mulHighUnsigned32(Imm32 imm, Register src,
                                Register dest) PER_ARCH;

  inline void mulPtr(Register rhs, Register srcDest) PER_ARCH;

  inline void mul64(const Operand& src, const Register64& dest) DEFINED_ON(x64);
  inline void mul64(const Operand& src, const Register64& dest,
                    const Register temp)
      DEFINED_ON(x64, mips64, loong64, riscv64);
  inline void mul64(Imm64 imm, const Register64& dest) PER_ARCH;
  inline void mul64(Imm64 imm, const Register64& dest, const Register temp)
      DEFINED_ON(x86, x64, arm, mips32, mips64, loong64, riscv64);
  inline void mul64(const Register64& src, const Register64& dest,
                    const Register temp) PER_ARCH;
  inline void mul64(const Register64& src1, const Register64& src2,
                    const Register64& dest) DEFINED_ON(arm64);
  inline void mul64(Imm64 src1, const Register64& src2, const Register64& dest)
      DEFINED_ON(arm64);

  inline void mulBy3(Register src, Register dest) PER_ARCH;

  inline void mulFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void mulDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  inline void mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
      DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
  // Perform an integer division, returning the integer part rounded toward
  // zero. rhs must not be zero, and the division must not overflow.
  //
  // On ARM, the chip must have hardware division instructions.
  inline void quotient32(Register rhs, Register srcDest, bool isUnsigned)
      DEFINED_ON(mips_shared, arm, arm64, loong64, riscv64, wasm32);

  // As above, but srcDest must be eax and tempEdx must be edx.
  inline void quotient32(Register rhs, Register srcDest, Register tempEdx,
                         bool isUnsigned) DEFINED_ON(x86_shared);

  // Perform an integer division, returning the remainder part.
  // rhs must not be zero, and the division must not overflow.
  //
  // On ARM, the chip must have hardware division instructions.
  inline void remainder32(Register rhs, Register srcDest, bool isUnsigned)
      DEFINED_ON(mips_shared, arm, arm64, loong64, riscv64, wasm32);

  // As above, but srcDest must be eax and tempEdx must be edx.
  inline void remainder32(Register rhs, Register srcDest, Register tempEdx,
                          bool isUnsigned) DEFINED_ON(x86_shared);

  // Perform an integer division, returning the remainder part.
  // rhs must not be zero, and the division must not overflow.
  //
  // This variant preserves registers, and doesn't require hardware division
  // instructions on ARM (will call out to a runtime routine).
  //
  // rhs is preserved, srcDest is clobbered.
  void flexibleRemainder32(Register rhs, Register srcDest, bool isUnsigned,
                           const LiveRegisterSet& volatileLiveRegs)
      DEFINED_ON(mips_shared, arm, arm64, x86_shared, loong64, riscv64, wasm32);

  // Perform an integer division, returning the integer part rounded toward
  // zero. rhs must not be zero, and the division must not overflow.
  //
  // This variant preserves registers, and doesn't require hardware division
  // instructions on ARM (will call out to a runtime routine).
  //
  // rhs is preserved, srcDest is clobbered.
  void flexibleQuotient32(Register rhs, Register srcDest, bool isUnsigned,
                          const LiveRegisterSet& volatileLiveRegs)
      DEFINED_ON(mips_shared, arm, arm64, x86_shared, loong64, riscv64);

  // Perform an integer division, returning the integer part rounded toward
  // zero. rhs must not be zero, and the division must not overflow. The
  // remainder is stored into the third argument register here.
  //
  // This variant preserves registers, and doesn't require hardware division
  // instructions on ARM (will call out to a runtime routine).
  //
  // rhs is preserved, srcDest and remOutput are clobbered.
  void flexibleDivMod32(Register rhs, Register srcDest, Register remOutput,
                        bool isUnsigned,
                        const LiveRegisterSet& volatileLiveRegs)
      DEFINED_ON(mips_shared, arm, arm64, x86_shared, loong64, riscv64, wasm32);

  inline void divFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void divDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void inc64(AbsoluteAddress dest) PER_ARCH;

  inline void neg32(Register reg) PER_SHARED_ARCH;
  inline void neg64(Register64 reg) PER_ARCH;
  inline void negPtr(Register reg) PER_ARCH;

  inline void negateFloat(FloatRegister reg) PER_SHARED_ARCH;

  inline void negateDouble(FloatRegister reg) PER_SHARED_ARCH;

  inline void abs32(Register src, Register dest) PER_SHARED_ARCH;
  inline void absFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void absDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  inline void sqrtFloat32(FloatRegister src,
                          FloatRegister dest) PER_SHARED_ARCH;
  inline void sqrtDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  void floorFloat32ToInt32(FloatRegister src, Register dest,
                           Label* fail) PER_SHARED_ARCH;
  void floorDoubleToInt32(FloatRegister src, Register dest,
                          Label* fail) PER_SHARED_ARCH;

  void ceilFloat32ToInt32(FloatRegister src, Register dest,
                          Label* fail) PER_SHARED_ARCH;
  void ceilDoubleToInt32(FloatRegister src, Register dest,
                         Label* fail) PER_SHARED_ARCH;

  void roundFloat32ToInt32(FloatRegister src, Register dest, FloatRegister temp,
                           Label* fail) PER_SHARED_ARCH;
  void roundDoubleToInt32(FloatRegister src, Register dest, FloatRegister temp,
                          Label* fail) PER_SHARED_ARCH;

  void truncFloat32ToInt32(FloatRegister src, Register dest,
                           Label* fail) PER_SHARED_ARCH;
  void truncDoubleToInt32(FloatRegister src, Register dest,
                          Label* fail) PER_SHARED_ARCH;

  void nearbyIntDouble(RoundingMode mode, FloatRegister src,
                       FloatRegister dest) PER_SHARED_ARCH;
  void nearbyIntFloat32(RoundingMode mode, FloatRegister src,
                        FloatRegister dest) PER_SHARED_ARCH;
  void signInt32(Register input, Register output);
  void signDouble(FloatRegister input, FloatRegister output);
  void signDoubleToInt32(FloatRegister input, Register output,
                         FloatRegister temp, Label* fail);

  void copySignDouble(FloatRegister lhs, FloatRegister rhs,
                      FloatRegister output) PER_SHARED_ARCH;
  void copySignFloat32(FloatRegister lhs, FloatRegister rhs,
                       FloatRegister output) DEFINED_ON(x86_shared, arm64);

  // Returns a random double in range [0, 1) in |dest|. The |rng| register must
  // hold a pointer to a mozilla::non_crypto::XorShift128PlusRNG.
  void randomDouble(Register rng, FloatRegister dest, Register64 temp0,
                    Register64 temp1);

  // srcDest = {min,max}{Float32,Double}(srcDest, other)
  // For min and max, handle NaN specially if handleNaN is true.

  inline void minFloat32(FloatRegister other, FloatRegister srcDest,
                         bool handleNaN) PER_SHARED_ARCH;
  inline void minDouble(FloatRegister other, FloatRegister srcDest,
                        bool handleNaN) PER_SHARED_ARCH;

  inline void maxFloat32(FloatRegister other, FloatRegister srcDest,
                         bool handleNaN) PER_SHARED_ARCH;
  inline void maxDouble(FloatRegister other, FloatRegister srcDest,
                        bool handleNaN) PER_SHARED_ARCH;

  void minMaxArrayInt32(Register array, Register result, Register temp1,
                        Register temp2, Register temp3, bool isMax,
                        Label* fail);
  void minMaxArrayNumber(Register array, FloatRegister result,
                         FloatRegister floatTemp, Register temp1,
                         Register temp2, bool isMax, Label* fail);
  // Compute |pow(base, power)| and store the result in |dest|. If the result
  // exceeds the int32 range, jumps to |onOver|.
  // |base| and |power| are preserved, the other input registers are clobbered.
  void pow32(Register base, Register power, Register dest, Register temp1,
             Register temp2, Label* onOver);

  void sameValueDouble(FloatRegister left, FloatRegister right,
                       FloatRegister temp, Register dest);

  void branchIfNotRegExpPrototypeOptimizable(Register proto, Register temp,
                                             const GlobalObject* maybeGlobal,
                                             Label* fail);
  void branchIfNotRegExpInstanceOptimizable(Register regexp, Register temp,
                                            const GlobalObject* maybeGlobal,
                                            Label* fail);

  void loadRegExpLastIndex(Register regexp, Register string, Register lastIndex,
                           Label* notFoundZeroLastIndex);

  void loadAndClearRegExpSearcherLastLimit(Register result, Register scratch);

  void loadParsedRegExpShared(Register regexp, Register result,
                              Label* unparsed);
  // ===============================================================
  // Shift functions

  // For shift-by-register there may be platform-specific variations, for
  // example, x86 will perform the shift mod 32 but ARM will perform the shift
  // mod 256.
  //
  // For shift-by-immediate the platform assembler may restrict the immediate,
  // for example, the ARM assembler requires the count for 32-bit shifts to be
  // in the range [0,31].
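  //
  // A short sketch of the immediate forms declared below:
  //
  //   masm.lshift32(Imm32(4), reg);            // reg = reg << 4
  //   masm.rshift32(Imm32(4), reg);            // reg = uint32_t(reg) >> 4
  //   masm.rshift32Arithmetic(Imm32(4), reg);  // reg = int32_t(reg) >> 4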
  inline void lshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
  inline void rshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
  inline void rshift32Arithmetic(Imm32 shift, Register srcDest) PER_SHARED_ARCH;

  inline void lshiftPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void rshiftPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void rshiftPtr(Imm32 imm, Register src, Register dest)
      DEFINED_ON(arm64);
  inline void rshiftPtrArithmetic(Imm32 imm, Register dest) PER_ARCH;

  inline void lshift64(Imm32 imm, Register64 dest) PER_ARCH;
  inline void rshift64(Imm32 imm, Register64 dest) PER_ARCH;
  inline void rshift64Arithmetic(Imm32 imm, Register64 dest) PER_ARCH;

  // On x86_shared these have the constraint that shift must be in CL.
  inline void lshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
  inline void rshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
  inline void rshift32Arithmetic(Register shift,
                                 Register srcDest) PER_SHARED_ARCH;
  inline void lshiftPtr(Register shift, Register srcDest) PER_ARCH;
  inline void rshiftPtr(Register shift, Register srcDest) PER_ARCH;

  // These variants do not have the above constraint, but may emit some extra
  // instructions on x86_shared. They also handle shift >= 32 consistently by
  // masking with 0x1F (either explicitly or relying on the hardware to do
  // that automatically).
  inline void flexibleLshift32(Register shift,
                               Register srcDest) PER_SHARED_ARCH;
  inline void flexibleRshift32(Register shift,
                               Register srcDest) PER_SHARED_ARCH;
  inline void flexibleRshift32Arithmetic(Register shift,
                                         Register srcDest) PER_SHARED_ARCH;

  inline void lshift64(Register shift, Register64 srcDest) PER_ARCH;
  inline void rshift64(Register shift, Register64 srcDest) PER_ARCH;
  inline void rshift64Arithmetic(Register shift, Register64 srcDest) PER_ARCH;
  // ===============================================================
  // Rotation functions
  // Note: - on x86 and x64 the count register must be in CL.
  //       - on x64 the temp register should be InvalidReg.
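  //
  // For example, rotateLeft(Imm32(8), input, dest) computes
  // dest = (input << 8) | (uint32_t(input) >> 24).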
  inline void rotateLeft(Imm32 count, Register input,
                         Register dest) PER_SHARED_ARCH;
  inline void rotateLeft(Register count, Register input,
                         Register dest) PER_SHARED_ARCH;
  inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest)
      DEFINED_ON(x64);
  inline void rotateLeft64(Register count, Register64 input, Register64 dest)
      DEFINED_ON(x64);
  inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest,
                           Register temp) PER_ARCH;
  inline void rotateLeft64(Register count, Register64 input, Register64 dest,
                           Register temp) PER_ARCH;

  inline void rotateRight(Imm32 count, Register input,
                          Register dest) PER_SHARED_ARCH;
  inline void rotateRight(Register count, Register input,
                          Register dest) PER_SHARED_ARCH;
  inline void rotateRight64(Imm32 count, Register64 input, Register64 dest)
      DEFINED_ON(x64);
  inline void rotateRight64(Register count, Register64 input, Register64 dest)
      DEFINED_ON(x64);
  inline void rotateRight64(Imm32 count, Register64 input, Register64 dest,
                            Register temp) PER_ARCH;
  inline void rotateRight64(Register count, Register64 input, Register64 dest,
                            Register temp) PER_ARCH;
  // ===============================================================
  // Bit counting functions

  // knownNotZero may be true only if the src is known not to be zero.
  inline void clz32(Register src, Register dest,
                    bool knownNotZero) PER_SHARED_ARCH;
  inline void ctz32(Register src, Register dest,
                    bool knownNotZero) PER_SHARED_ARCH;

  inline void clz64(Register64 src, Register dest) PER_ARCH;
  inline void ctz64(Register64 src, Register dest) PER_ARCH;

  // On x86_shared, temp may be Invalid only if the chip has the POPCNT
  // instruction. On ARM, temp may never be Invalid.
  inline void popcnt32(Register src, Register dest,
                       Register temp) PER_SHARED_ARCH;

  // temp may be invalid only if the chip has the POPCNT instruction.
  inline void popcnt64(Register64 src, Register64 dest,
                       Register temp) PER_ARCH;
  // ===============================================================
  // Condition functions

  inline void cmp8Set(Condition cond, Address lhs, Imm32 rhs,
                      Register dest) PER_SHARED_ARCH;

  inline void cmp16Set(Condition cond, Address lhs, Imm32 rhs,
                       Register dest) PER_SHARED_ARCH;

  template <typename T1, typename T2>
  inline void cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, loong64, riscv64,
                 wasm32);

  // Only the NotEqual and Equal conditions are allowed.
  inline void cmp64Set(Condition cond, Address lhs, Imm64 rhs,
                       Register dest) PER_ARCH;

  template <typename T1, typename T2>
  inline void cmpPtrSet(Condition cond, T1 lhs, T2 rhs,
                        Register dest) PER_ARCH;
  // ===============================================================
  // Branch functions

  inline void branch8(Condition cond, const Address& lhs, Imm32 rhs,
                      Label* label) PER_SHARED_ARCH;

  // Compares the byte in |lhs| against |rhs| using an 8-bit comparison on
  // x86/x64 or a 32-bit comparison (all other platforms). The caller should
  // ensure |rhs| is a zero- resp. sign-extended byte value for cross-platform
  // compatible behavior.
  inline void branch8(Condition cond, const BaseIndex& lhs, Register rhs,
                      Label* label) PER_SHARED_ARCH;

  inline void branch16(Condition cond, const Address& lhs, Imm32 rhs,
                       Label* label) PER_SHARED_ARCH;
  template <class L>
  inline void branch32(Condition cond, Register lhs, Register rhs,
                       L label) PER_SHARED_ARCH;
  template <class L>
  inline void branch32(Condition cond, Register lhs, Imm32 rhs,
                       L label) PER_SHARED_ARCH;

  inline void branch32(Condition cond, Register lhs, const Address& rhs,
                       Label* label) DEFINED_ON(arm64);

  inline void branch32(Condition cond, const Address& lhs, Register rhs,
                       Label* label) PER_SHARED_ARCH;
  inline void branch32(Condition cond, const Address& lhs, Imm32 rhs,
                       Label* label) PER_SHARED_ARCH;

  inline void branch32(Condition cond, const AbsoluteAddress& lhs,
                       Register rhs, Label* label)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
  inline void branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs,
                       Label* label)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);

  inline void branch32(Condition cond, const BaseIndex& lhs, Register rhs,
                       Label* label) DEFINED_ON(arm, x86_shared);
  inline void branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
                       Label* label) PER_SHARED_ARCH;

  inline void branch32(Condition cond, const Operand& lhs, Register rhs,
                       Label* label) DEFINED_ON(x86_shared);
  inline void branch32(Condition cond, const Operand& lhs, Imm32 rhs,
                       Label* label) DEFINED_ON(x86_shared);

  inline void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs,
                       Label* label)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
  // The supported conditions are Equal, NotEqual, LessThan(orEqual),
  // GreaterThan(orEqual), Below(orEqual) and Above(orEqual). When a fail label
  // is not defined it will fall through to the next instruction, else jump to
  // the fail label.
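  //
  // For example (labels and registers supplied by the caller):
  //
  //   masm.branch64(Assembler::Below, lhs64, Imm64(0x100000000), &isSmall,
  //                 &isLarge);  // < 2**32 jumps to isSmall, else to isLarge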
  inline void branch64(Condition cond, Register64 lhs, Imm64 val,
                       Label* success, Label* fail = nullptr) PER_ARCH;
  inline void branch64(Condition cond, Register64 lhs, Register64 rhs,
                       Label* success, Label* fail = nullptr) PER_ARCH;

  // Only the NotEqual and Equal conditions are allowed for the branch64
  // variants with Address as lhs.
  inline void branch64(Condition cond, const Address& lhs, Imm64 val,
                       Label* label) PER_ARCH;
  inline void branch64(Condition cond, const Address& lhs, Register64 rhs,
                       Label* label) PER_ARCH;

  // Compare the value at |lhs| with the value at |rhs|. The scratch
  // register *must not* be the base of |lhs| or |rhs|.
  inline void branch64(Condition cond, const Address& lhs, const Address& rhs,
                       Register scratch, Label* label) PER_ARCH;
  template <class L>
  inline void branchPtr(Condition cond, Register lhs, Register rhs,
                        L label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, Register lhs, Imm32 rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, Register lhs, ImmPtr rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, Register lhs, ImmWord rhs,
                        Label* label) PER_SHARED_ARCH;

  template <class L>
  inline void branchPtr(Condition cond, const Address& lhs, Register rhs,
                        L label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
                        Label* label) PER_SHARED_ARCH;

  inline void branchPtr(Condition cond, const BaseIndex& lhs, ImmWord rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, const BaseIndex& lhs, Register rhs,
                        Label* label) PER_SHARED_ARCH;

  inline void branchPtr(Condition cond, const AbsoluteAddress& lhs,
                        Register rhs, Label* label)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
  inline void branchPtr(Condition cond, const AbsoluteAddress& lhs,
                        ImmWord rhs, Label* label)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);

  inline void branchPtr(Condition cond, wasm::SymbolicAddress lhs,
                        Register rhs, Label* label)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
  // Given a pointer to a GC Cell, retrieve the StoreBuffer pointer from its
  // chunk header, or nullptr if it is in the tenured heap.
  void loadStoreBuffer(Register ptr, Register buffer) PER_ARCH;

  void branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
                               Label* label)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
  void branchPtrInNurseryChunk(Condition cond, const Address& address,
                               Register temp, Label* label) DEFINED_ON(x86);
  void branchValueIsNurseryCell(Condition cond, const Address& address,
                                Register temp, Label* label) PER_ARCH;
  void branchValueIsNurseryCell(Condition cond, ValueOperand value,
                                Register temp, Label* label) PER_ARCH;
  // Compares a Value (lhs) that holds a private pointer boxed inside a
  // js::Value against a raw pointer (rhs).
  inline void branchPrivatePtr(Condition cond, const Address& lhs,
                               Register rhs, Label* label) PER_ARCH;

  inline void branchFloat(DoubleCondition cond, FloatRegister lhs,
                          FloatRegister rhs, Label* label) PER_SHARED_ARCH;
  // Truncate a double/float32 to int32 and when it doesn't fit an int32 it
  // will jump to the failure label. This particular variant is allowed to
  // return the value modulo 2**32, which isn't implemented on all
  // architectures. E.g. the x64 variants will do this only in the int64_t
  // range.
  inline void branchTruncateFloat32MaybeModUint32(FloatRegister src,
                                                  Register dest, Label* fail)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
  inline void branchTruncateDoubleMaybeModUint32(FloatRegister src,
                                                 Register dest, Label* fail)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);

  // Truncate a double/float32 to intptr and when it doesn't fit jump to the
  // failure label.
  inline void branchTruncateFloat32ToPtr(FloatRegister src, Register dest,
                                         Label* fail) DEFINED_ON(x86, x64);
  inline void branchTruncateDoubleToPtr(FloatRegister src, Register dest,
                                        Label* fail) DEFINED_ON(x86, x64);

  // Truncate a double/float32 to int32 and when it doesn't fit jump to the
  // failure label.
  inline void branchTruncateFloat32ToInt32(FloatRegister src, Register dest,
                                           Label* fail)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
  inline void branchTruncateDoubleToInt32(FloatRegister src, Register dest,
                                          Label* fail) PER_ARCH;
  inline void branchDouble(DoubleCondition cond, FloatRegister lhs,
                           FloatRegister rhs, Label* label) PER_SHARED_ARCH;

  inline void branchDoubleNotInInt64Range(Address src, Register temp,
                                          Label* fail);
  inline void branchDoubleNotInUInt64Range(Address src, Register temp,
                                           Label* fail);
  inline void branchFloat32NotInInt64Range(Address src, Register temp,
                                           Label* fail);
  inline void branchFloat32NotInUInt64Range(Address src, Register temp,
                                            Label* fail);
  template <typename T>
  inline void branchAdd32(Condition cond, T src, Register dest,
                          Label* label) PER_SHARED_ARCH;
  template <typename T>
  inline void branchSub32(Condition cond, T src, Register dest,
                          Label* label) PER_SHARED_ARCH;
  template <typename T>
  inline void branchMul32(Condition cond, T src, Register dest,
                          Label* label) PER_SHARED_ARCH;
  template <typename T>
  inline void branchRshift32(Condition cond, T src, Register dest,
                             Label* label) PER_SHARED_ARCH;

  inline void branchNeg32(Condition cond, Register reg,
                          Label* label) PER_SHARED_ARCH;

  inline void branchAdd64(Condition cond, Imm64 imm, Register64 dest,
                          Label* label) DEFINED_ON(x86, arm, wasm32);

  template <typename T>
  inline void branchAddPtr(Condition cond, T src, Register dest,
                           Label* label) PER_SHARED_ARCH;

  template <typename T>
  inline void branchSubPtr(Condition cond, T src, Register dest,
                           Label* label) PER_SHARED_ARCH;

  inline void branchMulPtr(Condition cond, Register src, Register dest,
                           Label* label) PER_SHARED_ARCH;

  inline void decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
                           Label* label) PER_SHARED_ARCH;
  template <class L>
  inline void branchTest32(Condition cond, Register lhs, Register rhs,
                           L label) PER_SHARED_ARCH;
  template <class L>
  inline void branchTest32(Condition cond, Register lhs, Imm32 rhs,
                           L label) PER_SHARED_ARCH;
  inline void branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
                           Label* label) PER_SHARED_ARCH;
  inline void branchTest32(Condition cond, const AbsoluteAddress& lhs,
                           Imm32 rhs, Label* label)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);

  template <class L>
  inline void branchTestPtr(Condition cond, Register lhs, Register rhs,
                            L label) PER_SHARED_ARCH;
  inline void branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
                            Label* label) PER_SHARED_ARCH;
  inline void branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs,
                            Label* label) PER_SHARED_ARCH;

  template <class L>
  inline void branchTest64(Condition cond, Register64 lhs, Register64 rhs,
                           Register temp, L label) PER_ARCH;
  // Branches to |label| if |reg| is false. |reg| should be a C++ bool.
  template <class L>
  inline void branchIfFalseBool(Register reg, L label);

  // Branches to |label| if |reg| is true. |reg| should be a C++ bool.
  inline void branchIfTrueBool(Register reg, Label* label);

  inline void branchIfRope(Register str, Label* label);
  inline void branchIfNotRope(Register str, Label* label);

  inline void branchLatin1String(Register string, Label* label);
  inline void branchTwoByteString(Register string, Label* label);

  inline void branchIfBigIntIsNegative(Register bigInt, Label* label);
  inline void branchIfBigIntIsNonNegative(Register bigInt, Label* label);
  inline void branchIfBigIntIsZero(Register bigInt, Label* label);
  inline void branchIfBigIntIsNonZero(Register bigInt, Label* label);
  inline void branchTestFunctionFlags(Register fun, uint32_t flags,
                                      Condition cond, Label* label);

  inline void branchIfNotFunctionIsNonBuiltinCtor(Register fun,
                                                  Register scratch,
                                                  Label* label);

  inline void branchIfFunctionHasNoJitEntry(Register fun, bool isConstructing,
                                            Label* label);
  inline void branchIfFunctionHasJitEntry(Register fun, bool isConstructing,
                                          Label* label);

  inline void branchIfScriptHasJitScript(Register script, Label* label);
  inline void branchIfScriptHasNoJitScript(Register script, Label* label);
  inline void loadJitScript(Register script, Register dest);

  // Loads the function's argument count.
  inline void loadFunctionArgCount(Register func, Register output);

  // Loads the function length. This handles interpreted, native, and bound
  // functions. The caller is responsible for checking that INTERPRETED_LAZY
  // and RESOLVED_LENGTH flags are not set.
  void loadFunctionLength(Register func, Register funFlagsAndArgCount,
                          Register output, Label* slowPath);

  // Loads the function name. This handles interpreted, native, and bound
  // functions.
  void loadFunctionName(Register func, Register output, ImmGCPtr emptyString,
                        Label* slowPath);

  void assertFunctionIsExtended(Register func);
  inline void branchFunctionKind(Condition cond,
                                 FunctionFlags::FunctionKind kind,
                                 Register fun, Register scratch, Label* label);

  inline void branchIfObjectEmulatesUndefined(Register objReg,
                                              Register scratch,
                                              Label* slowCheck, Label* label);
  // For all methods below: spectreRegToZero is a register that will be zeroed
  // on speculatively executed code paths (when the branch should be taken but
  // branch prediction speculates it isn't). Usually this will be the object
  // register but the caller may pass a different register.
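  //
  // Typical use (a sketch): passing the object register itself means that,
  // under misspeculation, later loads through it read from the zero page
  // instead of attacker-controlled memory:
  //
  //   masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch,
  //                           /* spectreRegToZero = */ obj, &fail);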
  inline void branchTestObjClass(Condition cond, Register obj,
                                 const JSClass* clasp, Register scratch,
                                 Register spectreRegToZero, Label* label);
  inline void branchTestObjClassNoSpectreMitigations(Condition cond,
                                                     Register obj,
                                                     const JSClass* clasp,
                                                     Register scratch,
                                                     Label* label);

  inline void branchTestObjClass(Condition cond, Register obj,
                                 const Address& clasp, Register scratch,
                                 Register spectreRegToZero, Label* label);
  inline void branchTestObjClassNoSpectreMitigations(Condition cond,
                                                     Register obj,
                                                     const Address& clasp,
                                                     Register scratch,
                                                     Label* label);

  inline void branchTestObjClass(Condition cond, Register obj, Register clasp,
                                 Register scratch, Register spectreRegToZero,
                                 Label* label);

  inline void branchTestClass(Condition cond, Register clasp,
                              std::pair<const JSClass*, const JSClass*> classes,
                              Label* label);

  inline void branchTestObjClass(
      Condition cond, Register obj,
      std::pair<const JSClass*, const JSClass*> classes, Register scratch,
      Register spectreRegToZero, Label* label);
  inline void branchTestObjClassNoSpectreMitigations(
      Condition cond, Register obj,
      std::pair<const JSClass*, const JSClass*> classes, Register scratch,
      Label* label);

  inline void branchTestObjShape(Condition cond, Register obj,
                                 const Shape* shape, Register scratch,
                                 Register spectreRegToZero, Label* label);
  inline void branchTestObjShapeNoSpectreMitigations(Condition cond,
                                                     Register obj,
                                                     const Shape* shape,
                                                     Label* label);

  void branchTestObjShapeList(Condition cond, Register obj,
                              Register shapeElements, Register shapeScratch,
                              Register endScratch, Register spectreScratch,
                              Label* label);

  inline void branchTestClassIsFunction(Condition cond, Register clasp,
                                        Label* label);
  inline void branchTestObjIsFunction(Condition cond, Register obj,
                                      Register scratch,
                                      Register spectreRegToZero, Label* label);
  inline void branchTestObjIsFunctionNoSpectreMitigations(Condition cond,
                                                          Register obj,
                                                          Register scratch,
                                                          Label* label);

  inline void branchTestObjShape(Condition cond, Register obj, Register shape,
                                 Register scratch, Register spectreRegToZero,
                                 Label* label);
  inline void branchTestObjShapeNoSpectreMitigations(Condition cond,
                                                     Register obj,
                                                     Register shape,
                                                     Label* label);

  // TODO: audit/fix callers to be Spectre safe.
  inline void branchTestObjShapeUnsafe(Condition cond, Register obj,
                                       Register shape, Label* label);
  void branchTestObjCompartment(Condition cond, Register obj,
                                const Address& compartment, Register scratch,
                                Label* label);
  void branchTestObjCompartment(Condition cond, Register obj,
                                const JS::Compartment* compartment,
                                Register scratch, Label* label);

  void branchIfNonNativeObj(Register obj, Register scratch, Label* label);

  void branchIfObjectNotExtensible(Register obj, Register scratch,
                                   Label* label);

  void branchTestObjectNeedsProxyResultValidation(Condition condition,
                                                  Register obj,
                                                  Register scratch,
                                                  Label* label);
  inline void branchTestClassIsProxy(bool proxy, Register clasp, Label* label);

  inline void branchTestObjectIsProxy(bool proxy, Register object,
                                      Register scratch, Label* label);

  inline void branchTestProxyHandlerFamily(Condition cond, Register proxy,
                                           Register scratch,
                                           const void* handlerp, Label* label);

  inline void branchTestNeedsIncrementalBarrier(Condition cond, Label* label);
  inline void branchTestNeedsIncrementalBarrierAnyZone(Condition cond,
                                                       Label* label,
                                                       Register scratch);
  // Perform a type-test on a tag of a Value (32bits boxing), or the tagged
  // value (64bits boxing).
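  //
  // For example, with a tag register extracted from a Value (e.g. via
  // splitTagForTest):
  //
  //   masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);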
  inline void branchTestUndefined(Condition cond, Register tag,
                                  Label* label) PER_SHARED_ARCH;
  inline void branchTestInt32(Condition cond, Register tag,
                              Label* label) PER_SHARED_ARCH;
  inline void branchTestDouble(Condition cond, Register tag, Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);
  inline void branchTestNumber(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestBoolean(Condition cond, Register tag,
                                Label* label) PER_SHARED_ARCH;
  inline void branchTestString(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestSymbol(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestBigInt(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestNull(Condition cond, Register tag,
                             Label* label) PER_SHARED_ARCH;
  inline void branchTestObject(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestPrimitive(Condition cond, Register tag,
                                  Label* label) PER_SHARED_ARCH;
  inline void branchTestMagic(Condition cond, Register tag,
                              Label* label) PER_SHARED_ARCH;
  void branchTestType(Condition cond, Register tag, JSValueType type,
                      Label* label);
  // Perform a type-test on a Value, addressed by Address or BaseIndex, or
  // loaded into ValueOperand.
  // BaseIndex and ValueOperand variants clobber the ScratchReg on x64.
  // All Variants clobber the ScratchReg on arm64.
  inline void branchTestUndefined(Condition cond, const Address& address,
                                  Label* label) PER_SHARED_ARCH;
  inline void branchTestUndefined(Condition cond, const BaseIndex& address,
                                  Label* label) PER_SHARED_ARCH;
  inline void branchTestUndefined(Condition cond, const ValueOperand& value,
                                  Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  inline void branchTestInt32(Condition cond, const Address& address,
                              Label* label) PER_SHARED_ARCH;
  inline void branchTestInt32(Condition cond, const BaseIndex& address,
                              Label* label) PER_SHARED_ARCH;
  inline void branchTestInt32(Condition cond, const ValueOperand& value,
                              Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  inline void branchTestDouble(Condition cond, const Address& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestDouble(Condition cond, const BaseIndex& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestDouble(Condition cond, const ValueOperand& value,
                               Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  inline void branchTestNumber(Condition cond, const ValueOperand& value,
                               Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  inline void branchTestBoolean(Condition cond, const Address& address,
                                Label* label) PER_SHARED_ARCH;
  inline void branchTestBoolean(Condition cond, const BaseIndex& address,
                                Label* label) PER_SHARED_ARCH;
  inline void branchTestBoolean(Condition cond, const ValueOperand& value,
                                Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  inline void branchTestString(Condition cond, const Address& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestString(Condition cond, const BaseIndex& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestString(Condition cond, const ValueOperand& value,
                               Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  inline void branchTestSymbol(Condition cond, const Address& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestSymbol(Condition cond, const BaseIndex& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestSymbol(Condition cond, const ValueOperand& value,
                               Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  inline void branchTestBigInt(Condition cond, const Address& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestBigInt(Condition cond, const BaseIndex& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestBigInt(Condition cond, const ValueOperand& value,
                               Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  inline void branchTestNull(Condition cond, const Address& address,
                             Label* label) PER_SHARED_ARCH;
  inline void branchTestNull(Condition cond, const BaseIndex& address,
                             Label* label) PER_SHARED_ARCH;
  inline void branchTestNull(Condition cond, const ValueOperand& value,
                             Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  // Clobbers the ScratchReg on x64.
  inline void branchTestObject(Condition cond, const Address& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestObject(Condition cond, const BaseIndex& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestObject(Condition cond, const ValueOperand& value,
                               Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  inline void branchTestGCThing(Condition cond, const Address& address,
                                Label* label) PER_SHARED_ARCH;
  inline void branchTestGCThing(Condition cond, const BaseIndex& address,
                                Label* label) PER_SHARED_ARCH;
  inline void branchTestGCThing(Condition cond, const ValueOperand& value,
                                Label* label) PER_SHARED_ARCH;

  inline void branchTestPrimitive(Condition cond, const ValueOperand& value,
                                  Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  inline void branchTestMagic(Condition cond, const Address& address,
                              Label* label) PER_SHARED_ARCH;
  inline void branchTestMagic(Condition cond, const BaseIndex& address,
                              Label* label) PER_SHARED_ARCH;
  template <class L>
  inline void branchTestMagic(Condition cond, const ValueOperand& value,
                              L label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  inline void branchTestMagic(Condition cond, const Address& valaddr,
                              JSWhyMagic why, Label* label) PER_ARCH;

  inline void branchTestMagicValue(Condition cond, const ValueOperand& val,
                                   JSWhyMagic why, Label* label);

  void branchTestValue(Condition cond, const ValueOperand& lhs,
                       const Value& rhs, Label* label) PER_ARCH;

  inline void branchTestValue(Condition cond, const BaseIndex& lhs,
                              const ValueOperand& rhs, Label* label) PER_ARCH;
  // Checks whether the given Value evaluates to true or false in a boolean
  // context. The type of the value should match the type of the method.
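  //
  // For example, for a value known to hold an int32:
  //
  //   masm.branchTestInt32Truthy(false, value, &falsy);  // taken when 0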
  inline void branchTestInt32Truthy(bool truthy, const ValueOperand& value,
                                    Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, x86_shared,
                 wasm32);
  inline void branchTestDoubleTruthy(bool truthy, FloatRegister reg,
                                     Label* label) PER_SHARED_ARCH;
  inline void branchTestBooleanTruthy(bool truthy, const ValueOperand& value,
                                      Label* label) PER_ARCH;
  inline void branchTestStringTruthy(bool truthy, const ValueOperand& value,
                                     Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);
  inline void branchTestBigIntTruthy(bool truthy, const ValueOperand& value,
                                     Label* label)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
                 x86_shared);

  // Create an unconditional branch to the address given as argument.
  inline void branchToComputedAddress(const BaseIndex& address) PER_ARCH;
 private:
  template <typename T, typename S, typename L>
  inline void branchPtrImpl(Condition cond, const T& lhs, const S& rhs,
                            L label) DEFINED_ON(x86_shared);

  void branchPtrInNurseryChunkImpl(Condition cond, Register ptr, Label* label)
      DEFINED_ON(x86);
  template <typename T>
  void branchValueIsNurseryCellImpl(Condition cond, const T& value,
                                    Register temp, Label* label)
      DEFINED_ON(arm64, x64, mips64, loong64, riscv64);

  template <typename T>
  inline void branchTestUndefinedImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestInt32Impl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestDoubleImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestNumberImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestBooleanImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestStringImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestSymbolImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestBigIntImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestNullImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestObjectImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestGCThingImpl(Condition cond, const T& t,
                                    Label* label) PER_SHARED_ARCH;
  template <typename T>
  inline void branchTestPrimitiveImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T, class L>
  inline void branchTestMagicImpl(Condition cond, const T& t, L label)
      DEFINED_ON(arm, arm64, x86_shared);
 public:
  template <typename T>
  inline void testNumberSet(Condition cond, const T& src,
                            Register dest) PER_SHARED_ARCH;
  template <typename T>
  inline void testBooleanSet(Condition cond, const T& src,
                             Register dest) PER_SHARED_ARCH;
  template <typename T>
  inline void testStringSet(Condition cond, const T& src,
                            Register dest) PER_SHARED_ARCH;
  template <typename T>
  inline void testSymbolSet(Condition cond, const T& src,
                            Register dest) PER_SHARED_ARCH;
  template <typename T>
  inline void testBigIntSet(Condition cond, const T& src,
                            Register dest) PER_SHARED_ARCH;
  // The fallibleUnbox* methods below combine a Value type check with an unbox.
  // Especially on 64-bit platforms this can be implemented more efficiently
  // than a separate branch + unbox.
  //
  // |src| and |dest| can be the same register, but |dest| may hold garbage on
  // failure.
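  //
  // For example:
  //
  //   masm.fallibleUnboxInt32(value, output, &notInt32);
  //   // |output| now holds the int32; for a non-int32 value control jumped
  //   // to |notInt32| and |output| may hold garbage.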
  inline void fallibleUnboxPtr(const ValueOperand& src, Register dest,
                               JSValueType type, Label* fail) PER_ARCH;
  inline void fallibleUnboxPtr(const Address& src, Register dest,
                               JSValueType type, Label* fail) PER_ARCH;
  inline void fallibleUnboxPtr(const BaseIndex& src, Register dest,
                               JSValueType type, Label* fail) PER_ARCH;
  template <typename T>
  inline void fallibleUnboxInt32(const T& src, Register dest, Label* fail);
  template <typename T>
  inline void fallibleUnboxBoolean(const T& src, Register dest, Label* fail);
  template <typename T>
  inline void fallibleUnboxObject(const T& src, Register dest, Label* fail);
  template <typename T>
  inline void fallibleUnboxString(const T& src, Register dest, Label* fail);
  template <typename T>
  inline void fallibleUnboxSymbol(const T& src, Register dest, Label* fail);
  template <typename T>
  inline void fallibleUnboxBigInt(const T& src, Register dest, Label* fail);
  inline void cmp32Move32(Condition cond, Register lhs, Imm32 rhs,
                          Register src, Register dest)
      DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared,
                 x86_shared);

  inline void cmp32Move32(Condition cond, Register lhs, Register rhs,
                          Register src, Register dest)
      DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared,
                 x86_shared);

  inline void cmp32Move32(Condition cond, Register lhs, const Address& rhs,
                          Register src, Register dest)
      DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared,
                 x86_shared);

  inline void cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
                            Register src, Register dest) PER_ARCH;

  inline void cmpPtrMovePtr(Condition cond, Register lhs, const Address& rhs,
                            Register src, Register dest) PER_ARCH;

  inline void cmp32Load32(Condition cond, Register lhs, const Address& rhs,
                          const Address& src, Register dest)
      DEFINED_ON(arm, arm64, loong64, riscv64, mips_shared, x86_shared);

  inline void cmp32Load32(Condition cond, Register lhs, Register rhs,
                          const Address& src, Register dest)
      DEFINED_ON(arm, arm64, loong64, riscv64, mips_shared, x86_shared);

  inline void cmp32Load32(Condition cond, Register lhs, Imm32 rhs,
                          const Address& src, Register dest)
      DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared,
                 x86_shared);

  inline void cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
                           const Address& src, Register dest)
      DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared, x86, x64);

  inline void cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
                           Register src, Register dest)
      DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared, x86, x64);

  inline void test32LoadPtr(Condition cond, const Address& addr, Imm32 mask,
                            const Address& src, Register dest)
      DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared, x86, x64);

  inline void test32MovePtr(Condition cond, const Address& addr, Imm32 mask,
                            Register src, Register dest)
      DEFINED_ON(arm, arm64, loong64, riscv64, wasm32, mips_shared, x86, x64);
  // Conditional move for Spectre mitigations.
  inline void spectreMovePtr(Condition cond, Register src, Register dest)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);

  // Zeroes dest if the condition is true.
  inline void spectreZeroRegister(Condition cond, Register scratch,
                                  Register dest)
      DEFINED_ON(arm, arm64, mips_shared, x86_shared, loong64, riscv64,
                 wasm32);
  // Performs a bounds check and zeroes the index register if out-of-bounds
  // (to mitigate Spectre).
 private:
  inline void spectreBoundsCheck32(Register index, const Operand& length,
                                   Register maybeScratch, Label* failure)
      DEFINED_ON(x86);

 public:
  inline void spectreBoundsCheck32(Register index, Register length,
                                   Register maybeScratch, Label* failure)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
  inline void spectreBoundsCheck32(Register index, const Address& length,
                                   Register maybeScratch, Label* failure)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);

  inline void spectreBoundsCheckPtr(Register index, Register length,
                                    Register maybeScratch, Label* failure)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
  inline void spectreBoundsCheckPtr(Register index, const Address& length,
                                    Register maybeScratch, Label* failure)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
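
  // Typical use before an indexed access (a sketch; registers and labels are
  // caller-supplied):
  //
  //   masm.spectreBoundsCheckPtr(index, length, scratch, &oob);
  //   // On the architectural path index < length holds; under misspeculation
  //   // index has been zeroed, so the following access stays in bounds.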
  // ========================================================================
  // Canonicalization primitives.
  inline void canonicalizeDouble(FloatRegister reg);
  inline void canonicalizeDoubleIfDeterministic(FloatRegister reg);

  inline void canonicalizeFloat(FloatRegister reg);
  inline void canonicalizeFloatIfDeterministic(FloatRegister reg);
  // ========================================================================
  // Memory access primitives.
  inline FaultingCodeOffset storeUncanonicalizedDouble(FloatRegister src,
                                                       const Address& dest)
      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, loong64, riscv64,
                 wasm32);
  inline FaultingCodeOffset storeUncanonicalizedDouble(FloatRegister src,
                                                       const BaseIndex& dest)
      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, loong64, riscv64,
                 wasm32);
  inline FaultingCodeOffset storeUncanonicalizedDouble(FloatRegister src,
                                                       const Operand& dest)
      DEFINED_ON(x86_shared);

  template <class T>
  inline FaultingCodeOffset storeDouble(FloatRegister src, const T& dest);

  template <class T>
  inline void boxDouble(FloatRegister src, const T& dest);

  using MacroAssemblerSpecific::boxDouble;

  inline FaultingCodeOffset storeUncanonicalizedFloat32(FloatRegister src,
                                                        const Address& dest)
      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, loong64, riscv64,
                 wasm32);
  inline FaultingCodeOffset storeUncanonicalizedFloat32(FloatRegister src,
                                                        const BaseIndex& dest)
      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, loong64, riscv64,
                 wasm32);
  inline FaultingCodeOffset storeUncanonicalizedFloat32(FloatRegister src,
                                                        const Operand& dest)
      DEFINED_ON(x86_shared);

  template <class T>
  inline FaultingCodeOffset storeFloat32(FloatRegister src, const T& dest);

  template <typename T>
  void storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                         const T& dest) PER_ARCH;

  inline void memoryBarrier(MemoryBarrierBits barrier) PER_SHARED_ARCH;
  // ========================================================================
  // Wasm SIMD
  //
  // Naming is "operationSimd128" when operating on the whole vector, otherwise
  // it's "operation<Type><Size>x<Lanes>".
  //
  // For microarchitectural reasons we can in principle get a performance win
  // by using int or float specific instructions in the operationSimd128 case
  // when we know that subsequent operations on the result are int or float
  // oriented. In practice, we don't care about that yet.
  //
  // The order of operations here follows those in the SIMD overview document,
  // https://github.com/WebAssembly/simd/blob/master/proposals/simd/SIMD.md.
  //
  // Since we must target Intel SSE indefinitely and SSE is one-address or
  // two-address, the x86 porting interfaces are nearly all one-address or
  // two-address. Likewise there are two-address ARM64 interfaces to support
  // the baseline compiler. But there are also three-address ARM64 interfaces
  // as the ARM64 Ion back-end can use those. In the future, they may support
  // AVX2 or similar for x86.
  //
  // Conventions for argument order and naming and semantics:
  //  - Condition codes come first.
  //  - Other immediates (masks, shift counts) come next.
  //  - Operands come next:
  //    - For a binary two-address operator where the left-hand-side has the
  //      same type as the result, one register parameter is normally named
  //      `lhsDest` and is both the left-hand side and destination; the other
  //      parameter is named `rhs` and is the right-hand side. `rhs` comes
  //      first, `lhsDest` second. `rhs` and `lhsDest` may be the same register
  //      (if rhs is a register).
  //    - For a binary three-address operator the order is `lhs`, `rhs`,
  //      `dest`, and generally these registers may be the same.
  //    - For a unary operator, the input is named `src` and the output is
  //      named `dest`. `src` comes first, `dest` second. `src` and `dest` may
  //      be the same register (if `src` is a register).
  //  - Temp registers follow operands and are named `temp` if there's only
  //    one, otherwise `temp1`, `temp2`, etc regardless of type. GPR temps
  //    precede FPU temps. If there are several temps then they must be
  //    distinct registers, and they must be distinct from the operand
  //    registers unless noted.
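  //
  // To illustrate the two conventions with shuffleInt8x16, which has both
  // forms below:
  //
  //   masm.shuffleInt8x16(lanes, rhs, lhsDest);    // two-address form
  //   masm.shuffleInt8x16(lanes, lhs, rhs, dest);  // three-address form
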
  // Moves

  inline void moveSimd128(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Constants

  inline void loadConstantSimd128(const SimdConstant& v, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Splat

  inline void splatX16(Register src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void splatX16(uint32_t srcLane, FloatRegister src, FloatRegister dest)
      DEFINED_ON(arm64);

  inline void splatX8(Register src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void splatX8(uint32_t srcLane, FloatRegister src, FloatRegister dest)
      DEFINED_ON(arm64);

  inline void splatX4(Register src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void splatX4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void splatX2(Register64 src, FloatRegister dest)
      DEFINED_ON(x86, x64, arm64);

  inline void splatX2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Extract lane as scalar. Float extraction does not canonicalize the value.

  inline void extractLaneInt8x16(uint32_t lane, FloatRegister src,
                                 Register dest) DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtractLaneInt8x16(uint32_t lane, FloatRegister src,
                                         Register dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extractLaneInt16x8(uint32_t lane, FloatRegister src,
                                 Register dest) DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtractLaneInt16x8(uint32_t lane, FloatRegister src,
                                         Register dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extractLaneInt32x4(uint32_t lane, FloatRegister src,
                                 Register dest) DEFINED_ON(x86_shared, arm64);

  inline void extractLaneInt64x2(uint32_t lane, FloatRegister src,
                                 Register64 dest) DEFINED_ON(x86, x64, arm64);

  inline void extractLaneFloat32x4(uint32_t lane, FloatRegister src,
                                   FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extractLaneFloat64x2(uint32_t lane, FloatRegister src,
                                   FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Replace lane value

  inline void replaceLaneInt8x16(unsigned lane, FloatRegister lhs,
                                 Register rhs, FloatRegister dest)
      DEFINED_ON(x86_shared);

  inline void replaceLaneInt8x16(unsigned lane, Register rhs,
                                 FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  inline void replaceLaneInt16x8(unsigned lane, FloatRegister lhs,
                                 Register rhs, FloatRegister dest)
      DEFINED_ON(x86_shared);

  inline void replaceLaneInt16x8(unsigned lane, Register rhs,
                                 FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  inline void replaceLaneInt32x4(unsigned lane, FloatRegister lhs,
                                 Register rhs, FloatRegister dest)
      DEFINED_ON(x86_shared);

  inline void replaceLaneInt32x4(unsigned lane, Register rhs,
                                 FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  inline void replaceLaneInt64x2(unsigned lane, FloatRegister lhs,
                                 Register64 rhs, FloatRegister dest)
      DEFINED_ON(x86, x64);

  inline void replaceLaneInt64x2(unsigned lane, Register64 rhs,
                                 FloatRegister lhsDest)
      DEFINED_ON(x86, x64, arm64);

  inline void replaceLaneFloat32x4(unsigned lane, FloatRegister lhs,
                                   FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared);

  inline void replaceLaneFloat32x4(unsigned lane, FloatRegister rhs,
                                   FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  inline void replaceLaneFloat64x2(unsigned lane, FloatRegister lhs,
                                   FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared);

  inline void replaceLaneFloat64x2(unsigned lane, FloatRegister rhs,
                                   FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);
  // Shuffle - blend and permute with immediate indices, and its many
  // specializations. Lane values other than those mentioned are illegal.

  // lane values 0..31
  inline void shuffleInt8x16(const uint8_t lanes[16], FloatRegister rhs,
                             FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  inline void shuffleInt8x16(const uint8_t lanes[16], FloatRegister lhs,
                             FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Lane values must be 0 (select from lhs) or FF (select from rhs).
  // The behavior is undefined for lane values that are neither 0 nor FF.
  // On x86_shared: it is required that lhs == dest.
  inline void blendInt8x16(const uint8_t lanes[16], FloatRegister lhs,
                           FloatRegister rhs, FloatRegister dest,
                           FloatRegister temp) DEFINED_ON(x86_shared);

  // Lane values must be 0 (select from lhs) or FF (select from rhs).
  // The behavior is undefined for lane values that are neither 0 nor FF.
  inline void blendInt8x16(const uint8_t lanes[16], FloatRegister lhs,
                           FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(arm64);

  // Lane values must be 0 (select from lhs) or FFFF (select from rhs).
  // The behavior is undefined for lane values that are neither 0 nor FFFF.
  // On x86_shared: it is required that lhs == dest.
  inline void blendInt16x8(const uint16_t lanes[8], FloatRegister lhs,
                           FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Mask lane values must be ~0 or 0. The former selects from lhs and the
  // latter from rhs.
  // The implementation works effectively for I8x16, I16x8, I32x4, and I64x2.
  inline void laneSelectSimd128(FloatRegister mask, FloatRegister lhs,
                                FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveHighInt8x16(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveHighInt16x8(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveHighInt32x4(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveHighInt64x2(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveLowInt8x16(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveLowInt16x8(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveLowInt32x4(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveLowInt64x2(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Permute - permute with immediate indices.

  // lane values 0..15
  inline void permuteInt8x16(const uint8_t lanes[16], FloatRegister src,
                             FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  // lane values 0..7
  inline void permuteInt16x8(const uint16_t lanes[8], FloatRegister src,
                             FloatRegister dest) DEFINED_ON(arm64);

  // lane values 0..3 [sic].
  inline void permuteHighInt16x8(const uint16_t lanes[4], FloatRegister src,
                                 FloatRegister dest) DEFINED_ON(x86_shared);

  // lane values 0..3.
  inline void permuteLowInt16x8(const uint16_t lanes[4], FloatRegister src,
                                FloatRegister dest) DEFINED_ON(x86_shared);

  // lane values 0..3
  inline void permuteInt32x4(const uint32_t lanes[4], FloatRegister src,
                             FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Funnel shift by immediate count:
  //   low_16_bytes_of((lhs ++ rhs) >> shift*8), shift must be < 16
  inline void concatAndRightShiftSimd128(FloatRegister lhs, FloatRegister rhs,
                                         FloatRegister dest, uint32_t shift)
      DEFINED_ON(x86_shared, arm64);
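
  // For concatAndRightShiftSimd128 with shift == 3, reading (lhs ++ rhs) as
  // lhs in the high 16 bytes and rhs in the low 16 bytes, the result holds
  // rhs bytes 3..15 in its low lanes and lhs bytes 0..2 in its high lanes
  // (cf. x86 PALIGNR).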
  // Rotate right by immediate count:
  //   low_16_bytes_of((src ++ src) >> shift*8), shift must be < 16
  inline void rotateRightSimd128(FloatRegister src, FloatRegister dest,
                                 uint32_t shift) DEFINED_ON(arm64);

  // Shift bytes with immediate count, shifting in zeroes. Shift count 0..15.

  inline void leftShiftSimd128(Imm32 count, FloatRegister src,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void rightShiftSimd128(Imm32 count, FloatRegister src,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Zero extend int values.

  inline void zeroExtend8x16To16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void zeroExtend8x16To32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void zeroExtend8x16To64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void zeroExtend16x8To32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void zeroExtend16x8To64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void zeroExtend32x4To64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Reverse bytes in lanes.

  inline void reverseInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void reverseInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void reverseInt64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Swizzle - permute with variable indices. `rhs` holds the lanes parameter.

  inline void swizzleInt8x16(FloatRegister lhs, FloatRegister rhs,
                             FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void swizzleInt8x16Relaxed(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Integer Add

  inline void addInt8x16(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void addInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void addInt16x8(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void addInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void addInt32x4(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void addInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void addInt64x2(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void addInt64x2(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);
  // Integer Subtract

  inline void subInt8x16(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void subInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void subInt16x8(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void subInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void subInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void subInt32x4(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void subInt64x2(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void subInt64x2(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  // Integer Multiply

  inline void mulInt16x8(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void mulInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void mulInt32x4(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void mulInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  // On x86_shared, it is required that lhs == dest.
  inline void mulInt64x2(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest, FloatRegister temp)
      DEFINED_ON(x86_shared);

  inline void mulInt64x2(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest, FloatRegister temp)
      DEFINED_ON(x86_shared);

  inline void mulInt64x2(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest, FloatRegister temp1,
                         FloatRegister temp2) DEFINED_ON(arm64);
  // Note for the extMul opcodes, the NxM designation is for the input lanes;
  // the output lanes are twice as wide.
  inline void extMulLowInt8x16(FloatRegister lhs, FloatRegister rhs,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extMulHighInt8x16(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtMulLowInt8x16(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtMulHighInt8x16(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extMulLowInt16x8(FloatRegister lhs, FloatRegister rhs,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extMulHighInt16x8(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtMulLowInt16x8(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtMulHighInt16x8(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extMulLowInt32x4(FloatRegister lhs, FloatRegister rhs,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extMulHighInt32x4(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtMulLowInt32x4(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtMulHighInt32x4(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void q15MulrSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Integer Negate

  inline void negInt8x16(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void negInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void negInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void negInt64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Saturating integer add

  inline void addSatInt8x16(FloatRegister lhs, FloatRegister rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void addSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared);
  inline void unsignedAddSatInt8x16(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedAddSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared);
  inline void addSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void addSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared);
  inline void unsignedAddSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedAddSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared);

  // Saturating integer subtract

  inline void subSatInt8x16(FloatRegister lhs, FloatRegister rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void subSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared);
  inline void unsignedSubSatInt8x16(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedSubSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared);
  inline void subSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void subSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared);
  inline void unsignedSubSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedSubSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared);

  // Lane-wise integer minimum

  inline void minInt8x16(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void minInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);
  inline void unsignedMinInt8x16(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedMinInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);
  inline void minInt16x8(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void minInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);
  inline void unsignedMinInt16x8(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedMinInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);
  inline void minInt32x4(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void minInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);
  inline void unsignedMinInt32x4(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedMinInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);
  // Lane-wise integer maximum

  inline void maxInt8x16(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void maxInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);
  inline void unsignedMaxInt8x16(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedMaxInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);
  inline void maxInt16x8(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void maxInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);
  inline void unsignedMaxInt16x8(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedMaxInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);
  inline void maxInt32x4(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void maxInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);
  inline void unsignedMaxInt32x4(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedMaxInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);

  // Lane-wise integer rounding average

  inline void unsignedAverageInt8x16(FloatRegister lhs, FloatRegister rhs,
                                     FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedAverageInt16x8(FloatRegister lhs, FloatRegister rhs,
                                     FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Lane-wise integer absolute value

  inline void absInt8x16(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void absInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void absInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void absInt64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Left shift by scalar. Immediates and variable shifts must have been
  // masked; shifts of zero will work but may or may not generate code.
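  //
  // A hypothetical caller (sketch only; register names are illustrative)
  // masks the variable shift count to the lane width before calling, e.g.
  // for 8-bit lanes using the arm64-style (lhs, rhs, dest) form:
  //
  //   masm.and32(Imm32(7), shiftReg);              // shift count mod 8
  //   masm.leftShiftInt8x16(lhs, shiftReg, dest);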
  inline void leftShiftInt8x16(Register rhs, FloatRegister lhsDest,
                               FloatRegister temp) DEFINED_ON(x86_shared);
  inline void leftShiftInt8x16(FloatRegister lhs, Register rhs,
                               FloatRegister dest) DEFINED_ON(arm64);
  inline void leftShiftInt8x16(Imm32 count, FloatRegister src,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void leftShiftInt16x8(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);
  inline void leftShiftInt16x8(FloatRegister lhs, Register rhs,
                               FloatRegister dest) DEFINED_ON(arm64);
  inline void leftShiftInt16x8(Imm32 count, FloatRegister src,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void leftShiftInt32x4(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);
  inline void leftShiftInt32x4(FloatRegister lhs, Register rhs,
                               FloatRegister dest) DEFINED_ON(arm64);
  inline void leftShiftInt32x4(Imm32 count, FloatRegister src,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void leftShiftInt64x2(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);
  inline void leftShiftInt64x2(FloatRegister lhs, Register rhs,
                               FloatRegister dest) DEFINED_ON(arm64);
  inline void leftShiftInt64x2(Imm32 count, FloatRegister src,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Right shift by scalar. Immediates and variable shifts must have been
  // masked; shifts of zero will work but may or may not generate code.

  inline void rightShiftInt8x16(Register rhs, FloatRegister lhsDest,
                                FloatRegister temp) DEFINED_ON(x86_shared);
  inline void rightShiftInt8x16(FloatRegister lhs, Register rhs,
                                FloatRegister dest) DEFINED_ON(arm64);
  inline void rightShiftInt8x16(Imm32 count, FloatRegister src,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedRightShiftInt8x16(Register rhs, FloatRegister lhsDest,
                                        FloatRegister temp)
      DEFINED_ON(x86_shared);
  inline void unsignedRightShiftInt8x16(FloatRegister lhs, Register rhs,
                                        FloatRegister dest) DEFINED_ON(arm64);
  inline void unsignedRightShiftInt8x16(Imm32 count, FloatRegister src,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void rightShiftInt16x8(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);
  inline void rightShiftInt16x8(FloatRegister lhs, Register rhs,
                                FloatRegister dest) DEFINED_ON(arm64);
  inline void rightShiftInt16x8(Imm32 count, FloatRegister src,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedRightShiftInt16x8(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);
  inline void unsignedRightShiftInt16x8(FloatRegister lhs, Register rhs,
                                        FloatRegister dest) DEFINED_ON(arm64);
  inline void unsignedRightShiftInt16x8(Imm32 count, FloatRegister src,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void rightShiftInt32x4(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);
  inline void rightShiftInt32x4(FloatRegister lhs, Register rhs,
                                FloatRegister dest) DEFINED_ON(arm64);
  inline void rightShiftInt32x4(Imm32 count, FloatRegister src,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedRightShiftInt32x4(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);
  inline void unsignedRightShiftInt32x4(FloatRegister lhs, Register rhs,
                                        FloatRegister dest) DEFINED_ON(arm64);
  inline void unsignedRightShiftInt32x4(Imm32 count, FloatRegister src,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void rightShiftInt64x2(Register rhs, FloatRegister lhsDest,
                                FloatRegister temp) DEFINED_ON(x86_shared);
  inline void rightShiftInt64x2(Imm32 count, FloatRegister src,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void rightShiftInt64x2(FloatRegister lhs, Register rhs,
                                FloatRegister dest) DEFINED_ON(arm64);
  inline void unsignedRightShiftInt64x2(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);
  inline void unsignedRightShiftInt64x2(FloatRegister lhs, Register rhs,
                                        FloatRegister dest) DEFINED_ON(arm64);
  inline void unsignedRightShiftInt64x2(Imm32 count, FloatRegister src,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Sign replication operation

  inline void signReplicationInt8x16(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared);
  inline void signReplicationInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared);
  inline void signReplicationInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared);
  inline void signReplicationInt64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared);

  // Bitwise and, or, xor, not

  inline void bitwiseAndSimd128(FloatRegister rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);
  inline void bitwiseAndSimd128(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void bitwiseAndSimd128(FloatRegister lhs, const SimdConstant& rhs,
                                FloatRegister dest) DEFINED_ON(x86_shared);
  inline void bitwiseOrSimd128(FloatRegister rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);
  inline void bitwiseOrSimd128(FloatRegister lhs, FloatRegister rhs,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void bitwiseOrSimd128(FloatRegister lhs, const SimdConstant& rhs,
                               FloatRegister dest) DEFINED_ON(x86_shared);
  inline void bitwiseXorSimd128(FloatRegister rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);
  inline void bitwiseXorSimd128(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void bitwiseXorSimd128(FloatRegister lhs, const SimdConstant& rhs,
                                FloatRegister dest) DEFINED_ON(x86_shared);
  inline void bitwiseNotSimd128(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Bitwise AND with complement: dest = lhs & ~rhs. Note only arm64 can do it.
  inline void bitwiseAndNotSimd128(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister lhsDest) DEFINED_ON(arm64);

  // Bitwise AND with complement: dest = ~lhs & rhs, note this is not what Wasm
  // wants but what the x86 hardware offers. Hence the name.

  inline void bitwiseNotAndSimd128(FloatRegister rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);
  inline void bitwiseNotAndSimd128(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister lhsDest)
      DEFINED_ON(x86_shared);
  // Bitwise select

  inline void bitwiseSelectSimd128(FloatRegister mask, FloatRegister onTrue,
                                   FloatRegister onFalse, FloatRegister dest,
                                   FloatRegister temp) DEFINED_ON(x86_shared);
  inline void bitwiseSelectSimd128(FloatRegister onTrue, FloatRegister onFalse,
                                   FloatRegister maskDest) DEFINED_ON(arm64);

  // Population count

  inline void popcntInt8x16(FloatRegister src, FloatRegister dest,
                            FloatRegister temp) DEFINED_ON(x86_shared);
  inline void popcntInt8x16(FloatRegister src, FloatRegister dest)
      DEFINED_ON(arm64);

  // Any lane true, ie, any bit set

  inline void anyTrueSimd128(FloatRegister src, Register dest)
      DEFINED_ON(x86_shared, arm64);

  // All lanes true

  inline void allTrueInt8x16(FloatRegister src, Register dest)
      DEFINED_ON(x86_shared, arm64);
  inline void allTrueInt16x8(FloatRegister src, Register dest)
      DEFINED_ON(x86_shared, arm64);
  inline void allTrueInt32x4(FloatRegister src, Register dest)
      DEFINED_ON(x86_shared, arm64);
  inline void allTrueInt64x2(FloatRegister src, Register dest)
      DEFINED_ON(x86_shared, arm64);

  // Bitmask, ie extract and compress high bits of all lanes

  inline void bitmaskInt8x16(FloatRegister src, Register dest)
      DEFINED_ON(x86_shared);
  inline void bitmaskInt8x16(FloatRegister src, Register dest,
                             FloatRegister temp) DEFINED_ON(arm64);
  inline void bitmaskInt16x8(FloatRegister src, Register dest)
      DEFINED_ON(x86_shared);
  inline void bitmaskInt16x8(FloatRegister src, Register dest,
                             FloatRegister temp) DEFINED_ON(arm64);
  inline void bitmaskInt32x4(FloatRegister src, Register dest)
      DEFINED_ON(x86_shared);
  inline void bitmaskInt32x4(FloatRegister src, Register dest,
                             FloatRegister temp) DEFINED_ON(arm64);
  inline void bitmaskInt64x2(FloatRegister src, Register dest)
      DEFINED_ON(x86_shared);
  inline void bitmaskInt64x2(FloatRegister src, Register dest,
                             FloatRegister temp) DEFINED_ON(arm64);
  // Comparisons (integer and floating-point)

  inline void compareInt8x16(Assembler::Condition cond, FloatRegister rhs,
                             FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  // On x86_shared, limited to !=, ==, <=, >
  inline void compareInt8x16(Assembler::Condition cond, FloatRegister lhs,
                             const SimdConstant& rhs, FloatRegister dest)
      DEFINED_ON(x86_shared);

  // On arm64, use any integer comparison condition.
  inline void compareInt8x16(Assembler::Condition cond, FloatRegister lhs,
                             FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void compareInt16x8(Assembler::Condition cond, FloatRegister rhs,
                             FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);
  inline void compareInt16x8(Assembler::Condition cond, FloatRegister lhs,
                             FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // On x86_shared, limited to !=, ==, <=, >
  inline void compareInt16x8(Assembler::Condition cond, FloatRegister lhs,
                             const SimdConstant& rhs, FloatRegister dest)
      DEFINED_ON(x86_shared);

  // On x86_shared, limited to !=, ==, <=, >
  inline void compareInt32x4(Assembler::Condition cond, FloatRegister rhs,
                             FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);
  inline void compareInt32x4(Assembler::Condition cond, FloatRegister lhs,
                             const SimdConstant& rhs, FloatRegister dest)
      DEFINED_ON(x86_shared);

  // On arm64, use any integer comparison condition.
  inline void compareInt32x4(Assembler::Condition cond, FloatRegister lhs,
                             FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void compareForEqualityInt64x2(Assembler::Condition cond,
                                        FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared);
  inline void compareForOrderingInt64x2(Assembler::Condition cond,
                                        FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest,
                                        FloatRegister temp1,
                                        FloatRegister temp2)
      DEFINED_ON(x86_shared);
  inline void compareInt64x2(Assembler::Condition cond, FloatRegister rhs,
                             FloatRegister lhsDest) DEFINED_ON(arm64);
  inline void compareInt64x2(Assembler::Condition cond, FloatRegister lhs,
                             FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(arm64);

  inline void compareFloat32x4(Assembler::Condition cond, FloatRegister rhs,
                               FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  // On x86_shared, limited to ==, !=, <, <=
  inline void compareFloat32x4(Assembler::Condition cond, FloatRegister lhs,
                               const SimdConstant& rhs, FloatRegister dest)
      DEFINED_ON(x86_shared);

  // On x86_shared, limited to ==, !=, <, <=
  // On arm64, use any floating-point comparison condition.
  inline void compareFloat32x4(Assembler::Condition cond, FloatRegister lhs,
                               FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void compareFloat64x2(Assembler::Condition cond, FloatRegister rhs,
                               FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  // On x86_shared, limited to ==, !=, <, <=
  inline void compareFloat64x2(Assembler::Condition cond, FloatRegister lhs,
                               const SimdConstant& rhs, FloatRegister dest)
      DEFINED_ON(x86_shared);

  // On x86_shared, limited to ==, !=, <, <=
  // On arm64, use any floating-point comparison condition.
  inline void compareFloat64x2(Assembler::Condition cond, FloatRegister lhs,
                               FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Load

  inline void loadUnalignedSimd128(const Operand& src, FloatRegister dest)
      DEFINED_ON(x86_shared);
  inline FaultingCodeOffset loadUnalignedSimd128(const Address& src,
                                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline FaultingCodeOffset loadUnalignedSimd128(const BaseIndex& src,
                                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Store

  inline FaultingCodeOffset storeUnalignedSimd128(FloatRegister src,
                                                  const Address& dest)
      DEFINED_ON(x86_shared, arm64);
  inline FaultingCodeOffset storeUnalignedSimd128(FloatRegister src,
                                                  const BaseIndex& dest)
      DEFINED_ON(x86_shared, arm64);

  // Floating point negation

  inline void negFloat32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void negFloat64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Floating point absolute value

  inline void absFloat32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void absFloat64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // NaN-propagating minimum

  inline void minFloat32x4(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest, FloatRegister temp1,
                           FloatRegister temp2) DEFINED_ON(x86_shared);
  inline void minFloat32x4(FloatRegister rhs, FloatRegister lhsDest)
      DEFINED_ON(arm64);
  inline void minFloat32x4(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest) DEFINED_ON(arm64);
  inline void minFloat64x2(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest, FloatRegister temp1,
                           FloatRegister temp2) DEFINED_ON(x86_shared);
  inline void minFloat64x2(FloatRegister rhs, FloatRegister lhsDest)
      DEFINED_ON(arm64);
  inline void minFloat64x2(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest) DEFINED_ON(arm64);

  // NaN-propagating maximum

  inline void maxFloat32x4(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest, FloatRegister temp1,
                           FloatRegister temp2) DEFINED_ON(x86_shared);
  inline void maxFloat32x4(FloatRegister rhs, FloatRegister lhsDest)
      DEFINED_ON(arm64);
  inline void maxFloat32x4(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest) DEFINED_ON(arm64);
  inline void maxFloat64x2(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest, FloatRegister temp1,
                           FloatRegister temp2) DEFINED_ON(x86_shared);
  inline void maxFloat64x2(FloatRegister rhs, FloatRegister lhsDest)
      DEFINED_ON(arm64);
  inline void maxFloat64x2(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest) DEFINED_ON(arm64);
  // Floating add

  inline void addFloat32x4(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void addFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared);
  inline void addFloat64x2(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void addFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared);

  // Floating subtract

  inline void subFloat32x4(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void subFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared);
  inline void subFloat64x2(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void subFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared);

  // Floating division

  inline void divFloat32x4(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void divFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared);
  inline void divFloat64x2(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void divFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared);

  // Floating Multiply

  inline void mulFloat32x4(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void mulFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared);
  inline void mulFloat64x2(FloatRegister lhs, FloatRegister rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void mulFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
                           FloatRegister dest) DEFINED_ON(x86_shared);

  // Pairwise add

  inline void extAddPairwiseInt8x16(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedExtAddPairwiseInt8x16(FloatRegister src,
                                            FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void extAddPairwiseInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedExtAddPairwiseInt16x8(FloatRegister src,
                                            FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Floating square root

  inline void sqrtFloat32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void sqrtFloat64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Integer to floating point with rounding

  inline void convertInt32x4ToFloat32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedConvertInt32x4ToFloat32x4(FloatRegister src,
                                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void convertInt32x4ToFloat64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedConvertInt32x4ToFloat64x2(FloatRegister src,
                                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Floating point to integer with saturation

  inline void truncSatFloat32x4ToInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedTruncSatFloat32x4ToInt32x4(FloatRegister src,
                                                 FloatRegister dest,
                                                 FloatRegister temp)
      DEFINED_ON(x86_shared);
  inline void unsignedTruncSatFloat32x4ToInt32x4(FloatRegister src,
                                                 FloatRegister dest)
      DEFINED_ON(arm64);
  inline void truncSatFloat64x2ToInt32x4(FloatRegister src, FloatRegister dest,
                                         FloatRegister temp)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedTruncSatFloat64x2ToInt32x4(FloatRegister src,
                                                 FloatRegister dest,
                                                 FloatRegister temp)
      DEFINED_ON(x86_shared, arm64);

  inline void truncFloat32x4ToInt32x4Relaxed(FloatRegister src,
                                             FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedTruncFloat32x4ToInt32x4Relaxed(FloatRegister src,
                                                     FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void truncFloat64x2ToInt32x4Relaxed(FloatRegister src,
                                             FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedTruncFloat64x2ToInt32x4Relaxed(FloatRegister src,
                                                     FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Floating point narrowing

  inline void convertFloat64x2ToFloat32x4(FloatRegister src,
                                          FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Floating point widening

  inline void convertFloat32x4ToFloat64x2(FloatRegister src,
                                          FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Integer to integer narrowing

  inline void narrowInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared);
  inline void narrowInt16x8(FloatRegister lhs, FloatRegister rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void unsignedNarrowInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared);
  inline void unsignedNarrowInt16x8(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void narrowInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared);
  inline void narrowInt32x4(FloatRegister lhs, FloatRegister rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared, arm64);
  inline void unsignedNarrowInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared);
  inline void unsignedNarrowInt32x4(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Integer to integer widening

  inline void widenLowInt8x16(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void widenHighInt8x16(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedWidenLowInt8x16(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedWidenHighInt8x16(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void widenLowInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void widenHighInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedWidenLowInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedWidenHighInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void widenLowInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedWidenLowInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void widenHighInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void unsignedWidenHighInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  // Compare-based minimum/maximum
  //
  // On x86, the signature is (rhsDest, lhs); on arm64 it is (rhs, lhsDest).
  //
  // The masm preprocessor can't deal with multiple declarations with identical
  // signatures even if they are on different platforms, hence the weird
  // argument naming.
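  //
  // For example (a hypothetical sketch of the two calling conventions):
  //
  //   // x86: rhsDest = min(lhs, rhsDest)
  //   masm.pseudoMinFloat32x4(rhsDest, lhs);
  //   // arm64: lhsDest = min(lhsDest, rhs)
  //   masm.pseudoMinFloat32x4(rhs, lhsDest);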
  inline void pseudoMinFloat32x4(FloatRegister rhsOrRhsDest,
                                 FloatRegister lhsOrLhsDest)
      DEFINED_ON(x86_shared, arm64);
  inline void pseudoMinFloat32x4(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void pseudoMinFloat64x2(FloatRegister rhsOrRhsDest,
                                 FloatRegister lhsOrLhsDest)
      DEFINED_ON(x86_shared, arm64);
  inline void pseudoMinFloat64x2(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void pseudoMaxFloat32x4(FloatRegister rhsOrRhsDest,
                                 FloatRegister lhsOrLhsDest)
      DEFINED_ON(x86_shared, arm64);
  inline void pseudoMaxFloat32x4(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void pseudoMaxFloat64x2(FloatRegister rhsOrRhsDest,
                                 FloatRegister lhsOrLhsDest)
      DEFINED_ON(x86_shared, arm64);
  inline void pseudoMaxFloat64x2(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Widening/pairwise integer dot product

  inline void widenDotInt16x8(FloatRegister lhs, FloatRegister rhs,
                              FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void widenDotInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                              FloatRegister dest) DEFINED_ON(x86_shared);
  inline void dotInt8x16Int7x16(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void dotInt8x16Int7x16ThenAdd(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest)
      DEFINED_ON(x86_shared);
  inline void dotInt8x16Int7x16ThenAdd(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest, FloatRegister temp)
      DEFINED_ON(arm64);
  // Floating point rounding

  inline void ceilFloat32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void ceilFloat64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void floorFloat32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void floorFloat64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void truncFloat32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void truncFloat64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void nearestFloat32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void nearestFloat64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Floating multiply-accumulate: srcDest [+-]= src1 * src2
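  //
  // For instance (a sketch, not an existing call site), accumulating a
  // lane-wise product into `acc`:
  //
  //   masm.fmaFloat32x4(a, b, acc);   // per lane: acc += a * b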
  inline void fmaFloat32x4(FloatRegister src1, FloatRegister src2,
                           FloatRegister srcDest)
      DEFINED_ON(x86_shared, arm64);
  inline void fnmaFloat32x4(FloatRegister src1, FloatRegister src2,
                            FloatRegister srcDest)
      DEFINED_ON(x86_shared, arm64);
  inline void fmaFloat64x2(FloatRegister src1, FloatRegister src2,
                           FloatRegister srcDest)
      DEFINED_ON(x86_shared, arm64);
  inline void fnmaFloat64x2(FloatRegister src1, FloatRegister src2,
                            FloatRegister srcDest)
      DEFINED_ON(x86_shared, arm64);

  inline void minFloat32x4Relaxed(FloatRegister src, FloatRegister srcDest)
      DEFINED_ON(x86_shared, arm64);
  inline void minFloat32x4Relaxed(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void maxFloat32x4Relaxed(FloatRegister src, FloatRegister srcDest)
      DEFINED_ON(x86_shared, arm64);
  inline void maxFloat32x4Relaxed(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void minFloat64x2Relaxed(FloatRegister src, FloatRegister srcDest)
      DEFINED_ON(x86_shared, arm64);
  inline void minFloat64x2Relaxed(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void maxFloat64x2Relaxed(FloatRegister src, FloatRegister srcDest)
      DEFINED_ON(x86_shared, arm64);
  inline void maxFloat64x2Relaxed(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void q15MulrInt16x8Relaxed(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // ========================================================================
  // Truncate floating point.

  // Undefined behaviour when truncation is outside Int64 range.
  // Needs a temp register if SSE3 is not present.
  inline void truncateFloat32ToInt64(Address src, Address dest, Register temp)
      DEFINED_ON(x86_shared);
  inline void truncateFloat32ToUInt64(Address src, Address dest, Register temp,
                                      FloatRegister floatTemp)
      DEFINED_ON(x86, x64);
  inline void truncateDoubleToInt64(Address src, Address dest, Register temp)
      DEFINED_ON(x86_shared);
  inline void truncateDoubleToUInt64(Address src, Address dest, Register temp,
                                     FloatRegister floatTemp)
      DEFINED_ON(x86, x64);
  // ========================================================================
  // Convert floating point.

  // temp required on x86 and x64; must be undefined on mips64 and loong64.
  void convertUInt64ToFloat32(Register64 src, FloatRegister dest,
                              Register temp)
      DEFINED_ON(arm64, mips64, loong64, riscv64, wasm32, x64, x86);

  void convertInt64ToFloat32(Register64 src, FloatRegister dest)
      DEFINED_ON(arm64, mips64, loong64, riscv64, wasm32, x64, x86);

  bool convertUInt64ToDoubleNeedsTemp() PER_ARCH;

  // temp required when convertUInt64ToDoubleNeedsTemp() returns true.
  void convertUInt64ToDouble(Register64 src, FloatRegister dest,
                             Register temp) PER_ARCH;

  void convertInt64ToDouble(Register64 src, FloatRegister dest) PER_ARCH;

  void convertIntPtrToDouble(Register src, FloatRegister dest) PER_ARCH;

  // ========================================================================
  // wasm support

  FaultingCodeOffset wasmTrapInstruction() PER_SHARED_ARCH;

  void wasmTrap(wasm::Trap trap, wasm::BytecodeOffset bytecodeOffset);

  // Load all pinned regs via InstanceReg. If the trapOffset is something,
  // give the first load a trap descriptor with type IndirectCallToNull, so
  // that a null instance will cause a trap.
  void loadWasmPinnedRegsFromInstance(
      mozilla::Maybe<wasm::BytecodeOffset> trapOffset = mozilla::Nothing());

  // Returns a pair: the offset of the undefined (trapping) instruction, and
  // the number of extra bytes of stack allocated prior to the trap
  // instruction proper.
  std::pair<CodeOffset, uint32_t> wasmReserveStackChecked(
      uint32_t amount, wasm::BytecodeOffset trapOffset);
  // Emit a bounds check against the wasm heap limit, jumping to 'ok' if 'cond'
  // holds; this can be the label either of the access or of the trap. The
  // label should name a code position greater than the position of the bounds
  // check.
  //
  // If JitOptions.spectreMaskIndex is true, a no-op speculation barrier is
  // emitted in the code stream after the check to prevent an OOB access from
  // being executed speculatively. (On current tier-1 platforms the barrier is
  // a conditional saturation of 'index' to 'boundsCheckLimit', using the same
  // condition as the check.) If the condition is such that the bounds check
  // branches out of line to the trap, the barrier will actually be executed
  // when the bounds check passes.
  //
  // On 32-bit systems for both wasm and asm.js, and on 64-bit systems for
  // asm.js, heap lengths are limited to 2GB. On 64-bit systems for wasm,
  // 32-bit heap lengths are limited to 4GB, and 64-bit heap lengths will be
  // limited to something much larger.
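  //
  // A hypothetical use in a codegen path (sketch only; register and offset
  // names are illustrative): branch to 'ok' when the index is in bounds,
  // otherwise fall through to the trap.
  //
  //   Label ok;
  //   masm.wasmBoundsCheck32(Assembler::Below, indexReg, limitReg, &ok);
  //   masm.wasmTrap(wasm::Trap::OutOfBounds, bytecodeOffset);
  //   masm.bind(&ok);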
  void wasmBoundsCheck32(Condition cond, Register index,
                         Register boundsCheckLimit, Label* ok)
      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, loong64, riscv64,
                 wasm32);

  void wasmBoundsCheck32(Condition cond, Register index,
                         Address boundsCheckLimit, Label* ok)
      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, loong64, riscv64,
                 wasm32);

  void wasmBoundsCheck64(Condition cond, Register64 index,
                         Register64 boundsCheckLimit, Label* ok)
      DEFINED_ON(arm64, mips64, x64, x86, arm, loong64, riscv64, wasm32);

  void wasmBoundsCheck64(Condition cond, Register64 index,
                         Address boundsCheckLimit, Label* ok)
      DEFINED_ON(arm64, mips64, x64, x86, arm, loong64, riscv64, wasm32);

  // Each wasm load/store instruction appends its own wasm::Trap::OutOfBounds.
  void wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr,
                AnyRegister out) DEFINED_ON(x86, x64);
  void wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr,
                   Register64 out) DEFINED_ON(x86, x64);
  void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
                 Operand dstAddr) DEFINED_ON(x86, x64);
  void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
                    Operand dstAddr) DEFINED_ON(x86);
  // For all the ARM/MIPS/LOONG64 wasmLoad and wasmStore functions below, `ptr`
  // MUST equal `ptrScratch`, and that register will be updated based on
  // conditions listed below (where it is only mentioned as `ptr`).

  // `ptr` will be updated if access.offset() != 0 or access.type() ==
  // Scalar::Int64.
  void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase,
                Register ptr, Register ptrScratch, AnyRegister output)
      DEFINED_ON(arm, loong64, riscv64, mips_shared);
  void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase,
                   Register ptr, Register ptrScratch, Register64 output)
      DEFINED_ON(arm, mips32, mips64, loong64, riscv64);
  void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
                 Register memoryBase, Register ptr, Register ptrScratch)
      DEFINED_ON(arm, loong64, riscv64, mips_shared);
  void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
                    Register memoryBase, Register ptr, Register ptrScratch)
      DEFINED_ON(arm, mips32, mips64, loong64, riscv64);

  // These accept general memoryBase + ptr + offset (in `access`); the offset
  // is always smaller than the guard region. They will insert an additional
  // add if the offset is nonzero, and of course that add may require a
  // temporary register for the offset if the offset is large, and
  // instructions to set it up.
  void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase,
                Register ptr, AnyRegister output) DEFINED_ON(arm64);
  void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase,
                   Register ptr, Register64 output) DEFINED_ON(arm64);
  void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
                 Register memoryBase, Register ptr) DEFINED_ON(arm64);
  void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
                    Register memoryBase, Register ptr) DEFINED_ON(arm64);

  // `ptr` will always be updated.
  void wasmUnalignedLoad(const wasm::MemoryAccessDesc& access,
                         Register memoryBase, Register ptr,
                         Register ptrScratch, Register output, Register tmp)
      DEFINED_ON(mips32, mips64);

  // MIPS: `ptr` will always be updated.
  void wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access,
                           Register memoryBase, Register ptr,
                           Register ptrScratch, FloatRegister output,
                           Register tmp1) DEFINED_ON(mips32, mips64);

  // `ptr` will always be updated.
  void wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
                            Register memoryBase, Register ptr,
                            Register ptrScratch, Register64 output,
                            Register tmp) DEFINED_ON(mips32, mips64);

  // MIPS: `ptr` will always be updated.
  void wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value,
                          Register memoryBase, Register ptr,
                          Register ptrScratch, Register tmp)
      DEFINED_ON(mips32, mips64);

  // `ptr` will always be updated.
  void wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access,
                            FloatRegister floatValue, Register memoryBase,
                            Register ptr, Register ptrScratch, Register tmp)
      DEFINED_ON(mips32, mips64);

  // `ptr` will always be updated.
  void wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
                             Register64 value, Register memoryBase,
                             Register ptr, Register ptrScratch, Register tmp)
      DEFINED_ON(mips32, mips64);
  // wasm specific methods, used in both the wasm baseline compiler and ion.

  // The truncate-to-int32 methods do not bind the rejoin label; clients must
  // do so if oolWasmTruncateCheckF64ToI32() can jump to it.
  void wasmTruncateDoubleToUInt32(FloatRegister input, Register output,
                                  bool isSaturating,
                                  Label* oolEntry) PER_ARCH;
  void wasmTruncateDoubleToInt32(FloatRegister input, Register output,
                                 bool isSaturating,
                                 Label* oolEntry) PER_SHARED_ARCH;
  void oolWasmTruncateCheckF64ToI32(FloatRegister input, Register output,
                                    TruncFlags flags, wasm::BytecodeOffset off,
                                    Label* rejoin)
      DEFINED_ON(arm, arm64, x86_shared, mips_shared, loong64, riscv64,
                 wasm32);

  void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output,
                                   bool isSaturating,
                                   Label* oolEntry) PER_ARCH;
  void wasmTruncateFloat32ToInt32(FloatRegister input, Register output,
                                  bool isSaturating,
                                  Label* oolEntry) PER_SHARED_ARCH;
  void oolWasmTruncateCheckF32ToI32(FloatRegister input, Register output,
                                    TruncFlags flags, wasm::BytecodeOffset off,
                                    Label* rejoin)
      DEFINED_ON(arm, arm64, x86_shared, mips_shared, loong64, riscv64,
                 wasm32);

  // The truncate-to-int64 methods will always bind the `oolRejoin` label
  // after the last emitted instruction.
  void wasmTruncateDoubleToInt64(FloatRegister input, Register64 output,
                                 bool isSaturating, Label* oolEntry,
                                 Label* oolRejoin, FloatRegister tempDouble)
      DEFINED_ON(arm64, x86, x64, mips64, loong64, riscv64, wasm32);
  void wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output,
                                  bool isSaturating, Label* oolEntry,
                                  Label* oolRejoin, FloatRegister tempDouble)
      DEFINED_ON(arm64, x86, x64, mips64, loong64, riscv64, wasm32);
  void oolWasmTruncateCheckF64ToI64(FloatRegister input, Register64 output,
                                    TruncFlags flags, wasm::BytecodeOffset off,
                                    Label* rejoin)
      DEFINED_ON(arm, arm64, x86_shared, mips_shared, loong64, riscv64,
                 wasm32);
  void wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output,
                                  bool isSaturating, Label* oolEntry,
                                  Label* oolRejoin, FloatRegister tempDouble)
      DEFINED_ON(arm64, x86, x64, mips64, loong64, riscv64, wasm32);
  void wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output,
                                   bool isSaturating, Label* oolEntry,
                                   Label* oolRejoin, FloatRegister tempDouble)
      DEFINED_ON(arm64, x86, x64, mips64, loong64, riscv64, wasm32);
  void oolWasmTruncateCheckF32ToI64(FloatRegister input, Register64 output,
                                    TruncFlags flags, wasm::BytecodeOffset off,
                                    Label* rejoin)
      DEFINED_ON(arm, arm64, x86_shared, mips_shared, loong64, riscv64,
                 wasm32);
  // This function takes care of loading the callee's instance and pinned regs
  // but it is the caller's responsibility to save/restore instance or pinned
  // registers.
  CodeOffset wasmCallImport(const wasm::CallSiteDesc& desc,
                            const wasm::CalleeDesc& callee);

#ifdef ENABLE_WASM_TAIL_CALLS
  CodeOffset wasmReturnCallImport(const wasm::CallSiteDesc& desc,
                                  const wasm::CalleeDesc& callee,
                                  const ReturnCallAdjustmentInfo& retCallInfo);

  CodeOffset wasmReturnCall(const wasm::CallSiteDesc& desc,
                            uint32_t funcDefIndex,
                            const ReturnCallAdjustmentInfo& retCallInfo);

  void wasmCollapseFrameSlow(const ReturnCallAdjustmentInfo& retCallInfo,
                             wasm::CallSiteDesc desc);

  void wasmCollapseFrameFast(const ReturnCallAdjustmentInfo& retCallInfo);

  void wasmCheckSlowCallsite(Register ra, Label* notSlow, Register temp1,
                             Register temp2)
      DEFINED_ON(x86, x64, arm, arm64, loong64, mips64, riscv64);

  void wasmMarkSlowCall()
      DEFINED_ON(x86, x64, arm, arm64, loong64, mips64, riscv64);
#endif  // ENABLE_WASM_TAIL_CALLS

  // WasmTableCallIndexReg must contain the index of the indirect call. This is
  // for wasm calls only.
  //
  // Indirect calls use a dual-path mechanism where a run-time test determines
  // whether a context switch is needed (slow path) or not (fast path). This
  // gives rise to two call instructions, both of which need safe points. As
  // per normal, the call offsets are the code offsets at the end of the call
  // instructions (the return points).
  //
  // `boundsCheckFailedLabel` is non-null iff a bounds check is required.
  // `nullCheckFailedLabel` is non-null only on platforms that can't fold the
  // null check into the rest of the call instructions.
  void wasmCallIndirect(const wasm::CallSiteDesc& desc,
                        const wasm::CalleeDesc& callee,
                        Label* boundsCheckFailedLabel,
                        Label* nullCheckFailedLabel,
                        mozilla::Maybe<uint32_t> tableSize,
                        CodeOffset* fastCallOffset,
                        CodeOffset* slowCallOffset);
#ifdef ENABLE_WASM_TAIL_CALLS
  // WasmTableCallIndexReg must contain the index of the indirect call. This is
  // for wasm calls only.
  //
  // `boundsCheckFailedLabel` is non-null iff a bounds check is required.
  // `nullCheckFailedLabel` is non-null only on platforms that can't fold the
  // null check into the rest of the call instructions.
  void wasmReturnCallIndirect(const wasm::CallSiteDesc& desc,
                              const wasm::CalleeDesc& callee,
                              Label* boundsCheckFailedLabel,
                              Label* nullCheckFailedLabel,
                              mozilla::Maybe<uint32_t> tableSize,
                              const ReturnCallAdjustmentInfo& retCallInfo);
#endif  // ENABLE_WASM_TAIL_CALLS

  // This function takes care of loading the callee's instance and address from
  // the function reference.
  void wasmCallRef(const wasm::CallSiteDesc& desc,
                   const wasm::CalleeDesc& callee, CodeOffset* fastCallOffset,
                   CodeOffset* slowCallOffset);

#ifdef ENABLE_WASM_TAIL_CALLS
  void wasmReturnCallRef(const wasm::CallSiteDesc& desc,
                         const wasm::CalleeDesc& callee,
                         const ReturnCallAdjustmentInfo& retCallInfo);
#endif  // ENABLE_WASM_TAIL_CALLS

  // WasmTableCallIndexReg must contain the index of the indirect call.
  // This is for asm.js calls only.
  CodeOffset asmCallIndirect(const wasm::CallSiteDesc& desc,
                             const wasm::CalleeDesc& callee);

  // This function takes care of loading the pointer to the current instance
  // as the implicit first argument. It preserves instance and pinned registers
  // (instance & pinned regs are non-volatile registers in the system ABI).
  CodeOffset wasmCallBuiltinInstanceMethod(const wasm::CallSiteDesc& desc,
                                           const ABIArg& instanceArg,
                                           wasm::SymbolicAddress builtin,
                                           wasm::FailureMode failureMode);

  // Performs a bounds check for ranged wasm operations like memory.fill or
  // array.fill. This handles the bizarre edge case in the wasm spec where a
  // write to index N is valid as long as the length is zero - despite the
  // index itself being out of bounds.
  //
  // `length` and `limit` will be unchanged.
  void wasmBoundsCheckRange32(Register index, Register length, Register limit,
                              Register tmp,
                              wasm::BytecodeOffset bytecodeOffset);
  // Perform a subtype check that `ref` is a subtype of `type`, branching to
  // `label` depending on `onSuccess`. `type` must be in the `any` hierarchy.
  //
  // `superSTV` is required iff the destination type is a concrete
  // type. `scratch1` is required iff the destination type is eq or lower and
  // not none. `scratch2` is required iff the destination type is a concrete
  // type and its `subTypingDepth` is >= wasm::MinSuperTypeVectorLength.
  //
  // `ref` and `superSTV` are preserved. Scratch registers are
  // clobbered.
  void branchWasmRefIsSubtypeAny(Register ref, wasm::RefType sourceType,
                                 wasm::RefType destType, Label* label,
                                 bool onSuccess, Register superSTV,
                                 Register scratch1, Register scratch2);
  static bool needScratch1ForBranchWasmRefIsSubtypeAny(wasm::RefType type);
  static bool needScratch2ForBranchWasmRefIsSubtypeAny(wasm::RefType type);
  static bool needSuperSTVForBranchWasmRefIsSubtypeAny(wasm::RefType type);

  // Perform a subtype check that `ref` is a subtype of `type`, branching to
  // `label` depending on `onSuccess`. `type` must be in the `func` hierarchy.
  //
  // `superSTV` and `scratch1` are required iff the destination type
  // is a concrete type (not func and not nofunc). `scratch2` is required iff
  // the destination type is a concrete type and its `subTypingDepth` is >=
  // wasm::MinSuperTypeVectorLength.
  //
  // `ref` and `superSTV` are preserved. Scratch registers are
  // clobbered.
  void branchWasmRefIsSubtypeFunc(Register ref, wasm::RefType sourceType,
                                  wasm::RefType destType, Label* label,
                                  bool onSuccess, Register superSTV,
                                  Register scratch1, Register scratch2);
  static bool needSuperSTVAndScratch1ForBranchWasmRefIsSubtypeFunc(
      wasm::RefType type);
  static bool needScratch2ForBranchWasmRefIsSubtypeFunc(wasm::RefType type);

  // Perform a subtype check that `ref` is a subtype of `destType`, branching
  // to `label` depending on `onSuccess`. `type` must be in the `extern`
  // hierarchy.
  void branchWasmRefIsSubtypeExtern(Register ref, wasm::RefType sourceType,
                                    wasm::RefType destType, Label* label,
                                    bool onSuccess);

  // Perform a subtype check that `ref` is a subtype of `destType`, branching
  // to `label` depending on `onSuccess`. `type` must be in the `exn`
  // hierarchy.
  void branchWasmRefIsSubtypeExn(Register ref, wasm::RefType sourceType,
                                 wasm::RefType destType, Label* label,
                                 bool onSuccess);

  // Perform a subtype check that `subSTV` is a subtype of `superSTV`,
  // branching to `label` depending on `onSuccess`. This method is a
  // specialization of the general `wasm::TypeDef::isSubTypeOf` method for the
  // case where the `superSTV` is statically known, which is the case for all
  // wasm instructions.
  //
  // `scratch` is required iff the `superDepth` is >=
  // wasm::MinSuperTypeVectorLength. `subSTV` is clobbered by this method.
  // `superSTV` is preserved.
  void branchWasmSTVIsSubtype(Register subSTV, Register superSTV,
                              Register scratch, uint32_t superDepth,
                              Label* label, bool onSuccess);

  // Same as branchWasmSTVIsSubtype, but looks up a dynamic position in the
  // super type vector.
  //
  // `scratch` is always required. `subSTV` and `superDepth` are clobbered.
  // `superSTV` is preserved.
  void branchWasmSTVIsSubtypeDynamicDepth(Register subSTV, Register superSTV,
                                          Register superDepth,
                                          Register scratch, Label* label,
                                          bool onSuccess);

  // Branch if the wasm anyref `src` is or is not the null value.
  void branchWasmAnyRefIsNull(bool isNull, Register src, Label* label);
  // Branch if the wasm anyref `src` is or is not an I31.
  void branchWasmAnyRefIsI31(bool isI31, Register src, Label* label);
  // Branch if the wasm anyref `src` is or is not a JSObject*.
  void branchWasmAnyRefIsObjectOrNull(bool isObject, Register src,
                                      Label* label);
  // Branch if the wasm anyref `src` is or is not a GC thing.
  void branchWasmAnyRefIsGCThing(bool isGCThing, Register src, Label* label);
  // Branch if the wasm anyref `src` is or is not pointing to a nursery cell.
  void branchWasmAnyRefIsNurseryCell(bool isNurseryCell, Register src,
                                     Register scratch, Label* label);

  // Create a wasm i31ref by truncating the 32-bit integer.
  void truncate32ToWasmI31Ref(Register src, Register dest);
  // Convert a wasm i31ref to a signed 32-bit integer.
  void convertWasmI31RefTo32Signed(Register src, Register dest);
  // Convert a wasm i31ref to an unsigned 32-bit integer.
  void convertWasmI31RefTo32Unsigned(Register src, Register dest);
  // Branch if the JS value `src` would need to be boxed out of line to be
  // converted to a wasm anyref.
  void branchValueConvertsToWasmAnyRefInline(ValueOperand src,
                                             Register scratchInt,
                                             FloatRegister scratchFloat,
                                             Label* label);
  // Convert a JS value to a wasm anyref. If the value requires boxing, this
  // will branch to `oolConvert`.
  void convertValueToWasmAnyRef(ValueOperand src, Register dest,
                                FloatRegister scratchFloat, Label* oolConvert);
  // Convert a JS object to a wasm anyref. This cannot fail.
  void convertObjectToWasmAnyRef(Register src, Register dest);
  // Convert a JS string to a wasm anyref. This cannot fail.
  void convertStringToWasmAnyRef(Register src, Register dest);

  // Convert a wasm anyref to a JS value. This cannot fail.
  //
  // Due to spectre mitigations, these methods may clobber src.
  void convertWasmAnyRefToValue(Register instance, Register src,
                                ValueOperand dst, Register scratch);
  void convertWasmAnyRefToValue(Register instance, Register src,
                                const Address& dst, Register scratch);

  // Branch if the object `src` is or is not a WasmGcObject.
  void branchObjectIsWasmGcObject(bool isGcObject, Register src,
                                  Register scratch, Label* label);

  // `typeDefData` will be preserved. `instance` and `result` may be the same
  // register, in which case `instance` will be clobbered.
  void wasmNewStructObject(Register instance, Register result,
                           Register typeDefData, Register temp1,
                           Register temp2, Label* fail,
                           gc::AllocKind allocKind, bool zeroFields);

  // Allocates a wasm array with a dynamic number of elements.
  //
  // `numElements` and `typeDefData` will be preserved. `instance` and `result`
  // may be the same register, in which case `instance` will be clobbered.
  void wasmNewArrayObject(Register instance, Register result,
                          Register numElements, Register typeDefData,
                          Register temp, Label* fail, uint32_t elemSize,
                          bool zeroFields);

  // Allocates a wasm array with a fixed number of elements.
  //
  // `typeDefData` will be preserved. `instance` and `result` may be the same
  // register, in which case `instance` will be clobbered.
  void wasmNewArrayObjectFixed(Register instance, Register result,
                               Register typeDefData, Register temp1,
                               Register temp2, Label* fail,
                               uint32_t numElements, uint32_t storageBytes,
                               bool zeroFields);

  // This function handles nursery allocations for wasm. For JS, see
  // MacroAssembler::bumpPointerAllocate.
  //
  // `typeDefData` will be preserved. `instance` and `result` may be the same
  // register, in which case `instance` will be clobbered.
  //
  // See also the dynamically-sized version,
  // MacroAssembler::wasmBumpPointerAllocateDynamic.
  void wasmBumpPointerAllocate(Register instance, Register result,
                               Register typeDefData, Register temp1,
                               Register temp2, Label* fail, uint32_t size);

  // This function handles nursery allocations for wasm of dynamic size. For
  // fixed-size allocations, see MacroAssembler::wasmBumpPointerAllocate.
  //
  // `typeDefData` and `size` will be preserved. `instance` and `result` may be
  // the same register, in which case `instance` will be clobbered.
  void wasmBumpPointerAllocateDynamic(Register instance, Register result,
                                      Register typeDefData, Register size,
                                      Register temp1, Label* fail);
  // Compute ptr += (indexTemp32 << shift) where shift can be any value < 32.
  // May destroy indexTemp32. The value of indexTemp32 must be positive, and it
  // is implementation-defined what happens if bits are lost or the value
  // becomes negative through the shift. On 64-bit systems, the high 32 bits of
  // indexTemp32 must be zero, not garbage.
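  //
  // For example (an illustrative sketch): advancing a base pointer to element
  // `i` of an array of 8-byte elements, with the index in `indexReg`:
  //
  //   masm.shiftIndex32AndAdd(indexReg, 3, basePtrReg);
  //   // basePtrReg += indexReg << 3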
4138 void shiftIndex32AndAdd(Register indexTemp32
, int shift
,
4139 Register pointer
) PER_SHARED_ARCH
;
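
  // For example, with 8-byte elements the caller passes shift = 3, so that
  // pointer += indexTemp32 << 3, i.e. pointer += indexTemp32 * 8 (a worked
  // example only; the element size depends on the caller).
  //
  //   masm.shiftIndex32AndAdd(indexTemp32, 3, elemsPointer);
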
  // The System ABI frequently states that the high bits of a 64-bit register
  // that holds a 32-bit return value are unpredictable, and C++ compilers will
  // indeed generate code that leaves garbage in the upper bits.
  //
  // Adjust the contents of the 64-bit register `r` to conform to our internal
  // convention, which requires predictable high bits. In practice, this means
  // that the 32-bit value will be zero-extended or sign-extended to 64 bits as
  // appropriate for the platform.
  void widenInt32(Register r)
      DEFINED_ON(arm64, x64, mips64, loong64, riscv64);

  // As enterFakeExitFrame(), but using register conventions appropriate for
  // wasm stubs.
  void enterFakeExitFrameForWasm(Register cxreg, Register scratch,
                                 ExitFrameType type) PER_SHARED_ARCH;

  // ========================================================================
  // Barrier functions.

  void emitPreBarrierFastPath(JSRuntime* rt, MIRType type, Register temp1,
                              Register temp2, Register temp3, Label* noBarrier);

  // ========================================================================
  // Clamping functions.

  inline void clampIntToUint8(Register reg) PER_SHARED_ARCH;

  // ========================================================================
  // Primitive atomic operations.
  //
  // If the access is from JS and the eventual destination of the result is a
  // js::Value, it's probably best to use the JS-specific versions of these,
  // see further below.
  //
  // Temp registers must be defined unless otherwise noted in the per-function
  // constraints.

  // 8-bit, 16-bit, and 32-bit wide operations.
  //
  // The 8-bit and 16-bit operations zero-extend or sign-extend the result to
  // 32 bits, according to `type`. On 64-bit systems, the upper 32 bits of the
  // result will be zero on some platforms (eg, on x64) and will be the sign
  // extension of the lower bits on other platforms (eg, MIPS).

  // CompareExchange with memory. Return the value that was in memory,
  // whether we wrote or not.
  //
  // x86-shared: `output` must be eax.
  // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
  // and 16-bit wide operations.

  void compareExchange(Scalar::Type type, Synchronization sync,
                       const Address& mem, Register expected,
                       Register replacement, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void compareExchange(Scalar::Type type, Synchronization sync,
                       const BaseIndex& mem, Register expected,
                       Register replacement, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void compareExchange(Scalar::Type type, Synchronization sync,
                       const Address& mem, Register expected,
                       Register replacement, Register valueTemp,
                       Register offsetTemp, Register maskTemp, Register output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void compareExchange(Scalar::Type type, Synchronization sync,
                       const BaseIndex& mem, Register expected,
                       Register replacement, Register valueTemp,
                       Register offsetTemp, Register maskTemp, Register output)
      DEFINED_ON(mips_shared, loong64, riscv64);
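
  // Illustrative sketch (assumptions only): a 32-bit compare-exchange on
  // x86-shared, where `output` is constrained to be eax (see above). The
  // memory operand and the other registers are caller-chosen.
  //
  //   masm.compareExchange(Scalar::Int32, Synchronization::Full(),
  //                        Address(base, 0), expected, replacement, eax);
  //   // eax now holds the value that was in memory, whether or not the
  //   // replacement was written.
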
  // x86: `expected` and `output` must be edx:eax; `replacement` is ecx:ebx.
  // x64: `output` must be rax.
  // ARM: Registers must be distinct; `replacement` and `output` must be
  // (even,odd) pairs.

  void compareExchange64(Synchronization sync, const Address& mem,
                         Register64 expected, Register64 replacement,
                         Register64 output)
      DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);

  void compareExchange64(Synchronization sync, const BaseIndex& mem,
                         Register64 expected, Register64 replacement,
                         Register64 output)
      DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);

  // Exchange with memory. Return the value initially in memory.
  // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
  // and 16-bit wide operations.

  void atomicExchange(Scalar::Type type, Synchronization sync,
                      const Address& mem, Register value, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void atomicExchange(Scalar::Type type, Synchronization sync,
                      const BaseIndex& mem, Register value, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void atomicExchange(Scalar::Type type, Synchronization sync,
                      const Address& mem, Register value, Register valueTemp,
                      Register offsetTemp, Register maskTemp, Register output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void atomicExchange(Scalar::Type type, Synchronization sync,
                      const BaseIndex& mem, Register value, Register valueTemp,
                      Register offsetTemp, Register maskTemp, Register output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  // x86: `value` must be ecx:ebx; `output` must be edx:eax.
  // ARM: `value` and `output` must be distinct and (even,odd) pairs.
  // ARM64: `value` and `output` must be distinct.

  void atomicExchange64(Synchronization sync, const Address& mem,
                        Register64 value, Register64 output)
      DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);

  void atomicExchange64(Synchronization sync, const BaseIndex& mem,
                        Register64 value, Register64 output)
      DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);

  // Read-modify-write with memory. Return the value in memory before the
  // operation.
  //
  // x86-shared:
  // For 8-bit operations, `value` and `output` must have a byte subregister.
  // For Add and Sub, `temp` must be invalid.
  // For And, Or, and Xor, `output` must be eax and `temp` must have a byte
  // subregister.
  //
  // ARM: Registers `value` and `output` must differ.
  // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
  // and 16-bit wide operations; `value` and `output` must differ.

  void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
                     Register value, const Address& mem, Register temp,
                     Register output) DEFINED_ON(arm, arm64, x86_shared);

  void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
                     Imm32 value, const Address& mem, Register temp,
                     Register output) DEFINED_ON(x86_shared);

  void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
                     Register value, const BaseIndex& mem, Register temp,
                     Register output) DEFINED_ON(arm, arm64, x86_shared);

  void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
                     Imm32 value, const BaseIndex& mem, Register temp,
                     Register output) DEFINED_ON(x86_shared);

  void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
                     Register value, const Address& mem, Register valueTemp,
                     Register offsetTemp, Register maskTemp, Register output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
                     Register value, const BaseIndex& mem, Register valueTemp,
                     Register offsetTemp, Register maskTemp, Register output)
      DEFINED_ON(mips_shared, loong64, riscv64);
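
  // Illustrative sketch (assumptions only): an atomic fetch-and-add of an
  // immediate on x86-shared, where Add takes an invalid temp (see above). The
  // exact AtomicOp enumerator spelling is assumed for the example.
  //
  //   masm.atomicFetchOp(Scalar::Uint32, Synchronization::Full(),
  //                      AtomicOp::Add, Imm32(1), Address(base, 0), InvalidReg,
  //                      output);
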
  // x86: `temp` must be ecx:ebx; `output` must be edx:eax.
  // x64: For Add and Sub, `temp` is ignored.
  //      For And, Or, and Xor, `output` must be rax.
  // ARM: `temp` and `output` must be (even,odd) pairs and distinct from
  // `value`.
  // MIPS: Registers `value`, `temp`, and `output` must all differ.

  void atomicFetchOp64(Synchronization sync, AtomicOp op, Register64 value,
                       const Address& mem, Register64 temp, Register64 output)
      DEFINED_ON(arm, arm64, x64, mips64, loong64, riscv64);

  void atomicFetchOp64(Synchronization sync, AtomicOp op, const Address& value,
                       const Address& mem, Register64 temp, Register64 output)
      DEFINED_ON(x86);

  void atomicFetchOp64(Synchronization sync, AtomicOp op, Register64 value,
                       const BaseIndex& mem, Register64 temp, Register64 output)
      DEFINED_ON(arm, arm64, x64, mips64, loong64, riscv64);

  void atomicFetchOp64(Synchronization sync, AtomicOp op, const Address& value,
                       const BaseIndex& mem, Register64 temp, Register64 output)
      DEFINED_ON(x86);

  // x64: `value` can be any register.
  // ARM: `temp` must be an (even,odd) pair and distinct from `value`.
  // MIPS: Registers `value` and `temp` must differ.

  void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
                        const Address& mem) DEFINED_ON(x64);

  void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
                        const Address& mem, Register64 temp)
      DEFINED_ON(arm, arm64, mips64, loong64, riscv64);

  void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
                        const BaseIndex& mem) DEFINED_ON(x64);

  void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
                        const BaseIndex& mem, Register64 temp)
      DEFINED_ON(arm, arm64, mips64, loong64, riscv64);

  // 64-bit atomic load. On 64-bit systems, use regular load with
  // Synchronization::Load, not this method.
  //
  // x86: `temp` must be ecx:ebx; `output` must be edx:eax.
  // ARM: `output` must be (even,odd) pair.

  void atomicLoad64(Synchronization sync, const Address& mem, Register64 temp,
                    Register64 output) DEFINED_ON(x86);

  void atomicLoad64(Synchronization sync, const BaseIndex& mem, Register64 temp,
                    Register64 output) DEFINED_ON(x86);

  void atomicLoad64(Synchronization sync, const Address& mem,
                    Register64 output) DEFINED_ON(arm);

  void atomicLoad64(Synchronization sync, const BaseIndex& mem,
                    Register64 output) DEFINED_ON(arm);

  // 64-bit atomic store. On 64-bit systems, use regular store with
  // Synchronization::Store, not this method.
  //
  // x86: `value` must be ecx:ebx; `temp` must be edx:eax.
  // ARM: `value` and `temp` must be (even,odd) pairs.

  void atomicStore64(Synchronization sync, const Address& mem, Register64 value,
                     Register64 temp) DEFINED_ON(x86, arm);

  void atomicStore64(Synchronization sync, const BaseIndex& mem,
                     Register64 value, Register64 temp) DEFINED_ON(x86, arm);
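
  // Illustrative sketch (assumptions only): on 32-bit x86 the 64-bit atomic
  // load pins its operands to fixed register pairs; on 64-bit targets a plain
  // 64-bit load with Synchronization::Load is used instead.
  //
  //   // x86: temp must be ecx:ebx and output must be edx:eax.
  //   masm.atomicLoad64(Synchronization::Load(), Address(base, 0), temp64,
  //                     output64);
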
  // ========================================================================
  // Wasm atomic operations.
  //
  // Constraints, when omitted, are exactly as for the primitive operations
  // above.

  void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
                           const Address& mem, Register expected,
                           Register replacement, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
                           const BaseIndex& mem, Register expected,
                           Register replacement, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
                           const Address& mem, Register expected,
                           Register replacement, Register valueTemp,
                           Register offsetTemp, Register maskTemp,
                           Register output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
                           const BaseIndex& mem, Register expected,
                           Register replacement, Register valueTemp,
                           Register offsetTemp, Register maskTemp,
                           Register output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
                          const Address& mem, Register value, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
                          const BaseIndex& mem, Register value, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
                          const Address& mem, Register value,
                          Register valueTemp, Register offsetTemp,
                          Register maskTemp, Register output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
                          const BaseIndex& mem, Register value,
                          Register valueTemp, Register offsetTemp,
                          Register maskTemp, Register output)
      DEFINED_ON(mips_shared, loong64, riscv64);
  void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                         Register value, const Address& mem, Register temp,
                         Register output) DEFINED_ON(arm, arm64, x86_shared);

  void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                         Imm32 value, const Address& mem, Register temp,
                         Register output) DEFINED_ON(x86_shared);

  void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                         Register value, const BaseIndex& mem, Register temp,
                         Register output) DEFINED_ON(arm, arm64, x86_shared);

  void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                         Imm32 value, const BaseIndex& mem, Register temp,
                         Register output) DEFINED_ON(x86_shared);

  void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                         Register value, const Address& mem,
                         Register valueTemp, Register offsetTemp,
                         Register maskTemp, Register output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                         Register value, const BaseIndex& mem,
                         Register valueTemp, Register offsetTemp,
                         Register maskTemp, Register output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  // Read-modify-write with memory. Return no value.
  //
  // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
  // and 16-bit wide operations.

  void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                          Register value, const Address& mem, Register temp)
      DEFINED_ON(arm, arm64, x86_shared);

  void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                          Imm32 value, const Address& mem, Register temp)
      DEFINED_ON(x86_shared);

  void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                          Register value, const BaseIndex& mem, Register temp)
      DEFINED_ON(arm, arm64, x86_shared);

  void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                          Imm32 value, const BaseIndex& mem, Register temp)
      DEFINED_ON(x86_shared);

  void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                          Register value, const Address& mem,
                          Register valueTemp, Register offsetTemp,
                          Register maskTemp)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                          Register value, const BaseIndex& mem,
                          Register valueTemp, Register offsetTemp,
                          Register maskTemp)
      DEFINED_ON(mips_shared, loong64, riscv64);
  // 64-bit wide operations.

  // 64-bit atomic load. On 64-bit systems, use regular wasm load with
  // Synchronization::Load, not this method.
  //
  // x86: `temp` must be ecx:ebx; `output` must be edx:eax.
  // ARM: `temp` should be invalid; `output` must be (even,odd) pair.
  // MIPS32: `temp` should be invalid.

  void wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
                        const Address& mem, Register64 temp, Register64 output)
      DEFINED_ON(arm, mips32, x86, wasm32);

  void wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
                        const BaseIndex& mem, Register64 temp,
                        Register64 output) DEFINED_ON(arm, mips32, x86, wasm32);

  // x86: `expected` must be the same as `output`, and must be edx:eax.
  // x86: `replacement` must be ecx:ebx.
  // x64: `output` must be rax.
  // ARM: Registers must be distinct; `replacement` and `output` must be
  // (even,odd) pairs.
  // ARM64: The base register in `mem` must not overlap `output`.
  // MIPS: Registers must be distinct.

  void wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
                             const Address& mem, Register64 expected,
                             Register64 replacement,
                             Register64 output) PER_ARCH;

  void wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
                             const BaseIndex& mem, Register64 expected,
                             Register64 replacement,
                             Register64 output) PER_ARCH;

  // x86: `value` must be ecx:ebx; `output` must be edx:eax.
  // ARM: Registers must be distinct; `value` and `output` must be (even,odd)
  // pairs.
  // MIPS: Registers must be distinct.

  void wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
                            const Address& mem, Register64 value,
                            Register64 output) PER_ARCH;

  void wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
                            const BaseIndex& mem, Register64 value,
                            Register64 output) PER_ARCH;

  // x86: `output` must be edx:eax, `temp` must be ecx:ebx.
  // x64: For And, Or, and Xor `output` must be rax.
  // ARM: Registers must be distinct; `temp` and `output` must be (even,odd)
  // pairs.
  // MIPS: Registers must be distinct.
  // MIPS32: `temp` should be invalid.

  void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
                           Register64 value, const Address& mem,
                           Register64 temp, Register64 output)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, x64);

  void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
                           Register64 value, const BaseIndex& mem,
                           Register64 temp, Register64 output)
      DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, x64);

  void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
                           const Address& value, const Address& mem,
                           Register64 temp, Register64 output) DEFINED_ON(x86);

  void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
                           const Address& value, const BaseIndex& mem,
                           Register64 temp, Register64 output) DEFINED_ON(x86);

  // Here `value` can be any register.

  void wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
                            Register64 value, const BaseIndex& mem)
      DEFINED_ON(x64);

  void wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
                            Register64 value, const BaseIndex& mem,
                            Register64 temp) DEFINED_ON(arm64);
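
  // Illustrative sketch (assumptions only): lowering a wasm atomic
  // read-modify-write, where `access` describes the heap access (type, offset,
  // trap offset). Names and the AtomicOp enumerator spelling are assumptions.
  //
  //   masm.wasmAtomicFetchOp(access, AtomicOp::Add, value,
  //                          Address(memoryBase, offset), temp, output);
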
  // ========================================================================
  // JS atomic operations.
  //
  // Here the arrayType must be a type that is valid for JS. As of 2017 that
  // is an 8-bit, 16-bit, or 32-bit integer type.
  //
  // If arrayType is Scalar::Uint32 then:
  //
  //   - `output` must be a float register
  //   - if the operation takes one temp register then `temp` must be defined
  //   - if the operation takes two temp registers then `temp2` must be defined.
  //
  // Otherwise `output` must be a GPR and `temp`/`temp2` should be InvalidReg.
  // (`temp1` must always be valid.)
  //
  // For additional register constraints, see the primitive 32-bit operations
  // and/or wasm operations above.

  void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
                         const Address& mem, Register expected,
                         Register replacement, Register temp,
                         AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);

  void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
                         const BaseIndex& mem, Register expected,
                         Register replacement, Register temp,
                         AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);

  void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
                         const Address& mem, Register expected,
                         Register replacement, Register valueTemp,
                         Register offsetTemp, Register maskTemp, Register temp,
                         AnyRegister output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
                         const BaseIndex& mem, Register expected,
                         Register replacement, Register valueTemp,
                         Register offsetTemp, Register maskTemp, Register temp,
                         AnyRegister output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
                        const Address& mem, Register value, Register temp,
                        AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);

  void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
                        const BaseIndex& mem, Register value, Register temp,
                        AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);

  void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
                        const Address& mem, Register value, Register valueTemp,
                        Register offsetTemp, Register maskTemp, Register temp,
                        AnyRegister output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
                        const BaseIndex& mem, Register value,
                        Register valueTemp, Register offsetTemp,
                        Register maskTemp, Register temp, AnyRegister output)
      DEFINED_ON(mips_shared, loong64, riscv64);
  void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
                       AtomicOp op, Register value, const Address& mem,
                       Register temp1, Register temp2, AnyRegister output)
      DEFINED_ON(arm, arm64, x86_shared);

  void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
                       AtomicOp op, Register value, const BaseIndex& mem,
                       Register temp1, Register temp2, AnyRegister output)
      DEFINED_ON(arm, arm64, x86_shared);

  void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
                       AtomicOp op, Imm32 value, const Address& mem,
                       Register temp1, Register temp2, AnyRegister output)
      DEFINED_ON(x86_shared);

  void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
                       AtomicOp op, Imm32 value, const BaseIndex& mem,
                       Register temp1, Register temp2, AnyRegister output)
      DEFINED_ON(x86_shared);

  void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
                       AtomicOp op, Register value, const Address& mem,
                       Register valueTemp, Register offsetTemp,
                       Register maskTemp, Register temp, AnyRegister output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
                       AtomicOp op, Register value, const BaseIndex& mem,
                       Register valueTemp, Register offsetTemp,
                       Register maskTemp, Register temp, AnyRegister output)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
                        AtomicOp op, Register value, const Address& mem,
                        Register temp) DEFINED_ON(arm, arm64, x86_shared);

  void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
                        AtomicOp op, Register value, const BaseIndex& mem,
                        Register temp) DEFINED_ON(arm, arm64, x86_shared);

  void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
                        AtomicOp op, Imm32 value, const Address& mem,
                        Register temp) DEFINED_ON(x86_shared);

  void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
                        AtomicOp op, Imm32 value, const BaseIndex& mem,
                        Register temp) DEFINED_ON(x86_shared);

  void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
                        AtomicOp op, Register value, const Address& mem,
                        Register valueTemp, Register offsetTemp,
                        Register maskTemp)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
                        AtomicOp op, Register value, const BaseIndex& mem,
                        Register valueTemp, Register offsetTemp,
                        Register maskTemp)
      DEFINED_ON(mips_shared, loong64, riscv64);

  void atomicIsLockFreeJS(Register value, Register output);
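
  // Illustrative sketch (assumptions only): for Scalar::Uint32 the result can
  // exceed INT32_MAX, so `output` must be a float register and the result is
  // produced as a double; for the narrower types `output` is a GPR.
  //
  //   masm.atomicFetchOpJS(Scalar::Uint32, Synchronization::Full(),
  //                        AtomicOp::Add, value, Address(elements, 0), temp1,
  //                        temp2, AnyRegister(outputDouble));
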
  // ========================================================================
  // Spectre Mitigations.
  //
  // Spectre attacks are side-channel attacks based on cache pollution or
  // slow-execution of some instructions. We have multiple spectre mitigations:
  //
  // - Stop speculative executions, with memory barriers. Memory barriers
  //   force all branches depending on loads to be resolved, and thus
  //   resolve all miss-speculated paths.
  //
  // - Use conditional move instructions. Some CPUs have a branch predictor,
  //   and not a flag predictor. In such cases, using a conditional move
  //   instruction to zero some pointer/index is enough to add a
  //   data-dependency which prevents any further executions until the load is
  //   resolved.

  void spectreMaskIndex32(Register index, Register length, Register output);
  void spectreMaskIndex32(Register index, const Address& length,
                          Register output);
  void spectreMaskIndexPtr(Register index, Register length, Register output);
  void spectreMaskIndexPtr(Register index, const Address& length,
                           Register output);

  // The length must be a power of two. Performs a bounds check and Spectre
  // index masking.
  void boundsCheck32PowerOfTwo(Register index, uint32_t length, Label* failure);

  void speculationBarrier() PER_SHARED_ARCH;
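
  // Illustrative sketch (assumptions only): a bounds-checked element access
  // masks the index after the branch, so a mis-speculated path sees index 0
  // instead of an out-of-bounds value.
  //
  //   masm.branch32(Assembler::BelowOrEqual, length, index, &fail);
  //   masm.spectreMaskIndex32(index, length, index);
  //   // ... index is now safe to use for the element load ...
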
  //}}} check_macroassembler_decl_style

  // Unsafe here means the caller is responsible for Spectre mitigations if
  // needed. Prefer branchTestObjClass or one of the other masm helpers!
  inline void loadObjClassUnsafe(Register obj, Register dest);

  template <typename EmitPreBarrier>
  inline void storeObjShape(Register shape, Register obj,
                            EmitPreBarrier emitPreBarrier);
  template <typename EmitPreBarrier>
  inline void storeObjShape(Shape* shape, Register obj,
                            EmitPreBarrier emitPreBarrier);

  inline void loadObjProto(Register obj, Register dest);

  inline void loadStringLength(Register str, Register dest);

  void loadStringChars(Register str, Register dest, CharEncoding encoding);

  void loadNonInlineStringChars(Register str, Register dest,
                                CharEncoding encoding);
  void loadNonInlineStringCharsForStore(Register str, Register dest);
  void storeNonInlineStringChars(Register chars, Register str);

  void loadInlineStringChars(Register str, Register dest,
                             CharEncoding encoding);
  void loadInlineStringCharsForStore(Register str, Register dest);
  enum class CharKind { CharCode, CodePoint };

  void branchIfMaybeSplitSurrogatePair(Register leftChild, Register index,
                                       Register scratch, Label* maybeSplit,
                                       Label* notMaybeSplit);
  void loadRopeChild(CharKind kind, Register str, Register index,
                     Register output, Register maybeScratch, Label* isLinear,
                     Label* splitSurrogate);

  void branchIfCanLoadStringChar(CharKind kind, Register str, Register index,
                                 Register scratch, Register maybeScratch,
                                 Label* label);
  void branchIfNotCanLoadStringChar(CharKind kind, Register str, Register index,
                                    Register scratch, Register maybeScratch,
                                    Label* label);

  void loadStringChar(CharKind kind, Register str, Register index,
                      Register output, Register scratch1, Register scratch2,
                      Label* fail);
  void branchIfCanLoadStringChar(Register str, Register index, Register scratch,
                                 Label* label) {
    branchIfCanLoadStringChar(CharKind::CharCode, str, index, scratch,
                              InvalidReg, label);
  }
  void branchIfNotCanLoadStringChar(Register str, Register index,
                                    Register scratch, Label* label) {
    branchIfNotCanLoadStringChar(CharKind::CharCode, str, index, scratch,
                                 InvalidReg, label);
  }

  void branchIfCanLoadStringCodePoint(Register str, Register index,
                                      Register scratch1, Register scratch2,
                                      Label* label) {
    branchIfCanLoadStringChar(CharKind::CodePoint, str, index, scratch1,
                              scratch2, label);
  }
  void branchIfNotCanLoadStringCodePoint(Register str, Register index,
                                         Register scratch1, Register scratch2,
                                         Label* label) {
    branchIfNotCanLoadStringChar(CharKind::CodePoint, str, index, scratch1,
                                 scratch2, label);
  }

  void loadStringChar(Register str, Register index, Register output,
                      Register scratch1, Register scratch2, Label* fail) {
    loadStringChar(CharKind::CharCode, str, index, output, scratch1, scratch2,
                   fail);
  }

  void loadStringChar(Register str, int32_t index, Register output,
                      Register scratch1, Register scratch2, Label* fail);

  void loadStringCodePoint(Register str, Register index, Register output,
                           Register scratch1, Register scratch2, Label* fail) {
    loadStringChar(CharKind::CodePoint, str, index, output, scratch1, scratch2,
                   fail);
  }
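
  // Illustrative sketch (assumptions only): a charCodeAt-style fast path that
  // loads the character at `index` and bails out to `fail` for rope cases the
  // inline code cannot handle.
  //
  //   Label fail;
  //   masm.loadStringChar(str, index, output, scratch1, scratch2, &fail);
  //   // `output` holds the char code; bind(&fail) falls back to a VM call.
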
  void loadRopeLeftChild(Register str, Register dest);
  void loadRopeRightChild(Register str, Register dest);
  void storeRopeChildren(Register left, Register right, Register str);

  void loadDependentStringBase(Register str, Register dest);
  void storeDependentStringBase(Register base, Register str);

  void loadStringIndexValue(Register str, Register dest, Label* fail);

  /**
   * Store the character in |src| to |dest|.
   */
  template <typename T>
  void storeChar(const T& src, Address dest, CharEncoding encoding) {
    if (encoding == CharEncoding::Latin1) {
      store8(src, dest);
    } else {
      store16(src, dest);
    }
  }

  /**
   * Load the character at |src| into |dest|.
   */
  template <typename T>
  void loadChar(const T& src, Register dest, CharEncoding encoding) {
    if (encoding == CharEncoding::Latin1) {
      load8ZeroExtend(src, dest);
    } else {
      load16ZeroExtend(src, dest);
    }
  }
  /**
   * Load the character at |chars[index + offset]| into |dest|. The optional
   * offset argument is not scaled to the character encoding.
   */
  void loadChar(Register chars, Register index, Register dest,
                CharEncoding encoding, int32_t offset = 0);

  /**
   * Add |index| to |chars| so that |chars| now points at |chars[index]|.
   */
  void addToCharPtr(Register chars, Register index, CharEncoding encoding);

  /**
   * Branch if |src| is not a lead surrogate character.
   */
  void branchIfNotLeadSurrogate(Register src, Label* label);

  enum class SurrogateChar { Lead, Trail };
  void branchSurrogate(Assembler::Condition cond, Register src,
                       Register scratch, Label* label,
                       SurrogateChar surrogateChar);

  /**
   * Branch if |src| is a lead surrogate character.
   */
  void branchIfLeadSurrogate(Register src, Register scratch, Label* label) {
    branchSurrogate(Assembler::Equal, src, scratch, label, SurrogateChar::Lead);
  }

  /**
   * Branch if |src| is not a lead surrogate character.
   */
  void branchIfNotLeadSurrogate(Register src, Register scratch, Label* label) {
    branchSurrogate(Assembler::NotEqual, src, scratch, label,
                    SurrogateChar::Lead);
  }

  /**
   * Branch if |src| is not a trail surrogate character.
   */
  void branchIfNotTrailSurrogate(Register src, Register scratch, Label* label) {
    branchSurrogate(Assembler::NotEqual, src, scratch, label,
                    SurrogateChar::Trail);
  }

  void loadStringFromUnit(Register unit, Register dest,
                          const StaticStrings& staticStrings);
  void loadLengthTwoString(Register c1, Register c2, Register dest,
                           const StaticStrings& staticStrings);

  /**
   * Lookup the length-one string from the static strings cache.
   */
  void lookupStaticString(Register ch, Register dest,
                          const StaticStrings& staticStrings);
  /**
   * Lookup the length-one string from the static strings cache. Jumps to |fail|
   * when the string wasn't found in the strings cache.
   */
  void lookupStaticString(Register ch, Register dest,
                          const StaticStrings& staticStrings, Label* fail);

  /**
   * Lookup the length-two string from the static strings cache. Jumps to |fail|
   * when the string wasn't found in the strings cache.
   *
   * Clobbers |ch1| and |ch2|.
   */
  void lookupStaticString(Register ch1, Register ch2, Register dest,
                          const StaticStrings& staticStrings, Label* fail);

  /**
   * Lookup the integer string from the static integer strings cache. Jumps to
   * |fail| when the string wasn't found in the strings cache.
   */
  void lookupStaticIntString(Register integer, Register dest, Register scratch,
                             const StaticStrings& staticStrings, Label* fail);
  void lookupStaticIntString(Register integer, Register dest,
                             const StaticStrings& staticStrings, Label* fail) {
    lookupStaticIntString(integer, dest, dest, staticStrings, fail);
  }

  /**
   * Load the string representation of |input| in base |base|. Jumps to |fail|
   * when the string representation needs to be allocated dynamically.
   */
  void loadInt32ToStringWithBase(Register input, Register base, Register dest,
                                 Register scratch1, Register scratch2,
                                 const StaticStrings& staticStrings,
                                 const LiveRegisterSet& volatileRegs,
                                 bool lowerCase, Label* fail);
  void loadInt32ToStringWithBase(Register input, int32_t base, Register dest,
                                 Register scratch1, Register scratch2,
                                 const StaticStrings& staticStrings,
                                 bool lowerCase, Label* fail);
  /**
   * Load the BigInt digits from |bigInt| into |digits|.
   */
  void loadBigIntDigits(Register bigInt, Register digits);

  /**
   * Load the first [u]int64 value from |bigInt| into |dest|.
   */
  void loadBigInt64(Register bigInt, Register64 dest);

  /**
   * Load the first digit from |bigInt| into |dest|. Handles the case when the
   * BigInt digits length is zero.
   *
   * Note: A BigInt digit is a pointer-sized value.
   */
  void loadFirstBigIntDigitOrZero(Register bigInt, Register dest);

  /**
   * Load the number stored in |bigInt| into |dest|. Handles the case when the
   * BigInt digits length is zero. Jumps to |fail| when the number can't be
   * saved into a single pointer-sized register.
   */
  void loadBigInt(Register bigInt, Register dest, Label* fail);

  /**
   * Load the number stored in |bigInt| into |dest|. Doesn't handle the case
   * when the BigInt digits length is zero. Jumps to |fail| when the number
   * can't be saved into a single pointer-sized register.
   */
  void loadBigIntNonZero(Register bigInt, Register dest, Label* fail);

  /**
   * Load the absolute number stored in |bigInt| into |dest|. Handles the case
   * when the BigInt digits length is zero. Jumps to |fail| when the number
   * can't be saved into a single pointer-sized register.
   */
  void loadBigIntAbsolute(Register bigInt, Register dest, Label* fail);

  /**
   * In-place modifies the BigInt digit to a signed pointer-sized value. Jumps
   * to |fail| when the digit exceeds the representable range.
   */
  void bigIntDigitToSignedPtr(Register bigInt, Register digit, Label* fail);

  /**
   * Initialize a BigInt from |val|. Clobbers |val|!
   */
  void initializeBigInt64(Scalar::Type type, Register bigInt, Register64 val);

  /**
   * Initialize a BigInt from the signed, pointer-sized register |val|.
   */
  void initializeBigInt(Register bigInt, Register val);

  /**
   * Initialize a BigInt from the pointer-sized register |val|.
   */
  void initializeBigIntAbsolute(Register bigInt, Register val);

  /**
   * Copy a BigInt. Jumps to |fail| on allocation failure or when the BigInt
   * digits need to be heap allocated.
   */
  void copyBigIntWithInlineDigits(Register src, Register dest, Register temp,
                                  gc::Heap initialHeap, Label* fail);

  /**
   * Compare a BigInt and an Int32 value. Falls through to the false case.
   */
  void compareBigIntAndInt32(JSOp op, Register bigInt, Register int32,
                             Register scratch1, Register scratch2,
                             Label* ifTrue, Label* ifFalse);

  /**
   * Compare two BigInts for equality. Falls through if both BigInts are equal
   * to each other.
   *
   * - When we jump to |notSameLength|, |temp1| holds the length of the right
   *   operand.
   * - When we jump to |notSameDigit|, |temp2| points to the current digit of
   *   the left operand and |temp4| holds the current digit of the right
   *   operand.
   */
  void equalBigInts(Register left, Register right, Register temp1,
                    Register temp2, Register temp3, Register temp4,
                    Label* notSameSign, Label* notSameLength,
                    Label* notSameDigit);
  void loadJSContext(Register dest);

  void loadGlobalObjectData(Register dest);

  void loadRealmFuse(RealmFuses::FuseIndex index, Register dest);

  void switchToRealm(Register realm);
  void switchToRealm(const void* realm, Register scratch);
  void switchToObjectRealm(Register obj, Register scratch);
  void switchToBaselineFrameRealm(Register scratch);
  void switchToWasmInstanceRealm(Register scratch1, Register scratch2);
  void debugAssertContextRealm(const void* realm, Register scratch);

  void loadJitActivation(Register dest);

  void guardSpecificAtom(Register str, JSAtom* atom, Register scratch,
                         const LiveRegisterSet& volatileRegs, Label* fail);

  void guardStringToInt32(Register str, Register output, Register scratch,
                          LiveRegisterSet volatileRegs, Label* fail);
  template <typename T>
  void loadTypedOrValue(const T& src, TypedOrValueRegister dest) {
    if (dest.hasValue()) {
      loadValue(src, dest.valueReg());
    } else {
      loadUnboxedValue(src, dest.type(), dest.typedReg());
    }
  }

  template <typename T>
  void storeTypedOrValue(TypedOrValueRegister src, const T& dest) {
    if (src.hasValue()) {
      storeValue(src.valueReg(), dest);
    } else if (IsFloatingPointType(src.type())) {
      FloatRegister reg = src.typedReg().fpu();
      if (src.type() == MIRType::Float32) {
        ScratchDoubleScope fpscratch(*this);
        convertFloat32ToDouble(reg, fpscratch);
        boxDouble(fpscratch, dest);
      } else {
        boxDouble(reg, dest);
      }
    } else {
      storeValue(ValueTypeFromMIRType(src.type()), src.typedReg().gpr(), dest);
    }
  }

  template <typename T>
  void storeConstantOrRegister(const ConstantOrRegister& src, const T& dest) {
    if (src.constant()) {
      storeValue(src.value(), dest);
    } else {
      storeTypedOrValue(src.reg(), dest);
    }
  }
  void storeCallPointerResult(Register reg) {
    if (reg != ReturnReg) {
      mov(ReturnReg, reg);
    }
  }

  inline void storeCallBoolResult(Register reg);
  inline void storeCallInt32Result(Register reg);

  void storeCallFloatResult(FloatRegister reg) {
    if (reg != ReturnDoubleReg) {
      moveDouble(ReturnDoubleReg, reg);
    }
  }

  inline void storeCallResultValue(AnyRegister dest, JSValueType type);

  void storeCallResultValue(ValueOperand dest) {
#if defined(JS_NUNBOX32)
    // reshuffle the return registers used for a call result to store into
    // dest, using ReturnReg as a scratch register if necessary. This must
    // only be called after returning from a call, at a point when the
    // return register is not live. XXX would be better to allow wrappers
    // to store the return value to different places.
    if (dest.typeReg() == JSReturnReg_Data) {
      if (dest.payloadReg() == JSReturnReg_Type) {
        // swap the two registers.
        mov(JSReturnReg_Type, ReturnReg);
        mov(JSReturnReg_Data, JSReturnReg_Type);
        mov(ReturnReg, JSReturnReg_Data);
      } else {
        mov(JSReturnReg_Data, dest.payloadReg());
        mov(JSReturnReg_Type, dest.typeReg());
      }
    } else {
      mov(JSReturnReg_Type, dest.typeReg());
      mov(JSReturnReg_Data, dest.payloadReg());
    }
#elif defined(JS_PUNBOX64)
    if (dest.valueReg() != JSReturnReg) {
      mov(JSReturnReg, dest.valueReg());
    }
#else
#  error "Bad architecture"
#endif
  }

  inline void storeCallResultValue(TypedOrValueRegister dest);
  TrampolinePtr preBarrierTrampoline(MIRType type);

  template <typename T>
  void unguardedCallPreBarrier(const T& address, MIRType type) {
    Label done;
    if (type == MIRType::Value) {
      branchTestGCThing(Assembler::NotEqual, address, &done);
    } else if (type == MIRType::Object || type == MIRType::String) {
      branchPtr(Assembler::Equal, address, ImmWord(0), &done);
    }

    Push(PreBarrierReg);
    computeEffectiveAddress(address, PreBarrierReg);

    TrampolinePtr preBarrier = preBarrierTrampoline(type);

    call(preBarrier);
    Pop(PreBarrierReg);
    // On arm64, SP may be < PSP now (that's OK).
    // eg testcase: tests/auto-regress/bug702915.js

    bind(&done);
  }

  template <typename T>
  void guardedCallPreBarrier(const T& address, MIRType type) {
    Label done;
    branchTestNeedsIncrementalBarrier(Assembler::Zero, &done);
    unguardedCallPreBarrier(address, type);
    bind(&done);
  }

  // Like guardedCallPreBarrier, but unlike guardedCallPreBarrier this can be
  // called from runtime-wide trampolines because it loads cx->zone (instead of
  // baking in the current Zone) if JitContext::realm is nullptr.
  template <typename T>
  void guardedCallPreBarrierAnyZone(const T& address, MIRType type,
                                    Register scratch) {
    Label done;
    branchTestNeedsIncrementalBarrierAnyZone(Assembler::Zero, &done, scratch);
    unguardedCallPreBarrier(address, type);
    bind(&done);
  }
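
  // Illustrative sketch (assumptions only): before overwriting a slot that may
  // hold a GC thing, emit the guarded incremental pre-barrier; `slotAddr` is a
  // placeholder for the slot's address computed by the caller.
  //
  //   masm.guardedCallPreBarrier(slotAddr, MIRType::Value);
  //   masm.storeValue(rhs, slotAddr);
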
  enum class Uint32Mode { FailOnDouble, ForceDouble };

  void boxUint32(Register source, ValueOperand dest, Uint32Mode uint32Mode,
                 Label* fail);

  template <typename T>
  void loadFromTypedArray(Scalar::Type arrayType, const T& src,
                          AnyRegister dest, Register temp, Label* fail);

  template <typename T>
  void loadFromTypedArray(Scalar::Type arrayType, const T& src,
                          const ValueOperand& dest, Uint32Mode uint32Mode,
                          Register temp, Label* fail);

  template <typename T>
  void loadFromTypedBigIntArray(Scalar::Type arrayType, const T& src,
                                Register bigInt, Register64 temp);

  template <typename S, typename T>
  void storeToTypedIntArray(Scalar::Type arrayType, const S& value,
                            const T& dest) {
    switch (arrayType) {
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
        store8(value, dest);
        break;
      case Scalar::Int16:
      case Scalar::Uint16:
        store16(value, dest);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
        store32(value, dest);
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
  }
  void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
                              const BaseIndex& dest);
  void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
                              const Address& dest);

  void storeToTypedBigIntArray(Scalar::Type arrayType, Register64 value,
                               const BaseIndex& dest);
  void storeToTypedBigIntArray(Scalar::Type arrayType, Register64 value,
                               const Address& dest);

  void memoryBarrierBefore(Synchronization sync);
  void memoryBarrierAfter(Synchronization sync);

  void debugAssertIsObject(const ValueOperand& val);
  void debugAssertObjHasFixedSlots(Register obj, Register scratch);

  void debugAssertObjectHasClass(Register obj, Register scratch,
                                 const JSClass* clasp);

  void debugAssertGCThingIsTenured(Register ptr, Register temp);

  void branchArrayIsNotPacked(Register array, Register temp1, Register temp2,
                              Label* label);

  void setIsPackedArray(Register obj, Register output, Register temp);

  void packedArrayPop(Register array, ValueOperand output, Register temp1,
                      Register temp2, Label* fail);
  void packedArrayShift(Register array, ValueOperand output, Register temp1,
                        Register temp2, LiveRegisterSet volatileRegs,
                        Label* fail);

  void loadArgumentsObjectElement(Register obj, Register index,
                                  ValueOperand output, Register temp,
                                  Label* fail);
  void loadArgumentsObjectElementHole(Register obj, Register index,
                                      ValueOperand output, Register temp,
                                      Label* fail);
  void loadArgumentsObjectElementExists(Register obj, Register index,
                                        Register output, Register temp,
                                        Label* fail);

  void loadArgumentsObjectLength(Register obj, Register output, Label* fail);

  void branchTestArgumentsObjectFlags(Register obj, Register temp,
                                      uint32_t flags, Condition cond,
                                      Label* label);
  void typedArrayElementSize(Register obj, Register output);

  // Shift |output| by the element shift of the ResizableTypedArray in |obj|.
  void resizableTypedArrayElementShiftBy(Register obj, Register output,
                                         Register scratch);

  void branchIfClassIsNotTypedArray(Register clasp, Label* notTypedArray);
  void branchIfClassIsNotFixedLengthTypedArray(Register clasp,
                                               Label* notTypedArray);

  void branchIfHasDetachedArrayBuffer(Register obj, Register temp,
                                      Label* label);

  void branchIfNativeIteratorNotReusable(Register ni, Label* notReusable);
  void branchNativeIteratorIndices(Condition cond, Register ni, Register temp,
                                   NativeIteratorIndices kind, Label* label);

  void maybeLoadIteratorFromShape(Register obj, Register dest, Register temp,
                                  Register temp2, Register temp3,
                                  Label* failure);

  void iteratorMore(Register obj, ValueOperand output, Register temp);
  void iteratorClose(Register obj, Register temp1, Register temp2,
                     Register temp3);
  void registerIterator(Register enumeratorsList, Register iter, Register temp);
  void toHashableNonGCThing(ValueOperand value, ValueOperand result,
                            FloatRegister tempFloat);

  void toHashableValue(ValueOperand value, ValueOperand result,
                       FloatRegister tempFloat, Label* atomizeString,
                       Label* tagString);

  void scrambleHashCode(Register result);

  void prepareHashNonGCThing(ValueOperand value, Register result,
                             Register temp);
  void prepareHashString(Register str, Register result, Register temp);
  void prepareHashSymbol(Register sym, Register result);
  void prepareHashBigInt(Register bigInt, Register result, Register temp1,
                         Register temp2, Register temp3);
  void prepareHashObject(Register setObj, ValueOperand value, Register result,
                         Register temp1, Register temp2, Register temp3,
                         Register temp4);
  void prepareHashValue(Register setObj, ValueOperand value, Register result,
                        Register temp1, Register temp2, Register temp3,
                        Register temp4);

  enum class IsBigInt { No, Yes, Maybe };

  /**
   * Search for a value in an OrderedHashTable.
   *
   * When we jump to |found|, |entryTemp| holds the found hashtable entry.
   */
  template <typename OrderedHashTable>
  void orderedHashTableLookup(Register setOrMapObj, ValueOperand value,
                              Register hash, Register entryTemp, Register temp1,
                              Register temp3, Register temp4, Register temp5,
                              Label* found, IsBigInt isBigInt);
  void setObjectHas(Register setObj, ValueOperand value, Register hash,
                    Register result, Register temp1, Register temp2,
                    Register temp3, Register temp4, IsBigInt isBigInt);

  void mapObjectHas(Register mapObj, ValueOperand value, Register hash,
                    Register result, Register temp1, Register temp2,
                    Register temp3, Register temp4, IsBigInt isBigInt);

  void mapObjectGet(Register mapObj, ValueOperand value, Register hash,
                    ValueOperand result, Register temp1, Register temp2,
                    Register temp3, Register temp4, Register temp5,
                    IsBigInt isBigInt);

  void setObjectHasNonBigInt(Register setObj, ValueOperand value, Register hash,
                             Register result, Register temp1, Register temp2) {
    return setObjectHas(setObj, value, hash, result, temp1, temp2, InvalidReg,
                        InvalidReg, IsBigInt::No);
  }
  void setObjectHasBigInt(Register setObj, ValueOperand value, Register hash,
                          Register result, Register temp1, Register temp2,
                          Register temp3, Register temp4) {
    return setObjectHas(setObj, value, hash, result, temp1, temp2, temp3, temp4,
                        IsBigInt::Yes);
  }
  void setObjectHasValue(Register setObj, ValueOperand value, Register hash,
                         Register result, Register temp1, Register temp2,
                         Register temp3, Register temp4) {
    return setObjectHas(setObj, value, hash, result, temp1, temp2, temp3, temp4,
                        IsBigInt::Maybe);
  }

  void mapObjectHasNonBigInt(Register mapObj, ValueOperand value, Register hash,
                             Register result, Register temp1, Register temp2) {
    return mapObjectHas(mapObj, value, hash, result, temp1, temp2, InvalidReg,
                        InvalidReg, IsBigInt::No);
  }
  void mapObjectHasBigInt(Register mapObj, ValueOperand value, Register hash,
                          Register result, Register temp1, Register temp2,
                          Register temp3, Register temp4) {
    return mapObjectHas(mapObj, value, hash, result, temp1, temp2, temp3, temp4,
                        IsBigInt::Yes);
  }
  void mapObjectHasValue(Register mapObj, ValueOperand value, Register hash,
                         Register result, Register temp1, Register temp2,
                         Register temp3, Register temp4) {
    return mapObjectHas(mapObj, value, hash, result, temp1, temp2, temp3, temp4,
                        IsBigInt::Maybe);
  }

  void mapObjectGetNonBigInt(Register mapObj, ValueOperand value, Register hash,
                             ValueOperand result, Register temp1,
                             Register temp2, Register temp3) {
    return mapObjectGet(mapObj, value, hash, result, temp1, temp2, temp3,
                        InvalidReg, InvalidReg, IsBigInt::No);
  }
  void mapObjectGetBigInt(Register mapObj, ValueOperand value, Register hash,
                          ValueOperand result, Register temp1, Register temp2,
                          Register temp3, Register temp4, Register temp5) {
    return mapObjectGet(mapObj, value, hash, result, temp1, temp2, temp3, temp4,
                        temp5, IsBigInt::Yes);
  }
  void mapObjectGetValue(Register mapObj, ValueOperand value, Register hash,
                         ValueOperand result, Register temp1, Register temp2,
                         Register temp3, Register temp4, Register temp5) {
    return mapObjectGet(mapObj, value, hash, result, temp1, temp2, temp3, temp4,
                        temp5, IsBigInt::Maybe);
  }
  template <typename OrderedHashTable>
  void loadOrderedHashTableCount(Register setOrMapObj, Register result);

  void loadSetObjectSize(Register setObj, Register result);
  void loadMapObjectSize(Register mapObj, Register result);

  // Inline version of js_TypedArray_uint8_clamp_double.
  // This function clobbers the input register.
  void clampDoubleToUint8(FloatRegister input, Register output) PER_ARCH;

  using MacroAssemblerSpecific::ensureDouble;

  template <typename S>
  void ensureDouble(const S& source, FloatRegister dest, Label* failure) {
    Label isDouble, done;
    branchTestDouble(Assembler::Equal, source, &isDouble);
    branchTestInt32(Assembler::NotEqual, source, failure);

    convertInt32ToDouble(source, dest);
    jump(&done);

    bind(&isDouble);
    unboxDouble(source, dest);

    bind(&done);
  }
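
  // For example (names are assumptions), unboxing a numeric Value into a
  // double while bailing out for non-numbers:
  //
  //   masm.ensureDouble(value, floatTemp, &bailout);
  //   // floatTemp now holds the number as a double, whether the Value was an
  //   // Int32 or a Double.
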
  // Inline allocation.

  void checkAllocatorState(Register temp, gc::AllocKind allocKind, Label* fail);
  bool shouldNurseryAllocate(gc::AllocKind allocKind, gc::Heap initialHeap);
  void nurseryAllocateObject(
      Register result, Register temp, gc::AllocKind allocKind,
      size_t nDynamicSlots, Label* fail,
      const AllocSiteInput& allocSite = AllocSiteInput());
  void bumpPointerAllocate(Register result, Register temp, Label* fail,
                           CompileZone* zone, JS::TraceKind traceKind,
                           uint32_t size,
                           const AllocSiteInput& allocSite = AllocSiteInput());
  void updateAllocSite(Register temp, Register result, CompileZone* zone,
                       Register site);

  void freeListAllocate(Register result, Register temp, gc::AllocKind allocKind,
                        Label* fail);
  void allocateObject(Register result, Register temp, gc::AllocKind allocKind,
                      uint32_t nDynamicSlots, gc::Heap initialHeap, Label* fail,
                      const AllocSiteInput& allocSite = AllocSiteInput());
  void nurseryAllocateString(Register result, Register temp,
                             gc::AllocKind allocKind, Label* fail);
  void allocateString(Register result, Register temp, gc::AllocKind allocKind,
                      gc::Heap initialHeap, Label* fail);
  void nurseryAllocateBigInt(Register result, Register temp, Label* fail);
  void copySlotsFromTemplate(Register obj,
                             const TemplateNativeObject& templateObj,
                             uint32_t start, uint32_t end);
  void fillSlotsWithConstantValue(Address addr, Register temp, uint32_t start,
                                  uint32_t end, const Value& v);
  void fillSlotsWithUndefined(Address addr, Register temp, uint32_t start,
                              uint32_t end);
  void fillSlotsWithUninitialized(Address addr, Register temp, uint32_t start,
                                  uint32_t end);

  void initGCSlots(Register obj, Register temp,
                   const TemplateNativeObject& templateObj);
  void callFreeStub(Register slots);

  void createGCObject(Register result, Register temp,
                      const TemplateObject& templateObj, gc::Heap initialHeap,
                      Label* fail, bool initContents = true);

  void createPlainGCObject(Register result, Register shape, Register temp,
                           Register temp2, uint32_t numFixedSlots,
                           uint32_t numDynamicSlots, gc::AllocKind allocKind,
                           gc::Heap initialHeap, Label* fail,
                           const AllocSiteInput& allocSite,
                           bool initContents = true);

  // dynamicSlotsTemp is used to initialize the dynamic slots after allocating
  // the object. If numUsedDynamicSlots == 0, it may be InvalidReg.
  void createArrayWithFixedElements(
      Register result, Register shape, Register temp, Register dynamicSlotsTemp,
      uint32_t arrayLength, uint32_t arrayCapacity,
      uint32_t numUsedDynamicSlots, uint32_t numDynamicSlots,
      gc::AllocKind allocKind, gc::Heap initialHeap, Label* fail,
      const AllocSiteInput& allocSite = AllocSiteInput());
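
  // Illustrative sketch (assumptions only): inline-allocating a plain object
  // with a known shape in an optimized path, falling back on allocation
  // failure. The alloc kind and slot counts are placeholders.
  //
  //   masm.createPlainGCObject(result, shape, temp1, temp2,
  //                            /* numFixedSlots = */ 2,
  //                            /* numDynamicSlots = */ 0,
  //                            gc::AllocKind::OBJECT2, gc::Heap::Default,
  //                            &fail, AllocSiteInput());
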
  void initGCThing(Register obj, Register temp,
                   const TemplateObject& templateObj, bool initContents = true);

  enum class TypedArrayLength { Fixed, Dynamic };

  void initTypedArraySlots(Register obj, Register temp, Register lengthReg,
                           LiveRegisterSet liveRegs, Label* fail,
                           FixedLengthTypedArrayObject* templateObj,
                           TypedArrayLength lengthKind);

  void newGCString(Register result, Register temp, gc::Heap initialHeap,
                   Label* fail);
  void newGCFatInlineString(Register result, Register temp,
                            gc::Heap initialHeap, Label* fail);

  void newGCBigInt(Register result, Register temp, gc::Heap initialHeap,
                   Label* fail);
  void branchIfNotStringCharsEquals(Register stringChars,
                                    const JSLinearString* linear, Label* label);

  // Returns true if |linear| is a (non-empty) string which can be compared
  // using |compareStringChars|.
  static bool canCompareStringCharsInline(const JSLinearString* linear);

  // Load the string characters in preparation for |compareStringChars|.
  void loadStringCharsForCompare(Register input, const JSLinearString* linear,
                                 Register stringChars, Label* fail);

  // Compare string characters based on the equality operator. The string
  // characters must be at least as long as the length of |linear|.
  void compareStringChars(JSOp op, Register stringChars,
                          const JSLinearString* linear, Register result);

  // Compares two strings for equality based on the JSOP.
  // This checks for identical pointers, atoms and length and fails for
  // everything else.
  void compareStrings(JSOp op, Register left, Register right, Register result,
                      Label* fail);
  // Result of the typeof operation. Falls back to slow-path for proxies.
  void typeOfObject(Register objReg, Register scratch, Label* slow,
                    Label* isObject, Label* isCallable, Label* isUndefined);

  // Implementation of IsCallable. Doesn't handle proxies.
  void isCallable(Register obj, Register output, Label* isProxy) {
    isCallableOrConstructor(true, obj, output, isProxy);
  }
  void isConstructor(Register obj, Register output, Label* isProxy) {
    isCallableOrConstructor(false, obj, output, isProxy);
  }

  void setIsCrossRealmArrayConstructor(Register obj, Register output);

  void setIsDefinitelyTypedArrayConstructor(Register obj, Register output);
  void loadMegamorphicCache(Register dest);
  void lookupStringInAtomCacheLastLookups(Register str, Register scratch,
                                          Register output, Label* fail);
  void loadMegamorphicSetPropCache(Register dest);

  void loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
                               Register outHash, Label* cacheMiss);

  void loadAtomHash(Register id, Register hash, Label* done);

  void emitExtractValueFromMegamorphicCacheEntry(
      Register obj, Register entry, Register scratch1, Register scratch2,
      ValueOperand output, Label* cacheHit, Label* cacheMiss);

  template <typename IdOperandType>
  void emitMegamorphicCacheLookupByValueCommon(
      IdOperandType id, Register obj, Register scratch1, Register scratch2,
      Register outEntryPtr, Label* cacheMiss, Label* cacheMissWithEntry);
  void emitMegamorphicCacheLookup(PropertyKey id, Register obj,
                                  Register scratch1, Register scratch2,
                                  Register outEntryPtr, ValueOperand output,
                                  Label* cacheHit);

  // NOTE: |id| must either be a ValueOperand or a Register. If it is a
  // Register, we assume that it is an atom.
  template <typename IdOperandType>
  void emitMegamorphicCacheLookupByValue(IdOperandType id, Register obj,
                                         Register scratch1, Register scratch2,
                                         Register outEntryPtr,
                                         ValueOperand output, Label* cacheHit);

  void emitMegamorphicCacheLookupExists(ValueOperand id, Register obj,
                                        Register scratch1, Register scratch2,
                                        Register outEntryPtr, Register output,
                                        Label* cacheHit, bool hasOwn);
5601 // Given a PropertyIteratorObject with valid indices, extract the current
5602 // PropertyIndex, storing the index in |outIndex| and the kind in |outKind|
5603 void extractCurrentIndexAndKindFromIterator(Register iterator
,
  template <typename IdType>
#ifdef JS_CODEGEN_X86
  // See MegamorphicSetElement in LIROps.yaml
  void emitMegamorphicCachedSetSlot(IdType id, Register obj, Register scratch1,
                                    ValueOperand value, Label* cacheHit,
                                    void (*emitPreBarrier)(MacroAssembler&,
                                                           const Address&,
                                                           MIRType));
#else
  void emitMegamorphicCachedSetSlot(
      IdType id, Register obj, Register scratch1, Register scratch2,
      Register scratch3, ValueOperand value, Label* cacheHit,
      void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));
#endif
  void loadDOMExpandoValueGuardGeneration(
      Register obj, ValueOperand output,
      JS::ExpandoAndGeneration* expandoAndGeneration, uint64_t generation,
      Label* fail);

  void guardNonNegativeIntPtrToInt32(Register reg, Label* fail);
  void loadArrayBufferByteLengthIntPtr(Register obj, Register output);
  void loadArrayBufferViewByteOffsetIntPtr(Register obj, Register output);
  void loadArrayBufferViewLengthIntPtr(Register obj, Register output);
  void loadGrowableSharedArrayBufferByteLengthIntPtr(Synchronization sync,
                                                     Register obj,
                                                     Register output);

  enum class ResizableArrayBufferView { TypedArray, DataView };
  void loadResizableArrayBufferViewLengthIntPtr(ResizableArrayBufferView view,
                                                Synchronization sync,
                                                Register obj, Register output,
                                                Register scratch);
  void loadResizableTypedArrayLengthIntPtr(Synchronization sync, Register obj,
                                           Register output, Register scratch) {
    loadResizableArrayBufferViewLengthIntPtr(
        ResizableArrayBufferView::TypedArray, sync, obj, output, scratch);
  }
  void loadResizableDataViewByteLengthIntPtr(Synchronization sync, Register obj,
                                             Register output,
                                             Register scratch) {
    loadResizableArrayBufferViewLengthIntPtr(ResizableArrayBufferView::DataView,
                                             sync, obj, output, scratch);
  }
  void isCallableOrConstructor(bool isCallable, Register obj, Register output,
                               Label* isProxy);
  // Generates code used to complete a bailout.
  void generateBailoutTail(Register scratch, Register bailoutInfo);
#ifndef JS_CODEGEN_ARM64
  // StackPointer manipulation functions.
  // On ARM64, the StackPointer is implemented as two synchronized registers.
  // Code shared across platforms must use these functions to be valid.
  template <typename T>
  inline void addToStackPtr(T t);
  template <typename T>
  inline void addStackPtrTo(T t);

  void subFromStackPtr(Imm32 imm32)
      DEFINED_ON(mips32, mips64, loong64, riscv64, wasm32, arm, x86, x64);
  void subFromStackPtr(Register reg);
  template <typename T>
  void subStackPtrFrom(T t) {
    subPtr(getStackPointer(), t);
  }
  template <typename T>
  void andToStackPtr(T t) {
    andPtr(t, getStackPointer());
  }
  template <typename T>
  void moveToStackPtr(T t) {
    movePtr(t, getStackPointer());
  }
  template <typename T>
  void moveStackPtrTo(T t) {
    movePtr(getStackPointer(), t);
  }
  template <typename T>
  void loadStackPtr(T t) {
    loadPtr(t, getStackPointer());
  }
  template <typename T>
  void storeStackPtr(T t) {
    storePtr(getStackPointer(), t);
  }
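  // A hedged usage sketch (illustrative only): shared code uses these
  // wrappers instead of naming a platform stack register directly, e.g.
  //
  //   masm.moveStackPtrTo(scratch);              // scratch <- sp
  //   masm.storeStackPtr(Address(base, offset)); // [base + offset] <- sp
  //
  // where |scratch|, |base| and |offset| are hypothetical caller-provided
  // operands.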
  // StackPointer testing functions.
  // On ARM64, sp can function as the zero register depending on context.
  // Code shared across platforms must use these functions to be valid.
  template <typename T>
  inline void branchTestStackPtr(Condition cond, T t, Label* label);
  template <typename T>
  inline void branchStackPtr(Condition cond, T rhs, Label* label);
  template <typename T>
  inline void branchStackPtrRhs(Condition cond, T lhs, Label* label);

  // Move the stack pointer based on the requested amount.
  inline void reserveStack(uint32_t amount);
#else  // !JS_CODEGEN_ARM64
  void reserveStack(uint32_t amount);
#endif
  void enableProfilingInstrumentation() {
    emitProfilingInstrumentation_ = true;
  }
  // This class is used to surround call sites throughout the assembler. This
  // is used by callWithABI, and callJit functions, except if suffixed by
  // NoProfiler.
  class MOZ_RAII AutoProfilerCallInstrumentation {
   public:
    explicit AutoProfilerCallInstrumentation(MacroAssembler& masm);
    ~AutoProfilerCallInstrumentation() = default;
  };
  friend class AutoProfilerCallInstrumentation;
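  // A hedged usage sketch (illustrative only): call emitters typically wrap
  // the call emission in the RAII class, e.g.
  //
  //   {
  //     AutoProfilerCallInstrumentation profiler(*this);
  //     // ... emit the actual call ...
  //   }
  //
  // so that, when profiling instrumentation is enabled, the call site is
  // recorded for later linking by linkProfilerCallSites.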
  void appendProfilerCallSite(CodeOffset label) {
    propagateOOM(profilerCallSites_.append(label));
  }
  // Fix up the code pointers to be written for locations where profilerCallSite
  // emitted moves of RIP to a register.
  void linkProfilerCallSites(JitCode* code);
  // This field is used to manage profiling instrumentation output. If
  // provided and enabled, then instrumentation will be emitted around call
  // sites.
  bool emitProfilingInstrumentation_;
  // Record locations of the call sites.
  Vector<CodeOffset, 0, SystemAllocPolicy> profilerCallSites_;
  void loadJitCodeRaw(Register func, Register dest);
  void loadBaselineJitCodeRaw(Register func, Register dest,
                              Label* failure = nullptr);
  void storeICScriptInJSContext(Register icScript);

  void loadBaselineFramePtr(Register framePtr, Register dest);
  void pushBaselineFramePtr(Register framePtr, Register scratch) {
    loadBaselineFramePtr(framePtr, scratch);
    push(scratch);
  }
  void PushBaselineFramePtr(Register framePtr, Register scratch) {
    loadBaselineFramePtr(framePtr, scratch);
    Push(scratch);
  }
  using MacroAssemblerSpecific::movePtr;
  void movePtr(TrampolinePtr ptr, Register dest) {
    movePtr(ImmPtr(ptr.value), dest);
  }
  void handleFailure();
  Label* exceptionLabel() {
    // Exceptions are currently handled the same way as sequential failures.
    return &failureLabel_;
  }
  Label* failureLabel() { return &failureLabel_; }
  void link(JitCode* code);

  void assumeUnreachable(const char* output);
  void printf(const char* output);
  void printf(const char* output, Register value);
#define DISPATCH_FLOATING_POINT_OP(method, type, arg1d, arg1f, arg2) \
  MOZ_ASSERT(IsFloatingPointType(type));                             \
  if (type == MIRType::Double)                                       \
    method##Double(arg1d, arg2);                                     \
  else                                                               \
    method##Float32(arg1f, arg2);
  void loadConstantFloatingPoint(double d, float f, FloatRegister dest,
                                 MIRType destType) {
    DISPATCH_FLOATING_POINT_OP(loadConstant, destType, d, f, dest);
  }
  void boolValueToFloatingPoint(ValueOperand value, FloatRegister dest,
                                MIRType destType) {
    DISPATCH_FLOATING_POINT_OP(boolValueTo, destType, value, value, dest);
  }
  void int32ValueToFloatingPoint(ValueOperand value, FloatRegister dest,
                                 MIRType destType) {
    DISPATCH_FLOATING_POINT_OP(int32ValueTo, destType, value, value, dest);
  }
  void convertInt32ToFloatingPoint(Register src, FloatRegister dest,
                                   MIRType destType) {
    DISPATCH_FLOATING_POINT_OP(convertInt32To, destType, src, src, dest);
  }

#undef DISPATCH_FLOATING_POINT_OP
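  // A hedged illustration of the dispatch above (not additional API): with
  // destType == MIRType::Double,
  //
  //   loadConstantFloatingPoint(3.0, 3.0f, fpReg, MIRType::Double);
  //
  // asserts the type is floating point and calls loadConstantDouble(3.0,
  // fpReg); with MIRType::Float32 it would call loadConstantFloat32(3.0f,
  // fpReg) instead. |fpReg| is a hypothetical FloatRegister.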
  void convertValueToFloatingPoint(ValueOperand value, FloatRegister output,
                                   Label* fail, MIRType outputType);
  void outOfLineTruncateSlow(FloatRegister src, Register dest,
                             bool widenFloatToDouble, bool compilingWasm,
                             wasm::BytecodeOffset callOffset);

  void convertInt32ValueToDouble(ValueOperand val);
  void convertValueToDouble(ValueOperand value, FloatRegister output,
                            Label* fail) {
    convertValueToFloatingPoint(value, output, fail, MIRType::Double);
  }
  void convertValueToFloat(ValueOperand value, FloatRegister output,
                           Label* fail) {
    convertValueToFloatingPoint(value, output, fail, MIRType::Float32);
  }

  // Functions for converting values to int.
  void convertDoubleToInt(FloatRegister src, Register output,
                          FloatRegister temp, Label* truncateFail, Label* fail,
                          IntConversionBehavior behavior);
  // Strings may be handled by providing labels to jump to when the behavior
  // is truncation or clamping. The subroutine, usually an OOL call, is
  // passed the unboxed string in |stringReg| and should convert it to a
  // double, storing the result in |temp|.
  void convertValueToInt(
      ValueOperand value, Label* handleStringEntry, Label* handleStringRejoin,
      Label* truncateDoubleSlow, Register stringReg, FloatRegister temp,
      Register output, Label* fail, IntConversionBehavior behavior,
      IntConversionInputKind conversion = IntConversionInputKind::Any);
  // This carries over the MToNumberInt32 operation on the ValueOperand
  // input; see comment at the top of this class.
  void convertValueToInt32(
      ValueOperand value, FloatRegister temp, Register output, Label* fail,
      bool negativeZeroCheck,
      IntConversionInputKind conversion = IntConversionInputKind::Any) {
    convertValueToInt(
        value, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
        negativeZeroCheck ? IntConversionBehavior::NegativeZeroCheck
                          : IntConversionBehavior::Normal,
        conversion);
  }
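  // A hedged usage sketch (illustrative only): converting a boxed value to
  // an int32 with a negative-zero check, branching to |&fail| for inputs
  // that do not convert cleanly:
  //
  //   masm.convertValueToInt32(val, fpTemp, out, &fail,
  //                            /* negativeZeroCheck = */ true);
  //
  // |val|, |fpTemp|, |out| and |fail| are caller-provided.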
  // This carries over the MTruncateToInt32 operation on the ValueOperand
  // input; see the comment at the top of this class.
  void truncateValueToInt32(ValueOperand value, Label* handleStringEntry,
                            Label* handleStringRejoin,
                            Label* truncateDoubleSlow, Register stringReg,
                            FloatRegister temp, Register output, Label* fail) {
    convertValueToInt(value, handleStringEntry, handleStringRejoin,
                      truncateDoubleSlow, stringReg, temp, output, fail,
                      IntConversionBehavior::Truncate);
  }
  void truncateValueToInt32(ValueOperand value, FloatRegister temp,
                            Register output, Label* fail) {
    truncateValueToInt32(value, nullptr, nullptr, nullptr, InvalidReg, temp,
                         output, fail);
  }
  // Convenience functions for clamping values to uint8.
  void clampValueToUint8(ValueOperand value, Label* handleStringEntry,
                         Label* handleStringRejoin, Register stringReg,
                         FloatRegister temp, Register output, Label* fail) {
    convertValueToInt(value, handleStringEntry, handleStringRejoin, nullptr,
                      stringReg, temp, output, fail,
                      IntConversionBehavior::ClampToUint8);
  }
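  // A hedged usage sketch (illustrative only): clamping a boxed value to
  // uint8 without a string fast path; passing nullptr for the string labels
  // is assumed to route string inputs to |&fail|:
  //
  //   masm.clampValueToUint8(val, nullptr, nullptr, InvalidReg, fpTemp, out,
  //                          &fail);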
  [[nodiscard]] bool icBuildOOLFakeExitFrame(void* fakeReturnAddr,
                                             AutoSaveLiveRegisters& save);
  // Align the stack pointer based on the number of arguments which are pushed
  // on the stack, such that the JitFrameLayout would be correctly aligned on
  // the JitStackAlignment.
  void alignJitStackBasedOnNArgs(Register nargs, bool countIncludesThis);
  void alignJitStackBasedOnNArgs(uint32_t argc, bool countIncludesThis);
  inline void assertStackAlignment(uint32_t alignment, int32_t offset = 0);
  void touchFrameValues(Register numStackValues, Register scratch1,
                        Register scratch2);
  // See comment block "64-bit GPRs carrying 32-bit values" above. This asserts
  // that the high bits of the register are appropriate for the architecture
  // and the value in the low bits.
  void debugAssertCanonicalInt32(Register r);
};
// StackMacroAssembler checks no GC will happen while it's on the stack.
class MOZ_RAII StackMacroAssembler : public MacroAssembler {
  JS::AutoCheckCannotGC nogc;

 public:
  StackMacroAssembler(JSContext* cx, TempAllocator& alloc);
};
// WasmMacroAssembler does not contain GC pointers, so it doesn't need the
// no-GC checking StackMacroAssembler has.
class MOZ_RAII WasmMacroAssembler : public MacroAssembler {
 public:
  explicit WasmMacroAssembler(TempAllocator& alloc, bool limitedSize = true);
  explicit WasmMacroAssembler(TempAllocator& alloc,
                              const wasm::ModuleEnvironment& env,
                              bool limitedSize = true);
  ~WasmMacroAssembler() { assertNoGCThings(); }
};
// Heap-allocated MacroAssembler used for Ion off-thread code generation.
// GC cancels off-thread compilations.
class IonHeapMacroAssembler : public MacroAssembler {
 public:
  IonHeapMacroAssembler(TempAllocator& alloc, CompileRealm* realm);
};
//{{{ check_macroassembler_style
inline uint32_t MacroAssembler::framePushed() const { return framePushed_; }
inline void MacroAssembler::setFramePushed(uint32_t framePushed) {
  framePushed_ = framePushed;
}
inline void MacroAssembler::adjustFrame(int32_t value) {
  MOZ_ASSERT_IF(value < 0, framePushed_ >= uint32_t(-value));
  setFramePushed(framePushed_ + value);
}
inline void MacroAssembler::implicitPop(uint32_t bytes) {
  MOZ_ASSERT(bytes % sizeof(intptr_t) == 0);
  MOZ_ASSERT(bytes <= INT32_MAX);
  adjustFrame(-int32_t(bytes));
}
//}}} check_macroassembler_style
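// A hedged worked example of the bookkeeping above (values are illustrative):
// reserveStack(16) increases framePushed() by 16; a later implicitPop(8)
// asserts that 8 is a multiple of sizeof(intptr_t) and calls adjustFrame(-8),
// leaving framePushed() a net 8 bytes higher than before the pair.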
static inline Assembler::DoubleCondition JSOpToDoubleCondition(JSOp op) {
  switch (op) {
    case JSOp::Eq:
    case JSOp::StrictEq:
      return Assembler::DoubleEqual;
    case JSOp::Ne:
    case JSOp::StrictNe:
      return Assembler::DoubleNotEqualOrUnordered;
    case JSOp::Lt:
      return Assembler::DoubleLessThan;
    case JSOp::Le:
      return Assembler::DoubleLessThanOrEqual;
    case JSOp::Gt:
      return Assembler::DoubleGreaterThan;
    case JSOp::Ge:
      return Assembler::DoubleGreaterThanOrEqual;
    default:
      MOZ_CRASH("Unexpected comparison operation");
  }
}
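// For example, JSOp::Lt maps to DoubleLessThan, which is false when either
// operand is NaN, while JSOp::Ne and JSOp::StrictNe map to
// DoubleNotEqualOrUnordered so that comparisons involving NaN evaluate to
// true, matching JS semantics.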
// Note: the op may have been inverted during lowering (to put constants in a
// position where they can be immediates), so it is important to use the
// lir->jsop() instead of the mir->jsop() when it is present.
static inline Assembler::Condition JSOpToCondition(JSOp op, bool isSigned) {
  if (isSigned) {
    switch (op) {
      case JSOp::Eq:
      case JSOp::StrictEq:
        return Assembler::Equal;
      case JSOp::Ne:
      case JSOp::StrictNe:
        return Assembler::NotEqual;
      case JSOp::Lt:
        return Assembler::LessThan;
      case JSOp::Le:
        return Assembler::LessThanOrEqual;
      case JSOp::Gt:
        return Assembler::GreaterThan;
      case JSOp::Ge:
        return Assembler::GreaterThanOrEqual;
      default:
        MOZ_CRASH("Unrecognized comparison operation");
    }
  } else {
    switch (op) {
      case JSOp::Eq:
      case JSOp::StrictEq:
        return Assembler::Equal;
      case JSOp::Ne:
      case JSOp::StrictNe:
        return Assembler::NotEqual;
      case JSOp::Lt:
        return Assembler::Below;
      case JSOp::Le:
        return Assembler::BelowOrEqual;
      case JSOp::Gt:
        return Assembler::Above;
      case JSOp::Ge:
        return Assembler::AboveOrEqual;
      default:
        MOZ_CRASH("Unrecognized comparison operation");
    }
  }
}
static inline size_t StackDecrementForCall(uint32_t alignment,
                                           size_t bytesAlreadyPushed,
                                           size_t bytesToPush) {
  return bytesToPush +
         ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
}
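// A hedged worked example (assuming ComputeByteAlignment pads up to the next
// multiple of |alignment|): with alignment = 16, bytesAlreadyPushed = 8 and
// bytesToPush = 20, the padding for 28 bytes is 4, so the function returns
// 24 and the resulting total of 8 + 24 = 32 bytes is 16-byte aligned.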
// Helper for generatePreBarrier.
inline DynFn JitPreWriteBarrier(MIRType type);
#endif /* jit_MacroAssembler_h */