/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2015 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmStubs.h"

#include "mozilla/ArrayUtils.h"

#include "jit/ABIFunctions.h"
#include "jit/JitFrames.h"
#include "jit/JitScript.h"
#include "jit/RegisterAllocator.h"
#include "js/Printf.h"
#include "util/Memory.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmInstance.h"

#include "jit/ABIFunctionList-inl.h"
#include "jit/MacroAssembler-inl.h"

using namespace js::jit;
using namespace js::wasm;

using mozilla::ArrayLength;

typedef Vector<jit::MIRType, 8, SystemAllocPolicy> MIRTypeVector;
using ABIArgMIRTypeIter = jit::ABIArgIter<MIRTypeVector>;

/*****************************************************************************/
// ABIResultIter implementation

static uint32_t ResultStackSize(ValType type) {
  switch (type.kind()) {
    case ValType::I32:
      return ABIResult::StackSizeOfInt32;
    case ValType::I64:
      return ABIResult::StackSizeOfInt64;
    case ValType::F32:
      return ABIResult::StackSizeOfFloat;
    case ValType::F64:
      return ABIResult::StackSizeOfDouble;
#ifdef ENABLE_WASM_SIMD
    case ValType::V128:
      return ABIResult::StackSizeOfV128;
#endif
    case ValType::Ref:
      return ABIResult::StackSizeOfPtr;
    default:
      MOZ_CRASH("Unexpected result type");
  }
}

uint32_t ABIResult::size() const { return ResultStackSize(type()); }

void ABIResultIter::settleRegister(ValType type) {
  MOZ_ASSERT(!done());
  MOZ_ASSERT_IF(direction_ == Next, index() < MaxRegisterResults);
  MOZ_ASSERT_IF(direction_ == Prev, index() >= count_ - MaxRegisterResults);
  static_assert(MaxRegisterResults == 1, "expected a single register result");

  switch (type.kind()) {
    case ValType::I32:
      cur_ = ABIResult(type, ReturnReg);
      break;
    case ValType::I64:
      cur_ = ABIResult(type, ReturnReg64);
      break;
    case ValType::F32:
      cur_ = ABIResult(type, ReturnFloat32Reg);
      break;
    case ValType::F64:
      cur_ = ABIResult(type, ReturnDoubleReg);
      break;
    case ValType::Ref:
      cur_ = ABIResult(type, ReturnReg);
      break;
#ifdef ENABLE_WASM_SIMD
    case ValType::V128:
      cur_ = ABIResult(type, ReturnSimd128Reg);
      break;
#endif
    default:
      MOZ_CRASH("Unexpected result type");
  }
}

void ABIResultIter::settleNext() {
  MOZ_ASSERT(direction_ == Next);
  MOZ_ASSERT(!done());

  uint32_t typeIndex = count_ - index_ - 1;
  ValType type = type_[typeIndex];

  if (index_ < MaxRegisterResults) {
    settleRegister(type);
    return;
  }

  cur_ = ABIResult(type, nextStackOffset_);
  nextStackOffset_ += ResultStackSize(type);
}

void ABIResultIter::settlePrev() {
  MOZ_ASSERT(direction_ == Prev);
  MOZ_ASSERT(!done());

  uint32_t typeIndex = index_;
  ValType type = type_[typeIndex];

  if (count_ - index_ - 1 < MaxRegisterResults) {
    settleRegister(type);
    return;
  }

  uint32_t size = ResultStackSize(type);
  MOZ_ASSERT(nextStackOffset_ >= size);
  nextStackOffset_ -= size;
  cur_ = ABIResult(type, nextStackOffset_);
}
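
// Illustrative sketch (not compiled): how the iterator above is typically
// consumed, mirroring its use in StoreRegisterResult() below. With
// MaxRegisterResults == 1, the first result settles in a register and any
// further results settle at increasing stack offsets.
//
//   ResultType results = ResultType::Vector(funcType.results());
//   for (ABIResultIter iter(results); !iter.done(); iter.next()) {
//     const ABIResult& result = iter.cur();
//     if (result.inRegister()) {
//       // result.gpr()/fpr() names the register holding this result.
//     } else {
//       // result.stackOffset() locates it in the stack-results area.
//     }
//   }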

// Register save/restore.
//
// On ARM64, the register sets are not able to represent SIMD registers (see
// the lengthy comment in Architecture-arm64.h for information), and so we use
// a hack to save and restore them: on this architecture, when we care about
// SIMD, we call special routines that know about them.
//
// In a couple of cases it is not currently necessary to save and restore SIMD
// registers, but the extra traffic is all along slow paths and not really
// worth optimizing.

static void PushRegsInMask(MacroAssembler& masm, const LiveRegisterSet& set) {
#if defined(ENABLE_WASM_SIMD) && defined(JS_CODEGEN_ARM64)
  masm.PushRegsInMaskForWasmStubs(set);
#else
  masm.PushRegsInMask(set);
#endif
}

static void PopRegsInMask(MacroAssembler& masm, const LiveRegisterSet& set) {
#if defined(ENABLE_WASM_SIMD) && defined(JS_CODEGEN_ARM64)
  masm.PopRegsInMaskForWasmStubs(set, LiveRegisterSet());
#else
  masm.PopRegsInMask(set);
#endif
}

static void PopRegsInMaskIgnore(MacroAssembler& masm,
                                const LiveRegisterSet& set,
                                const LiveRegisterSet& ignore) {
#if defined(ENABLE_WASM_SIMD) && defined(JS_CODEGEN_ARM64)
  masm.PopRegsInMaskForWasmStubs(set, ignore);
#else
  masm.PopRegsInMaskIgnore(set, ignore);
#endif
}
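
// Illustrative sketch (not compiled): these wrappers are always used in
// balanced pairs around code that clobbers registers, as GenPrint() and
// GenerateBigIntInitialization() below do:
//
//   AllocatableRegisterSet regs(RegisterSet::Volatile());
//   LiveRegisterSet save(regs.asLiveSet());
//   PushRegsInMask(masm, save);
//   ... emit code that clobbers volatile registers ...
//   PopRegsInMask(masm, save);  // or PopRegsInMaskIgnore to keep a result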

#ifdef WASM_CODEGEN_DEBUG
template <class Closure>
static void GenPrint(DebugChannel channel, MacroAssembler& masm,
                     const Maybe<Register>& taken, Closure passArgAndCall) {
  if (!IsCodegenDebugEnabled(channel)) {
    return;
  }

  AllocatableRegisterSet regs(RegisterSet::All());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(masm, save);

  if (taken) {
    regs.take(taken.value());
  }
  Register temp = regs.takeAnyGeneral();

  {
    MOZ_ASSERT(MaybeGetJitContext(),
               "codegen debug checks require a jit context");
    masm.setupUnalignedABICall(temp);
    passArgAndCall(IsCompilingWasm(), temp);
  }

  PopRegsInMask(masm, save);
}

static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
                      const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  UniqueChars str = JS_vsmprintf(fmt, ap);
  va_end(ap);

  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    // If we've gone this far, it means we're actually using the debugging
    // strings. In this case, we leak them! This is only for debugging, and
    // doing the right thing is cumbersome (in Ion, it'd mean add a vec of
    // strings to the IonScript; in wasm, it'd mean add it to the current
    // Module and serialize it properly).
    const char* text = str.release();

    masm.movePtr(ImmPtr((void*)text, ImmPtr::NoCheckToken()), temp);
    masm.passABIArg(temp);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintText);
    } else {
      using Fn = void (*)(const char* output);
      masm.callWithABI<Fn, PrintText>(MoveOp::GENERAL,
                                      CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
                          const Register& src) {
  GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
    masm.passABIArg(src);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintI32);
    } else {
      using Fn = void (*)(int32_t val);
      masm.callWithABI<Fn, PrintI32>(MoveOp::GENERAL,
                                     CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
                        const Register& src) {
  GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
    masm.passABIArg(src);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintPtr);
    } else {
      using Fn = void (*)(uint8_t* val);
      masm.callWithABI<Fn, PrintPtr>(MoveOp::GENERAL,
                                     CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
                        const Register64& src) {
#  if JS_BITS_PER_WORD == 64
  GenPrintf(channel, masm, "i64 ");
  GenPrintIsize(channel, masm, src.reg);
#  else
  GenPrintf(channel, masm, "i64(");
  GenPrintIsize(channel, masm, src.low);
  GenPrintIsize(channel, masm, src.high);
  GenPrintf(channel, masm, ") ");
#  endif
}

static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {
  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    masm.passABIArg(src, MoveOp::FLOAT32);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintF32);
    } else {
      using Fn = void (*)(float val);
      masm.callWithABI<Fn, PrintF32>(MoveOp::GENERAL,
                                     CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {
  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    masm.passABIArg(src, MoveOp::DOUBLE);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintF64);
    } else {
      using Fn = void (*)(double val);
      masm.callWithABI<Fn, PrintF64>(MoveOp::GENERAL,
                                     CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

#  ifdef ENABLE_WASM_SIMD
static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
                         const FloatRegister& src) {
  // TODO: We might try to do something meaningful here once SIMD data are
  // aligned and hence C++-ABI compliant. For now, just make ourselves visible.
  GenPrintf(channel, masm, "v128");
}
#  endif
#else
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
                      const char* fmt, ...) {}
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
                          const Register& src) {}
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
                        const Register& src) {}
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
                        const Register64& src) {}
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {}
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {}
#  ifdef ENABLE_WASM_SIMD
static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
                         const FloatRegister& src) {}
#  endif
#endif

static bool FinishOffsets(MacroAssembler& masm, Offsets* offsets) {
  // On old ARM hardware, constant pools could be inserted and they need to
  // be flushed before considering the size of the masm.
  masm.flushBuffer();
  offsets->end = masm.size();
  return !masm.oom();
}

static void AssertStackAlignment(MacroAssembler& masm, uint32_t alignment,
                                 uint32_t addBeforeAssert = 0) {
  MOZ_ASSERT(
      (sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
  masm.assertStackAlignment(alignment, addBeforeAssert);
}

template <class VectorT, template <class VecT> class ABIArgIterT>
static unsigned StackArgBytesHelper(const VectorT& args) {
  ABIArgIterT<VectorT> iter(args);
  while (!iter.done()) {
    iter++;
  }
  return iter.stackBytesConsumedSoFar();
}

template <class VectorT>
static unsigned StackArgBytesForNativeABI(const VectorT& args) {
  return StackArgBytesHelper<VectorT, ABIArgIter>(args);
}

template <class VectorT>
static unsigned StackArgBytesForWasmABI(const VectorT& args) {
  return StackArgBytesHelper<VectorT, WasmABIArgIter>(args);
}

static unsigned StackArgBytesForWasmABI(const FuncType& funcType) {
  ArgTypeVector args(funcType);
  return StackArgBytesForWasmABI(args);
}

static void Move64(MacroAssembler& masm, const Address& src,
                   const Address& dest, Register scratch) {
#if JS_BITS_PER_WORD == 32
  masm.load32(LowWord(src), scratch);
  masm.store32(scratch, LowWord(dest));
  masm.load32(HighWord(src), scratch);
  masm.store32(scratch, HighWord(dest));
#else
  Register64 scratch64(scratch);
  masm.load64(src, scratch64);
  masm.store64(scratch64, dest);
#endif
}

static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe,
                              Register argv, Register scratch) {
  // Copy parameters out of argv and into the registers/stack-slots specified
  // by the wasm ABI.
  //
  // SetupABIArguments are only used for C++ -> wasm calls through
  // callExport(), and V128 and Ref types (other than externref) are not
  // currently allowed.
  ArgTypeVector args(fe.funcType());
  for (WasmABIArgIter iter(args); !iter.done(); iter++) {
    unsigned argOffset = iter.index() * sizeof(ExportArg);
    Address src(argv, argOffset);
    MIRType type = iter.mirType();
    switch (iter->kind()) {
      case ABIArg::GPR:
        if (type == MIRType::Int32) {
          masm.load32(src, iter->gpr());
        } else if (type == MIRType::Int64) {
          masm.load64(src, iter->gpr64());
        } else if (type == MIRType::RefOrNull) {
          masm.loadPtr(src, iter->gpr());
        } else if (type == MIRType::StackResults) {
          MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
          masm.loadPtr(src, iter->gpr());
        } else {
          MOZ_CRASH("unknown GPR type");
        }
        break;
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR:
        if (type == MIRType::Int64) {
          masm.load64(src, iter->gpr64());
        } else {
          MOZ_CRASH("wasm uses hardfp for function calls.");
        }
        break;
#endif
      case ABIArg::FPU: {
        static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
                      "ExportArg must be big enough to store SIMD values");
        switch (type) {
          case MIRType::Double:
            masm.loadDouble(src, iter->fpu());
            break;
          case MIRType::Float32:
            masm.loadFloat32(src, iter->fpu());
            break;
          case MIRType::Simd128:
#ifdef ENABLE_WASM_SIMD
            // We will reach this point when we generate interpreter entry
            // stubs for exports that receive v128 values, but the code will
            // never be executed because such exports cannot be called from
            // JS.
            masm.breakpoint();
            break;
#else
            MOZ_CRASH("V128 not supported in SetupABIArguments");
#endif
          default:
            MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
            break;
        }
        break;
      }
      case ABIArg::Stack:
        switch (type) {
          case MIRType::Int32:
            masm.load32(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          case MIRType::Int64: {
            RegisterOrSP sp = masm.getStackPointer();
            Move64(masm, src, Address(sp, iter->offsetFromArgBase()), scratch);
            break;
          }
          case MIRType::RefOrNull:
            masm.loadPtr(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          case MIRType::Double: {
            ScratchDoubleScope fpscratch(masm);
            masm.loadDouble(src, fpscratch);
            masm.storeDouble(fpscratch, Address(masm.getStackPointer(),
                                                iter->offsetFromArgBase()));
            break;
          }
          case MIRType::Float32: {
            ScratchFloat32Scope fpscratch(masm);
            masm.loadFloat32(src, fpscratch);
            masm.storeFloat32(fpscratch, Address(masm.getStackPointer(),
                                                 iter->offsetFromArgBase()));
            break;
          }
          case MIRType::Simd128: {
#ifdef ENABLE_WASM_SIMD
            // We will reach this point when we generate interpreter entry
            // stubs for exports that receive v128 values, but the code will
            // never be executed because such exports cannot be called from
            // JS.
            masm.breakpoint();
            break;
#else
            MOZ_CRASH("V128 not supported in SetupABIArguments");
#endif
          }
          case MIRType::StackResults: {
            MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
            masm.loadPtr(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          }
          default:
            MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE(
                "unexpected stack arg type");
        }
        break;
      case ABIArg::Uninitialized:
        MOZ_CRASH("Uninitialized ABIArg kind");
    }
  }
}
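
// Illustrative sketch (not compiled): the argv side of the copy above. The
// C++ caller passes one fixed-size ExportArg slot per argument, so slot i
// lives at argv + i * sizeof(ExportArg) regardless of the argument's type;
// the exact layout below is an assumption for illustration only.
//
//   struct ExportArg {
//     uint64_t lo;
//     uint64_t hi;  // sized so a v128 fits, per the static_assert above
//   };
//   // SetupABIArguments reinterprets argv[iter.index()] per iter.mirType().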

static void StoreRegisterResult(MacroAssembler& masm, const FuncExport& fe,
                                Register loc) {
  ResultType results = ResultType::Vector(fe.funcType().results());
  DebugOnly<bool> sawRegisterResult = false;
  for (ABIResultIter iter(results); !iter.done(); iter.next()) {
    const ABIResult& result = iter.cur();
    if (result.inRegister()) {
      MOZ_ASSERT(!sawRegisterResult);
      sawRegisterResult = true;
      switch (result.type().kind()) {
        case ValType::I32:
          masm.store32(result.gpr(), Address(loc, 0));
          break;
        case ValType::I64:
          masm.store64(result.gpr64(), Address(loc, 0));
          break;
        case ValType::V128:
#ifdef ENABLE_WASM_SIMD
          masm.storeUnalignedSimd128(result.fpr(), Address(loc, 0));
          break;
#else
          MOZ_CRASH("V128 not supported in StoreABIReturn");
#endif
        case ValType::F32:
          masm.canonicalizeFloat(result.fpr());
          masm.storeFloat32(result.fpr(), Address(loc, 0));
          break;
        case ValType::F64:
          masm.canonicalizeDouble(result.fpr());
          masm.storeDouble(result.fpr(), Address(loc, 0));
          break;
        case ValType::Ref:
          masm.storePtr(result.gpr(), Address(loc, 0));
          break;
      }
    }
  }
  MOZ_ASSERT(sawRegisterResult == (results.length() > 0));
}

#if defined(JS_CODEGEN_ARM)
// The ARM system ABI also includes d15 & s31 in the non volatile float
// registers. Also exclude lr (a.k.a. r14) as we preserve it manually.
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask &
                                       ~(uint32_t(1) << Registers::lr)),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask |
                                     (1ULL << FloatRegisters::d15) |
                                     (1ULL << FloatRegisters::s31)));
#elif defined(JS_CODEGEN_ARM64)
// Exclude the Link Register (x30) because it is preserved manually.
//
// Include x16 (scratch) to make a 16-byte aligned amount of integer registers.
// Include d31 (scratch) to make a 16-byte aligned amount of floating registers.
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet((Registers::NonVolatileMask &
                                        ~(uint32_t(1) << Registers::lr)) |
                                       (uint32_t(1) << Registers::x16)),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask |
                                     FloatRegisters::NonAllocatableMask));
#else
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask));
#endif

#if defined(JS_CODEGEN_NONE)
static const unsigned NonVolatileRegsPushSize = 0;
#elif defined(ENABLE_WASM_SIMD) && defined(JS_CODEGEN_ARM64)
static const unsigned NonVolatileRegsPushSize =
    NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
    FloatRegister::GetPushSizeInBytesForWasmStubs(NonVolatileRegs.fpus());
#else
static const unsigned NonVolatileRegsPushSize =
    NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
    NonVolatileRegs.fpus().getPushSizeInBytes();
#endif

#ifdef ENABLE_WASM_REFTYPES
static const unsigned NumExtraPushed = 2;  // tls and argv
#else
static const unsigned NumExtraPushed = 1;  // argv
#endif

#ifdef JS_CODEGEN_ARM64
static const unsigned WasmPushSize = 16;
#else
static const unsigned WasmPushSize = sizeof(void*);
#endif

static const unsigned FramePushedBeforeAlign =
    NonVolatileRegsPushSize + NumExtraPushed * WasmPushSize;

static void AssertExpectedSP(const MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
  MOZ_ASSERT(sp.Is(masm.GetStackPointer64()));
#endif
}

template <class Operand>
static void WasmPush(MacroAssembler& masm, const Operand& op) {
#ifdef JS_CODEGEN_ARM64
  // Allocate a pad word so that SP can remain properly aligned. |op| will be
  // written at the lower-addressed of the two words pushed here.
  masm.reserveStack(WasmPushSize);
  masm.storePtr(op, Address(masm.getStackPointer(), 0));
#else
  masm.Push(op);
#endif
}

static void WasmPop(MacroAssembler& masm, Register r) {
#ifdef JS_CODEGEN_ARM64
  // Also pop the pad word allocated by WasmPush.
  masm.loadPtr(Address(masm.getStackPointer(), 0), r);
  masm.freeStack(WasmPushSize);
#else
  masm.Pop(r);
#endif
}

static void MoveSPForJitABI(MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
  masm.moveStackPtrTo(PseudoStackPointer);
#endif
}

static void CallFuncExport(MacroAssembler& masm, const FuncExport& fe,
                           const Maybe<ImmPtr>& funcPtr) {
  MOZ_ASSERT(fe.hasEagerStubs() == !funcPtr);
  if (funcPtr) {
    masm.call(*funcPtr);
  } else {
    masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
  }
}

STATIC_ASSERT_ANYREF_IS_JSOBJECT;  // Strings are currently boxed

// Unboxing is branchy and contorted because of Spectre mitigations - we don't
// have enough scratch registers. Were it not for the spectre mitigations in
// branchTestObjClass, the branch nest below would be restructured
// significantly by inverting branches and using fewer registers.

// Unbox an anyref in src (clobbering src in the process) and then re-box it as
// a Value in *dst. See the definition of AnyRef for a discussion of pointer
// representation.
static void UnboxAnyrefIntoValue(MacroAssembler& masm, Register tls,
                                 Register src, const Address& dst,
                                 Register scratch) {
  MOZ_ASSERT(src != scratch);

  // Not actually the value we're passing, but we've no way of
  // decoding anything better.
  GenPrintPtr(DebugChannel::Import, masm, src);

  Label notNull, mustUnbox, done;
  masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
  masm.storeValue(NullValue(), dst);
  masm.jump(&done);

  masm.bind(&notNull);
  // The type test will clear src if the test fails, so store early.
  masm.storeValue(JSVAL_TYPE_OBJECT, src, dst);
  // Spectre mitigations: see comment above about efficiency.
  masm.branchTestObjClass(Assembler::Equal, src,
                          Address(tls, offsetof(TlsData, valueBoxClass)),
                          scratch, src, &mustUnbox);
  masm.jump(&done);

  masm.bind(&mustUnbox);
  Move64(masm, Address(src, WasmValueBox::offsetOfValue()), dst, scratch);

  masm.bind(&done);
}
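
// Illustrative sketch (not compiled): the unboxing above expressed as C++,
// over an assumed AnyRef representation where null is a null pointer, plain
// JSObject* values are stored directly, and all other Values live in a
// WasmValueBox whose class is TlsData::valueBoxClass. ClassOf() is a
// hypothetical helper used only for this sketch.
//
//   Value UnboxAnyref(void* src) {
//     if (!src) {
//       return NullValue();
//     }
//     if (ClassOf(src) != valueBoxClass) {
//       return ObjectValue(*static_cast<JSObject*>(src));
//     }
//     return static_cast<WasmValueBox*>(src)->value();
//   }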

// Unbox an anyref in src and then re-box it as a Value in dst.
// See the definition of AnyRef for a discussion of pointer representation.
static void UnboxAnyrefIntoValueReg(MacroAssembler& masm, Register tls,
                                    Register src, ValueOperand dst,
                                    Register scratch) {
  MOZ_ASSERT(src != scratch);
#if JS_BITS_PER_WORD == 32
  MOZ_ASSERT(dst.typeReg() != scratch);
  MOZ_ASSERT(dst.payloadReg() != scratch);
#else
  MOZ_ASSERT(dst.valueReg() != scratch);
#endif

  // Not actually the value we're passing, but we've no way of
  // decoding anything better.
  GenPrintPtr(DebugChannel::Import, masm, src);

  Label notNull, mustUnbox, done;
  masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
  masm.moveValue(NullValue(), dst);
  masm.jump(&done);

  masm.bind(&notNull);
  // The type test will clear src if the test fails, so store early.
  masm.moveValue(TypedOrValueRegister(MIRType::Object, AnyRegister(src)), dst);
  // Spectre mitigations: see comment above about efficiency.
  masm.branchTestObjClass(Assembler::Equal, src,
                          Address(tls, offsetof(TlsData, valueBoxClass)),
                          scratch, src, &mustUnbox);
  masm.jump(&done);

  masm.bind(&mustUnbox);
  masm.loadValue(Address(src, WasmValueBox::offsetOfValue()), dst);

  masm.bind(&done);
}

// Box the Value in src as an anyref in dest. src and dest must not overlap.
// See the definition of AnyRef for a discussion of pointer representation.
static void BoxValueIntoAnyref(MacroAssembler& masm, ValueOperand src,
                               Register dest, Label* oolConvert) {
  Label nullValue, objectValue, done;
  {
    ScratchTagScope tag(masm, src);
    masm.splitTagForTest(src, tag);
    masm.branchTestObject(Assembler::Equal, tag, &objectValue);
    masm.branchTestNull(Assembler::Equal, tag, &nullValue);
    masm.jump(oolConvert);
  }

  masm.bind(&nullValue);
  masm.xorPtr(dest, dest);
  masm.jump(&done);

  masm.bind(&objectValue);
  masm.unboxObject(src, dest);

  masm.bind(&done);
}
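
// Illustrative sketch (not compiled): the boxing rule implemented above,
// under the same representation assumptions as the unboxing sketch:
//
//   void* BoxValueAsAnyref(const Value& v) {
//     if (v.isNull()) {
//       return nullptr;        // null stays a null pointer
//     }
//     if (v.isObject()) {
//       return &v.toObject();  // objects are stored unboxed
//     }
//     // everything else allocates a WasmValueBox out of line (oolConvert)
//   }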

// Generate a stub that enters wasm from a C++ caller via the native ABI. The
// signature of the entry point is Module::ExportFuncPtr. The exported wasm
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of ExportFuncPtr to the export's signature's ABI.
static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
                                const Maybe<ImmPtr>& funcPtr,
                                Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);

  offsets->begin = masm.currentOffset();

  // Save the return address if it wasn't already saved by the call insn.
#ifdef JS_USE_LINK_REGISTER
#  if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \
      defined(JS_CODEGEN_MIPS64)
  masm.pushReturnAddress();
#  elif defined(JS_CODEGEN_ARM64)
  // WasmPush updates framePushed() unlike pushReturnAddress(), but that's
  // cancelled by the setFramePushed() below.
  WasmPush(masm, lr);
#  else
  MOZ_CRASH("Implement this");
#  endif
#endif

  // Save all caller non-volatile registers before we clobber them here and in
  // the wasm callee (which does not preserve non-volatile registers).
  masm.setFramePushed(0);
  PushRegsInMask(masm, NonVolatileRegs);
  MOZ_ASSERT(masm.framePushed() == NonVolatileRegsPushSize);

  // Put the 'argv' argument into a non-argument/return/TLS register so that
  // we can use 'argv' while we fill in the arguments for the wasm callee.
  // Use a second non-argument/return register as temporary scratch.
  Register argv = ABINonArgReturnReg0;
  Register scratch = ABINonArgReturnReg1;

  // Read the arguments of wasm::ExportFuncPtr according to the native ABI.
  // The entry stub's frame is 1 word.
  const unsigned argBase = sizeof(void*) + masm.framePushed();
  ABIArgGenerator abi;
  ABIArg arg;

  // arg 1: ExportArg*
  arg = abi.next(MIRType::Pointer);
  if (arg.kind() == ABIArg::GPR) {
    masm.movePtr(arg.gpr(), argv);
  } else {
    masm.loadPtr(
        Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
        argv);
  }

  // Arg 2: TlsData*
  arg = abi.next(MIRType::Pointer);
  if (arg.kind() == ABIArg::GPR) {
    masm.movePtr(arg.gpr(), WasmTlsReg);
  } else {
    masm.loadPtr(
        Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
        WasmTlsReg);
  }

#ifdef ENABLE_WASM_REFTYPES
  WasmPush(masm, WasmTlsReg);
#endif

  // Save 'argv' on the stack so that we can recover it after the call.
  WasmPush(masm, argv);

  // Since we're about to dynamically align the stack, reset the frame depth
  // so we can still assert static stack depth balancing.
  MOZ_ASSERT(masm.framePushed() == FramePushedBeforeAlign);
  masm.setFramePushed(0);

  // Dynamically align the stack since ABIStackAlignment is not necessarily
  // WasmStackAlignment. Preserve SP so it can be restored after the call.
#ifdef JS_CODEGEN_ARM64
  static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
  masm.moveStackPtrTo(scratch);
  masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
  masm.Push(scratch);
#endif

  // Reserve stack space for the wasm call.
  unsigned argDecrement =
      StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
                            StackArgBytesForWasmABI(fe.funcType()));
  masm.reserveStack(argDecrement);

  // Copy parameters out of argv and into the wasm ABI registers/stack-slots.
  SetupABIArguments(masm, fe, argv, scratch);

  // Setup wasm register state. The nullness of the frame pointer is used to
  // determine whether the call ended in success or failure.
  masm.movePtr(ImmWord(0), FramePointer);
  masm.loadWasmPinnedRegsFromTls();

  masm.storePtr(WasmTlsReg,
                Address(masm.getStackPointer(), WasmCalleeTLSOffsetBeforeCall));

  // Call into the real function. Note that, due to the throw stub, fp, tls
  // and pinned registers may be clobbered.
  masm.assertStackAlignment(WasmStackAlignment);
  CallFuncExport(masm, fe, funcPtr);
  masm.assertStackAlignment(WasmStackAlignment);

  // Pop the arguments pushed after the dynamic alignment.
  masm.freeStack(argDecrement);

  // Pop the stack pointer to its value right before dynamic alignment.
#ifdef JS_CODEGEN_ARM64
  static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
  masm.PopStackPtr();
#endif
  MOZ_ASSERT(masm.framePushed() == 0);
  masm.setFramePushed(FramePushedBeforeAlign);

  // Recover the 'argv' pointer which was saved before aligning the stack.
  WasmPop(masm, argv);

#ifdef ENABLE_WASM_REFTYPES
  WasmPop(masm, WasmTlsReg);
#endif

  // Store the register result, if any, in argv[0].
  // No spectre.index_masking is required, as the value leaves ReturnReg.
  StoreRegisterResult(masm, fe, argv);

  // After the ReturnReg is stored into argv[0] but before fp is clobbered by
  // the PopRegsInMask(NonVolatileRegs) below, set the return value based on
  // whether fp is null (which is the case for successful returns) or the
  // FailFP magic value (set by the throw stub);
  Label success, join;
  masm.branchTestPtr(Assembler::Zero, FramePointer, FramePointer, &success);
#ifdef DEBUG
  Label ok;
  masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif
  masm.move32(Imm32(false), ReturnReg);
  masm.jump(&join);
  masm.bind(&success);
  masm.move32(Imm32(true), ReturnReg);
  masm.bind(&join);

  // Restore clobbered non-volatile registers of the caller.
  PopRegsInMask(masm, NonVolatileRegs);
  MOZ_ASSERT(masm.framePushed() == 0);

#if defined(JS_CODEGEN_ARM64)
  masm.setFramePushed(WasmPushSize);
  WasmPop(masm, lr);
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}
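
// Illustrative sketch (not compiled): the C++ side of the stub above. The
// exact ExportFuncPtr signature is an assumption inferred from the two
// pointer arguments read by the stub and the boolean it leaves in ReturnReg:
//
//   // using ExportFuncPtr = bool (*)(ExportArg* argv, TlsData* tls);
//   ExportArg argv[numArgs];   // args in; register result out via argv[0]
//   ... fill argv from the caller's values ...
//   if (!entry(argv, instance->tlsData())) {
//     // the stub observed FailFP in fp: an exception is pending
//   }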

#ifdef JS_PUNBOX64
static const ValueOperand ScratchValIonEntry = ValueOperand(ABINonArgReg0);
#else
static const ValueOperand ScratchValIonEntry =
    ValueOperand(ABINonArgReg0, ABINonArgReg1);
#endif
static const Register ScratchIonEntry = ABINonArgReg2;

static void CallSymbolicAddress(MacroAssembler& masm, bool isAbsolute,
                                SymbolicAddress sym) {
  if (isAbsolute) {
    masm.call(ImmPtr(SymbolicAddressTarget(sym), ImmPtr::NoCheckToken()));
  } else {
    masm.call(sym);
  }
}

// Load instance's TLS from the callee.
static void GenerateJitEntryLoadTls(MacroAssembler& masm, unsigned frameSize) {
  AssertExpectedSP(masm);

  // ScratchIonEntry := callee => JSFunction*
  unsigned offset = frameSize + JitFrameLayout::offsetOfCalleeToken();
  masm.loadFunctionFromCalleeToken(Address(masm.getStackPointer(), offset),
                                   ScratchIonEntry);

  // ScratchIonEntry := callee->getExtendedSlot(WASM_TLSDATA_SLOT)->toPrivate()
  //                 => TlsData*
  offset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_TLSDATA_SLOT);
  masm.loadPrivate(Address(ScratchIonEntry, offset), WasmTlsReg);
}

// Creates a JS fake exit frame for wasm, so the frame iterators just use
// JSJit frame iteration.
static void GenerateJitEntryThrow(MacroAssembler& masm, unsigned frameSize) {
  AssertExpectedSP(masm);

  MOZ_ASSERT(masm.framePushed() == frameSize);

  GenerateJitEntryLoadTls(masm, frameSize);

  masm.freeStack(frameSize);
  MoveSPForJitABI(masm);

  masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), ScratchIonEntry);
  masm.enterFakeExitFrameForWasm(ScratchIonEntry, ScratchIonEntry,
                                 ExitFrameType::WasmGenericJitEntry);

  masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)),
               ScratchIonEntry);
  masm.loadPtr(
      Address(ScratchIonEntry, Instance::offsetOfJSJitExceptionHandler()),
      ScratchIonEntry);
  masm.jump(ScratchIonEntry);
}

// Helper function for allocating a BigInt and initializing it from an I64
// in GenerateJitEntry and GenerateImportInterpExit. The return result is
// written to scratch.
static void GenerateBigIntInitialization(MacroAssembler& masm,
                                         unsigned bytesPushedByPrologue,
                                         Register64 input, Register scratch,
                                         const FuncExport* fe, Label* fail) {
#if JS_BITS_PER_WORD == 32
  MOZ_ASSERT(input.low != scratch);
  MOZ_ASSERT(input.high != scratch);
#else
  MOZ_ASSERT(input.reg != scratch);
#endif

  // We need to avoid clobbering other argument registers and the input.
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(masm, save);

  unsigned frameSize = StackDecrementForCall(
      ABIStackAlignment, masm.framePushed() + bytesPushedByPrologue, 0);
  masm.reserveStack(frameSize);
  masm.assertStackAlignment(ABIStackAlignment);

  // Needs to use a different call type depending on stub it's used from.
  if (fe) {
    CallSymbolicAddress(masm, !fe->hasEagerStubs(),
                        SymbolicAddress::AllocateBigInt);
  } else {
    masm.call(SymbolicAddress::AllocateBigInt);
  }
  masm.storeCallPointerResult(scratch);

  masm.assertStackAlignment(ABIStackAlignment);
  masm.freeStack(frameSize);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  PopRegsInMaskIgnore(masm, save, ignore);

  masm.branchTest32(Assembler::Zero, scratch, scratch, fail);
  masm.initializeBigInt64(Scalar::BigInt64, scratch, input);
}
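
// Illustrative sketch (not compiled): the effect of the helper above. The
// zero test on |scratch| is the allocation-failure check:
//
//   BigInt* bi = AllocateBigInt();   // runtime call via SymbolicAddress
//   if (!bi) {
//     goto fail;                     // branchTest32(Zero, scratch, ...)
//   }
//   InitializeBigInt64(bi, input);   // masm.initializeBigInt64(...)
//   // |scratch| now holds the BigInt* result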

// Generate a stub that enters wasm from a jit code caller via the jit ABI.
//
// ARM64 note: This does not save the PseudoStackPointer so we must be sure to
// recompute it on every return path, be it normal return or exception return.
// The JIT code we return to assumes it is correct.

static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
                             const FuncExport& fe, const Maybe<ImmPtr>& funcPtr,
                             Offsets* offsets) {
  AssertExpectedSP(masm);

  RegisterOrSP sp = masm.getStackPointer();

  GenerateJitEntryPrologue(masm, offsets);

  // The jit caller has set up the following stack layout (sp grows to the
  // left):
  // <-- retAddr | descriptor | callee | argc | this | arg1..N

  unsigned normalBytesNeeded = StackArgBytesForWasmABI(fe.funcType());

  MIRTypeVector coerceArgTypes;
  MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Int32));
  MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
  MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
  unsigned oolBytesNeeded = StackArgBytesForWasmABI(coerceArgTypes);

  unsigned bytesNeeded = std::max(normalBytesNeeded, oolBytesNeeded);

  // Note the jit caller ensures the stack is aligned *after* the call
  // instruction.
  unsigned frameSize = StackDecrementForCall(WasmStackAlignment,
                                             masm.framePushed(), bytesNeeded);

  // Reserve stack space for wasm ABI arguments, set up like this:
  // <-- ABI args | padding
  masm.reserveStack(frameSize);

  GenerateJitEntryLoadTls(masm, frameSize);

#ifdef ENABLE_WASM_SIMD
  if (fe.funcType().hasV128ArgOrRet()) {
    CallSymbolicAddress(masm, !fe.hasEagerStubs(),
                        SymbolicAddress::ReportV128JSCall);
    GenerateJitEntryThrow(masm, frameSize);
    return FinishOffsets(masm, offsets);
  }
#endif

  FloatRegister scratchF = ABINonArgDoubleReg;
  Register scratchG = ScratchIonEntry;
  ValueOperand scratchV = ScratchValIonEntry;

  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
            fe.funcIndex());

  // We do two loops:
  // - one loop up-front will make sure that all the Value tags fit the
  // expected signature argument types. If at least one inline conversion
  // fails, we just jump to the OOL path which will call into C++. Inline
  // conversions are ordered in the way we expect them to happen the most.
  // - the second loop will unbox the arguments into the right registers.
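  //
  // Illustrative summary of the inline conversions attempted by the first
  // loop (every other input shape branches to the OOL call below):
  //
  //   wasm i32  <- JS int32 | double (truncated) | null/undefined (0) | bool
  //   wasm i64  <- JS BigInt only
  //   wasm f32/f64 <- JS double | int32 | undefined (NaN) | null (0) | bool
  //   externref <- JS object | null (anything else is boxed out of line)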

  Label oolCall;
  for (size_t i = 0; i < fe.funcType().args().length(); i++) {
    unsigned jitArgOffset = frameSize + JitFrameLayout::offsetOfActualArg(i);
    Address jitArgAddr(sp, jitArgOffset);
    masm.loadValue(jitArgAddr, scratchV);

    Label next;
    switch (fe.funcType().args()[i].kind()) {
      case ValType::I32: {
        ScratchTagScope tag(masm, scratchV);
        masm.splitTagForTest(scratchV, tag);

        // For int32 inputs, just skip.
        masm.branchTestInt32(Assembler::Equal, tag, &next);

        // For double inputs, unbox, truncate and store back.
        Label storeBack, notDouble;
        masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
        {
          ScratchTagScopeRelease _(&tag);
          masm.unboxDouble(scratchV, scratchF);
          masm.branchTruncateDoubleMaybeModUint32(scratchF, scratchG, &oolCall);
          masm.jump(&storeBack);
        }
        masm.bind(&notDouble);

        // For null or undefined, store 0.
        Label nullOrUndefined, notNullOrUndefined;
        masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);
        masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
        masm.bind(&nullOrUndefined);
        {
          ScratchTagScopeRelease _(&tag);
          masm.storeValue(Int32Value(0), jitArgAddr);
        }
        masm.jump(&next);
        masm.bind(&notNullOrUndefined);

        // For booleans, store the number value back. Other types (symbol,
        // object, strings) go to the C++ call.
        masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
        masm.unboxBoolean(scratchV, scratchG);
        // fallthrough:

        masm.bind(&storeBack);
        {
          ScratchTagScopeRelease _(&tag);
          masm.storeValue(JSVAL_TYPE_INT32, scratchG, jitArgAddr);
        }
        break;
      }
      case ValType::I64: {
        ScratchTagScope tag(masm, scratchV);
        masm.splitTagForTest(scratchV, tag);

        // For BigInt inputs, just skip. Otherwise go to C++ for other
        // types that require creating a new BigInt or erroring.
        masm.branchTestBigInt(Assembler::NotEqual, tag, &oolCall);
        break;
      }
      case ValType::F32:
      case ValType::F64: {
        // Note we can reuse the same code for f32/f64 here, since for the
        // case of f32, the conversion of f64 to f32 will happen in the
        // second loop.
        ScratchTagScope tag(masm, scratchV);
        masm.splitTagForTest(scratchV, tag);

        // For double inputs, just skip.
        masm.branchTestDouble(Assembler::Equal, tag, &next);

        // For int32 inputs, convert and rebox.
        Label storeBack, notInt32;
        {
          ScratchTagScopeRelease _(&tag);
          masm.branchTestInt32(Assembler::NotEqual, scratchV, &notInt32);
          masm.int32ValueToDouble(scratchV, scratchF);
          masm.jump(&storeBack);
        }
        masm.bind(&notInt32);

        // For undefined (missing argument), store NaN.
        Label notUndefined;
        masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
        {
          ScratchTagScopeRelease _(&tag);
          masm.storeValue(DoubleValue(JS::GenericNaN()), jitArgAddr);
        }
        masm.jump(&next);
        masm.bind(&notUndefined);

        // For null, store 0.
        Label notNull;
        masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
        {
          ScratchTagScopeRelease _(&tag);
          masm.storeValue(DoubleValue(0.), jitArgAddr);
        }
        masm.jump(&next);
        masm.bind(&notNull);

        // For booleans, store the number value back. Other types (symbol,
        // object, strings) go to the C++ call.
        masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
        masm.boolValueToDouble(scratchV, scratchF);
        // fallthrough:

        masm.bind(&storeBack);
        {
          ScratchTagScopeRelease _(&tag);
          masm.boxDouble(scratchF, jitArgAddr);
        }
        break;
      }
      case ValType::Ref: {
        switch (fe.funcType().args()[i].refTypeKind()) {
          case RefType::Extern: {
            ScratchTagScope tag(masm, scratchV);
            masm.splitTagForTest(scratchV, tag);

            // For object inputs, we handle object and null inline, everything
            // else requires an actual box and we go out of line to allocate
            // that.
            masm.branchTestObject(Assembler::Equal, tag, &next);
            masm.branchTestNull(Assembler::Equal, tag, &next);
            masm.jump(&oolCall);
            break;
          }
          case RefType::Func:
          case RefType::Eq:
          case RefType::TypeIndex: {
            // Guarded against by temporarilyUnsupportedReftypeForEntry()
            MOZ_CRASH("unexpected argument type when calling from the jit");
          }
        }
        break;
      }
      case ValType::V128: {
        // Guarded against by hasV128ArgOrRet()
        MOZ_CRASH("unexpected argument type when calling from the jit");
      }
      default: {
        MOZ_CRASH("unexpected argument type when calling from the jit");
      }
    }
    masm.nopAlign(CodeAlignment);
    masm.bind(&next);
  }

  Label rejoinBeforeCall;
  masm.bind(&rejoinBeforeCall);

  // Convert all the expected values to unboxed values on the stack.
  ArgTypeVector args(fe.funcType());
  for (WasmABIArgIter iter(args); !iter.done(); iter++) {
    unsigned jitArgOffset =
        frameSize + JitFrameLayout::offsetOfActualArg(iter.index());
    Address argv(sp, jitArgOffset);
    bool isStackArg = iter->kind() == ABIArg::Stack;
    switch (iter.mirType()) {
      case MIRType::Int32: {
        Register target = isStackArg ? ScratchIonEntry : iter->gpr();
        masm.unboxInt32(argv, target);
        GenPrintIsize(DebugChannel::Function, masm, target);
        if (isStackArg) {
          masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      case MIRType::Int64: {
        // The coercion has provided a BigInt value by this point, which
        // we need to convert to an I64 here.
        if (isStackArg) {
          Address dst(sp, iter->offsetFromArgBase());
          Register src = scratchV.payloadOrValueReg();
#if JS_BITS_PER_WORD == 64
          Register64 scratch64(scratchG);
#else
          Register64 scratch64(scratchG, ABINonArgReg3);
#endif
          masm.unboxBigInt(argv, src);
          masm.loadBigInt64(src, scratch64);
          GenPrintI64(DebugChannel::Function, masm, scratch64);
          masm.store64(scratch64, dst);
        } else {
          Register src = scratchG;
          Register64 target = iter->gpr64();
          masm.unboxBigInt(argv, src);
          masm.loadBigInt64(src, target);
          GenPrintI64(DebugChannel::Function, masm, target);
        }
        break;
      }
      case MIRType::Float32: {
        FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
        masm.unboxDouble(argv, ABINonArgDoubleReg);
        masm.convertDoubleToFloat32(ABINonArgDoubleReg, target);
        GenPrintF32(DebugChannel::Function, masm, target.asSingle());
        if (isStackArg) {
          masm.storeFloat32(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      case MIRType::Double: {
        FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
        masm.unboxDouble(argv, target);
        GenPrintF64(DebugChannel::Function, masm, target);
        if (isStackArg) {
          masm.storeDouble(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      case MIRType::RefOrNull: {
        Register target = isStackArg ? ScratchIonEntry : iter->gpr();
        masm.unboxObjectOrNull(argv, target);
        GenPrintPtr(DebugChannel::Function, masm, target);
        if (isStackArg) {
          masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      default: {
        MOZ_CRASH("unexpected input argument when calling from jit");
      }
    }
  }

  GenPrintf(DebugChannel::Function, masm, "\n");

  // Setup wasm register state.
  masm.loadWasmPinnedRegsFromTls();

  masm.storePtr(WasmTlsReg,
                Address(masm.getStackPointer(), WasmCalleeTLSOffsetBeforeCall));

  // Call into the real function. Note that, due to the throw stub, fp, tls
  // and pinned registers may be clobbered.
  masm.assertStackAlignment(WasmStackAlignment);
  CallFuncExport(masm, fe, funcPtr);
  masm.assertStackAlignment(WasmStackAlignment);

  // If fp is equal to the FailFP magic value (set by the throw stub), then
  // report the exception to the JIT caller by jumping into the exception
  // stub; otherwise the FP value is still set to the parent ion frame value.
  Label exception;
  masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &exception);

  // Pop arguments.
  masm.freeStack(frameSize);

  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
            fe.funcIndex());

  // Store the return value in the JSReturnOperand.
  const ValTypeVector& results = fe.funcType().results();
  if (results.length() == 0) {
    GenPrintf(DebugChannel::Function, masm, "void");
    masm.moveValue(UndefinedValue(), JSReturnOperand);
  } else {
    MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
    switch (results[0].kind()) {
      case ValType::I32:
        GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
        // No spectre.index_masking is required, as the value is boxed.
        masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
        break;
      case ValType::F32: {
        masm.canonicalizeFloat(ReturnFloat32Reg);
        masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
        GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
        break;
      }
      case ValType::F64: {
        masm.canonicalizeDouble(ReturnDoubleReg);
        GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
        break;
      }
      case ValType::I64: {
        Label fail, done;
        GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
        GenerateBigIntInitialization(masm, 0, ReturnReg64, scratchG, &fe,
                                     &fail);
        masm.boxNonDouble(JSVAL_TYPE_BIGINT, scratchG, JSReturnOperand);
        masm.jump(&done);
        masm.bind(&fail);
        // Fixup the stack for the exception tail so that we can share it.
        masm.reserveStack(frameSize);
        masm.jump(&exception);
        masm.bind(&done);
        // Un-fixup the stack for the benefit of the assertion below.
        masm.setFramePushed(0);
        break;
      }
      case ValType::V128: {
        MOZ_CRASH("unexpected return type when calling from ion to wasm");
      }
      case ValType::Ref: {
        switch (results[0].refTypeKind()) {
          case RefType::Func:
          case RefType::Eq:
            // For FuncRef and EqRef use the AnyRef path for now, since that
            // will work.
          case RefType::Extern:
            // Per comment above, the call may have clobbered the Tls register,
            // so reload since unboxing will need it.
            GenerateJitEntryLoadTls(masm, /* frameSize */ 0);
            UnboxAnyrefIntoValueReg(masm, WasmTlsReg, ReturnReg,
                                    JSReturnOperand, WasmJitEntryReturnScratch);
            break;
          case RefType::TypeIndex:
            MOZ_CRASH("returning reference in jitentry NYI");
        }
        break;
      }
    }
  }

  GenPrintf(DebugChannel::Function, masm, "\n");

  MOZ_ASSERT(masm.framePushed() == 0);
#ifdef JS_CODEGEN_ARM64
  masm.loadPtr(Address(sp, 0), lr);
  masm.addToStackPtr(Imm32(8));
  masm.moveStackPtrTo(PseudoStackPointer);
  masm.abiret();
#else
  masm.ret();
#endif

  // Generate an OOL call to the C++ conversion path.
  if (fe.funcType().args().length()) {
    masm.bind(&oolCall);
    masm.setFramePushed(frameSize);

    // Baseline and Ion call C++ runtime via BuiltinThunk with wasm abi, so to
    // unify the BuiltinThunk's interface we call it here with wasm abi.
    jit::WasmABIArgIter<MIRTypeVector> argsIter(coerceArgTypes);

    // argument 0: function export index.
    if (argsIter->kind() == ABIArg::GPR) {
      masm.movePtr(ImmWord(funcExportIndex), argsIter->gpr());
    } else {
      masm.storePtr(ImmWord(funcExportIndex),
                    Address(sp, argsIter->offsetFromArgBase()));
    }
    argsIter++;

    // argument 1: tlsData
    if (argsIter->kind() == ABIArg::GPR) {
      masm.movePtr(WasmTlsReg, argsIter->gpr());
    } else {
      masm.storePtr(WasmTlsReg, Address(sp, argsIter->offsetFromArgBase()));
    }
    argsIter++;

    // argument 2: effective address of start of argv
    Address argv(sp, masm.framePushed() + JitFrameLayout::offsetOfActualArg(0));
    if (argsIter->kind() == ABIArg::GPR) {
      masm.computeEffectiveAddress(argv, argsIter->gpr());
    } else {
      masm.computeEffectiveAddress(argv, ScratchIonEntry);
      masm.storePtr(ScratchIonEntry,
                    Address(sp, argsIter->offsetFromArgBase()));
    }
    argsIter++;
    MOZ_ASSERT(argsIter.done());

    masm.assertStackAlignment(ABIStackAlignment);
    CallSymbolicAddress(masm, !fe.hasEagerStubs(),
                        SymbolicAddress::CoerceInPlace_JitEntry);
    masm.assertStackAlignment(ABIStackAlignment);

    // No spectre.index_masking is required, as the return value is used as a
    // bool.
    masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg,
                      &rejoinBeforeCall);
  }

  // Prepare to throw: reload WasmTlsReg from the frame.
  masm.bind(&exception);
  masm.setFramePushed(frameSize);
  GenerateJitEntryThrow(masm, frameSize);

  return FinishOffsets(masm, offsets);
}

void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe,
                                     const Instance& inst,
                                     const JitCallStackArgVector& stackArgs,
                                     bool profilingEnabled, Register scratch,
                                     uint32_t* callOffset) {
  MOZ_ASSERT(!IsCompilingWasm());

  size_t framePushedAtStart = masm.framePushed();

  if (profilingEnabled) {
    // FramePointer isn't volatile, manually preserve it because it will be
    // clobbered below.
    masm.Push(FramePointer);
  } else {
#ifdef DEBUG
    // Ensure that the FramePointer is actually Ion-volatile. This might
    // assert when bug 1426134 lands.
    AllocatableRegisterSet set(RegisterSet::All());
    TakeJitRegisters(/* profiling */ false, &set);
    MOZ_ASSERT(set.has(FramePointer),
               "replace the whole if branch by the then body when this fails");
#endif
  }

  // Note, if code here pushes a reference value into the frame for its own
  // purposes (and not just as an argument to the callee) then the frame must
  // be traced in TraceJitExitFrame, see the case there for DirectWasmJitCall.
  // The callee will trace values that are pushed as arguments, however.

  // Push a special frame descriptor that indicates the frame size so we can
  // directly iterate from the current JIT frame without an extra call.
  *callOffset = masm.buildFakeExitFrame(scratch);
  masm.loadJSContext(scratch);

  masm.moveStackPtrTo(FramePointer);
  masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::DirectWasmJitCall);
  masm.orPtr(Imm32(ExitOrJitEntryFPTag), FramePointer);

  // Move stack arguments to their final locations.
  unsigned bytesNeeded = StackArgBytesForWasmABI(fe.funcType());
  bytesNeeded = StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
                                      bytesNeeded);
  if (bytesNeeded) {
    masm.reserveStack(bytesNeeded);
  }

  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
            fe.funcIndex());

  ArgTypeVector args(fe.funcType());
  for (WasmABIArgIter iter(args); !iter.done(); iter++) {
    MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != scratch);
    MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != FramePointer);
    if (iter->kind() != ABIArg::Stack) {
      switch (iter.mirType()) {
        case MIRType::Int32:
          GenPrintIsize(DebugChannel::Function, masm, iter->gpr());
          break;
        case MIRType::Int64:
          GenPrintI64(DebugChannel::Function, masm, iter->gpr64());
          break;
        case MIRType::Float32:
          GenPrintF32(DebugChannel::Function, masm, iter->fpu());
          break;
        case MIRType::Double:
          GenPrintF64(DebugChannel::Function, masm, iter->fpu());
          break;
        case MIRType::RefOrNull:
          GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
          break;
        case MIRType::StackResults:
          MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
          GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
          break;
        default:
          MOZ_CRASH("ion to wasm fast path can only handle i32/f32/f64");
      }
      continue;
    }

    Address dst(masm.getStackPointer(), iter->offsetFromArgBase());

    const JitCallStackArg& stackArg = stackArgs[iter.index()];
    switch (stackArg.tag()) {
      case JitCallStackArg::Tag::Imm32:
        GenPrintf(DebugChannel::Function, masm, "%d ", stackArg.imm32());
        masm.storePtr(ImmWord(stackArg.imm32()), dst);
        break;
      case JitCallStackArg::Tag::GPR:
        MOZ_ASSERT(stackArg.gpr() != scratch);
        MOZ_ASSERT(stackArg.gpr() != FramePointer);
        GenPrintIsize(DebugChannel::Function, masm, stackArg.gpr());
        masm.storePtr(stackArg.gpr(), dst);
        break;
      case JitCallStackArg::Tag::FPU:
        switch (iter.mirType()) {
          case MIRType::Double:
            GenPrintF64(DebugChannel::Function, masm, stackArg.fpu());
            masm.storeDouble(stackArg.fpu(), dst);
            break;
          case MIRType::Float32:
            GenPrintF32(DebugChannel::Function, masm, stackArg.fpu());
            masm.storeFloat32(stackArg.fpu(), dst);
            break;
          default:
            MOZ_CRASH(
                "unexpected MIR type for a float register in wasm fast call");
        }
        break;
      case JitCallStackArg::Tag::Address: {
        // The address offsets were valid *before* we pushed our frame.
        Address src = stackArg.addr();
        src.offset += masm.framePushed() - framePushedAtStart;
        switch (iter.mirType()) {
          case MIRType::Double: {
            ScratchDoubleScope fpscratch(masm);
            masm.loadDouble(src, fpscratch);
            GenPrintF64(DebugChannel::Function, masm, fpscratch);
            masm.storeDouble(fpscratch, dst);
            break;
          }
          case MIRType::Float32: {
            ScratchFloat32Scope fpscratch(masm);
            masm.loadFloat32(src, fpscratch);
            GenPrintF32(DebugChannel::Function, masm, fpscratch);
            masm.storeFloat32(fpscratch, dst);
            break;
          }
          case MIRType::Int32: {
            masm.loadPtr(src, scratch);
            GenPrintIsize(DebugChannel::Function, masm, scratch);
            masm.storePtr(scratch, dst);
            break;
          }
          case MIRType::RefOrNull: {
            masm.loadPtr(src, scratch);
            GenPrintPtr(DebugChannel::Function, masm, scratch);
            masm.storePtr(scratch, dst);
            break;
          }
          case MIRType::StackResults: {
            MOZ_CRASH("multi-value in ion to wasm fast path unimplemented");
          }
          default: {
            MOZ_CRASH("unexpected MIR type for a stack slot in wasm fast call");
          }
        }
        break;
      }
      case JitCallStackArg::Tag::Undefined: {
        MOZ_CRASH("can't happen because of arg.kind() check");
      }
    }
  }

  GenPrintf(DebugChannel::Function, masm, "\n");

  // Load tls; from now on, WasmTlsReg is live.
  masm.movePtr(ImmPtr(inst.tlsData()), WasmTlsReg);
  masm.storePtr(WasmTlsReg,
                Address(masm.getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
  masm.loadWasmPinnedRegsFromTls();

  // Actual call.
  const CodeTier& codeTier = inst.code().codeTier(inst.code().bestTier());
  const MetadataTier& metadata = codeTier.metadata();
  const CodeRange& codeRange = metadata.codeRange(fe);
  void* callee = codeTier.segment().base() + codeRange.funcUncheckedCallEntry();

  masm.assertStackAlignment(WasmStackAlignment);
  masm.callJit(ImmPtr(callee));
#ifdef JS_CODEGEN_ARM64
  // WASM does not use the emulated stack pointer, so reinitialize it as it
  // might be clobbered either by WASM or by any C++ calls within.
  masm.initPseudoStackPtr();
#endif
  masm.assertStackAlignment(WasmStackAlignment);

  masm.branchPtr(Assembler::Equal, FramePointer, Imm32(wasm::FailFP),
                 masm.exceptionLabel());

  // Store the return value in the appropriate place.
  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
            fe.funcIndex());
  const ValTypeVector& results = fe.funcType().results();
  if (results.length() == 0) {
    masm.moveValue(UndefinedValue(), JSReturnOperand);
    GenPrintf(DebugChannel::Function, masm, "void");
  } else {
    MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
    switch (results[0].kind()) {
      case wasm::ValType::I32:
        // The return value is in ReturnReg, which is what Ion expects.
        GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
#if defined(JS_CODEGEN_X64)
        if (JitOptions.spectreIndexMasking) {
          masm.movl(ReturnReg, ReturnReg);
        }
#endif
        break;
      case wasm::ValType::I64:
        // The return value is in ReturnReg64, which is what Ion expects.
        GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
        break;
      case wasm::ValType::F32:
        masm.canonicalizeFloat(ReturnFloat32Reg);
        GenPrintF32(DebugChannel::Function, masm, ReturnFloat32Reg);
        break;
      case wasm::ValType::F64:
        masm.canonicalizeDouble(ReturnDoubleReg);
        GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
        break;
      case wasm::ValType::Ref:
        switch (results[0].refTypeKind()) {
          case wasm::RefType::Func:
          case wasm::RefType::Eq:
            // For FuncRef and EqRef, use the AnyRef path for now, since that
            // will work.
          case wasm::RefType::Extern:
            // The call to wasm above preserves the WasmTlsReg, we don't need
            // to reload it here.
            UnboxAnyrefIntoValueReg(masm, WasmTlsReg, ReturnReg,
                                    JSReturnOperand, WasmJitEntryReturnScratch);
            break;
          case wasm::RefType::TypeIndex:
            MOZ_CRASH("unexpected return type when calling from ion to wasm");
        }
        break;
      case wasm::ValType::V128:
        MOZ_CRASH("unexpected return type when calling from ion to wasm");
    }
  }

  GenPrintf(DebugChannel::Function, masm, "\n");

  // Free args + frame descriptor.
  masm.leaveExitFrame(bytesNeeded + ExitFrameLayout::Size());

  // If we pushed it, free FramePointer.
  if (profilingEnabled) {
    masm.Pop(FramePointer);
  }

  MOZ_ASSERT(framePushedAtStart == masm.framePushed());
}
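
// Illustrative sketch (not compiled): how a JIT backend might drive the
// generator above; the local names are assumptions for illustration:
//
//   uint32_t callOffset;
//   GenerateDirectCallFromJit(masm, fe, instance, stackArgs,
//                             profilingEnabled, scratchReg, &callOffset);
//   // callOffset marks the fake exit frame, which lets frame iterators
//   // walk directly from the JIT frame into wasm without a trampoline.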

static void StackCopy(MacroAssembler& masm, MIRType type, Register scratch,
                      Address src, Address dst) {
  if (type == MIRType::Int32) {
    masm.load32(src, scratch);
    GenPrintIsize(DebugChannel::Import, masm, scratch);
    masm.store32(scratch, dst);
  } else if (type == MIRType::Int64) {
#if JS_BITS_PER_WORD == 32
    GenPrintf(DebugChannel::Import, masm, "i64(");
    masm.load32(LowWord(src), scratch);
    GenPrintIsize(DebugChannel::Import, masm, scratch);
    masm.store32(scratch, LowWord(dst));
    masm.load32(HighWord(src), scratch);
    GenPrintIsize(DebugChannel::Import, masm, scratch);
    masm.store32(scratch, HighWord(dst));
    GenPrintf(DebugChannel::Import, masm, ") ");
#else
    Register64 scratch64(scratch);
    masm.load64(src, scratch64);
    GenPrintIsize(DebugChannel::Import, masm, scratch);
    masm.store64(scratch64, dst);
#endif
  } else if (type == MIRType::RefOrNull || type == MIRType::Pointer ||
             type == MIRType::StackResults) {
    masm.loadPtr(src, scratch);
    GenPrintPtr(DebugChannel::Import, masm, scratch);
    masm.storePtr(scratch, dst);
  } else if (type == MIRType::Float32) {
    ScratchFloat32Scope fpscratch(masm);
    masm.loadFloat32(src, fpscratch);
    GenPrintF32(DebugChannel::Import, masm, fpscratch);
    masm.storeFloat32(fpscratch, dst);
  } else if (type == MIRType::Double) {
    ScratchDoubleScope fpscratch(masm);
    masm.loadDouble(src, fpscratch);
    GenPrintF64(DebugChannel::Import, masm, fpscratch);
    masm.storeDouble(fpscratch, dst);
#ifdef ENABLE_WASM_SIMD
  } else if (type == MIRType::Simd128) {
    ScratchSimd128Scope fpscratch(masm);
    masm.loadUnalignedSimd128(src, fpscratch);
    GenPrintV128(DebugChannel::Import, masm, fpscratch);
    masm.storeUnalignedSimd128(fpscratch, dst);
#endif
  } else {
    MOZ_CRASH("StackCopy: unexpected type");
  }
}
1749 using ToValue
= bool;
1751 // Note, when toValue is true then this may destroy the values in incoming
1752 // argument registers as a result of Spectre mitigation.
static void FillArgumentArrayForExit(
    MacroAssembler& masm, Register tls, unsigned funcImportIndex,
    const FuncType& funcType, unsigned argOffset,
    unsigned offsetFromFPToCallerStackArgs, Register scratch, Register scratch2,
    Register scratch3, ToValue toValue, Label* throwLabel) {
  MOZ_ASSERT(scratch != scratch2);
  MOZ_ASSERT(scratch != scratch3);
  MOZ_ASSERT(scratch2 != scratch3);

  // This loop does not root the values that are being constructed in
  // for the arguments. Allocations that are generated by code either
  // in the loop or called from it should be NoGC allocations.
  GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; arguments ",
            funcImportIndex);

  ArgTypeVector args(funcType);
  for (ABIArgIter i(args); !i.done(); i++) {
    Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));

    MIRType type = i.mirType();
    MOZ_ASSERT(args.isSyntheticStackResultPointerArg(i.index()) ==
               (type == MIRType::StackResults));
    switch (i->kind()) {
      case ABIArg::GPR:
        if (type == MIRType::Int32) {
          GenPrintIsize(DebugChannel::Import, masm, i->gpr());
          if (toValue) {
            masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
          } else {
            masm.store32(i->gpr(), dst);
          }
        } else if (type == MIRType::Int64) {
          GenPrintI64(DebugChannel::Import, masm, i->gpr64());

          if (toValue) {
            GenerateBigIntInitialization(masm, offsetFromFPToCallerStackArgs,
                                         i->gpr64(), scratch, nullptr,
                                         throwLabel);
            masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
          } else {
            masm.store64(i->gpr64(), dst);
          }
        } else if (type == MIRType::RefOrNull) {
          if (toValue) {
            // This works also for FuncRef because it is distinguishable from
            // a boxed anyref.
            masm.movePtr(i->gpr(), scratch2);
            UnboxAnyrefIntoValue(masm, tls, scratch2, dst, scratch);
          } else {
            GenPrintPtr(DebugChannel::Import, masm, i->gpr());
            masm.storePtr(i->gpr(), dst);
          }
        } else if (type == MIRType::StackResults) {
          MOZ_ASSERT(!toValue, "Multi-result exit to JIT unimplemented");
          GenPrintPtr(DebugChannel::Import, masm, i->gpr());
          masm.storePtr(i->gpr(), dst);
        } else {
          MOZ_CRASH("FillArgumentArrayForExit, ABIArg::GPR: unexpected type");
        }
        break;
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR:
        if (type == MIRType::Int64) {
          GenPrintI64(DebugChannel::Import, masm, i->gpr64());

          if (toValue) {
            GenerateBigIntInitialization(masm, offsetFromFPToCallerStackArgs,
                                         i->gpr64(), scratch, nullptr,
                                         throwLabel);
            masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
          } else {
            masm.store64(i->gpr64(), dst);
          }
        } else {
          MOZ_CRASH("wasm uses hardfp for function calls.");
        }
        break;
#endif
      case ABIArg::FPU: {
        FloatRegister srcReg = i->fpu();
        if (type == MIRType::Double) {
          if (toValue) {
            // Preserve the NaN pattern in the input.
            ScratchDoubleScope fpscratch(masm);
            masm.moveDouble(srcReg, fpscratch);
            masm.canonicalizeDouble(fpscratch);
            GenPrintF64(DebugChannel::Import, masm, fpscratch);
            masm.boxDouble(fpscratch, dst);
          } else {
            GenPrintF64(DebugChannel::Import, masm, srcReg);
            masm.storeDouble(srcReg, dst);
          }
        } else if (type == MIRType::Float32) {
          if (toValue) {
            // JS::Values can't store Float32, so convert to a Double.
            ScratchDoubleScope fpscratch(masm);
            masm.convertFloat32ToDouble(srcReg, fpscratch);
            masm.canonicalizeDouble(fpscratch);
            GenPrintF64(DebugChannel::Import, masm, fpscratch);
            masm.boxDouble(fpscratch, dst);
          } else {
            // Preserve the NaN pattern in the input.
            GenPrintF32(DebugChannel::Import, masm, srcReg);
            masm.storeFloat32(srcReg, dst);
          }
        } else if (type == MIRType::Simd128) {
          // The value should never escape; the call will be stopped later as
          // the import is being called. But we should generate something sane
          // here for the boxed case since a debugger or the stack walker may
          // observe something.
          ScratchDoubleScope dscratch(masm);
          masm.loadConstantDouble(0, dscratch);
          GenPrintF64(DebugChannel::Import, masm, dscratch);
          if (toValue) {
            masm.boxDouble(dscratch, dst);
          } else {
            masm.storeDouble(dscratch, dst);
          }
        } else {
          MOZ_CRASH("Unknown MIRType in wasm exit stub");
        }
        break;
      }
      case ABIArg::Stack: {
        Address src(FramePointer,
                    offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
        if (toValue) {
          if (type == MIRType::Int32) {
            masm.load32(src, scratch);
            GenPrintIsize(DebugChannel::Import, masm, scratch);
            masm.storeValue(JSVAL_TYPE_INT32, scratch, dst);
          } else if (type == MIRType::Int64) {
#if JS_BITS_PER_WORD == 64
            Register64 scratch64(scratch2);
#else
            Register64 scratch64(scratch2, scratch3);
#endif
            masm.load64(src, scratch64);
            GenPrintI64(DebugChannel::Import, masm, scratch64);
            GenerateBigIntInitialization(masm, sizeof(Frame), scratch64,
                                         scratch, nullptr, throwLabel);
            masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
          } else if (type == MIRType::RefOrNull) {
            // This works also for FuncRef because it is distinguishable from a
            // boxed anyref.
            masm.loadPtr(src, scratch);
            UnboxAnyrefIntoValue(masm, tls, scratch, dst, scratch2);
          } else if (IsFloatingPointType(type)) {
            ScratchDoubleScope dscratch(masm);
            FloatRegister fscratch = dscratch.asSingle();
            if (type == MIRType::Float32) {
              masm.loadFloat32(src, fscratch);
              masm.convertFloat32ToDouble(fscratch, dscratch);
            } else {
              masm.loadDouble(src, dscratch);
            }
            masm.canonicalizeDouble(dscratch);
            GenPrintF64(DebugChannel::Import, masm, dscratch);
            masm.boxDouble(dscratch, dst);
          } else if (type == MIRType::Simd128) {
            // The value should never escape; the call will be stopped later as
            // the import is being called. But we should generate something
            // sane here for the boxed case since a debugger or the stack
            // walker may observe something.
            ScratchDoubleScope dscratch(masm);
            masm.loadConstantDouble(0, dscratch);
            GenPrintF64(DebugChannel::Import, masm, dscratch);
            masm.boxDouble(dscratch, dst);
          } else {
            MOZ_CRASH(
                "FillArgumentArrayForExit, ABIArg::Stack: unexpected type");
          }
        } else {
          if (type == MIRType::Simd128) {
            // As above. StackCopy does not know this trick.
            ScratchDoubleScope dscratch(masm);
            masm.loadConstantDouble(0, dscratch);
            GenPrintF64(DebugChannel::Import, masm, dscratch);
            masm.storeDouble(dscratch, dst);
          } else {
            StackCopy(masm, type, scratch, src, dst);
          }
        }
        break;
      }
      case ABIArg::Uninitialized:
        MOZ_CRASH("Uninitialized ABIArg kind");
    }
  }
  GenPrintf(DebugChannel::Import, masm, "\n");
}
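
// For example, for an import of signature (i32, f64) with toValue == true,
// the loop above lays out a dense Value array at sp + argOffset:
//
//   argv[0] = Int32Value(arg0)   // via storeValue(JSVAL_TYPE_INT32, ...)
//   argv[1] = DoubleValue(arg1)  // NaN-canonicalized before boxing
//
// with argv[i] located at sp + argOffset + i * sizeof(Value).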

// Generate a wrapper function with the standard intra-wasm call ABI which
// simply calls an import. This wrapper function allows any import to be
// treated like a normal wasm function for the purposes of exports and table
// calls. In particular, the wrapper function provides:
// - a table entry, so JS imports can be put into tables
// - normal entries, so that, if the import is re-exported, an entry stub can
//   be generated and called without any special cases
static bool GenerateImportFunction(jit::MacroAssembler& masm,
                                   const FuncImport& fi,
                                   FuncTypeIdDesc funcTypeId,
                                   FuncOffsets* offsets) {
  AssertExpectedSP(masm);

  GenerateFunctionPrologue(masm, funcTypeId, Nothing(), offsets);

  MOZ_ASSERT(masm.framePushed() == 0);
  const unsigned sizeOfTlsSlot = sizeof(void*);
  unsigned framePushed = StackDecrementForCall(
      WasmStackAlignment,
      sizeof(Frame),  // pushed by prologue
      StackArgBytesForWasmABI(fi.funcType()) + sizeOfTlsSlot);
  masm.wasmReserveStackChecked(framePushed, BytecodeOffset(0));
  MOZ_ASSERT(masm.framePushed() == framePushed);

  masm.storePtr(WasmTlsReg,
                Address(masm.getStackPointer(), framePushed - sizeOfTlsSlot));

  // The argument register state is already setup by our caller. We just need
  // to be sure not to clobber it before the call.
  Register scratch = ABINonArgReg0;

  // Copy our frame's stack arguments to the callee frame's stack arguments.
  unsigned offsetFromFPToCallerStackArgs = sizeof(Frame);
  ArgTypeVector args(fi.funcType());
  for (WasmABIArgIter i(args); !i.done(); i++) {
    if (i->kind() != ABIArg::Stack) {
      continue;
    }

    Address src(FramePointer,
                offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
    Address dst(masm.getStackPointer(), i->offsetFromArgBase());
    GenPrintf(DebugChannel::Import, masm,
              "calling exotic import function with arguments: ");
    StackCopy(masm, i.mirType(), scratch, src, dst);
    GenPrintf(DebugChannel::Import, masm, "\n");
  }

  // Call the import exit stub.
  CallSiteDesc desc(CallSiteDesc::Dynamic);
  MoveSPForJitABI(masm);
  masm.wasmCallImport(desc, CalleeDesc::import(fi.tlsDataOffset()));

  // Restore the TLS register and pinned regs, per wasm function ABI.
  masm.loadPtr(Address(masm.getStackPointer(), framePushed - sizeOfTlsSlot),
               WasmTlsReg);
  masm.loadWasmPinnedRegsFromTls();

  // Restore cx->realm.
  masm.switchToWasmTlsRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);

  GenerateFunctionEpilogue(masm, framePushed, offsets);
  return FinishOffsets(masm, offsets);
}

static const unsigned STUBS_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;

bool wasm::GenerateImportFunctions(const ModuleEnvironment& env,
                                   const FuncImportVector& imports,
                                   CompiledCode* code) {
  LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
  TempAllocator alloc(&lifo);
  WasmMacroAssembler masm(alloc);

  for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
    const FuncImport& fi = imports[funcIndex];

    FuncOffsets offsets;
    if (!GenerateImportFunction(masm, fi, env.funcTypes[funcIndex]->id,
                                &offsets)) {
      return false;
    }
    if (!code->codeRanges.emplaceBack(funcIndex, /* bytecodeOffset = */ 0,
                                      offsets)) {
      return false;
    }
  }

  masm.finish();
  if (masm.oom()) {
    return false;
  }

  return code->swap(masm);
}

// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into an appropriate callImport C++
// function, having boxed all the ABI arguments into a homogeneous Value array.
static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi,
                                     uint32_t funcImportIndex,
                                     Label* throwLabel,
                                     CallableOffsets* offsets) {
  AssertExpectedSP(masm);
  masm.setFramePushed(0);

  // Argument types for Instance::callImport_*:
  static const MIRType typeArray[] = {MIRType::Pointer,   // Instance*
                                      MIRType::Pointer,   // funcImportIndex
                                      MIRType::Int32,     // argc
                                      MIRType::Pointer};  // argv
  MIRTypeVector invokeArgTypes;
  MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, ArrayLength(typeArray)));

  // At the point of the call, the stack layout shall be (sp grows to the left):
  //   | stack args | padding | argv[] | padding | retaddr | caller stack args |
  // The padding between stack args and argv ensures that argv is aligned. The
  // padding between argv and retaddr ensures that sp is aligned.
  unsigned argOffset =
      AlignBytes(StackArgBytesForNativeABI(invokeArgTypes), sizeof(double));
  // The abiArgCount includes a stack result pointer argument if needed.
  unsigned abiArgCount = ArgTypeVector(fi.funcType()).lengthWithStackResults();
  unsigned argBytes = std::max<size_t>(1, abiArgCount) * sizeof(Value);
  unsigned framePushed =
      StackDecrementForCall(ABIStackAlignment,
                            sizeof(Frame),  // pushed by prologue
                            argOffset + argBytes);

  GenerateExitPrologue(masm, framePushed, ExitReason::Fixed::ImportInterp,
                       offsets);

  // Fill the argument array.
  unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
  Register scratch = ABINonArgReturnReg0;
  Register scratch2 = ABINonArgReturnReg1;
  // The scratch3 reg does not need to be non-volatile, but has to be
  // distinct from scratch & scratch2.
  Register scratch3 = ABINonVolatileReg;
  FillArgumentArrayForExit(masm, WasmTlsReg, funcImportIndex, fi.funcType(),
                           argOffset, offsetFromFPToCallerStackArgs, scratch,
                           scratch2, scratch3, ToValue(false), throwLabel);

  // Prepare the arguments for the call to Instance::callImport_*.
  ABIArgMIRTypeIter i(invokeArgTypes);

  // argument 0: Instance*
  Address instancePtr(WasmTlsReg, offsetof(TlsData, instance));
  if (i->kind() == ABIArg::GPR) {
    masm.loadPtr(instancePtr, i->gpr());
  } else {
    masm.loadPtr(instancePtr, scratch);
    masm.storePtr(scratch,
                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;

  // argument 1: funcImportIndex
  if (i->kind() == ABIArg::GPR) {
    masm.mov(ImmWord(funcImportIndex), i->gpr());
  } else {
    masm.store32(Imm32(funcImportIndex),
                 Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;

  // argument 2: argc
  unsigned argc = abiArgCount;
  if (i->kind() == ABIArg::GPR) {
    masm.mov(ImmWord(argc), i->gpr());
  } else {
    masm.store32(Imm32(argc),
                 Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;

  // argument 3: argv
  Address argv(masm.getStackPointer(), argOffset);
  if (i->kind() == ABIArg::GPR) {
    masm.computeEffectiveAddress(argv, i->gpr());
  } else {
    masm.computeEffectiveAddress(argv, scratch);
    masm.storePtr(scratch,
                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;
  MOZ_ASSERT(i.done());

  // Make the call, test whether it succeeded, and extract the return value.
  AssertStackAlignment(masm, ABIStackAlignment);
  masm.call(SymbolicAddress::CallImport_General);
  masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);

  ResultType resultType = ResultType::Vector(fi.funcType().results());
  ValType registerResultType;
  for (ABIResultIter iter(resultType); !iter.done(); iter.next()) {
    if (iter.cur().inRegister()) {
      MOZ_ASSERT(!registerResultType.isValid());
      registerResultType = iter.cur().type();
    }
  }
  if (!registerResultType.isValid()) {
    GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
              funcImportIndex);
    GenPrintf(DebugChannel::Import, masm, "void");
  } else {
    switch (registerResultType.kind()) {
      case ValType::I32:
        masm.load32(argv, ReturnReg);
        // No spectre.index_masking is required, as we know the value comes from
        // the i32 load above.
        GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                  funcImportIndex);
        GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
        break;
      case ValType::I64:
        masm.load64(argv, ReturnReg64);
        GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                  funcImportIndex);
        GenPrintI64(DebugChannel::Import, masm, ReturnReg64);
        break;
      case ValType::V128:
        // Note, CallImport_V128 currently always throws, so we should never
        // reach this point.
        masm.breakpoint();
        break;
      case ValType::F32:
        masm.loadFloat32(argv, ReturnFloat32Reg);
        GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                  funcImportIndex);
        GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
        break;
      case ValType::F64:
        masm.loadDouble(argv, ReturnDoubleReg);
        GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                  funcImportIndex);
        GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
        break;
      case ValType::Ref:
        switch (registerResultType.refTypeKind()) {
          case RefType::Func:
            masm.loadPtr(argv, ReturnReg);
            GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                      funcImportIndex);
            GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
            break;
          case RefType::Extern:
          case RefType::Eq:
            masm.loadPtr(argv, ReturnReg);
            GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                      funcImportIndex);
            GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
            break;
          case RefType::TypeIndex:
            MOZ_CRASH("No Ref support here yet");
        }
        break;
    }
  }

  GenPrintf(DebugChannel::Import, masm, "\n");

  // The native ABI preserves the TLS, heap and global registers since they
  // are non-volatile.
  MOZ_ASSERT(NonVolatileRegs.has(WasmTlsReg));
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) ||      \
    defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
    defined(JS_CODEGEN_MIPS64)
  MOZ_ASSERT(NonVolatileRegs.has(HeapReg));
#endif

  GenerateExitEpilogue(masm, framePushed, ExitReason::Fixed::ImportInterp,
                       offsets);

  return FinishOffsets(masm, offsets);
}

// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into a compatible JIT function,
// having boxed all the ABI arguments into the JIT stack frame layout.
static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi,
                                  unsigned funcImportIndex, Label* throwLabel,
                                  JitExitOffsets* offsets) {
  AssertExpectedSP(masm);
  masm.setFramePushed(0);

  // JIT calls use the following stack layout (sp grows to the left):
  //   | WasmToJSJitFrameLayout | this | arg1..N | saved Tls |
  // Unlike most ABIs, the JIT ABI requires that sp be JitStackAlignment-
  // aligned *after* pushing the return address.
  static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
  const unsigned sizeOfTlsSlot = sizeof(void*);
  const unsigned sizeOfRetAddr = sizeof(void*);
  const unsigned sizeOfPreFrame =
      WasmToJSJitFrameLayout::Size() - sizeOfRetAddr;
  const unsigned sizeOfThisAndArgs =
      (1 + fi.funcType().args().length()) * sizeof(Value);
  const unsigned totalJitFrameBytes =
      sizeOfRetAddr + sizeOfPreFrame + sizeOfThisAndArgs + sizeOfTlsSlot;
  const unsigned jitFramePushed =
      StackDecrementForCall(JitStackAlignment,
                            sizeof(Frame),  // pushed by prologue
                            totalJitFrameBytes) -
      sizeOfRetAddr;
  const unsigned sizeOfThisAndArgsAndPadding = jitFramePushed - sizeOfPreFrame;
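
  // Worked example (illustrative; assumes a 64-bit target with
  // JitStackAlignment == 16): StackDecrementForCall yields a decrement that
  // leaves sp 16-aligned including the return-address slot, so after
  // subtracting sizeOfRetAddr the prologue leaves sp % 16 == 8; the call
  // instruction's own push of the return address then restores the 16-byte
  // alignment the JIT ABI requires.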

  // On ARM64 we must align the SP to a 16-byte boundary.
#ifdef JS_CODEGEN_ARM64
  const unsigned frameAlignExtra = sizeof(void*);
#else
  const unsigned frameAlignExtra = 0;
#endif

  GenerateJitExitPrologue(masm, jitFramePushed + frameAlignExtra, offsets);

  // 1. Descriptor.
  size_t argOffset = frameAlignExtra;
  uint32_t descriptor =
      MakeFrameDescriptor(sizeOfThisAndArgsAndPadding, FrameType::WasmToJSJit,
                          WasmToJSJitFrameLayout::Size());
  masm.storePtr(ImmWord(uintptr_t(descriptor)),
                Address(masm.getStackPointer(), argOffset));
  argOffset += sizeof(size_t);

  // 2. Callee, part 1 -- need the callee register for argument filling, so
  // record offset here and set up callee later.
  size_t calleeArgOffset = argOffset;
  argOffset += sizeof(size_t);

  // 3. Argc.
  unsigned argc = fi.funcType().args().length();
  masm.storePtr(ImmWord(uintptr_t(argc)),
                Address(masm.getStackPointer(), argOffset));
  argOffset += sizeof(size_t);
  MOZ_ASSERT(argOffset == sizeOfPreFrame + frameAlignExtra);

  // 4. |this| value.
  masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
  argOffset += sizeof(Value);

  // 5. Fill the arguments.
  const uint32_t offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
  Register scratch = ABINonArgReturnReg1;   // Repeatedly clobbered
  Register scratch2 = ABINonArgReturnReg0;  // Reused as callee below
  // The scratch3 reg does not need to be non-volatile, but has to be
  // distinct from scratch & scratch2.
  Register scratch3 = ABINonVolatileReg;
  FillArgumentArrayForExit(masm, WasmTlsReg, funcImportIndex, fi.funcType(),
                           argOffset, offsetFromFPToCallerStackArgs, scratch,
                           scratch2, scratch3, ToValue(true), throwLabel);
  argOffset += fi.funcType().args().length() * sizeof(Value);
  MOZ_ASSERT(argOffset == sizeOfThisAndArgs + sizeOfPreFrame + frameAlignExtra);

  // Preserve Tls because the JIT callee clobbers it.
  const size_t savedTlsOffset = argOffset;
  masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), savedTlsOffset));

  // 2. Callee, part 2 -- now that the register is free, set up the callee.
  Register callee = ABINonArgReturnReg0;  // Live until call

  // 2.1. Get JSFunction callee.
  masm.loadWasmGlobalPtr(fi.tlsDataOffset() + offsetof(FuncImportTls, fun),
                         callee);

  // 2.2. Save callee.
  masm.storePtr(callee, Address(masm.getStackPointer(), calleeArgOffset));

  // 6. Check if we need to rectify arguments.
  masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);

  Label rectify;
  masm.branch32(Assembler::Above, scratch, Imm32(fi.funcType().args().length()),
                &rectify);

  // 7. If we haven't rectified arguments, load callee executable entry point.

  if (fi.funcType().jitExitRequiresArgCheck()) {
    masm.loadJitCodeRaw(callee, callee);
  } else {
    // This is equivalent to masm.loadJitCodeNoArgCheck(callee, callee) but uses
    // two loads instead of three.
    masm.loadWasmGlobalPtr(
        fi.tlsDataOffset() + offsetof(FuncImportTls, jitScript), callee);
    masm.loadPtr(Address(callee, JitScript::offsetOfJitCodeSkipArgCheck()),
                 callee);
  }

  Label rejoinBeforeCall;
  masm.bind(&rejoinBeforeCall);

  AssertStackAlignment(masm, JitStackAlignment,
                       sizeOfRetAddr + frameAlignExtra);
#ifdef JS_CODEGEN_ARM64
  // Conform to JIT ABI.
  masm.addToStackPtr(Imm32(8));
#endif
  MoveSPForJitABI(masm);
  masm.callJitNoProfiler(callee);
#ifdef JS_CODEGEN_ARM64
  // Conform to platform conventions - align the SP.
  masm.subFromStackPtr(Imm32(8));
#endif

  // Note that there might be a GC thing in the JSReturnOperand now.
  // In all the code paths from here:
  // - either the value is unboxed because it was a primitive and we don't
  //   need to worry about rooting anymore.
  // - or the value needs to be rooted, but nothing can cause a GC between
  //   here and CoerceInPlace, which roots before coercing to a primitive.

  // The JIT callee clobbers all registers, including WasmTlsReg and
  // FramePointer, so restore those here. During this sequence of
  // instructions, FP can't be trusted by the profiling frame iterator.
  offsets->untrustedFPStart = masm.currentOffset();
  AssertStackAlignment(masm, JitStackAlignment,
                       sizeOfRetAddr + frameAlignExtra);

  masm.loadPtr(Address(masm.getStackPointer(), savedTlsOffset), WasmTlsReg);
  masm.moveStackPtrTo(FramePointer);
  masm.addPtr(Imm32(masm.framePushed()), FramePointer);
  offsets->untrustedFPEnd = masm.currentOffset();

  // As explained above, the frame was aligned for the JIT ABI such that
  //   (sp + sizeof(void*)) % JitStackAlignment == 0
  // But now we possibly want to call one of several different C++ functions,
  // so subtract the sizeof(void*) so that sp is aligned for an ABI call.
  static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
#ifdef JS_CODEGEN_ARM64
  // We've already allocated the extra space for frame alignment.
  static_assert(sizeOfRetAddr == frameAlignExtra, "ARM64 SP alignment");
#else
  masm.reserveStack(sizeOfRetAddr);
#endif
  unsigned nativeFramePushed = masm.framePushed();
  AssertStackAlignment(masm, ABIStackAlignment);

#ifdef DEBUG
  {
    Label ok;
    masm.branchTestMagic(Assembler::NotEqual, JSReturnOperand, &ok);
    masm.breakpoint();
    masm.bind(&ok);
  }
#endif

  GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
            funcImportIndex);

  Label oolConvert;
  const ValTypeVector& results = fi.funcType().results();
  if (results.length() == 0) {
    GenPrintf(DebugChannel::Import, masm, "void");
  } else {
    MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
    switch (results[0].kind()) {
      case ValType::I32:
        // No spectre.index_masking required, as the return value does not come
        // to us in ReturnReg.
        masm.truncateValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg,
                                  &oolConvert);
        GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
        break;
      case ValType::I64:
        // No fastpath for now, go immediately to ool case
        masm.jump(&oolConvert);
        break;
      case ValType::V128:
        // Unreachable as callImport should not call the stub.
        masm.breakpoint();
        break;
      case ValType::F32:
        masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg,
                                 &oolConvert);
        GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
        break;
      case ValType::F64:
        masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg,
                                  &oolConvert);
        GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
        break;
      case ValType::Ref:
        switch (results[0].refTypeKind()) {
          case RefType::Extern:
            BoxValueIntoAnyref(masm, JSReturnOperand, ReturnReg, &oolConvert);
            GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
            break;
          case RefType::Func:
          case RefType::Eq:
          case RefType::TypeIndex:
            MOZ_CRASH("typed reference returned by import (jit exit) NYI");
        }
        break;
    }
  }

  GenPrintf(DebugChannel::Import, masm, "\n");

  Label done;
  masm.bind(&done);

  GenerateJitExitEpilogue(masm, masm.framePushed(), offsets);

  {
    // Call the arguments rectifier.
    masm.bind(&rectify);
    masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)), callee);
    masm.loadPtr(Address(callee, Instance::offsetOfJSJitArgsRectifier()),
                 callee);
    masm.jump(&rejoinBeforeCall);
  }

  if (oolConvert.used()) {
    masm.bind(&oolConvert);
    masm.setFramePushed(nativeFramePushed);

    // Coercion calls use the following stack layout (sp grows to the left):
    //   | args | padding | Value argv[1] | padding | exit Frame |
    MIRTypeVector coerceArgTypes;
    MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
    unsigned offsetToCoerceArgv =
        AlignBytes(StackArgBytesForWasmABI(coerceArgTypes), sizeof(Value));
    MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
    AssertStackAlignment(masm, ABIStackAlignment);

    // Store return value into argv[0].
    masm.storeValue(JSReturnOperand,
                    Address(masm.getStackPointer(), offsetToCoerceArgv));

    // From this point, it's safe to reuse the scratch register (which
    // might be part of the JSReturnOperand).

    // The JIT might have clobbered exitFP at this point. Since there's
    // going to be a CoerceInPlace call, pretend we're still doing the JIT
    // call by restoring our tagged exitFP.
    SetExitFP(masm, ExitReason::Fixed::ImportJit, scratch);

    // argument 0: argv
    ABIArgMIRTypeIter i(coerceArgTypes);
    Address argv(masm.getStackPointer(), offsetToCoerceArgv);
    if (i->kind() == ABIArg::GPR) {
      masm.computeEffectiveAddress(argv, i->gpr());
    } else {
      masm.computeEffectiveAddress(argv, scratch);
      masm.storePtr(scratch,
                    Address(masm.getStackPointer(), i->offsetFromArgBase()));
    }
    i++;
    MOZ_ASSERT(i.done());

    // Call coercion function. Note that right after the call, the value of
    // FP is correct because FP is non-volatile in the native ABI.
    AssertStackAlignment(masm, ABIStackAlignment);
    const ValTypeVector& results = fi.funcType().results();
    if (results.length() > 0) {
      // NOTE that once there can be more than one result and we can box some of
      // the results (as we must for AnyRef), pointer and already-boxed results
      // must be rooted while subsequent results are boxed.
      MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
      switch (results[0].kind()) {
        case ValType::I32:
          masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
          masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
          masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv),
                          ReturnReg);
          // No spectre.index_masking required, as we generate a known-good
          // value in a safe way here.
          break;
        case ValType::I64: {
          masm.call(SymbolicAddress::CoerceInPlace_ToBigInt);
          masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
          Address argv(masm.getStackPointer(), offsetToCoerceArgv);
          masm.unboxBigInt(argv, scratch);
          masm.loadBigInt64(scratch, ReturnReg64);
          break;
        }
        case ValType::F32:
        case ValType::F64:
          masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
          masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
          masm.unboxDouble(Address(masm.getStackPointer(), offsetToCoerceArgv),
                           ReturnDoubleReg);
          if (results[0].kind() == ValType::F32) {
            masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
          }
          break;
        case ValType::Ref:
          switch (results[0].refTypeKind()) {
            case RefType::Extern:
              masm.call(SymbolicAddress::BoxValue_Anyref);
              masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg,
                                throwLabel);
              break;
            case RefType::Func:
            case RefType::Eq:
            case RefType::TypeIndex:
              MOZ_CRASH("Unsupported convert type");
          }
          break;
        default:
          MOZ_CRASH("Unsupported convert type");
      }
    }

    // Maintain the invariant that exitFP is either unset or not set to a
    // wasm tagged exitFP, per the jit exit contract.
    ClearExitFP(masm, scratch);

    masm.jump(&done);
    masm.setFramePushed(0);
  }

  MOZ_ASSERT(masm.framePushed() == 0);

  return FinishOffsets(masm, offsets);
}

struct ABIFunctionArgs {
  ABIFunctionType abiType;
  size_t len;

  explicit ABIFunctionArgs(ABIFunctionType sig)
      : abiType(ABIFunctionType(sig >> ArgType_Shift)) {
    len = 0;
    uint32_t i = uint32_t(abiType);
    while (i) {
      i = i >> ArgType_Shift;
      len++;
    }
  }

  size_t length() const { return len; }

  MIRType operator[](size_t i) const {
    MOZ_ASSERT(i < len);
    uint32_t abi = uint32_t(abiType);
    while (i--) {
      abi = abi >> ArgType_Shift;
    }
    return ToMIRType(ABIArgType(abi & ArgType_Mask));
  }
};

bool wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType,
                                ExitReason exitReason, void* funcPtr,
                                CallableOffsets* offsets) {
  AssertExpectedSP(masm);
  masm.setFramePushed(0);

  ABIFunctionArgs args(abiType);
  uint32_t framePushed =
      StackDecrementForCall(ABIStackAlignment,
                            sizeof(Frame),  // pushed by prologue
                            StackArgBytesForNativeABI(args));

  GenerateExitPrologue(masm, framePushed, exitReason, offsets);

  // Copy out and convert caller arguments, if needed.
  unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
  Register scratch = ABINonArgReturnReg0;
  for (ABIArgIter i(args); !i.done(); i++) {
    if (i->argInRegister()) {
#ifdef JS_CODEGEN_ARM
      // Non hard-fp passes the args values in GPRs.
      if (!UseHardFpABI() && IsFloatingPointType(i.mirType())) {
        FloatRegister input = i->fpu();
        if (i.mirType() == MIRType::Float32) {
          masm.ma_vxfer(input, Register::FromCode(input.id()));
        } else if (i.mirType() == MIRType::Double) {
          uint32_t regId = input.singleOverlay().id();
          masm.ma_vxfer(input, Register::FromCode(regId),
                        Register::FromCode(regId + 1));
        }
      }
#endif
      continue;
    }

    Address src(FramePointer,
                offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
    Address dst(masm.getStackPointer(), i->offsetFromArgBase());
    StackCopy(masm, i.mirType(), scratch, src, dst);
  }

  AssertStackAlignment(masm, ABIStackAlignment);
  MoveSPForJitABI(masm);
  masm.call(ImmPtr(funcPtr, ImmPtr::NoCheckToken()));

#if defined(JS_CODEGEN_X64)
  // No spectre.index_masking is required, as the caller will mask.
#elif defined(JS_CODEGEN_X86)
  // x86 passes the return value on the x87 FP stack.
  Operand op(esp, 0);
  MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
  if (retType == MIRType::Float32) {
    masm.fstp32(op);
    masm.loadFloat32(op, ReturnFloat32Reg);
  } else if (retType == MIRType::Double) {
    masm.fstp(op);
    masm.loadDouble(op, ReturnDoubleReg);
  }
#elif defined(JS_CODEGEN_ARM)
  // Non hard-fp passes the return values in GPRs.
  MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
  if (!UseHardFpABI() && IsFloatingPointType(retType)) {
    masm.ma_vxfer(r0, r1, d0);
  }
#endif

  GenerateExitEpilogue(masm, framePushed, exitReason, offsets);
  return FinishOffsets(masm, offsets);
}

#if defined(JS_CODEGEN_ARM)
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask & ~((uint32_t(1) << Registers::sp) |
                                              (uint32_t(1) << Registers::pc))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
# ifdef ENABLE_WASM_SIMD
#  error "high lanes of SIMD registers need to be saved too."
# endif
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((uint32_t(1) << Registers::k0) |
                         (uint32_t(1) << Registers::k1) |
                         (uint32_t(1) << Registers::sp) |
                         (uint32_t(1) << Registers::zero))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
# ifdef ENABLE_WASM_SIMD
#  error "high lanes of SIMD registers need to be saved too."
# endif
#elif defined(JS_CODEGEN_ARM64)
// We assume that traps do not happen while lr is live. This both ensures that
// the size of RegsToPreserve is a multiple of 2 (preserving WasmStackAlignment)
// and gives us a register to clobber in the return path.
//
// Note there are no SIMD registers in the set; the doubles in the set stand in
// for SIMD registers, which are pushed as appropriate. See comments above at
// PushRegsInMask and the lengthy comment in Architecture-arm64.h.
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((uint32_t(1) << Registers::StackPointer) |
                         (uint32_t(1) << Registers::lr))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// It's correct to use FloatRegisters::AllMask even when SIMD is not enabled;
// PushRegsInMask strips out the high lanes of the XMM registers in this case,
// while the singles will be stripped as they are aliased by the larger
// doubles.
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~(uint32_t(1) << Registers::StackPointer)),
    FloatRegisterSet(FloatRegisters::AllMask));
#else
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask));
# ifdef ENABLE_WASM_SIMD
#  error "no SIMD support"
# endif
#endif

// Generate a MachineState which describes the locations of the GPRs as saved
// by GenerateTrapExit. FP registers are ignored. Note that the values
// stored in the MachineState are offsets in words downwards from the top of
// the save area. That is, a higher value implies a lower address.
void wasm::GenerateTrapExitMachineState(MachineState* machine,
                                        size_t* numWords) {
  // This is the number of words pushed by the initial WasmPush().
  *numWords = WasmPushSize / sizeof(void*);
  MOZ_ASSERT(*numWords == TrapExitDummyValueOffsetFromTop + 1);

  // And these correspond to the PushRegsInMask() that immediately follows.
  for (GeneralRegisterBackwardIterator iter(RegsToPreserve.gprs()); iter.more();
       ++iter) {
    machine->setRegisterLocation(*iter,
                                 reinterpret_cast<uintptr_t*>(*numWords));
    (*numWords)++;
  }
}
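
// For example, on targets where WasmPush pushes a single word, *numWords
// starts at 1: the dummy return-address word sits
// TrapExitDummyValueOffsetFromTop == 0 words below the top of the save area,
// and each GPR visited by the backward iterator claims the next word down,
// per "a higher value implies a lower address" above.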

// Generate a stub which calls WasmReportTrap() and can be executed by having
// the signal handler redirect PC from any trapping instruction.
static bool GenerateTrapExit(MacroAssembler& masm, Label* throwLabel,
                             Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);

  masm.setFramePushed(0);

  offsets->begin = masm.currentOffset();

  // Traps can only happen at well-defined program points. However, since
  // traps may resume and the optimal assumption for the surrounding code is
  // that registers are not clobbered, we need to preserve all registers in
  // the trap exit. One simplifying assumption is that flags may be clobbered.
  // Push a dummy word to use as return address below.
  WasmPush(masm, ImmWord(TrapExitDummyValue));
  unsigned framePushedBeforePreserve = masm.framePushed();
  PushRegsInMask(masm, RegsToPreserve);
  unsigned offsetOfReturnWord = masm.framePushed() - framePushedBeforePreserve;

  // We know that StackPointer is word-aligned, but not necessarily
  // stack-aligned, so we need to align it dynamically.
  Register preAlignStackPointer = ABINonVolatileReg;
  masm.moveStackPtrTo(preAlignStackPointer);
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }

  masm.assertStackAlignment(ABIStackAlignment);
  masm.call(SymbolicAddress::HandleTrap);

  // WasmHandleTrap returns null if control should transfer to the throw stub.
  masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);

  // Otherwise, the return value is the TrapData::resumePC we must jump to.
  // We must restore register state before jumping, which will clobber
  // ReturnReg, so store ReturnReg in the above-reserved stack slot which we
  // use to jump to via ret.
  masm.moveToStackPtr(preAlignStackPointer);
  masm.storePtr(ReturnReg, Address(masm.getStackPointer(), offsetOfReturnWord));
  PopRegsInMask(masm, RegsToPreserve);
#ifdef JS_CODEGEN_ARM64
  WasmPop(masm, lr);
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}

// Generate a stub that restores the stack pointer to what it was on entry to
// the wasm activation, sets the return register to 'false' and then executes a
// return which will return from this wasm activation to the caller. This stub
// should only be called after the caller has reported an error.
static bool GenerateThrowStub(MacroAssembler& masm, Label* throwLabel,
                              Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);

  masm.bind(throwLabel);

  offsets->begin = masm.currentOffset();

  // Conservatively, the stack pointer can be unaligned and we must align it
  // dynamically.
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }

  // WasmHandleThrow unwinds JitActivation::wasmExitFP() and returns the
  // address of the return address on the stack this stub should return to.
  // Set the FramePointer to a magic value to indicate a return by throw.
  masm.call(SymbolicAddress::HandleThrow);
  masm.moveToStackPtr(ReturnReg);
  masm.move32(Imm32(FailFP), FramePointer);
#ifdef JS_CODEGEN_ARM64
  masm.loadPtr(Address(ReturnReg, 0), lr);
  masm.addToStackPtr(Imm32(8));
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}

static const LiveRegisterSet AllAllocatableRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::AllocatableMask),
                    FloatRegisterSet(FloatRegisters::AllMask));

// Generate a stub that handles togglable enter/leave frame traps or
// breakpoints. The stub records the frame pointer (via GenerateExitPrologue)
// and saves most registers so as not to affect the code generated by
// WasmBaselineCompile.
static bool GenerateDebugTrapStub(MacroAssembler& masm, Label* throwLabel,
                                  CallableOffsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);
  masm.setFramePushed(0);

  GenerateExitPrologue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);

  // Save all registers used between baseline compiler operations.
  PushRegsInMask(masm, AllAllocatableRegs);

  uint32_t framePushed = masm.framePushed();

  // This method might be called with unaligned stack -- aligning and
  // saving old stack pointer at the top.
#ifdef JS_CODEGEN_ARM64
  // On ARM64 however the stack is always aligned.
  static_assert(ABIStackAlignment == 16, "ARM64 SP alignment");
#else
  Register scratch = ABINonArgReturnReg0;
  masm.moveStackPtrTo(scratch);
  masm.subFromStackPtr(Imm32(sizeof(intptr_t)));
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  masm.storePtr(scratch, Address(masm.getStackPointer(), 0));
#endif

  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }
  masm.assertStackAlignment(ABIStackAlignment);
  masm.call(SymbolicAddress::HandleDebugTrap);

  masm.branchIfFalseBool(ReturnReg, throwLabel);

  if (ShadowStackSpace) {
    masm.addToStackPtr(Imm32(ShadowStackSpace));
  }
#ifndef JS_CODEGEN_ARM64
  masm.loadPtr(Address(masm.getStackPointer(), 0), scratch);
  masm.moveToStackPtr(scratch);
#endif

  masm.setFramePushed(framePushed);
  PopRegsInMask(masm, AllAllocatableRegs);

  GenerateExitEpilogue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);

  return FinishOffsets(masm, offsets);
}

bool wasm::GenerateEntryStubs(MacroAssembler& masm, size_t funcExportIndex,
                              const FuncExport& fe, const Maybe<ImmPtr>& callee,
                              bool isAsmJS, CodeRangeVector* codeRanges) {
  MOZ_ASSERT(!callee == fe.hasEagerStubs());
  MOZ_ASSERT_IF(isAsmJS, fe.hasEagerStubs());

  Offsets offsets;
  if (!GenerateInterpEntry(masm, fe, callee, &offsets)) {
    return false;
  }
  if (!codeRanges->emplaceBack(CodeRange::InterpEntry, fe.funcIndex(),
                               offsets)) {
    return false;
  }

  if (isAsmJS || fe.funcType().temporarilyUnsupportedReftypeForEntry()) {
    return true;
  }

#ifdef ENABLE_WASM_SIMD
  // SIMD spec requires JS calls to exports with V128 in the signature to throw.
  if (fe.funcType().hasV128ArgOrRet()) {
    return true;
  }
#endif

  // Returning multiple values to JS JIT code not yet implemented (see
  // bug 1595031).
  if (fe.funcType().temporarilyUnsupportedResultCountForJitEntry()) {
    return true;
  }

  if (!GenerateJitEntry(masm, funcExportIndex, fe, callee, &offsets)) {
    return false;
  }
  if (!codeRanges->emplaceBack(CodeRange::JitEntry, fe.funcIndex(), offsets)) {
    return false;
  }

  return true;
}

bool wasm::GenerateStubs(const ModuleEnvironment& env,
                         const FuncImportVector& imports,
                         const FuncExportVector& exports, CompiledCode* code) {
  LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
  TempAllocator alloc(&lifo);
  WasmMacroAssembler masm(alloc);

  // Swap in already-allocated empty vectors to avoid malloc/free.
  if (!code->swap(masm)) {
    return false;
  }

  Label throwLabel;

  JitSpew(JitSpew_Codegen, "# Emitting wasm import stubs");

  for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
    const FuncImport& fi = imports[funcIndex];

    CallableOffsets interpOffsets;
    if (!GenerateImportInterpExit(masm, fi, funcIndex, &throwLabel,
                                  &interpOffsets)) {
      return false;
    }
    if (!code->codeRanges.emplaceBack(CodeRange::ImportInterpExit, funcIndex,
                                      interpOffsets)) {
      return false;
    }

#ifdef ENABLE_WASM_SIMD
    // SIMD spec requires calls to JS functions with V128 in the signature to
    // throw.
    if (fi.funcType().hasV128ArgOrRet()) {
      continue;
    }
#endif

    if (fi.funcType().temporarilyUnsupportedReftypeForExit()) {
      continue;
    }

    // Exit to JS JIT code returning multiple values not yet implemented
    // (see bug 1595031).
    if (fi.funcType().temporarilyUnsupportedResultCountForJitExit()) {
      continue;
    }

    JitExitOffsets jitOffsets;
    if (!GenerateImportJitExit(masm, fi, funcIndex, &throwLabel, &jitOffsets)) {
      return false;
    }
    if (!code->codeRanges.emplaceBack(funcIndex, jitOffsets)) {
      return false;
    }
  }

  JitSpew(JitSpew_Codegen, "# Emitting wasm export stubs");

  Maybe<ImmPtr> noAbsolute;
  for (size_t i = 0; i < exports.length(); i++) {
    const FuncExport& fe = exports[i];
    if (!fe.hasEagerStubs()) {
      continue;
    }
    if (!GenerateEntryStubs(masm, i, fe, noAbsolute, env.isAsmJS(),
                            &code->codeRanges)) {
      return false;
    }
  }

  JitSpew(JitSpew_Codegen, "# Emitting wasm exit stubs");

  Offsets offsets;

  if (!GenerateTrapExit(masm, &throwLabel, &offsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::TrapExit, offsets)) {
    return false;
  }

  CallableOffsets callableOffsets;
  if (!GenerateDebugTrapStub(masm, &throwLabel, &callableOffsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::DebugTrap, callableOffsets)) {
    return false;
  }

  if (!GenerateThrowStub(masm, &throwLabel, &offsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::Throw, offsets)) {
    return false;
  }

  masm.finish();
  if (masm.oom()) {
    return false;
  }

  return code->swap(masm);
}