/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/MacroAssembler-inl.h"

#include "mozilla/FloatingPoint.h"
#include "mozilla/Latin1.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/XorShift128PlusRNG.h"

#include "jit/AtomicOp.h"
#include "jit/AtomicOperations.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineJIT.h"
#include "jit/JitFrames.h"
#include "jit/JitOptions.h"
#include "jit/JitRuntime.h"
#include "jit/JitScript.h"
#include "jit/MoveEmitter.h"
#include "jit/ReciprocalMulConstants.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/Simulator.h"
#include "jit/VMFunctions.h"
#include "js/Conversions.h"
#include "js/friend/DOMProxy.h"  // JS::ExpandoAndGeneration
#include "js/GCAPI.h"            // JS::AutoCheckCannotGC
#include "js/ScalarType.h"       // js::Scalar::Type
#include "util/Unicode.h"
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BoundFunctionObject.h"
#include "vm/FunctionFlags.h"  // js::FunctionFlags
#include "vm/Iteration.h"
#include "vm/JSContext.h"
#include "vm/JSFunction.h"
#include "vm/StringType.h"
#include "vm/TypedArrayObject.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCodegenConstants.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmGcObject.h"
#include "wasm/WasmInstanceData.h"
#include "wasm/WasmMemory.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValidate.h"

#include "jit/TemplateObject-inl.h"
#include "vm/BytecodeUtil-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/JSObject-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::CheckedInt;

TrampolinePtr MacroAssembler::preBarrierTrampoline(MIRType type) {
  const JitRuntime* rt = runtime()->jitRuntime();
  return rt->preBarrier(type);
}

template <typename S, typename T>
static void StoreToTypedFloatArray(MacroAssembler& masm, int arrayType,
                                   const S& value, const T& dest) {
  switch (arrayType) {
    case Scalar::Float32:
      masm.storeFloat32(value, dest);
      break;
    case Scalar::Float64:
      masm.storeDouble(value, dest);
      break;
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}

void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const BaseIndex& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}

void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const Address& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}

template <typename S, typename T>
static void StoreToTypedBigIntArray(MacroAssembler& masm,
                                    Scalar::Type arrayType, const S& value,
                                    const T& dest) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));
  masm.store64(value, dest);
}

void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const BaseIndex& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}

void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const Address& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}

void MacroAssembler::boxUint32(Register source, ValueOperand dest,
                               Uint32Mode mode, Label* fail) {
  switch (mode) {
    // Fail if the value does not fit in an int32.
    case Uint32Mode::FailOnDouble: {
      branchTest32(Assembler::Signed, source, source, fail);
      tagValue(JSVAL_TYPE_INT32, source, dest);
      break;
    }
    case Uint32Mode::ForceDouble: {
      // Always convert the value to double.
      ScratchDoubleScope fpscratch(*this);
      convertUInt32ToDouble(source, fpscratch);
      boxDouble(fpscratch, dest, fpscratch);
      break;
    }
  }
}

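// For illustration (added commentary, not in the original source): with
// source = 0x80000000 (2^31, which does not fit in a signed int32),
// Uint32Mode::FailOnDouble takes the |fail| branch because the sign bit is
// set, while Uint32Mode::ForceDouble instead boxes the value as the double
// 2147483648.0.
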
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        AnyRegister dest, Register temp,
                                        Label* fail) {
  switch (arrayType) {
    case Scalar::Int8:
      load8SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
      load8ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int16:
      load16SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint16:
      load16ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int32:
      load32(src, dest.gpr());
      break;
    case Scalar::Uint32:
      if (dest.isFloat()) {
        load32(src, temp);
        convertUInt32ToDouble(temp, dest.fpu());
      } else {
        load32(src, dest.gpr());

        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadUnboxedScalar to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
      }
      break;
    case Scalar::Float32:
      loadFloat32(src, dest.fpu());
      canonicalizeFloat(dest.fpu());
      break;
    case Scalar::Float64:
      loadDouble(src, dest.fpu());
      canonicalizeDouble(dest.fpu());
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}

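// Worked example for the Uint32 case above (added commentary, not in the
// original source): loading 0x7FFFFFFF into a GPR succeeds, since it is
// representable as a signed int32, but 0xFFFFFFFF sets the sign bit and
// branches to |fail|, forcing callers to use a double-typed load instead.
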
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail);

template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        const ValueOperand& dest,
                                        Uint32Mode uint32Mode, Register temp,
                                        Label* fail) {
  switch (arrayType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
                         InvalidReg, nullptr);
      tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
      break;
    case Scalar::Uint32:
      // Don't clobber dest when we could fail, instead use temp.
      load32(src, temp);
      boxUint32(temp, dest, uint32Mode, fail);
      break;
    case Scalar::Float32: {
      ScratchDoubleScope dscratch(*this);
      FloatRegister fscratch = dscratch.asSingle();
      loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
                         dest.scratchReg(), nullptr);
      convertFloat32ToDouble(fscratch, dscratch);
      boxDouble(dscratch, dest, dscratch);
      break;
    }
    case Scalar::Float64: {
      ScratchDoubleScope fpscratch(*this);
      loadFromTypedArray(arrayType, src, AnyRegister(fpscratch),
                         dest.scratchReg(), nullptr);
      boxDouble(fpscratch, dest, fpscratch);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}

template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 const ValueOperand& dest,
                                                 Uint32Mode uint32Mode,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 const ValueOperand& dest,
                                                 Uint32Mode uint32Mode,
                                                 Register temp, Label* fail);

template <typename T>
void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                              const T& src, Register bigInt,
                                              Register64 temp) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));

  load64(src, temp);
  initializeBigInt64(arrayType, bigInt, temp);
}

template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const Address& src,
                                                       Register bigInt,
                                                       Register64 temp);
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const BaseIndex& src,
                                                       Register bigInt,
                                                       Register64 temp);

// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
void MacroAssembler::checkAllocatorState(Register temp, gc::AllocKind allocKind,
                                         Label* fail) {
  // Don't execute the inline path if GC probes are built in.
#ifdef JS_GC_PROBES
  jump(fail);
#endif

#ifdef JS_GC_ZEAL
  // Don't execute the inline path if gc zeal or tracing are active.
  const uint32_t* ptrZealModeBits = runtime()->addressOfGCZealModeBits();
  branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
           fail);
#endif

  // If the zone has a realm with an object allocation metadata hook, emit a
  // guard for this. Note that IC stubs and some other trampolines can be shared
  // across realms, so we don't bake in a realm pointer.
  if (gc::IsObjectAllocKind(allocKind) &&
      realm()->zone()->hasRealmWithAllocMetadataBuilder()) {
    loadJSContext(temp);
    loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
    branchPtr(Assembler::NotEqual,
              Address(temp, Realm::offsetOfAllocationMetadataBuilder()),
              ImmWord(0), fail);
  }
}

bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
                                           gc::Heap initialHeap) {
  // Note that Ion elides barriers on writes to objects known to be in the
  // nursery, so any allocation that can be made into the nursery must be made
  // into the nursery, even if the nursery is disabled. At runtime these will
  // take the out-of-line path, which is required to insert a barrier for the
  // initializing writes.
  return IsNurseryAllocable(allocKind) && initialHeap != gc::Heap::Tenured;
}

// Inline version of Nursery::allocateObject. If the object has dynamic slots,
// this fills in the slots_ pointer.
void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           size_t nDynamicSlots, Label* fail,
                                           const AllocSiteInput& allocSite) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // Currently the JIT does not nursery allocate foreground finalized
  // objects. This is allowed for objects that support this and have the
  // JSCLASS_SKIP_NURSERY_FINALIZE class flag set. It's hard to assert that here
  // though so disallow all foreground finalized objects for now.
  MOZ_ASSERT(!IsForegroundFinalized(allocKind));

  // We still need to allocate in the nursery, per the comment in
  // shouldNurseryAllocate; however, we need to insert into the
  // mallocedBuffers set, so bail to do the nursery allocation in the
  // interpreter.
  if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
    jump(fail);
    return;
  }

  // Check whether this allocation site needs pretenuring. This dynamic check
  // only happens for baseline code.
  if (allocSite.is<Register>()) {
    Register site = allocSite.as<Register>();
    branchTestPtr(Assembler::NonZero,
                  Address(site, gc::AllocSite::offsetOfScriptAndState()),
                  Imm32(gc::AllocSite::LONG_LIVED_BIT), fail);
  }

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.
  CompileZone* zone = realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);
  size_t totalSize = thingSize;
  if (nDynamicSlots) {
    totalSize += ObjectSlots::allocSize(nDynamicSlots);
  }
  MOZ_ASSERT(totalSize < INT32_MAX);
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::Object,
                      totalSize, allocSite);

  if (nDynamicSlots) {
    store32(Imm32(nDynamicSlots),
            Address(result, thingSize + ObjectSlots::offsetOfCapacity()));
    store32(Imm32(0),
            Address(result,
                    thingSize + ObjectSlots::offsetOfDictionarySlotSpan()));
    store64(Imm64(ObjectSlots::NoUniqueIdInDynamicSlots),
            Address(result, thingSize + ObjectSlots::offsetOfMaybeUniqueId()));
    computeEffectiveAddress(
        Address(result, thingSize + ObjectSlots::offsetOfSlots()), temp);
    storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
  }
}

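// Resulting nursery layout when nDynamicSlots > 0 (illustrative sketch, not
// part of the original source):
//
//   result -> +--------------------+
//             | JSObject (thing)   |  thingSize bytes
//             +--------------------+
//             | ObjectSlots header |  capacity, dictionary slot span, id
//             +--------------------+
//   slots_ -> | slot 0 .. n-1      |  nDynamicSlots Values
//             +--------------------+
//
// The object's slots_ pointer is set to the first Value after the header.
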
// Inlined version of FreeSpan::allocate. This does not fill in slots_.
void MacroAssembler::freeListAllocate(Register result, Register temp,
                                      gc::AllocKind allocKind, Label* fail) {
  CompileZone* zone = realm()->zone();
  int thingSize = int(gc::Arena::thingSize(allocKind));

  Label fallback;
  Label success;

  // Load the first and last offsets of |zone|'s free list for |allocKind|.
  // If there is no room remaining in the span, fall back to get the next one.
  gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
  branch32(Assembler::AboveOrEqual, result, temp, &fallback);

  // Bump the offset for the next allocation.
  add32(Imm32(thingSize), result);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  sub32(Imm32(thingSize), result);
  addPtr(temp, result);  // Turn the offset into a pointer.
  jump(&success);

  bind(&fallback);
  // If there are no free spans left, we bail to finish the allocation. The
  // interpreter will call the GC allocator to set up a new arena to allocate
  // from, after which we can resume allocating in the jit.
  branchTest32(Assembler::Zero, result, result, fail);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  addPtr(temp, result);  // Turn the offset into a pointer.
  Push(result);
  // Update the free list to point to the next span (which may be empty).
  load32(Address(result, 0), result);
  store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  Pop(result);

  bind(&success);

  if (runtime()->geckoProfiler().enabled()) {
    uint32_t* countAddress = zone->addressOfTenuredAllocCount();
    movePtr(ImmPtr(countAddress), temp);
    add32(Imm32(1), Address(temp, 0));
  }
}

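// Note (added commentary, not in the original source): the free span bounds
// loaded above are 16-bit, arena-relative offsets (hence the
// load16ZeroExtend/store16 pairs); adding the span pointer in |temp|
// converts the bumped offset back into a cell pointer.
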
void MacroAssembler::callFreeStub(Register slots) {
  // This register must match the one in JitRuntime::generateFreeStub.
  const Register regSlots = CallTempReg0;

  push(regSlots);
  movePtr(slots, regSlots);
  call(runtime()->jitRuntime()->freeStub());
  pop(regSlots);
}

// Inlined equivalent of gc::AllocateObject, without failure case handling.
void MacroAssembler::allocateObject(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    uint32_t nDynamicSlots,
                                    gc::Heap initialHeap, Label* fail,
                                    const AllocSiteInput& allocSite) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  checkAllocatorState(temp, allocKind, fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::Heap::Default);
    return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail,
                                 allocSite);
  }

  // Fall back to calling into the VM to allocate objects in the tenured heap
  // that have dynamic slots.
  if (nDynamicSlots) {
    jump(fail);
    return;
  }

  return freeListAllocate(result, temp, allocKind, fail);
}

void MacroAssembler::createGCObject(Register obj, Register temp,
                                    const TemplateObject& templateObj,
                                    gc::Heap initialHeap, Label* fail,
                                    bool initContents /* = true */) {
  gc::AllocKind allocKind = templateObj.getAllocKind();
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  uint32_t nDynamicSlots = 0;
  if (templateObj.isNativeObject()) {
    const TemplateNativeObject& ntemplate =
        templateObj.asTemplateNativeObject();
    nDynamicSlots = ntemplate.numDynamicSlots();
  }

  allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
  initGCThing(obj, temp, templateObj, initContents);
}

void MacroAssembler::createPlainGCObject(
    Register result, Register shape, Register temp, Register temp2,
    uint32_t numFixedSlots, uint32_t numDynamicSlots, gc::AllocKind allocKind,
    gc::Heap initialHeap, Label* fail, const AllocSiteInput& allocSite,
    bool initContents /* = true */) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
  MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");

  // Allocate object.
  allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
                 allocSite);

  // Initialize shape field.
  storePtr(shape, Address(result, JSObject::offsetOfShape()));

  // If the object has dynamic slots, allocateObject will initialize
  // the slots field. If not, we must initialize it now.
  if (numDynamicSlots == 0) {
    storePtr(ImmPtr(emptyObjectSlots),
             Address(result, NativeObject::offsetOfSlots()));
  }

  // Initialize elements field.
  storePtr(ImmPtr(emptyObjectElements),
           Address(result, NativeObject::offsetOfElements()));

  // Initialize fixed slots.
  if (initContents) {
    fillSlotsWithUndefined(Address(result, NativeObject::getFixedSlotOffset(0)),
                           temp, 0, numFixedSlots);
  }

  // Initialize dynamic slots.
  if (numDynamicSlots > 0) {
    loadPtr(Address(result, NativeObject::offsetOfSlots()), temp2);
    fillSlotsWithUndefined(Address(temp2, 0), temp, 0, numDynamicSlots);
  }
}

void MacroAssembler::createArrayWithFixedElements(
    Register result, Register shape, Register temp, Register dynamicSlotsTemp,
    uint32_t arrayLength, uint32_t arrayCapacity, uint32_t numUsedDynamicSlots,
    uint32_t numDynamicSlots, gc::AllocKind allocKind, gc::Heap initialHeap,
    Label* fail, const AllocSiteInput& allocSite) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
  MOZ_ASSERT(shape != temp,
             "shape can overlap with dynamicSlotsTemp, but not temp");
  MOZ_ASSERT(result != temp);

  // This only supports allocating arrays with fixed elements and does not
  // support any dynamic elements.
  MOZ_ASSERT(arrayCapacity >= arrayLength);
  MOZ_ASSERT(gc::GetGCKindSlots(allocKind) >=
             arrayCapacity + ObjectElements::VALUES_PER_HEADER);

  MOZ_ASSERT(numUsedDynamicSlots <= numDynamicSlots);

  // Allocate object.
  allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
                 allocSite);

  // Initialize shape field.
  storePtr(shape, Address(result, JSObject::offsetOfShape()));

  // If the object has dynamic slots, allocateObject will initialize
  // the slots field. If not, we must initialize it now.
  if (numDynamicSlots == 0) {
    storePtr(ImmPtr(emptyObjectSlots),
             Address(result, NativeObject::offsetOfSlots()));
  }

  // Initialize elements pointer for fixed (inline) elements.
  computeEffectiveAddress(
      Address(result, NativeObject::offsetOfFixedElements()), temp);
  storePtr(temp, Address(result, NativeObject::offsetOfElements()));

  // Initialize elements header.
  store32(Imm32(ObjectElements::FIXED),
          Address(temp, ObjectElements::offsetOfFlags()));
  store32(Imm32(0), Address(temp, ObjectElements::offsetOfInitializedLength()));
  store32(Imm32(arrayCapacity),
          Address(temp, ObjectElements::offsetOfCapacity()));
  store32(Imm32(arrayLength), Address(temp, ObjectElements::offsetOfLength()));

  // Initialize dynamic slots.
  if (numUsedDynamicSlots > 0) {
    MOZ_ASSERT(dynamicSlotsTemp != temp);
    MOZ_ASSERT(dynamicSlotsTemp != InvalidReg);
    loadPtr(Address(result, NativeObject::offsetOfSlots()), dynamicSlotsTemp);
    fillSlotsWithUndefined(Address(dynamicSlotsTemp, 0), temp, 0,
                           numUsedDynamicSlots);
  }
}

// Inline version of Nursery::allocateString.
void MacroAssembler::nurseryAllocateString(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);
  bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::String,
                      thingSize);
}

// Inline version of Nursery::allocateBigInt.
void MacroAssembler::nurseryAllocateBigInt(Register result, Register temp,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = realm()->zone();
  size_t thingSize = gc::Arena::thingSize(gc::AllocKind::BIGINT);

  bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::BigInt,
                      thingSize);
}

static bool IsNurseryAllocEnabled(CompileZone* zone, JS::TraceKind kind) {
  switch (kind) {
    case JS::TraceKind::Object:
      return zone->allocNurseryObjects();
    case JS::TraceKind::String:
      return zone->allocNurseryStrings();
    case JS::TraceKind::BigInt:
      return zone->allocNurseryBigInts();
    default:
      MOZ_CRASH("Bad nursery allocation kind");
  }
}

// This function handles nursery allocations for JS. For wasm, see
// MacroAssembler::wasmBumpPointerAllocate.
void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
                                         Label* fail, CompileZone* zone,
                                         JS::TraceKind traceKind, uint32_t size,
                                         const AllocSiteInput& allocSite) {
  MOZ_ASSERT(size >= gc::MinCellSize);

  uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
  MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  // We know statically whether nursery allocation is enabled for a particular
  // kind because we discard JIT code when this changes.
  if (!IsNurseryAllocEnabled(zone, traceKind)) {
    jump(fail);
    return;
  }

  // Use a relative 32 bit offset to the Nursery position_ to currentEnd_ to
  // avoid 64-bit immediate loads.
  void* posAddr = zone->addressOfNurseryPosition();
  int32_t endOffset = Nursery::offsetOfCurrentEndFromPosition();

  movePtr(ImmPtr(posAddr), temp);
  loadPtr(Address(temp, 0), result);
  addPtr(Imm32(totalSize), result);
  branchPtr(Assembler::Below, Address(temp, endOffset), result, fail);
  storePtr(result, Address(temp, 0));
  subPtr(Imm32(size), result);

  if (allocSite.is<gc::CatchAllAllocSite>()) {
    // No allocation site supplied. This is the case when called from Warp, or
    // from places that don't support pretenuring.
    gc::CatchAllAllocSite siteKind = allocSite.as<gc::CatchAllAllocSite>();
    gc::AllocSite* site = zone->catchAllAllocSite(traceKind, siteKind);
    uintptr_t headerWord = gc::NurseryCellHeader::MakeValue(site, traceKind);
    storePtr(ImmWord(headerWord),
             Address(result, -js::Nursery::nurseryCellHeaderSize()));

    if (traceKind != JS::TraceKind::Object ||
        runtime()->geckoProfiler().enabled()) {
      // Update the catch all allocation site, which is used to calculate
      // nursery allocation counts so we can determine whether to disable
      // nursery allocation of strings and bigints.
      uint32_t* countAddress = site->nurseryAllocCountAddress();
      CheckedInt<int32_t> counterOffset =
          (CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
           CheckedInt<uintptr_t>(uintptr_t(posAddr)))
              .toChecked<int32_t>();
      if (counterOffset.isValid()) {
        add32(Imm32(1), Address(temp, counterOffset.value()));
      } else {
        movePtr(ImmPtr(countAddress), temp);
        add32(Imm32(1), Address(temp, 0));
      }
    }
  } else {
    // Update allocation site and store pointer in the nursery cell header.
    // This is only used from baseline.
    Register site = allocSite.as<Register>();
    updateAllocSite(temp, result, zone, site);
    // See NurseryCellHeader::MakeValue.
    orPtr(Imm32(int32_t(traceKind)), site);
    storePtr(site, Address(result, -js::Nursery::nurseryCellHeaderSize()));
  }
}

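// Nursery cell layout produced above (illustrative sketch, not part of the
// original source):
//
//   bumped position_:  [ header word | cell of |size| bytes ]
//                        ^             ^
//                        result - nurseryCellHeaderSize()    result
//
// The header word encodes the AllocSite pointer or'ed with the trace kind,
// as in NurseryCellHeader::MakeValue.
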
// Update the allocation site in the same way as Nursery::allocateCell.
void MacroAssembler::updateAllocSite(Register temp, Register result,
                                     CompileZone* zone, Register site) {
  Label done;

  add32(Imm32(1), Address(site, gc::AllocSite::offsetOfNurseryAllocCount()));

  branch32(Assembler::NotEqual,
           Address(site, gc::AllocSite::offsetOfNurseryAllocCount()), Imm32(1),
           &done);

  loadPtr(AbsoluteAddress(zone->addressOfNurseryAllocatedSites()), temp);
  storePtr(temp, Address(site, gc::AllocSite::offsetOfNextNurseryAllocated()));
  storePtr(site, AbsoluteAddress(zone->addressOfNurseryAllocatedSites()));

  bind(&done);
}

// Inlined equivalent of gc::AllocateString, jumping to fail if nursery
// allocation requested but unsuccessful.
void MacroAssembler::allocateString(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    gc::Heap initialHeap, Label* fail) {
  MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
             allocKind == gc::AllocKind::FAT_INLINE_STRING);

  checkAllocatorState(temp, allocKind, fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::Heap::Default);
    return nurseryAllocateString(result, temp, allocKind, fail);
  }

  freeListAllocate(result, temp, allocKind, fail);
}

void MacroAssembler::newGCString(Register result, Register temp,
                                 gc::Heap initialHeap, Label* fail) {
  allocateString(result, temp, js::gc::AllocKind::STRING, initialHeap, fail);
}

void MacroAssembler::newGCFatInlineString(Register result, Register temp,
                                          gc::Heap initialHeap, Label* fail) {
  allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
                 initialHeap, fail);
}

void MacroAssembler::newGCBigInt(Register result, Register temp,
                                 gc::Heap initialHeap, Label* fail) {
  constexpr gc::AllocKind allocKind = gc::AllocKind::BIGINT;

  checkAllocatorState(temp, allocKind, fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::Heap::Default);
    return nurseryAllocateBigInt(result, temp, fail);
  }

  freeListAllocate(result, temp, allocKind, fail);
}

void MacroAssembler::copySlotsFromTemplate(
    Register obj, const TemplateNativeObject& templateObj, uint32_t start,
    uint32_t end) {
  uint32_t nfixed = std::min(templateObj.numFixedSlots(), end);
  for (unsigned i = start; i < nfixed; i++) {
    // Template objects are not exposed to script and therefore immutable.
    // However, regexp template objects are sometimes used directly (when
    // the cloning is not observable), and therefore we can end up with a
    // non-zero lastIndex. Detect this case here and just substitute 0, to
    // avoid racing with the main thread updating this slot.
    Value v;
    if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot()) {
      v = Int32Value(0);
    } else {
      v = templateObj.getSlot(i);
    }
    storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
  }
}

void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
                                                uint32_t start, uint32_t end,
                                                const Value& v) {
  MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));

  if (start >= end) {
    return;
  }

#ifdef JS_NUNBOX32
  // We only have a single spare register, so do the initialization as two
  // strided writes of the tag and body.
  Address addr = base;
  move32(Imm32(v.toNunboxPayload()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
    store32(temp, ToPayload(addr));
  }

  addr = base;
  move32(Imm32(v.toNunboxTag()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
    store32(temp, ToType(addr));
  }
#else
  moveValue(v, ValueOperand(temp));
  for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtr<Value>)) {
    storePtr(temp, base);
  }
#endif
}

void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
                                            uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
}

void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
                                                uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end,
                             MagicValue(JS_UNINITIALIZED_LEXICAL));
}

static std::pair<uint32_t, uint32_t> FindStartOfUninitializedAndUndefinedSlots(
    const TemplateNativeObject& templateObj, uint32_t nslots) {
  MOZ_ASSERT(nslots == templateObj.slotSpan());
  MOZ_ASSERT(nslots > 0);

  uint32_t first = nslots;
  for (; first != 0; --first) {
    if (templateObj.getSlot(first - 1) != UndefinedValue()) {
      break;
    }
  }

  uint32_t startOfUndefined = first;

  if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
    for (; first != 0; --first) {
      if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
        break;
      }
    }
  }

  uint32_t startOfUninitialized = first;

  return {startOfUninitialized, startOfUndefined};
}

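// Example (added commentary, not in the original source): for a template
// object with slots [Int32Value(7), uninit-lexical, uninit-lexical,
// undefined, undefined], nslots = 5 and this returns {1, 3}: slot 0 is
// copied from the template, slots 1..2 are filled with the
// uninitialized-lexical magic value, and slots 3..4 with undefined.
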
void MacroAssembler::initTypedArraySlots(Register obj, Register temp,
                                         Register lengthReg,
                                         LiveRegisterSet liveRegs, Label* fail,
                                         TypedArrayObject* templateObj,
                                         TypedArrayLength lengthKind) {
  MOZ_ASSERT(!templateObj->hasBuffer());

  constexpr size_t dataSlotOffset = ArrayBufferViewObject::dataOffset();
  constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);

  static_assert(
      FixedLengthTypedArrayObject::FIXED_DATA_START ==
          FixedLengthTypedArrayObject::DATA_SLOT + 1,
      "fixed inline element data assumed to begin after the data slot");

  static_assert(
      FixedLengthTypedArrayObject::INLINE_BUFFER_LIMIT ==
          JSObject::MAX_BYTE_SIZE - dataOffset,
      "typed array inline buffer is limited by the maximum object byte size");

  // Initialise data elements to zero.
  size_t length = templateObj->length();
  MOZ_ASSERT(length <= INT32_MAX,
             "Template objects are only created for int32 lengths");
  size_t nbytes = length * templateObj->bytesPerElement();

  if (lengthKind == TypedArrayLength::Fixed &&
      nbytes <= FixedLengthTypedArrayObject::INLINE_BUFFER_LIMIT) {
    MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());

    // Store data elements inside the remaining JSObject slots.
    computeEffectiveAddress(Address(obj, dataOffset), temp);
    storePrivateValue(temp, Address(obj, dataSlotOffset));

    // Write enough zero pointers into fixed data to zero every
    // element. (This zeroes past the end of a byte count that's
    // not a multiple of pointer size. That's okay, because fixed
    // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
    // and we won't inline unless the desired memory fits in that
    // count.)
    static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");

    size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
    for (size_t i = 0; i < numZeroPointers; i++) {
      storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
    }
    MOZ_ASSERT(nbytes > 0, "Zero-length TypedArrays need ZeroLengthArrayData");
  } else {
    if (lengthKind == TypedArrayLength::Fixed) {
      move32(Imm32(length), lengthReg);
    }

    // Ensure volatile |obj| is saved across the call.
    if (obj.volatile_()) {
      liveRegs.addUnchecked(obj);
    }

    // Allocate a buffer on the heap to store the data elements.
    PushRegsInMask(liveRegs);
    using Fn = void (*)(JSContext* cx, TypedArrayObject* obj, int32_t count);
    setupUnalignedABICall(temp);
    loadJSContext(temp);
    passABIArg(temp);
    passABIArg(obj);
    passABIArg(lengthReg);
    callWithABI<Fn, AllocateAndInitTypedArrayBuffer>();
    PopRegsInMask(liveRegs);

    // Fail when data slot is UndefinedValue.
    branchTestUndefined(Assembler::Equal, Address(obj, dataSlotOffset), fail);
  }
}

void MacroAssembler::initGCSlots(Register obj, Register temp,
                                 const TemplateNativeObject& templateObj) {
  MOZ_ASSERT(!templateObj.isArrayObject());

  // Slots of non-array objects are required to be initialized.
  // Use the values currently in the template object.
  uint32_t nslots = templateObj.slotSpan();
  if (nslots == 0) {
    return;
  }

  uint32_t nfixed = templateObj.numUsedFixedSlots();
  uint32_t ndynamic = templateObj.numDynamicSlots();

  // Attempt to group slot writes such that we minimize the amount of
  // duplicated data we need to embed in code and load into registers. In
  // general, most template object slots will be undefined except for any
  // reserved slots. Since reserved slots come first, we split the object
  // logically into independent non-UndefinedValue writes to the head and
  // duplicated writes of UndefinedValue to the tail. For the majority of
  // objects, the "tail" will be the entire slot range.
  //
  // The template object may be a CallObject, in which case we need to
  // account for uninitialized lexical slots as well as undefined
  // slots. Uninitialized lexical slots appears in CallObjects if the function
  // has parameter expressions, in which case closed over parameters have
  // TDZ. Uninitialized slots come before undefined slots in CallObjects.
  auto [startOfUninitialized, startOfUndefined] =
      FindStartOfUninitializedAndUndefinedSlots(templateObj, nslots);
  MOZ_ASSERT(startOfUninitialized <= nfixed);  // Reserved slots must be fixed.
  MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
  MOZ_ASSERT_IF(!templateObj.isCallObject() &&
                    !templateObj.isBlockLexicalEnvironmentObject(),
                startOfUninitialized == startOfUndefined);

  // Copy over any preserved reserved slots.
  copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);

  // Fill the rest of the fixed slots with undefined and uninitialized.
  size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
  fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
                             std::min(startOfUndefined, nfixed));

  if (startOfUndefined < nfixed) {
    offset = NativeObject::getFixedSlotOffset(startOfUndefined);
    fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
                           nfixed);
  }

  if (ndynamic) {
    // We are short one register to do this elegantly. Borrow the obj
    // register briefly for our slots base address.
    push(obj);
    loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);

    // Fill uninitialized slots if necessary. Otherwise initialize all
    // slots to undefined.
    if (startOfUndefined > nfixed) {
      MOZ_ASSERT(startOfUninitialized != startOfUndefined);
      fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
                                 startOfUndefined - nfixed);
      size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
      fillSlotsWithUndefined(Address(obj, offset), temp,
                             startOfUndefined - nfixed, ndynamic);
    } else {
      fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
    }

    pop(obj);
  }
}

void MacroAssembler::initGCThing(Register obj, Register temp,
                                 const TemplateObject& templateObj,
                                 bool initContents) {
  // Fast initialization of an empty object returned by allocateObject().

  storePtr(ImmGCPtr(templateObj.shape()),
           Address(obj, JSObject::offsetOfShape()));

  if (templateObj.isNativeObject()) {
    const TemplateNativeObject& ntemplate =
        templateObj.asTemplateNativeObject();
    MOZ_ASSERT(!ntemplate.hasDynamicElements());

    // If the object has dynamic slots, the slots member has already been
    // filled in.
    if (ntemplate.numDynamicSlots() == 0) {
      storePtr(ImmPtr(emptyObjectSlots),
               Address(obj, NativeObject::offsetOfSlots()));
    }

    if (ntemplate.isArrayObject()) {
      // Can't skip initializing reserved slots.
      MOZ_ASSERT(initContents);

      int elementsOffset = NativeObject::offsetOfFixedElements();

      computeEffectiveAddress(Address(obj, elementsOffset), temp);
      storePtr(temp, Address(obj, NativeObject::offsetOfElements()));

      // Fill in the elements header.
      store32(
          Imm32(ntemplate.getDenseCapacity()),
          Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
      store32(Imm32(ntemplate.getDenseInitializedLength()),
              Address(obj, elementsOffset +
                               ObjectElements::offsetOfInitializedLength()));
      store32(Imm32(ntemplate.getArrayLength()),
              Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
      store32(Imm32(ObjectElements::FIXED),
              Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
    } else if (ntemplate.isArgumentsObject()) {
      // The caller will initialize the reserved slots.
      MOZ_ASSERT(!initContents);
      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));
    } else {
      // If the target type could be a TypedArray that maps shared memory
      // then this would need to store emptyObjectElementsShared in that case.
      MOZ_ASSERT(!ntemplate.isSharedMemory());

      // Can't skip initializing reserved slots.
      MOZ_ASSERT(initContents);

      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));

      initGCSlots(obj, temp, ntemplate);
    }
  } else {
    MOZ_CRASH("Unknown object");
  }

#ifdef JS_GC_PROBES
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  regs.takeUnchecked(obj);
  Register temp2 = regs.takeAnyGeneral();

  using Fn = void (*)(JSObject* obj);
  setupUnalignedABICall(temp2);
  passABIArg(obj);
  callWithABI<Fn, TraceCreateObject>();

  PopRegsInMask(save);
#endif
}

static size_t StringCharsByteLength(const JSLinearString* linear) {
  CharEncoding encoding =
      linear->hasLatin1Chars() ? CharEncoding::Latin1 : CharEncoding::TwoByte;
  size_t encodingSize = encoding == CharEncoding::Latin1
                            ? sizeof(JS::Latin1Char)
                            : sizeof(char16_t);
  return linear->length() * encodingSize;
}

bool MacroAssembler::canCompareStringCharsInline(const JSLinearString* linear) {
  // Limit the number of inline instructions used for character comparisons.
  // Use the same instruction limit for both encodings, i.e. two-byte uses
  // half the limit of Latin-1 strings.
  constexpr size_t ByteLengthCompareCutoff = 32;

  size_t byteLength = StringCharsByteLength(linear);
  return 0 < byteLength && byteLength <= ByteLengthCompareCutoff;
}

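// With the 32-byte cutoff above, strings of up to 32 Latin-1 characters or
// 16 two-byte characters are compared inline; longer strings take the VM
// path. (Added commentary, not part of the original source.)
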
template <typename T, typename CharT>
static inline T CopyCharacters(const CharT* chars) {
  T value = 0;
  std::memcpy(&value, chars, sizeof(T));
  return value;
}

template <typename T>
static inline T CopyCharacters(const JSLinearString* linear, size_t index) {
  JS::AutoCheckCannotGC nogc;

  if (linear->hasLatin1Chars()) {
    MOZ_ASSERT(index + sizeof(T) / sizeof(JS::Latin1Char) <= linear->length());
    return CopyCharacters<T>(linear->latin1Chars(nogc) + index);
  }

  MOZ_ASSERT(sizeof(T) >= sizeof(char16_t));
  MOZ_ASSERT(index + sizeof(T) / sizeof(char16_t) <= linear->length());
  return CopyCharacters<T>(linear->twoByteChars(nogc) + index);
}

void MacroAssembler::branchIfNotStringCharsEquals(Register stringChars,
                                                  const JSLinearString* linear,
                                                  Label* label) {
  CharEncoding encoding =
      linear->hasLatin1Chars() ? CharEncoding::Latin1 : CharEncoding::TwoByte;
  size_t encodingSize = encoding == CharEncoding::Latin1
                            ? sizeof(JS::Latin1Char)
                            : sizeof(char16_t);
  size_t byteLength = StringCharsByteLength(linear);

  size_t pos = 0;
  for (size_t stride : {8, 4, 2, 1}) {
    while (byteLength >= stride) {
      Address addr(stringChars, pos * encodingSize);
      switch (stride) {
        case 8: {
          auto x = CopyCharacters<uint64_t>(linear, pos);
          branch64(Assembler::NotEqual, addr, Imm64(x), label);
          break;
        }
        case 4: {
          auto x = CopyCharacters<uint32_t>(linear, pos);
          branch32(Assembler::NotEqual, addr, Imm32(x), label);
          break;
        }
        case 2: {
          auto x = CopyCharacters<uint16_t>(linear, pos);
          branch16(Assembler::NotEqual, addr, Imm32(x), label);
          break;
        }
        case 1: {
          auto x = CopyCharacters<uint8_t>(linear, pos);
          branch8(Assembler::NotEqual, addr, Imm32(x), label);
          break;
        }
      }

      byteLength -= stride;
      pos += stride / encodingSize;
    }

    // Prefer a single comparison for trailing bytes instead of doing
    // multiple consecutive comparisons.
    //
    // For example when comparing against the string "example", emit two
    // four-byte comparisons against "exam" and "mple" instead of doing
    // three comparisons against "exam", "pl", and finally "e".
    if (pos > 0 && byteLength > stride / 2) {
      MOZ_ASSERT(stride == 8 || stride == 4);

      size_t prev = pos - (stride - byteLength) / encodingSize;
      Address addr(stringChars, prev * encodingSize);
      switch (stride) {
        case 8: {
          auto x = CopyCharacters<uint64_t>(linear, prev);
          branch64(Assembler::NotEqual, addr, Imm64(x), label);
          break;
        }
        case 4: {
          auto x = CopyCharacters<uint32_t>(linear, prev);
          branch32(Assembler::NotEqual, addr, Imm32(x), label);
          break;
        }
      }

      // Break from the loop, because we've finished the complete string.
      break;
    }
  }
}

void MacroAssembler::loadStringCharsForCompare(Register input,
                                               const JSLinearString* linear,
                                               Register stringChars,
                                               Label* fail) {
  CharEncoding encoding =
      linear->hasLatin1Chars() ? CharEncoding::Latin1 : CharEncoding::TwoByte;

  // Take the slow path when the string is a rope or has a different character
  // representation.
  branchIfRope(input, fail);
  if (encoding == CharEncoding::Latin1) {
    branchTwoByteString(input, fail);
  } else {
    JS::AutoCheckCannotGC nogc;
    if (mozilla::IsUtf16Latin1(linear->twoByteRange(nogc))) {
      branchLatin1String(input, fail);
    } else {
      // This case was already handled in the caller.
#ifdef DEBUG
      Label ok;
      branchTwoByteString(input, &ok);
      assumeUnreachable("Unexpected Latin-1 string");
      bind(&ok);
#endif
    }
  }

#ifdef DEBUG
  {
    size_t length = linear->length();
    MOZ_ASSERT(length > 0);

    Label ok;
    branch32(Assembler::AboveOrEqual,
             Address(input, JSString::offsetOfLength()), Imm32(length), &ok);
    assumeUnreachable("Input mustn't be smaller than search string");
    bind(&ok);
  }
#endif

  // Load the input string's characters.
  loadStringChars(input, stringChars, encoding);
}

void MacroAssembler::compareStringChars(JSOp op, Register stringChars,
                                        const JSLinearString* linear,
                                        Register output) {
  MOZ_ASSERT(IsEqualityOp(op));

  size_t byteLength = StringCharsByteLength(linear);

  // Prefer a single compare-and-set instruction if possible.
  if (byteLength == 1 || byteLength == 2 || byteLength == 4 ||
      byteLength == 8) {
    auto cond = JSOpToCondition(op, /* isSigned = */ false);

    Address addr(stringChars, 0);
    switch (byteLength) {
      case 8: {
        auto x = CopyCharacters<uint64_t>(linear, 0);
        cmp64Set(cond, addr, Imm64(x), output);
        break;
      }
      case 4: {
        auto x = CopyCharacters<uint32_t>(linear, 0);
        cmp32Set(cond, addr, Imm32(x), output);
        break;
      }
      case 2: {
        auto x = CopyCharacters<uint16_t>(linear, 0);
        cmp16Set(cond, addr, Imm32(x), output);
        break;
      }
      case 1: {
        auto x = CopyCharacters<uint8_t>(linear, 0);
        cmp8Set(cond, addr, Imm32(x), output);
        break;
      }
    }
  } else {
    Label setNotEqualResult;
    branchIfNotStringCharsEquals(stringChars, linear, &setNotEqualResult);

    // Falls through if both strings are equal.

    Label done;
    move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
    jump(&done);

    bind(&setNotEqualResult);
    move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);

    bind(&done);
  }
}

void MacroAssembler::compareStrings(JSOp op, Register left, Register right,
                                    Register result, Label* fail) {
  MOZ_ASSERT(left != result);
  MOZ_ASSERT(right != result);
  MOZ_ASSERT(IsEqualityOp(op) || IsRelationalOp(op));

  Label notPointerEqual;
  // If operands point to the same instance, the strings are trivially equal.
  branchPtr(Assembler::NotEqual, left, right,
            IsEqualityOp(op) ? &notPointerEqual : fail);
  move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
               op == JSOp::Ge),
         result);

  if (IsEqualityOp(op)) {
    Label done;
    jump(&done);

    bind(&notPointerEqual);

    Label leftIsNotAtom;
    Label setNotEqualResult;
    // Atoms cannot be equal to each other if they point to different strings.
    Imm32 atomBit(JSString::ATOM_BIT);
    branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()),
                 atomBit, &leftIsNotAtom);
    branchTest32(Assembler::NonZero, Address(right, JSString::offsetOfFlags()),
                 atomBit, &setNotEqualResult);

    bind(&leftIsNotAtom);
    // Strings of different length can never be equal.
    loadStringLength(left, result);
    branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()),
             result, fail);

    bind(&setNotEqualResult);
    move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), result);

    bind(&done);
  }
}

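// Note (added commentary, not in the original source): when |left == right|,
// every equality and inclusive relational test (==, ===, <=, >=) is true and
// the exclusive ones (<, >, !=, !==) are false, which is exactly what the
// move32 above encodes via the Eq/StrictEq/Le/Ge check.
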
void MacroAssembler::loadStringChars(Register str, Register dest,
                                     CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    if (encoding == CharEncoding::Latin1) {
      // If the string is a rope, zero the |str| register. The code below
      // depends on str->flags so this should block speculative execution.
      movePtr(ImmWord(0), dest);
      test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::LINEAR_BIT), dest, str);
    } else {
      // If we're loading TwoByte chars, there's an additional risk:
      // if the string has Latin1 chars, we could read out-of-bounds. To
      // prevent this, we check both the Linear and Latin1 bits. We don't
      // have a scratch register, so we use these flags also to block
      // speculative execution, similar to the use of 0 above.
      MOZ_ASSERT(encoding == CharEncoding::TwoByte);
      static constexpr uint32_t Mask =
          JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
      static_assert(Mask < 1024,
                    "Mask should be a small, near-null value to ensure we "
                    "block speculative execution when it's used as string "
                    "pointer");
      move32(Imm32(Mask), dest);
      and32(Address(str, JSString::offsetOfFlags()), dest);
      cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT), dest,
                   str);
    }
  }

  // Load the inline chars.
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);

  // If it's not an inline string, load the non-inline chars. Use a
  // conditional move to prevent speculative execution.
  test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                Imm32(JSString::INLINE_CHARS_BIT),
                Address(str, JSString::offsetOfNonInlineChars()), dest);
}

void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
                                              CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is a rope, has inline chars, or has a different
    // character encoding, set str to a near-null value to prevent
    // speculative execution below (when reading str->nonInlineChars).

    static constexpr uint32_t Mask = JSString::LINEAR_BIT |
                                     JSString::INLINE_CHARS_BIT |
                                     JSString::LATIN1_CHARS_BIT;
    static_assert(Mask < 1024,
                  "Mask should be a small, near-null value to ensure we "
                  "block speculative execution when it's used as string "
                  "pointer");

    uint32_t expectedBits = JSString::LINEAR_BIT;
    if (encoding == CharEncoding::Latin1) {
      expectedBits |= JSString::LATIN1_CHARS_BIT;
    }

    move32(Imm32(Mask), dest);
    and32(Address(str, JSString::offsetOfFlags()), dest);

    cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits), dest, str);
  }

  loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
}

void MacroAssembler::storeNonInlineStringChars(Register chars, Register str) {
  MOZ_ASSERT(chars != str);
  storePtr(chars, Address(str, JSString::offsetOfNonInlineChars()));
}

void MacroAssembler::loadInlineStringCharsForStore(Register str,
                                                   Register dest) {
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);
}

void MacroAssembler::loadInlineStringChars(Register str, Register dest,
                                           CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Making this Spectre-safe is a bit complicated: using
    // computeEffectiveAddress and then zeroing the output register if
    // non-inline is not sufficient: when the index is very large, it would
    // allow reading |nullptr + index|. Just fall back to loadStringChars
    // for this case.
    loadStringChars(str, dest, encoding);
  } else {
    computeEffectiveAddress(
        Address(str, JSInlineString::offsetOfInlineStorage()), dest);
  }
}

void MacroAssembler::loadRopeLeftChild(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Zero the output register if the input was not a rope.
    movePtr(ImmWord(0), dest);
    test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::LINEAR_BIT),
                  Address(str, JSRope::offsetOfLeft()), dest);
  } else {
    loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
  }
}

void MacroAssembler::loadRopeRightChild(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Zero the output register if the input was not a rope.
    movePtr(ImmWord(0), dest);
    test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::LINEAR_BIT),
                  Address(str, JSRope::offsetOfRight()), dest);
  } else {
    loadPtr(Address(str, JSRope::offsetOfRight()), dest);
  }
}

void MacroAssembler::storeRopeChildren(Register left, Register right,
                                       Register str) {
  storePtr(left, Address(str, JSRope::offsetOfLeft()));
  storePtr(right, Address(str, JSRope::offsetOfRight()));
}

void MacroAssembler::loadDependentStringBase(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is not a dependent string, zero the |str| register.
    // The code below loads str->base so this should block speculative
    // execution.
    movePtr(ImmWord(0), dest);
    test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::DEPENDENT_BIT), dest, str);
  }

  loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
}

void MacroAssembler::storeDependentStringBase(Register base, Register str) {
  storePtr(base, Address(str, JSDependentString::offsetOfBase()));
}

void MacroAssembler::branchIfMaybeSplitSurrogatePair(Register leftChild,
                                                     Register index,
                                                     Register scratch,
                                                     Label* maybeSplit,
                                                     Label* notSplit) {
  // If |index| is the last character of the left child and the left child
  // is a two-byte string, it's possible that a surrogate pair is split
  // between the left and right child of a rope.

  // Can't be a split surrogate when the left child is a Latin-1 string.
  branchLatin1String(leftChild, notSplit);

  // Can't be a split surrogate when |index + 1| is in the left child.
  add32(Imm32(1), index, scratch);
  branch32(Assembler::Above, Address(leftChild, JSString::offsetOfLength()),
           scratch, notSplit);

  // Load the character at |index|.
  loadStringChars(leftChild, scratch, CharEncoding::TwoByte);
  loadChar(scratch, index, scratch, CharEncoding::TwoByte);

  // Jump to |maybeSplit| if the last character is a lead surrogate.
  branchIfLeadSurrogate(scratch, scratch, maybeSplit);
}

void MacroAssembler::loadRopeChild(CharKind kind, Register str, Register index,
                                   Register output, Register maybeScratch,
                                   Label* isLinear, Label* splitSurrogate) {
  // This follows JSString::getChar.
  branchIfNotRope(str, isLinear);

  loadRopeLeftChild(str, output);

  Label loadedChild;
  if (kind == CharKind::CharCode) {
    // Check if |index| is contained in the left child.
    branch32(Assembler::Above, Address(output, JSString::offsetOfLength()),
             index, &loadedChild);
  } else {
    MOZ_ASSERT(maybeScratch != InvalidReg);

    // Check if |index| is contained in the left child.
    Label loadRight;
    branch32(Assembler::BelowOrEqual,
             Address(output, JSString::offsetOfLength()), index, &loadRight);

    // Handle possible split surrogate pairs.
    branchIfMaybeSplitSurrogatePair(output, index, maybeScratch,
                                    splitSurrogate, &loadedChild);
    jump(&loadedChild);

    bind(&loadRight);
  }

  // The index must be in the rightChild.
  loadRopeRightChild(str, output);

  bind(&loadedChild);
}

void MacroAssembler::branchIfCanLoadStringChar(CharKind kind, Register str,
                                               Register index, Register scratch,
                                               Register maybeScratch,
                                               Label* label) {
  Label splitSurrogate;
  loadRopeChild(kind, str, index, scratch, maybeScratch, label,
                &splitSurrogate);

  // Branch if the left resp. right side is linear.
  branchIfNotRope(scratch, label);

  if (kind == CharKind::CodePoint) {
    bind(&splitSurrogate);
  }
}

void MacroAssembler::branchIfNotCanLoadStringChar(CharKind kind, Register str,
                                                  Register index,
                                                  Register scratch,
                                                  Register maybeScratch,
                                                  Label* label) {
  Label done;
  loadRopeChild(kind, str, index, scratch, maybeScratch, &done, label);

  // Branch if the left or right side is another rope.
  branchIfRope(scratch, label);

  bind(&done);
}

void MacroAssembler::loadStringChar(CharKind kind, Register str, Register index,
                                    Register output, Register scratch1,
                                    Register scratch2, Label* fail) {
  MOZ_ASSERT(str != output);
  MOZ_ASSERT(str != index);
  MOZ_ASSERT(index != output);
  MOZ_ASSERT_IF(kind == CharKind::CodePoint, index != scratch1);
  MOZ_ASSERT(output != scratch1);
  MOZ_ASSERT(output != scratch2);

  // Use scratch1 for the index (adjusted below).
  if (index != scratch1) {
    move32(index, scratch1);
  }
  movePtr(str, output);

  // This follows JSString::getChar.
  Label notRope;
  branchIfNotRope(str, &notRope);

  loadRopeLeftChild(str, output);

  // Check if the index is contained in the leftChild.
  Label loadedChild, notInLeft;
  spectreBoundsCheck32(scratch1, Address(output, JSString::offsetOfLength()),
                       scratch2, &notInLeft);
  if (kind == CharKind::CodePoint) {
    branchIfMaybeSplitSurrogatePair(output, scratch1, scratch2, fail,
                                    &loadedChild);
  }
  jump(&loadedChild);

  // The index must be in the rightChild.
  // index -= rope->leftChild()->length()
  bind(&notInLeft);
  sub32(Address(output, JSString::offsetOfLength()), scratch1);
  loadRopeRightChild(str, output);

  // If the left or right side is another rope, give up.
  bind(&loadedChild);
  branchIfRope(output, fail);

  bind(&notRope);

  Label isLatin1, done;
  branchLatin1String(output, &isLatin1);

  loadStringChars(output, scratch2, CharEncoding::TwoByte);

  if (kind == CharKind::CharCode) {
    loadChar(scratch2, scratch1, output, CharEncoding::TwoByte);
  } else {
    // Load the first character.
    addToCharPtr(scratch2, scratch1, CharEncoding::TwoByte);
    loadChar(Address(scratch2, 0), output, CharEncoding::TwoByte);

    // If the first character isn't a lead surrogate, go to |done|.
    branchIfNotLeadSurrogate(output, &done);

    // branchIfMaybeSplitSurrogatePair ensures that the surrogate pair can't
    // split between two rope children. So if |index + 1 < str.length|, then
    // |index| and |index + 1| are in the same rope child.
    //
    // NB: We use the non-adjusted |index| and |str| inputs, because |output|
    // was overwritten and no longer contains the rope child.

    // If |index + 1| is a valid index into |str|.
    add32(Imm32(1), index, scratch1);
    spectreBoundsCheck32(scratch1, Address(str, JSString::offsetOfLength()),
                         scratch2, &done);

    // Then load the next character at |scratch2 + sizeof(char16_t)|.
    loadChar(Address(scratch2, sizeof(char16_t)), scratch1,
             CharEncoding::TwoByte);

    // If the next character isn't a trail surrogate, go to |done|.
    branchIfNotTrailSurrogate(scratch1, scratch2, &done);

    // Inlined unicode::UTF16Decode(char16_t, char16_t).
    lshift32(Imm32(10), output);
    add32(Imm32(unicode::NonBMPMin - (unicode::LeadSurrogateMin << 10) -
                unicode::TrailSurrogateMin),
          output);
    add32(scratch1, output);
  }

  jump(&done);

  bind(&isLatin1);
  loadStringChars(output, scratch2, CharEncoding::Latin1);
  loadChar(scratch2, scratch1, output, CharEncoding::Latin1);

  bind(&done);
}

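// Worked example of the inlined UTF16Decode above (added commentary, not in
// the original source): for the surrogate pair U+D83D U+DE00,
//   (0xD83D << 10) + 0xDE00 + (0x10000 - (0xD800 << 10) - 0xDC00) = 0x1F600,
// i.e. the code point U+1F600.
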
void MacroAssembler::loadStringChar(Register str, int32_t index,
                                    Register output, Register scratch1,
                                    Register scratch2, Label* fail) {
  MOZ_ASSERT(str != output);
  MOZ_ASSERT(output != scratch1);
  MOZ_ASSERT(output != scratch2);

  if (index == 0) {
    movePtr(str, scratch1);

    // This follows JSString::getChar.
    Label notRope;
    branchIfNotRope(str, &notRope);

    loadRopeLeftChild(str, scratch1);

    // Rope children can't be empty, so the index can't be in the right side.

    // If the left side is another rope, give up.
    branchIfRope(scratch1, fail);

    bind(&notRope);

    Label isLatin1, done;
    branchLatin1String(scratch1, &isLatin1);
    loadStringChars(scratch1, scratch2, CharEncoding::TwoByte);
    loadChar(Address(scratch2, 0), output, CharEncoding::TwoByte);
    jump(&done);

    bind(&isLatin1);
    loadStringChars(scratch1, scratch2, CharEncoding::Latin1);
    loadChar(Address(scratch2, 0), output, CharEncoding::Latin1);

    bind(&done);
  } else {
    move32(Imm32(index), scratch1);
    loadStringChar(str, scratch1, output, scratch1, scratch2, fail);
  }
}

void MacroAssembler::loadStringIndexValue(Register str, Register dest,
                                          Label* fail) {
  MOZ_ASSERT(str != dest);

  load32(Address(str, JSString::offsetOfFlags()), dest);

  // Does not have a cached index value.
  branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);

  // Extract the index.
  rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
}

void MacroAssembler::loadChar(Register chars, Register index, Register dest,
                              CharEncoding encoding, int32_t offset /* = 0 */) {
  if (encoding == CharEncoding::Latin1) {
    loadChar(BaseIndex(chars, index, TimesOne, offset), dest, encoding);
  } else {
    loadChar(BaseIndex(chars, index, TimesTwo, offset), dest, encoding);
  }
}

void MacroAssembler::addToCharPtr(Register chars, Register index,
                                  CharEncoding encoding) {
  if (encoding == CharEncoding::Latin1) {
    static_assert(sizeof(char) == 1,
                  "Latin-1 string index shouldn't need scaling");
    addPtr(index, chars);
  } else {
    computeEffectiveAddress(BaseIndex(chars, index, TimesTwo), chars);
  }
}

void MacroAssembler::branchIfNotLeadSurrogate(Register src, Label* label) {
  branch32(Assembler::Below, src, Imm32(unicode::LeadSurrogateMin), label);
  branch32(Assembler::Above, src, Imm32(unicode::LeadSurrogateMax), label);
}

void MacroAssembler::branchSurrogate(Assembler::Condition cond, Register src,
                                     Register scratch, Label* label,
                                     SurrogateChar surrogateChar) {
  // For TrailSurrogateMin ≤ x ≤ TrailSurrogateMax and
  // LeadSurrogateMin ≤ x ≤ LeadSurrogateMax, the following equations hold.
  //
  //    SurrogateMin ≤ x ≤ SurrogateMax
  // <> SurrogateMin ≤ x ≤ SurrogateMin + 2^10 - 1
  // <> ((x - SurrogateMin) >>> 10) = 0    where >>> is an unsigned-shift
  //
  // See Hacker's Delight, section 4-1 for details.
  //
  //    ((x - SurrogateMin) >>> 10) = 0
  // <> floor((x - SurrogateMin) / 1024) = 0
  // <> floor((x / 1024) - (SurrogateMin / 1024)) = 0
  // <> floor(x / 1024) = SurrogateMin / 1024
  // <> floor(x / 1024) * 1024 = SurrogateMin
  // <> (x >>> 10) << 10 = SurrogateMin
  // <> x & ~(2^10 - 1) = SurrogateMin

  constexpr char16_t SurrogateMask = 0xFC00;
  char16_t SurrogateMin = surrogateChar == SurrogateChar::Lead
                              ? unicode::LeadSurrogateMin
                              : unicode::TrailSurrogateMin;

  if (src != scratch) {
    move32(src, scratch);
  }

  and32(Imm32(SurrogateMask), scratch);
  branch32(cond, scratch, Imm32(SurrogateMin), label);
}

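// Worked example of the masked compare above: with SurrogateMask = 0xFC00,
// every lead surrogate x in [0xD800, 0xDBFF] yields (x & 0xFC00) == 0xD800 and
// every trail surrogate x in [0xDC00, 0xDFFF] yields (x & 0xFC00) == 0xDC00,
// while the neighbors fall outside (0xD7FF & 0xFC00 == 0xD400 and
// 0xE000 & 0xFC00 == 0xE000), so one AND plus one compare covers the whole
// 1024-wide range.
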
void MacroAssembler::loadStringFromUnit(Register unit, Register dest,
                                        const StaticStrings& staticStrings) {
  movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
  loadPtr(BaseIndex(dest, unit, ScalePointer), dest);
}

void MacroAssembler::loadLengthTwoString(Register c1, Register c2,
                                         Register dest,
                                         const StaticStrings& staticStrings) {
  // Compute (toSmallCharTable[c1] << SMALL_CHAR_BITS) + toSmallCharTable[c2]
  // to obtain the index into `StaticStrings::length2StaticTable`.
  static_assert(sizeof(StaticStrings::SmallChar) == 1);

  movePtr(ImmPtr(&StaticStrings::toSmallCharTable.storage), dest);
  load8ZeroExtend(BaseIndex(dest, c1, Scale::TimesOne), c1);
  load8ZeroExtend(BaseIndex(dest, c2, Scale::TimesOne), c2);

  lshift32(Imm32(StaticStrings::SMALL_CHAR_BITS), c1);
  add32(c2, c1);

  // Look up the string from the computed index.
  movePtr(ImmPtr(&staticStrings.length2StaticTable), dest);
  loadPtr(BaseIndex(dest, c1, ScalePointer), dest);
}

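// Worked example (illustrative): for c1 = 'h' and c2 = 'i', the lookup index
// is (toSmallCharTable['h'] << SMALL_CHAR_BITS) + toSmallCharTable['i'], the
// same index computation StaticStrings performs on the C++ side, so the final
// load yields the interned two-character string "hi" without allocating.
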
void MacroAssembler::lookupStaticString(Register ch, Register dest,
                                        const StaticStrings& staticStrings) {
  MOZ_ASSERT(ch != dest);

  movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
  loadPtr(BaseIndex(dest, ch, ScalePointer), dest);
}

void MacroAssembler::lookupStaticString(Register ch, Register dest,
                                        const StaticStrings& staticStrings,
                                        Label* fail) {
  MOZ_ASSERT(ch != dest);

  boundsCheck32PowerOfTwo(ch, StaticStrings::UNIT_STATIC_LIMIT, fail);
  movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
  loadPtr(BaseIndex(dest, ch, ScalePointer), dest);
}

void MacroAssembler::lookupStaticString(Register ch1, Register ch2,
                                        Register dest,
                                        const StaticStrings& staticStrings,
                                        Label* fail) {
  MOZ_ASSERT(ch1 != dest);
  MOZ_ASSERT(ch2 != dest);

  branch32(Assembler::AboveOrEqual, ch1,
           Imm32(StaticStrings::SMALL_CHAR_TABLE_SIZE), fail);
  branch32(Assembler::AboveOrEqual, ch2,
           Imm32(StaticStrings::SMALL_CHAR_TABLE_SIZE), fail);

  movePtr(ImmPtr(&StaticStrings::toSmallCharTable.storage), dest);
  load8ZeroExtend(BaseIndex(dest, ch1, Scale::TimesOne), ch1);
  load8ZeroExtend(BaseIndex(dest, ch2, Scale::TimesOne), ch2);

  branch32(Assembler::Equal, ch1, Imm32(StaticStrings::INVALID_SMALL_CHAR),
           fail);
  branch32(Assembler::Equal, ch2, Imm32(StaticStrings::INVALID_SMALL_CHAR),
           fail);

  lshift32(Imm32(StaticStrings::SMALL_CHAR_BITS), ch1);
  add32(ch2, ch1);

  // Look up the string from the computed index.
  movePtr(ImmPtr(&staticStrings.length2StaticTable), dest);
  loadPtr(BaseIndex(dest, ch1, ScalePointer), dest);
}

void MacroAssembler::lookupStaticIntString(Register integer, Register dest,
                                           Register scratch,
                                           const StaticStrings& staticStrings,
                                           Label* fail) {
  MOZ_ASSERT(integer != scratch);

  boundsCheck32PowerOfTwo(integer, StaticStrings::INT_STATIC_LIMIT, fail);
  movePtr(ImmPtr(&staticStrings.intStaticTable), scratch);
  loadPtr(BaseIndex(scratch, integer, ScalePointer), dest);
}

void MacroAssembler::loadInt32ToStringWithBase(
    Register input, Register base, Register dest, Register scratch1,
    Register scratch2, const StaticStrings& staticStrings,
    const LiveRegisterSet& volatileRegs, bool lowerCase, Label* fail) {
#ifdef DEBUG
  Label baseBad, baseOk;
  branch32(Assembler::LessThan, base, Imm32(2), &baseBad);
  branch32(Assembler::LessThanOrEqual, base, Imm32(36), &baseOk);
  bind(&baseBad);
  assumeUnreachable("base must be in range [2, 36]");
  bind(&baseOk);
#endif

  // Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
  auto toChar = [this, base, lowerCase](Register r) {
#ifdef DEBUG
    Label ok;
    branch32(Assembler::Below, r, base, &ok);
    assumeUnreachable("bad digit");
    bind(&ok);
#else
    // Silence unused lambda capture warning.
    (void)base;
#endif

    Label done;
    add32(Imm32('0'), r);
    branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
    add32(Imm32((lowerCase ? 'a' : 'A') - '0' - 10), r);
    bind(&done);
  };

  // Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
  Label lengthTwo, done;
  branch32(Assembler::AboveOrEqual, input, base, &lengthTwo);
  {
    move32(input, scratch1);
    toChar(scratch1);

    loadStringFromUnit(scratch1, dest, staticStrings);

    jump(&done);
  }
  bind(&lengthTwo);
  {
    // Compute |base * base|.
    move32(base, scratch1);
    mul32(scratch1, scratch1);

    // Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
    branch32(Assembler::AboveOrEqual, input, scratch1, fail);

    // Compute |scratch1 = input / base| and |scratch2 = input % base|.
    move32(input, scratch1);
    flexibleDivMod32(base, scratch1, scratch2, true, volatileRegs);

    // Compute the digits of the quotient and remainder.
    toChar(scratch1);
    toChar(scratch2);

    // Look up the 2-character digit string in the small-char table.
    loadLengthTwoString(scratch1, scratch2, dest, staticStrings);
  }
  bind(&done);
}

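// Digit-to-character sketch for |toChar| above: a digit r first becomes
// r + '0'; if the result exceeds '9' (i.e. r >= 10), adding ('a' - '0' - 10)
// shifts it into the letter range. For example, r = 11 with lowerCase = true
// gives 11 + '0' = 59, then 59 + ('a' - '0' - 10) = 98 = 'b'.
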
void MacroAssembler::loadInt32ToStringWithBase(
    Register input, int32_t base, Register dest, Register scratch1,
    Register scratch2, const StaticStrings& staticStrings, bool lowerCase,
    Label* fail) {
  MOZ_ASSERT(2 <= base && base <= 36, "base must be in range [2, 36]");

  // Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
  auto toChar = [this, base, lowerCase](Register r) {
#ifdef DEBUG
    Label ok;
    branch32(Assembler::Below, r, Imm32(base), &ok);
    assumeUnreachable("bad digit");
    bind(&ok);
#endif

    if (base <= 10) {
      add32(Imm32('0'), r);
    } else {
      Label done;
      add32(Imm32('0'), r);
      branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
      add32(Imm32((lowerCase ? 'a' : 'A') - '0' - 10), r);
      bind(&done);
    }
  };

  // Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
  Label lengthTwo, done;
  branch32(Assembler::AboveOrEqual, input, Imm32(base), &lengthTwo);
  {
    move32(input, scratch1);
    toChar(scratch1);

    loadStringFromUnit(scratch1, dest, staticStrings);

    jump(&done);
  }
  bind(&lengthTwo);
  {
    // Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
    branch32(Assembler::AboveOrEqual, input, Imm32(base * base), fail);

    // Compute |scratch1 = input / base| and |scratch2 = input % base|.
    if (mozilla::IsPowerOfTwo(uint32_t(base))) {
      uint32_t shift = mozilla::FloorLog2(base);

      move32(input, scratch1);
      rshift32(Imm32(shift), scratch1);

      move32(input, scratch2);
      and32(Imm32((uint32_t(1) << shift) - 1), scratch2);
    } else {
      // The following code matches CodeGenerator::visitUDivOrModConstant()
      // for x86-shared. Also see Hacker's Delight 2nd edition, chapter 10-8
      // "Unsigned Division by 7" for the case when |rmc.multiplier| exceeds
      // UINT32_MAX and we need to adjust the shift amount.

      auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(base);

      // We first compute |q = (M * n) >> 32|, where M = rmc.multiplier.
      mulHighUnsigned32(Imm32(rmc.multiplier), input, scratch1);

      if (rmc.multiplier > UINT32_MAX) {
        // M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
        // ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
        // contradicting the proof of correctness in computeDivisionConstants.
        MOZ_ASSERT(rmc.shiftAmount > 0);
        MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));

        // Compute |t = (n - q) / 2|.
        move32(input, scratch2);
        sub32(scratch1, scratch2);
        rshift32(Imm32(1), scratch2);

        // Compute |t = (n - q) / 2 + q = (n + q) / 2|.
        add32(scratch2, scratch1);

        // Finish the computation |q = floor(n / d)|.
        rshift32(Imm32(rmc.shiftAmount - 1), scratch1);
      } else {
        rshift32(Imm32(rmc.shiftAmount), scratch1);
      }

      // Compute the remainder from |r = n - q * d|.
      move32(scratch1, dest);
      mul32(Imm32(base), dest);
      move32(input, scratch2);
      sub32(dest, scratch2);
    }

    // Compute the digits of the quotient and remainder.
    toChar(scratch1);
    toChar(scratch2);

    // Look up the 2-character digit string in the small-char table.
    loadLengthTwoString(scratch1, scratch2, dest, staticStrings);
  }
  bind(&done);
}

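// Worked example of the reciprocal-multiplication path (illustrative numbers,
// assuming the standard unsigned magic-number construction): for base = 10
// the constants are multiplier M = 0xCCCCCCCD with shiftAmount = 3, so
// q = ((n * M) >> 32) >> 3 equals n / 10 for every uint32 n. E.g. n = 99:
// (99 * M) >> 32 = 79, and 79 >> 3 = 9, with remainder 99 - 9 * 10 = 9.
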
void MacroAssembler::loadBigIntDigits(Register bigInt, Register digits) {
  MOZ_ASSERT(digits != bigInt);

  // Load the inline digits.
  computeEffectiveAddress(Address(bigInt, BigInt::offsetOfInlineDigits()),
                          digits);

  // If inline digits aren't used, load the heap digits. Use a conditional move
  // to prevent speculative execution.
  cmp32LoadPtr(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
               Imm32(int32_t(BigInt::inlineDigitsLength())),
               Address(bigInt, BigInt::offsetOfHeapDigits()), digits);
}

void MacroAssembler::loadBigInt64(Register bigInt, Register64 dest) {
  // This code follows the implementation of |BigInt::toUint64()|. We're also
  // using it for inline callers of |BigInt::toInt64()|, which works, because
  // all supported Jit architectures use a two's complement representation for
  // int64 values, which means the WrapToSigned call in toInt64() is a no-op.

  Label done, nonZero;

  branchIfBigIntIsNonZero(bigInt, &nonZero);
  {
    move64(Imm64(0), dest);
    jump(&done);
  }
  bind(&nonZero);

#ifdef JS_PUNBOX64
  Register digits = dest.reg;
#else
  Register digits = dest.high;
#endif

  loadBigIntDigits(bigInt, digits);

#ifdef JS_PUNBOX64
  // Load the first digit into the destination register.
  load64(Address(digits, 0), dest);
#else
  // Load the first digit into the destination register's low value.
  load32(Address(digits, 0), dest.low);

  // And conditionally load the second digit into the high value register.
  Label twoDigits, digitsDone;
  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), &twoDigits);
  {
    move32(Imm32(0), dest.high);
    jump(&digitsDone);
  }
  bind(&twoDigits);
  load32(Address(digits, sizeof(BigInt::Digit)), dest.high);
  bind(&digitsDone);
#endif

  branchTest32(Assembler::Zero, Address(bigInt, BigInt::offsetOfFlags()),
               Imm32(BigInt::signBitMask()), &done);
  neg64(dest);

  bind(&done);
}

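// Digit-layout note: a BigInt digit is uintptr_t-sized, so on 64-bit targets
// the first digit already holds the full 64-bit magnitude, while on 32-bit
// targets a value such as 0x1'0000'0002 needs two digits, digits[0] =
// 0x00000002 (low half) and digits[1] = 0x00000001 (high half), which is why
// the 32-bit path above conditionally loads a second digit.
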
void MacroAssembler::loadFirstBigIntDigitOrZero(Register bigInt,
                                                Register dest) {
  Label done, nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  {
    movePtr(ImmWord(0), dest);
    jump(&done);
  }
  bind(&nonZero);

  loadBigIntDigits(bigInt, dest);

  // Load the first digit into the destination register.
  loadPtr(Address(dest, 0), dest);

  bind(&done);
}

void MacroAssembler::loadBigInt(Register bigInt, Register dest, Label* fail) {
  Label done, nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  {
    movePtr(ImmWord(0), dest);
    jump(&done);
  }
  bind(&nonZero);

  loadBigIntNonZero(bigInt, dest, fail);

  bind(&done);
}

void MacroAssembler::loadBigIntNonZero(Register bigInt, Register dest,
                                       Label* fail) {
  MOZ_ASSERT(bigInt != dest);

#ifdef DEBUG
  Label nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  assumeUnreachable("Unexpected zero BigInt");
  bind(&nonZero);
#endif

  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), fail);

  static_assert(BigInt::inlineDigitsLength() > 0,
                "Single digit BigInts use inline storage");

  // Load the first inline digit into the destination register.
  loadPtr(Address(bigInt, BigInt::offsetOfInlineDigits()), dest);

  // Return as a signed pointer.
  bigIntDigitToSignedPtr(bigInt, dest, fail);
}

void MacroAssembler::bigIntDigitToSignedPtr(Register bigInt, Register digit,
                                            Label* fail) {
  // BigInt digits are stored as absolute numbers. Take the failure path when
  // the digit can't be stored in intptr_t.
  branchTestPtr(Assembler::Signed, digit, digit, fail);

  // Negate |digit| when the BigInt is negative.
  Label nonNegative;
  branchIfBigIntIsNonNegative(bigInt, &nonNegative);
  negPtr(digit);
  bind(&nonNegative);
}

void MacroAssembler::loadBigIntAbsolute(Register bigInt, Register dest,
                                        Label* fail) {
  MOZ_ASSERT(bigInt != dest);

  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), fail);

  static_assert(BigInt::inlineDigitsLength() > 0,
                "Single digit BigInts use inline storage");

  // Load the first inline digit into the destination register.
  movePtr(ImmWord(0), dest);
  cmp32LoadPtr(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
               Imm32(0), Address(bigInt, BigInt::offsetOfInlineDigits()), dest);
}

void MacroAssembler::initializeBigInt64(Scalar::Type type, Register bigInt,
                                        Register64 val) {
  MOZ_ASSERT(Scalar::isBigIntType(type));

  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  Label done, nonZero;
  branch64(Assembler::NotEqual, val, Imm64(0), &nonZero);
  {
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  if (type == Scalar::BigInt64) {
    // Set the sign-bit for negative values and then continue with the two's
    // complement.
    Label isPositive;
    branch64(Assembler::GreaterThan, val, Imm64(0), &isPositive);
    {
      store32(Imm32(BigInt::signBitMask()),
              Address(bigInt, BigInt::offsetOfFlags()));
      neg64(val);
    }
    bind(&isPositive);
  }

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t, so there's a single "
                "store on 64-bit and up to two stores on 32-bit");

#ifndef JS_PUNBOX64
  Label singleDigit;
  branchTest32(Assembler::Zero, val.high, val.high, &singleDigit);
  store32(Imm32(2), Address(bigInt, BigInt::offsetOfLength()));
  bind(&singleDigit);

  // We can perform a single store64 on 32-bit platforms, because inline
  // storage can store at least two 32-bit integers.
  static_assert(BigInt::inlineDigitsLength() >= 2,
                "BigInt inline storage can store at least two digits");
#endif

  store64(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}

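// Worked example: initializing a Scalar::BigInt64 cell from val = -1 stores
// the sign bit in the flags, negates val to the magnitude 1, and then stores
// length 1 plus the single digit 1. That is, BigInts encode -1 as
// (negative, [1]) rather than as a two's complement bit pattern.
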
void MacroAssembler::initializeBigInt(Register bigInt, Register val) {
  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  Label done, nonZero;
  branchTestPtr(Assembler::NonZero, val, val, &nonZero);
  {
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  // Set the sign-bit for negative values and then continue with the two's
  // complement.
  Label isPositive;
  branchTestPtr(Assembler::NotSigned, val, val, &isPositive);
  {
    store32(Imm32(BigInt::signBitMask()),
            Address(bigInt, BigInt::offsetOfFlags()));
    negPtr(val);
  }
  bind(&isPositive);

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t");

  storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}

void MacroAssembler::initializeBigIntAbsolute(Register bigInt, Register val) {
  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  Label done, nonZero;
  branchTestPtr(Assembler::NonZero, val, val, &nonZero);
  {
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t");

  storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}

void MacroAssembler::copyBigIntWithInlineDigits(Register src, Register dest,
                                                Register temp,
                                                gc::Heap initialHeap,
                                                Label* fail) {
  branch32(Assembler::Above, Address(src, BigInt::offsetOfLength()),
           Imm32(int32_t(BigInt::inlineDigitsLength())), fail);

  newGCBigInt(dest, temp, initialHeap, fail);

  // Copy the sign-bit, but not any of the other bits used by the GC.
  load32(Address(src, BigInt::offsetOfFlags()), temp);
  and32(Imm32(BigInt::signBitMask()), temp);
  store32(temp, Address(dest, BigInt::offsetOfFlags()));

  // Copy the length.
  load32(Address(src, BigInt::offsetOfLength()), temp);
  store32(temp, Address(dest, BigInt::offsetOfLength()));

  // Copy the digits.
  Address srcDigits(src, js::BigInt::offsetOfInlineDigits());
  Address destDigits(dest, js::BigInt::offsetOfInlineDigits());

  for (size_t i = 0; i < BigInt::inlineDigitsLength(); i++) {
    static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                  "BigInt Digit size matches uintptr_t");

    loadPtr(srcDigits, temp);
    storePtr(temp, destDigits);

    srcDigits = Address(src, srcDigits.offset + sizeof(BigInt::Digit));
    destDigits = Address(dest, destDigits.offset + sizeof(BigInt::Digit));
  }
}

void MacroAssembler::compareBigIntAndInt32(JSOp op, Register bigInt,
                                           Register int32, Register scratch1,
                                           Register scratch2, Label* ifTrue,
                                           Label* ifFalse) {
  MOZ_ASSERT(IsLooseEqualityOp(op) || IsRelationalOp(op));

  static_assert(std::is_same_v<BigInt::Digit, uintptr_t>,
                "BigInt digit can be loaded in a pointer-sized register");
  static_assert(sizeof(BigInt::Digit) >= sizeof(uint32_t),
                "BigInt digit stores at least an uint32");

  // Test for too large numbers.
  //
  // If the absolute value of the BigInt can't be expressed in an uint32/uint64,
  // the result of the comparison is a constant.
  if (op == JSOp::Eq || op == JSOp::Ne) {
    Label* tooLarge = op == JSOp::Eq ? ifFalse : ifTrue;
    branch32(Assembler::GreaterThan,
             Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
             tooLarge);
  } else {
    Label doCompare;
    branch32(Assembler::LessThanOrEqual,
             Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
             &doCompare);

    // Still need to take the sign-bit into account for relational operations.
    if (op == JSOp::Lt || op == JSOp::Le) {
      branchIfBigIntIsNegative(bigInt, ifTrue);
      jump(ifFalse);
    } else {
      branchIfBigIntIsNegative(bigInt, ifFalse);
      jump(ifTrue);
    }

    bind(&doCompare);
  }

  // Test for mismatched signs and, if the signs are equal, load |abs(x)| in
  // |scratch1| and |abs(y)| in |scratch2| and then compare the absolute numbers
  // against each other.
  {
    // Jump to |ifTrue| resp. |ifFalse| if the BigInt is strictly less than
    // resp. strictly greater than the int32 value, depending on the comparison
    // operator.
    Label* greaterThan;
    Label* lessThan;
    if (op == JSOp::Eq) {
      greaterThan = ifFalse;
      lessThan = ifFalse;
    } else if (op == JSOp::Ne) {
      greaterThan = ifTrue;
      lessThan = ifTrue;
    } else if (op == JSOp::Lt || op == JSOp::Le) {
      greaterThan = ifFalse;
      lessThan = ifTrue;
    } else {
      MOZ_ASSERT(op == JSOp::Gt || op == JSOp::Ge);
      greaterThan = ifTrue;
      lessThan = ifFalse;
    }

    // BigInt digits are always stored as an absolute number.
    loadFirstBigIntDigitOrZero(bigInt, scratch1);

    // Load the int32 into |scratch2| and negate it for negative numbers.
    move32(int32, scratch2);

    Label isNegative, doCompare;
    branchIfBigIntIsNegative(bigInt, &isNegative);
    branch32(Assembler::LessThan, int32, Imm32(0), greaterThan);
    jump(&doCompare);

    // We rely on |neg32(INT32_MIN)| staying INT32_MIN, because we're using an
    // unsigned comparison below.
    bind(&isNegative);
    branch32(Assembler::GreaterThanOrEqual, int32, Imm32(0), lessThan);
    neg32(scratch2);

    // Not all supported platforms (e.g. MIPS64) zero-extend 32-bit operations,
    // so we need to explicitly clear any high 32-bits.
    move32ZeroExtendToPtr(scratch2, scratch2);

    // Reverse the relational comparator for negative numbers.
    // |-x < -y| <=> |+x > +y|.
    // |-x ≤ -y| <=> |+x ≥ +y|.
    // |-x > -y| <=> |+x < +y|.
    // |-x ≥ -y| <=> |+x ≤ +y|.
    JSOp reversed = ReverseCompareOp(op);
    if (reversed != op) {
      branchPtr(JSOpToCondition(reversed, /* isSigned = */ false), scratch1,
                scratch2, ifTrue);
      jump(ifFalse);
    }

    bind(&doCompare);
    branchPtr(JSOpToCondition(op, /* isSigned = */ false), scratch1, scratch2,
              ifTrue);
    jump(ifFalse);
  }
}

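// Worked example of the sign handling above: for |-5n < -3|, the magnitudes
// 5 and 3 end up in |scratch1| and |scratch2|, and because both operands are
// negative the operator is rewritten via ReverseCompareOp to an unsigned
// |5 > 3|, which is true, matching -5 < -3.
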
void MacroAssembler::equalBigInts(Register left, Register right, Register temp1,
                                  Register temp2, Register temp3,
                                  Register temp4, Label* notSameSign,
                                  Label* notSameLength, Label* notSameDigit) {
  MOZ_ASSERT(left != temp1);
  MOZ_ASSERT(right != temp1);
  MOZ_ASSERT(right != temp2);

  // Jump to |notSameSign| when the signs aren't the same.
  load32(Address(left, BigInt::offsetOfFlags()), temp1);
  xor32(Address(right, BigInt::offsetOfFlags()), temp1);
  branchTest32(Assembler::NonZero, temp1, Imm32(BigInt::signBitMask()),
               notSameSign);

  // Jump to |notSameLength| when the digits length is different.
  load32(Address(right, BigInt::offsetOfLength()), temp1);
  branch32(Assembler::NotEqual, Address(left, BigInt::offsetOfLength()), temp1,
           notSameLength);

  // Both BigInts have the same sign and the same number of digits. Loop
  // over each digit, starting with the left-most one, and break from the
  // loop when the first non-matching digit was found.

  loadBigIntDigits(left, temp2);
  loadBigIntDigits(right, temp3);

  static_assert(sizeof(BigInt::Digit) == sizeof(void*),
                "BigInt::Digit is pointer sized");

  computeEffectiveAddress(BaseIndex(temp2, temp1, ScalePointer), temp2);
  computeEffectiveAddress(BaseIndex(temp3, temp1, ScalePointer), temp3);

  Label start, loop;
  jump(&start);
  bind(&loop);

  subPtr(Imm32(sizeof(BigInt::Digit)), temp2);
  subPtr(Imm32(sizeof(BigInt::Digit)), temp3);

  loadPtr(Address(temp3, 0), temp4);
  branchPtr(Assembler::NotEqual, Address(temp2, 0), temp4, notSameDigit);

  bind(&start);
  branchSub32(Assembler::NotSigned, Imm32(1), temp1, &loop);

  // No different digits were found, both BigInts are equal to each other.
}

void MacroAssembler::typeOfObject(Register obj, Register scratch, Label* slow,
                                  Label* isObject, Label* isCallable,
                                  Label* isUndefined) {
  loadObjClassUnsafe(obj, scratch);

  // Proxies can emulate undefined and have complex isCallable behavior.
  branchTestClassIsProxy(true, scratch, slow);

  // JSFunctions are always callable.
  branchTestClassIsFunction(Assembler::Equal, scratch, isCallable);

  // Objects that emulate undefined.
  Address flags(scratch, JSClass::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_EMULATES_UNDEFINED),
               isUndefined);

  // Handle classes with a call hook.
  branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClass, cOps)),
            ImmPtr(nullptr), isObject);

  loadPtr(Address(scratch, offsetof(JSClass, cOps)), scratch);
  branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClassOps, call)),
            ImmPtr(nullptr), isObject);

  jump(isCallable);
}

void MacroAssembler::isCallableOrConstructor(bool isCallable, Register obj,
                                             Register output, Label* isProxy) {
  MOZ_ASSERT(obj != output);

  Label notFunction, hasCOps, done;
  loadObjClassUnsafe(obj, output);

  // An object is callable iff:
  //   is<JSFunction>() || (getClass()->cOps && getClass()->cOps->call).
  // An object is constructor iff:
  //  ((is<JSFunction>() && as<JSFunction>().isConstructor) ||
  //   (getClass()->cOps && getClass()->cOps->construct)).
  branchTestClassIsFunction(Assembler::NotEqual, output, &notFunction);
  if (isCallable) {
    move32(Imm32(1), output);
  } else {
    static_assert(mozilla::IsPowerOfTwo(uint32_t(FunctionFlags::CONSTRUCTOR)),
                  "FunctionFlags::CONSTRUCTOR has only one bit set");

    load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), output);
    rshift32(Imm32(mozilla::FloorLog2(uint32_t(FunctionFlags::CONSTRUCTOR))),
             output);
    and32(Imm32(1), output);
  }
  jump(&done);

  bind(&notFunction);

  if (!isCallable) {
    // For bound functions, we need to check the isConstructor flag.
    Label notBoundFunction;
    branchPtr(Assembler::NotEqual, output, ImmPtr(&BoundFunctionObject::class_),
              &notBoundFunction);

    static_assert(BoundFunctionObject::IsConstructorFlag == 0b1,
                  "AND operation results in boolean value");
    unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()), output);
    and32(Imm32(BoundFunctionObject::IsConstructorFlag), output);
    jump(&done);

    bind(&notBoundFunction);
  }

  // Just skim proxies off. Their notion of isCallable()/isConstructor() is
  // more complicated.
  branchTestClassIsProxy(true, output, isProxy);

  branchPtr(Assembler::NonZero, Address(output, offsetof(JSClass, cOps)),
            ImmPtr(nullptr), &hasCOps);
  move32(Imm32(0), output);
  jump(&done);

  bind(&hasCOps);
  loadPtr(Address(output, offsetof(JSClass, cOps)), output);
  size_t opsOffset =
      isCallable ? offsetof(JSClassOps, call) : offsetof(JSClassOps, construct);
  cmpPtrSet(Assembler::NonZero, Address(output, opsOffset), ImmPtr(nullptr),
            output);

  bind(&done);
}

void MacroAssembler::loadJSContext(Register dest) {
  movePtr(ImmPtr(runtime()->mainContextPtr()), dest);
}

static const uint8_t* ContextRealmPtr(CompileRuntime* rt) {
  return (static_cast<const uint8_t*>(rt->mainContextPtr()) +
          JSContext::offsetOfRealm());
}

void MacroAssembler::loadGlobalObjectData(Register dest) {
  loadPtr(AbsoluteAddress(ContextRealmPtr(runtime())), dest);
  loadPtr(Address(dest, Realm::offsetOfActiveGlobal()), dest);
  loadPrivate(Address(dest, GlobalObject::offsetOfGlobalDataSlot()), dest);
}

void MacroAssembler::switchToRealm(Register realm) {
  storePtr(realm, AbsoluteAddress(ContextRealmPtr(runtime())));
}

void MacroAssembler::loadRealmFuse(RealmFuses::FuseIndex index, Register dest) {
  // Load Realm pointer
  loadPtr(AbsoluteAddress(ContextRealmPtr(runtime())), dest);
  loadPtr(Address(dest, RealmFuses::offsetOfFuseWordRelativeToRealm(index)),
          dest);
}

void MacroAssembler::switchToRealm(const void* realm, Register scratch) {
  MOZ_ASSERT(realm);

  movePtr(ImmPtr(realm), scratch);
  switchToRealm(scratch);
}

void MacroAssembler::switchToObjectRealm(Register obj, Register scratch) {
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
  switchToRealm(scratch);
}

void MacroAssembler::switchToBaselineFrameRealm(Register scratch) {
  Address envChain(FramePointer,
                   BaselineFrame::reverseOffsetOfEnvironmentChain());
  loadPtr(envChain, scratch);
  switchToObjectRealm(scratch, scratch);
}

void MacroAssembler::switchToWasmInstanceRealm(Register scratch1,
                                               Register scratch2) {
  loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), scratch1);
  loadPtr(Address(InstanceReg, wasm::Instance::offsetOfRealm()), scratch2);
  storePtr(scratch2, Address(scratch1, JSContext::offsetOfRealm()));
}

void MacroAssembler::debugAssertContextRealm(const void* realm,
                                             Register scratch) {
#ifdef DEBUG
  Label ok;
  movePtr(ImmPtr(realm), scratch);
  branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr(runtime())),
            scratch, &ok);
  assumeUnreachable("Unexpected context realm");
  bind(&ok);
#endif
}

void MacroAssembler::setIsCrossRealmArrayConstructor(Register obj,
                                                     Register output) {
#ifdef DEBUG
  Label notProxy;
  branchTestObjectIsProxy(false, obj, output, &notProxy);
  assumeUnreachable("Unexpected proxy in setIsCrossRealmArrayConstructor");
  bind(&notProxy);
#endif

  // The object's realm must not be cx->realm.
  Label isFalse, done;
  loadPtr(Address(obj, JSObject::offsetOfShape()), output);
  loadPtr(Address(output, Shape::offsetOfBaseShape()), output);
  loadPtr(Address(output, BaseShape::offsetOfRealm()), output);
  branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr(runtime())),
            output, &isFalse);

  // The object must be a function.
  branchTestObjIsFunction(Assembler::NotEqual, obj, output, obj, &isFalse);

  // The function must be the ArrayConstructor native.
  branchPtr(Assembler::NotEqual,
            Address(obj, JSFunction::offsetOfNativeOrEnv()),
            ImmPtr(js::ArrayConstructor), &isFalse);

  move32(Imm32(1), output);
  jump(&done);

  bind(&isFalse);
  move32(Imm32(0), output);

  bind(&done);
}

void MacroAssembler::setIsDefinitelyTypedArrayConstructor(Register obj,
                                                          Register output) {
  Label isFalse, isTrue, done;

  // The object must be a function. (Wrappers are not supported.)
  branchTestObjIsFunction(Assembler::NotEqual, obj, output, obj, &isFalse);

  // Load the native into |output|.
  loadPtr(Address(obj, JSFunction::offsetOfNativeOrEnv()), output);

  auto branchIsTypedArrayCtor = [&](Scalar::Type type) {
    // The function must be a TypedArrayConstructor native (from any realm).
    JSNative constructor = TypedArrayConstructorNative(type);
    branchPtr(Assembler::Equal, output, ImmPtr(constructor), &isTrue);
  };

#define TYPED_ARRAY_CONSTRUCTOR_NATIVE(_, T, N) \
  branchIsTypedArrayCtor(Scalar::N);
  JS_FOR_EACH_TYPED_ARRAY(TYPED_ARRAY_CONSTRUCTOR_NATIVE)
#undef TYPED_ARRAY_CONSTRUCTOR_NATIVE

  // Falls through to the false case.

  bind(&isFalse);
  move32(Imm32(0), output);
  jump(&done);

  bind(&isTrue);
  move32(Imm32(1), output);

  bind(&done);
}

void MacroAssembler::loadMegamorphicCache(Register dest) {
  movePtr(ImmPtr(runtime()->addressOfMegamorphicCache()), dest);
}

void MacroAssembler::loadMegamorphicSetPropCache(Register dest) {
  movePtr(ImmPtr(runtime()->addressOfMegamorphicSetPropCache()), dest);
}

void MacroAssembler::loadStringToAtomCacheLastLookups(Register dest) {
  uintptr_t cachePtr = uintptr_t(runtime()->addressOfStringToAtomCache());
  void* offset = (void*)(cachePtr + StringToAtomCache::offsetOfLastLookups());
  movePtr(ImmPtr(offset), dest);
}

void MacroAssembler::loadAtomHash(Register id, Register outHash, Label* done) {
  Label doneInner, fatInline;
  if (!done) {
    done = &doneInner;
  }
  move32(Imm32(JSString::FAT_INLINE_MASK), outHash);
  and32(Address(id, JSString::offsetOfFlags()), outHash);

  branch32(Assembler::Equal, outHash, Imm32(JSString::FAT_INLINE_MASK),
           &fatInline);
  load32(Address(id, NormalAtom::offsetOfHash()), outHash);
  jump(done);

  bind(&fatInline);
  load32(Address(id, FatInlineAtom::offsetOfHash()), outHash);
  jump(done);

  bind(&doneInner);
}

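// The masked compare above tests that every bit of FAT_INLINE_MASK is set in
// the flags word: only then is the string a fat-inline atom, whose hash lives
// at a different offset (FatInlineAtom::offsetOfHash()) than a normal atom's
// (NormalAtom::offsetOfHash()).
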
void MacroAssembler::loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
                                             Register outHash,
                                             Label* cacheMiss) {
  Label isString, isSymbol, isNull, isUndefined, done, nonAtom, atom,
      lastLookupAtom;

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);
    branchTestString(Assembler::Equal, tag, &isString);
    branchTestSymbol(Assembler::Equal, tag, &isSymbol);
    branchTestNull(Assembler::Equal, tag, &isNull);
    branchTestUndefined(Assembler::NotEqual, tag, cacheMiss);
  }

  bind(&isUndefined);
  const JSAtomState& names = runtime()->names();
  movePropertyKey(PropertyKey::NonIntAtom(names.undefined), outId);
  move32(Imm32(names.undefined->hash()), outHash);
  jump(&done);

  bind(&isNull);
  movePropertyKey(PropertyKey::NonIntAtom(names.null), outId);
  move32(Imm32(names.null->hash()), outHash);
  jump(&done);

  bind(&isSymbol);
  unboxSymbol(value, outId);
  load32(Address(outId, JS::Symbol::offsetOfHash()), outHash);
  orPtr(Imm32(PropertyKey::SymbolTypeTag), outId);
  jump(&done);

  bind(&isString);
  unboxString(value, outId);
  branchTest32(Assembler::Zero, Address(outId, JSString::offsetOfFlags()),
               Imm32(JSString::ATOM_BIT), &nonAtom);

  bind(&atom);
  loadAtomHash(outId, outHash, &done);

  bind(&nonAtom);
  loadStringToAtomCacheLastLookups(outHash);

  // Compare each entry in the StringToAtomCache's lastLookups_ array
  size_t stringOffset = StringToAtomCache::LastLookup::offsetOfString();
  branchPtr(Assembler::Equal, Address(outHash, stringOffset), outId,
            &lastLookupAtom);
  for (size_t i = 0; i < StringToAtomCache::NumLastLookups - 1; ++i) {
    addPtr(Imm32(sizeof(StringToAtomCache::LastLookup)), outHash);
    branchPtr(Assembler::Equal, Address(outHash, stringOffset), outId,
              &lastLookupAtom);
  }

  // Couldn't find us in the cache, so fall back to the C++ call
  jump(cacheMiss);

  // We found a hit in the lastLookups_ array! Load the associated atom
  // and jump back up to our usual atom handling code
  bind(&lastLookupAtom);
  size_t atomOffset = StringToAtomCache::LastLookup::offsetOfAtom();
  loadPtr(Address(outHash, atomOffset), outId);
  jump(&atom);

  bind(&done);
}

void MacroAssembler::emitExtractValueFromMegamorphicCacheEntry(
    Register obj, Register entry, Register scratch1, Register scratch2,
    ValueOperand output, Label* cacheHit, Label* cacheMiss) {
  Label isMissing, dynamicSlot, protoLoopHead, protoLoopTail;

  // scratch2 = entry->numHops_
  load8ZeroExtend(Address(entry, MegamorphicCache::Entry::offsetOfNumHops()),
                  scratch2);
  // if (scratch2 == NumHopsForMissingOwnProperty) goto cacheMiss
  branch32(Assembler::Equal, scratch2,
           Imm32(MegamorphicCache::Entry::NumHopsForMissingOwnProperty),
           cacheMiss);
  // if (scratch2 == NumHopsForMissingProperty) goto isMissing
  branch32(Assembler::Equal, scratch2,
           Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty),
           &isMissing);

  // NOTE: Where this is called, `output` can actually alias `obj`, and before
  // the last cacheMiss branch above we can't write to `obj`, so we can't
  // use `output`'s scratch register there. However a cache miss is impossible
  // now, so we're free to use `output` as we like.
  Register outputScratch = output.scratchReg();
  if (!outputScratch.aliases(obj)) {
    // We're okay with paying this very slight extra cost to avoid a potential
    // footgun of writing to what callers understand as only an input register.
    movePtr(obj, outputScratch);
  }
  branchTest32(Assembler::Zero, scratch2, scratch2, &protoLoopTail);
  bind(&protoLoopHead);
  loadObjProto(outputScratch, outputScratch);
  branchSub32(Assembler::NonZero, Imm32(1), scratch2, &protoLoopHead);
  bind(&protoLoopTail);

  // scratch1 = entry->slotOffset()
  load32(Address(entry, MegamorphicCacheEntry::offsetOfSlotOffset()), scratch1);

  // scratch2 = slotOffset.offset()
  move32(scratch1, scratch2);
  rshift32(Imm32(TaggedSlotOffset::OffsetShift), scratch2);

  // if (!slotOffset.isFixedSlot()) goto dynamicSlot
  branchTest32(Assembler::Zero, scratch1,
               Imm32(TaggedSlotOffset::IsFixedSlotFlag), &dynamicSlot);
  // output = outputScratch[scratch2]
  loadValue(BaseIndex(outputScratch, scratch2, TimesOne), output);
  jump(cacheHit);

  bind(&dynamicSlot);
  // output = outputScratch->slots_[scratch2]
  loadPtr(Address(outputScratch, NativeObject::offsetOfSlots()), outputScratch);
  loadValue(BaseIndex(outputScratch, scratch2, TimesOne), output);
  jump(cacheHit);

  bind(&isMissing);
  // output = undefined
  moveValue(UndefinedValue(), output);
  jump(cacheHit);
}

template <typename IdOperandType>
void MacroAssembler::emitMegamorphicCacheLookupByValueCommon(
    IdOperandType id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, Label* cacheMiss, Label* cacheMissWithEntry) {
  // A lot of this code is shared with emitMegamorphicCacheLookup. It would
  // be nice to be able to avoid the duplication here, but due to a few
  // differences like taking the id in a ValueOperand instead of being able
  // to bake it in as an immediate, and only needing a Register for the output
  // value, it seemed more awkward to read once it was deduplicated.

  // outEntryPtr = obj->shape()
  loadPtr(Address(obj, JSObject::offsetOfShape()), outEntryPtr);

  movePtr(outEntryPtr, scratch2);

  // outEntryPtr = (outEntryPtr >> 3) ^ (outEntryPtr >> 13) + idHash
  rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1), outEntryPtr);
  rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2), scratch2);
  xorPtr(scratch2, outEntryPtr);

  if constexpr (std::is_same<IdOperandType, ValueOperand>::value) {
    loadAtomOrSymbolAndHash(id, scratch1, scratch2, cacheMiss);
  } else {
    static_assert(std::is_same<IdOperandType, Register>::value);
    movePtr(id, scratch1);
    loadAtomHash(scratch1, scratch2, nullptr);
  }
  addPtr(scratch2, outEntryPtr);

  // outEntryPtr %= MegamorphicCache::NumEntries
  constexpr size_t cacheSize = MegamorphicCache::NumEntries;
  static_assert(mozilla::IsPowerOfTwo(cacheSize));
  size_t cacheMask = cacheSize - 1;
  and32(Imm32(cacheMask), outEntryPtr);

  loadMegamorphicCache(scratch2);
  // outEntryPtr = &scratch2->entries_[outEntryPtr]
  constexpr size_t entrySize = sizeof(MegamorphicCache::Entry);
  static_assert(sizeof(void*) == 4 || entrySize == 24);
  if constexpr (sizeof(void*) == 4) {
    mul32(Imm32(entrySize), outEntryPtr);
    computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesOne,
                                      MegamorphicCache::offsetOfEntries()),
                            outEntryPtr);
  } else {
    computeEffectiveAddress(BaseIndex(outEntryPtr, outEntryPtr, TimesTwo),
                            outEntryPtr);
    computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesEight,
                                      MegamorphicCache::offsetOfEntries()),
                            outEntryPtr);
  }

  // if (outEntryPtr->key_ != scratch1) goto cacheMissWithEntry
  branchPtr(Assembler::NotEqual,
            Address(outEntryPtr, MegamorphicCache::Entry::offsetOfKey()),
            scratch1, cacheMissWithEntry);
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);

  // if (outEntryPtr->shape_ != scratch1) goto cacheMissWithEntry
  branchPtr(Assembler::NotEqual,
            Address(outEntryPtr, MegamorphicCache::Entry::offsetOfShape()),
            scratch1, cacheMissWithEntry);

  // scratch2 = scratch2->generation_
  load16ZeroExtend(Address(scratch2, MegamorphicCache::offsetOfGeneration()),
                   scratch2);
  load16ZeroExtend(
      Address(outEntryPtr, MegamorphicCache::Entry::offsetOfGeneration()),
      scratch1);
  // if (outEntryPtr->generation_ != scratch2) goto cacheMissWithEntry
  branch32(Assembler::NotEqual, scratch1, scratch2, cacheMissWithEntry);
}

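// Hash sketch (illustrative): with the shift constants named above, the entry
// index is
//
//   idx = (((shape >> 3) ^ (shape >> 13)) + hash(id)) & (NumEntries - 1)
//
// Two shifted copies of the shape pointer are XOR-ed to mix out the pointer's
// alignment bits, the id's hash is added, and the power-of-two table size
// turns the modulo into a single AND.
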
void MacroAssembler::emitMegamorphicCacheLookup(
    PropertyKey id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, ValueOperand output, Label* cacheHit) {
  Label cacheMiss, isMissing, dynamicSlot, protoLoopHead, protoLoopTail;

  // scratch1 = obj->shape()
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);

  movePtr(scratch1, outEntryPtr);
  movePtr(scratch1, scratch2);

  // outEntryPtr = (scratch1 >> 3) ^ (scratch1 >> 13) + hash(id)
  rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1), outEntryPtr);
  rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2), scratch2);
  xorPtr(scratch2, outEntryPtr);
  addPtr(Imm32(HashAtomOrSymbolPropertyKey(id)), outEntryPtr);

  // outEntryPtr %= MegamorphicCache::NumEntries
  constexpr size_t cacheSize = MegamorphicCache::NumEntries;
  static_assert(mozilla::IsPowerOfTwo(cacheSize));
  size_t cacheMask = cacheSize - 1;
  and32(Imm32(cacheMask), outEntryPtr);

  loadMegamorphicCache(scratch2);
  // outEntryPtr = &scratch2->entries_[outEntryPtr]
  constexpr size_t entrySize = sizeof(MegamorphicCache::Entry);
  static_assert(sizeof(void*) == 4 || entrySize == 24);
  if constexpr (sizeof(void*) == 4) {
    mul32(Imm32(entrySize), outEntryPtr);
    computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesOne,
                                      MegamorphicCache::offsetOfEntries()),
                            outEntryPtr);
  } else {
    computeEffectiveAddress(BaseIndex(outEntryPtr, outEntryPtr, TimesTwo),
                            outEntryPtr);
    computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesEight,
                                      MegamorphicCache::offsetOfEntries()),
                            outEntryPtr);
  }

  // if (outEntryPtr->shape_ != scratch1) goto cacheMiss
  branchPtr(Assembler::NotEqual,
            Address(outEntryPtr, MegamorphicCache::Entry::offsetOfShape()),
            scratch1, &cacheMiss);

  // if (outEntryPtr->key_ != id) goto cacheMiss
  movePropertyKey(id, scratch1);
  branchPtr(Assembler::NotEqual,
            Address(outEntryPtr, MegamorphicCache::Entry::offsetOfKey()),
            scratch1, &cacheMiss);

  // scratch2 = scratch2->generation_
  load16ZeroExtend(Address(scratch2, MegamorphicCache::offsetOfGeneration()),
                   scratch2);
  load16ZeroExtend(
      Address(outEntryPtr, MegamorphicCache::Entry::offsetOfGeneration()),
      scratch1);
  // if (outEntryPtr->generation_ != scratch2) goto cacheMiss
  branch32(Assembler::NotEqual, scratch1, scratch2, &cacheMiss);

  emitExtractValueFromMegamorphicCacheEntry(
      obj, outEntryPtr, scratch1, scratch2, output, cacheHit, &cacheMiss);

  bind(&cacheMiss);
}

template <typename IdOperandType>
void MacroAssembler::emitMegamorphicCacheLookupByValue(
    IdOperandType id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, ValueOperand output, Label* cacheHit) {
  Label cacheMiss, cacheMissWithEntry;
  emitMegamorphicCacheLookupByValueCommon(id, obj, scratch1, scratch2,
                                          outEntryPtr, &cacheMiss,
                                          &cacheMissWithEntry);
  emitExtractValueFromMegamorphicCacheEntry(obj, outEntryPtr, scratch1,
                                            scratch2, output, cacheHit,
                                            &cacheMissWithEntry);
  bind(&cacheMiss);
  xorPtr(outEntryPtr, outEntryPtr);
  bind(&cacheMissWithEntry);
}

template void MacroAssembler::emitMegamorphicCacheLookupByValue<ValueOperand>(
    ValueOperand id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, ValueOperand output, Label* cacheHit);

template void MacroAssembler::emitMegamorphicCacheLookupByValue<Register>(
    Register id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, ValueOperand output, Label* cacheHit);

void MacroAssembler::emitMegamorphicCacheLookupExists(
    ValueOperand id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, Register output, Label* cacheHit, bool hasOwn) {
  Label cacheMiss, cacheMissWithEntry, cacheHitFalse;
  emitMegamorphicCacheLookupByValueCommon(id, obj, scratch1, scratch2,
                                          outEntryPtr, &cacheMiss,
                                          &cacheMissWithEntry);

  // scratch1 = outEntryPtr->numHops_
  load8ZeroExtend(
      Address(outEntryPtr, MegamorphicCache::Entry::offsetOfNumHops()),
      scratch1);

  branch32(Assembler::Equal, scratch1,
           Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty),
           &cacheHitFalse);

  if (hasOwn) {
    branch32(Assembler::NotEqual, scratch1, Imm32(0), &cacheHitFalse);
  } else {
    branch32(Assembler::Equal, scratch1,
             Imm32(MegamorphicCache::Entry::NumHopsForMissingOwnProperty),
             &cacheMissWithEntry);
  }

  move32(Imm32(1), output);
  jump(cacheHit);

  bind(&cacheHitFalse);
  xor32(output, output);
  jump(cacheHit);

  bind(&cacheMiss);
  xorPtr(outEntryPtr, outEntryPtr);
  bind(&cacheMissWithEntry);
}

void MacroAssembler::extractCurrentIndexAndKindFromIterator(Register iterator,
                                                            Register outIndex,
                                                            Register outKind) {
  // Load iterator object
  Address nativeIterAddr(iterator,
                         PropertyIteratorObject::offsetOfIteratorSlot());
  loadPrivate(nativeIterAddr, outIndex);

  // Compute offset of propertyCursor_ from propertiesBegin()
  loadPtr(Address(outIndex, NativeIterator::offsetOfPropertyCursor()), outKind);
  subPtr(Address(outIndex, NativeIterator::offsetOfShapesEnd()), outKind);

  // Compute offset of current index from indicesBegin(). Note that because
  // propertyCursor has already been incremented, this is actually the offset
  // of the next index. We adjust accordingly below.
  size_t indexAdjustment =
      sizeof(GCPtr<JSLinearString*>) / sizeof(PropertyIndex);
  if (indexAdjustment != 1) {
    MOZ_ASSERT(indexAdjustment == 2);
    rshift32(Imm32(1), outKind);
  }

  // Load current index.
  loadPtr(Address(outIndex, NativeIterator::offsetOfPropertiesEnd()), outIndex);
  load32(BaseIndex(outIndex, outKind, Scale::TimesOne,
                   -int32_t(sizeof(PropertyIndex))),
         outIndex);

  // Extract the kind.
  move32(outIndex, outKind);
  rshift32(Imm32(PropertyIndex::KindShift), outKind);

  // Extract the index.
  and32(Imm32(PropertyIndex::IndexMask), outIndex);
}

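// Arithmetic note: on 64-bit targets sizeof(GCPtr<JSLinearString*>) == 8 and
// sizeof(PropertyIndex) == 4, so indexAdjustment == 2 and the cursor offset
// (in name-array bytes) is halved by the rshift32 above to index the parallel
// PropertyIndex array; on 32-bit targets both are 4 bytes and no adjustment
// is needed.
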
template <typename IdType>
void MacroAssembler::emitMegamorphicCachedSetSlot(
    IdType id, Register obj, Register scratch1,
#ifndef JS_CODEGEN_X86  // See MegamorphicSetElement in LIROps.yaml
    Register scratch2, Register scratch3,
#endif
    ValueOperand value, Label* cacheHit,
    void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType)) {
  Label cacheMiss, dynamicSlot, doAdd, doSet, doAddDynamic, doSetDynamic;

#ifdef JS_CODEGEN_X86
  pushValue(value);
  Register scratch2 = value.typeReg();
  Register scratch3 = value.payloadReg();
#endif

  // outEntryPtr = obj->shape()
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch3);

  movePtr(scratch3, scratch2);

  // scratch3 = (scratch3 >> 3) ^ (scratch3 >> 13) + idHash
  rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift1), scratch3);
  rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift2), scratch2);
  xorPtr(scratch2, scratch3);

  if constexpr (std::is_same<IdType, ValueOperand>::value) {
    loadAtomOrSymbolAndHash(id, scratch1, scratch2, &cacheMiss);
    addPtr(scratch2, scratch3);
  } else {
    static_assert(std::is_same<IdType, PropertyKey>::value);
    addPtr(Imm32(HashAtomOrSymbolPropertyKey(id)), scratch3);
    movePropertyKey(id, scratch1);
  }

  // scratch3 %= MegamorphicSetPropCache::NumEntries
  constexpr size_t cacheSize = MegamorphicSetPropCache::NumEntries;
  static_assert(mozilla::IsPowerOfTwo(cacheSize));
  size_t cacheMask = cacheSize - 1;
  and32(Imm32(cacheMask), scratch3);

  loadMegamorphicSetPropCache(scratch2);
  // scratch3 = &scratch2->entries_[scratch3]
  constexpr size_t entrySize = sizeof(MegamorphicSetPropCache::Entry);
  mul32(Imm32(entrySize), scratch3);
  computeEffectiveAddress(BaseIndex(scratch2, scratch3, TimesOne,
                                    MegamorphicSetPropCache::offsetOfEntries()),
                          scratch3);

  // if (scratch3->key_ != scratch1) goto cacheMiss
  branchPtr(Assembler::NotEqual,
            Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfKey()),
            scratch1, &cacheMiss);

  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);
  // if (scratch3->shape_ != scratch1) goto cacheMiss
  branchPtr(Assembler::NotEqual,
            Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfShape()),
            scratch1, &cacheMiss);

  // scratch2 = scratch2->generation_
  load16ZeroExtend(
      Address(scratch2, MegamorphicSetPropCache::offsetOfGeneration()),
      scratch2);
  load16ZeroExtend(
      Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfGeneration()),
      scratch1);
  // if (scratch3->generation_ != scratch2) goto cacheMiss
  branch32(Assembler::NotEqual, scratch1, scratch2, &cacheMiss);

  // scratch2 = entry->slotOffset()
  load32(
      Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfSlotOffset()),
      scratch2);

  // scratch1 = slotOffset.offset()
  move32(scratch2, scratch1);
  rshift32(Imm32(TaggedSlotOffset::OffsetShift), scratch1);

  Address afterShapePtr(scratch3,
                        MegamorphicSetPropCache::Entry::offsetOfAfterShape());

  // if (!slotOffset.isFixedSlot()) goto dynamicSlot
  branchTest32(Assembler::Zero, scratch2,
               Imm32(TaggedSlotOffset::IsFixedSlotFlag), &dynamicSlot);

  // Calculate slot address in scratch1. Jump to doSet if scratch3 == nullptr,
  // else jump (or fall-through) to doAdd.
  addPtr(obj, scratch1);
  branchPtr(Assembler::Equal, afterShapePtr, ImmPtr(nullptr), &doSet);
  jump(&doAdd);

  bind(&dynamicSlot);
  branchPtr(Assembler::Equal, afterShapePtr, ImmPtr(nullptr), &doSetDynamic);

  Address slotAddr(scratch1, 0);

  // If entry->newCapacity_ is nonzero, we need to grow the slots on the
  // object. Otherwise just jump straight to a dynamic add.
  load16ZeroExtend(
      Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfNewCapacity()),
      scratch2);
  branchTest32(Assembler::Zero, scratch2, scratch2, &doAddDynamic);

  AllocatableRegisterSet regs(RegisterSet::Volatile());
  regs.takeUnchecked(scratch2);

  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  Register tmp;
  if (regs.has(obj)) {
    regs.takeUnchecked(obj);
    tmp = regs.takeAnyGeneral();
    regs.addUnchecked(obj);
  } else {
    tmp = regs.takeAnyGeneral();
  }

  using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
  setupUnalignedABICall(tmp);
  loadJSContext(tmp);
  passABIArg(tmp);
  passABIArg(obj);
  passABIArg(scratch2);
  callWithABI<Fn, NativeObject::growSlotsPure>();
  storeCallPointerResult(scratch2);
  PopRegsInMask(save);

  branchIfFalseBool(scratch2, &cacheMiss);

  bind(&doAddDynamic);
  addPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);

  bind(&doAdd);
  // scratch3 = entry->afterShape()
  loadPtr(
      Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfAfterShape()),
      scratch3);

  storeObjShape(scratch3, obj,
                [emitPreBarrier](MacroAssembler& masm, const Address& addr) {
                  emitPreBarrier(masm, addr, MIRType::Shape);
                });
#ifdef JS_CODEGEN_X86
  popValue(value);
#endif
  storeValue(value, slotAddr);
  jump(cacheHit);

  bind(&doSetDynamic);
  addPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  bind(&doSet);
  guardedCallPreBarrier(slotAddr, MIRType::Value);

#ifdef JS_CODEGEN_X86
  popValue(value);
#endif
  storeValue(value, slotAddr);
  jump(cacheHit);

  bind(&cacheMiss);
#ifdef JS_CODEGEN_X86
  popValue(value);
#endif
}

template void MacroAssembler::emitMegamorphicCachedSetSlot<PropertyKey>(
    PropertyKey id, Register obj, Register scratch1,
#ifndef JS_CODEGEN_X86  // See MegamorphicSetElement in LIROps.yaml
    Register scratch2, Register scratch3,
#endif
    ValueOperand value, Label* cacheHit,
    void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));

template void MacroAssembler::emitMegamorphicCachedSetSlot<ValueOperand>(
    ValueOperand id, Register obj, Register scratch1,
#ifndef JS_CODEGEN_X86  // See MegamorphicSetElement in LIROps.yaml
    Register scratch2, Register scratch3,
#endif
    ValueOperand value, Label* cacheHit,
    void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));

void MacroAssembler::guardNonNegativeIntPtrToInt32(Register reg, Label* fail) {
#ifdef DEBUG
  Label ok;
  branchPtr(Assembler::NotSigned, reg, reg, &ok);
  assumeUnreachable("Unexpected negative value");
  bind(&ok);
#endif

#ifdef JS_64BIT
  branchPtr(Assembler::Above, reg, Imm32(INT32_MAX), fail);
#endif
}

void MacroAssembler::loadArrayBufferByteLengthIntPtr(Register obj,
                                                     Register output) {
  Address slotAddr(obj, ArrayBufferObject::offsetOfByteLengthSlot());
  loadPrivate(slotAddr, output);
}

void MacroAssembler::loadArrayBufferViewByteOffsetIntPtr(Register obj,
                                                         Register output) {
  Address slotAddr(obj, ArrayBufferViewObject::byteOffsetOffset());
  loadPrivate(slotAddr, output);
}

void MacroAssembler::loadArrayBufferViewLengthIntPtr(Register obj,
                                                     Register output) {
  Address slotAddr(obj, ArrayBufferViewObject::lengthOffset());
  loadPrivate(slotAddr, output);
}

void MacroAssembler::loadDOMExpandoValueGuardGeneration(
    Register obj, ValueOperand output,
    JS::ExpandoAndGeneration* expandoAndGeneration, uint64_t generation,
    Label* fail) {
  loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
          output.scratchReg());
  loadValue(Address(output.scratchReg(),
                    js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
            output);

  // Guard the ExpandoAndGeneration* matches the proxy's ExpandoAndGeneration
  // private slot.
  branchTestValue(Assembler::NotEqual, output,
                  PrivateValue(expandoAndGeneration), fail);

  // Guard expandoAndGeneration->generation matches the expected generation.
  Address generationAddr(output.payloadOrValueReg(),
                         JS::ExpandoAndGeneration::offsetOfGeneration());
  branch64(Assembler::NotEqual, generationAddr, Imm64(generation), fail);

  // Load expandoAndGeneration->expando into the output Value register.
  loadValue(Address(output.payloadOrValueReg(),
                    JS::ExpandoAndGeneration::offsetOfExpando()),
            output);
}

void MacroAssembler::loadJitActivation(Register dest) {
  loadJSContext(dest);
  loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
}

void MacroAssembler::guardSpecificAtom(Register str, JSAtom* atom,
                                       Register scratch,
                                       const LiveRegisterSet& volatileRegs,
                                       Label* fail) {
  Label done;
  branchPtr(Assembler::Equal, str, ImmGCPtr(atom), &done);

  // The pointers are not equal, so if the input string is also an atom it
  // must be a different string.
  branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
               Imm32(JSString::ATOM_BIT), fail);

  // Check the length.
  branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
           Imm32(atom->length()), fail);

  // Compare short atoms using inline assembly.
  if (canCompareStringCharsInline(atom)) {
    // Pure two-byte strings can't be equal to Latin-1 strings.
    if (atom->hasTwoByteChars()) {
      JS::AutoCheckCannotGC nogc;
      if (!mozilla::IsUtf16Latin1(atom->twoByteRange(nogc))) {
        branchLatin1String(str, fail);
      }
    }

    // Call into the VM when the input is a rope or has a different encoding.
    Label vmCall;

    // Load the input string's characters.
    Register stringChars = scratch;
    loadStringCharsForCompare(str, atom, stringChars, &vmCall);

    // Start comparing character by character.
    branchIfNotStringCharsEquals(stringChars, atom, fail);

    // Falls through if both strings are equal.
    jump(&done);

    bind(&vmCall);
  }

  // We have a non-atomized string with the same length. Call a helper
  // function to do the comparison.
  PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSString* str1, JSString* str2);
  setupUnalignedABICall(scratch);
  movePtr(ImmGCPtr(atom), scratch);
  passABIArg(scratch);
  passABIArg(str);
  callWithABI<Fn, EqualStringsHelperPure>();
  storeCallPointerResult(scratch);

  MOZ_ASSERT(!volatileRegs.has(scratch));
  PopRegsInMask(volatileRegs);
  branchIfFalseBool(scratch, fail);

  bind(&done);
}

void MacroAssembler::guardStringToInt32(Register str, Register output,
                                        Register scratch,
                                        LiveRegisterSet volatileRegs,
                                        Label* fail) {
  Label vmCall, done;
  // Use indexed value as fast path if possible.
  loadStringIndexValue(str, output, &vmCall);
  jump(&done);

  bind(&vmCall);

  // Reserve space for holding the result int32_t of the call. Use
  // pointer-size to avoid misaligning the stack on 64-bit platforms.
  reserveStack(sizeof(uintptr_t));
  moveStackPtrTo(output);

  volatileRegs.takeUnchecked(scratch);
  if (output.volatile_()) {
    volatileRegs.addUnchecked(output);
  }
  PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSString* str, int32_t* result);
  setupUnalignedABICall(scratch);
  loadJSContext(scratch);
  passABIArg(scratch);
  passABIArg(str);
  passABIArg(output);
  callWithABI<Fn, GetInt32FromStringPure>();
  storeCallPointerResult(scratch);

  PopRegsInMask(volatileRegs);

  Label ok;
  branchIfTrueBool(scratch, &ok);
  {
    // OOM path, recovered by GetInt32FromStringPure.
    //
    // Use addToStackPtr instead of freeStack as freeStack tracks stack height
    // flow-insensitively, and using it twice would confuse the stack height
    // tracking.
    addToStackPtr(Imm32(sizeof(uintptr_t)));
    jump(fail);
  }
  bind(&ok);
  load32(Address(output, 0), output);
  freeStack(sizeof(uintptr_t));

  bind(&done);
}

void MacroAssembler::generateBailoutTail(Register scratch,
                                         Register bailoutInfo) {
  Label bailoutFailed;
  branchIfFalseBool(ReturnReg, &bailoutFailed);

  // Finish bailing out to Baseline.
  {
    // Prepare a register set for use in this case.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    MOZ_ASSERT_IF(!IsHiddenSP(getStackPointer()),
                  !regs.has(AsRegister(getStackPointer())));
    regs.take(bailoutInfo);

    Register temp = regs.takeAny();

#ifdef DEBUG
    // Assert the stack pointer points to the JitFrameLayout header. Copying
    // starts here.
    Label ok;
    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)),
            temp);
    branchStackPtr(Assembler::Equal, temp, &ok);
    assumeUnreachable("Unexpected stack pointer value");
    bind(&ok);
#endif

    Register copyCur = regs.takeAny();
    Register copyEnd = regs.takeAny();

    // Copy data onto stack.
    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)),
            copyCur);
    loadPtr(
        Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)),
        copyEnd);
    {
      Label copyLoop;
      Label endOfCopy;
      bind(&copyLoop);
      branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
      subPtr(Imm32(sizeof(uintptr_t)), copyCur);
      subFromStackPtr(Imm32(sizeof(uintptr_t)));
      loadPtr(Address(copyCur, 0), temp);
      storePtr(temp, Address(getStackPointer(), 0));
      jump(&copyLoop);
      bind(&endOfCopy);
    }

    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)),
            FramePointer);

    // Enter exit frame for the FinishBailoutToBaseline call.
    pushFrameDescriptor(FrameType::BaselineJS);
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
    push(FramePointer);

    // No GC things to mark on the stack, push a bare token.
    loadJSContext(scratch);
    enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);

    // Save needed values onto stack temporarily.
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));

    // Call a stub to free allocated memory and create arguments objects.
    using Fn = bool (*)(BaselineBailoutInfo* bailoutInfoArg);
    setupUnalignedABICall(temp);
    passABIArg(bailoutInfo);
    callWithABI<Fn, FinishBailoutToBaseline>(
        ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
    branchIfFalseBool(ReturnReg, exceptionLabel());

    // Restore values where they need to be and resume execution.
    AllocatableGeneralRegisterSet enterRegs(GeneralRegisterSet::All());
    MOZ_ASSERT(!enterRegs.has(FramePointer));
    Register jitcodeReg = enterRegs.takeAny();

    pop(jitcodeReg);

    // Discard exit frame.
    addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));

    jump(jitcodeReg);
  }

  bind(&bailoutFailed);
  {
    // jit::Bailout or jit::InvalidationBailout failed and returned false. The
    // Ion frame has already been discarded and the stack pointer points to the
    // JitFrameLayout header. Turn it into an ExitFrameLayout, similar to
    // EnsureUnwoundJitExitFrame, and call the exception handler.
    loadJSContext(scratch);
    enterFakeExitFrame(scratch, scratch, ExitFrameType::UnwoundJit);
    jump(exceptionLabel());
  }
}

void MacroAssembler::loadJitCodeRaw(Register func, Register dest) {
  static_assert(BaseScript::offsetOfJitCodeRaw() ==
                    SelfHostedLazyScript::offsetOfJitCodeRaw(),
                "SelfHostedLazyScript and BaseScript must use same layout for "
                "jitCodeRaw_");
  static_assert(
      BaseScript::offsetOfJitCodeRaw() == wasm::JumpTableJitEntryOffset,
      "Wasm exported functions jit entries must use same layout for "
      "jitCodeRaw_");
  loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), dest);
  loadPtr(Address(dest, BaseScript::offsetOfJitCodeRaw()), dest);
}

void MacroAssembler::loadBaselineJitCodeRaw(Register func, Register dest,
                                            Label* failure) {
  // Load JitScript.
  loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), dest);
  if (failure) {
    branchIfScriptHasNoJitScript(dest, failure);
  }
  loadJitScript(dest, dest);

  // Load BaselineScript.
  loadPtr(Address(dest, JitScript::offsetOfBaselineScript()), dest);
  if (failure) {
    static_assert(BaselineDisabledScript == 0x1);
    branchPtr(Assembler::BelowOrEqual, dest, ImmWord(BaselineDisabledScript),
              failure);
  }

  // Load Baseline jitcode.
  loadPtr(Address(dest, BaselineScript::offsetOfMethod()), dest);
  loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
}

void MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest) {
  if (framePtr != dest) {
    movePtr(framePtr, dest);
  }
  subPtr(Imm32(BaselineFrame::Size()), dest);
}

static const uint8_t* ContextInlinedICScriptPtr(CompileRuntime* rt) {
  return (static_cast<const uint8_t*>(rt->mainContextPtr()) +
          JSContext::offsetOfInlinedICScript());
}

void MacroAssembler::storeICScriptInJSContext(Register icScript) {
  storePtr(icScript, AbsoluteAddress(ContextInlinedICScriptPtr(runtime())));
}

void MacroAssembler::handleFailure() {
  // Re-entry code is irrelevant because the exception will leave the
  // running function and never come back.
  TrampolinePtr excTail = runtime()->jitRuntime()->getExceptionTail();
  jump(excTail);
}

void MacroAssembler::assumeUnreachable(const char* output) {
#ifdef JS_MASM_VERBOSE
  if (!IsCompilingWasm()) {
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    Register temp = regs.takeAnyGeneral();

    using Fn = void (*)(const char* output);
    setupUnalignedABICall(temp);
    movePtr(ImmPtr(output), temp);
    passABIArg(temp);
    callWithABI<Fn, AssumeUnreachable>(ABIType::General,
                                       CheckUnsafeCallWithABI::DontCheckOther);

    PopRegsInMask(save);
  }
#endif

  breakpoint();
}

void MacroAssembler::printf(const char* output) {
#ifdef JS_MASM_VERBOSE
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(const char* output);
  setupUnalignedABICall(temp);
  movePtr(ImmPtr(output), temp);
  passABIArg(temp);
  callWithABI<Fn, Printf0>();

  PopRegsInMask(save);
#endif
}

void MacroAssembler::printf(const char* output, Register value) {
#ifdef JS_MASM_VERBOSE
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  regs.takeUnchecked(value);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(const char* output, uintptr_t value);
  setupUnalignedABICall(temp);
  movePtr(ImmPtr(output), temp);
  passABIArg(temp);
  passABIArg(value);
  callWithABI<Fn, Printf1>();

  PopRegsInMask(save);
#endif
}

void MacroAssembler::convertInt32ValueToDouble(ValueOperand val) {
  Label done;
  branchTestInt32(Assembler::NotEqual, val, &done);
  unboxInt32(val, val.scratchReg());
  ScratchDoubleScope fpscratch(*this);
  convertInt32ToDouble(val.scratchReg(), fpscratch);
  boxDouble(fpscratch, val, fpscratch);
  bind(&done);
}

void MacroAssembler::convertValueToFloatingPoint(ValueOperand value,
                                                 FloatRegister output,
                                                 Label* fail,
                                                 MIRType outputType) {
  Label isDouble, isInt32, isBool, isNull, done;

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    branchTestDouble(Assembler::Equal, tag, &isDouble);
    branchTestInt32(Assembler::Equal, tag, &isInt32);
    branchTestBoolean(Assembler::Equal, tag, &isBool);
    branchTestNull(Assembler::Equal, tag, &isNull);
    branchTestUndefined(Assembler::NotEqual, tag, fail);
  }

  // fall-through: undefined
  loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output,
                            outputType);
  jump(&done);

  bind(&isNull);
  loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
  jump(&done);

  bind(&isBool);
  boolValueToFloatingPoint(value, output, outputType);
  jump(&done);

  bind(&isInt32);
  int32ValueToFloatingPoint(value, output, outputType);
  jump(&done);

  // On some non-multiAlias platforms, unboxDouble may use the scratch register,
  // so do not merge code paths here.
  bind(&isDouble);
  if (outputType == MIRType::Float32 && hasMultiAlias()) {
    ScratchDoubleScope tmp(*this);
    unboxDouble(value, tmp);
    convertDoubleToFloat32(tmp, output);
  } else {
    FloatRegister tmp = output.asDouble();
    unboxDouble(value, tmp);
    if (outputType == MIRType::Float32) {
      convertDoubleToFloat32(tmp, output);
    }
  }

  bind(&done);
}

void MacroAssembler::outOfLineTruncateSlow(FloatRegister src, Register dest,
                                           bool widenFloatToDouble,
                                           bool compilingWasm,
                                           wasm::BytecodeOffset callOffset) {
  if (compilingWasm) {
    Push(InstanceReg);
  }
  int32_t framePushedAfterInstance = framePushed();

#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  ScratchDoubleScope fpscratch(*this);
  if (widenFloatToDouble) {
    convertFloat32ToDouble(src, fpscratch);
    src = fpscratch;
  }
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
  FloatRegister srcSingle;
  if (widenFloatToDouble) {
    MOZ_ASSERT(src.isSingle());
    srcSingle = src;
    src = src.asDouble();
    Push(srcSingle);
    convertFloat32ToDouble(srcSingle, src);
  }
#else
  MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
#endif

  MOZ_ASSERT(src.isDouble());

  if (compilingWasm) {
    int32_t instanceOffset = framePushed() - framePushedAfterInstance;
    setupWasmABICall();
    passABIArg(src, ABIType::Float64);
    callWithABI(callOffset, wasm::SymbolicAddress::ToInt32,
                mozilla::Some(instanceOffset));
  } else {
    using Fn = int32_t (*)(double);
    setupUnalignedABICall(dest);
    passABIArg(src, ABIType::Float64);
    callWithABI<Fn, JS::ToInt32>(ABIType::General,
                                 CheckUnsafeCallWithABI::DontCheckOther);
  }
  storeCallInt32Result(dest);

#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  // Nothing to do here.
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
  if (widenFloatToDouble) {
    Pop(srcSingle);
  }
#else
  MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
#endif

  if (compilingWasm) {
    Pop(InstanceReg);
  }
}

void MacroAssembler::convertDoubleToInt(FloatRegister src, Register output,
                                        FloatRegister temp, Label* truncateFail,
                                        Label* fail,
                                        IntConversionBehavior behavior) {
  switch (behavior) {
    case IntConversionBehavior::Normal:
    case IntConversionBehavior::NegativeZeroCheck:
      convertDoubleToInt32(
          src, output, fail,
          behavior == IntConversionBehavior::NegativeZeroCheck);
      break;
    case IntConversionBehavior::Truncate:
      branchTruncateDoubleMaybeModUint32(src, output,
                                         truncateFail ? truncateFail : fail);
      break;
    case IntConversionBehavior::ClampToUint8:
      // Clamping clobbers the input register, so use a temp.
      if (src != temp) {
        moveDouble(src, temp);
      }
      clampDoubleToUint8(temp, output);
      break;
  }
}

void MacroAssembler::convertValueToInt(
    ValueOperand value, Label* handleStringEntry, Label* handleStringRejoin,
    Label* truncateDoubleSlow, Register stringReg, FloatRegister temp,
    Register output, Label* fail, IntConversionBehavior behavior,
    IntConversionInputKind conversion) {
  Label done, isInt32, isBool, isDouble, isNull, isString;

  bool handleStrings = (behavior == IntConversionBehavior::Truncate ||
                        behavior == IntConversionBehavior::ClampToUint8) &&
                       handleStringEntry && handleStringRejoin;

  MOZ_ASSERT_IF(handleStrings, conversion == IntConversionInputKind::Any);

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    branchTestInt32(Equal, tag, &isInt32);
    if (conversion == IntConversionInputKind::Any ||
        conversion == IntConversionInputKind::NumbersOrBoolsOnly) {
      branchTestBoolean(Equal, tag, &isBool);
    }
    branchTestDouble(Equal, tag, &isDouble);

    if (conversion == IntConversionInputKind::Any) {
      // If we are not truncating, we fail for anything that's not
      // null. Otherwise we might be able to handle strings and undefined.
      switch (behavior) {
        case IntConversionBehavior::Normal:
        case IntConversionBehavior::NegativeZeroCheck:
          branchTestNull(Assembler::NotEqual, tag, fail);
          break;

        case IntConversionBehavior::Truncate:
        case IntConversionBehavior::ClampToUint8:
          branchTestNull(Equal, tag, &isNull);
          if (handleStrings) {
            branchTestString(Equal, tag, &isString);
          }
          branchTestUndefined(Assembler::NotEqual, tag, fail);
          break;
      }
    } else {
      jump(fail);
    }
  }

  // The value is null or undefined in truncation contexts - just emit 0.
  if (conversion == IntConversionInputKind::Any) {
    if (isNull.used()) {
      bind(&isNull);
    }
    mov(ImmWord(0), output);
    jump(&done);
  }

  // |output| needs to be different from |stringReg| to load string indices.
  bool handleStringIndices = handleStrings && output != stringReg;

  // First try loading a string index. If that fails, try converting a string
  // into a double, then jump to the double case.
  Label handleStringIndex;
  if (handleStrings) {
    bind(&isString);
    unboxString(value, stringReg);
    if (handleStringIndices) {
      loadStringIndexValue(stringReg, output, handleStringEntry);
      jump(&handleStringIndex);
    } else {
      jump(handleStringEntry);
    }
  }

  // Try converting double into integer.
  if (isDouble.used() || handleStrings) {
    if (isDouble.used()) {
      bind(&isDouble);
      unboxDouble(value, temp);
    }

    if (handleStrings) {
      bind(handleStringRejoin);
    }

    convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
    jump(&done);
  }

  // Just unbox a bool, the result is 0 or 1.
  if (isBool.used()) {
    bind(&isBool);
    unboxBoolean(value, output);
    jump(&done);
  }

  // Integers can be unboxed.
  if (isInt32.used() || handleStringIndices) {
    if (isInt32.used()) {
      bind(&isInt32);
      unboxInt32(value, output);
    }

    if (handleStringIndices) {
      bind(&handleStringIndex);
    }

    if (behavior == IntConversionBehavior::ClampToUint8) {
      clampIntToUint8(output);
    }
  }

  bind(&done);
}

void MacroAssembler::finish() {
  if (failureLabel_.used()) {
    bind(&failureLabel_);
    handleFailure();
  }

  MacroAssemblerSpecific::finish();

  MOZ_RELEASE_ASSERT(
      size() <= MaxCodeBytesPerProcess,
      "AssemblerBuffer should ensure we don't exceed MaxCodeBytesPerProcess");

  if (bytesNeeded() > MaxCodeBytesPerProcess) {
    setOOM();
  }
}

void MacroAssembler::link(JitCode* code) {
  MOZ_ASSERT(!oom());
  linkProfilerCallSites(code);
}

MacroAssembler::AutoProfilerCallInstrumentation::
    AutoProfilerCallInstrumentation(MacroAssembler& masm) {
  if (!masm.emitProfilingInstrumentation_) {
    return;
  }

  Register reg = CallTempReg0;
  Register reg2 = CallTempReg1;
  masm.push(reg);
  masm.push(reg2);

  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
  masm.loadJSContext(reg2);
  masm.loadPtr(Address(reg2, offsetof(JSContext, profilingActivation_)), reg2);
  masm.storePtr(reg,
                Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));

  masm.appendProfilerCallSite(label);

  masm.pop(reg2);
  masm.pop(reg);
}

void MacroAssembler::linkProfilerCallSites(JitCode* code) {
  for (size_t i = 0; i < profilerCallSites_.length(); i++) {
    CodeOffset offset = profilerCallSites_[i];
    CodeLocationLabel location(code, offset);
    PatchDataWithValueCheck(location, ImmPtr(location.raw()),
                            ImmPtr((void*)-1));
  }
}

void MacroAssembler::alignJitStackBasedOnNArgs(Register nargs,
                                               bool countIncludesThis) {
  // The stack should already be aligned to the size of a value.
  assertStackAlignment(sizeof(Value), 0);

  static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
                "JitStackValueAlignment is either 1 or 2.");
  if (JitStackValueAlignment == 1) {
    return;
  }

  // A jit frame is composed of the following:
  //
  // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
  //                                    \________JitFrameLayout_________/
  // (The stack grows this way --->)
  //
  // We want to ensure that |raddr|, the return address, is 16-byte aligned.
  // (Note: if 8-byte alignment was sufficient, we would have already
  // returned above.)

  // JitFrameLayout does not affect the alignment, so we can ignore it.
  static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
                "JitFrameLayout doesn't affect stack alignment");

  // Therefore, we need to ensure that |this| is aligned.
  // This implies that |argN| must be aligned if N is even,
  // and offset by |sizeof(Value)| if N is odd.

  // Depending on the context of the caller, it may be easier to pass in a
  // register that has already been modified to include |this|. If that is the
  // case, we want to flip the direction of the test.
  Assembler::Condition condition =
      countIncludesThis ? Assembler::NonZero : Assembler::Zero;

  Label alignmentIsOffset, end;
  branchTestPtr(condition, nargs, Imm32(1), &alignmentIsOffset);

  // |argN| should be aligned to 16 bytes.
  andToStackPtr(Imm32(~(JitStackAlignment - 1)));
  jump(&end);

  // |argN| should be offset by 8 bytes from 16-byte alignment.
  // We already know that it is 8-byte aligned, so the only possibilities are:
  // a) It is 16-byte aligned, and we must offset it by 8 bytes.
  // b) It is not 16-byte aligned, and therefore already has the right offset.
  // Therefore, we test to see if it is 16-byte aligned, and adjust it if it is.
  bind(&alignmentIsOffset);
  branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
  subFromStackPtr(Imm32(sizeof(Value)));

  bind(&end);
}

void MacroAssembler::alignJitStackBasedOnNArgs(uint32_t argc,
                                               bool countIncludesThis) {
  // The stack should already be aligned to the size of a value.
  assertStackAlignment(sizeof(Value), 0);

  static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
                "JitStackValueAlignment is either 1 or 2.");
  if (JitStackValueAlignment == 1) {
    return;
  }

  // See above for full explanation.
  uint32_t nArgs = argc + !countIncludesThis;
  if (nArgs % 2 == 0) {
    // |argN| should be 16-byte aligned.
    andToStackPtr(Imm32(~(JitStackAlignment - 1)));
  } else {
    // |argN| must be 16-byte aligned if argc is even,
    // and offset by 8 if argc is odd.
    Label end;
    branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
    subFromStackPtr(Imm32(sizeof(Value)));
    bind(&end);
    assertStackAlignment(JitStackAlignment, sizeof(Value));
  }
}

// ===============================================================

MacroAssembler::MacroAssembler(TempAllocator& alloc,
                               CompileRuntime* maybeRuntime,
                               CompileRealm* maybeRealm)
    : maybeRuntime_(maybeRuntime),
      maybeRealm_(maybeRealm),
      framePushed_(0),
#ifdef DEBUG
      inCall_(false),
#endif
      dynamicAlignment_(false),
      emitProfilingInstrumentation_(false) {
  moveResolver_.setAllocator(alloc);
}

StackMacroAssembler::StackMacroAssembler(JSContext* cx, TempAllocator& alloc)
    : MacroAssembler(alloc, CompileRuntime::get(cx->runtime()),
                     CompileRealm::get(cx->realm())) {}

IonHeapMacroAssembler::IonHeapMacroAssembler(TempAllocator& alloc,
                                             CompileRealm* realm)
    : MacroAssembler(alloc, realm->runtime(), realm) {
  MOZ_ASSERT(CurrentThreadIsIonCompiling());
}

WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc, bool limitedSize)
    : MacroAssembler(alloc) {
#if defined(JS_CODEGEN_ARM64)
  // Stubs + builtins + the baseline compiler all require the native SP,
  // not the PSP.
  SetStackPointer64(sp);
#endif
  if (!limitedSize) {
    setUnlimitedBuffer();
  }
}

WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc,
                                       const wasm::ModuleEnvironment& env,
                                       bool limitedSize)
    : MacroAssembler(alloc) {
#if defined(JS_CODEGEN_ARM64)
  // Stubs + builtins + the baseline compiler all require the native SP,
  // not the PSP.
  SetStackPointer64(sp);
#endif
  if (!limitedSize) {
    setUnlimitedBuffer();
  }
}

bool MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr,
                                             AutoSaveLiveRegisters& save) {
  return buildOOLFakeExitFrame(fakeReturnAddr);
}

#ifndef JS_CODEGEN_ARM64
void MacroAssembler::subFromStackPtr(Register reg) {
  subPtr(reg, getStackPointer());
}
#endif  // JS_CODEGEN_ARM64

//{{{ check_macroassembler_style
// ===============================================================
// Stack manipulation functions.

void MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set) {
  PushRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}

void MacroAssembler::PopRegsInMask(LiveRegisterSet set) {
  PopRegsInMaskIgnore(set, LiveRegisterSet());
}

void MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set) {
  PopRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}

void MacroAssembler::Push(PropertyKey key, Register scratchReg) {
  if (key.isGCThing()) {
    // If we're pushing a gcthing, then we can't just push the tagged key
    // value since the GC won't have any idea that the push instruction
    // carries a reference to a gcthing. Need to unpack the pointer,
    // push it using ImmGCPtr, and then rematerialize the PropertyKey at
    // runtime.

    if (key.isString()) {
      JSString* str = key.toString();
      MOZ_ASSERT((uintptr_t(str) & PropertyKey::TypeMask) == 0);
      static_assert(PropertyKey::StringTypeTag == 0,
                    "need to orPtr StringTypeTag if it's not 0");
      Push(ImmGCPtr(str));
    } else {
      MOZ_ASSERT(key.isSymbol());
      movePropertyKey(key, scratchReg);
      Push(scratchReg);
    }
  } else {
    MOZ_ASSERT(key.isInt());
    Push(ImmWord(key.asRawBits()));
  }
}

void MacroAssembler::movePropertyKey(PropertyKey key, Register dest) {
  if (key.isGCThing()) {
    // See comment in |Push(PropertyKey, ...)| above for an explanation.
    if (key.isString()) {
      JSString* str = key.toString();
      MOZ_ASSERT((uintptr_t(str) & PropertyKey::TypeMask) == 0);
      static_assert(PropertyKey::StringTypeTag == 0,
                    "need to orPtr StringTypeTag tag if it's not 0");
      movePtr(ImmGCPtr(str), dest);
    } else {
      MOZ_ASSERT(key.isSymbol());
      JS::Symbol* sym = key.toSymbol();
      movePtr(ImmGCPtr(sym), dest);
      orPtr(Imm32(PropertyKey::SymbolTypeTag), dest);
    }
  } else {
    MOZ_ASSERT(key.isInt());
    movePtr(ImmWord(key.asRawBits()), dest);
  }
}

void MacroAssembler::Push(TypedOrValueRegister v) {
  if (v.hasValue()) {
    Push(v.valueReg());
  } else if (IsFloatingPointType(v.type())) {
    FloatRegister reg = v.typedReg().fpu();
    if (v.type() == MIRType::Float32) {
      ScratchDoubleScope fpscratch(*this);
      convertFloat32ToDouble(reg, fpscratch);
      PushBoxed(fpscratch);
    } else {
      PushBoxed(reg);
    }
  } else {
    Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
  }
}

void MacroAssembler::Push(const ConstantOrRegister& v) {
  if (v.constant()) {
    Push(v.value());
  } else {
    Push(v.reg());
  }
}

void MacroAssembler::Push(const Address& addr) {
  push(addr);
  framePushed_ += sizeof(uintptr_t);
}

void MacroAssembler::Push(const ValueOperand& val) {
  pushValue(val);
  framePushed_ += sizeof(Value);
}

void MacroAssembler::Push(const Value& val) {
  pushValue(val);
  framePushed_ += sizeof(Value);
}

void MacroAssembler::Push(JSValueType type, Register reg) {
  pushValue(type, reg);
  framePushed_ += sizeof(Value);
}

void MacroAssembler::Push(const Register64 reg) {
#if JS_BITS_PER_WORD == 64
  Push(reg.reg);
#else
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Big-endian not supported.");
  Push(reg.high);
  Push(reg.low);
#endif
}

void MacroAssembler::PushEmptyRooted(VMFunctionData::RootType rootType) {
  switch (rootType) {
    case VMFunctionData::RootNone:
      MOZ_CRASH("Handle must have root type");
    case VMFunctionData::RootObject:
    case VMFunctionData::RootString:
    case VMFunctionData::RootCell:
    case VMFunctionData::RootBigInt:
      Push(ImmPtr(nullptr));
      break;
    case VMFunctionData::RootValue:
      Push(UndefinedValue());
      break;
    case VMFunctionData::RootId:
      Push(ImmWord(JS::PropertyKey::Void().asRawBits()));
      break;
  }
}

void MacroAssembler::adjustStack(int amount) {
  if (amount > 0) {
    freeStack(amount);
  } else if (amount < 0) {
    reserveStack(-amount);
  }
}

void MacroAssembler::freeStack(uint32_t amount) {
  MOZ_ASSERT(amount <= framePushed_);
  if (amount) {
    addToStackPtr(Imm32(amount));
  }
  framePushed_ -= amount;
}

void MacroAssembler::freeStack(Register amount) { addToStackPtr(amount); }

void MacroAssembler::reserveVMFunctionOutParamSpace(const VMFunctionData& f) {
  switch (f.outParam) {
    case Type_Handle:
      PushEmptyRooted(f.outParamRootType);
      break;

    case Type_Value:
    case Type_Int32:
    case Type_Bool:
    case Type_Pointer:
    case Type_Double:
      reserveStack(f.sizeOfOutParamStackSlot());
      break;

    case Type_Void:
      break;

    default:
      MOZ_CRASH("Unexpected outparam type");
  }
}

void MacroAssembler::loadVMFunctionOutParam(const VMFunctionData& f,
                                            const Address& addr) {
  switch (f.outParam) {
    case Type_Handle:
      switch (f.outParamRootType) {
        case VMFunctionData::RootNone:
          MOZ_CRASH("Handle must have root type");
        case VMFunctionData::RootObject:
        case VMFunctionData::RootString:
        case VMFunctionData::RootCell:
        case VMFunctionData::RootBigInt:
        case VMFunctionData::RootId:
          loadPtr(addr, ReturnReg);
          break;
        case VMFunctionData::RootValue:
          loadValue(addr, JSReturnOperand);
          break;
      }
      break;

    case Type_Value:
      loadValue(addr, JSReturnOperand);
      break;

    case Type_Int32:
      load32(addr, ReturnReg);
      break;

    case Type_Bool:
      load8ZeroExtend(addr, ReturnReg);
      break;

    case Type_Double:
      loadDouble(addr, ReturnDoubleReg);
      break;

    case Type_Pointer:
      loadPtr(addr, ReturnReg);
      break;

    case Type_Void:
      break;

    default:
      MOZ_CRASH("Unexpected outparam type");
  }
}

// ===============================================================
// ABI function calls.
template <class ABIArgGeneratorT>
void MacroAssembler::setupABICallHelper() {
#ifdef DEBUG
  MOZ_ASSERT(!inCall_);
  inCall_ = true;
#endif

#ifdef JS_SIMULATOR
  signature_ = 0;
#endif

  // Reinitialize the ABIArg generator.
  abiArgs_ = ABIArgGeneratorT();

#if defined(JS_CODEGEN_ARM)
  // On ARM, we need to know what ABI we are using, either in the
  // simulator, or based on the configure flags.
#  if defined(JS_SIMULATOR_ARM)
  abiArgs_.setUseHardFp(UseHardFpABI());
#  elif defined(JS_CODEGEN_ARM_HARDFP)
  abiArgs_.setUseHardFp(true);
#  else
  abiArgs_.setUseHardFp(false);
#  endif
#endif

#if defined(JS_CODEGEN_MIPS32)
  // On MIPS, the system ABI uses general register pairs to encode double
  // arguments, after one or two integer-like arguments. Unfortunately, the
  // Lowering phase cannot express this at the moment, so we enforce the
  // system ABI here.
  abiArgs_.enforceO32ABI();
#endif
}

void MacroAssembler::setupNativeABICall() {
  setupABICallHelper<ABIArgGenerator>();
}

void MacroAssembler::setupWasmABICall() {
  MOZ_ASSERT(IsCompilingWasm(), "non-wasm should use setupAlignedABICall");
  setupABICallHelper<WasmABIArgGenerator>();

#if defined(JS_CODEGEN_ARM)
  // The builtin thunk does the FP -> GPR moving on soft-FP, so
  // use hard fp unconditionally.
  abiArgs_.setUseHardFp(true);
#endif
  dynamicAlignment_ = false;
}

void MacroAssembler::setupUnalignedABICallDontSaveRestoreSP() {
  andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  setFramePushed(0);  // Required for aligned callWithABI.
  setupAlignedABICall();
}

void MacroAssembler::setupAlignedABICall() {
  MOZ_ASSERT(!IsCompilingWasm(), "wasm should use setupWasmABICall");
  setupNativeABICall();
  dynamicAlignment_ = false;
}

void MacroAssembler::passABIArg(const MoveOperand& from, ABIType type) {
  MOZ_ASSERT(inCall_);
  appendSignatureType(type);

  ABIArg arg;
  MoveOp::Type moveType;
  switch (type) {
    case ABIType::Float32:
      arg = abiArgs_.next(MIRType::Float32);
      moveType = MoveOp::FLOAT32;
      break;
    case ABIType::Float64:
      arg = abiArgs_.next(MIRType::Double);
      moveType = MoveOp::DOUBLE;
      break;
    case ABIType::General:
      arg = abiArgs_.next(MIRType::Pointer);
      moveType = MoveOp::GENERAL;
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }

  MoveOperand to(*this, arg);
  if (from == to) {
    return;
  }

  propagateOOM(moveResolver_.addMove(from, to, moveType));
}

void MacroAssembler::callWithABINoProfiler(void* fun, ABIType result,
                                           CheckUnsafeCallWithABI check) {
  appendSignatureType(result);
#ifdef JS_SIMULATOR
  fun = Simulator::RedirectNativeFunction(fun, signature());
#endif

  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);

#ifdef DEBUG
  if (check == CheckUnsafeCallWithABI::Check) {
    push(ReturnReg);
    loadJSContext(ReturnReg);
    Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
    store32(Imm32(1), flagAddr);
    pop(ReturnReg);
    // On arm64, SP may be < PSP now (that's OK).
    // eg testcase: tests/bug1375074.js
  }
#endif

  call(ImmPtr(fun));

  callWithABIPost(stackAdjust, result);

#ifdef DEBUG
  if (check == CheckUnsafeCallWithABI::Check) {
    Label ok;
    push(ReturnReg);
    loadJSContext(ReturnReg);
    Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
    branch32(Assembler::Equal, flagAddr, Imm32(0), &ok);
    assumeUnreachable("callWithABI: callee did not use AutoUnsafeCallWithABI");
    bind(&ok);
    pop(ReturnReg);
    // On arm64, SP may be < PSP now (that's OK).
    // eg testcase: tests/bug1375074.js
  }
#endif
}

CodeOffset MacroAssembler::callWithABI(wasm::BytecodeOffset bytecode,
                                       wasm::SymbolicAddress imm,
                                       mozilla::Maybe<int32_t> instanceOffset,
                                       ABIType result) {
  MOZ_ASSERT(wasm::NeedsBuiltinThunk(imm));

  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust, /* callFromWasm = */ true);

  // The instance register is used in builtin thunks and must be set.
  if (instanceOffset) {
    loadPtr(Address(getStackPointer(), *instanceOffset + stackAdjust),
            InstanceReg);
  } else {
    MOZ_CRASH("instanceOffset is Nothing only for unsupported abi calls.");
  }
  CodeOffset raOffset = call(
      wasm::CallSiteDesc(bytecode.offset(), wasm::CallSite::Symbolic), imm);

  callWithABIPost(stackAdjust, result, /* callFromWasm = */ true);

  return raOffset;
}

void MacroAssembler::callDebugWithABI(wasm::SymbolicAddress imm,
                                      ABIType result) {
  MOZ_ASSERT(!wasm::NeedsBuiltinThunk(imm));
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust, /* callFromWasm = */ false);
  call(imm);
  callWithABIPost(stackAdjust, result, /* callFromWasm = */ false);
}

// ===============================================================
// Exit frame footer.

void MacroAssembler::linkExitFrame(Register cxreg, Register scratch) {
  loadPtr(Address(cxreg, JSContext::offsetOfActivation()), scratch);
  storeStackPtr(Address(scratch, JitActivation::offsetOfPackedExitFP()));
}

// ===============================================================
// Simple value-shuffling helpers, to hide MoveResolver verbosity
// in common cases.

void MacroAssembler::moveRegPair(Register src0, Register src1, Register dst0,
                                 Register dst1, MoveOp::Type type) {
  MoveResolver& moves = moveResolver();

  propagateOOM(moves.addMove(MoveOperand(src0), MoveOperand(dst0), type));
  propagateOOM(moves.addMove(MoveOperand(src1), MoveOperand(dst1), type));
  propagateOOM(moves.resolve());
  if (oom()) {
    return;
  }

  MoveEmitter emitter(*this);
  emitter.emit(moves);
  emitter.finish();
}

// ===============================================================
// Arithmetic functions

void MacroAssembler::pow32(Register base, Register power, Register dest,
                           Register temp1, Register temp2, Label* onOver) {
  // Inline int32-specialized implementation of js::powi with overflow
  // detection.

  move32(Imm32(1), dest);  // result = 1

  // x^y where x == 1 returns 1 for any y.
  Label done;
  branch32(Assembler::Equal, base, Imm32(1), &done);

  move32(base, temp1);   // runningSquare = x
  move32(power, temp2);  // n = y

  // x^y where y < 0 returns a non-int32 value for any x != 1. Except when y is
  // large enough so that the result is no longer representable as a double with
  // fractional parts. We can't easily determine when y is too large, so we bail
  // here.
  // Note: it's important for this condition to match the code in CacheIR.cpp
  // (CanAttachInt32Pow) to prevent failure loops.
  Label start;
  branchTest32(Assembler::NotSigned, power, power, &start);
  jump(onOver);

  Label loop;
  bind(&loop);

  // runningSquare *= runningSquare
  branchMul32(Assembler::Overflow, temp1, temp1, onOver);

  bind(&start);

  // if ((n & 1) != 0) result *= runningSquare
  Label even;
  branchTest32(Assembler::Zero, temp2, Imm32(1), &even);
  branchMul32(Assembler::Overflow, temp1, dest, onOver);
  bind(&even);

  // n >>= 1
  // if (n == 0) return result
  branchRshift32(Assembler::NonZero, Imm32(1), temp2, &loop);

  bind(&done);
}

void MacroAssembler::signInt32(Register input, Register output) {
  MOZ_ASSERT(input != output);

  move32(input, output);
  rshift32Arithmetic(Imm32(31), output);
  or32(Imm32(1), output);
  cmp32Move32(Assembler::Equal, input, Imm32(0), input, output);
}

void MacroAssembler::signDouble(FloatRegister input, FloatRegister output) {
  MOZ_ASSERT(input != output);

  Label done, zeroOrNaN, negative;
  loadConstantDouble(0.0, output);
  branchDouble(Assembler::DoubleEqualOrUnordered, input, output, &zeroOrNaN);
  branchDouble(Assembler::DoubleLessThan, input, output, &negative);

  loadConstantDouble(1.0, output);
  jump(&done);

  bind(&negative);
  loadConstantDouble(-1.0, output);
  jump(&done);

  bind(&zeroOrNaN);
  moveDouble(input, output);

  bind(&done);
}

void MacroAssembler::signDoubleToInt32(FloatRegister input, Register output,
                                       FloatRegister temp, Label* fail) {
  MOZ_ASSERT(input != temp);

  Label done, zeroOrNaN, negative;
  loadConstantDouble(0.0, temp);
  branchDouble(Assembler::DoubleEqualOrUnordered, input, temp, &zeroOrNaN);
  branchDouble(Assembler::DoubleLessThan, input, temp, &negative);

  move32(Imm32(1), output);
  jump(&done);

  bind(&negative);
  move32(Imm32(-1), output);
  jump(&done);

  // Fail for NaN and negative zero.
  bind(&zeroOrNaN);
  branchDouble(Assembler::DoubleUnordered, input, input, fail);

  // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
  // is -Infinity instead of Infinity.
  loadConstantDouble(1.0, temp);
  divDouble(input, temp);
  branchDouble(Assembler::DoubleLessThan, temp, input, fail);
  move32(Imm32(0), output);

  bind(&done);
}

void MacroAssembler::randomDouble(Register rng, FloatRegister dest,
                                  Register64 temp0, Register64 temp1) {
  using mozilla::non_crypto::XorShift128PlusRNG;

  static_assert(
      sizeof(XorShift128PlusRNG) == 2 * sizeof(uint64_t),
      "Code below assumes XorShift128PlusRNG contains two uint64_t values");

  Address state0Addr(rng, XorShift128PlusRNG::offsetOfState0());
  Address state1Addr(rng, XorShift128PlusRNG::offsetOfState1());

  Register64 s0Reg = temp0;
  Register64 s1Reg = temp1;

  // uint64_t s1 = mState[0];
  load64(state0Addr, s1Reg);

  // s1 ^= s1 << 23;
  move64(s1Reg, s0Reg);
  lshift64(Imm32(23), s1Reg);
  xor64(s0Reg, s1Reg);

  // s1 ^= s1 >> 17;
  move64(s1Reg, s0Reg);
  rshift64(Imm32(17), s1Reg);
  xor64(s0Reg, s1Reg);

  // const uint64_t s0 = mState[1];
  load64(state1Addr, s0Reg);

  // mState[0] = s0;
  store64(s0Reg, state0Addr);

  // s1 ^= s0;
  xor64(s0Reg, s1Reg);

  // s1 ^= s0 >> 26;
  rshift64(Imm32(26), s0Reg);
  xor64(s0Reg, s1Reg);

  // mState[1] = s1;
  store64(s1Reg, state1Addr);

  // s1 += mState[0];
  load64(state0Addr, s0Reg);
  add64(s0Reg, s1Reg);

  // See comment in XorShift128PlusRNG::nextDouble().
  static constexpr int MantissaBits =
      mozilla::FloatingPoint<double>::kExponentShift + 1;
  static constexpr double ScaleInv = double(1) / (1ULL << MantissaBits);

  and64(Imm64((1ULL << MantissaBits) - 1), s1Reg);

  // Note: we know s1Reg isn't signed after the and64 so we can use the faster
  // convertInt64ToDouble instead of convertUInt64ToDouble.
  convertInt64ToDouble(s1Reg, dest);

  // dest *= ScaleInv
  mulDoublePtr(ImmPtr(&ScaleInv), s0Reg.scratchReg(), dest);
}

void MacroAssembler::sameValueDouble(FloatRegister left, FloatRegister right,
                                     FloatRegister temp, Register dest) {
  Label nonEqual, isSameValue, isNotSameValue;
  branchDouble(Assembler::DoubleNotEqualOrUnordered, left, right, &nonEqual);
  {
    // First, test for being equal to 0.0, which also includes -0.0.
    loadConstantDouble(0.0, temp);
    branchDouble(Assembler::DoubleNotEqual, left, temp, &isSameValue);

    // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
    // is -Infinity instead of Infinity.
    Label isNegInf;
    loadConstantDouble(1.0, temp);
    divDouble(left, temp);
    branchDouble(Assembler::DoubleLessThan, temp, left, &isNegInf);
    {
      loadConstantDouble(1.0, temp);
      divDouble(right, temp);
      branchDouble(Assembler::DoubleGreaterThan, temp, right, &isSameValue);
      jump(&isNotSameValue);
    }
    bind(&isNegInf);
    {
      loadConstantDouble(1.0, temp);
      divDouble(right, temp);
      branchDouble(Assembler::DoubleLessThan, temp, right, &isSameValue);
      jump(&isNotSameValue);
    }
  }
  bind(&nonEqual);
  {
    // Test if both values are NaN.
    branchDouble(Assembler::DoubleOrdered, left, left, &isNotSameValue);
    branchDouble(Assembler::DoubleOrdered, right, right, &isNotSameValue);
  }

  Label done;
  bind(&isSameValue);
  move32(Imm32(1), dest);
  jump(&done);

  bind(&isNotSameValue);
  move32(Imm32(0), dest);

  bind(&done);
}

void MacroAssembler::minMaxArrayInt32(Register array, Register result,
                                      Register temp1, Register temp2,
                                      Register temp3, bool isMax, Label* fail) {
  // array must be a packed array. Load its elements.
  Register elements = temp1;
  loadPtr(Address(array, NativeObject::offsetOfElements()), elements);

  // Load the length and guard that it is non-zero.
  Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp3);
  branchTest32(Assembler::Zero, temp3, temp3, fail);

  // Compute the address of the last element.
  Register elementsEnd = temp2;
  BaseObjectElementIndex elementsEndAddr(elements, temp3,
                                         -int32_t(sizeof(Value)));
  computeEffectiveAddress(elementsEndAddr, elementsEnd);

  // Load the first element into result.
  fallibleUnboxInt32(Address(elements, 0), result, fail);

  Label loop, done;
  bind(&loop);

  // Check whether we're done.
  branchPtr(Assembler::Equal, elements, elementsEnd, &done);

  // If not, advance to the next element and load it.
  addPtr(Imm32(sizeof(Value)), elements);
  fallibleUnboxInt32(Address(elements, 0), temp3, fail);

  // Update result if necessary.
  Assembler::Condition cond =
      isMax ? Assembler::GreaterThan : Assembler::LessThan;
  cmp32Move32(cond, temp3, result, temp3, result);

  jump(&loop);
  bind(&done);
}

void MacroAssembler::minMaxArrayNumber(Register array, FloatRegister result,
                                       FloatRegister floatTemp, Register temp1,
                                       Register temp2, bool isMax,
                                       Label* fail) {
  // array must be a packed array. Load its elements.
  Register elements = temp1;
  loadPtr(Address(array, NativeObject::offsetOfElements()), elements);

  // Load the length and check if the array is empty.
  Label isEmpty;
  Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp2);
  branchTest32(Assembler::Zero, temp2, temp2, &isEmpty);

  // Compute the address of the last element.
  Register elementsEnd = temp2;
  BaseObjectElementIndex elementsEndAddr(elements, temp2,
                                         -int32_t(sizeof(Value)));
  computeEffectiveAddress(elementsEndAddr, elementsEnd);

  // Load the first element into result.
  ensureDouble(Address(elements, 0), result, fail);

  Label loop, done;
  bind(&loop);

  // Check whether we're done.
  branchPtr(Assembler::Equal, elements, elementsEnd, &done);

  // If not, advance to the next element and load it into floatTemp.
  addPtr(Imm32(sizeof(Value)), elements);
  ensureDouble(Address(elements, 0), floatTemp, fail);

  // Update result if necessary.
  if (isMax) {
    maxDouble(floatTemp, result, /* handleNaN = */ true);
  } else {
    minDouble(floatTemp, result, /* handleNaN = */ true);
  }

  jump(&loop);

  // With no arguments, min/max return +Infinity/-Infinity respectively.
  bind(&isEmpty);
  if (isMax) {
    loadConstantDouble(mozilla::NegativeInfinity<double>(), result);
  } else {
    loadConstantDouble(mozilla::PositiveInfinity<double>(), result);
  }

  bind(&done);
}

void MacroAssembler::branchIfNotRegExpPrototypeOptimizable(
    Register proto, Register temp, const GlobalObject* maybeGlobal,
    Label* fail) {
  if (maybeGlobal) {
    movePtr(ImmGCPtr(maybeGlobal), temp);
    loadPrivate(Address(temp, GlobalObject::offsetOfGlobalDataSlot()), temp);
  } else {
    loadGlobalObjectData(temp);
  }
  size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                  RegExpRealm::offsetOfOptimizableRegExpPrototypeShape();
  loadPtr(Address(temp, offset), temp);
  branchTestObjShapeUnsafe(Assembler::NotEqual, proto, temp, fail);
}

void MacroAssembler::branchIfNotRegExpInstanceOptimizable(
    Register regexp, Register temp, const GlobalObject* maybeGlobal,
    Label* label) {
  if (maybeGlobal) {
    movePtr(ImmGCPtr(maybeGlobal), temp);
    loadPrivate(Address(temp, GlobalObject::offsetOfGlobalDataSlot()), temp);
  } else {
    loadGlobalObjectData(temp);
  }
  size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                  RegExpRealm::offsetOfOptimizableRegExpInstanceShape();
  loadPtr(Address(temp, offset), temp);
  branchTestObjShapeUnsafe(Assembler::NotEqual, regexp, temp, label);
}

void MacroAssembler::loadRegExpLastIndex(Register regexp, Register string,
                                         Register lastIndex,
                                         Label* notFoundZeroLastIndex) {
  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
  Address stringLength(string, JSString::offsetOfLength());

  Label notGlobalOrSticky, loadedLastIndex;

  branchTest32(Assembler::Zero, flagsSlot,
               Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
               &notGlobalOrSticky);
  {
    // It's a global or sticky regular expression. Emit the following code:
    //
    //   lastIndex = regexp.lastIndex
    //   if lastIndex > string.length:
    //     jump to notFoundZeroLastIndex (skip the regexp match/test operation)
    //
    // The `notFoundZeroLastIndex` code should set regexp.lastIndex to 0 and
    // treat this as a not-found result.
    //
    // See steps 5-8 in js::RegExpBuiltinExec.
    //
    // Earlier guards must have ensured regexp.lastIndex is a non-negative
    // int32.
#ifdef DEBUG
    {
      Label ok;
      branchTestInt32(Assembler::Equal, lastIndexSlot, &ok);
      assumeUnreachable("Expected int32 value for lastIndex");
      bind(&ok);
    }
#endif
    unboxInt32(lastIndexSlot, lastIndex);
#ifdef DEBUG
    {
      Label ok;
      branchTest32(Assembler::NotSigned, lastIndex, lastIndex, &ok);
      assumeUnreachable("Expected non-negative lastIndex");
      bind(&ok);
    }
#endif
    branch32(Assembler::Below, stringLength, lastIndex, notFoundZeroLastIndex);
    jump(&loadedLastIndex);
  }

  bind(&notGlobalOrSticky);
  move32(Imm32(0), lastIndex);

  bind(&loadedLastIndex);
}

void MacroAssembler::loadAndClearRegExpSearcherLastLimit(Register result,
                                                         Register scratch) {
  MOZ_ASSERT(result != scratch);

  loadJSContext(scratch);

  Address limitField(scratch, JSContext::offsetOfRegExpSearcherLastLimit());
  load32(limitField, result);

#ifdef DEBUG
  Label ok;
  branch32(Assembler::NotEqual, result, Imm32(RegExpSearcherLastLimitSentinel),
           &ok);
  assumeUnreachable("Unexpected sentinel for regExpSearcherLastLimit");
  bind(&ok);
#endif
  store32(Imm32(RegExpSearcherLastLimitSentinel), limitField);
}

void MacroAssembler::loadParsedRegExpShared(Register regexp, Register result,
                                            Label* unparsed) {
  Address sharedSlot(regexp, RegExpObject::offsetOfShared());
  branchTestUndefined(Assembler::Equal, sharedSlot, unparsed);
  unboxNonDouble(sharedSlot, result, JSVAL_TYPE_PRIVATE_GCTHING);

  static_assert(sizeof(RegExpShared::Kind) == sizeof(uint32_t));
  branch32(Assembler::Equal, Address(result, RegExpShared::offsetOfKind()),
           Imm32(int32_t(RegExpShared::Kind::Unparsed)), unparsed);
}

// ===============================================================

void MacroAssembler::loadFunctionLength(Register func,
                                        Register funFlagsAndArgCount,
                                        Register output, Label* slowPath) {
#ifdef DEBUG
  {
    // These flags should already have been checked by the caller.
    Label ok;
    uint32_t FlagsToCheck =
        FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH;
    branchTest32(Assembler::Zero, funFlagsAndArgCount, Imm32(FlagsToCheck),
                 &ok);
    assumeUnreachable("The function flags should already have been checked.");
    bind(&ok);
  }
#endif  // DEBUG

  // NOTE: `funFlagsAndArgCount` and `output` must be allowed to alias.

  // Load the target function's length.
  Label isInterpreted, lengthLoaded;
  branchTest32(Assembler::NonZero, funFlagsAndArgCount,
               Imm32(FunctionFlags::BASESCRIPT), &isInterpreted);
  {
    // The length property of a native function is stored with the flags.
    move32(funFlagsAndArgCount, output);
    rshift32(Imm32(JSFunction::ArgCountShift), output);
    jump(&lengthLoaded);
  }
  bind(&isInterpreted);
  {
    // Load the length property of an interpreted function.
    loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), output);
    loadPtr(Address(output, JSScript::offsetOfSharedData()), output);
    branchTestPtr(Assembler::Zero, output, output, slowPath);
    loadPtr(Address(output, SharedImmutableScriptData::offsetOfISD()), output);
    load16ZeroExtend(Address(output, ImmutableScriptData::offsetOfFunLength()),
                     output);
  }
  bind(&lengthLoaded);
}

void MacroAssembler::loadFunctionName(Register func, Register output,
                                      ImmGCPtr emptyString, Label* slowPath) {
  MOZ_ASSERT(func != output);

  // Get the JSFunction flags.
  load32(Address(func, JSFunction::offsetOfFlagsAndArgCount()), output);

  // If the name was previously resolved, the name property may be shadowed.
  // If the function is an accessor with lazy name, AtomSlot contains the
  // unprefixed name.
  branchTest32(
      Assembler::NonZero, output,
      Imm32(FunctionFlags::RESOLVED_NAME | FunctionFlags::LAZY_ACCESSOR_NAME),
      slowPath);

  Label noName, done;
  branchTest32(Assembler::NonZero, output,
               Imm32(FunctionFlags::HAS_GUESSED_ATOM), &noName);

  Address atomAddr(func, JSFunction::offsetOfAtom());
  branchTestUndefined(Assembler::Equal, atomAddr, &noName);
  unboxString(atomAddr, output);
  jump(&done);

  {
    bind(&noName);

    // An absent name property defaults to the empty string.
    movePtr(emptyString, output);
  }

  bind(&done);
}

void MacroAssembler::assertFunctionIsExtended(Register func) {
#ifdef DEBUG
  Label extended;
  branchTestFunctionFlags(func, FunctionFlags::EXTENDED, Assembler::NonZero,
                          &extended);
  assumeUnreachable("Function is not extended");
  bind(&extended);
#endif
}

void MacroAssembler::branchTestType(Condition cond, Register tag,
                                    JSValueType type, Label* label) {
  switch (type) {
    case JSVAL_TYPE_DOUBLE:
      branchTestDouble(cond, tag, label);
      break;
    case JSVAL_TYPE_INT32:
      branchTestInt32(cond, tag, label);
      break;
    case JSVAL_TYPE_BOOLEAN:
      branchTestBoolean(cond, tag, label);
      break;
    case JSVAL_TYPE_UNDEFINED:
      branchTestUndefined(cond, tag, label);
      break;
    case JSVAL_TYPE_NULL:
      branchTestNull(cond, tag, label);
      break;
    case JSVAL_TYPE_MAGIC:
      branchTestMagic(cond, tag, label);
      break;
    case JSVAL_TYPE_STRING:
      branchTestString(cond, tag, label);
      break;
    case JSVAL_TYPE_SYMBOL:
      branchTestSymbol(cond, tag, label);
      break;
    case JSVAL_TYPE_BIGINT:
      branchTestBigInt(cond, tag, label);
      break;
    case JSVAL_TYPE_OBJECT:
      branchTestObject(cond, tag, label);
      break;
    default:
      MOZ_CRASH("Unexpected value type");
  }
}

void MacroAssembler::branchTestObjShapeList(
    Condition cond, Register obj, Register shapeElements, Register shapeScratch,
    Register endScratch, Register spectreScratch, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

  bool needSpectreMitigations = spectreScratch != InvalidReg;

  Label done;
  Label* onMatch = cond == Assembler::Equal ? label : &done;
  Label* onNoMatch = cond == Assembler::Equal ? &done : label;

  // Load the object's shape pointer into shapeScratch, and prepare to compare
  // it with the shapes in the list. The shapes are stored as private values so
  // we can compare directly.
  loadPtr(Address(obj, JSObject::offsetOfShape()), shapeScratch);

  // Compute end pointer.
  Address lengthAddr(shapeElements,
                     ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, endScratch);
  branch32(Assembler::Equal, endScratch, Imm32(0), onNoMatch);
  BaseObjectElementIndex endPtrAddr(shapeElements, endScratch);
  computeEffectiveAddress(endPtrAddr, endScratch);

  Label loop;
  bind(&loop);

  // Compare the object's shape with a shape from the list. Note that on 64-bit
  // this includes the tag bits, but on 32-bit we only compare the low word of
  // the value. This is fine because the list of shapes is never exposed and the
  // tag is guaranteed to be PrivateGCThing.
  if (needSpectreMitigations) {
    move32(Imm32(0), spectreScratch);
  }
  branchPtr(Assembler::Equal, Address(shapeElements, 0), shapeScratch, onMatch);
  if (needSpectreMitigations) {
    spectreMovePtr(Assembler::Equal, spectreScratch, obj);
  }

  // Advance to next shape and loop if not finished.
  addPtr(Imm32(sizeof(Value)), shapeElements);
  branchPtr(Assembler::Below, shapeElements, endScratch, &loop);

  if (cond == Assembler::NotEqual) {
    jump(label);
  }
  bind(&done);
}

void MacroAssembler::branchTestObjCompartment(Condition cond, Register obj,
                                              const Address& compartment,
                                              Register scratch, Label* label) {
  MOZ_ASSERT(obj != scratch);
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
  loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
  branchPtr(cond, compartment, scratch, label);
}

void MacroAssembler::branchTestObjCompartment(
    Condition cond, Register obj, const JS::Compartment* compartment,
    Register scratch, Label* label) {
  MOZ_ASSERT(obj != scratch);
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
  loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
  branchPtr(cond, scratch, ImmPtr(compartment), label);
}

void MacroAssembler::branchIfNonNativeObj(Register obj, Register scratch,
                                          Label* label) {
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  branchTest32(Assembler::Zero,
               Address(scratch, Shape::offsetOfImmutableFlags()),
               Imm32(Shape::isNativeBit()), label);
}

void MacroAssembler::branchIfObjectNotExtensible(Register obj, Register scratch,
                                                 Label* label) {
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);

  // Spectre-style checks are not needed here because we do not interpret data
  // based on this check.
  static_assert(sizeof(ObjectFlags) == sizeof(uint16_t));
  load16ZeroExtend(Address(scratch, Shape::offsetOfObjectFlags()), scratch);
  branchTest32(Assembler::NonZero, scratch,
               Imm32(uint32_t(ObjectFlag::NotExtensible)), label);
}

void MacroAssembler::branchTestObjectNeedsProxyResultValidation(
    Condition cond, Register obj, Register scratch, Label* label) {
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);

  Label done;
  Label* doValidation = cond == NonZero ? label : &done;
  Label* skipValidation = cond == NonZero ? &done : label;

  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  branchTest32(Assembler::Zero,
               Address(scratch, Shape::offsetOfImmutableFlags()),
               Imm32(Shape::isNativeBit()), doValidation);
  static_assert(sizeof(ObjectFlags) == sizeof(uint16_t));
  load16ZeroExtend(Address(scratch, Shape::offsetOfObjectFlags()), scratch);
  branchTest32(Assembler::NonZero, scratch,
               Imm32(uint32_t(ObjectFlag::NeedsProxyGetSetResultValidation)),
               doValidation);

  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
  loadPtr(Address(scratch, offsetof(JSClass, cOps)), scratch);
  branchTestPtr(Assembler::Zero, scratch, scratch, skipValidation);
  loadPtr(Address(scratch, offsetof(JSClassOps, resolve)), scratch);
  branchTestPtr(Assembler::NonZero, scratch, scratch, doValidation);
  bind(&done);
}

void MacroAssembler::wasmTrap(wasm::Trap trap,
                              wasm::BytecodeOffset bytecodeOffset) {
  FaultingCodeOffset fco = wasmTrapInstruction();
  MOZ_ASSERT_IF(!oom(),
                currentOffset() - fco.get() == WasmTrapInstructionLength);
  append(trap, wasm::TrapSite(wasm::TrapMachineInsn::OfficialUD, fco,
                              bytecodeOffset));
}

std::pair<CodeOffset, uint32_t> MacroAssembler::wasmReserveStackChecked(
    uint32_t amount, wasm::BytecodeOffset trapOffset) {
  if (amount > MAX_UNCHECKED_LEAF_FRAME_SIZE) {
    // The frame is large. Don't bump sp until after the stack limit check so
    // that the trap handler isn't called with a wild sp.
    Label ok;
    Register scratch = ABINonArgReg0;
    moveStackPtrTo(scratch);

    Label trap;
    branchPtr(Assembler::Below, scratch, Imm32(amount), &trap);
    subPtr(Imm32(amount), scratch);
    branchPtr(Assembler::Below,
              Address(InstanceReg, wasm::Instance::offsetOfStackLimit()),
              scratch, &ok);

    bind(&trap);
    wasmTrap(wasm::Trap::StackOverflow, trapOffset);
    CodeOffset trapInsnOffset = CodeOffset(currentOffset());

    bind(&ok);
    reserveStack(amount);
    return std::pair<CodeOffset, uint32_t>(trapInsnOffset, 0);
  }

  reserveStack(amount);
  Label ok;
  branchStackPtrRhs(Assembler::Below,
                    Address(InstanceReg, wasm::Instance::offsetOfStackLimit()),
                    &ok);
  wasmTrap(wasm::Trap::StackOverflow, trapOffset);
  CodeOffset trapInsnOffset = CodeOffset(currentOffset());
  bind(&ok);
  return std::pair<CodeOffset, uint32_t>(trapInsnOffset, amount);
}

#ifdef ENABLE_WASM_TAIL_CALLS
static void MoveDataBlock(MacroAssembler& masm, Register base, int32_t from,
                          int32_t to, uint32_t size) {
  MOZ_ASSERT(base != masm.getStackPointer());
  if (from == to || size == 0) {
    return;  // noop
  }

#  ifdef JS_CODEGEN_ARM64
  vixl::UseScratchRegisterScope temps(&masm);
  const Register scratch = temps.AcquireX().asUnsized();
#  elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_X86)
  static constexpr Register scratch = ABINonArgReg0;
  masm.push(scratch);
#  elif defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_MIPS64) || \
      defined(JS_CODEGEN_RISCV64)
  ScratchRegisterScope scratch(masm);
#  elif !defined(JS_CODEGEN_NONE)
  const Register scratch = ScratchReg;
#  else
  const Register scratch = InvalidReg;
#  endif

  if (to < from) {
    for (uint32_t i = 0; i < size; i += sizeof(void*)) {
      masm.loadPtr(Address(base, from + i), scratch);
      masm.storePtr(scratch, Address(base, to + i));
    }
  } else {
    for (uint32_t i = size; i > 0;) {
      i -= sizeof(void*);
      masm.loadPtr(Address(base, from + i), scratch);
      masm.storePtr(scratch, Address(base, to + i));
    }
  }

#  if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_X86)
  masm.pop(scratch);
#  endif
}

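// Editor's note (illustrative, not from the original file): like memmove,
// MoveDataBlock picks the copy direction from the potential overlap of the
// two ranges. Copying toward lower addresses walks forward; copying toward
// higher addresses walks backward, so each word is read before it can be
// overwritten by a later store.
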
struct ReturnCallTrampolineData {
#  ifdef JS_CODEGEN_ARM
  uint32_t trampolineOffset;
#  else
  CodeLabel trampoline;
#  endif
};

static ReturnCallTrampolineData MakeReturnCallTrampoline(MacroAssembler& masm) {
  uint32_t savedPushed = masm.framePushed();

  // Build simple trampoline code: load the instance slot from the frame,
  // restore FP, and return to the previous caller.
  ReturnCallTrampolineData data;
#  ifdef JS_CODEGEN_ARM
  data.trampolineOffset = masm.currentOffset();
#  else
  masm.bind(&data.trampoline);
#  endif

  masm.setFramePushed(
      AlignBytes(wasm::FrameWithInstances::sizeOfInstanceFieldsAndShadowStack(),
                 WasmStackAlignment));

#  ifdef ENABLE_WASM_TAIL_CALLS
  masm.wasmMarkSlowCall();
#  endif

  masm.loadPtr(
      Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
      InstanceReg);
  masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
  masm.moveToStackPtr(FramePointer);
#  ifdef JS_CODEGEN_ARM64
  masm.pop(FramePointer, lr);
  masm.append(wasm::CodeRangeUnwindInfo::UseFpLr, masm.currentOffset());
  masm.Mov(PseudoStackPointer64, vixl::sp);
  masm.abiret();
#  else
  masm.pop(FramePointer);
  masm.append(wasm::CodeRangeUnwindInfo::UseFp, masm.currentOffset());
  masm.ret();
#  endif

  masm.append(wasm::CodeRangeUnwindInfo::Normal, masm.currentOffset());
  masm.setFramePushed(savedPushed);
  return data;
}

// CollapseWasmFrame methods merge frame fields: callee parameters, instance
// slots, and caller RA. See the diagram below. C0 is the previous caller,
// C1 is the caller of the return call, and C2 is the callee.
//
//    +-------------------+          +--------------------+
//    |C0 instance slots  |          |C0 instance slots   |
//    +-------------------+ -+       +--------------------+ -+
//    |   RA              |  |       |   RA               |  |
//    +-------------------+  | C0    +--------------------+  |C0
//    |   FP              |  v       |   FP               |  v
//    +-------------------+          +--------------------+
//    |C0 private frame   |          |C0 private frame    |
//    +-------------------+          +--------------------+
//    |C1 results area    |          |C1/C2 results area  |
//    +-------------------+          +--------------------+
//    |C1 parameters      |          |? trampoline frame  |
//    +-------------------+          +--------------------+
//    |C1 instance slots  |          |C2 parameters       |
//    +-------------------+ -+       +--------------------+
//    |C0 RA              |  |       |C2 instance slots'  |
//    +-------------------+  | C1    +--------------------+ -+
//    |C0 FP              |  v       |C0 RA'              |  |
//    +-------------------+          +--------------------+  | C2
//    |C1 private frame   |          |C0 FP'              |  v
//    +-------------------+          +--------------------+ <= start of C2
//
//                                   +-------------------+
//                                   |C2 instance slots  |
//                                   +-------------------+ <= call C2
//
// The C2 parameters are moved in place of the C1 parameters, and the
// C1 frame data is removed. The instance slots, return address, and
// frame pointer to the C0 callsite are saved or adjusted.
//
// For cross-instance calls, the trampoline frame will be introduced
// if the C0 callsite has no ability to restore instance registers and realm.

5347 static void CollapseWasmFrameFast(MacroAssembler
& masm
,
5348 const ReturnCallAdjustmentInfo
& retCallInfo
) {
5349 uint32_t framePushedAtStart
= masm
.framePushed();
5350 static_assert(sizeof(wasm::Frame
) == 2 * sizeof(void*));
5352 // The instance slots + stack arguments are expected to be padded and
5353 // aligned to the WasmStackAlignment boundary. There is no data expected
5354 // in the padded region, such as results stack area or locals, to avoid
5355 // unwanted stack growth.
5356 uint32_t newSlotsAndStackArgBytes
=
5357 AlignBytes(retCallInfo
.newSlotsAndStackArgBytes
, WasmStackAlignment
);
5358 uint32_t oldSlotsAndStackArgBytes
=
5359 AlignBytes(retCallInfo
.oldSlotsAndStackArgBytes
, WasmStackAlignment
);
5361 static constexpr Register tempForCaller
= WasmTailCallInstanceScratchReg
;
5362 static constexpr Register tempForFP
= WasmTailCallFPScratchReg
;
5363 static constexpr Register tempForRA
= WasmTailCallRAScratchReg
;
5364 # ifndef JS_USE_LINK_REGISTER
5365 masm
.push(tempForRA
);
5368 // Load the FP, RA, and instance slots into registers to preserve them while
5369 // the new frame is collapsed over the current one.
5370 masm
.loadPtr(Address(FramePointer
, wasm::Frame::callerFPOffset()), tempForFP
);
5371 masm
.loadPtr(Address(FramePointer
, wasm::Frame::returnAddressOffset()),
5373 masm
.append(wasm::CodeRangeUnwindInfo::RestoreFpRa
, masm
.currentOffset());
5374 bool copyCallerSlot
= oldSlotsAndStackArgBytes
!= newSlotsAndStackArgBytes
;
5375 if (copyCallerSlot
) {
5377 Address(FramePointer
, wasm::FrameWithInstances::callerInstanceOffset()),
5381 // Copy parameters data, ignoring shadow data and instance slots.
5382 // Make all offsets relative to the FramePointer.
5383 int32_t newArgSrc
= -framePushedAtStart
;
5384 int32_t newArgDest
=
5385 sizeof(wasm::Frame
) + oldSlotsAndStackArgBytes
- newSlotsAndStackArgBytes
;
5386 const uint32_t SlotsSize
=
5387 wasm::FrameWithInstances::sizeOfInstanceFieldsAndShadowStack();
5388 MoveDataBlock(masm
, FramePointer
, newArgSrc
+ SlotsSize
,
5389 newArgDest
+ SlotsSize
,
5390 retCallInfo
.newSlotsAndStackArgBytes
- SlotsSize
);
5392 // Copy caller instance slots from the current frame.
5393 if (copyCallerSlot
) {
5396 Address(FramePointer
, newArgDest
+ WasmCallerInstanceOffsetBeforeCall
));
5399 // Store current instance as the new callee instance slot.
5402 Address(FramePointer
, newArgDest
+ WasmCalleeInstanceOffsetBeforeCall
));
5404 # ifdef JS_USE_LINK_REGISTER
5405 // RA is already in its place, just move stack.
5406 masm
.addToStackPtr(Imm32(framePushedAtStart
+ newArgDest
));
5408 // Push RA to new frame: store RA, restore temp, and move stack.
5409 int32_t newFrameOffset
= newArgDest
- sizeof(wasm::Frame
);
5410 masm
.storePtr(tempForRA
,
5411 Address(FramePointer
,
5412 newFrameOffset
+ wasm::Frame::returnAddressOffset()));
5413 // Restore tempForRA, but keep RA on top of the stack.
5414 // There is no non-locking exchange instruction between register and memory.
5415 // Using tempForCaller as scratch register.
5416 masm
.loadPtr(Address(masm
.getStackPointer(), 0), tempForCaller
);
5417 masm
.storePtr(tempForRA
, Address(masm
.getStackPointer(), 0));
5418 masm
.mov(tempForCaller
, tempForRA
);
5419 masm
.append(wasm::CodeRangeUnwindInfo::RestoreFp
, masm
.currentOffset());
5420 masm
.addToStackPtr(Imm32(framePushedAtStart
+ newFrameOffset
+
5421 wasm::Frame::returnAddressOffset() + sizeof(void*)));
5424 masm
.movePtr(tempForFP
, FramePointer
);
5425 // Setting framePushed to pre-collapse state, to properly set that in the
5427 masm
.setFramePushed(framePushedAtStart
);
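// Illustrative sketch (comments only, not authoritative): a return call
// reuses the caller's frame. In FP-relative terms, the collapse above
// rewrites
//
//   [ C0 args + instance slots | wasm::Frame (FP,RA) | C1 frame data ]
//
// into
//
//   [ padding | C1 outgoing args + instance slots | wasm::Frame (FP,RA) ]
//
// where the saved FP/RA of the C0 callsite are preserved in the relocated
// frame, and all sizes are rounded up to WasmStackAlignment as computed
// above.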
static void CollapseWasmFrameSlow(MacroAssembler& masm,
                                  const ReturnCallAdjustmentInfo& retCallInfo,
                                  wasm::CallSiteDesc desc,
                                  ReturnCallTrampolineData data) {
  uint32_t framePushedAtStart = masm.framePushed();
  static constexpr Register tempForCaller = WasmTailCallInstanceScratchReg;
  static constexpr Register tempForFP = WasmTailCallFPScratchReg;
  static constexpr Register tempForRA = WasmTailCallRAScratchReg;

  static_assert(sizeof(wasm::Frame) == 2 * sizeof(void*));

  // The hidden frame will "break" after the wasm::Frame data fields.
  // Calculate the sum of the wasm stack alignment before and after the break
  // as the size to reserve.
  const uint32_t HiddenFrameAfterSize =
      AlignBytes(wasm::FrameWithInstances::sizeOfInstanceFieldsAndShadowStack(),
                 WasmStackAlignment);
  const uint32_t HiddenFrameSize =
      AlignBytes(sizeof(wasm::Frame), WasmStackAlignment) +
      HiddenFrameAfterSize;

  // Prepare two frames: one is the regular wasm frame, and the other is
  // hidden. The hidden frame contains the instance slots needed for
  // unwinding and for recovering pinned registers.
  // The instance slots + stack arguments are expected to be padded and
  // aligned to the WasmStackAlignment boundary. There is no data expected
  // in the padded region, such as results stack area or locals, to avoid
  // unwanted stack growth.
  // The hidden frame is inserted under this constraint too.
  uint32_t newSlotsAndStackArgBytes =
      AlignBytes(retCallInfo.newSlotsAndStackArgBytes, WasmStackAlignment);
  uint32_t oldSlotsAndStackArgBytes =
      AlignBytes(retCallInfo.oldSlotsAndStackArgBytes, WasmStackAlignment);

  // Make all offsets relative to the FramePointer.
  int32_t newArgSrc = -framePushedAtStart;
  int32_t newArgDest = sizeof(wasm::Frame) + oldSlotsAndStackArgBytes -
                       HiddenFrameSize - newSlotsAndStackArgBytes;
  int32_t hiddenFrameArgsDest =
      sizeof(wasm::Frame) + oldSlotsAndStackArgBytes - HiddenFrameAfterSize;

  // It will be possible to overwrite data (on the top of the stack) due to
  // the added hidden frame, so reserve the needed space.
  uint32_t reserved = newArgDest - int32_t(sizeof(void*)) < newArgSrc
                          ? newArgSrc - newArgDest + sizeof(void*)
                          : 0;
  masm.reserveStack(reserved);

# ifndef JS_USE_LINK_REGISTER
  masm.push(tempForRA);
# endif

  // Load FP, RA and instance slots to preserve them from being overwritten.
  masm.loadPtr(Address(FramePointer, wasm::Frame::callerFPOffset()), tempForFP);
  masm.loadPtr(Address(FramePointer, wasm::Frame::returnAddressOffset()),
               tempForRA);
  masm.append(wasm::CodeRangeUnwindInfo::RestoreFpRa, masm.currentOffset());
  masm.loadPtr(
      Address(FramePointer, newArgSrc + WasmCallerInstanceOffsetBeforeCall),
      tempForCaller);

  // Copy parameters data, ignoring shadow data and instance slots.
  const uint32_t SlotsSize =
      wasm::FrameWithInstances::sizeOfInstanceFieldsAndShadowStack();
  MoveDataBlock(masm, FramePointer, newArgSrc + SlotsSize,
                newArgDest + SlotsSize,
                retCallInfo.newSlotsAndStackArgBytes - SlotsSize);

  // Form hidden frame for trampoline.
  int32_t newFPOffset = hiddenFrameArgsDest - sizeof(wasm::Frame);
  masm.storePtr(
      tempForRA,
      Address(FramePointer, newFPOffset + wasm::Frame::returnAddressOffset()));

  // Copy original FP.
  masm.storePtr(
      tempForFP,
      Address(FramePointer, newFPOffset + wasm::Frame::callerFPOffset()));

  // Set up instance slots.
  masm.storePtr(
      tempForCaller,
      Address(FramePointer,
              newFPOffset + wasm::FrameWithInstances::calleeInstanceOffset()));
  masm.storePtr(
      tempForCaller,
      Address(FramePointer, newArgDest + WasmCallerInstanceOffsetBeforeCall));
  masm.storePtr(
      InstanceReg,
      Address(FramePointer, newArgDest + WasmCalleeInstanceOffsetBeforeCall));

# ifdef JS_CODEGEN_ARM
  // ARM has no CodeLabel -- calculate PC directly.
  masm.mov(pc, tempForRA);
  masm.computeEffectiveAddress(
      Address(tempForRA,
              int32_t(data.trampolineOffset - masm.currentOffset() - 4)),
      tempForRA);
  masm.append(desc, CodeOffset(data.trampolineOffset));
# else
  masm.mov(&data.trampoline, tempForRA);

  masm.addCodeLabel(data.trampoline);
  // Add slow trampoline callsite description, to be annotated in
  // stack/frame iterators.
  masm.append(desc, *data.trampoline.target());
# endif

# ifdef JS_USE_LINK_REGISTER
  masm.freeStack(reserved);
  // RA is already in its place, just move stack.
  masm.addToStackPtr(Imm32(framePushedAtStart + newArgDest));
# else
  // Push RA to new frame: store RA, restore temp, and move stack.
  int32_t newFrameOffset = newArgDest - sizeof(wasm::Frame);
  masm.storePtr(tempForRA,
                Address(FramePointer,
                        newFrameOffset + wasm::Frame::returnAddressOffset()));
  // Restore tempForRA, but keep RA on top of the stack.
  // There is no non-locking exchange instruction between register and memory.
  // Using tempForCaller as scratch register.
  masm.loadPtr(Address(masm.getStackPointer(), 0), tempForCaller);
  masm.storePtr(tempForRA, Address(masm.getStackPointer(), 0));
  masm.mov(tempForCaller, tempForRA);
  masm.append(wasm::CodeRangeUnwindInfo::RestoreFp, masm.currentOffset());
  masm.addToStackPtr(Imm32(framePushedAtStart + newFrameOffset +
                           wasm::Frame::returnAddressOffset() + reserved +
                           sizeof(void*)));
# endif

  // Point FramePointer to hidden frame.
  masm.computeEffectiveAddress(Address(FramePointer, newFPOffset),
                               FramePointer);
  // Set framePushed back to its pre-collapse state, so that the following
  // code sees a consistent value.
  masm.setFramePushed(framePushedAtStart);
}
void MacroAssembler::wasmCollapseFrameFast(
    const ReturnCallAdjustmentInfo& retCallInfo) {
  CollapseWasmFrameFast(*this, retCallInfo);
}

void MacroAssembler::wasmCollapseFrameSlow(
    const ReturnCallAdjustmentInfo& retCallInfo, wasm::CallSiteDesc desc) {
  static constexpr Register temp1 = ABINonArgReg1;
  static constexpr Register temp2 = ABINonArgReg3;

  // Check if RA has the slow marker. If there is no marker, generate a
  // trampoline frame to restore register state when this tail call returns.

  Label slow, done;
  loadPtr(Address(FramePointer, wasm::Frame::returnAddressOffset()), temp1);
  wasmCheckSlowCallsite(temp1, &slow, temp1, temp2);
  CollapseWasmFrameFast(*this, retCallInfo);
  jump(&done);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());

  bind(&slow);
  ReturnCallTrampolineData data = MakeReturnCallTrampoline(*this);
  CollapseWasmFrameSlow(*this, retCallInfo, desc, data);

  bind(&done);
}
#endif  // ENABLE_WASM_TAIL_CALLS
CodeOffset MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc,
                                          const wasm::CalleeDesc& callee) {
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));

  // Load the callee, before the caller's registers are clobbered.
  uint32_t instanceDataOffset = callee.importInstanceDataOffset();
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               instanceDataOffset +
                               offsetof(wasm::FuncImportInstanceData, code))),
      ABINonArgReg0);

#if !defined(JS_CODEGEN_NONE) && !defined(JS_CODEGEN_WASM32)
  static_assert(ABINonArgReg0 != InstanceReg, "by constraint");
#endif

  // Switch to the callee's realm.
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               instanceDataOffset +
                               offsetof(wasm::FuncImportInstanceData, realm))),
      ABINonArgReg1);
  loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), ABINonArgReg2);
  storePtr(ABINonArgReg1, Address(ABINonArgReg2, JSContext::offsetOfRealm()));

  // Switch to the callee's instance and pinned registers and make the call.
  loadPtr(Address(InstanceReg,
                  wasm::Instance::offsetInData(
                      instanceDataOffset +
                      offsetof(wasm::FuncImportInstanceData, instance))),
          InstanceReg);

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
  loadWasmPinnedRegsFromInstance();

  CodeOffset res = call(desc, ABINonArgReg0);
#ifdef ENABLE_WASM_TAIL_CALLS
  wasmMarkSlowCall();
#endif
  return res;
}
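// For orientation, the loads above pull three fields out of the per-import
// instance data. A simplified sketch of that layout (the authoritative
// definition lives in wasm/WasmInstanceData.h; field order here is
// illustrative only) is:
//
//   struct FuncImportInstanceData {
//     wasm::Instance* instance;  // callee instance to switch to
//     JS::Realm* realm;          // callee realm to switch to
//     void* code;                // entry point to call
//   };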
#ifdef ENABLE_WASM_TAIL_CALLS
CodeOffset MacroAssembler::wasmReturnCallImport(
    const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee,
    const ReturnCallAdjustmentInfo& retCallInfo) {
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));

  // Load the callee, before the caller's registers are clobbered.
  uint32_t instanceDataOffset = callee.importInstanceDataOffset();
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               instanceDataOffset +
                               offsetof(wasm::FuncImportInstanceData, code))),
      ABINonArgReg0);

# if !defined(JS_CODEGEN_NONE) && !defined(JS_CODEGEN_WASM32)
  static_assert(ABINonArgReg0 != InstanceReg, "by constraint");
# endif

  // Switch to the callee's realm.
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               instanceDataOffset +
                               offsetof(wasm::FuncImportInstanceData, realm))),
      ABINonArgReg1);
  loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), ABINonArgReg2);
  storePtr(ABINonArgReg1, Address(ABINonArgReg2, JSContext::offsetOfRealm()));

  // Switch to the callee's instance and pinned registers and make the call.
  loadPtr(Address(InstanceReg,
                  wasm::Instance::offsetInData(
                      instanceDataOffset +
                      offsetof(wasm::FuncImportInstanceData, instance))),
          InstanceReg);

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
  loadWasmPinnedRegsFromInstance();

  wasm::CallSiteDesc stubDesc(desc.lineOrBytecode(),
                              wasm::CallSiteDesc::ReturnStub);
  wasmCollapseFrameSlow(retCallInfo, stubDesc);
  jump(ABINonArgReg0);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());
  return CodeOffset(currentOffset());
}
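// Shape of a return call, in outline (sketch): where a normal import call
// pushes a new frame and returns here, the sequence above collapses the
// current frame first and then transfers control with a plain jump, so the
// callee returns directly to our caller:
//
//   switch to callee instance/realm/pinned registers;
//   collapse frame (slow path, via the ReturnStub trampoline);
//   jump callee_code;   // no call, no new return address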
CodeOffset MacroAssembler::wasmReturnCall(
    const wasm::CallSiteDesc& desc, uint32_t funcDefIndex,
    const ReturnCallAdjustmentInfo& retCallInfo) {
  wasmCollapseFrameFast(retCallInfo);
  CodeOffset offset = farJumpWithPatch();
  append(desc, offset, funcDefIndex);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());
  return offset;
}
#endif  // ENABLE_WASM_TAIL_CALLS
CodeOffset MacroAssembler::wasmCallBuiltinInstanceMethod(
    const wasm::CallSiteDesc& desc, const ABIArg& instanceArg,
    wasm::SymbolicAddress builtin, wasm::FailureMode failureMode) {
  MOZ_ASSERT(instanceArg != ABIArg());

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));

  if (instanceArg.kind() == ABIArg::GPR) {
    movePtr(InstanceReg, instanceArg.gpr());
  } else if (instanceArg.kind() == ABIArg::Stack) {
    storePtr(InstanceReg,
             Address(getStackPointer(), instanceArg.offsetFromArgBase()));
  } else {
    MOZ_CRASH("Unknown abi passing style for pointer");
  }

  CodeOffset ret = call(desc, builtin);

  if (failureMode != wasm::FailureMode::Infallible) {
    Label noTrap;
    switch (failureMode) {
      case wasm::FailureMode::Infallible:
        MOZ_CRASH();
      case wasm::FailureMode::FailOnNegI32:
        branchTest32(Assembler::NotSigned, ReturnReg, ReturnReg, &noTrap);
        break;
      case wasm::FailureMode::FailOnMaxI32:
        branchPtr(Assembler::NotEqual, ReturnReg, ImmWord(uintptr_t(INT32_MAX)),
                  &noTrap);
        break;
      case wasm::FailureMode::FailOnNullPtr:
        branchTestPtr(Assembler::NonZero, ReturnReg, ReturnReg, &noTrap);
        break;
      case wasm::FailureMode::FailOnInvalidRef:
        branchPtr(Assembler::NotEqual, ReturnReg,
                  ImmWord(uintptr_t(wasm::AnyRef::invalid().forCompiledCode())),
                  &noTrap);
        break;
    }
    wasmTrap(wasm::Trap::ThrowReported,
             wasm::BytecodeOffset(desc.lineOrBytecode()));
    bind(&noTrap);
  }

  return ret;
}
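// Failure-mode checks above, summarized (sketch): the builtin is assumed to
// have failed, with an error already reported, exactly when its return value
// matches the designated sentinel:
//
//   FailOnNegI32     -> trap if (int32_t)ReturnReg <  0
//   FailOnMaxI32     -> trap if ReturnReg == INT32_MAX
//   FailOnNullPtr    -> trap if ReturnReg == nullptr
//   FailOnInvalidRef -> trap if ReturnReg == AnyRef::invalid()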
CodeOffset MacroAssembler::asmCallIndirect(const wasm::CallSiteDesc& desc,
                                           const wasm::CalleeDesc& callee) {
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::AsmJSTable);

  const Register scratch = WasmTableCallScratchReg0;
  const Register index = WasmTableCallIndexReg;

  // Optimization opportunity: when offsetof(FunctionTableElem, code) == 0, as
  // it is at present, we can probably generate better code here by folding
  // the address computation into the load.

  static_assert(sizeof(wasm::FunctionTableElem) == 8 ||
                    sizeof(wasm::FunctionTableElem) == 16,
                "elements of function tables are two words");

  // asm.js tables require no signature check, and have had their index
  // masked into range and thus need no bounds check.
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               callee.tableFunctionBaseInstanceDataOffset())),
      scratch);
  if (sizeof(wasm::FunctionTableElem) == 8) {
    computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
  } else {
    lshift32(Imm32(4), index);
    addPtr(index, scratch);
  }
  loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, code)), scratch);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
  return call(desc, scratch);
}
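// Element addressing above (sketch): each table entry is a
// wasm::FunctionTableElem of either 8 or 16 bytes, so the callee address is
//
//   elem = tableBase + index * sizeof(wasm::FunctionTableElem)
//
// computed with a scaled BaseIndex when the scale is 8, or a shift-and-add
// when it is 16.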
// In principle, call_indirect requires an expensive context switch to the
// callee's instance and realm before the call and an almost equally expensive
// switch back to the caller's ditto after. However, if the caller's instance
// is the same as the callee's instance then no context switch is required, and
// it only takes a compare-and-branch at run-time to test this - all values are
// in registers already. We therefore generate two call paths, one for the fast
// call without the context switch (which additionally avoids a null check) and
// one for the slow call with the context switch.
void MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc,
                                      const wasm::CalleeDesc& callee,
                                      Label* boundsCheckFailedLabel,
                                      Label* nullCheckFailedLabel,
                                      mozilla::Maybe<uint32_t> tableSize,
                                      CodeOffset* fastCallOffset,
                                      CodeOffset* slowCallOffset) {
  static_assert(sizeof(wasm::FunctionTableElem) == 2 * sizeof(void*),
                "Exactly two pointers or index scaling won't work correctly");
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);

  const int shift = sizeof(wasm::FunctionTableElem) == 8 ? 3 : 4;
  wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
  const Register calleeScratch = WasmTableCallScratchReg0;
  const Register index = WasmTableCallIndexReg;

  // Check the table index and throw if out-of-bounds.
  //
  // Frequently the table size is known, so optimize for that. Otherwise
  // compare with a memory operand when that's possible. (There's little sense
  // in hoisting the load of the bound into a register at a higher level and
  // reusing that register, because a hoisted value would either have to be
  // spilled and re-loaded before the next call_indirect, or would be abandoned
  // because we could not trust that a hoisted value would not have changed.)

  if (boundsCheckFailedLabel) {
    if (tableSize.isSome()) {
      branch32(Assembler::Condition::AboveOrEqual, index, Imm32(*tableSize),
               boundsCheckFailedLabel);
    } else {
      branch32(
          Assembler::Condition::BelowOrEqual,
          Address(InstanceReg, wasm::Instance::offsetInData(
                                   callee.tableLengthInstanceDataOffset())),
          index, boundsCheckFailedLabel);
    }
  }

  // Write the functype-id into the ABI functype-id register.

  const wasm::CallIndirectId callIndirectId = callee.wasmTableSigId();
  switch (callIndirectId.kind()) {
    case wasm::CallIndirectIdKind::Global:
      loadPtr(Address(InstanceReg, wasm::Instance::offsetInData(
                                       callIndirectId.instanceDataOffset() +
                                       offsetof(wasm::TypeDefInstanceData,
                                                superTypeVector))),
              WasmTableCallSigReg);
      break;
    case wasm::CallIndirectIdKind::Immediate:
      move32(Imm32(callIndirectId.immediate()), WasmTableCallSigReg);
      break;
    case wasm::CallIndirectIdKind::AsmJS:
    case wasm::CallIndirectIdKind::None:
      break;
  }

  // Load the base pointer of the table and compute the address of the callee
  // in the table.

  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               callee.tableFunctionBaseInstanceDataOffset())),
      calleeScratch);
  shiftIndex32AndAdd(index, shift, calleeScratch);

  // Load the callee instance and decide whether to take the fast path or the
  // slow path.

  Label fastCall;
  Label done;
  const Register newInstanceTemp = WasmTableCallScratchReg1;
  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, instance)),
          newInstanceTemp);
  branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);

  // Slow path: Save context, check for null, setup new context, call, restore
  // context.
  //
  // TODO: The slow path could usefully be out-of-line and the test above would
  // just fall through to the fast path. This keeps the fast-path code dense,
  // and has correct static prediction for the branch (forward conditional
  // branches predicted not taken, normally).

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  movePtr(newInstanceTemp, InstanceReg);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));

#ifdef WASM_HAS_HEAPREG
  // Use the null pointer exception resulting from loading HeapReg from a null
  // instance to handle a call to a null slot.
  MOZ_ASSERT(nullCheckFailedLabel == nullptr);
  loadWasmPinnedRegsFromInstance(mozilla::Some(trapOffset));
#else
  MOZ_ASSERT(nullCheckFailedLabel != nullptr);
  branchTestPtr(Assembler::Zero, InstanceReg, InstanceReg,
                nullCheckFailedLabel);

  loadWasmPinnedRegsFromInstance();
#endif
  switchToWasmInstanceRealm(index, WasmTableCallScratchReg1);

  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
          calleeScratch);

  *slowCallOffset = call(desc, calleeScratch);
#ifdef ENABLE_WASM_TAIL_CALLS
  wasmMarkSlowCall();
#endif

  // Restore registers and realm and join up with the fast path.

  loadPtr(Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
          InstanceReg);
  loadWasmPinnedRegsFromInstance();
  switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
  jump(&done);

  // Fast path: just load the code pointer and go. The instance and heap
  // register are the same as in the caller, and nothing will be null.
  //
  // (In particular, the code pointer will not be null: if it were, the instance
  // would have been null, and then it would not have been equivalent to our
  // current instance. So no null check is needed on the fast path.)

  bind(&fastCall);

  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
          calleeScratch);

  // We use a different type of call site for the fast call since the instance
  // slots in the frame do not have valid values.

  wasm::CallSiteDesc newDesc(desc.lineOrBytecode(),
                             wasm::CallSiteDesc::IndirectFast);
  *fastCallOffset = call(newDesc, calleeScratch);

  bind(&done);
}
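// Dispatch logic generated above, in outline (sketch only):
//
//   if (table[index].instance == InstanceReg) {
//     // fast path: same instance, pinned registers already correct
//     call table[index].code;   // CallSiteDesc::IndirectFast
//   } else {
//     // slow path: save instance slots, switch instance/realm/pinned regs,
//     // call, then restore the caller's context
//     call table[index].code;
//   }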
#ifdef ENABLE_WASM_TAIL_CALLS
void MacroAssembler::wasmReturnCallIndirect(
    const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee,
    Label* boundsCheckFailedLabel, Label* nullCheckFailedLabel,
    mozilla::Maybe<uint32_t> tableSize,
    const ReturnCallAdjustmentInfo& retCallInfo) {
  static_assert(sizeof(wasm::FunctionTableElem) == 2 * sizeof(void*),
                "Exactly two pointers or index scaling won't work correctly");
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);

  const int shift = sizeof(wasm::FunctionTableElem) == 8 ? 3 : 4;
  wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
  const Register calleeScratch = WasmTableCallScratchReg0;
  const Register index = WasmTableCallIndexReg;

  // Check the table index and throw if out-of-bounds.
  //
  // Frequently the table size is known, so optimize for that. Otherwise
  // compare with a memory operand when that's possible. (There's little sense
  // in hoisting the load of the bound into a register at a higher level and
  // reusing that register, because a hoisted value would either have to be
  // spilled and re-loaded before the next call_indirect, or would be abandoned
  // because we could not trust that a hoisted value would not have changed.)

  if (boundsCheckFailedLabel) {
    if (tableSize.isSome()) {
      branch32(Assembler::Condition::AboveOrEqual, index, Imm32(*tableSize),
               boundsCheckFailedLabel);
    } else {
      branch32(
          Assembler::Condition::BelowOrEqual,
          Address(InstanceReg, wasm::Instance::offsetInData(
                                   callee.tableLengthInstanceDataOffset())),
          index, boundsCheckFailedLabel);
    }
  }

  // Write the functype-id into the ABI functype-id register.

  const wasm::CallIndirectId callIndirectId = callee.wasmTableSigId();
  switch (callIndirectId.kind()) {
    case wasm::CallIndirectIdKind::Global:
      loadPtr(Address(InstanceReg, wasm::Instance::offsetInData(
                                       callIndirectId.instanceDataOffset() +
                                       offsetof(wasm::TypeDefInstanceData,
                                                superTypeVector))),
              WasmTableCallSigReg);
      break;
    case wasm::CallIndirectIdKind::Immediate:
      move32(Imm32(callIndirectId.immediate()), WasmTableCallSigReg);
      break;
    case wasm::CallIndirectIdKind::AsmJS:
    case wasm::CallIndirectIdKind::None:
      break;
  }

  // Load the base pointer of the table and compute the address of the callee
  // in the table.

  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               callee.tableFunctionBaseInstanceDataOffset())),
      calleeScratch);
  shiftIndex32AndAdd(index, shift, calleeScratch);

  // Load the callee instance and decide whether to take the fast path or the
  // slow path.

  Label fastCall;
  const Register newInstanceTemp = WasmTableCallScratchReg1;
  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, instance)),
          newInstanceTemp);
  branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);

  // Slow path: Save context, check for null, setup new context.

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  movePtr(newInstanceTemp, InstanceReg);

# ifdef WASM_HAS_HEAPREG
  // Use the null pointer exception resulting from loading HeapReg from a null
  // instance to handle a call to a null slot.
  MOZ_ASSERT(nullCheckFailedLabel == nullptr);
  loadWasmPinnedRegsFromInstance(mozilla::Some(trapOffset));
# else
  MOZ_ASSERT(nullCheckFailedLabel != nullptr);
  branchTestPtr(Assembler::Zero, InstanceReg, InstanceReg,
                nullCheckFailedLabel);

  loadWasmPinnedRegsFromInstance();
# endif
  switchToWasmInstanceRealm(index, WasmTableCallScratchReg1);

  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
          calleeScratch);

  wasm::CallSiteDesc stubDesc(desc.lineOrBytecode(),
                              wasm::CallSiteDesc::ReturnStub);
  wasmCollapseFrameSlow(retCallInfo, stubDesc);
  jump(calleeScratch);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());

  // Fast path: just load the code pointer and go.

  bind(&fastCall);

  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
          calleeScratch);

  wasmCollapseFrameFast(retCallInfo);
  jump(calleeScratch);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());
}
#endif  // ENABLE_WASM_TAIL_CALLS
void MacroAssembler::wasmCallRef(const wasm::CallSiteDesc& desc,
                                 const wasm::CalleeDesc& callee,
                                 CodeOffset* fastCallOffset,
                                 CodeOffset* slowCallOffset) {
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::FuncRef);
  const Register calleeScratch = WasmCallRefCallScratchReg0;
  const Register calleeFnObj = WasmCallRefReg;

  // Load from the function's WASM_INSTANCE_SLOT extended slot, and decide
  // whether to take the fast path or the slow path. Register this load
  // instruction to be the source of a trap -- null pointer check.

  Label fastCall;
  Label done;
  const Register newInstanceTemp = WasmCallRefCallScratchReg1;
  size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_INSTANCE_SLOT);
  static_assert(FunctionExtended::WASM_INSTANCE_SLOT < wasm::NullPtrGuardSize);
  wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
  FaultingCodeOffset fco =
      loadPtr(Address(calleeFnObj, instanceSlotOffset), newInstanceTemp);
  append(wasm::Trap::NullPointerDereference,
         wasm::TrapSite(wasm::TrapMachineInsnForLoadWord(), fco, trapOffset));
  branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  movePtr(newInstanceTemp, InstanceReg);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));

  loadWasmPinnedRegsFromInstance();
  switchToWasmInstanceRealm(WasmCallRefCallScratchReg0,
                            WasmCallRefCallScratchReg1);

  // Get funcUncheckedCallEntry() from the function's
  // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
  size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
  loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);

  *slowCallOffset = call(desc, calleeScratch);
#ifdef ENABLE_WASM_TAIL_CALLS
  wasmMarkSlowCall();
#endif

  // Restore the registers and realm back to this caller's context.
  loadPtr(Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
          InstanceReg);
  loadWasmPinnedRegsFromInstance();
  switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
  jump(&done);

  // Fast path: just load WASM_FUNC_UNCHECKED_ENTRY_SLOT value and go.
  // The instance and pinned registers are the same as in the caller.

  bind(&fastCall);

  loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);

  // We use a different type of call site for the fast call since the instance
  // slots in the frame do not have valid values.

  wasm::CallSiteDesc newDesc(desc.lineOrBytecode(),
                             wasm::CallSiteDesc::FuncRefFast);
  *fastCallOffset = call(newDesc, calleeScratch);

  bind(&done);
}
#ifdef ENABLE_WASM_TAIL_CALLS
void MacroAssembler::wasmReturnCallRef(
    const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee,
    const ReturnCallAdjustmentInfo& retCallInfo) {
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::FuncRef);
  const Register calleeScratch = WasmCallRefCallScratchReg0;
  const Register calleeFnObj = WasmCallRefReg;

  // Load from the function's WASM_INSTANCE_SLOT extended slot, and decide
  // whether to take the fast path or the slow path. Register this load
  // instruction to be the source of a trap -- null pointer check.

  Label fastCall;
  const Register newInstanceTemp = WasmCallRefCallScratchReg1;
  size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_INSTANCE_SLOT);
  static_assert(FunctionExtended::WASM_INSTANCE_SLOT < wasm::NullPtrGuardSize);
  wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
  FaultingCodeOffset fco =
      loadPtr(Address(calleeFnObj, instanceSlotOffset), newInstanceTemp);
  append(wasm::Trap::NullPointerDereference,
         wasm::TrapSite(wasm::TrapMachineInsnForLoadWord(), fco, trapOffset));
  branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  movePtr(newInstanceTemp, InstanceReg);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));

  loadWasmPinnedRegsFromInstance();
  switchToWasmInstanceRealm(WasmCallRefCallScratchReg0,
                            WasmCallRefCallScratchReg1);

  // Get funcUncheckedCallEntry() from the function's
  // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
  size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
  loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);

  wasm::CallSiteDesc stubDesc(desc.lineOrBytecode(),
                              wasm::CallSiteDesc::ReturnStub);
  wasmCollapseFrameSlow(retCallInfo, stubDesc);
  jump(calleeScratch);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());

  // Fast path: just load WASM_FUNC_UNCHECKED_ENTRY_SLOT value and go.
  // The instance and pinned registers are the same as in the caller.

  bind(&fastCall);

  loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);

  wasmCollapseFrameFast(retCallInfo);
  jump(calleeScratch);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());
}
#endif  // ENABLE_WASM_TAIL_CALLS
void MacroAssembler::wasmBoundsCheckRange32(
    Register index, Register length, Register limit, Register tmp,
    wasm::BytecodeOffset bytecodeOffset) {
  Label ok;
  Label fail;

  mov(index, tmp);
  branchAdd32(Assembler::CarrySet, length, tmp, &fail);
  branch32(Assembler::Above, tmp, limit, &fail);
  jump(&ok);

  bind(&fail);
  wasmTrap(wasm::Trap::OutOfBounds, bytecodeOffset);

  bind(&ok);
}
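// Worked example (sketch): with index = 0xFFFFFFF0 and length = 0x20, the
// 32-bit add wraps and sets the carry flag, so the range [index,
// index + length) cannot fit in the 32-bit address space and we trap without
// even consulting limit. Otherwise we trap iff index + length > limit.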
bool MacroAssembler::needScratch1ForBranchWasmRefIsSubtypeAny(
    wasm::RefType type) {
  MOZ_ASSERT(type.isValid());
  MOZ_ASSERT(type.isAnyHierarchy());
  return !type.isNone() && !type.isAny();
}

bool MacroAssembler::needScratch2ForBranchWasmRefIsSubtypeAny(
    wasm::RefType type) {
  MOZ_ASSERT(type.isValid());
  MOZ_ASSERT(type.isAnyHierarchy());
  return type.isTypeRef() &&
         type.typeDef()->subTypingDepth() >= wasm::MinSuperTypeVectorLength;
}

bool MacroAssembler::needSuperSTVForBranchWasmRefIsSubtypeAny(
    wasm::RefType type) {
  MOZ_ASSERT(type.isValid());
  MOZ_ASSERT(type.isAnyHierarchy());
  return type.isTypeRef();
}
void MacroAssembler::branchWasmRefIsSubtypeAny(
    Register ref, wasm::RefType sourceType, wasm::RefType destType,
    Label* label, bool onSuccess, Register superSTV, Register scratch1,
    Register scratch2) {
  MOZ_ASSERT(sourceType.isValid());
  MOZ_ASSERT(destType.isValid());
  MOZ_ASSERT(sourceType.isAnyHierarchy());
  MOZ_ASSERT(destType.isAnyHierarchy());
  MOZ_ASSERT_IF(needScratch1ForBranchWasmRefIsSubtypeAny(destType),
                scratch1 != Register::Invalid());
  MOZ_ASSERT_IF(needScratch2ForBranchWasmRefIsSubtypeAny(destType),
                scratch2 != Register::Invalid());
  MOZ_ASSERT_IF(needSuperSTVForBranchWasmRefIsSubtypeAny(destType),
                superSTV != Register::Invalid());

  Label fallthrough;
  Label* successLabel = onSuccess ? label : &fallthrough;
  Label* failLabel = onSuccess ? &fallthrough : label;
  Label* nullLabel = destType.isNullable() ? successLabel : failLabel;

  // Check for null.
  if (sourceType.isNullable()) {
    branchWasmAnyRefIsNull(true, ref, nullLabel);
  }

  // The only value that can inhabit 'none' is null. So, early out if we got
  // not-null.
  if (destType.isNone()) {
    jump(failLabel);
    bind(&fallthrough);
    return;
  }

  if (destType.isAny()) {
    // No further checks for 'any'
    jump(successLabel);
    bind(&fallthrough);
    return;
  }

  // 'type' is now 'eq' or lower, which currently will either be a gc object
  // or an i31.

  // Check first for i31 values, and get them out of the way. i31 values are
  // valid when casting to i31 or eq, and invalid otherwise.
  if (destType.isI31() || destType.isEq()) {
    branchWasmAnyRefIsI31(true, ref, successLabel);

    if (destType.isI31()) {
      // No further checks for 'i31'
      jump(failLabel);
      bind(&fallthrough);
      return;
    }
  }

  // Then check for any kind of gc object.
  MOZ_ASSERT(scratch1 != Register::Invalid());
  if (!wasm::RefType::isSubTypeOf(sourceType, wasm::RefType::struct_()) &&
      !wasm::RefType::isSubTypeOf(sourceType, wasm::RefType::array())) {
    branchWasmAnyRefIsObjectOrNull(false, ref, failLabel);
    branchObjectIsWasmGcObject(false, ref, scratch1, failLabel);
  }

  if (destType.isEq()) {
    // No further checks for 'eq'
    jump(successLabel);
    bind(&fallthrough);
    return;
  }

  // 'type' is now 'struct', 'array', or a concrete type. (Bottom types and i31
  // were handled above.)
  //
  // Casting to a concrete type only requires a simple check on the
  // object's super type vector. Casting to an abstract type (struct, array)
  // requires loading the object's superTypeVector->typeDef->kind, and checking
  // that it is correct.

  loadPtr(Address(ref, int32_t(WasmGcObject::offsetOfSuperTypeVector())),
          scratch1);
  if (destType.isTypeRef()) {
    // concrete type, do superTypeVector check
    branchWasmSTVIsSubtype(scratch1, superSTV, scratch2,
                           destType.typeDef()->subTypingDepth(), successLabel,
                           true);
  } else {
    // abstract type, do kind check
    loadPtr(Address(scratch1,
                    int32_t(wasm::SuperTypeVector::offsetOfSelfTypeDef())),
            scratch1);
    load8ZeroExtend(Address(scratch1, int32_t(wasm::TypeDef::offsetOfKind())),
                    scratch1);
    branch32(Assembler::Equal, scratch1, Imm32(int32_t(destType.typeDefKind())),
             successLabel);
  }

  // If we didn't branch away, the cast failed.
  jump(failLabel);
  bind(&fallthrough);
}
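// Decision ladder implemented above (sketch):
//
//   null?             -> success iff destType is nullable
//   dest == none      -> fail (only null inhabits 'none')
//   dest == any       -> success
//   i31 value?        -> success for 'i31'/'eq', fail for anything else
//   not a GC object?  -> fail
//   dest == eq        -> success
//   dest concrete     -> super-type-vector check
//   dest struct/array -> superTypeVector->typeDef->kind check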
bool MacroAssembler::needSuperSTVAndScratch1ForBranchWasmRefIsSubtypeFunc(
    wasm::RefType type) {
  MOZ_ASSERT(type.isValid());
  MOZ_ASSERT(type.isFuncHierarchy());
  return type.isTypeRef();
}

bool MacroAssembler::needScratch2ForBranchWasmRefIsSubtypeFunc(
    wasm::RefType type) {
  MOZ_ASSERT(type.isValid());
  MOZ_ASSERT(type.isFuncHierarchy());
  return type.isTypeRef() &&
         type.typeDef()->subTypingDepth() >= wasm::MinSuperTypeVectorLength;
}
void MacroAssembler::branchWasmRefIsSubtypeFunc(
    Register ref, wasm::RefType sourceType, wasm::RefType destType,
    Label* label, bool onSuccess, Register superSTV, Register scratch1,
    Register scratch2) {
  MOZ_ASSERT(sourceType.isValid());
  MOZ_ASSERT(destType.isValid());
  MOZ_ASSERT(sourceType.isFuncHierarchy());
  MOZ_ASSERT(destType.isFuncHierarchy());
  MOZ_ASSERT_IF(
      needSuperSTVAndScratch1ForBranchWasmRefIsSubtypeFunc(destType),
      superSTV != Register::Invalid() && scratch1 != Register::Invalid());
  MOZ_ASSERT_IF(needScratch2ForBranchWasmRefIsSubtypeFunc(destType),
                scratch2 != Register::Invalid());

  Label fallthrough;
  Label* successLabel = onSuccess ? label : &fallthrough;
  Label* failLabel = onSuccess ? &fallthrough : label;
  Label* nullLabel = destType.isNullable() ? successLabel : failLabel;

  // Check for null.
  if (sourceType.isNullable()) {
    branchTestPtr(Assembler::Zero, ref, ref, nullLabel);
  }

  // The only value that can inhabit 'nofunc' is null. So, early out if we got
  // not-null.
  if (destType.isNoFunc()) {
    jump(failLabel);
    bind(&fallthrough);
    return;
  }

  if (destType.isFunc()) {
    // No further checks for 'func' (any func)
    jump(successLabel);
    bind(&fallthrough);
    return;
  }

  // In the func hierarchy, a supertype vector check is now sufficient for all
  // remaining cases.
  loadPrivate(Address(ref, int32_t(FunctionExtended::offsetOfWasmSTV())),
              scratch1);
  branchWasmSTVIsSubtype(scratch1, superSTV, scratch2,
                         destType.typeDef()->subTypingDepth(), successLabel,
                         true);

  // If we didn't branch away, the cast failed.
  jump(failLabel);
  bind(&fallthrough);
}
void MacroAssembler::branchWasmRefIsSubtypeExtern(Register ref,
                                                  wasm::RefType sourceType,
                                                  wasm::RefType destType,
                                                  Label* label,
                                                  bool onSuccess) {
  MOZ_ASSERT(sourceType.isValid());
  MOZ_ASSERT(destType.isValid());
  MOZ_ASSERT(sourceType.isExternHierarchy());
  MOZ_ASSERT(destType.isExternHierarchy());

  Label fallthrough;
  Label* successLabel = onSuccess ? label : &fallthrough;
  Label* failLabel = onSuccess ? &fallthrough : label;
  Label* nullLabel = destType.isNullable() ? successLabel : failLabel;

  // Check for null.
  if (sourceType.isNullable()) {
    branchTestPtr(Assembler::Zero, ref, ref, nullLabel);
  }

  // The only value that can inhabit 'noextern' is null. So, early out if we
  // got not-null.
  if (destType.isNoExtern()) {
    jump(failLabel);
    bind(&fallthrough);
    return;
  }

  // There are no other possible types except externref, so succeed!
  jump(successLabel);
  bind(&fallthrough);
}
void MacroAssembler::branchWasmSTVIsSubtype(Register subSTV, Register superSTV,
                                            Register scratch,
                                            uint32_t superDepth, Label* label,
                                            bool onSuccess) {
  MOZ_ASSERT_IF(superDepth >= wasm::MinSuperTypeVectorLength,
                scratch != Register::Invalid());
  Label fallthrough;
  Label* failed = onSuccess ? &fallthrough : label;

  // At this point, we could generate a fast success check which jumps to
  // `success` if `subSTV == superSTV`. However,
  // profiling of Barista-3 seems to show this is hardly worth anything,
  // whereas it is worth us generating smaller code and in particular one
  // fewer conditional branch.

  // Emit a bounds check if the super type depth may be out-of-bounds.
  if (superDepth >= wasm::MinSuperTypeVectorLength) {
    load32(Address(subSTV, wasm::SuperTypeVector::offsetOfLength()), scratch);
    branch32(Assembler::BelowOrEqual, scratch, Imm32(superDepth), failed);
  }

  // Load the `superTypeDepth` entry from subSTV. This will be `superSTV` if
  // `subSTV` is indeed a subtype.
  loadPtr(
      Address(subSTV, wasm::SuperTypeVector::offsetOfSTVInVector(superDepth)),
      subSTV);

  // We succeed iff the entries are equal.
  branchPtr(onSuccess ? Assembler::Equal : Assembler::NotEqual, subSTV,
            superSTV, label);

  bind(&fallthrough);
}
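// Equivalent logic (sketch):
//
//   if (superDepth >= MinSuperTypeVectorLength &&
//       subSTV->length <= superDepth) {
//     goto failed;  // depth not present in the vector
//   }
//   success = (subSTV->types_[superDepth] == superSTV);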
void MacroAssembler::branchWasmSTVIsSubtypeDynamicDepth(
    Register subSTV, Register superSTV, Register superDepth, Register scratch,
    Label* label, bool onSuccess) {
  Label fallthrough;
  Label* failed = onSuccess ? &fallthrough : label;

  // Bounds check of the super type vector.
  load32(Address(subSTV, wasm::SuperTypeVector::offsetOfLength()), scratch);
  branch32(Assembler::BelowOrEqual, scratch, superDepth, failed);

  // Load `subSTV[superTypeDepth]`. This will be `superSTV` if `subSTV` is
  // indeed a subtype.
  loadPtr(BaseIndex(subSTV, superDepth, ScalePointer,
                    offsetof(wasm::SuperTypeVector, types_)),
          subSTV);

  // We succeed iff the entries are equal.
  branchPtr(onSuccess ? Assembler::Equal : Assembler::NotEqual, subSTV,
            superSTV, label);

  bind(&fallthrough);
}
void MacroAssembler::branchWasmAnyRefIsNull(bool isNull, Register src,
                                            Label* label) {
  branchTestPtr(isNull ? Assembler::Zero : Assembler::NonZero, src, src,
                label);
}

void MacroAssembler::branchWasmAnyRefIsI31(bool isI31, Register src,
                                           Label* label) {
  branchTestPtr(isI31 ? Assembler::NonZero : Assembler::Zero, src,
                Imm32(int32_t(wasm::AnyRefTag::I31)), label);
}

void MacroAssembler::branchWasmAnyRefIsObjectOrNull(bool isObject, Register src,
                                                    Label* label) {
  branchTestPtr(isObject ? Assembler::Zero : Assembler::NonZero, src,
                Imm32(int32_t(wasm::AnyRef::TagMask)), label);
}

void MacroAssembler::branchWasmAnyRefIsGCThing(bool isGCThing, Register src,
                                               Label* label) {
  Label fallthrough;
  Label* isGCThingLabel = isGCThing ? label : &fallthrough;
  Label* isNotGCThingLabel = isGCThing ? &fallthrough : label;

  // Null and i31 values are not GC things.
  branchWasmAnyRefIsNull(true, src, isNotGCThingLabel);
  branchWasmAnyRefIsI31(true, src, isNotGCThingLabel);
  jump(isGCThingLabel);
  bind(&fallthrough);
}

void MacroAssembler::branchWasmAnyRefIsNurseryCell(bool isNurseryCell,
                                                   Register src, Register temp,
                                                   Label* label) {
  Label done;
  branchWasmAnyRefIsGCThing(false, src, isNurseryCell ? &done : label);

  getWasmAnyRefGCThingChunk(src, temp);
  branchPtr(isNurseryCell ? Assembler::NotEqual : Assembler::Equal,
            Address(temp, gc::ChunkStoreBufferOffset), ImmWord(0), label);
  bind(&done);
}
void MacroAssembler::truncate32ToWasmI31Ref(Register src, Register dest) {
  // This will either zero-extend or sign-extend the high 32 bits on 64-bit
  // platforms (see comments on invariants in MacroAssembler.h). Either case
  // is fine, as we won't use these bits.
  move32(src, dest);
  // Move the payload of the integer over by 1 to make room for the tag. This
  // will perform the truncation required by the spec.
  lshift32(Imm32(1), dest);
  // Add the i31 tag to the integer.
  orPtr(Imm32(int32_t(wasm::AnyRefTag::I31)), dest);
}

void MacroAssembler::convertWasmI31RefTo32Signed(Register src, Register dest) {
  // This will either zero-extend or sign-extend the high 32 bits on 64-bit
  // platforms (see comments on invariants in MacroAssembler.h). Either case
  // is fine, as we won't use these bits.
  move32(src, dest);
  // Shift the payload back (clobbering the tag). This will sign-extend, giving
  // us the signed behavior we want.
  rshift32Arithmetic(Imm32(1), dest);
}

void MacroAssembler::convertWasmI31RefTo32Unsigned(Register src,
                                                   Register dest) {
  // This will either zero-extend or sign-extend the high 32 bits on 64-bit
  // platforms (see comments on invariants in MacroAssembler.h). Either case
  // is fine, as we won't use these bits.
  move32(src, dest);
  // Shift the payload back (clobbering the tag). This will zero-extend, giving
  // us the unsigned behavior we want.
  rshift32(Imm32(1), dest);
}
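// Worked example (sketch): truncate32ToWasmI31Ref turns the int32 5 into
// (5 << 1) | I31 = 0xB. convertWasmI31RefTo32Signed shifts back
// arithmetically (0xB >> 1 = 5, preserving the sign for negative payloads),
// while the Unsigned variant uses a logical shift.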
void MacroAssembler::branchValueConvertsToWasmAnyRefInline(
    ValueOperand src, Register scratchInt, FloatRegister scratchFloat,
    Label* label) {
  // We can convert objects, strings, 31-bit integers and null without boxing.
  Label checkInt32;
  Label checkDouble;
  Label fallthrough;
  ScratchTagScope tag(*this, src);
  splitTagForTest(src, tag);
  branchTestObject(Assembler::Equal, tag, label);
  branchTestString(Assembler::Equal, tag, label);
  branchTestNull(Assembler::Equal, tag, label);
  branchTestInt32(Assembler::Equal, tag, &checkInt32);
  branchTestDouble(Assembler::Equal, tag, &checkDouble);
  jump(&fallthrough);

  bind(&checkInt32);
  unboxInt32(src, scratchInt);
  branch32(Assembler::GreaterThan, scratchInt, Imm32(wasm::AnyRef::MaxI31Value),
           &fallthrough);
  branch32(Assembler::LessThan, scratchInt, Imm32(wasm::AnyRef::MinI31Value),
           &fallthrough);
  jump(label);

  bind(&checkDouble);
  {
    ScratchTagScopeRelease _(&tag);
    convertValueToInt32(src, scratchFloat, scratchInt, &fallthrough, true,
                        IntConversionInputKind::NumbersOnly);
  }
  branch32(Assembler::GreaterThan, scratchInt, Imm32(wasm::AnyRef::MaxI31Value),
           &fallthrough);
  branch32(Assembler::LessThan, scratchInt, Imm32(wasm::AnyRef::MinI31Value),
           &fallthrough);
  jump(label);

  bind(&fallthrough);
}
void MacroAssembler::convertValueToWasmAnyRef(ValueOperand src, Register dest,
                                              FloatRegister scratchFloat,
                                              Label* oolConvert) {
  Label doubleValue, int32Value, nullValue, stringValue, objectValue, done;
  {
    ScratchTagScope tag(*this, src);
    splitTagForTest(src, tag);
    branchTestObject(Assembler::Equal, tag, &objectValue);
    branchTestString(Assembler::Equal, tag, &stringValue);
    branchTestNull(Assembler::Equal, tag, &nullValue);
    branchTestInt32(Assembler::Equal, tag, &int32Value);
    branchTestDouble(Assembler::Equal, tag, &doubleValue);
    jump(oolConvert);
  }

  bind(&doubleValue);
  convertValueToInt32(src, scratchFloat, dest, oolConvert, true,
                      IntConversionInputKind::NumbersOnly);
  branch32(Assembler::GreaterThan, dest, Imm32(wasm::AnyRef::MaxI31Value),
           oolConvert);
  branch32(Assembler::LessThan, dest, Imm32(wasm::AnyRef::MinI31Value),
           oolConvert);
  lshiftPtr(Imm32(1), dest);
  orPtr(Imm32((int32_t)wasm::AnyRefTag::I31), dest);
  jump(&done);

  bind(&int32Value);
  unboxInt32(src, dest);
  branch32(Assembler::GreaterThan, dest, Imm32(wasm::AnyRef::MaxI31Value),
           oolConvert);
  branch32(Assembler::LessThan, dest, Imm32(wasm::AnyRef::MinI31Value),
           oolConvert);
  lshiftPtr(Imm32(1), dest);
  orPtr(Imm32((int32_t)wasm::AnyRefTag::I31), dest);
  jump(&done);

  bind(&nullValue);
  static_assert(wasm::AnyRef::NullRefValue == 0);
  xorPtr(dest, dest);
  jump(&done);

  bind(&stringValue);
  unboxString(src, dest);
  orPtr(Imm32((int32_t)wasm::AnyRefTag::String), dest);
  jump(&done);

  bind(&objectValue);
  unboxObject(src, dest);

  bind(&done);
}
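// Boxing rules implemented above (sketch): an AnyRef is a tagged pointer
// word, where
//
//   object  -> pointer used as-is (no tag)
//   string  -> pointer | AnyRefTag::String
//   int32 in [MinI31Value, MaxI31Value] -> (value << 1) | AnyRefTag::I31
//   null    -> 0 (AnyRef::NullRefValue)
//
// Doubles participate only if they convert exactly to an i31-range int32;
// everything else goes to the out-of-line conversion path.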
void MacroAssembler::convertObjectToWasmAnyRef(Register src, Register dest) {
  // JS objects are represented without any tagging.
  movePtr(src, dest);
}

void MacroAssembler::convertStringToWasmAnyRef(Register src, Register dest) {
  // JS strings require a tag.
  movePtr(src, dest);
  orPtr(Imm32(int32_t(wasm::AnyRefTag::String)), dest);
}

void MacroAssembler::branchObjectIsWasmGcObject(bool isGcObject, Register src,
                                                Register scratch,
                                                Label* label) {
  constexpr uint32_t ShiftedMask = (Shape::kindMask() << Shape::kindShift());
  constexpr uint32_t ShiftedKind =
      (uint32_t(Shape::Kind::WasmGC) << Shape::kindShift());
  MOZ_ASSERT(src != scratch);

  loadPtr(Address(src, JSObject::offsetOfShape()), scratch);
  load32(Address(scratch, Shape::offsetOfImmutableFlags()), scratch);
  and32(Imm32(ShiftedMask), scratch);
  branch32(isGcObject ? Assembler::Equal : Assembler::NotEqual, scratch,
           Imm32(ShiftedKind), label);
}
void MacroAssembler::wasmNewStructObject(Register instance, Register result,
                                         Register typeDefData, Register temp1,
                                         Register temp2, Label* fail,
                                         gc::AllocKind allocKind,
                                         bool zeroFields) {
  // Don't execute the inline path if GC probes are built in.
#ifdef JS_GC_PROBES
  jump(fail);
#endif

#ifdef JS_GC_ZEAL
  // Don't execute the inline path if gc zeal or tracing are active.
  loadPtr(Address(instance, wasm::Instance::offsetOfAddressOfGCZealModeBits()),
          temp1);
  loadPtr(Address(temp1, 0), temp1);
  branch32(Assembler::NotEqual, temp1, Imm32(0), fail);
#endif

  // If the alloc site is long lived, immediately fall back to the OOL path,
  // which will handle that.
  loadPtr(Address(typeDefData, wasm::TypeDefInstanceData::offsetOfAllocSite()),
          temp1);
  branchTestPtr(Assembler::NonZero,
                Address(temp1, gc::AllocSite::offsetOfScriptAndState()),
                Imm32(gc::AllocSite::LONG_LIVED_BIT), fail);

  size_t sizeBytes = gc::Arena::thingSize(allocKind);
  wasmBumpPointerAllocate(instance, result, typeDefData, temp1, temp2, fail,
                          sizeBytes);
  loadPtr(Address(typeDefData, wasm::TypeDefInstanceData::offsetOfShape()),
          temp1);
  loadPtr(Address(typeDefData,
                  wasm::TypeDefInstanceData::offsetOfSuperTypeVector()),
          temp2);
  storePtr(temp1, Address(result, WasmStructObject::offsetOfShape()));
  storePtr(temp2, Address(result, WasmStructObject::offsetOfSuperTypeVector()));
  storePtr(ImmWord(0),
           Address(result, WasmStructObject::offsetOfOutlineData()));

  if (zeroFields) {
    MOZ_ASSERT(sizeBytes % sizeof(void*) == 0);
    for (size_t i = WasmStructObject::offsetOfInlineData(); i < sizeBytes;
         i += sizeof(void*)) {
      storePtr(ImmWord(0), Address(result, i));
    }
  }
}
// This function handles nursery allocations for wasm. For JS, see
// MacroAssembler::bumpPointerAllocate.
void MacroAssembler::wasmBumpPointerAllocate(Register instance, Register result,
                                             Register typeDefData,
                                             Register temp1, Register temp2,
                                             Label* fail, uint32_t size) {
  MOZ_ASSERT(size >= gc::MinCellSize);

  uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
  MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  int32_t endOffset = Nursery::offsetOfCurrentEndFromPosition();

  // Bail to OOL code if the alloc site needs to be initialized. Keep allocCount
  // in temp2 for later.
  computeEffectiveAddress(
      Address(typeDefData, wasm::TypeDefInstanceData::offsetOfAllocSite()),
      temp1);
  load32(Address(temp1, gc::AllocSite::offsetOfNurseryAllocCount()), temp2);
  branch32(Assembler::Equal, temp2, Imm32(0), fail);

  // Bump allocate in the nursery, bailing if there is not enough room.
  loadPtr(Address(instance, wasm::Instance::offsetOfAddressOfNurseryPosition()),
          temp1);
  loadPtr(Address(temp1, 0), result);
  addPtr(Imm32(totalSize), result);
  branchPtr(Assembler::Below, Address(temp1, endOffset), result, fail);
  storePtr(result, Address(temp1, 0));
  subPtr(Imm32(size), result);

  // Increment the alloc count in the allocation site and store pointer in the
  // nursery cell header. See NurseryCellHeader::MakeValue.
  computeEffectiveAddress(
      Address(typeDefData, wasm::TypeDefInstanceData::offsetOfAllocSite()),
      temp1);
  add32(Imm32(1), temp2);
  store32(temp2, Address(temp1, gc::AllocSite::offsetOfNurseryAllocCount()));
  // Because JS::TraceKind::Object is zero, there is no need to explicitly set
  // it in the nursery cell header.
  static_assert(int(JS::TraceKind::Object) == 0);
  storePtr(temp1, Address(result, -js::Nursery::nurseryCellHeaderSize()));
}
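// Bump allocation above, in outline (sketch):
//
//   newPos = *nurseryPosition + totalSize;       // bump
//   if (nurseryEnd < newPos) goto fail;          // not enough room
//   *nurseryPosition = newPos;
//   result = newPos - size;                      // start of the new cell
//   result[-nurseryCellHeaderSize] = allocSite;  // header; TraceKind::Object
//                                                // is 0, so no tag needed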
// Unboxing is branchy and contorted because of Spectre mitigations - we don't
// have enough scratch registers. Were it not for the spectre mitigations in
// branchTestObjClass, the branch nest below would be restructured significantly
// by inverting branches and using fewer registers.

// Unbox an anyref in src (clobbering src in the process) and then re-box it as
// a Value in *dst. See the definition of AnyRef for a discussion of pointer
// representation.
void MacroAssembler::convertWasmAnyRefToValue(Register instance, Register src,
                                              ValueOperand dst,
                                              Register scratch) {
  MOZ_ASSERT(src != scratch);
#if JS_BITS_PER_WORD == 32
  MOZ_ASSERT(dst.typeReg() != scratch);
  MOZ_ASSERT(dst.payloadReg() != scratch);
#else
  MOZ_ASSERT(dst.valueReg() != scratch);
#endif

  Label isI31, isObjectOrNull, isObject, isWasmValueBox, done;

  // Check first whether this is an i31 value.
  branchTestPtr(Assembler::NonZero, src, Imm32(int32_t(wasm::AnyRefTag::I31)),
                &isI31);
  // Then check for the object or null tag.
  branchTestPtr(Assembler::Zero, src, Imm32(wasm::AnyRef::TagMask),
                &isObjectOrNull);

  // If we're not i31, object, or null, we must be a string.
  rshiftPtr(Imm32(wasm::AnyRef::TagShift), src);
  lshiftPtr(Imm32(wasm::AnyRef::TagShift), src);
  moveValue(TypedOrValueRegister(MIRType::String, AnyRegister(src)), dst);
  jump(&done);

  // This is an i31 value, convert to an int32 JS value.
  bind(&isI31);
  convertWasmI31RefTo32Signed(src, src);
  moveValue(TypedOrValueRegister(MIRType::Int32, AnyRegister(src)), dst);
  jump(&done);

  // Check for the null value.
  bind(&isObjectOrNull);
  branchTestPtr(Assembler::NonZero, src, src, &isObject);
  moveValue(NullValue(), dst);
  jump(&done);

  // Otherwise we must be a non-null object. We next check whether it's
  // storing a boxed value.
  bind(&isObject);
  // The type test will clear src if the test fails, so store early.
  moveValue(TypedOrValueRegister(MIRType::Object, AnyRegister(src)), dst);
  // Spectre mitigations: see comment above about efficiency.
  branchTestObjClass(Assembler::Equal, src,
                     Address(instance, wasm::Instance::offsetOfValueBoxClass()),
                     scratch, src, &isWasmValueBox);
  jump(&done);

  // This is a boxed JS value, unbox it.
  bind(&isWasmValueBox);
  loadValue(Address(src, wasm::AnyRef::valueBoxOffsetOfValue()), dst);

  bind(&done);
}
void MacroAssembler::convertWasmAnyRefToValue(Register instance, Register src,
                                              const Address& dst,
                                              Register scratch) {
  MOZ_ASSERT(src != scratch);

  Label isI31, isObjectOrNull, isObject, isWasmValueBox, done;

  // Check first whether this is an i31 value.
  branchTestPtr(Assembler::NonZero, src, Imm32(int32_t(wasm::AnyRefTag::I31)),
                &isI31);
  // Then check for the object or null tag.
  branchTestPtr(Assembler::Zero, src, Imm32(wasm::AnyRef::TagMask),
                &isObjectOrNull);

  // If we're not i31, object, or null, we must be a string.
  rshiftPtr(Imm32(wasm::AnyRef::TagShift), src);
  lshiftPtr(Imm32(wasm::AnyRef::TagShift), src);
  storeValue(JSVAL_TYPE_STRING, src, dst);
  jump(&done);

  // This is an i31 value, convert to an int32 JS value.
  bind(&isI31);
  convertWasmI31RefTo32Signed(src, src);
  storeValue(JSVAL_TYPE_INT32, src, dst);
  jump(&done);

  // Check for the null value.
  bind(&isObjectOrNull);
  branchTestPtr(Assembler::NonZero, src, src, &isObject);
  storeValue(NullValue(), dst);
  jump(&done);

  // Otherwise we must be a non-null object. We next check whether it's
  // storing a boxed value.
  bind(&isObject);
  // The type test will clear src if the test fails, so store early.
  storeValue(JSVAL_TYPE_OBJECT, src, dst);
  // Spectre mitigations: see comment above about efficiency.
  branchTestObjClass(Assembler::Equal, src,
                     Address(instance, wasm::Instance::offsetOfValueBoxClass()),
                     scratch, src, &isWasmValueBox);
  jump(&done);

  // This is a boxed JS value, unbox it.
  bind(&isWasmValueBox);
  copy64(Address(src, wasm::AnyRef::valueBoxOffsetOfValue()), dst, scratch);

  bind(&done);
}
void MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc) {
  CodeOffset offset = nopPatchableToCall();
  append(desc, offset);
}
void MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type,
                                            Register temp1, Register temp2,
                                            Register temp3, Label* noBarrier) {
  MOZ_ASSERT(temp1 != PreBarrierReg);
  MOZ_ASSERT(temp2 != PreBarrierReg);
  MOZ_ASSERT(temp3 != PreBarrierReg);

  // Load the GC thing in temp1.
  if (type == MIRType::Value) {
    unboxGCThingForGCBarrier(Address(PreBarrierReg, 0), temp1);
  } else if (type == MIRType::WasmAnyRef) {
    unboxWasmAnyRefGCThingForGCBarrier(Address(PreBarrierReg, 0), temp1);
  } else {
    MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
               type == MIRType::Shape);
    loadPtr(Address(PreBarrierReg, 0), temp1);
  }

#ifdef DEBUG
  // The caller should have checked for null pointers.
  Label nonZero;
  branchTestPtr(Assembler::NonZero, temp1, temp1, &nonZero);
  assumeUnreachable("JIT pre-barrier: unexpected nullptr");
  bind(&nonZero);
#endif

  // Load the chunk address in temp2.
  movePtr(temp1, temp2);
  andPtr(Imm32(int32_t(~gc::ChunkMask)), temp2);

  // If the GC thing is in the nursery, we don't need to barrier it.
  if (type == MIRType::Value || type == MIRType::Object ||
      type == MIRType::String || type == MIRType::WasmAnyRef) {
    branchPtr(Assembler::NotEqual, Address(temp2, gc::ChunkStoreBufferOffset),
              ImmWord(0), noBarrier);
  } else {
#ifdef DEBUG
    Label isTenured;
    branchPtr(Assembler::Equal, Address(temp2, gc::ChunkStoreBufferOffset),
              ImmWord(0), &isTenured);
    assumeUnreachable("JIT pre-barrier: unexpected nursery pointer");
    bind(&isTenured);
#endif
  }

  // Determine the bit index and store in temp1.
  //
  // bit = (addr & js::gc::ChunkMask) / js::gc::CellBytesPerMarkBit +
  //       static_cast<uint32_t>(colorBit);
  static_assert(gc::CellBytesPerMarkBit == 8,
                "Calculation below relies on this");
  static_assert(size_t(gc::ColorBit::BlackBit) == 0,
                "Calculation below relies on this");

  andPtr(Imm32(gc::ChunkMask), temp1);
  rshiftPtr(Imm32(3), temp1);

  static_assert(gc::MarkBitmapWordBits == JS_BITS_PER_WORD,
                "Calculation below relies on this");

  // Load the bitmap word in temp2.
  //
  // word = chunk.bitmap[bit / MarkBitmapWordBits];

  // Fold the adjustment for the fact that arenas don't start at the beginning
  // of the chunk into the offset to the chunk bitmap.
  const size_t firstArenaAdjustment = gc::FirstArenaAdjustmentBits / CHAR_BIT;
  const intptr_t offset =
      intptr_t(gc::ChunkMarkBitmapOffset) - intptr_t(firstArenaAdjustment);

  movePtr(temp1, temp3);
#if JS_BITS_PER_WORD == 64
  rshiftPtr(Imm32(6), temp1);
  loadPtr(BaseIndex(temp2, temp1, TimesEight, offset), temp2);
#else
  rshiftPtr(Imm32(5), temp1);
  loadPtr(BaseIndex(temp2, temp1, TimesFour, offset), temp2);
#endif

  // Load the mask in temp1.
  //
  // mask = uintptr_t(1) << (bit % MarkBitmapWordBits);
  andPtr(Imm32(gc::MarkBitmapWordBits - 1), temp3);
  move32(Imm32(1), temp1);
#ifdef JS_CODEGEN_X64
  MOZ_ASSERT(temp3 == rcx);
  shlq_cl(temp1);
#elif JS_CODEGEN_X86
  MOZ_ASSERT(temp3 == ecx);
  shll_cl(temp1);
#elif JS_CODEGEN_ARM
  ma_lsl(temp3, temp1, temp1);
#elif JS_CODEGEN_ARM64
  Lsl(ARMRegister(temp1, 64), ARMRegister(temp1, 64), ARMRegister(temp3, 64));
#elif JS_CODEGEN_MIPS32
  ma_sll(temp1, temp1, temp3);
#elif JS_CODEGEN_MIPS64
  ma_dsll(temp1, temp1, temp3);
#elif JS_CODEGEN_LOONG64
  as_sll_d(temp1, temp1, temp3);
#elif JS_CODEGEN_RISCV64
  sll(temp1, temp1, temp3);
#elif JS_CODEGEN_WASM32
  MOZ_CRASH();
#elif JS_CODEGEN_NONE
  MOZ_CRASH();
#else
# error "Unknown architecture"
#endif

  // No barrier is needed if the bit is set, |word & mask != 0|.
  branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier);
}
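// Mark-bitmap arithmetic above, spelled out (sketch): with
// CellBytesPerMarkBit == 8, ColorBit::BlackBit == 0, and
// MarkBitmapWordBits == JS_BITS_PER_WORD,
//
//   bit  = (addr & ChunkMask) >> 3;
//   word = bitmap[bit >> 6];                 // >> 5 on 32-bit platforms
//   mask = uintptr_t(1) << (bit & (MarkBitmapWordBits - 1));
//   if (word & mask) goto noBarrier;         // cell already marked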
// ========================================================================
// JS atomic operations.

void MacroAssembler::atomicIsLockFreeJS(Register value, Register output) {
  // Keep this in sync with isLockfreeJS() in jit/AtomicOperations.h.
  static_assert(AtomicOperations::isLockfreeJS(1));  // Implementation artifact
  static_assert(AtomicOperations::isLockfreeJS(2));  // Implementation artifact
  static_assert(AtomicOperations::isLockfreeJS(4));  // Spec requirement
  static_assert(AtomicOperations::isLockfreeJS(8));  // Implementation artifact

  Label done;
  move32(Imm32(1), output);
  branch32(Assembler::Equal, value, Imm32(8), &done);
  branch32(Assembler::Equal, value, Imm32(4), &done);
  branch32(Assembler::Equal, value, Imm32(2), &done);
  branch32(Assembler::Equal, value, Imm32(1), &done);
  move32(Imm32(0), output);
  bind(&done);
}
7016 // ========================================================================
7017 // Spectre Mitigations.
7019 void MacroAssembler::spectreMaskIndex32(Register index
, Register length
,
7021 MOZ_ASSERT(JitOptions
.spectreIndexMasking
);
7022 MOZ_ASSERT(length
!= output
);
7023 MOZ_ASSERT(index
!= output
);
7025 move32(Imm32(0), output
);
7026 cmp32Move32(Assembler::Below
, index
, length
, index
, output
);
void MacroAssembler::spectreMaskIndex32(Register index, const Address& length,
                                        Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != output);
  MOZ_ASSERT(index != output);

  move32(Imm32(0), output);
  cmp32Move32(Assembler::Below, index, length, index, output);
}

void MacroAssembler::spectreMaskIndexPtr(Register index, Register length,
                                         Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(length != output);
  MOZ_ASSERT(index != output);

  movePtr(ImmWord(0), output);
  cmpPtrMovePtr(Assembler::Below, index, length, index, output);
}

void MacroAssembler::spectreMaskIndexPtr(Register index, const Address& length,
                                         Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != output);
  MOZ_ASSERT(index != output);

  movePtr(ImmWord(0), output);
  cmpPtrMovePtr(Assembler::Below, index, length, index, output);
}

void MacroAssembler::boundsCheck32PowerOfTwo(Register index, uint32_t length,
                                             Label* failure) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(length));
  branch32(Assembler::AboveOrEqual, index, Imm32(length), failure);

  // Note: it's fine to clobber the input register, as this is a no-op: it
  // only affects speculative execution.
  if (JitOptions.spectreIndexMasking) {
    and32(Imm32(length - 1), index);
  }
}

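// Because |length| is a power of two, |index & (length - 1)| keeps even a
// mispredicted, speculatively-executed access within bounds. For example,
// with length == 8 a speculative index of 9 reads element 1 instead of
// out-of-bounds memory.
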
void MacroAssembler::loadWasmPinnedRegsFromInstance(
    mozilla::Maybe<wasm::BytecodeOffset> trapOffset) {
#ifdef WASM_HAS_HEAPREG
  static_assert(wasm::Instance::offsetOfMemory0Base() < 4096,
                "We count only on the low page being inaccessible");
  FaultingCodeOffset fco = loadPtr(
      Address(InstanceReg, wasm::Instance::offsetOfMemory0Base()), HeapReg);
  if (trapOffset) {
    append(
        wasm::Trap::IndirectCallToNull,
        wasm::TrapSite(wasm::TrapMachineInsnForLoadWord(), fco, *trapOffset));
  }
#else
  MOZ_ASSERT(!trapOffset);
#endif
}

//}}} check_macroassembler_style

void MacroAssembler::debugAssertCanonicalInt32(Register r) {
#ifdef DEBUG
  if (!js::jit::JitOptions.lessDebugCode) {
#  if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
    Label ok;
    branchPtr(Assembler::BelowOrEqual, r, ImmWord(UINT32_MAX), &ok);
    breakpoint();
    bind(&ok);
#  elif defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
    Label ok;
    ScratchRegisterScope scratch(asMasm());
    move32SignExtendToPtr(r, scratch);
    branchPtr(Assembler::Equal, r, scratch, &ok);
    breakpoint();
    bind(&ok);
#  else
    MOZ_CRASH("IMPLEMENT ME");
#  endif
  }
#endif
}

void MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
  memoryBarrier(sync.barrierBefore);
}

void MacroAssembler::memoryBarrierAfter(const Synchronization& sync) {
  memoryBarrier(sync.barrierAfter);
}

void MacroAssembler::debugAssertIsObject(const ValueOperand& val) {
#ifdef DEBUG
  Label ok;
  branchTestObject(Assembler::Equal, val, &ok);
  assumeUnreachable("Expected an object!");
  bind(&ok);
#endif
}

void MacroAssembler::debugAssertObjHasFixedSlots(Register obj,
                                                 Register scratch) {
#ifdef DEBUG
  Label hasFixedSlots;
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  branchTest32(Assembler::NonZero,
               Address(scratch, Shape::offsetOfImmutableFlags()),
               Imm32(NativeShape::fixedSlotsMask()), &hasFixedSlots);
  assumeUnreachable("Expected a fixed slot");
  bind(&hasFixedSlots);
#endif
}

void MacroAssembler::debugAssertObjectHasClass(Register obj, Register scratch,
                                               const JSClass* clasp) {
#ifdef DEBUG
  Label done;
  branchTestObjClassNoSpectreMitigations(Assembler::Equal, obj, clasp, scratch,
                                         &done);
  assumeUnreachable("Class check failed");
  bind(&done);
#endif
}

void MacroAssembler::debugAssertGCThingIsTenured(Register ptr, Register temp) {
#ifdef DEBUG
  Label done;
  branchPtrInNurseryChunk(Assembler::NotEqual, ptr, temp, &done);
  assumeUnreachable("Expected a tenured pointer");
  bind(&done);
#endif
}

void MacroAssembler::branchArrayIsNotPacked(Register array, Register temp1,
                                            Register temp2, Label* label) {
  loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);

  // Test length == initializedLength.
  Address initLength(temp1, ObjectElements::offsetOfInitializedLength());
  load32(Address(temp1, ObjectElements::offsetOfLength()), temp2);
  branch32(Assembler::NotEqual, initLength, temp2, label);

  // Test the NON_PACKED flag.
  Address flags(temp1, ObjectElements::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::NON_PACKED),
               label);
}

void MacroAssembler::setIsPackedArray(Register obj, Register output,
                                      Register temp) {
  // Ensure it's an ArrayObject.
  Label notPackedArray;
  branchTestObjClass(Assembler::NotEqual, obj, &ArrayObject::class_, temp, obj,
                     &notPackedArray);

  branchArrayIsNotPacked(obj, temp, output, &notPackedArray);

  Label done;
  move32(Imm32(1), output);
  jump(&done);

  bind(&notPackedArray);
  move32(Imm32(0), output);

  bind(&done);
}

void MacroAssembler::packedArrayPop(Register array, ValueOperand output,
                                    Register temp1, Register temp2,
                                    Label* fail) {
  // Load obj->elements in temp1.
  loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);

  // Check flags.
  static constexpr uint32_t UnhandledFlags =
      ObjectElements::Flags::NON_PACKED |
      ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH |
      ObjectElements::Flags::NOT_EXTENSIBLE |
      ObjectElements::Flags::MAYBE_IN_ITERATION;
  Address flags(temp1, ObjectElements::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(UnhandledFlags), fail);

  // Load length in temp2. Ensure length == initializedLength.
  Address lengthAddr(temp1, ObjectElements::offsetOfLength());
  Address initLengthAddr(temp1, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp2);
  branch32(Assembler::NotEqual, initLengthAddr, temp2, fail);

  // Result is |undefined| if length == 0.
  Label notEmpty, done;
  branchTest32(Assembler::NonZero, temp2, temp2, &notEmpty);
  {
    moveValue(UndefinedValue(), output);
    jump(&done);
  }

  bind(&notEmpty);

  // Load the last element.
  sub32(Imm32(1), temp2);
  BaseObjectElementIndex elementAddr(temp1, temp2);
  loadValue(elementAddr, output);

  // Pre-barrier the element because we're removing it from the array.
  EmitPreBarrier(*this, elementAddr, MIRType::Value);

  // Update length and initializedLength.
  store32(temp2, lengthAddr);
  store32(temp2, initLengthAddr);

  bind(&done);
}

void MacroAssembler::packedArrayShift(Register array, ValueOperand output,
                                      Register temp1, Register temp2,
                                      LiveRegisterSet volatileRegs,
                                      Label* fail) {
  // Load obj->elements in temp1.
  loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);

  // Check flags.
  static constexpr uint32_t UnhandledFlags =
      ObjectElements::Flags::NON_PACKED |
      ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH |
      ObjectElements::Flags::NOT_EXTENSIBLE |
      ObjectElements::Flags::MAYBE_IN_ITERATION;
  Address flags(temp1, ObjectElements::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(UnhandledFlags), fail);

  // Load length in temp2. Ensure length == initializedLength.
  Address lengthAddr(temp1, ObjectElements::offsetOfLength());
  Address initLengthAddr(temp1, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp2);
  branch32(Assembler::NotEqual, initLengthAddr, temp2, fail);

  // Result is |undefined| if length == 0.
  Label notEmpty, done;
  branchTest32(Assembler::NonZero, temp2, temp2, &notEmpty);
  {
    moveValue(UndefinedValue(), output);
    jump(&done);
  }

  bind(&notEmpty);

  // Load the first element.
  Address elementAddr(temp1, 0);
  loadValue(elementAddr, output);

  // Move the other elements and update the initializedLength/length. This will
  // also trigger pre-barriers.
  {
    // Ensure output is in volatileRegs. Don't preserve temp1 and temp2.
    volatileRegs.takeUnchecked(temp1);
    volatileRegs.takeUnchecked(temp2);
    if (output.hasVolatileReg()) {
      volatileRegs.addUnchecked(output);
    }

    PushRegsInMask(volatileRegs);

    using Fn = void (*)(ArrayObject* arr);
    setupUnalignedABICall(temp1);
    passABIArg(array);
    callWithABI<Fn, ArrayShiftMoveElements>();

    PopRegsInMask(volatileRegs);
  }

  bind(&done);
}

void MacroAssembler::loadArgumentsObjectElement(Register obj, Register index,
                                                ValueOperand output,
                                                Register temp, Label* fail) {
  Register temp2 = output.scratchReg();

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Ensure no overridden elements.
  branchTest32(Assembler::NonZero, temp,
               Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);

  // Bounds check.
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
  spectreBoundsCheck32(index, temp, temp2, fail);

  // Load ArgumentsData.
  loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), temp);

  // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
  BaseValueIndex argValue(temp, index, ArgumentsData::offsetOfArgs());
  branchTestMagic(Assembler::Equal, argValue, fail);
  loadValue(argValue, output);
}

void MacroAssembler::loadArgumentsObjectElementHole(Register obj,
                                                    Register index,
                                                    ValueOperand output,
                                                    Register temp,
                                                    Label* fail) {
  Register temp2 = output.scratchReg();

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Ensure no overridden elements.
  branchTest32(Assembler::NonZero, temp,
               Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);

  // Bounds check.
  Label outOfBounds, done;
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
  spectreBoundsCheck32(index, temp, temp2, &outOfBounds);

  // Load ArgumentsData.
  loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), temp);

  // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
  BaseValueIndex argValue(temp, index, ArgumentsData::offsetOfArgs());
  branchTestMagic(Assembler::Equal, argValue, fail);
  loadValue(argValue, output);
  jump(&done);

  bind(&outOfBounds);
  branch32(Assembler::LessThan, index, Imm32(0), fail);
  moveValue(UndefinedValue(), output);

  bind(&done);
}

void MacroAssembler::loadArgumentsObjectElementExists(
    Register obj, Register index, Register output, Register temp,
    Label* fail) {
  // Ensure the index is non-negative.
  branch32(Assembler::LessThan, index, Imm32(0), fail);

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Ensure no overridden or deleted elements.
  branchTest32(Assembler::NonZero, temp,
               Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);

  // Compare index against the length.
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
  cmp32Set(Assembler::LessThan, index, temp, output);
}

void MacroAssembler::loadArgumentsObjectLength(Register obj, Register output,
                                               Label* fail) {
  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()),
             output);

  // Test if length has been overridden.
  branchTest32(Assembler::NonZero, output,
               Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT), fail);

  // Shift out arguments length and return it.
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), output);
}

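// The initial-length slot packs the length together with the
// override/forwarded flag bits in the low PACKED_BITS_COUNT bits, so
// decoding is (sketch):
//
//   uint32_t length = initialLengthSlot >> ArgumentsObject::PACKED_BITS_COUNT;
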
void MacroAssembler::branchTestArgumentsObjectFlags(Register obj,
                                                    Register temp,
                                                    uint32_t flags,
                                                    Condition cond,
                                                    Label* label) {
  MOZ_ASSERT((flags & ~ArgumentsObject::PACKED_BITS_MASK) == 0);

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Test flags.
  branchTest32(cond, temp, Imm32(flags), label);
}

static constexpr bool ValidateSizeRange(Scalar::Type from, Scalar::Type to) {
  for (Scalar::Type type = from; type < to; type = Scalar::Type(type + 1)) {
    if (TypedArrayElemSize(type) != TypedArrayElemSize(from)) {
      return false;
    }
  }
  return true;
}

void MacroAssembler::typedArrayElementSize(Register obj, Register output) {
  static_assert(Scalar::Int8 == 0, "Int8 is the first typed array class");
  static_assert(
      (Scalar::BigUint64 - Scalar::Int8) == Scalar::MaxTypedArrayViewType - 1,
      "BigUint64 is the last typed array class");

  Label one, two, four, eight, done;

  // TODO(anba): Handle resizable TypedArrays
  loadObjClassUnsafe(obj, output);

  static_assert(ValidateSizeRange(Scalar::Int8, Scalar::Int16),
                "element size is one in [Int8, Int16)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::Int16)), &one);

  static_assert(ValidateSizeRange(Scalar::Int16, Scalar::Int32),
                "element size is two in [Int16, Int32)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::Int32)), &two);

  static_assert(ValidateSizeRange(Scalar::Int32, Scalar::Float64),
                "element size is four in [Int32, Float64)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::Float64)), &four);

  static_assert(ValidateSizeRange(Scalar::Float64, Scalar::Uint8Clamped),
                "element size is eight in [Float64, Uint8Clamped)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::Uint8Clamped)),
            &eight);

  static_assert(ValidateSizeRange(Scalar::Uint8Clamped, Scalar::BigInt64),
                "element size is one in [Uint8Clamped, BigInt64)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::BigInt64)), &one);

  static_assert(
      ValidateSizeRange(Scalar::BigInt64, Scalar::MaxTypedArrayViewType),
      "element size is eight in [BigInt64, MaxTypedArrayViewType)");
  // Fall through for BigInt64 and BigUint64

  bind(&eight);
  move32(Imm32(8), output);
  jump(&done);

  bind(&four);
  move32(Imm32(4), output);
  jump(&done);

  bind(&two);
  move32(Imm32(2), output);
  jump(&done);

  bind(&one);
  move32(Imm32(1), output);

  bind(&done);
}

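// The chain of branches above relies on the typed array JSClasses being laid
// out contiguously in Scalar::Type order (the ValidateSizeRange
// static_asserts check this), so one unsigned comparison on the class
// pointer classifies a whole run of element sizes at a time.
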
void MacroAssembler::branchIfClassIsNotTypedArray(Register clasp,
                                                  Label* notTypedArray) {
  // TODO(anba): Handle resizable TypedArrays
  static_assert(Scalar::Int8 == 0, "Int8 is the first typed array class");
  const JSClass* firstTypedArrayClass =
      TypedArrayObject::classForType(Scalar::Int8);

  static_assert(
      (Scalar::BigUint64 - Scalar::Int8) == Scalar::MaxTypedArrayViewType - 1,
      "BigUint64 is the last typed array class");
  const JSClass* lastTypedArrayClass =
      TypedArrayObject::classForType(Scalar::BigUint64);

  branchPtr(Assembler::Below, clasp, ImmPtr(firstTypedArrayClass),
            notTypedArray);
  branchPtr(Assembler::Above, clasp, ImmPtr(lastTypedArrayClass),
            notTypedArray);
}

void MacroAssembler::branchIfHasDetachedArrayBuffer(Register obj, Register temp,
                                                    Label* label) {
  // Inline implementation of ArrayBufferViewObject::hasDetachedBuffer().

  // Load obj->elements in temp.
  loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);

  // Shared buffers can't be detached.
  Label done;
  branchTest32(Assembler::NonZero,
               Address(temp, ObjectElements::offsetOfFlags()),
               Imm32(ObjectElements::SHARED_MEMORY), &done);

  // An ArrayBufferView with a null/true buffer has never had its buffer
  // exposed, so nothing can possibly detach it.
  fallibleUnboxObject(Address(obj, ArrayBufferViewObject::bufferOffset()), temp,
                      &done);

  // Load the ArrayBuffer flags and branch if the detached flag is set.
  unboxInt32(Address(temp, ArrayBufferObject::offsetOfFlagsSlot()), temp);
  branchTest32(Assembler::NonZero, temp, Imm32(ArrayBufferObject::DETACHED),
               label);

  bind(&done);
}

void MacroAssembler::branchIfNativeIteratorNotReusable(Register ni,
                                                       Label* notReusable) {
  // See NativeIterator::isReusable.
  Address flagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());

#ifdef DEBUG
  Label niIsInitialized;
  branchTest32(Assembler::NonZero, flagsAddr,
               Imm32(NativeIterator::Flags::Initialized), &niIsInitialized);
  assumeUnreachable(
      "Expected a NativeIterator that's been completely "
      "initialized");
  bind(&niIsInitialized);
#endif

  branchTest32(Assembler::NonZero, flagsAddr,
               Imm32(NativeIterator::Flags::NotReusable), notReusable);
}

void MacroAssembler::branchNativeIteratorIndices(Condition cond, Register ni,
                                                 Register temp,
                                                 NativeIteratorIndices kind,
                                                 Label* label) {
  Address iterFlagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());
  load32(iterFlagsAddr, temp);
  and32(Imm32(NativeIterator::IndicesMask), temp);
  uint32_t shiftedKind = uint32_t(kind) << NativeIterator::IndicesShift;
  branch32(cond, temp, Imm32(shiftedKind), label);
}

static void LoadNativeIterator(MacroAssembler& masm, Register obj,
                               Register dest) {
  MOZ_ASSERT(obj != dest);

#ifdef DEBUG
  // Assert we have a PropertyIteratorObject.
  Label ok;
  masm.branchTestObjClass(Assembler::Equal, obj,
                          &PropertyIteratorObject::class_, dest, obj, &ok);
  masm.assumeUnreachable("Expected PropertyIteratorObject!");
  masm.bind(&ok);
#endif

  // Load NativeIterator object.
  Address slotAddr(obj, PropertyIteratorObject::offsetOfIteratorSlot());
  masm.loadPrivate(slotAddr, dest);
}

// The ShapeCachePtr may be used to cache an iterator for for-in. Return that
// iterator in |dest| if:
// - the shape cache pointer exists and stores a native iterator
// - the iterator is reusable
// - the iterated object has no dense elements
// - the shapes of each object on the proto chain of |obj| match the cached
//   shapes
// - the proto chain has no dense elements
// Otherwise, jump to |failure|.
void MacroAssembler::maybeLoadIteratorFromShape(Register obj, Register dest,
                                                Register temp, Register temp2,
                                                Register temp3,
                                                Label* failure) {
  // Register usage:
  // obj: always contains the input object
  // temp: walks the obj->shape->baseshape->proto->shape->... chain
  // temp2: points to the native iterator. Incremented to walk the shapes array.
  // temp3: scratch space
  // dest: stores the resulting PropertyIteratorObject on success

  Label success;
  Register shapeAndProto = temp;
  Register nativeIterator = temp2;

  // Load ShapeCache from shape.
  loadPtr(Address(obj, JSObject::offsetOfShape()), shapeAndProto);
  loadPtr(Address(shapeAndProto, Shape::offsetOfCachePtr()), dest);

  // Check if it's an iterator.
  movePtr(dest, temp3);
  andPtr(Imm32(ShapeCachePtr::MASK), temp3);
  branch32(Assembler::NotEqual, temp3, Imm32(ShapeCachePtr::ITERATOR), failure);

  // If we've cached an iterator, |obj| must be a native object.
  Label nonNative;
  branchIfNonNativeObj(obj, temp3, &nonNative);

  // Verify that |obj| has no dense elements.
  loadPtr(Address(obj, NativeObject::offsetOfElements()), temp3);
  branch32(Assembler::NotEqual,
           Address(temp3, ObjectElements::offsetOfInitializedLength()),
           Imm32(0), failure);

  // Clear tag bits from iterator object. |dest| is now valid.
  // Load the native iterator and verify that it's reusable.
  andPtr(Imm32(~ShapeCachePtr::MASK), dest);
  LoadNativeIterator(*this, dest, nativeIterator);
  branchIfNativeIteratorNotReusable(nativeIterator, failure);

  // We have to compare the shapes in the native iterator with the shapes on the
  // proto chain to ensure the cached iterator is still valid. The shape array
  // always starts at a fixed offset from the base of the NativeIterator, so
  // instead of using an instruction outside the loop to initialize a pointer to
  // the shapes array, we can bake it into the offset and reuse the pointer to
  // the NativeIterator. We add |sizeof(Shape*)| to start at the second shape.
  // (The first shape corresponds to the object itself. We don't have to check
  // it, because we got the iterator via the shape.)
  size_t nativeIteratorProtoShapeOffset =
      NativeIterator::offsetOfFirstShape() + sizeof(Shape*);

  // Loop over the proto chain. At the head of the loop, |shape| is the shape of
  // the current object, and |iteratorShapes| points to the expected shape of
  // its proto.
  Label protoLoop;
  bind(&protoLoop);

  // Load the proto. If the proto is null, then we're done.
  loadPtr(Address(shapeAndProto, Shape::offsetOfBaseShape()), shapeAndProto);
  loadPtr(Address(shapeAndProto, BaseShape::offsetOfProto()), shapeAndProto);
  branchPtr(Assembler::Equal, shapeAndProto, ImmPtr(nullptr), &success);

  // We have guarded every shape up until this point, so we know that the proto
  // is a native object.
  branchIfNonNativeObj(shapeAndProto, temp3, &nonNative);

  // Verify that the proto has no dense elements.
  loadPtr(Address(shapeAndProto, NativeObject::offsetOfElements()), temp3);
  branch32(Assembler::NotEqual,
           Address(temp3, ObjectElements::offsetOfInitializedLength()),
           Imm32(0), failure);

  // Compare the shape of the proto to the expected shape.
  loadPtr(Address(shapeAndProto, JSObject::offsetOfShape()), shapeAndProto);
  loadPtr(Address(nativeIterator, nativeIteratorProtoShapeOffset), temp3);
  branchPtr(Assembler::NotEqual, shapeAndProto, temp3, failure);

  // Increment |iteratorShapes| and jump back to the top of the loop.
  addPtr(Imm32(sizeof(Shape*)), nativeIterator);
  jump(&protoLoop);

  bind(&nonNative);
  assumeUnreachable("Expected NativeObject in maybeLoadIteratorFromShape");

  bind(&success);
}

void MacroAssembler::iteratorMore(Register obj, ValueOperand output,
                                  Register temp) {
  Label done;
  Register outputScratch = output.scratchReg();
  LoadNativeIterator(*this, obj, outputScratch);

  // If propertyCursor_ < propertiesEnd_, load the next string and advance
  // the cursor. Otherwise return MagicValue(JS_NO_ITER_VALUE).
  Label iterDone;
  Address cursorAddr(outputScratch, NativeIterator::offsetOfPropertyCursor());
  Address cursorEndAddr(outputScratch, NativeIterator::offsetOfPropertiesEnd());
  loadPtr(cursorAddr, temp);
  branchPtr(Assembler::BelowOrEqual, cursorEndAddr, temp, &iterDone);

  // Get next string.
  loadPtr(Address(temp, 0), temp);

  // Increase the cursor.
  addPtr(Imm32(sizeof(GCPtr<JSLinearString*>)), cursorAddr);

  tagValue(JSVAL_TYPE_STRING, temp, output);
  jump(&done);

  bind(&iterDone);
  moveValue(MagicValue(JS_NO_ITER_VALUE), output);

  bind(&done);
}

void MacroAssembler::iteratorClose(Register obj, Register temp1, Register temp2,
                                   Register temp3) {
  LoadNativeIterator(*this, obj, temp1);

  // The shared iterator used for for-in with null/undefined is immutable and
  // unlinked. See NativeIterator::isEmptyIteratorSingleton.
  Label done;
  branchTest32(Assembler::NonZero,
               Address(temp1, NativeIterator::offsetOfFlagsAndCount()),
               Imm32(NativeIterator::Flags::IsEmptyIteratorSingleton), &done);

  // Clear active bit.
  and32(Imm32(~NativeIterator::Flags::Active),
        Address(temp1, NativeIterator::offsetOfFlagsAndCount()));

  // Clear objectBeingIterated.
  Address iterObjAddr(temp1, NativeIterator::offsetOfObjectBeingIterated());
  guardedCallPreBarrierAnyZone(iterObjAddr, MIRType::Object, temp2);
  storePtr(ImmPtr(nullptr), iterObjAddr);

  // Reset property cursor.
  loadPtr(Address(temp1, NativeIterator::offsetOfShapesEnd()), temp2);
  storePtr(temp2, Address(temp1, NativeIterator::offsetOfPropertyCursor()));

  // Unlink from the iterator list.
  const Register next = temp2;
  const Register prev = temp3;
  loadPtr(Address(temp1, NativeIterator::offsetOfNext()), next);
  loadPtr(Address(temp1, NativeIterator::offsetOfPrev()), prev);
  storePtr(prev, Address(next, NativeIterator::offsetOfPrev()));
  storePtr(next, Address(prev, NativeIterator::offsetOfNext()));
#ifdef DEBUG
  storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfNext()));
  storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfPrev()));
#endif

  bind(&done);
}

void MacroAssembler::registerIterator(Register enumeratorsList, Register iter,
                                      Register temp) {
  // iter->next = list
  storePtr(enumeratorsList, Address(iter, NativeIterator::offsetOfNext()));

  // iter->prev = list->prev
  loadPtr(Address(enumeratorsList, NativeIterator::offsetOfPrev()), temp);
  storePtr(temp, Address(iter, NativeIterator::offsetOfPrev()));

  // list->prev->next = iter
  storePtr(iter, Address(temp, NativeIterator::offsetOfNext()));

  // list->prev = iter
  storePtr(iter, Address(enumeratorsList, NativeIterator::offsetOfPrev()));
}

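// This is the usual insertion in front of the head of a circular
// doubly-linked list; iteratorClose() above performs the matching unlink.
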
void MacroAssembler::toHashableNonGCThing(ValueOperand value,
                                          ValueOperand result,
                                          FloatRegister tempFloat) {
  // Inline implementation of |HashableValue::setValue()|.

#ifdef DEBUG
  Label ok;
  branchTestGCThing(Assembler::NotEqual, value, &ok);
  assumeUnreachable("Unexpected GC thing");
  bind(&ok);
#endif

  Label useInput, done;
  branchTestDouble(Assembler::NotEqual, value, &useInput);
  {
    Register int32 = result.scratchReg();
    unboxDouble(value, tempFloat);

    // Normalize int32-valued doubles to int32 and negative zero to +0.
    Label canonicalize;
    convertDoubleToInt32(tempFloat, int32, &canonicalize, false);

    tagValue(JSVAL_TYPE_INT32, int32, result);
    jump(&done);

    bind(&canonicalize);

    // Normalize the sign bit of a NaN.
    branchDouble(Assembler::DoubleOrdered, tempFloat, tempFloat, &useInput);
    moveValue(JS::NaNValue(), result);
    jump(&done);
  }

  bind(&useInput);
  moveValue(value, result);

  bind(&done);
}

void MacroAssembler::toHashableValue(ValueOperand value, ValueOperand result,
                                     FloatRegister tempFloat,
                                     Label* atomizeString, Label* tagString) {
  // Inline implementation of |HashableValue::setValue()|.

  ScratchTagScope tag(*this, value);
  splitTagForTest(value, tag);

  Label notString, useInput, done;
  branchTestString(Assembler::NotEqual, tag, &notString);
  {
    ScratchTagScopeRelease _(&tag);

    Register str = result.scratchReg();
    unboxString(value, str);

    branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
                 Imm32(JSString::ATOM_BIT), &useInput);

    jump(atomizeString);
    bind(tagString);

    tagValue(JSVAL_TYPE_STRING, str, result);
    jump(&done);
  }
  bind(&notString);
  branchTestDouble(Assembler::NotEqual, tag, &useInput);
  {
    ScratchTagScopeRelease _(&tag);

    Register int32 = result.scratchReg();
    unboxDouble(value, tempFloat);

    Label canonicalize;
    convertDoubleToInt32(tempFloat, int32, &canonicalize, false);

    tagValue(JSVAL_TYPE_INT32, int32, result);
    jump(&done);

    bind(&canonicalize);

    // Normalize the sign bit of a NaN.
    branchDouble(Assembler::DoubleOrdered, tempFloat, tempFloat, &useInput);
    moveValue(JS::NaNValue(), result);
    jump(&done);
  }

  bind(&useInput);
  moveValue(value, result);

  bind(&done);
}

void MacroAssembler::scrambleHashCode(Register result) {
  // Inline implementation of |mozilla::ScrambleHashCode()|.

  mul32(Imm32(mozilla::kGoldenRatioU32), result);
}

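// I.e. a single Fibonacci-hashing step (sketch):
//
//   uint32_t scramble(uint32_t h) {
//     return h * 0x9E3779B9;  // mozilla::kGoldenRatioU32
//   }
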
void MacroAssembler::prepareHashNonGCThing(ValueOperand value, Register result,
                                           Register temp) {
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |mozilla::HashGeneric(v.asRawBits())|.

#ifdef DEBUG
  Label ok;
  branchTestGCThing(Assembler::NotEqual, value, &ok);
  assumeUnreachable("Unexpected GC thing");
  bind(&ok);
#endif

  // uint32_t v1 = static_cast<uint32_t>(aValue);
#ifdef JS_PUNBOX64
  move64To32(value.toRegister64(), result);
#else
  move32(value.payloadReg(), result);
#endif

  // uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32);
#ifdef JS_PUNBOX64
  auto r64 = Register64(temp);
  move64(value.toRegister64(), r64);
  rshift64Arithmetic(Imm32(32), r64);
#else
  move32(value.typeReg(), temp);
#endif

  // mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash) ^ aValue);
  // with |aHash = 0| and |aValue = v1|.
  mul32(Imm32(mozilla::kGoldenRatioU32), result);

  // mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash) ^ aValue);
  // with |aHash = <above hash>| and |aValue = v2|.
  rotateLeft(Imm32(5), result, result);
  xor32(temp, result);

  // Combine |mul32| and |scrambleHashCode| by directly multiplying with
  // |kGoldenRatioU32 * kGoldenRatioU32|.
  //
  // mul32(Imm32(mozilla::kGoldenRatioU32), result);
  //
  // scrambleHashCode(result);
  mul32(Imm32(mozilla::kGoldenRatioU32 * mozilla::kGoldenRatioU32), result);
}

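// Putting the pieces together, the register computation above matches this
// sketch of |mozilla::HashGeneric()|:
//
//   uint32_t addToHash(uint32_t h, uint32_t v) {
//     return mozilla::kGoldenRatioU32 * (mozilla::RotateLeft(h, 5) ^ v);
//   }
//   uint32_t hash = addToHash(addToHash(0, v1), v2) * mozilla::kGoldenRatioU32;
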
void MacroAssembler::prepareHashString(Register str, Register result,
                                       Register temp) {
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |JSAtom::hash()|.

#ifdef DEBUG
  Label ok;
  branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
               Imm32(JSString::ATOM_BIT), &ok);
  assumeUnreachable("Unexpected non-atom string");
  bind(&ok);
#endif

#ifdef JS_64BIT
  static_assert(FatInlineAtom::offsetOfHash() == NormalAtom::offsetOfHash());
  load32(Address(str, NormalAtom::offsetOfHash()), result);
#else
  move32(Imm32(JSString::FAT_INLINE_MASK), temp);
  and32(Address(str, JSString::offsetOfFlags()), temp);

  // Set |result| to 1 for FatInlineAtoms.
  move32(Imm32(0), result);
  cmp32Set(Assembler::Equal, temp, Imm32(JSString::FAT_INLINE_MASK), result);

  // Use a computed load for branch-free code.

  static_assert(FatInlineAtom::offsetOfHash() > NormalAtom::offsetOfHash());

  constexpr size_t offsetDiff =
      FatInlineAtom::offsetOfHash() - NormalAtom::offsetOfHash();
  static_assert(mozilla::IsPowerOfTwo(offsetDiff));

  uint8_t shift = mozilla::FloorLog2Size(offsetDiff);
  if (IsShiftInScaleRange(shift)) {
    load32(
        BaseIndex(str, result, ShiftToScale(shift), NormalAtom::offsetOfHash()),
        result);
  } else {
    lshift32(Imm32(shift), result);
    load32(BaseIndex(str, result, TimesOne, NormalAtom::offsetOfHash()),
           result);
  }
#endif

  scrambleHashCode(result);
}

void MacroAssembler::prepareHashSymbol(Register sym, Register result) {
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |Symbol::hash()|.

  load32(Address(sym, JS::Symbol::offsetOfHash()), result);

  scrambleHashCode(result);
}

void MacroAssembler::prepareHashBigInt(Register bigInt, Register result,
                                       Register temp1, Register temp2,
                                       Register temp3) {
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |BigInt::hash()|.

  // Inline implementation of |mozilla::AddU32ToHash()|.
  auto addU32ToHash = [&](auto toAdd) {
    rotateLeft(Imm32(5), result, result);
    xor32(toAdd, result);
    mul32(Imm32(mozilla::kGoldenRatioU32), result);
  };

  move32(Imm32(0), result);

  // Inline |mozilla::HashBytes()|.

  load32(Address(bigInt, BigInt::offsetOfLength()), temp1);
  loadBigIntDigits(bigInt, temp2);

  Label start, loop;
  jump(&start);
  bind(&loop);

  {
    // Compute |AddToHash(AddToHash(hash, data), sizeof(Digit))|.
#if defined(JS_CODEGEN_MIPS64)
    // Hash the lower 32-bits.
    addU32ToHash(Address(temp2, 0));

    // Hash the upper 32-bits.
    addU32ToHash(Address(temp2, sizeof(int32_t)));
#elif JS_PUNBOX64
    // Use a single 64-bit load on non-MIPS64 platforms.
    loadPtr(Address(temp2, 0), temp3);

    // Hash the lower 32-bits.
    addU32ToHash(temp3);

    // Hash the upper 32-bits.
    rshiftPtr(Imm32(32), temp3);
    addU32ToHash(temp3);
#else
    addU32ToHash(Address(temp2, 0));
#endif
  }
  addPtr(Imm32(sizeof(BigInt::Digit)), temp2);

  bind(&start);
  branchSub32(Assembler::NotSigned, Imm32(1), temp1, &loop);

  // Compute |mozilla::AddToHash(h, isNegative())|.
  {
    static_assert(mozilla::IsPowerOfTwo(BigInt::signBitMask()));

    load32(Address(bigInt, BigInt::offsetOfFlags()), temp1);
    and32(Imm32(BigInt::signBitMask()), temp1);
    rshift32(Imm32(mozilla::FloorLog2(BigInt::signBitMask())), temp1);

    addU32ToHash(temp1);
  }

  scrambleHashCode(result);
}

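// In C++ terms the loop above is |mozilla::HashBytes()| applied to the digit
// array, followed by hashing the sign bit (loose sketch):
//
//   uint32_t h = 0;
//   for (size_t i = 0; i < length; i++) {
//     h = addU32ToHash(h, digits[i]);  // 64-bit digits hashed as two words
//   }
//   h = addU32ToHash(h, isNegative ? 1 : 0);
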
void MacroAssembler::prepareHashObject(Register setObj, ValueOperand value,
                                       Register result, Register temp1,
                                       Register temp2, Register temp3,
                                       Register temp4) {
#ifdef JS_PUNBOX64
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |HashCodeScrambler::scramble(v.asRawBits())|.

  // Load the |ValueSet| or |ValueMap|.
  static_assert(SetObject::getDataSlotOffset() ==
                MapObject::getDataSlotOffset());
  loadPrivate(Address(setObj, SetObject::getDataSlotOffset()), temp1);

  // Load |HashCodeScrambler::mK0| and |HashCodeScrambler::mK1|.
  static_assert(ValueSet::offsetOfImplHcsK0() == ValueMap::offsetOfImplHcsK0());
  static_assert(ValueSet::offsetOfImplHcsK1() == ValueMap::offsetOfImplHcsK1());
  auto k0 = Register64(temp1);
  auto k1 = Register64(temp2);
  load64(Address(temp1, ValueSet::offsetOfImplHcsK1()), k1);
  load64(Address(temp1, ValueSet::offsetOfImplHcsK0()), k0);

  // Hash numbers are 32-bit values, so only hash the lower double-word.
  static_assert(sizeof(mozilla::HashNumber) == 4);
  move32To64ZeroExtend(value.valueReg(), Register64(result));

  // Inline implementation of |SipHasher::sipHash()|.
  auto m = Register64(result);
  auto v0 = Register64(temp3);
  auto v1 = Register64(temp4);
  auto v2 = k0;
  auto v3 = k1;

  auto sipRound = [&]() {
    // mV0 = WrappingAdd(mV0, mV1);
    add64(v1, v0);

    // mV1 = RotateLeft(mV1, 13);
    rotateLeft64(Imm32(13), v1, v1, InvalidReg);

    // mV1 ^= mV0;
    xor64(v0, v1);

    // mV0 = RotateLeft(mV0, 32);
    rotateLeft64(Imm32(32), v0, v0, InvalidReg);

    // mV2 = WrappingAdd(mV2, mV3);
    add64(v3, v2);

    // mV3 = RotateLeft(mV3, 16);
    rotateLeft64(Imm32(16), v3, v3, InvalidReg);

    // mV3 ^= mV2;
    xor64(v2, v3);

    // mV0 = WrappingAdd(mV0, mV3);
    add64(v3, v0);

    // mV3 = RotateLeft(mV3, 21);
    rotateLeft64(Imm32(21), v3, v3, InvalidReg);

    // mV3 ^= mV0;
    xor64(v0, v3);

    // mV2 = WrappingAdd(mV2, mV1);
    add64(v1, v2);

    // mV1 = RotateLeft(mV1, 17);
    rotateLeft64(Imm32(17), v1, v1, InvalidReg);

    // mV1 ^= mV2;
    xor64(v2, v1);

    // mV2 = RotateLeft(mV2, 32);
    rotateLeft64(Imm32(32), v2, v2, InvalidReg);
  };

  // 1. Initialization.
  // mV0 = aK0 ^ UINT64_C(0x736f6d6570736575);
  move64(Imm64(0x736f6d6570736575), v0);
  xor64(k0, v0);

  // mV1 = aK1 ^ UINT64_C(0x646f72616e646f6d);
  move64(Imm64(0x646f72616e646f6d), v1);
  xor64(k1, v1);

  // mV2 = aK0 ^ UINT64_C(0x6c7967656e657261);
  MOZ_ASSERT(v2 == k0);
  xor64(Imm64(0x6c7967656e657261), v2);

  // mV3 = aK1 ^ UINT64_C(0x7465646279746573);
  MOZ_ASSERT(v3 == k1);
  xor64(Imm64(0x7465646279746573), v3);

  // 2. Compression.
  // mV3 ^= aM;
  xor64(m, v3);

  // sipRound();
  sipRound();

  // mV0 ^= aM;
  xor64(m, v0);

  // 3. Finalization.
  // mV2 ^= 0xff;
  xor64(Imm64(0xff), v2);

  // for (int i = 0; i < 3; i++) sipRound();
  for (int i = 0; i < 3; i++) {
    sipRound();
  }

  // return mV0 ^ mV1 ^ mV2 ^ mV3;
  xor64(v1, v0);
  xor64(v3, v2);
  xor64(v2, v0);

  move64To32(v0, result);

  scrambleHashCode(result);
#else
  MOZ_CRASH("Not implemented");
#endif
}

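// One compression round plus three finalization rounds make the above
// SipHash-1-3, the variant mozilla::HashCodeScrambler uses; only the low 32
// bits of the 64-bit result feed the hash number.
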
void MacroAssembler::prepareHashValue(Register setObj, ValueOperand value,
                                      Register result, Register temp1,
                                      Register temp2, Register temp3,
                                      Register temp4) {
  Label isString, isObject, isSymbol, isBigInt;
  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    branchTestString(Assembler::Equal, tag, &isString);
    branchTestObject(Assembler::Equal, tag, &isObject);
    branchTestSymbol(Assembler::Equal, tag, &isSymbol);
    branchTestBigInt(Assembler::Equal, tag, &isBigInt);
  }

  Label done;
  prepareHashNonGCThing(value, result, temp1);
  jump(&done);

  bind(&isString);
  unboxString(value, temp1);
  prepareHashString(temp1, result, temp2);
  jump(&done);

  bind(&isObject);
  prepareHashObject(setObj, value, result, temp1, temp2, temp3, temp4);
  jump(&done);

  bind(&isSymbol);
  unboxSymbol(value, temp1);
  prepareHashSymbol(temp1, result);
  jump(&done);

  bind(&isBigInt);
  unboxBigInt(value, temp1);
  prepareHashBigInt(temp1, result, temp2, temp3, temp4);

  // Fallthrough to |done|.

  bind(&done);
}

template <typename OrderedHashTable>
void MacroAssembler::orderedHashTableLookup(Register setOrMapObj,
                                            ValueOperand value, Register hash,
                                            Register entryTemp, Register temp1,
                                            Register temp2, Register temp3,
                                            Register temp4, Label* found,
                                            IsBigInt isBigInt) {
  // Inline implementation of |OrderedHashTable::lookup()|.

  MOZ_ASSERT_IF(isBigInt == IsBigInt::No, temp3 == InvalidReg);
  MOZ_ASSERT_IF(isBigInt == IsBigInt::No, temp4 == InvalidReg);

#ifdef DEBUG
  Label ok;
  if (isBigInt == IsBigInt::No) {
    branchTestBigInt(Assembler::NotEqual, value, &ok);
    assumeUnreachable("Unexpected BigInt");
  } else if (isBigInt == IsBigInt::Yes) {
    branchTestBigInt(Assembler::Equal, value, &ok);
    assumeUnreachable("Unexpected non-BigInt");
  }
  bind(&ok);
#endif

#ifdef DEBUG
  PushRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));

  pushValue(value);
  moveStackPtrTo(temp2);

  setupUnalignedABICall(temp1);
  loadJSContext(temp1);
  passABIArg(temp1);
  passABIArg(setOrMapObj);
  passABIArg(temp2);
  passABIArg(hash);

  if constexpr (std::is_same_v<OrderedHashTable, ValueSet>) {
    using Fn =
        void (*)(JSContext*, SetObject*, const Value*, mozilla::HashNumber);
    callWithABI<Fn, jit::AssertSetObjectHash>();
  } else {
    using Fn =
        void (*)(JSContext*, MapObject*, const Value*, mozilla::HashNumber);
    callWithABI<Fn, jit::AssertMapObjectHash>();
  }

  popValue(value);
  PopRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
#endif

  // Load the |ValueSet| or |ValueMap|.
  static_assert(SetObject::getDataSlotOffset() ==
                MapObject::getDataSlotOffset());
  loadPrivate(Address(setOrMapObj, SetObject::getDataSlotOffset()), temp1);

  // Load the bucket.
  move32(hash, entryTemp);
  load32(Address(temp1, OrderedHashTable::offsetOfImplHashShift()), temp2);
  flexibleRshift32(temp2, entryTemp);

  loadPtr(Address(temp1, OrderedHashTable::offsetOfImplHashTable()), temp2);
  loadPtr(BaseIndex(temp2, entryTemp, ScalePointer), entryTemp);

  // Search for a match in this bucket.
  Label start, loop;
  jump(&start);
  bind(&loop);
  {
    // Inline implementation of |HashableValue::operator==|.

    static_assert(OrderedHashTable::offsetOfImplDataElement() == 0,
                  "offsetof(Data, element) is 0");
    auto keyAddr = Address(entryTemp, OrderedHashTable::offsetOfEntryKey());

    if (isBigInt == IsBigInt::No) {
      // Two HashableValues are equal if they have equal bits.
      branch64(Assembler::Equal, keyAddr, value.toRegister64(), found);
    } else {
#ifdef JS_PUNBOX64
      auto key = ValueOperand(temp1);
#else
      auto key = ValueOperand(temp1, temp2);
#endif

      loadValue(keyAddr, key);

      // Two HashableValues are equal if they have equal bits.
      branch64(Assembler::Equal, key.toRegister64(), value.toRegister64(),
               found);

      // BigInt values are considered equal if they represent the same
      // mathematical value.
      Label next;
      fallibleUnboxBigInt(key, temp2, &next);
      if (isBigInt == IsBigInt::Yes) {
        unboxBigInt(value, temp1);
      } else {
        fallibleUnboxBigInt(value, temp1, &next);
      }
      equalBigInts(temp1, temp2, temp3, temp4, temp1, temp2, &next, &next,
                   found);
      bind(&next);
    }
  }
  loadPtr(Address(entryTemp, OrderedHashTable::offsetOfImplDataChain()),
          entryTemp);
  bind(&start);
  branchTestPtr(Assembler::NonZero, entryTemp, entryTemp, &loop);
}

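// The emitted loop is a standard chained hash-table lookup (sketch):
//
//   bucket = hash >> hashShift;
//   for (Data* e = hashTable[bucket]; e; e = e->chain) {
//     if (equal(e->element.key, value)) goto found;  // raw bits, or BigInt
//   }                                                // value comparison
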
void MacroAssembler::setObjectHas(Register setObj, ValueOperand value,
                                  Register hash, Register result,
                                  Register temp1, Register temp2,
                                  Register temp3, Register temp4,
                                  IsBigInt isBigInt) {
  Label found;
  orderedHashTableLookup<ValueSet>(setObj, value, hash, result, temp1, temp2,
                                   temp3, temp4, &found, isBigInt);

  Label done;
  move32(Imm32(0), result);
  jump(&done);

  bind(&found);
  move32(Imm32(1), result);

  bind(&done);
}

void MacroAssembler::mapObjectHas(Register mapObj, ValueOperand value,
                                  Register hash, Register result,
                                  Register temp1, Register temp2,
                                  Register temp3, Register temp4,
                                  IsBigInt isBigInt) {
  Label found;
  orderedHashTableLookup<ValueMap>(mapObj, value, hash, result, temp1, temp2,
                                   temp3, temp4, &found, isBigInt);

  Label done;
  move32(Imm32(0), result);
  jump(&done);

  bind(&found);
  move32(Imm32(1), result);

  bind(&done);
}

void MacroAssembler::mapObjectGet(Register mapObj, ValueOperand value,
                                  Register hash, ValueOperand result,
                                  Register temp1, Register temp2,
                                  Register temp3, Register temp4,
                                  Register temp5, IsBigInt isBigInt) {
  Label found;
  orderedHashTableLookup<ValueMap>(mapObj, value, hash, temp1, temp2, temp3,
                                   temp4, temp5, &found, isBigInt);

  Label done;
  moveValue(UndefinedValue(), result);
  jump(&done);

  // |temp1| holds the found entry.
  bind(&found);
  loadValue(Address(temp1, ValueMap::Entry::offsetOfValue()), result);

  bind(&done);
}

template <typename OrderedHashTable>
void MacroAssembler::loadOrderedHashTableCount(Register setOrMapObj,
                                               Register result) {
  // Inline implementation of |OrderedHashTable::count()|.

  // Load the |ValueSet| or |ValueMap|.
  static_assert(SetObject::getDataSlotOffset() ==
                MapObject::getDataSlotOffset());
  loadPrivate(Address(setOrMapObj, SetObject::getDataSlotOffset()), result);

  // Load the live count.
  load32(Address(result, OrderedHashTable::offsetOfImplLiveCount()), result);
}

void MacroAssembler::loadSetObjectSize(Register setObj, Register result) {
  loadOrderedHashTableCount<ValueSet>(setObj, result);
}

void MacroAssembler::loadMapObjectSize(Register mapObj, Register result) {
  loadOrderedHashTableCount<ValueMap>(mapObj, result);
}

// Can't push large frames blindly on Windows, so we must touch frame memory
// incrementally, with no more than 4096 - 1 bytes between touches.
//
// This is used across all platforms for simplicity.
void MacroAssembler::touchFrameValues(Register numStackValues,
                                      Register scratch1, Register scratch2) {
  const size_t FRAME_TOUCH_INCREMENT = 2048;
  static_assert(FRAME_TOUCH_INCREMENT < 4096 - 1,
                "Frame increment is too large");

  moveStackPtrTo(scratch2);

  mov(numStackValues, scratch1);
  lshiftPtr(Imm32(3), scratch1);
  {
    // Note: this loop needs to update the stack pointer register because older
    // Linux kernels check the distance between the touched address and RSP.
    // See bug 1839669 comment 47.
    Label touchFrameLoop;
    Label touchFrameLoopEnd;
    bind(&touchFrameLoop);
    branchSub32(Assembler::Signed, Imm32(FRAME_TOUCH_INCREMENT), scratch1,
                &touchFrameLoopEnd);
    subFromStackPtr(Imm32(FRAME_TOUCH_INCREMENT));
    store32(Imm32(0), Address(getStackPointer(), 0));
    jump(&touchFrameLoop);
    bind(&touchFrameLoopEnd);
  }

  moveToStackPtr(scratch2);
}

#ifdef DEBUG
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(
    MacroAssembler& masm, RegisterType reg)
    : RegisterType(reg), masm_(masm), released_(false) {
  masm.debugTrackedRegisters_.add(reg);
}

template AutoGenericRegisterScope<Register>::AutoGenericRegisterScope(
    MacroAssembler& masm, Register reg);
template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(
    MacroAssembler& masm, FloatRegister reg);

template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope() {
  if (!released_) {
    release();
  }
}

template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();

template <class RegisterType>
void AutoGenericRegisterScope<RegisterType>::release() {
  MOZ_ASSERT(!released_);
  released_ = true;
  const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
  masm_.debugTrackedRegisters_.take(reg);
}

template void AutoGenericRegisterScope<Register>::release();
template void AutoGenericRegisterScope<FloatRegister>::release();

template <class RegisterType>
void AutoGenericRegisterScope<RegisterType>::reacquire() {
  MOZ_ASSERT(released_);
  released_ = false;
  const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
  masm_.debugTrackedRegisters_.add(reg);
}

template void AutoGenericRegisterScope<Register>::reacquire();
template void AutoGenericRegisterScope<FloatRegister>::reacquire();
#endif  // DEBUG