/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/MacroAssembler-inl.h"

#include "mozilla/FloatingPoint.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/XorShift128PlusRNG.h"

#include "jit/AtomicOp.h"
#include "jit/AtomicOperations.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineJIT.h"
#include "jit/JitFrames.h"
#include "jit/JitOptions.h"
#include "jit/JitRuntime.h"
#include "jit/JitScript.h"
#include "jit/MoveEmitter.h"
#include "jit/ReciprocalMulConstants.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/Simulator.h"
#include "jit/VMFunctions.h"
#include "js/Conversions.h"
#include "js/friend/DOMProxy.h"  // JS::ExpandoAndGeneration
#include "js/ScalarType.h"       // js::Scalar::Type
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BoundFunctionObject.h"
#include "vm/FunctionFlags.h"  // js::FunctionFlags
#include "vm/Iteration.h"
#include "vm/JSContext.h"
#include "vm/TypedArrayObject.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCodegenConstants.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmGcObject.h"
#include "wasm/WasmInstanceData.h"
#include "wasm/WasmMemory.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValidate.h"

#include "jit/TemplateObject-inl.h"
#include "vm/BytecodeUtil-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/JSObject-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::CheckedInt;

TrampolinePtr MacroAssembler::preBarrierTrampoline(MIRType type) {
  const JitRuntime* rt = runtime()->jitRuntime();
  return rt->preBarrier(type);
}

template <typename S, typename T>
static void StoreToTypedFloatArray(MacroAssembler& masm, int arrayType,
                                   const S& value, const T& dest) {
  switch (arrayType) {
    case Scalar::Float32:
      masm.storeFloat32(value, dest);
      break;
    case Scalar::Float64:
      masm.storeDouble(value, dest);
      break;
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}

void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const BaseIndex& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}

void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const Address& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}

template <typename S, typename T>
static void StoreToTypedBigIntArray(MacroAssembler& masm,
                                    Scalar::Type arrayType, const S& value,
                                    const T& dest) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));
  masm.store64(value, dest);
}

void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const BaseIndex& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}

void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const Address& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}

void MacroAssembler::boxUint32(Register source, ValueOperand dest,
                               Uint32Mode mode, Label* fail) {
  switch (mode) {
    // Fail if the value does not fit in an int32.
    case Uint32Mode::FailOnDouble: {
      branchTest32(Assembler::Signed, source, source, fail);
      tagValue(JSVAL_TYPE_INT32, source, dest);
      break;
    }
    case Uint32Mode::ForceDouble: {
      // Always convert the value to double.
      ScratchDoubleScope fpscratch(*this);
      convertUInt32ToDouble(source, fpscratch);
      boxDouble(fpscratch, dest, fpscratch);
      break;
    }
  }
}
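
// boxUint32 example: 0x7fffffff has a clear sign bit and is tagged directly
// as an Int32 Value; 0x80000000 trips the Assembler::Signed test, so
// FailOnDouble branches to |fail| while ForceDouble boxes it as the double
// 2147483648.0.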

template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        AnyRegister dest, Register temp,
                                        Label* fail) {
  switch (arrayType) {
    case Scalar::Int8:
      load8SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
      load8ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int16:
      load16SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint16:
      load16ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int32:
      load32(src, dest.gpr());
      break;
    case Scalar::Uint32:
      if (dest.isFloat()) {
        load32(src, temp);
        convertUInt32ToDouble(temp, dest.fpu());
      } else {
        load32(src, dest.gpr());

        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadUnboxedScalar to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
      }
      break;
    case Scalar::Float32:
      loadFloat32(src, dest.fpu());
      canonicalizeFloat(dest.fpu());
      break;
    case Scalar::Float64:
      loadDouble(src, dest.fpu());
      canonicalizeDouble(dest.fpu());
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}

template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail);

template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        const ValueOperand& dest,
                                        Uint32Mode uint32Mode, Register temp,
                                        Label* fail) {
  switch (arrayType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
                         InvalidReg, nullptr);
      tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
      break;
    case Scalar::Uint32:
      // Don't clobber dest when we could fail, instead use temp.
      load32(src, temp);
      boxUint32(temp, dest, uint32Mode, fail);
      break;
    case Scalar::Float32: {
      ScratchDoubleScope dscratch(*this);
      FloatRegister fscratch = dscratch.asSingle();
      loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
                         dest.scratchReg(), nullptr);
      convertFloat32ToDouble(fscratch, dscratch);
      boxDouble(dscratch, dest, dscratch);
      break;
    }
    case Scalar::Float64: {
      ScratchDoubleScope fpscratch(*this);
      loadFromTypedArray(arrayType, src, AnyRegister(fpscratch),
                         dest.scratchReg(), nullptr);
      boxDouble(fpscratch, dest, fpscratch);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}

template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 const ValueOperand& dest,
                                                 Uint32Mode uint32Mode,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 const ValueOperand& dest,
                                                 Uint32Mode uint32Mode,
                                                 Register temp, Label* fail);

template <typename T>
void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                              const T& src, Register bigInt,
                                              Register64 temp) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));

  load64(src, temp);
  initializeBigInt64(arrayType, bigInt, temp);
}

template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const Address& src,
                                                       Register bigInt,
                                                       Register64 temp);
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const BaseIndex& src,
                                                       Register bigInt,
                                                       Register64 temp);

// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
void MacroAssembler::checkAllocatorState(Label* fail) {
  // Don't execute the inline path if GC probes are built in.
#ifdef JS_GC_PROBES
  jump(fail);
#endif

#ifdef JS_GC_ZEAL
  // Don't execute the inline path if gc zeal or tracing are active.
  const uint32_t* ptrZealModeBits = runtime()->addressOfGCZealModeBits();
  branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
           fail);
#endif

  // Don't execute the inline path if the realm has an object metadata callback,
  // as the metadata to use for the object may vary between executions of the
  // op.
  if (realm()->hasAllocationMetadataBuilder()) {
    jump(fail);
  }
}

bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
                                           gc::InitialHeap initialHeap) {
  // Note that Ion elides barriers on writes to objects known to be in the
  // nursery, so any allocation that can be made into the nursery must be made
  // into the nursery, even if the nursery is disabled. At runtime these will
  // take the out-of-line path, which is required to insert a barrier for the
  // initializing writes.
  return IsNurseryAllocable(allocKind) && initialHeap != gc::TenuredHeap;
}

// Inline version of Nursery::allocateObject. If the object has dynamic slots,
// this fills in the slots_ pointer.
void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           size_t nDynamicSlots, Label* fail,
                                           const AllocSiteInput& allocSite) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // Currently the JIT does not nursery allocate foreground finalized
  // objects. This is allowed for objects that support this and have the
  // JSCLASS_SKIP_NURSERY_FINALIZE class flag set. It's hard to assert that here
  // though so disallow all foreground finalized objects for now.
  MOZ_ASSERT(!IsForegroundFinalized(allocKind));

  // We still need to allocate in the nursery, per the comment in
  // shouldNurseryAllocate; however, we need to insert into the
  // mallocedBuffers set, so bail to do the nursery allocation in the
  // interpreter.
  if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
    jump(fail);
    return;
  }

  // Check whether this allocation site needs pretenuring. This dynamic check
  // only happens for baseline code.
  if (allocSite.is<Register>()) {
    Register site = allocSite.as<Register>();
    branchTestPtr(Assembler::NonZero,
                  Address(site, gc::AllocSite::offsetOfScriptAndState()),
                  Imm32(gc::AllocSite::LONG_LIVED_BIT), fail);
  }

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.
  CompileZone* zone = realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);
  size_t totalSize = thingSize;
  if (nDynamicSlots) {
    totalSize += ObjectSlots::allocSize(nDynamicSlots);
  }
  MOZ_ASSERT(totalSize < INT32_MAX);
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  bumpPointerAllocate(result, temp, fail, zone,
                      zone->addressOfNurseryPosition(),
                      zone->addressOfNurseryCurrentEnd(), JS::TraceKind::Object,
                      totalSize, allocSite);

  if (nDynamicSlots) {
    store32(Imm32(nDynamicSlots),
            Address(result, thingSize + ObjectSlots::offsetOfCapacity()));
    store32(Imm32(0),
            Address(result,
                    thingSize + ObjectSlots::offsetOfDictionarySlotSpan()));
    store64(Imm64(ObjectSlots::NoUniqueIdInDynamicSlots),
            Address(result, thingSize + ObjectSlots::offsetOfMaybeUniqueId()));
    computeEffectiveAddress(
        Address(result, thingSize + ObjectSlots::offsetOfSlots()), temp);
    storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
  }
}
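
// Layout note: when nDynamicSlots is non-zero, the ObjectSlots header is
// bump-allocated immediately after the object, so |result + thingSize|
// addresses the header and the effective address of its offsetOfSlots()
// member is what NativeObject::slots_ is pointed at above.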

// Inlined version of FreeSpan::allocate. This does not fill in slots_.
void MacroAssembler::freeListAllocate(Register result, Register temp,
                                      gc::AllocKind allocKind, Label* fail) {
  CompileZone* zone = realm()->zone();
  int thingSize = int(gc::Arena::thingSize(allocKind));

  Label fallback;
  Label success;

  // Load the first and last offsets of |zone|'s free list for |allocKind|.
  // If there is no room remaining in the span, fall back to get the next one.
  gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
  branch32(Assembler::AboveOrEqual, result, temp, &fallback);

  // Bump the offset for the next allocation.
  add32(Imm32(thingSize), result);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  sub32(Imm32(thingSize), result);
  addPtr(temp, result);  // Turn the offset into a pointer.
  jump(&success);

  bind(&fallback);
  // If there are no free spans left, we bail to finish the allocation. The
  // interpreter will call the GC allocator to set up a new arena to allocate
  // from, after which we can resume allocating in the jit.
  branchTest32(Assembler::Zero, result, result, fail);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  addPtr(temp, result);  // Turn the offset into a pointer.
  Push(result);
  // Update the free list to point to the next span (which may be empty).
  load32(Address(result, 0), result);
  store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  Pop(result);

  bind(&success);

  if (runtime()->geckoProfiler().enabled()) {
    uint32_t* countAddress = zone->addressOfTenuredAllocCount();
    movePtr(ImmPtr(countAddress), temp);
    add32(Imm32(1), Address(temp, 0));
  }
}
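
// FreeSpan stores |first| and |last| as 16-bit offsets within the arena,
// which is why load16ZeroExtend/store16 suffice above, and an offset of zero
// marks an empty span: that is what the branchTest32 on |result| in the
// fallback path detects.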

void MacroAssembler::callFreeStub(Register slots) {
  // This register must match the one in JitRuntime::generateFreeStub.
  const Register regSlots = CallTempReg0;

  push(regSlots);
  movePtr(slots, regSlots);
  call(runtime()->jitRuntime()->freeStub());
  pop(regSlots);
}

// Inlined equivalent of gc::AllocateObject, without failure case handling.
void MacroAssembler::allocateObject(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    uint32_t nDynamicSlots,
                                    gc::InitialHeap initialHeap, Label* fail,
                                    const AllocSiteInput& allocSite) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  checkAllocatorState(fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::DefaultHeap);
    return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail,
                                 allocSite);
  }

  // Fall back to calling into the VM to allocate objects in the tenured heap
  // that have dynamic slots.
  if (nDynamicSlots) {
    jump(fail);
    return;
  }

  return freeListAllocate(result, temp, allocKind, fail);
}

void MacroAssembler::createGCObject(Register obj, Register temp,
                                    const TemplateObject& templateObj,
                                    gc::InitialHeap initialHeap, Label* fail,
                                    bool initContents /* = true */) {
  gc::AllocKind allocKind = templateObj.getAllocKind();
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  uint32_t nDynamicSlots = 0;
  if (templateObj.isNativeObject()) {
    const TemplateNativeObject& ntemplate =
        templateObj.asTemplateNativeObject();
    nDynamicSlots = ntemplate.numDynamicSlots();
  }

  allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
  initGCThing(obj, temp, templateObj, initContents);
}

void MacroAssembler::createPlainGCObject(
    Register result, Register shape, Register temp, Register temp2,
    uint32_t numFixedSlots, uint32_t numDynamicSlots, gc::AllocKind allocKind,
    gc::InitialHeap initialHeap, Label* fail, const AllocSiteInput& allocSite,
    bool initContents /* = true */) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
  MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");

  // Allocate object.
  allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
                 allocSite);

  // Initialize shape field.
  storePtr(shape, Address(result, JSObject::offsetOfShape()));

  // If the object has dynamic slots, allocateObject will initialize
  // the slots field. If not, we must initialize it now.
  if (numDynamicSlots == 0) {
    storePtr(ImmPtr(emptyObjectSlots),
             Address(result, NativeObject::offsetOfSlots()));
  }

  // Initialize elements field.
  storePtr(ImmPtr(emptyObjectElements),
           Address(result, NativeObject::offsetOfElements()));

  // Initialize fixed slots.
  if (initContents) {
    fillSlotsWithUndefined(Address(result, NativeObject::getFixedSlotOffset(0)),
                           temp, 0, numFixedSlots);
  }

  // Initialize dynamic slots.
  if (numDynamicSlots > 0) {
    loadPtr(Address(result, NativeObject::offsetOfSlots()), temp2);
    fillSlotsWithUndefined(Address(temp2, 0), temp, 0, numDynamicSlots);
  }
}

void MacroAssembler::createArrayWithFixedElements(
    Register result, Register shape, Register temp, uint32_t arrayLength,
    uint32_t arrayCapacity, gc::AllocKind allocKind,
    gc::InitialHeap initialHeap, Label* fail, const AllocSiteInput& allocSite) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
  MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");
  MOZ_ASSERT(result != temp);

  // This only supports allocating arrays with fixed elements and does not
  // support any dynamic slots or elements.
  MOZ_ASSERT(arrayCapacity >= arrayLength);
  MOZ_ASSERT(gc::GetGCKindSlots(allocKind) >=
             arrayCapacity + ObjectElements::VALUES_PER_HEADER);

  // Allocate object.
  allocateObject(result, temp, allocKind, 0, initialHeap, fail, allocSite);

  // Initialize shape field.
  storePtr(shape, Address(result, JSObject::offsetOfShape()));

  // There are no dynamic slots.
  storePtr(ImmPtr(emptyObjectSlots),
           Address(result, NativeObject::offsetOfSlots()));

  // Initialize elements pointer for fixed (inline) elements.
  computeEffectiveAddress(
      Address(result, NativeObject::offsetOfFixedElements()), temp);
  storePtr(temp, Address(result, NativeObject::offsetOfElements()));

  // Initialize elements header.
  store32(Imm32(ObjectElements::FIXED),
          Address(temp, ObjectElements::offsetOfFlags()));
  store32(Imm32(0), Address(temp, ObjectElements::offsetOfInitializedLength()));
  store32(Imm32(arrayCapacity),
          Address(temp, ObjectElements::offsetOfCapacity()));
  store32(Imm32(arrayLength), Address(temp, ObjectElements::offsetOfLength()));
}

// Inline version of Nursery::allocateString.
void MacroAssembler::nurseryAllocateString(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);
  bumpPointerAllocate(result, temp, fail, zone,
                      zone->addressOfStringNurseryPosition(),
                      zone->addressOfStringNurseryCurrentEnd(),
                      JS::TraceKind::String, thingSize);
}

// Inline version of Nursery::allocateBigInt.
void MacroAssembler::nurseryAllocateBigInt(Register result, Register temp,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = realm()->zone();
  size_t thingSize = gc::Arena::thingSize(gc::AllocKind::BIGINT);

  bumpPointerAllocate(result, temp, fail, zone,
                      zone->addressOfBigIntNurseryPosition(),
                      zone->addressOfBigIntNurseryCurrentEnd(),
                      JS::TraceKind::BigInt, thingSize);
}

void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
                                         Label* fail, CompileZone* zone,
                                         void* posAddr, const void* curEndAddr,
                                         JS::TraceKind traceKind, uint32_t size,
                                         const AllocSiteInput& allocSite) {
  MOZ_ASSERT(size >= gc::MinCellSize);

  uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
  MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  // The position (allocation pointer) and the end pointer are stored
  // very close to each other -- specifically, easily within a 32 bit offset.
  // Use relative offsets between them, to avoid 64-bit immediate loads.
  //
  // I tried to optimise this further by using an extra register to avoid
  // the final subtraction and hopefully get some more instruction
  // parallelism, but it made no difference.
  movePtr(ImmPtr(posAddr), temp);
  loadPtr(Address(temp, 0), result);
  addPtr(Imm32(totalSize), result);
  CheckedInt<int32_t> endOffset =
      (CheckedInt<uintptr_t>(uintptr_t(curEndAddr)) -
       CheckedInt<uintptr_t>(uintptr_t(posAddr)))
          .toChecked<int32_t>();
  MOZ_ASSERT(endOffset.isValid(), "Position and end pointers must be nearby");
  branchPtr(Assembler::Below, Address(temp, endOffset.value()), result, fail);
  storePtr(result, Address(temp, 0));
  subPtr(Imm32(size), result);

  if (allocSite.is<gc::CatchAllAllocSite>()) {
    // No allocation site supplied. This is the case when called from Warp, or
    // from places that don't support pretenuring.
    gc::CatchAllAllocSite siteKind = allocSite.as<gc::CatchAllAllocSite>();
    gc::AllocSite* site = zone->catchAllAllocSite(traceKind, siteKind);
    uintptr_t headerWord = gc::NurseryCellHeader::MakeValue(site, traceKind);
    storePtr(ImmWord(headerWord),
             Address(result, -js::Nursery::nurseryCellHeaderSize()));

    // Update the catch all allocation site for strings or if the profiler is
    // enabled. This is used to calculate the nursery allocation count. The
    // string data is used to determine whether to disable nursery string
    // allocation.
    if (traceKind == JS::TraceKind::String ||
        runtime()->geckoProfiler().enabled()) {
      uint32_t* countAddress = site->nurseryAllocCountAddress();
      CheckedInt<int32_t> counterOffset =
          (CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
           CheckedInt<uintptr_t>(uintptr_t(posAddr)))
              .toChecked<int32_t>();
      if (counterOffset.isValid()) {
        add32(Imm32(1), Address(temp, counterOffset.value()));
      } else {
        movePtr(ImmPtr(countAddress), temp);
        add32(Imm32(1), Address(temp, 0));
      }
    }
  } else {
    // Update allocation site and store pointer in the nursery cell header.
    // This is only used from baseline.
    Register site = allocSite.as<Register>();
    updateAllocSite(temp, result, zone, site);
    // See NurseryCellHeader::MakeValue.
    orPtr(Imm32(int32_t(traceKind)), site);
    storePtr(site, Address(result, -js::Nursery::nurseryCellHeaderSize()));
  }
}
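
// The fast path above is, in effect:
//   pos = *posAddr;
//   newPos = pos + totalSize;
//   if (*curEndAddr < newPos) goto fail;
//   *posAddr = newPos;
//   result = newPos - size;  // cell start; the header sits just below it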

// Update the allocation site in the same way as Nursery::allocateCell.
void MacroAssembler::updateAllocSite(Register temp, Register result,
                                     CompileZone* zone, Register site) {
  Label done;

  add32(Imm32(1), Address(site, gc::AllocSite::offsetOfNurseryAllocCount()));

  branch32(Assembler::NotEqual,
           Address(site, gc::AllocSite::offsetOfNurseryAllocCount()), Imm32(1),
           &done);

  loadPtr(AbsoluteAddress(zone->addressOfNurseryAllocatedSites()), temp);
  storePtr(temp, Address(site, gc::AllocSite::offsetOfNextNurseryAllocated()));
  storePtr(site, AbsoluteAddress(zone->addressOfNurseryAllocatedSites()));

  bind(&done);
}
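
// A site is prepended to the zone's nurseryAllocatedSites list only when its
// nursery allocation count transitions from 0 to 1, so each site is linked at
// most once between minor GCs.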

// Inlined equivalent of gc::AllocateString, jumping to fail if nursery
// allocation requested but unsuccessful.
void MacroAssembler::allocateString(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    gc::InitialHeap initialHeap, Label* fail) {
  MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
             allocKind == gc::AllocKind::FAT_INLINE_STRING);

  checkAllocatorState(fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::DefaultHeap);
    return nurseryAllocateString(result, temp, allocKind, fail);
  }

  freeListAllocate(result, temp, allocKind, fail);
}

void MacroAssembler::newGCString(Register result, Register temp,
                                 gc::InitialHeap initialHeap, Label* fail) {
  allocateString(result, temp, js::gc::AllocKind::STRING, initialHeap, fail);
}

void MacroAssembler::newGCFatInlineString(Register result, Register temp,
                                          gc::InitialHeap initialHeap,
                                          Label* fail) {
  allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
                 initialHeap, fail);
}

void MacroAssembler::newGCBigInt(Register result, Register temp,
                                 gc::InitialHeap initialHeap, Label* fail) {
  checkAllocatorState(fail);

  if (shouldNurseryAllocate(gc::AllocKind::BIGINT, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::DefaultHeap);
    return nurseryAllocateBigInt(result, temp, fail);
  }

  freeListAllocate(result, temp, gc::AllocKind::BIGINT, fail);
}

void MacroAssembler::copySlotsFromTemplate(
    Register obj, const TemplateNativeObject& templateObj, uint32_t start,
    uint32_t end) {
  uint32_t nfixed = std::min(templateObj.numFixedSlots(), end);
  for (unsigned i = start; i < nfixed; i++) {
    // Template objects are not exposed to script and therefore immutable.
    // However, regexp template objects are sometimes used directly (when
    // the cloning is not observable), and therefore we can end up with a
    // non-zero lastIndex. Detect this case here and just substitute 0, to
    // avoid racing with the main thread updating this slot.
    Value v;
    if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot()) {
      v = Int32Value(0);
    } else {
      v = templateObj.getSlot(i);
    }
    storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
  }
}

void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
                                                uint32_t start, uint32_t end,
                                                const Value& v) {
  MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));

  if (start >= end) {
    return;
  }

#ifdef JS_NUNBOX32
  // We only have a single spare register, so do the initialization as two
  // strided writes of the tag and body.
  Address addr = base;
  move32(Imm32(v.toNunboxPayload()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
    store32(temp, ToPayload(addr));
  }

  addr = base;
  move32(Imm32(v.toNunboxTag()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
    store32(temp, ToType(addr));
  }
#else
  moveValue(v, ValueOperand(temp));
  for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtr<Value>)) {
    storePtr(temp, base);
  }
#endif
}
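
// On nunbox32 a Value occupies two 32-bit words (type tag and payload), hence
// the two strided store32 passes above; on punbox64 each Value is a single
// word and one storePtr per slot suffices.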

void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
                                            uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
}

void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
                                                uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end,
                             MagicValue(JS_UNINITIALIZED_LEXICAL));
}

static std::pair<uint32_t, uint32_t> FindStartOfUninitializedAndUndefinedSlots(
    const TemplateNativeObject& templateObj, uint32_t nslots) {
  MOZ_ASSERT(nslots == templateObj.slotSpan());
  MOZ_ASSERT(nslots > 0);

  uint32_t first = nslots;
  for (; first != 0; --first) {
    if (templateObj.getSlot(first - 1) != UndefinedValue()) {
      break;
    }
  }

  uint32_t startOfUndefined = first;

  if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
    for (; first != 0; --first) {
      if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
        break;
      }
    }
  }

  uint32_t startOfUninitialized = first;

  return {startOfUninitialized, startOfUndefined};
}
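
// Worked example: for template slots [x, y, uninit, uninit, undef, undef]
// this returns startOfUninitialized = 2 and startOfUndefined = 4.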

void MacroAssembler::initTypedArraySlots(Register obj, Register temp,
                                         Register lengthReg,
                                         LiveRegisterSet liveRegs, Label* fail,
                                         TypedArrayObject* templateObj,
                                         TypedArrayLength lengthKind) {
  MOZ_ASSERT(!templateObj->hasBuffer());

  constexpr size_t dataSlotOffset = ArrayBufferViewObject::dataOffset();
  constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);

  static_assert(
      TypedArrayObject::FIXED_DATA_START == TypedArrayObject::DATA_SLOT + 1,
      "fixed inline element data assumed to begin after the data slot");

  static_assert(
      TypedArrayObject::INLINE_BUFFER_LIMIT ==
          JSObject::MAX_BYTE_SIZE - dataOffset,
      "typed array inline buffer is limited by the maximum object byte size");

  // Initialise data elements to zero.
  size_t length = templateObj->length();
  MOZ_ASSERT(length <= INT32_MAX,
             "Template objects are only created for int32 lengths");
  size_t nbytes = length * templateObj->bytesPerElement();

  if (lengthKind == TypedArrayLength::Fixed &&
      nbytes <= TypedArrayObject::INLINE_BUFFER_LIMIT) {
    MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());

    // Store data elements inside the remaining JSObject slots.
    computeEffectiveAddress(Address(obj, dataOffset), temp);
    storePrivateValue(temp, Address(obj, dataSlotOffset));

    // Write enough zero pointers into fixed data to zero every
    // element. (This zeroes past the end of a byte count that's
    // not a multiple of pointer size. That's okay, because fixed
    // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
    // and we won't inline unless the desired memory fits in that
    // space.)
    static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");

    size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
    for (size_t i = 0; i < numZeroPointers; i++) {
      storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
    }
    MOZ_ASSERT(nbytes > 0, "Zero-length TypedArrays need ZeroLengthArrayData");
  } else {
    if (lengthKind == TypedArrayLength::Fixed) {
      move32(Imm32(length), lengthReg);
    }

    // Ensure volatile |obj| is saved across the call.
    if (obj.volatile_()) {
      liveRegs.addUnchecked(obj);
    }

    // Allocate a buffer on the heap to store the data elements.
    PushRegsInMask(liveRegs);
    using Fn = void (*)(JSContext* cx, TypedArrayObject* obj, int32_t count);
    setupUnalignedABICall(temp);
    loadJSContext(temp);
    passABIArg(temp);
    passABIArg(obj);
    passABIArg(lengthReg);
    callWithABI<Fn, AllocateAndInitTypedArrayBuffer>();
    PopRegsInMask(liveRegs);

    // Fail when data slot is UndefinedValue.
    branchTestUndefined(Assembler::Equal, Address(obj, dataSlotOffset), fail);
  }
}
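
// On the heap-allocation path, AllocateAndInitTypedArrayBuffer is expected to
// leave the data slot as UndefinedValue when the allocation fails, which the
// branchTestUndefined above turns into a jump to |fail|.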

void MacroAssembler::initGCSlots(Register obj, Register temp,
                                 const TemplateNativeObject& templateObj) {
  MOZ_ASSERT(!templateObj.isArrayObject());

  // Slots of non-array objects are required to be initialized.
  // Use the values currently in the template object.
  uint32_t nslots = templateObj.slotSpan();
  if (nslots == 0) {
    return;
  }

  uint32_t nfixed = templateObj.numUsedFixedSlots();
  uint32_t ndynamic = templateObj.numDynamicSlots();

  // Attempt to group slot writes such that we minimize the amount of
  // duplicated data we need to embed in code and load into registers. In
  // general, most template object slots will be undefined except for any
  // reserved slots. Since reserved slots come first, we split the object
  // logically into independent non-UndefinedValue writes to the head and
  // duplicated writes of UndefinedValue to the tail. For the majority of
  // objects, the "tail" will be the entire slot range.
  //
  // The template object may be a CallObject, in which case we need to
  // account for uninitialized lexical slots as well as undefined
  // slots. Uninitialized lexical slots appear in CallObjects if the function
  // has parameter expressions, in which case closed over parameters have
  // TDZ. Uninitialized slots come before undefined slots in CallObjects.
  auto [startOfUninitialized, startOfUndefined] =
      FindStartOfUninitializedAndUndefinedSlots(templateObj, nslots);
  MOZ_ASSERT(startOfUninitialized <= nfixed);  // Reserved slots must be fixed.
  MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
  MOZ_ASSERT_IF(!templateObj.isCallObject() &&
                    !templateObj.isBlockLexicalEnvironmentObject(),
                startOfUninitialized == startOfUndefined);

  // Copy over any preserved reserved slots.
  copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);

  // Fill the rest of the fixed slots with undefined and uninitialized.
  size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
  fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
                             std::min(startOfUndefined, nfixed));

  if (startOfUndefined < nfixed) {
    offset = NativeObject::getFixedSlotOffset(startOfUndefined);
    fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
                           nfixed);
  }

  if (ndynamic) {
    // We are short one register to do this elegantly. Borrow the obj
    // register briefly for our slots base address.
    push(obj);
    loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);

    // Fill uninitialized slots if necessary. Otherwise initialize all
    // slots to undefined.
    if (startOfUndefined > nfixed) {
      MOZ_ASSERT(startOfUninitialized != startOfUndefined);
      fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
                                 startOfUndefined - nfixed);
      size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
      fillSlotsWithUndefined(Address(obj, offset), temp,
                             startOfUndefined - nfixed, ndynamic);
    } else {
      fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
    }

    pop(obj);
  }
}

void MacroAssembler::initGCThing(Register obj, Register temp,
                                 const TemplateObject& templateObj,
                                 bool initContents) {
  // Fast initialization of an empty object returned by allocateObject().

  storePtr(ImmGCPtr(templateObj.shape()),
           Address(obj, JSObject::offsetOfShape()));

  if (templateObj.isNativeObject()) {
    const TemplateNativeObject& ntemplate =
        templateObj.asTemplateNativeObject();
    MOZ_ASSERT(!ntemplate.hasDynamicElements());

    // If the object has dynamic slots, the slots member has already been
    // filled in.
    if (ntemplate.numDynamicSlots() == 0) {
      storePtr(ImmPtr(emptyObjectSlots),
               Address(obj, NativeObject::offsetOfSlots()));
    }

    if (ntemplate.isArrayObject()) {
      // Can't skip initializing reserved slots.
      MOZ_ASSERT(initContents);

      int elementsOffset = NativeObject::offsetOfFixedElements();

      computeEffectiveAddress(Address(obj, elementsOffset), temp);
      storePtr(temp, Address(obj, NativeObject::offsetOfElements()));

      // Fill in the elements header.
      store32(
          Imm32(ntemplate.getDenseCapacity()),
          Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
      store32(Imm32(ntemplate.getDenseInitializedLength()),
              Address(obj, elementsOffset +
                               ObjectElements::offsetOfInitializedLength()));
      store32(Imm32(ntemplate.getArrayLength()),
              Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
      store32(Imm32(ObjectElements::FIXED),
              Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
    } else if (ntemplate.isArgumentsObject()) {
      // The caller will initialize the reserved slots.
      MOZ_ASSERT(!initContents);
      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));
    } else {
      // If the target type could be a TypedArray that maps shared memory
      // then this would need to store emptyObjectElementsShared in that case.
      MOZ_ASSERT(!ntemplate.isSharedMemory());

      // Can't skip initializing reserved slots.
      MOZ_ASSERT(initContents);

      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));

      initGCSlots(obj, temp, ntemplate);
    }
  } else {
    MOZ_CRASH("Unknown object");
  }

#ifdef JS_GC_PROBES
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  regs.takeUnchecked(obj);
  Register temp2 = regs.takeAnyGeneral();

  using Fn = void (*)(JSObject* obj);
  setupUnalignedABICall(temp2);
  passABIArg(obj);
  callWithABI<Fn, TraceCreateObject>();

  PopRegsInMask(save);
#endif
}

void MacroAssembler::compareStrings(JSOp op, Register left, Register right,
                                    Register result, Label* fail) {
  MOZ_ASSERT(left != result);
  MOZ_ASSERT(right != result);
  MOZ_ASSERT(IsEqualityOp(op) || IsRelationalOp(op));

  Label notPointerEqual;
  // If operands point to the same instance, the strings are trivially equal.
  branchPtr(Assembler::NotEqual, left, right,
            IsEqualityOp(op) ? &notPointerEqual : fail);
  move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
               op == JSOp::Ge),
         result);

  if (IsEqualityOp(op)) {
    Label done;
    jump(&done);

    bind(&notPointerEqual);

    Label leftIsNotAtom;
    Label setNotEqualResult;
    // Atoms cannot be equal to each other if they point to different strings.
    Imm32 atomBit(JSString::ATOM_BIT);
    branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()),
                 atomBit, &leftIsNotAtom);
    branchTest32(Assembler::NonZero, Address(right, JSString::offsetOfFlags()),
                 atomBit, &setNotEqualResult);

    bind(&leftIsNotAtom);
    // Strings of different length can never be equal.
    loadStringLength(left, result);
    branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()),
             result, fail);

    bind(&setNotEqualResult);
    move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), result);

    bind(&done);
  }
}

void MacroAssembler::loadStringChars(Register str, Register dest,
                                     CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    if (encoding == CharEncoding::Latin1) {
      // If the string is a rope, zero the |str| register. The code below
      // depends on str->flags so this should block speculative execution.
      movePtr(ImmWord(0), dest);
      test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::LINEAR_BIT), dest, str);
    } else {
      // If we're loading TwoByte chars, there's an additional risk:
      // if the string has Latin1 chars, we could read out-of-bounds. To
      // prevent this, we check both the Linear and Latin1 bits. We don't
      // have a scratch register, so we use these flags also to block
      // speculative execution, similar to the use of 0 above.
      MOZ_ASSERT(encoding == CharEncoding::TwoByte);
      static constexpr uint32_t Mask =
          JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
      static_assert(Mask < 1024,
                    "Mask should be a small, near-null value to ensure we "
                    "block speculative execution when it's used as string "
                    "pointer");
      move32(Imm32(Mask), dest);
      and32(Address(str, JSString::offsetOfFlags()), dest);
      cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT), dest,
                   str);
    }
  }

  // Load the inline chars.
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);

  // If it's not an inline string, load the non-inline chars. Use a
  // conditional move to prevent speculative execution.
  test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                Imm32(JSString::INLINE_CHARS_BIT),
                Address(str, JSString::offsetOfNonInlineChars()), dest);
}
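
// The Spectre mitigations above never branch on the string's flags; they
// replace the string (or chars) register with a small near-null constant via
// a conditional move, so a misspeculated load reads a harmless address
// instead of attacker-controlled memory.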

void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
                                              CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is a rope, has inline chars, or has a different
    // character encoding, set str to a near-null value to prevent
    // speculative execution below (when reading str->nonInlineChars).

    static constexpr uint32_t Mask = JSString::LINEAR_BIT |
                                     JSString::INLINE_CHARS_BIT |
                                     JSString::LATIN1_CHARS_BIT;
    static_assert(Mask < 1024,
                  "Mask should be a small, near-null value to ensure we "
                  "block speculative execution when it's used as string "
                  "pointer");

    uint32_t expectedBits = JSString::LINEAR_BIT;
    if (encoding == CharEncoding::Latin1) {
      expectedBits |= JSString::LATIN1_CHARS_BIT;
    }

    move32(Imm32(Mask), dest);
    and32(Address(str, JSString::offsetOfFlags()), dest);

    cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits), dest, str);
  }

  loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
}

void MacroAssembler::storeNonInlineStringChars(Register chars, Register str) {
  MOZ_ASSERT(chars != str);
  storePtr(chars, Address(str, JSString::offsetOfNonInlineChars()));
}

void MacroAssembler::loadInlineStringCharsForStore(Register str,
                                                   Register dest) {
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);
}

void MacroAssembler::loadInlineStringChars(Register str, Register dest,
                                           CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Making this Spectre-safe is a bit complicated: using
    // computeEffectiveAddress and then zeroing the output register if
    // non-inline is not sufficient: when the index is very large, it would
    // allow reading |nullptr + index|. Just fall back to loadStringChars
    // for now.
    loadStringChars(str, dest, encoding);
  } else {
    computeEffectiveAddress(
        Address(str, JSInlineString::offsetOfInlineStorage()), dest);
  }
}

void MacroAssembler::loadRopeLeftChild(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Zero the output register if the input was not a rope.
    movePtr(ImmWord(0), dest);
    test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::LINEAR_BIT),
                  Address(str, JSRope::offsetOfLeft()), dest);
  } else {
    loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
  }
}

void MacroAssembler::loadRopeRightChild(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Zero the output register if the input was not a rope.
    movePtr(ImmWord(0), dest);
    test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::LINEAR_BIT),
                  Address(str, JSRope::offsetOfRight()), dest);
  } else {
    loadPtr(Address(str, JSRope::offsetOfRight()), dest);
  }
}

void MacroAssembler::storeRopeChildren(Register left, Register right,
                                       Register str) {
  storePtr(left, Address(str, JSRope::offsetOfLeft()));
  storePtr(right, Address(str, JSRope::offsetOfRight()));
}

void MacroAssembler::loadDependentStringBase(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is not a dependent string, zero the |str| register.
    // The code below loads str->base so this should block speculative
    // execution.
    movePtr(ImmWord(0), dest);
    test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::DEPENDENT_BIT), dest, str);
  }

  loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
}

void MacroAssembler::storeDependentStringBase(Register base, Register str) {
  storePtr(base, Address(str, JSDependentString::offsetOfBase()));
}

void MacroAssembler::loadRopeChild(Register str, Register index,
                                   Register output, Label* isLinear) {
  // This follows JSString::getChar.
  branchIfNotRope(str, isLinear);

  loadRopeLeftChild(str, output);

  // Check if the index is contained in the leftChild.
  Label loadedChild;
  branch32(Assembler::Above, Address(output, JSString::offsetOfLength()), index,
           &loadedChild);

  // The index must be in the rightChild.
  loadRopeRightChild(str, output);

  bind(&loadedChild);
}

void MacroAssembler::branchIfCanLoadStringChar(Register str, Register index,
                                               Register scratch, Label* label) {
  loadRopeChild(str, index, scratch, label);

  // Branch if the left resp. right side is linear.
  branchIfNotRope(scratch, label);
}

void MacroAssembler::branchIfNotCanLoadStringChar(Register str, Register index,
                                                  Register scratch,
                                                  Label* label) {
  Label done;
  loadRopeChild(str, index, scratch, &done);

  // Branch if the left or right side is another rope.
  branchIfRope(scratch, label);

  bind(&done);
}

void MacroAssembler::loadStringChar(Register str, Register index,
                                    Register output, Register scratch1,
                                    Register scratch2, Label* fail) {
  MOZ_ASSERT(str != output);
  MOZ_ASSERT(str != index);
  MOZ_ASSERT(index != output);
  MOZ_ASSERT(output != scratch1);
  MOZ_ASSERT(output != scratch2);

  // Use scratch1 for the index (adjusted below).
  move32(index, scratch1);
  movePtr(str, output);

  // This follows JSString::getChar.
  Label notRope;
  branchIfNotRope(str, &notRope);

  loadRopeLeftChild(str, output);

  // Check if the index is contained in the leftChild.
  Label loadedChild, notInLeft;
  spectreBoundsCheck32(scratch1, Address(output, JSString::offsetOfLength()),
                       scratch2, &notInLeft);
  jump(&loadedChild);

  // The index must be in the rightChild.
  // index -= rope->leftChild()->length()
  bind(&notInLeft);
  sub32(Address(output, JSString::offsetOfLength()), scratch1);
  loadRopeRightChild(str, output);

  // If the left or right side is another rope, give up.
  bind(&loadedChild);
  branchIfRope(output, fail);

  bind(&notRope);

  Label isLatin1, done;
  // We have to check the left/right side for ropes,
  // because a TwoByte rope might have a Latin1 child.
  branchLatin1String(output, &isLatin1);
  loadStringChars(output, scratch2, CharEncoding::TwoByte);
  loadChar(scratch2, scratch1, output, CharEncoding::TwoByte);
  jump(&done);

  bind(&isLatin1);
  loadStringChars(output, scratch2, CharEncoding::Latin1);
  loadChar(scratch2, scratch1, output, CharEncoding::Latin1);

  bind(&done);
}

void MacroAssembler::loadStringIndexValue(Register str, Register dest,
                                          Label* fail) {
  MOZ_ASSERT(str != dest);

  load32(Address(str, JSString::offsetOfFlags()), dest);

  // Does not have a cached index value.
  branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);

  // Extract the index.
  rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
}

void MacroAssembler::loadChar(Register chars, Register index, Register dest,
                              CharEncoding encoding, int32_t offset /* = 0 */) {
  if (encoding == CharEncoding::Latin1) {
    loadChar(BaseIndex(chars, index, TimesOne, offset), dest, encoding);
  } else {
    loadChar(BaseIndex(chars, index, TimesTwo, offset), dest, encoding);
  }
}

void MacroAssembler::addToCharPtr(Register chars, Register index,
                                  CharEncoding encoding) {
  if (encoding == CharEncoding::Latin1) {
    static_assert(sizeof(char) == 1,
                  "Latin-1 string index shouldn't need scaling");
    addPtr(index, chars);
  } else {
    computeEffectiveAddress(BaseIndex(chars, index, TimesTwo), chars);
  }
}

void MacroAssembler::loadStringFromUnit(Register unit, Register dest,
                                        const StaticStrings& staticStrings) {
  movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
  loadPtr(BaseIndex(dest, unit, ScalePointer), dest);
}

void MacroAssembler::loadLengthTwoString(Register c1, Register c2,
                                         Register dest,
                                         const StaticStrings& staticStrings) {
  // Compute (toSmallCharTable[c1] << SMALL_CHAR_BITS) + toSmallCharTable[c2]
  // to obtain the index into `StaticStrings::length2StaticTable`.
  static_assert(sizeof(StaticStrings::SmallChar) == 1);

  movePtr(ImmPtr(&StaticStrings::toSmallCharTable.storage), dest);
  load8ZeroExtend(BaseIndex(dest, c1, Scale::TimesOne), c1);
  load8ZeroExtend(BaseIndex(dest, c2, Scale::TimesOne), c2);

  lshift32(Imm32(StaticStrings::SMALL_CHAR_BITS), c1);
  add32(c2, c1);

  // Look up the string from the computed index.
  movePtr(ImmPtr(&staticStrings.length2StaticTable), dest);
  loadPtr(BaseIndex(dest, c1, ScalePointer), dest);
}

void MacroAssembler::loadInt32ToStringWithBase(
    Register input, Register base, Register dest, Register scratch1,
    Register scratch2, const StaticStrings& staticStrings,
    const LiveRegisterSet& volatileRegs, Label* fail) {
#ifdef DEBUG
  Label baseBad, baseOk;
  branch32(Assembler::LessThan, base, Imm32(2), &baseBad);
  branch32(Assembler::LessThanOrEqual, base, Imm32(36), &baseOk);

  bind(&baseBad);
  assumeUnreachable("base must be in range [2, 36]");
  bind(&baseOk);
#endif

  // Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
  auto toChar = [this, base](Register r) {
#ifdef DEBUG
    Label ok;
    branch32(Assembler::Below, r, base, &ok);
    assumeUnreachable("bad digit");
    bind(&ok);
#else
    // Silence unused lambda capture warning.
    (void)base;
#endif

    Label done;
    add32(Imm32('0'), r);
    branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
    add32(Imm32('a' - '0' - 10), r);
    bind(&done);
  };

  // Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
  Label lengthTwo, done;
  branch32(Assembler::AboveOrEqual, input, base, &lengthTwo);
  {
    move32(input, scratch1);
    toChar(scratch1);

    loadStringFromUnit(scratch1, dest, staticStrings);

    jump(&done);
  }
  bind(&lengthTwo);

  // Compute |base * base|.
  move32(base, scratch1);
  mul32(scratch1, scratch1);

  // Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
  branch32(Assembler::AboveOrEqual, input, scratch1, fail);

  // Compute |scratch1 = input / base| and |scratch2 = input % base|.
  move32(input, scratch1);
  flexibleDivMod32(base, scratch1, scratch2, true, volatileRegs);

  // Compute the digits of the quotient and remainder.
  toChar(scratch1);
  toChar(scratch2);

  // Look up the 2-character digit string in the small-char table.
  loadLengthTwoString(scratch1, scratch2, dest, staticStrings);

  bind(&done);
}

void MacroAssembler::loadInt32ToStringWithBase(
    Register input, int32_t base, Register dest, Register scratch1,
    Register scratch2, const StaticStrings& staticStrings, Label* fail) {
  MOZ_ASSERT(2 <= base && base <= 36, "base must be in range [2, 36]");

  // Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
  auto toChar = [this, base](Register r) {
#ifdef DEBUG
    Label ok;
    branch32(Assembler::Below, r, Imm32(base), &ok);
    assumeUnreachable("bad digit");
    bind(&ok);
#endif

    if (base <= 10) {
      add32(Imm32('0'), r);
    } else {
      Label done;
      add32(Imm32('0'), r);
      branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
      add32(Imm32('a' - '0' - 10), r);
      bind(&done);
    }
  };

  // Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
  Label lengthTwo, done;
  branch32(Assembler::AboveOrEqual, input, Imm32(base), &lengthTwo);
  {
    move32(input, scratch1);
    toChar(scratch1);

    loadStringFromUnit(scratch1, dest, staticStrings);

    jump(&done);
  }
  bind(&lengthTwo);

  // Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
  branch32(Assembler::AboveOrEqual, input, Imm32(base * base), fail);

  // Compute |scratch1 = input / base| and |scratch2 = input % base|.
  if (mozilla::IsPowerOfTwo(uint32_t(base))) {
    uint32_t shift = mozilla::FloorLog2(base);

    move32(input, scratch1);
    rshift32(Imm32(shift), scratch1);

    move32(input, scratch2);
    and32(Imm32((uint32_t(1) << shift) - 1), scratch2);
  } else {
    // The following code matches CodeGenerator::visitUDivOrModConstant()
    // for x86-shared. Also see Hacker's Delight 2nd edition, chapter 10-8
    // "Unsigned Division by 7" for the case when |rmc.multiplier| exceeds
    // UINT32_MAX and we need to adjust the shift amount.
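    //
    // For example, for |base| = 10 the constants are multiplier = 0xCCCCCCCD
    // and shiftAmount = 3: |(n * 0xCCCCCCCD) >> 32 >> 3| equals floor(n / 10)
    // for every uint32 |n|, so no adjustment is needed in that case.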

    auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(base);

    // We first compute |q = (M * n) >> 32), where M = rmc.multiplier.
    mulHighUnsigned32(Imm32(rmc.multiplier), input, scratch1);

    if (rmc.multiplier > UINT32_MAX) {
      // M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
      // ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
      // contradicting the proof of correctness in computeDivisionConstants.
      MOZ_ASSERT(rmc.shiftAmount > 0);
      MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));

      // Compute |t = (n - q) / 2|.
      move32(input, scratch2);
      sub32(scratch1, scratch2);
      rshift32(Imm32(1), scratch2);

      // Compute |t = (n - q) / 2 + q = (n + q) / 2|.
      add32(scratch2, scratch1);

      // Finish the computation |q = floor(n / d)|.
      rshift32(Imm32(rmc.shiftAmount - 1), scratch1);
    } else {
      rshift32(Imm32(rmc.shiftAmount), scratch1);
    }

    // Compute the remainder from |r = n - q * d|.
    move32(scratch1, dest);
    mul32(Imm32(base), dest);
    move32(input, scratch2);
    sub32(dest, scratch2);
  }

  // Compute the digits of the quotient and remainder.
  toChar(scratch1);
  toChar(scratch2);

  // Look up the 2-character digit string in the small-char table.
  loadLengthTwoString(scratch1, scratch2, dest, staticStrings);

  bind(&done);
}

void MacroAssembler::loadBigIntDigits(Register bigInt, Register digits) {
  MOZ_ASSERT(digits != bigInt);

  // Load the inline digits.
  computeEffectiveAddress(Address(bigInt, BigInt::offsetOfInlineDigits()),
                          digits);

  // If inline digits aren't used, load the heap digits. Use a conditional move
  // to prevent speculative execution.
  cmp32LoadPtr(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
               Imm32(int32_t(BigInt::inlineDigitsLength())),
               Address(bigInt, BigInt::offsetOfHeapDigits()), digits);
}

void MacroAssembler::loadBigInt64(Register bigInt, Register64 dest) {
  // This code follows the implementation of |BigInt::toUint64()|. We're also
  // using it for inline callers of |BigInt::toInt64()|, which works, because
  // all supported Jit architectures use a two's complement representation for
  // int64 values, which means the WrapToSigned call in toInt64() is a no-op.

  Label done, nonZero;

  branchIfBigIntIsNonZero(bigInt, &nonZero);
  {
    move64(Imm64(0), dest);
    jump(&done);
  }
  bind(&nonZero);

#ifdef JS_PUNBOX64
  Register digits = dest.reg;
#else
  Register digits = dest.high;
#endif

  loadBigIntDigits(bigInt, digits);

#ifdef JS_PUNBOX64
  // Load the first digit into the destination register.
  load64(Address(digits, 0), dest);
#else
  // Load the first digit into the destination register's low value.
  load32(Address(digits, 0), dest.low);

  // And conditionally load the second digit into the high value register.
  Label twoDigits, digitsDone;
  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), &twoDigits);
  {
    move32(Imm32(0), dest.high);
    jump(&digitsDone);
  }
  {
    bind(&twoDigits);
    load32(Address(digits, sizeof(BigInt::Digit)), dest.high);
  }
  bind(&digitsDone);
#endif

  branchTest32(Assembler::Zero, Address(bigInt, BigInt::offsetOfFlags()),
               Imm32(BigInt::signBitMask()), &done);
  neg64(dest);

  bind(&done);
}

void MacroAssembler::loadFirstBigIntDigitOrZero(Register bigInt,
                                                Register dest) {
  Label done, nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  {
    movePtr(ImmWord(0), dest);
    jump(&done);
  }
  bind(&nonZero);

  loadBigIntDigits(bigInt, dest);

  // Load the first digit into the destination register.
  loadPtr(Address(dest, 0), dest);

  bind(&done);
}

void MacroAssembler::loadBigInt(Register bigInt, Register dest, Label* fail) {
  Label done, nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  {
    movePtr(ImmWord(0), dest);
    jump(&done);
  }
  bind(&nonZero);

  loadBigIntNonZero(bigInt, dest, fail);

  bind(&done);
}

void MacroAssembler::loadBigIntNonZero(Register bigInt, Register dest,
                                       Label* fail) {
  MOZ_ASSERT(bigInt != dest);

#ifdef DEBUG
  Label nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  assumeUnreachable("Unexpected zero BigInt");
  bind(&nonZero);
#endif

  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), fail);

  static_assert(BigInt::inlineDigitsLength() > 0,
                "Single digit BigInts use inline storage");

  // Load the first inline digit into the destination register.
  loadPtr(Address(bigInt, BigInt::offsetOfInlineDigits()), dest);

  // Return as a signed pointer.
  bigIntDigitToSignedPtr(bigInt, dest, fail);
}

void MacroAssembler::bigIntDigitToSignedPtr(Register bigInt, Register digit,
                                            Label* fail) {
  // BigInt digits are stored as absolute numbers. Take the failure path when
  // the digit can't be stored in intptr_t.
  branchTestPtr(Assembler::Signed, digit, digit, fail);

  // Negate |dest| when the BigInt is negative.
  Label nonNegative;
  branchIfBigIntIsNonNegative(bigInt, &nonNegative);
  negPtr(digit);
  bind(&nonNegative);
}

void MacroAssembler::loadBigIntAbsolute(Register bigInt, Register dest,
                                        Label* fail) {
  MOZ_ASSERT(bigInt != dest);

  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), fail);

  static_assert(BigInt::inlineDigitsLength() > 0,
                "Single digit BigInts use inline storage");

  // Load the first inline digit into the destination register.
  movePtr(ImmWord(0), dest);
  cmp32LoadPtr(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
               Imm32(0), Address(bigInt, BigInt::offsetOfInlineDigits()), dest);
}

void MacroAssembler::initializeBigInt64(Scalar::Type type, Register bigInt,
                                        Register64 val) {
  MOZ_ASSERT(Scalar::isBigIntType(type));

  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  Label done, nonZero;
  branch64(Assembler::NotEqual, val, Imm64(0), &nonZero);
  {
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  if (type == Scalar::BigInt64) {
    // Set the sign-bit for negative values and then continue with the two's
    // complement.
    Label isPositive;
    branch64(Assembler::GreaterThan, val, Imm64(0), &isPositive);
    {
      store32(Imm32(BigInt::signBitMask()),
              Address(bigInt, BigInt::offsetOfFlags()));
      neg64(val);
    }
    bind(&isPositive);
  }

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t, so there's a single "
                "store on 64-bit and up to two stores on 32-bit");

#ifndef JS_PUNBOX64
  Label singleDigit;
  branchTest32(Assembler::Zero, val.high, val.high, &singleDigit);
  store32(Imm32(2), Address(bigInt, BigInt::offsetOfLength()));
  bind(&singleDigit);

  // We can perform a single store64 on 32-bit platforms, because inline
  // storage can store at least two 32-bit integers.
  static_assert(BigInt::inlineDigitsLength() >= 2,
                "BigInt inline storage can store at least two digits");
#endif

  store64(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}
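
// Example: for type == Scalar::BigInt64 and a |val| of -3, the code above
// stores flags with the sign bit set, a digit length of 1, and the digit
// value 3, matching the sign-magnitude representation used by js::BigInt.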

void MacroAssembler::initializeBigInt(Register bigInt, Register val) {
  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  Label done, nonZero;
  branchTestPtr(Assembler::NonZero, val, val, &nonZero);
  {
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  // Set the sign-bit for negative values and then continue with the two's
  // complement.
  Label isPositive;
  branchTestPtr(Assembler::NotSigned, val, val, &isPositive);
  {
    store32(Imm32(BigInt::signBitMask()),
            Address(bigInt, BigInt::offsetOfFlags()));
    negPtr(val);
  }
  bind(&isPositive);

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t");

  storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}

void MacroAssembler::initializeBigIntAbsolute(Register bigInt, Register val) {
  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  Label done, nonZero;
  branchTestPtr(Assembler::NonZero, val, val, &nonZero);
  {
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t");

  storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}

void MacroAssembler::copyBigIntWithInlineDigits(Register src, Register dest,
                                                Register temp,
                                                gc::InitialHeap initialHeap,
                                                Label* fail) {
  branch32(Assembler::Above, Address(src, BigInt::offsetOfLength()),
           Imm32(int32_t(BigInt::inlineDigitsLength())), fail);

  newGCBigInt(dest, temp, initialHeap, fail);

  // Copy the sign-bit, but not any of the other bits used by the GC.
  load32(Address(src, BigInt::offsetOfFlags()), temp);
  and32(Imm32(BigInt::signBitMask()), temp);
  store32(temp, Address(dest, BigInt::offsetOfFlags()));

  // Copy the length.
  load32(Address(src, BigInt::offsetOfLength()), temp);
  store32(temp, Address(dest, BigInt::offsetOfLength()));

  // Copy the digits.
  Address srcDigits(src, js::BigInt::offsetOfInlineDigits());
  Address destDigits(dest, js::BigInt::offsetOfInlineDigits());

  for (size_t i = 0; i < BigInt::inlineDigitsLength(); i++) {
    static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                  "BigInt Digit size matches uintptr_t");

    loadPtr(srcDigits, temp);
    storePtr(temp, destDigits);

    srcDigits = Address(src, srcDigits.offset + sizeof(BigInt::Digit));
    destDigits = Address(dest, destDigits.offset + sizeof(BigInt::Digit));
  }
}

void MacroAssembler::compareBigIntAndInt32(JSOp op, Register bigInt,
                                           Register int32, Register scratch1,
                                           Register scratch2, Label* ifTrue,
                                           Label* ifFalse) {
  MOZ_ASSERT(IsLooseEqualityOp(op) || IsRelationalOp(op));

  static_assert(std::is_same_v<BigInt::Digit, uintptr_t>,
                "BigInt digit can be loaded in a pointer-sized register");
  static_assert(sizeof(BigInt::Digit) >= sizeof(uint32_t),
                "BigInt digit stores at least an uint32");

  // Test for too large numbers.
  //
  // If the absolute value of the BigInt can't be expressed in an
  // uint32/uint64, the result of the comparison is a constant.
  if (op == JSOp::Eq || op == JSOp::Ne) {
    Label* tooLarge = op == JSOp::Eq ? ifFalse : ifTrue;
    branch32(Assembler::GreaterThan,
             Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
             tooLarge);
  } else {
    Label doCompare;
    branch32(Assembler::LessThanOrEqual,
             Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
             &doCompare);

    // Still need to take the sign-bit into account for relational operations.
    if (op == JSOp::Lt || op == JSOp::Le) {
      branchIfBigIntIsNegative(bigInt, ifTrue);
      jump(ifFalse);
    } else {
      branchIfBigIntIsNegative(bigInt, ifFalse);
      jump(ifTrue);
    }

    bind(&doCompare);
  }

  // Test for mismatched signs and, if the signs are equal, load |abs(x)| in
  // |scratch1| and |abs(y)| in |scratch2| and then compare the absolute
  // numbers against each other.
  {
    // Jump to |ifTrue| resp. |ifFalse| if the BigInt is strictly less than
    // resp. strictly greater than the int32 value, depending on the comparison
    // operator.
    Label* greaterThan;
    Label* lessThan;
    if (op == JSOp::Eq) {
      greaterThan = ifFalse;
      lessThan = ifFalse;
    } else if (op == JSOp::Ne) {
      greaterThan = ifTrue;
      lessThan = ifTrue;
    } else if (op == JSOp::Lt || op == JSOp::Le) {
      greaterThan = ifFalse;
      lessThan = ifTrue;
    } else {
      MOZ_ASSERT(op == JSOp::Gt || op == JSOp::Ge);
      greaterThan = ifTrue;
      lessThan = ifFalse;
    }

    // BigInt digits are always stored as an absolute number.
    loadFirstBigIntDigitOrZero(bigInt, scratch1);

    // Load the int32 into |scratch2| and negate it for negative numbers.
    move32(int32, scratch2);

    Label isNegative, doCompare;
    branchIfBigIntIsNegative(bigInt, &isNegative);
    branch32(Assembler::LessThan, int32, Imm32(0), greaterThan);
    jump(&doCompare);

    // We rely on |neg32(INT32_MIN)| staying INT32_MIN, because we're using an
    // unsigned comparison below.
    bind(&isNegative);
    branch32(Assembler::GreaterThanOrEqual, int32, Imm32(0), lessThan);
    neg32(scratch2);

    // Not all supported platforms (e.g. MIPS64) zero-extend 32-bit operations,
    // so we need to explicitly clear any high 32-bits.
    move32ZeroExtendToPtr(scratch2, scratch2);

    // Reverse the relational comparator for negative numbers.
    // |-x < -y| <=> |+x > +y|.
    // |-x ≤ -y| <=> |+x ≥ +y|.
    // |-x > -y| <=> |+x < +y|.
    // |-x ≥ -y| <=> |+x ≤ +y|.
    JSOp reversed = ReverseCompareOp(op);
    if (reversed != op) {
      branchPtr(JSOpToCondition(reversed, /* isSigned = */ false), scratch1,
                scratch2, ifTrue);
      jump(ifFalse);
    }

    bind(&doCompare);
    branchPtr(JSOpToCondition(op, /* isSigned = */ false), scratch1, scratch2,
              ifTrue);
    jump(ifFalse);
  }
}
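
// Worked example for the sign handling above (illustrative values only):
// evaluating |-7n < -3| loads abs(-7n) = 7 into scratch1 and, on the
// negative path, negates -3 so scratch2 holds 3. Because both operands are
// negative, ReverseCompareOp turns Lt into Gt, and the unsigned comparison
// 7 > 3 correctly reports that -7n < -3 is true. For Eq/Ne the operator is
// its own reverse, so the negative path simply falls through to the shared
// magnitude comparison at |doCompare|.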

void MacroAssembler::equalBigInts(Register left, Register right,
                                  Register temp1, Register temp2,
                                  Register temp3, Register temp4,
                                  Label* notSameSign, Label* notSameLength,
                                  Label* notSameDigit) {
  MOZ_ASSERT(left != temp1);
  MOZ_ASSERT(right != temp1);
  MOZ_ASSERT(right != temp2);

  // Jump to |notSameSign| when the signs aren't the same.
  load32(Address(left, BigInt::offsetOfFlags()), temp1);
  xor32(Address(right, BigInt::offsetOfFlags()), temp1);
  branchTest32(Assembler::NonZero, temp1, Imm32(BigInt::signBitMask()),
               notSameSign);

  // Jump to |notSameLength| when the digits length is different.
  load32(Address(right, BigInt::offsetOfLength()), temp1);
  branch32(Assembler::NotEqual, Address(left, BigInt::offsetOfLength()), temp1,
           notSameLength);

  // Both BigInts have the same sign and the same number of digits. Loop
  // over each digit, starting with the left-most one, and break from the
  // loop when the first non-matching digit was found.

  loadBigIntDigits(left, temp2);
  loadBigIntDigits(right, temp3);

  static_assert(sizeof(BigInt::Digit) == sizeof(void*),
                "BigInt::Digit is pointer sized");

  computeEffectiveAddress(BaseIndex(temp2, temp1, ScalePointer), temp2);
  computeEffectiveAddress(BaseIndex(temp3, temp1, ScalePointer), temp3);

  Label start, loop;
  jump(&start);
  bind(&loop);

  subPtr(Imm32(sizeof(BigInt::Digit)), temp2);
  subPtr(Imm32(sizeof(BigInt::Digit)), temp3);

  loadPtr(Address(temp3, 0), temp4);
  branchPtr(Assembler::NotEqual, Address(temp2, 0), temp4, notSameDigit);

  bind(&start);
  branchSub32(Assembler::NotSigned, Imm32(1), temp1, &loop);

  // No different digits were found, both BigInts are equal to each other.
}
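
// Note on the digit loop above: temp2 and temp3 are first advanced past the
// last digit, and the loop then walks backwards one digit per iteration, so
// the comparison starts at the most-significant (left-most) digit and exits
// via |notSameDigit| on the first mismatch. branchSub32 doubles as the loop
// counter decrement and the termination test.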

void MacroAssembler::typeOfObject(Register obj, Register scratch, Label* slow,
                                  Label* isObject, Label* isCallable,
                                  Label* isUndefined) {
  loadObjClassUnsafe(obj, scratch);

  // Proxies can emulate undefined and have complex isCallable behavior.
  branchTestClassIsProxy(true, scratch, slow);

  // JSFunctions are always callable.
  branchTestClassIsFunction(Assembler::Equal, scratch, isCallable);

  // Objects that emulate undefined.
  Address flags(scratch, JSClass::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_EMULATES_UNDEFINED),
               isUndefined);

  // Handle classes with a call hook.
  branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClass, cOps)),
            ImmPtr(nullptr), isObject);

  loadPtr(Address(scratch, offsetof(JSClass, cOps)), scratch);
  branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClassOps, call)),
            ImmPtr(nullptr), isObject);

  jump(isCallable);
}

void MacroAssembler::isCallableOrConstructor(bool isCallable, Register obj,
                                             Register output, Label* isProxy) {
  MOZ_ASSERT(obj != output);

  Label notFunction, hasCOps, done;
  loadObjClassUnsafe(obj, output);

  // An object is callable iff:
  //   is<JSFunction>() || (getClass()->cOps && getClass()->cOps->call).
  // An object is a constructor iff:
  //   ((is<JSFunction>() && as<JSFunction>().isConstructor) ||
  //    (getClass()->cOps && getClass()->cOps->construct)).
  branchTestClassIsFunction(Assembler::NotEqual, output, &notFunction);
  if (isCallable) {
    move32(Imm32(1), output);
  } else {
    static_assert(mozilla::IsPowerOfTwo(uint32_t(FunctionFlags::CONSTRUCTOR)),
                  "FunctionFlags::CONSTRUCTOR has only one bit set");

    load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), output);
    rshift32(Imm32(mozilla::FloorLog2(uint32_t(FunctionFlags::CONSTRUCTOR))),
             output);
    and32(Imm32(1), output);
  }
  jump(&done);

  bind(&notFunction);

  if (!isCallable) {
    // For bound functions, we need to check the isConstructor flag.
    Label notBoundFunction;
    branchPtr(Assembler::NotEqual, output, ImmPtr(&BoundFunctionObject::class_),
              &notBoundFunction);

    static_assert(BoundFunctionObject::IsConstructorFlag == 0b1,
                  "AND operation results in boolean value");
    unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()), output);
    and32(Imm32(BoundFunctionObject::IsConstructorFlag), output);
    jump(&done);

    bind(&notBoundFunction);
  }

  // Just skim proxies off. Their notion of isCallable()/isConstructor() is
  // more complicated.
  branchTestClassIsProxy(true, output, isProxy);

  branchPtr(Assembler::NonZero, Address(output, offsetof(JSClass, cOps)),
            ImmPtr(nullptr), &hasCOps);
  move32(Imm32(0), output);
  jump(&done);

  bind(&hasCOps);
  loadPtr(Address(output, offsetof(JSClass, cOps)), output);
  size_t opsOffset =
      isCallable ? offsetof(JSClassOps, call) : offsetof(JSClassOps, construct);
  cmpPtrSet(Assembler::NonZero, Address(output, opsOffset), ImmPtr(nullptr),
            output);

  bind(&done);
}

void MacroAssembler::loadJSContext(Register dest) {
  movePtr(ImmPtr(runtime()->mainContextPtr()), dest);
}

static const uint8_t* ContextRealmPtr(CompileRuntime* rt) {
  return (static_cast<const uint8_t*>(rt->mainContextPtr()) +
          JSContext::offsetOfRealm());
}

void MacroAssembler::switchToRealm(Register realm) {
  storePtr(realm, AbsoluteAddress(ContextRealmPtr(runtime())));
}

void MacroAssembler::switchToRealm(const void* realm, Register scratch) {
  MOZ_ASSERT(realm);
  movePtr(ImmPtr(realm), scratch);
  switchToRealm(scratch);
}

void MacroAssembler::switchToObjectRealm(Register obj, Register scratch) {
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
  switchToRealm(scratch);
}

void MacroAssembler::switchToBaselineFrameRealm(Register scratch) {
  Address envChain(FramePointer,
                   BaselineFrame::reverseOffsetOfEnvironmentChain());
  loadPtr(envChain, scratch);
  switchToObjectRealm(scratch, scratch);
}

void MacroAssembler::switchToWasmInstanceRealm(Register scratch1,
                                               Register scratch2) {
  loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), scratch1);
  loadPtr(Address(InstanceReg, wasm::Instance::offsetOfRealm()), scratch2);
  storePtr(scratch2, Address(scratch1, JSContext::offsetOfRealm()));
}

void MacroAssembler::debugAssertContextRealm(const void* realm,
                                             Register scratch) {
#ifdef DEBUG
  Label ok;
  movePtr(ImmPtr(realm), scratch);
  branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr(runtime())),
            scratch, &ok);
  assumeUnreachable("Unexpected context realm");
  bind(&ok);
#endif
}

void MacroAssembler::setIsCrossRealmArrayConstructor(Register obj,
                                                     Register output) {
#ifdef DEBUG
  Label notProxy;
  branchTestObjectIsProxy(false, obj, output, &notProxy);
  assumeUnreachable("Unexpected proxy in setIsCrossRealmArrayConstructor");
  bind(&notProxy);
#endif

  // The object's realm must not be cx->realm.
  Label isFalse, done;
  loadPtr(Address(obj, JSObject::offsetOfShape()), output);
  loadPtr(Address(output, Shape::offsetOfBaseShape()), output);
  loadPtr(Address(output, BaseShape::offsetOfRealm()), output);
  branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr(runtime())),
            output, &isFalse);

  // The object must be a function.
  branchTestObjIsFunction(Assembler::NotEqual, obj, output, obj, &isFalse);

  // The function must be the ArrayConstructor native.
  branchPtr(Assembler::NotEqual,
            Address(obj, JSFunction::offsetOfNativeOrEnv()),
            ImmPtr(js::ArrayConstructor), &isFalse);

  move32(Imm32(1), output);
  jump(&done);

  bind(&isFalse);
  move32(Imm32(0), output);

  bind(&done);
}

void MacroAssembler::setIsDefinitelyTypedArrayConstructor(Register obj,
                                                          Register output) {
  Label isFalse, isTrue, done;

  // The object must be a function. (Wrappers are not supported.)
  branchTestObjIsFunction(Assembler::NotEqual, obj, output, obj, &isFalse);

  // Load the native into |output|.
  loadPtr(Address(obj, JSFunction::offsetOfNativeOrEnv()), output);

  auto branchIsTypedArrayCtor = [&](Scalar::Type type) {
    // The function must be a TypedArrayConstructor native (from any realm).
    JSNative constructor = TypedArrayConstructorNative(type);
    branchPtr(Assembler::Equal, output, ImmPtr(constructor), &isTrue);
  };

#define TYPED_ARRAY_CONSTRUCTOR_NATIVE(_, T, N) \
  branchIsTypedArrayCtor(Scalar::N);
  JS_FOR_EACH_TYPED_ARRAY(TYPED_ARRAY_CONSTRUCTOR_NATIVE)
#undef TYPED_ARRAY_CONSTRUCTOR_NATIVE

  // Falls through to the false case.

  bind(&isFalse);
  move32(Imm32(0), output);
  jump(&done);

  bind(&isTrue);
  move32(Imm32(1), output);

  bind(&done);
}

void MacroAssembler::loadMegamorphicCache(Register dest) {
  movePtr(ImmPtr(runtime()->addressOfMegamorphicCache()), dest);
}

void MacroAssembler::loadMegamorphicSetPropCache(Register dest) {
  movePtr(ImmPtr(runtime()->addressOfMegamorphicSetPropCache()), dest);
}

void MacroAssembler::loadStringToAtomCacheLastLookups(Register dest) {
  uintptr_t cachePtr = uintptr_t(runtime()->addressOfStringToAtomCache());
  void* offset = (void*)(cachePtr + StringToAtomCache::offsetOfLastLookups());
  movePtr(ImmPtr(offset), dest);
}

void MacroAssembler::loadAtomHash(Register id, Register outHash, Label* done) {
  Label doneInner, fatInline;
  if (!done) {
    done = &doneInner;
  }
  move32(Imm32(JSString::FAT_INLINE_MASK), outHash);
  and32(Address(id, JSString::offsetOfFlags()), outHash);

  branch32(Assembler::Equal, outHash, Imm32(JSString::FAT_INLINE_MASK),
           &fatInline);
  load32(Address(id, NormalAtom::offsetOfHash()), outHash);
  jump(done);
  bind(&fatInline);
  load32(Address(id, FatInlineAtom::offsetOfHash()), outHash);
  jump(done);
  bind(&doneInner);
}
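
// The mask test above distinguishes the two atom layouts: only a string
// whose flags contain every bit of FAT_INLINE_MASK is a fat inline atom, so
// the hash is loaded from FatInlineAtom::offsetOfHash() in that case and
// from NormalAtom::offsetOfHash() otherwise. Passing |done == nullptr| makes
// the function bind its own internal label instead of jumping out.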

void MacroAssembler::loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
                                             Register outHash,
                                             Label* cacheMiss) {
  Label isString, isSymbol, isNull, isUndefined, done, nonAtom, atom,
      lastLookupAtom;

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);
    branchTestString(Assembler::Equal, tag, &isString);
    branchTestSymbol(Assembler::Equal, tag, &isSymbol);
    branchTestNull(Assembler::Equal, tag, &isNull);
    branchTestUndefined(Assembler::NotEqual, tag, cacheMiss);
  }

  const JSAtomState& names = runtime()->names();
  movePropertyKey(PropertyKey::NonIntAtom(names.undefined), outId);
  move32(Imm32(names.undefined->hash()), outHash);
  jump(&done);

  bind(&isNull);
  movePropertyKey(PropertyKey::NonIntAtom(names.null), outId);
  move32(Imm32(names.null->hash()), outHash);
  jump(&done);

  bind(&isSymbol);
  unboxSymbol(value, outId);
  load32(Address(outId, JS::Symbol::offsetOfHash()), outHash);
  orPtr(Imm32(PropertyKey::SymbolTypeTag), outId);
  jump(&done);

  bind(&isString);
  unboxString(value, outId);
  branchTest32(Assembler::Zero, Address(outId, JSString::offsetOfFlags()),
               Imm32(JSString::ATOM_BIT), &nonAtom);

  bind(&atom);
  loadAtomHash(outId, outHash, &done);

  bind(&nonAtom);
  loadStringToAtomCacheLastLookups(outHash);

  // Compare each entry in the StringToAtomCache's lastLookups_ array.
  size_t stringOffset = StringToAtomCache::LastLookup::offsetOfString();
  branchPtr(Assembler::Equal, Address(outHash, stringOffset), outId,
            &lastLookupAtom);
  for (size_t i = 0; i < StringToAtomCache::NumLastLookups - 1; ++i) {
    addPtr(Imm32(sizeof(StringToAtomCache::LastLookup)), outHash);
    branchPtr(Assembler::Equal, Address(outHash, stringOffset), outId,
              &lastLookupAtom);
  }

  // Couldn't find us in the cache, so fall back to the C++ call.
  jump(cacheMiss);

  // We found a hit in the lastLookups_ array! Load the associated atom
  // and jump back up to our usual atom handling code.
  bind(&lastLookupAtom);
  size_t atomOffset = StringToAtomCache::LastLookup::offsetOfAtom();
  loadPtr(Address(outHash, atomOffset), outId);
  jump(&atom);

  bind(&done);
}

void MacroAssembler::emitExtractValueFromMegamorphicCacheEntry(
    Register obj, Register entry, Register scratch1, Register scratch2,
    ValueOperand output, Label* cacheHit, Label* cacheMiss) {
  Label isMissing, dynamicSlot, protoLoopHead, protoLoopTail;

  // scratch2 = entry->numHops_
  load8ZeroExtend(Address(entry, MegamorphicCache::Entry::offsetOfNumHops()),
                  scratch2);
  // if (scratch2 == NumHopsForMissingOwnProperty) goto cacheMiss
  branch32(Assembler::Equal, scratch2,
           Imm32(MegamorphicCache::Entry::NumHopsForMissingOwnProperty),
           cacheMiss);
  // if (scratch2 == NumHopsForMissingProperty) goto isMissing
  branch32(Assembler::Equal, scratch2,
           Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty),
           &isMissing);

  // NOTE: Where this is called, `output` can actually alias `obj`, and before
  // the last cacheMiss branch above we can't write to `obj`, so we can't
  // use `output`'s scratch register there. However a cache miss is impossible
  // now, so we're free to use `output` as we like.
  Register outputScratch = output.scratchReg();
  if (!outputScratch.aliases(obj)) {
    // We're okay with paying this very slight extra cost to avoid a potential
    // footgun of writing to what callers understand as only an input register.
    movePtr(obj, outputScratch);
  }
  branchTest32(Assembler::Zero, scratch2, scratch2, &protoLoopTail);
  bind(&protoLoopHead);
  loadObjProto(outputScratch, outputScratch);
  branchSub32(Assembler::NonZero, Imm32(1), scratch2, &protoLoopHead);
  bind(&protoLoopTail);

  // scratch1 = entry->slotOffset()
  load32(Address(entry, MegamorphicCacheEntry::offsetOfSlotOffset()), scratch1);

  // scratch2 = slotOffset.offset()
  move32(scratch1, scratch2);
  rshift32(Imm32(TaggedSlotOffset::OffsetShift), scratch2);

  // if (!slotOffset.isFixedSlot()) goto dynamicSlot
  branchTest32(Assembler::Zero, scratch1,
               Imm32(TaggedSlotOffset::IsFixedSlotFlag), &dynamicSlot);
  // output = outputScratch[scratch2]
  loadValue(BaseIndex(outputScratch, scratch2, TimesOne), output);
  jump(cacheHit);

  bind(&dynamicSlot);
  // output = outputScratch->slots_[scratch2]
  loadPtr(Address(outputScratch, NativeObject::offsetOfSlots()), outputScratch);
  loadValue(BaseIndex(outputScratch, scratch2, TimesOne), output);
  jump(cacheHit);

  bind(&isMissing);
  // output = undefined
  moveValue(UndefinedValue(), output);
  jump(cacheHit);
}

template <typename IdOperandType>
void MacroAssembler::emitMegamorphicCacheLookupByValueCommon(
    IdOperandType id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, Label* cacheMiss, Label* cacheMissWithEntry) {
  // A lot of this code is shared with emitMegamorphicCacheLookup. It would
  // be nice to be able to avoid the duplication here, but due to a few
  // differences like taking the id in a ValueOperand instead of being able
  // to bake it in as an immediate, and only needing a Register for the output
  // value, it seemed more awkward to read once it was deduplicated.

  // outEntryPtr = obj->shape()
  loadPtr(Address(obj, JSObject::offsetOfShape()), outEntryPtr);

  movePtr(outEntryPtr, scratch2);

  // outEntryPtr = (outEntryPtr >> 3) ^ (outEntryPtr >> 13) + idHash
  rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1), outEntryPtr);
  rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2), scratch2);
  xorPtr(scratch2, outEntryPtr);

  if constexpr (std::is_same<IdOperandType, ValueOperand>::value) {
    loadAtomOrSymbolAndHash(id, scratch1, scratch2, cacheMiss);
  } else {
    static_assert(std::is_same<IdOperandType, Register>::value);
    movePtr(id, scratch1);
    loadAtomHash(scratch1, scratch2, nullptr);
  }
  addPtr(scratch2, outEntryPtr);

  // outEntryPtr %= MegamorphicCache::NumEntries
  constexpr size_t cacheSize = MegamorphicCache::NumEntries;
  static_assert(mozilla::IsPowerOfTwo(cacheSize));
  size_t cacheMask = cacheSize - 1;
  and32(Imm32(cacheMask), outEntryPtr);

  loadMegamorphicCache(scratch2);
  // outEntryPtr = &scratch2->entries_[outEntryPtr]
  constexpr size_t entrySize = sizeof(MegamorphicCache::Entry);
  static_assert(sizeof(void*) == 4 || entrySize == 24);
  if constexpr (sizeof(void*) == 4) {
    mul32(Imm32(entrySize), outEntryPtr);
    computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesOne,
                                      MegamorphicCache::offsetOfEntries()),
                            outEntryPtr);
  } else {
    computeEffectiveAddress(BaseIndex(outEntryPtr, outEntryPtr, TimesTwo),
                            outEntryPtr);
    computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesEight,
                                      MegamorphicCache::offsetOfEntries()),
                            outEntryPtr);
  }

  // if (outEntryPtr->key_ != scratch1) goto cacheMissWithEntry
  branchPtr(Assembler::NotEqual,
            Address(outEntryPtr, MegamorphicCache::Entry::offsetOfKey()),
            scratch1, cacheMissWithEntry);
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);

  // if (outEntryPtr->shape_ != scratch1) goto cacheMissWithEntry
  branchPtr(Assembler::NotEqual,
            Address(outEntryPtr, MegamorphicCache::Entry::offsetOfShape()),
            scratch1, cacheMissWithEntry);

  // scratch2 = scratch2->generation_
  load16ZeroExtend(Address(scratch2, MegamorphicCache::offsetOfGeneration()),
                   scratch2);
  load16ZeroExtend(
      Address(outEntryPtr, MegamorphicCache::Entry::offsetOfGeneration()),
      scratch1);
  // if (outEntryPtr->generation_ != scratch2) goto cacheMissWithEntry
  branch32(Assembler::NotEqual, scratch1, scratch2, cacheMissWithEntry);
}
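
// Sketch of the entry indexing above (illustrative numbers only): a shape
// pointer p hashes to ((p >> 3) ^ (p >> 13)) + idHash, per the shift
// constants named in the comments. Because NumEntries is a power of two,
// the and32 with NumEntries - 1 reduces the hash modulo the table size. On
// 64-bit platforms the BaseIndex(e, e, TimesTwo) trick computes e * 3, which
// scaled by TimesEight gives e * 24, matching the 24-byte Entry size that
// the static_assert checks.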

void MacroAssembler::emitMegamorphicCacheLookup(
    PropertyKey id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, ValueOperand output, Label* cacheHit) {
  Label cacheMiss, isMissing, dynamicSlot, protoLoopHead, protoLoopTail;

  // scratch1 = obj->shape()
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);

  movePtr(scratch1, outEntryPtr);
  movePtr(scratch1, scratch2);

  // outEntryPtr = (scratch1 >> 3) ^ (scratch1 >> 13) + hash(id)
  rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1), outEntryPtr);
  rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2), scratch2);
  xorPtr(scratch2, outEntryPtr);
  addPtr(Imm32(HashAtomOrSymbolPropertyKey(id)), outEntryPtr);

  // outEntryPtr %= MegamorphicCache::NumEntries
  constexpr size_t cacheSize = MegamorphicCache::NumEntries;
  static_assert(mozilla::IsPowerOfTwo(cacheSize));
  size_t cacheMask = cacheSize - 1;
  and32(Imm32(cacheMask), outEntryPtr);

  loadMegamorphicCache(scratch2);
  // outEntryPtr = &scratch2->entries_[outEntryPtr]
  constexpr size_t entrySize = sizeof(MegamorphicCache::Entry);
  static_assert(sizeof(void*) == 4 || entrySize == 24);
  if constexpr (sizeof(void*) == 4) {
    mul32(Imm32(entrySize), outEntryPtr);
    computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesOne,
                                      MegamorphicCache::offsetOfEntries()),
                            outEntryPtr);
  } else {
    computeEffectiveAddress(BaseIndex(outEntryPtr, outEntryPtr, TimesTwo),
                            outEntryPtr);
    computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesEight,
                                      MegamorphicCache::offsetOfEntries()),
                            outEntryPtr);
  }

  // if (outEntryPtr->shape_ != scratch1) goto cacheMiss
  branchPtr(Assembler::NotEqual,
            Address(outEntryPtr, MegamorphicCache::Entry::offsetOfShape()),
            scratch1, &cacheMiss);

  // if (outEntryPtr->key_ != id) goto cacheMiss
  movePropertyKey(id, scratch1);
  branchPtr(Assembler::NotEqual,
            Address(outEntryPtr, MegamorphicCache::Entry::offsetOfKey()),
            scratch1, &cacheMiss);

  // scratch2 = scratch2->generation_
  load16ZeroExtend(Address(scratch2, MegamorphicCache::offsetOfGeneration()),
                   scratch2);
  load16ZeroExtend(
      Address(outEntryPtr, MegamorphicCache::Entry::offsetOfGeneration()),
      scratch1);
  // if (outEntryPtr->generation_ != scratch2) goto cacheMiss
  branch32(Assembler::NotEqual, scratch1, scratch2, &cacheMiss);

  emitExtractValueFromMegamorphicCacheEntry(
      obj, outEntryPtr, scratch1, scratch2, output, cacheHit, &cacheMiss);

  bind(&cacheMiss);
}

template <typename IdOperandType>
void MacroAssembler::emitMegamorphicCacheLookupByValue(
    IdOperandType id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, ValueOperand output, Label* cacheHit) {
  Label cacheMiss, cacheMissWithEntry;
  emitMegamorphicCacheLookupByValueCommon(id, obj, scratch1, scratch2,
                                          outEntryPtr, &cacheMiss,
                                          &cacheMissWithEntry);
  emitExtractValueFromMegamorphicCacheEntry(obj, outEntryPtr, scratch1,
                                            scratch2, output, cacheHit,
                                            &cacheMissWithEntry);
  bind(&cacheMiss);
  xorPtr(outEntryPtr, outEntryPtr);
  bind(&cacheMissWithEntry);
}

template void MacroAssembler::emitMegamorphicCacheLookupByValue<ValueOperand>(
    ValueOperand id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, ValueOperand output, Label* cacheHit);

template void MacroAssembler::emitMegamorphicCacheLookupByValue<Register>(
    Register id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, ValueOperand output, Label* cacheHit);

void MacroAssembler::emitMegamorphicCacheLookupExists(
    ValueOperand id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, Register output, Label* cacheHit, bool hasOwn) {
  Label cacheMiss, cacheMissWithEntry, cacheHitFalse;
  emitMegamorphicCacheLookupByValueCommon(id, obj, scratch1, scratch2,
                                          outEntryPtr, &cacheMiss,
                                          &cacheMissWithEntry);

  // scratch1 = outEntryPtr->numHops_
  load8ZeroExtend(
      Address(outEntryPtr, MegamorphicCache::Entry::offsetOfNumHops()),
      scratch1);

  branch32(Assembler::Equal, scratch1,
           Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty),
           &cacheHitFalse);

  if (hasOwn) {
    branch32(Assembler::NotEqual, scratch1, Imm32(0), &cacheHitFalse);
  } else {
    branch32(Assembler::Equal, scratch1,
             Imm32(MegamorphicCache::Entry::NumHopsForMissingOwnProperty),
             &cacheMissWithEntry);
  }

  move32(Imm32(1), output);
  jump(cacheHit);

  bind(&cacheHitFalse);
  xor32(output, output);
  jump(cacheHit);

  bind(&cacheMiss);
  xorPtr(outEntryPtr, outEntryPtr);
  bind(&cacheMissWithEntry);
}

void MacroAssembler::extractCurrentIndexAndKindFromIterator(Register iterator,
                                                            Register outIndex,
                                                            Register outKind) {
  // Load iterator object.
  Address nativeIterAddr(iterator,
                         PropertyIteratorObject::offsetOfIteratorSlot());
  loadPrivate(nativeIterAddr, outIndex);

  // Compute offset of propertyCursor_ from propertiesBegin().
  loadPtr(Address(outIndex, NativeIterator::offsetOfPropertyCursor()), outKind);
  subPtr(Address(outIndex, NativeIterator::offsetOfShapesEnd()), outKind);

  // Compute offset of current index from indicesBegin(). Note that because
  // propertyCursor has already been incremented, this is actually the offset
  // of the next index. We adjust accordingly below.
  size_t indexAdjustment =
      sizeof(GCPtr<JSLinearString*>) / sizeof(PropertyIndex);
  if (indexAdjustment != 1) {
    MOZ_ASSERT(indexAdjustment == 2);
    rshift32(Imm32(1), outKind);
  }

  // Load current index.
  loadPtr(Address(outIndex, NativeIterator::offsetOfPropertiesEnd()), outIndex);
  load32(BaseIndex(outIndex, outKind, Scale::TimesOne,
                   -int32_t(sizeof(PropertyIndex))),
         outIndex);

  // Extract kind.
  move32(outIndex, outKind);
  rshift32(Imm32(PropertyIndex::KindShift), outKind);

  // Extract index.
  and32(Imm32(PropertyIndex::IndexMask), outIndex);
}
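
// The adjustment above is simple size arithmetic: each property name is a
// GCPtr<JSLinearString*> (pointer-sized) while each entry in the indices
// array is a 32-bit PropertyIndex, so on 64-bit platforms the byte offset
// into the names array is twice the byte offset into the indices array and
// is halved with a single right shift; on 32-bit platforms the ratio is 1
// and no adjustment is needed.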

template <typename IdType>
void MacroAssembler::emitMegamorphicCachedSetSlot(
    IdType id, Register obj, Register scratch1,
#ifndef JS_CODEGEN_X86  // See MegamorphicSetElement in LIROps.yaml
    Register scratch2, Register scratch3,
#endif
    ValueOperand value, Label* cacheHit,
    void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType)) {
  Label cacheMiss, dynamicSlot, doAdd, doSet, doAddDynamic, doSetDynamic;

#ifdef JS_CODEGEN_X86
  pushValue(value);
  Register scratch2 = value.typeReg();
  Register scratch3 = value.payloadReg();
#endif

  // outEntryPtr = obj->shape()
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch3);

  movePtr(scratch3, scratch2);

  // scratch3 = (scratch3 >> 3) ^ (scratch3 >> 13) + idHash
  rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift1), scratch3);
  rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift2), scratch2);
  xorPtr(scratch2, scratch3);

  if constexpr (std::is_same<IdType, ValueOperand>::value) {
    loadAtomOrSymbolAndHash(id, scratch1, scratch2, &cacheMiss);
    addPtr(scratch2, scratch3);
  } else {
    static_assert(std::is_same<IdType, PropertyKey>::value);
    addPtr(Imm32(HashAtomOrSymbolPropertyKey(id)), scratch3);
    movePropertyKey(id, scratch1);
  }

  // scratch3 %= MegamorphicSetPropCache::NumEntries
  constexpr size_t cacheSize = MegamorphicSetPropCache::NumEntries;
  static_assert(mozilla::IsPowerOfTwo(cacheSize));
  size_t cacheMask = cacheSize - 1;
  and32(Imm32(cacheMask), scratch3);

  loadMegamorphicSetPropCache(scratch2);
  // scratch3 = &scratch2->entries_[scratch3]
  constexpr size_t entrySize = sizeof(MegamorphicSetPropCache::Entry);
  mul32(Imm32(entrySize), scratch3);
  computeEffectiveAddress(BaseIndex(scratch2, scratch3, TimesOne,
                                    MegamorphicSetPropCache::offsetOfEntries()),
                          scratch3);

  // if (scratch3->key_ != scratch1) goto cacheMiss
  branchPtr(Assembler::NotEqual,
            Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfKey()),
            scratch1, &cacheMiss);

  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);
  // if (scratch3->shape_ != scratch1) goto cacheMiss
  branchPtr(Assembler::NotEqual,
            Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfShape()),
            scratch1, &cacheMiss);

  // scratch2 = scratch2->generation_
  load16ZeroExtend(
      Address(scratch2, MegamorphicSetPropCache::offsetOfGeneration()),
      scratch2);
  load16ZeroExtend(
      Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfGeneration()),
      scratch1);
  // if (scratch3->generation_ != scratch2) goto cacheMiss
  branch32(Assembler::NotEqual, scratch1, scratch2, &cacheMiss);

  // scratch2 = entry->slotOffset()
  load32(
      Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfSlotOffset()),
      scratch2);

  // scratch1 = slotOffset.offset()
  move32(scratch2, scratch1);
  rshift32(Imm32(TaggedSlotOffset::OffsetShift), scratch1);

  Address afterShapePtr(scratch3,
                        MegamorphicSetPropCache::Entry::offsetOfAfterShape());

  // if (!slotOffset.isFixedSlot()) goto dynamicSlot
  branchTest32(Assembler::Zero, scratch2,
               Imm32(TaggedSlotOffset::IsFixedSlotFlag), &dynamicSlot);

  // Calculate slot address in scratch1. Jump to doSet if scratch3 == nullptr,
  // else jump (or fall-through) to doAdd.
  addPtr(obj, scratch1);
  branchPtr(Assembler::Equal, afterShapePtr, ImmPtr(nullptr), &doSet);
  jump(&doAdd);

  bind(&dynamicSlot);
  branchPtr(Assembler::Equal, afterShapePtr, ImmPtr(nullptr), &doSetDynamic);

  Address slotAddr(scratch1, 0);

  // If entry->newCapacity_ is nonzero, we need to grow the slots on the
  // object. Otherwise just jump straight to a dynamic add.
  load16ZeroExtend(
      Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfNewCapacity()),
      scratch2);
  branchTest32(Assembler::Zero, scratch2, scratch2, &doAddDynamic);

  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());

  PushRegsInMask(save);

  regs.takeUnchecked(scratch2);
  Register tmp;
  if (regs.has(obj)) {
    regs.takeUnchecked(obj);
    tmp = regs.takeAnyGeneral();
    regs.addUnchecked(obj);
  } else {
    tmp = regs.takeAnyGeneral();
  }

  using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
  setupUnalignedABICall(tmp);
  loadJSContext(tmp);
  passABIArg(tmp);
  passABIArg(obj);
  passABIArg(scratch2);
  callWithABI<Fn, NativeObject::growSlotsPure>();
  storeCallPointerResult(scratch2);
  PopRegsInMask(save);

  branchIfFalseBool(scratch2, &cacheMiss);

  bind(&doAddDynamic);
  addPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);

  bind(&doAdd);
  // scratch3 = entry->afterShape()
  loadPtr(
      Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfAfterShape()),
      scratch3);

  storeObjShape(scratch3, obj,
                [emitPreBarrier](MacroAssembler& masm, const Address& addr) {
                  emitPreBarrier(masm, addr, MIRType::Shape);
                });
#ifdef JS_CODEGEN_X86
  popValue(value);
#endif
  storeValue(value, slotAddr);
  jump(cacheHit);

  bind(&doSetDynamic);
  addPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  bind(&doSet);
  guardedCallPreBarrier(slotAddr, MIRType::Value);

#ifdef JS_CODEGEN_X86
  popValue(value);
#endif
  storeValue(value, slotAddr);
  jump(cacheHit);

  bind(&cacheMiss);
#ifdef JS_CODEGEN_X86
  popValue(value);
#endif
}

template void MacroAssembler::emitMegamorphicCachedSetSlot<PropertyKey>(
    PropertyKey id, Register obj, Register scratch1,
#ifndef JS_CODEGEN_X86  // See MegamorphicSetElement in LIROps.yaml
    Register scratch2, Register scratch3,
#endif
    ValueOperand value, Label* cacheHit,
    void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));

template void MacroAssembler::emitMegamorphicCachedSetSlot<ValueOperand>(
    ValueOperand id, Register obj, Register scratch1,
#ifndef JS_CODEGEN_X86  // See MegamorphicSetElement in LIROps.yaml
    Register scratch2, Register scratch3,
#endif
    ValueOperand value, Label* cacheHit,
    void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));

void MacroAssembler::guardNonNegativeIntPtrToInt32(Register reg, Label* fail) {
#ifdef DEBUG
  Label ok;
  branchPtr(Assembler::NotSigned, reg, reg, &ok);
  assumeUnreachable("Unexpected negative value");
  bind(&ok);
#endif

#ifdef JS_64BIT
  branchPtr(Assembler::Above, reg, Imm32(INT32_MAX), fail);
#endif
}

void MacroAssembler::loadArrayBufferByteLengthIntPtr(Register obj,
                                                     Register output) {
  Address slotAddr(obj, ArrayBufferObject::offsetOfByteLengthSlot());
  loadPrivate(slotAddr, output);
}

void MacroAssembler::loadArrayBufferViewByteOffsetIntPtr(Register obj,
                                                         Register output) {
  Address slotAddr(obj, ArrayBufferViewObject::byteOffsetOffset());
  loadPrivate(slotAddr, output);
}

void MacroAssembler::loadArrayBufferViewLengthIntPtr(Register obj,
                                                     Register output) {
  Address slotAddr(obj, ArrayBufferViewObject::lengthOffset());
  loadPrivate(slotAddr, output);
}

void MacroAssembler::loadDOMExpandoValueGuardGeneration(
    Register obj, ValueOperand output,
    JS::ExpandoAndGeneration* expandoAndGeneration, uint64_t generation,
    Label* fail) {
  loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
          output.scratchReg());
  loadValue(Address(output.scratchReg(),
                    js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
            output);

  // Guard the ExpandoAndGeneration* matches the proxy's ExpandoAndGeneration
  // private slot.
  branchTestValue(Assembler::NotEqual, output,
                  PrivateValue(expandoAndGeneration), fail);

  // Guard expandoAndGeneration->generation matches the expected generation.
  Address generationAddr(output.payloadOrValueReg(),
                         JS::ExpandoAndGeneration::offsetOfGeneration());
  branch64(Assembler::NotEqual, generationAddr, Imm64(generation), fail);

  // Load expandoAndGeneration->expando into the output Value register.
  loadValue(Address(output.payloadOrValueReg(),
                    JS::ExpandoAndGeneration::offsetOfExpando()),
            output);
}

void MacroAssembler::loadJitActivation(Register dest) {
  loadJSContext(dest);
  loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
}

void MacroAssembler::guardSpecificAtom(Register str, JSAtom* atom,
                                       Register scratch,
                                       const LiveRegisterSet& volatileRegs,
                                       Label* fail) {
  Label done;
  branchPtr(Assembler::Equal, str, ImmGCPtr(atom), &done);

  // The pointers are not equal, so if the input string is also an atom it
  // must be a different string.
  branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
               Imm32(JSString::ATOM_BIT), fail);

  // Check the length.
  branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
           Imm32(atom->length()), fail);

  // We have a non-atomized string with the same length. Call a helper
  // function to do the comparison.
  PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSString* str1, JSString* str2);
  setupUnalignedABICall(scratch);
  movePtr(ImmGCPtr(atom), scratch);
  passABIArg(scratch);
  passABIArg(str);
  callWithABI<Fn, EqualStringsHelperPure>();
  storeCallPointerResult(scratch);

  MOZ_ASSERT(!volatileRegs.has(scratch));
  PopRegsInMask(volatileRegs);
  branchIfFalseBool(scratch, fail);

  bind(&done);
}

void MacroAssembler::guardStringToInt32(Register str, Register output,
                                        Register scratch,
                                        LiveRegisterSet volatileRegs,
                                        Label* fail) {
  Label vmCall, done;
  // Use indexed value as fast path if possible.
  loadStringIndexValue(str, output, &vmCall);
  jump(&done);
  {
    bind(&vmCall);

    // Reserve space for holding the int32_t result of the call. Use
    // pointer-size to avoid misaligning the stack on 64-bit platforms.
    reserveStack(sizeof(uintptr_t));
    moveStackPtrTo(output);

    volatileRegs.takeUnchecked(scratch);
    if (output.volatile_()) {
      volatileRegs.addUnchecked(output);
    }
    PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSString* str, int32_t* result);
    setupUnalignedABICall(scratch);
    loadJSContext(scratch);
    passABIArg(scratch);
    passABIArg(str);
    passABIArg(output);
    callWithABI<Fn, GetInt32FromStringPure>();
    storeCallPointerResult(scratch);

    PopRegsInMask(volatileRegs);

    Label ok;
    branchIfTrueBool(scratch, &ok);
    {
      // OOM path, recovered by GetInt32FromStringPure.
      //
      // Use addToStackPtr instead of freeStack as freeStack tracks stack height
      // flow-insensitively, and using it twice would confuse the stack height
      // tracking.
      addToStackPtr(Imm32(sizeof(uintptr_t)));
      jump(fail);
    }
    bind(&ok);
    load32(Address(output, 0), output);
    freeStack(sizeof(uintptr_t));
  }
  bind(&done);
}
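
// Design note for the slow path above: the int32_t out-parameter lives in a
// full pointer-sized stack slot so the reservation never breaks stack
// alignment on 64-bit targets. The slot is released via freeStack() on
// success but via addToStackPtr() on the OOM path, since calling freeStack()
// on both paths would double-count the tracked stack height.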

void MacroAssembler::generateBailoutTail(Register scratch,
                                         Register bailoutInfo) {
  Label bailoutFailed;
  branchIfFalseBool(ReturnReg, &bailoutFailed);

  // Finish bailing out to Baseline.
  {
    // Prepare a register set for use in this case.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    MOZ_ASSERT_IF(!IsHiddenSP(getStackPointer()),
                  !regs.has(AsRegister(getStackPointer())));
    regs.take(bailoutInfo);

    Register temp = regs.takeAny();

#ifdef DEBUG
    // Assert the stack pointer points to the JitFrameLayout header. Copying
    // starts here.
    Label ok;
    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)),
            temp);
    branchStackPtr(Assembler::Equal, temp, &ok);
    assumeUnreachable("Unexpected stack pointer value");
    bind(&ok);
#endif

    Register copyCur = regs.takeAny();
    Register copyEnd = regs.takeAny();

    // Copy data onto stack.
    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)),
            copyCur);
    loadPtr(
        Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)),
        copyEnd);
    {
      Label copyLoop;
      Label endOfCopy;
      bind(&copyLoop);
      branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
      subPtr(Imm32(sizeof(uintptr_t)), copyCur);
      subFromStackPtr(Imm32(sizeof(uintptr_t)));
      loadPtr(Address(copyCur, 0), temp);
      storePtr(temp, Address(getStackPointer(), 0));
      jump(&copyLoop);
      bind(&endOfCopy);
    }

    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)),
            FramePointer);

    // Enter exit frame for the FinishBailoutToBaseline call.
    pushFrameDescriptor(FrameType::BaselineJS);
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
    push(FramePointer);

    // No GC things to mark on the stack, push a bare token.
    loadJSContext(scratch);
    enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);

    // Save needed values onto stack temporarily.
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));

    // Call a stub to free allocated memory and create arguments objects.
    using Fn = bool (*)(BaselineBailoutInfo* bailoutInfoArg);
    setupUnalignedABICall(temp);
    passABIArg(bailoutInfo);
    callWithABI<Fn, FinishBailoutToBaseline>(
        MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
    branchIfFalseBool(ReturnReg, exceptionLabel());

    // Restore values where they need to be and resume execution.
    AllocatableGeneralRegisterSet enterRegs(GeneralRegisterSet::All());
    MOZ_ASSERT(!enterRegs.has(FramePointer));
    Register jitcodeReg = enterRegs.takeAny();

    pop(jitcodeReg);

    // Discard exit frame.
    addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));

    jump(jitcodeReg);
  }

  bind(&bailoutFailed);
  {
    // jit::Bailout or jit::InvalidationBailout failed and returned false. The
    // Ion frame has already been discarded and the stack pointer points to the
    // JitFrameLayout header. Turn it into an ExitFrameLayout, similar to
    // EnsureUnwoundJitExitFrame, and call the exception handler.
    loadJSContext(scratch);
    enterFakeExitFrame(scratch, scratch, ExitFrameType::UnwoundJit);
    jump(exceptionLabel());
  }
}

void MacroAssembler::loadJitCodeRaw(Register func, Register dest) {
  static_assert(BaseScript::offsetOfJitCodeRaw() ==
                    SelfHostedLazyScript::offsetOfJitCodeRaw(),
                "SelfHostedLazyScript and BaseScript must use same layout for "
                "jitCodeRaw_");
  static_assert(
      BaseScript::offsetOfJitCodeRaw() == wasm::JumpTableJitEntryOffset,
      "Wasm exported functions jit entries must use same layout for "
      "jitCodeRaw_");
  loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), dest);
  loadPtr(Address(dest, BaseScript::offsetOfJitCodeRaw()), dest);
}

void MacroAssembler::loadBaselineJitCodeRaw(Register func, Register dest,
                                            Label* failure) {
  // Load JitScript.
  loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), dest);
  if (failure) {
    branchIfScriptHasNoJitScript(dest, failure);
  }
  loadJitScript(dest, dest);

  // Load BaselineScript.
  loadPtr(Address(dest, JitScript::offsetOfBaselineScript()), dest);
  if (failure) {
    static_assert(BaselineDisabledScript == 0x1);
    branchPtr(Assembler::BelowOrEqual, dest, ImmWord(BaselineDisabledScript),
              failure);
  }

  // Load Baseline jitcode.
  loadPtr(Address(dest, BaselineScript::offsetOfMethod()), dest);
  loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
}

void MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest) {
  if (framePtr != dest) {
    movePtr(framePtr, dest);
  }
  subPtr(Imm32(BaselineFrame::Size()), dest);
}

static const uint8_t* ContextInlinedICScriptPtr(CompileRuntime* rt) {
  return (static_cast<const uint8_t*>(rt->mainContextPtr()) +
          JSContext::offsetOfInlinedICScript());
}

void MacroAssembler::storeICScriptInJSContext(Register icScript) {
  storePtr(icScript, AbsoluteAddress(ContextInlinedICScriptPtr(runtime())));
}

void MacroAssembler::handleFailure() {
  // Re-entry code is irrelevant because the exception will leave the
  // running function and never come back.
  TrampolinePtr excTail = runtime()->jitRuntime()->getExceptionTail();
  jump(excTail);
}

void MacroAssembler::assumeUnreachable(const char* output) {
#ifdef JS_MASM_VERBOSE
  if (!IsCompilingWasm()) {
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    Register temp = regs.takeAnyGeneral();

    using Fn = void (*)(const char* output);
    setupUnalignedABICall(temp);
    movePtr(ImmPtr(output), temp);
    passABIArg(temp);
    callWithABI<Fn, AssumeUnreachable>(MoveOp::GENERAL,
                                       CheckUnsafeCallWithABI::DontCheckOther);

    PopRegsInMask(save);
  }
#endif

  breakpoint();
}

void MacroAssembler::printf(const char* output) {
#ifdef JS_MASM_VERBOSE
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(const char* output);
  setupUnalignedABICall(temp);
  movePtr(ImmPtr(output), temp);
  passABIArg(temp);
  callWithABI<Fn, Printf0>();

  PopRegsInMask(save);
#endif
}

void MacroAssembler::printf(const char* output, Register value) {
#ifdef JS_MASM_VERBOSE
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  regs.takeUnchecked(value);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(const char* output, uintptr_t value);
  setupUnalignedABICall(temp);
  movePtr(ImmPtr(output), temp);
  passABIArg(temp);
  passABIArg(value);
  callWithABI<Fn, Printf1>();

  PopRegsInMask(save);
#endif
}

void MacroAssembler::convertInt32ValueToDouble(ValueOperand val) {
  Label done;
  branchTestInt32(Assembler::NotEqual, val, &done);
  unboxInt32(val, val.scratchReg());
  ScratchDoubleScope fpscratch(*this);
  convertInt32ToDouble(val.scratchReg(), fpscratch);
  boxDouble(fpscratch, val, fpscratch);
  bind(&done);
}

void MacroAssembler::convertValueToFloatingPoint(ValueOperand value,
                                                 FloatRegister output,
                                                 Label* fail,
                                                 MIRType outputType) {
  Label isDouble, isInt32, isBool, isNull, done;

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    branchTestDouble(Assembler::Equal, tag, &isDouble);
    branchTestInt32(Assembler::Equal, tag, &isInt32);
    branchTestBoolean(Assembler::Equal, tag, &isBool);
    branchTestNull(Assembler::Equal, tag, &isNull);
    branchTestUndefined(Assembler::NotEqual, tag, fail);
  }

  // fall-through: undefined
  loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output,
                            outputType);
  jump(&done);

  bind(&isNull);
  loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
  jump(&done);

  bind(&isBool);
  boolValueToFloatingPoint(value, output, outputType);
  jump(&done);

  bind(&isInt32);
  int32ValueToFloatingPoint(value, output, outputType);
  jump(&done);

  // On some non-multiAlias platforms, unboxDouble may use the scratch register,
  // so do not merge code paths here.
  bind(&isDouble);
  if (outputType == MIRType::Float32 && hasMultiAlias()) {
    ScratchDoubleScope tmp(*this);
    unboxDouble(value, tmp);
    convertDoubleToFloat32(tmp, output);
  } else {
    FloatRegister tmp = output.asDouble();
    unboxDouble(value, tmp);
    if (outputType == MIRType::Float32) {
      convertDoubleToFloat32(tmp, output);
    }
  }

  bind(&done);
}

void MacroAssembler::outOfLineTruncateSlow(FloatRegister src, Register dest,
                                           bool widenFloatToDouble,
                                           bool compilingWasm,
                                           wasm::BytecodeOffset callOffset) {
  if (compilingWasm) {
    Push(InstanceReg);
  }
  int32_t framePushedAfterInstance = framePushed();

#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) ||   \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  ScratchDoubleScope fpscratch(*this);
  if (widenFloatToDouble) {
    convertFloat32ToDouble(src, fpscratch);
    src = fpscratch;
  }
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
  FloatRegister srcSingle;
  if (widenFloatToDouble) {
    MOZ_ASSERT(src.isSingle());
    srcSingle = src;
    src = src.asDouble();
    Push(srcSingle);
    convertFloat32ToDouble(srcSingle, src);
  }
#else
  // Also see below.
  MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
#endif

  MOZ_ASSERT(src.isDouble());

  if (compilingWasm) {
    int32_t instanceOffset = framePushed() - framePushedAfterInstance;
    setupWasmABICall();
    passABIArg(src, MoveOp::DOUBLE);
    callWithABI(callOffset, wasm::SymbolicAddress::ToInt32,
                mozilla::Some(instanceOffset));
  } else {
    using Fn = int32_t (*)(double);
    setupUnalignedABICall(dest);
    passABIArg(src, MoveOp::DOUBLE);
    callWithABI<Fn, JS::ToInt32>(MoveOp::GENERAL,
                                 CheckUnsafeCallWithABI::DontCheckOther);
  }
  storeCallInt32Result(dest);

#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) ||   \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  // Nothing to restore.
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
  if (widenFloatToDouble) {
    Pop(srcSingle);
  }
#else
  MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
#endif

  if (compilingWasm) {
    Pop(InstanceReg);
  }
}

void MacroAssembler::convertDoubleToInt(FloatRegister src, Register output,
                                        FloatRegister temp, Label* truncateFail,
                                        Label* fail,
                                        IntConversionBehavior behavior) {
  switch (behavior) {
    case IntConversionBehavior::Normal:
    case IntConversionBehavior::NegativeZeroCheck:
      convertDoubleToInt32(
          src, output, fail,
          behavior == IntConversionBehavior::NegativeZeroCheck);
      break;
    case IntConversionBehavior::Truncate:
      branchTruncateDoubleMaybeModUint32(src, output,
                                         truncateFail ? truncateFail : fail);
      break;
    case IntConversionBehavior::ClampToUint8:
      // Clamping clobbers the input register, so use a temp.
      if (src != temp) {
        moveDouble(src, temp);
      }
      clampDoubleToUint8(temp, output);
      break;
  }
}

void MacroAssembler::convertValueToInt(
    ValueOperand value, Label* handleStringEntry, Label* handleStringRejoin,
    Label* truncateDoubleSlow, Register stringReg, FloatRegister temp,
    Register output, Label* fail, IntConversionBehavior behavior,
    IntConversionInputKind conversion) {
  Label done, isInt32, isBool, isDouble, isNull, isString;

  bool handleStrings = (behavior == IntConversionBehavior::Truncate ||
                        behavior == IntConversionBehavior::ClampToUint8) &&
                       handleStringEntry && handleStringRejoin;

  MOZ_ASSERT_IF(handleStrings, conversion == IntConversionInputKind::Any);

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    branchTestInt32(Equal, tag, &isInt32);
    if (conversion == IntConversionInputKind::Any ||
        conversion == IntConversionInputKind::NumbersOrBoolsOnly) {
      branchTestBoolean(Equal, tag, &isBool);
    }
    branchTestDouble(Equal, tag, &isDouble);

    if (conversion == IntConversionInputKind::Any) {
      // If we are not truncating, we fail for anything that's not
      // null. Otherwise we might be able to handle strings and undefined.
      switch (behavior) {
        case IntConversionBehavior::Normal:
        case IntConversionBehavior::NegativeZeroCheck:
          branchTestNull(Assembler::NotEqual, tag, fail);
          break;

        case IntConversionBehavior::Truncate:
        case IntConversionBehavior::ClampToUint8:
          branchTestNull(Equal, tag, &isNull);
          if (handleStrings) {
            branchTestString(Equal, tag, &isString);
          }
          branchTestUndefined(Assembler::NotEqual, tag, fail);
          break;
      }
    } else {
      jump(fail);
    }
  }

  // The value is null or undefined in truncation contexts - just emit 0.
  if (conversion == IntConversionInputKind::Any) {
    if (isNull.used()) {
      bind(&isNull);
    }
    mov(ImmWord(0), output);
    jump(&done);
  }

  // |output| needs to be different from |stringReg| to load string indices.
  bool handleStringIndices = handleStrings && output != stringReg;

  // First try loading a string index. If that fails, try converting a string
  // into a double, then jump to the double case.
  Label handleStringIndex;
  if (handleStrings) {
    bind(&isString);
    unboxString(value, stringReg);
    if (handleStringIndices) {
      loadStringIndexValue(stringReg, output, handleStringEntry);
      jump(&handleStringIndex);
    } else {
      jump(handleStringEntry);
    }
  }

  // Try converting double into integer.
  if (isDouble.used() || handleStrings) {
    if (isDouble.used()) {
      bind(&isDouble);
      unboxDouble(value, temp);
    }

    if (handleStrings) {
      bind(handleStringRejoin);
    }

    convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
    jump(&done);
  }

  // Just unbox a bool, the result is 0 or 1.
  if (isBool.used()) {
    bind(&isBool);
    unboxBoolean(value, output);
    jump(&done);
  }

  // Integers can be unboxed.
  if (isInt32.used() || handleStringIndices) {
    if (isInt32.used()) {
      bind(&isInt32);
      unboxInt32(value, output);
    }

    if (handleStringIndices) {
      bind(&handleStringIndex);
    }

    if (behavior == IntConversionBehavior::ClampToUint8) {
      clampIntToUint8(output);
    }
  }

  bind(&done);
}

void MacroAssembler::finish() {
  if (failureLabel_.used()) {
    bind(&failureLabel_);
    handleFailure();
  }

  MacroAssemblerSpecific::finish();

  MOZ_RELEASE_ASSERT(
      size() <= MaxCodeBytesPerProcess,
      "AssemblerBuffer should ensure we don't exceed MaxCodeBytesPerProcess");

  if (bytesNeeded() > MaxCodeBytesPerProcess) {
    setOOM();
  }
}

void MacroAssembler::link(JitCode* code) {
  MOZ_ASSERT(!oom());
  linkProfilerCallSites(code);
}

MacroAssembler::AutoProfilerCallInstrumentation::
    AutoProfilerCallInstrumentation(MacroAssembler& masm) {
  if (!masm.emitProfilingInstrumentation_) {
    return;
  }

  Register reg = CallTempReg0;
  Register reg2 = CallTempReg1;
  masm.push(reg);
  masm.push(reg2);

  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
  masm.loadJSContext(reg2);
  masm.loadPtr(Address(reg2, offsetof(JSContext, profilingActivation_)), reg2);
  masm.storePtr(reg,
                Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));

  masm.appendProfilerCallSite(label);

  masm.pop(reg2);
  masm.pop(reg);
}

void MacroAssembler::linkProfilerCallSites(JitCode* code) {
  for (size_t i = 0; i < profilerCallSites_.length(); i++) {
    CodeOffset offset = profilerCallSites_[i];
    CodeLocationLabel location(code, offset);
    PatchDataWithValueCheck(location, ImmPtr(location.raw()),
                            ImmPtr((void*)-1));
  }
}

void MacroAssembler::alignJitStackBasedOnNArgs(Register nargs,
                                               bool countIncludesThis) {
  // The stack should already be aligned to the size of a value.
  assertStackAlignment(sizeof(Value), 0);

  static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
                "JitStackValueAlignment is either 1 or 2.");
  if (JitStackValueAlignment == 1) {
    return;
  }

  // A jit frame is composed of the following:
  //
  // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
  //                                    \________JitFrameLayout_________/
  // (The stack grows this way --->)
  //
  // We want to ensure that |raddr|, the return address, is 16-byte aligned.
  // (Note: if 8-byte alignment was sufficient, we would have already
  // returned above.)

  // JitFrameLayout does not affect the alignment, so we can ignore it.
  static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
                "JitFrameLayout doesn't affect stack alignment");

  // Therefore, we need to ensure that |this| is aligned.
  // This implies that |argN| must be aligned if N is even,
  // and offset by |sizeof(Value)| if N is odd.

  // Depending on the context of the caller, it may be easier to pass in a
  // register that has already been modified to include |this|. If that is the
  // case, we want to flip the direction of the test.
  Assembler::Condition condition =
      countIncludesThis ? Assembler::NonZero : Assembler::Zero;

  Label alignmentIsOffset, end;
  branchTestPtr(condition, nargs, Imm32(1), &alignmentIsOffset);

  // |argN| should be aligned to 16 bytes.
  andToStackPtr(Imm32(~(JitStackAlignment - 1)));
  jump(&end);

  // |argN| should be offset by 8 bytes from 16-byte alignment.
  // We already know that it is 8-byte aligned, so the only possibilities are:
  // a) It is 16-byte aligned, and we must offset it by 8 bytes.
  // b) It is not 16-byte aligned, and therefore already has the right offset.
  // Therefore, we test to see if it is 16-byte aligned, and adjust it if it is.
  bind(&alignmentIsOffset);
  branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
  subFromStackPtr(Imm32(sizeof(Value)));

  bind(&end);
}
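
// Worked example for the parity logic above (illustrative): with 16-byte
// JitStackAlignment and 8-byte Values, |this| must land on a 16-byte
// boundary. For nargs == 2, [arg2][arg1][this] puts |this| exactly two
// Values below |arg2|, so |arg2| itself must be 16-byte aligned; for
// nargs == 3 the distance is three Values, so |arg3| must sit 8 bytes past
// a 16-byte boundary. The countIncludesThis flag simply flips which parity
// takes which branch.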

void MacroAssembler::alignJitStackBasedOnNArgs(uint32_t argc,
                                               bool countIncludesThis) {
  // The stack should already be aligned to the size of a value.
  assertStackAlignment(sizeof(Value), 0);

  static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
                "JitStackValueAlignment is either 1 or 2.");
  if (JitStackValueAlignment == 1) {
    return;
  }

  // See above for the full explanation.
  uint32_t nArgs = argc + !countIncludesThis;
  if (nArgs % 2 == 0) {
    // |argN| should be 16-byte aligned.
    andToStackPtr(Imm32(~(JitStackAlignment - 1)));
  } else {
    // |argN| must be 16-byte aligned if argc is even,
    // and offset by 8 if argc is odd.
    Label end;
    branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
    subFromStackPtr(Imm32(sizeof(Value)));
    bind(&end);
    assertStackAlignment(JitStackAlignment, sizeof(Value));
  }
}
3500 // ===============================================================
3502 MacroAssembler::MacroAssembler(TempAllocator
& alloc
,
3503 CompileRuntime
* maybeRuntime
,
3504 CompileRealm
* maybeRealm
)
3505 : maybeRuntime_(maybeRuntime
),
3506 maybeRealm_(maybeRealm
),
3507 wasmMaxOffsetGuardLimit_(0),
3512 dynamicAlignment_(false),
3513 emitProfilingInstrumentation_(false) {
3514 moveResolver_
.setAllocator(alloc
);
3517 StackMacroAssembler::StackMacroAssembler(JSContext
* cx
, TempAllocator
& alloc
)
3518 : MacroAssembler(alloc
, CompileRuntime::get(cx
->runtime()),
3519 CompileRealm::get(cx
->realm())) {}
3521 IonHeapMacroAssembler::IonHeapMacroAssembler(TempAllocator
& alloc
,
3522 CompileRealm
* realm
)
3523 : MacroAssembler(alloc
, realm
->runtime(), realm
) {
3524 MOZ_ASSERT(CurrentThreadIsIonCompiling());
WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc, bool limitedSize)
    : MacroAssembler(alloc) {
#if defined(JS_CODEGEN_ARM64)
  // Stubs + builtins + the baseline compiler all require the native SP,
  // not the PSP.
  SetStackPointer64(sp);
#endif
  if (!limitedSize) {
    setUnlimitedBuffer();
  }
}
WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc,
                                       const wasm::ModuleEnvironment& env,
                                       bool limitedSize)
    : MacroAssembler(alloc) {
#if defined(JS_CODEGEN_ARM64)
  // Stubs + builtins + the baseline compiler all require the native SP,
  // not the PSP.
  SetStackPointer64(sp);
#endif
  setWasmMaxOffsetGuardLimit(
      wasm::GetMaxOffsetGuardLimit(env.hugeMemoryEnabled()));
  if (!limitedSize) {
    setUnlimitedBuffer();
  }
}
bool MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr,
                                             AutoSaveLiveRegisters& save) {
  return buildOOLFakeExitFrame(fakeReturnAddr);
}
#ifndef JS_CODEGEN_ARM64
void MacroAssembler::subFromStackPtr(Register reg) {
  subPtr(reg, getStackPointer());
}
#endif  // JS_CODEGEN_ARM64
//{{{ check_macroassembler_style
// ===============================================================
// Stack manipulation functions.

void MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set) {
  PushRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}

void MacroAssembler::PopRegsInMask(LiveRegisterSet set) {
  PopRegsInMaskIgnore(set, LiveRegisterSet());
}

void MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set) {
  PopRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}
void MacroAssembler::Push(PropertyKey key, Register scratchReg) {
  if (key.isGCThing()) {
    // If we're pushing a gcthing, then we can't just push the tagged key
    // value since the GC won't have any idea that the push instruction
    // carries a reference to a gcthing. Need to unpack the pointer,
    // push it using ImmGCPtr, and then rematerialize the PropertyKey at
    // runtime.

    if (key.isString()) {
      JSString* str = key.toString();
      MOZ_ASSERT((uintptr_t(str) & PropertyKey::TypeMask) == 0);
      static_assert(PropertyKey::StringTypeTag == 0,
                    "need to orPtr StringTypeTag if it's not 0");
      Push(ImmGCPtr(str));
    } else {
      MOZ_ASSERT(key.isSymbol());
      movePropertyKey(key, scratchReg);
      Push(scratchReg);
    }
  } else {
    MOZ_ASSERT(key.isInt());
    Push(ImmWord(key.asRawBits()));
  }
}
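// For reference, a minimal sketch (not part of the build) of the tagging
// scheme the two paths above rely on: the low PropertyKey::TypeMask bits of
// a key carry the type tag, so a string key round-trips as the bare pointer
// (StringTypeTag == 0) while a symbol key is the pointer OR'ed with
// SymbolTypeTag. RematerializeKeyBits is a hypothetical helper name:
//
//   uintptr_t RematerializeKeyBits(JSString* str) {
//     return uintptr_t(str);  // tag bits are already 0
//   }
//   uintptr_t RematerializeKeyBits(JS::Symbol* sym) {
//     return uintptr_t(sym) | PropertyKey::SymbolTypeTag;
//   }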
void MacroAssembler::movePropertyKey(PropertyKey key, Register dest) {
  if (key.isGCThing()) {
    // See comment in |Push(PropertyKey, ...)| above for an explanation.
    if (key.isString()) {
      JSString* str = key.toString();
      MOZ_ASSERT((uintptr_t(str) & PropertyKey::TypeMask) == 0);
      static_assert(PropertyKey::StringTypeTag == 0,
                    "need to orPtr JSID_TYPE_STRING tag if it's not 0");
      movePtr(ImmGCPtr(str), dest);
    } else {
      MOZ_ASSERT(key.isSymbol());
      JS::Symbol* sym = key.toSymbol();
      movePtr(ImmGCPtr(sym), dest);
      orPtr(Imm32(PropertyKey::SymbolTypeTag), dest);
    }
  } else {
    MOZ_ASSERT(key.isInt());
    movePtr(ImmWord(key.asRawBits()), dest);
  }
}
void MacroAssembler::Push(TypedOrValueRegister v) {
  if (v.hasValue()) {
    Push(v.valueReg());
  } else if (IsFloatingPointType(v.type())) {
    FloatRegister reg = v.typedReg().fpu();
    if (v.type() == MIRType::Float32) {
      ScratchDoubleScope fpscratch(*this);
      convertFloat32ToDouble(reg, fpscratch);
      PushBoxed(fpscratch);
    } else {
      PushBoxed(reg);
    }
  } else {
    Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
  }
}
void MacroAssembler::Push(const ConstantOrRegister& v) {
  if (v.constant()) {
    Push(v.value());
  } else {
    Push(v.reg());
  }
}
void MacroAssembler::Push(const Address& addr) {
  push(addr);
  framePushed_ += sizeof(uintptr_t);
}

void MacroAssembler::Push(const ValueOperand& val) {
  pushValue(val);
  framePushed_ += sizeof(Value);
}

void MacroAssembler::Push(const Value& val) {
  pushValue(val);
  framePushed_ += sizeof(Value);
}

void MacroAssembler::Push(JSValueType type, Register reg) {
  pushValue(type, reg);
  framePushed_ += sizeof(Value);
}
void MacroAssembler::Push(const Register64 reg) {
#if JS_BITS_PER_WORD == 64
  Push(reg.reg);
#else
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Big-endian not supported.");
  Push(reg.high);
  Push(reg.low);
#endif
}
void MacroAssembler::PushEmptyRooted(VMFunctionData::RootType rootType) {
  switch (rootType) {
    case VMFunctionData::RootNone:
      MOZ_CRASH("Handle must have root type");
    case VMFunctionData::RootObject:
    case VMFunctionData::RootString:
    case VMFunctionData::RootCell:
    case VMFunctionData::RootBigInt:
      Push(ImmPtr(nullptr));
      break;
    case VMFunctionData::RootValue:
      Push(UndefinedValue());
      break;
    case VMFunctionData::RootId:
      Push(ImmWord(JS::PropertyKey::Void().asRawBits()));
      break;
  }
}
void MacroAssembler::popRooted(VMFunctionData::RootType rootType,
                               Register cellReg,
                               const ValueOperand& valueReg) {
  switch (rootType) {
    case VMFunctionData::RootNone:
      MOZ_CRASH("Handle must have root type");
    case VMFunctionData::RootObject:
    case VMFunctionData::RootString:
    case VMFunctionData::RootCell:
    case VMFunctionData::RootId:
    case VMFunctionData::RootBigInt:
      Pop(cellReg);
      break;
    case VMFunctionData::RootValue:
      Pop(valueReg);
      break;
  }
}
void MacroAssembler::adjustStack(int amount) {
  if (amount > 0) {
    freeStack(amount);
  } else if (amount < 0) {
    reserveStack(-amount);
  }
}

void MacroAssembler::freeStack(uint32_t amount) {
  MOZ_ASSERT(amount <= framePushed_);
  if (amount) {
    addToStackPtr(Imm32(amount));
  }
  framePushed_ -= amount;
}

void MacroAssembler::freeStack(Register amount) { addToStackPtr(amount); }
// ===============================================================
// ABI function calls.
template <class ABIArgGeneratorT>
void MacroAssembler::setupABICallHelper() {
#ifdef DEBUG
  MOZ_ASSERT(!inCall_);
  inCall_ = true;
#endif

  // Reinitialize the ABIArg generator.
  abiArgs_ = ABIArgGeneratorT();

#if defined(JS_CODEGEN_ARM)
  // On ARM, we need to know what ABI we are using, either in the
  // simulator, or based on the configure flags.
#  if defined(JS_SIMULATOR_ARM)
  abiArgs_.setUseHardFp(UseHardFpABI());
#  elif defined(JS_CODEGEN_ARM_HARDFP)
  abiArgs_.setUseHardFp(true);
#  else
  abiArgs_.setUseHardFp(false);
#  endif
#endif

#if defined(JS_CODEGEN_MIPS32)
  // On MIPS, the system ABI uses general register pairs to encode double
  // arguments, after one or two integer-like arguments. Unfortunately, the
  // Lowering phase cannot express this at the moment, so we enforce the
  // system ABI here.
  abiArgs_.enforceO32ABI();
#endif
}
void MacroAssembler::setupNativeABICall() {
  setupABICallHelper<ABIArgGenerator>();
}

void MacroAssembler::setupWasmABICall() {
  MOZ_ASSERT(IsCompilingWasm(), "non-wasm should use setupAlignedABICall");
  setupABICallHelper<WasmABIArgGenerator>();

#if defined(JS_CODEGEN_ARM)
  // The builtin thunk does the FP -> GPR moving on soft-FP, so
  // use hard fp unconditionally.
  abiArgs_.setUseHardFp(true);
#endif
  dynamicAlignment_ = false;
}

void MacroAssembler::setupAlignedABICall() {
  MOZ_ASSERT(!IsCompilingWasm(), "wasm should use setupWasmABICall");
  setupNativeABICall();
  dynamicAlignment_ = false;
}
void MacroAssembler::passABIArg(const MoveOperand& from, MoveOp::Type type) {
  MOZ_ASSERT(inCall_);
  appendSignatureType(type);

  ABIArg arg;
  switch (type) {
    case MoveOp::FLOAT32:
      arg = abiArgs_.next(MIRType::Float32);
      break;
    case MoveOp::DOUBLE:
      arg = abiArgs_.next(MIRType::Double);
      break;
    case MoveOp::GENERAL:
      arg = abiArgs_.next(MIRType::Pointer);
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }

  MoveOperand to(*this, arg);
  if (from == to) {
    return;
  }

  if (oom()) {
    return;
  }
  propagateOOM(moveResolver_.addMove(from, to, type));
}
void MacroAssembler::callWithABINoProfiler(void* fun, MoveOp::Type result,
                                           CheckUnsafeCallWithABI check) {
  appendSignatureType(result);
#ifdef JS_SIMULATOR
  fun = Simulator::RedirectNativeFunction(fun, signature());
#endif

  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);

#ifdef DEBUG
  if (check == CheckUnsafeCallWithABI::Check) {
    push(ReturnReg);
    loadJSContext(ReturnReg);
    Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
    store32(Imm32(1), flagAddr);
    pop(ReturnReg);
    // On arm64, SP may be < PSP now (that's OK).
    // eg testcase: tests/bug1375074.js
  }
#endif

  call(ImmPtr(fun));

  callWithABIPost(stackAdjust, result);

#ifdef DEBUG
  if (check == CheckUnsafeCallWithABI::Check) {
    Label ok;
    push(ReturnReg);
    loadJSContext(ReturnReg);
    Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
    branch32(Assembler::Equal, flagAddr, Imm32(0), &ok);
    assumeUnreachable("callWithABI: callee did not use AutoUnsafeCallWithABI");
    bind(&ok);
    pop(ReturnReg);
    // On arm64, SP may be < PSP now (that's OK).
    // eg testcase: tests/bug1375074.js
  }
#endif
}
CodeOffset MacroAssembler::callWithABI(wasm::BytecodeOffset bytecode,
                                       wasm::SymbolicAddress imm,
                                       mozilla::Maybe<int32_t> instanceOffset,
                                       MoveOp::Type result) {
  MOZ_ASSERT(wasm::NeedsBuiltinThunk(imm));

  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust, /* callFromWasm = */ true);

  // The instance register is used in builtin thunks and must be set.
  if (instanceOffset) {
    loadPtr(Address(getStackPointer(), *instanceOffset + stackAdjust),
            InstanceReg);
  } else {
    MOZ_CRASH("instanceOffset is Nothing only for unsupported abi calls.");
  }
  CodeOffset raOffset = call(
      wasm::CallSiteDesc(bytecode.offset(), wasm::CallSite::Symbolic), imm);

  callWithABIPost(stackAdjust, result, /* callFromWasm = */ true);

  return raOffset;
}
void MacroAssembler::callDebugWithABI(wasm::SymbolicAddress imm,
                                      MoveOp::Type result) {
  MOZ_ASSERT(!wasm::NeedsBuiltinThunk(imm));
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust, /* callFromWasm = */ false);
  call(imm);
  callWithABIPost(stackAdjust, result, /* callFromWasm = */ false);
}
// ===============================================================
// Exit frame footer.

void MacroAssembler::linkExitFrame(Register cxreg, Register scratch) {
  loadPtr(Address(cxreg, JSContext::offsetOfActivation()), scratch);
  storeStackPtr(Address(scratch, JitActivation::offsetOfPackedExitFP()));
}
// ===============================================================
// Simple value-shuffling helpers, to hide MoveResolver verbosity
// in common cases.

void MacroAssembler::moveRegPair(Register src0, Register src1, Register dst0,
                                 Register dst1, MoveOp::Type type) {
  MoveResolver& moves = moveResolver();

  propagateOOM(moves.addMove(MoveOperand(src0), MoveOperand(dst0), type));
  propagateOOM(moves.addMove(MoveOperand(src1), MoveOperand(dst1), type));
  propagateOOM(moves.resolve());
  if (oom()) {
    return;
  }

  MoveEmitter emitter(*this);
  emitter.emit(moves);
  emitter.finish();
}
// ===============================================================
// Arithmetic functions

void MacroAssembler::pow32(Register base, Register power, Register dest,
                           Register temp1, Register temp2, Label* onOver) {
  // Inline int32-specialized implementation of js::powi with overflow
  // detection.

  move32(Imm32(1), dest);  // result = 1

  // x^y where x == 1 returns 1 for any y.
  Label done;
  branch32(Assembler::Equal, base, Imm32(1), &done);

  move32(base, temp1);   // runningSquare = x
  move32(power, temp2);  // n = y

  // x^y where y < 0 returns a non-int32 value for any x != 1, except when y
  // is large enough that the result is no longer representable as a double
  // with fractional parts. We can't easily determine when y is too large, so
  // we bail here.
  // Note: it's important for this condition to match the code in CacheIR.cpp
  // (CanAttachInt32Pow) to prevent failure loops.
  Label start;
  branchTest32(Assembler::NotSigned, power, power, &start);
  jump(onOver);

  Label loop;
  bind(&loop);

  // runningSquare *= runningSquare
  branchMul32(Assembler::Overflow, temp1, temp1, onOver);

  bind(&start);

  // if ((n & 1) != 0) result *= runningSquare
  Label even;
  branchTest32(Assembler::Zero, temp2, Imm32(1), &even);
  branchMul32(Assembler::Overflow, temp1, dest, onOver);
  bind(&even);

  // n >>= 1
  // if (n == 0) return result
  branchRshift32(Assembler::NonZero, Imm32(1), temp2, &loop);

  bind(&done);
}
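// Scalar sketch (not part of the build) of the square-and-multiply loop
// emitted above; returning false corresponds to branching to |onOver|.
// PowI32 and MulOverflows are hypothetical helper names:
//
//   bool PowI32(int32_t x, int32_t y, int32_t* out) {
//     if (x == 1) { *out = 1; return true; }  // 1^y == 1 for any y
//     if (y < 0) return false;                // result may not be an int32
//     int32_t result = 1, runningSquare = x;
//     uint32_t n = uint32_t(y);
//     while (true) {
//       if (n & 1) {
//         if (MulOverflows(result, runningSquare, &result)) return false;
//       }
//       n >>= 1;
//       if (n == 0) break;
//       if (MulOverflows(runningSquare, runningSquare, &runningSquare)) {
//         return false;
//       }
//     }
//     *out = result;
//     return true;
//   }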
void MacroAssembler::signInt32(Register input, Register output) {
  MOZ_ASSERT(input != output);

  Label done;
  move32(input, output);
  rshift32Arithmetic(Imm32(31), output);
  branch32(Assembler::LessThanOrEqual, input, Imm32(0), &done);
  move32(Imm32(1), output);
  bind(&done);
}
void MacroAssembler::signDouble(FloatRegister input, FloatRegister output) {
  MOZ_ASSERT(input != output);

  Label done, zeroOrNaN, negative;
  loadConstantDouble(0.0, output);
  branchDouble(Assembler::DoubleEqualOrUnordered, input, output, &zeroOrNaN);
  branchDouble(Assembler::DoubleLessThan, input, output, &negative);

  loadConstantDouble(1.0, output);
  jump(&done);

  bind(&negative);
  loadConstantDouble(-1.0, output);
  jump(&done);

  bind(&zeroOrNaN);
  moveDouble(input, output);

  bind(&done);
}
void MacroAssembler::signDoubleToInt32(FloatRegister input, Register output,
                                       FloatRegister temp, Label* fail) {
  MOZ_ASSERT(input != temp);

  Label done, zeroOrNaN, negative;
  loadConstantDouble(0.0, temp);
  branchDouble(Assembler::DoubleEqualOrUnordered, input, temp, &zeroOrNaN);
  branchDouble(Assembler::DoubleLessThan, input, temp, &negative);

  move32(Imm32(1), output);
  jump(&done);

  bind(&negative);
  move32(Imm32(-1), output);
  jump(&done);

  // Fail for NaN and negative zero.
  bind(&zeroOrNaN);
  branchDouble(Assembler::DoubleUnordered, input, input, fail);

  // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
  // is -Infinity instead of Infinity.
  loadConstantDouble(1.0, temp);
  divDouble(input, temp);
  branchDouble(Assembler::DoubleLessThan, temp, input, fail);
  move32(Imm32(0), output);

  bind(&done);
}
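// The zero case above relies on IEEE-754 division semantics to tell -0.0
// from 0.0; a scalar sketch (not part of the build):
//
//   bool IsNegativeZero(double d) {
//     return d == 0.0 && 1.0 / d < 0.0;  // 1.0 / -0.0 == -Infinity
//   }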
void MacroAssembler::randomDouble(Register rng, FloatRegister dest,
                                  Register64 temp0, Register64 temp1) {
  using mozilla::non_crypto::XorShift128PlusRNG;

  static_assert(
      sizeof(XorShift128PlusRNG) == 2 * sizeof(uint64_t),
      "Code below assumes XorShift128PlusRNG contains two uint64_t values");

  Address state0Addr(rng, XorShift128PlusRNG::offsetOfState0());
  Address state1Addr(rng, XorShift128PlusRNG::offsetOfState1());

  Register64 s0Reg = temp0;
  Register64 s1Reg = temp1;

  // uint64_t s1 = mState[0];
  load64(state0Addr, s1Reg);

  // s1 ^= s1 << 23;
  move64(s1Reg, s0Reg);
  lshift64(Imm32(23), s1Reg);
  xor64(s0Reg, s1Reg);

  // s1 ^= s1 >> 17
  move64(s1Reg, s0Reg);
  rshift64(Imm32(17), s1Reg);
  xor64(s0Reg, s1Reg);

  // const uint64_t s0 = mState[1];
  load64(state1Addr, s0Reg);

  // mState[0] = s0;
  store64(s0Reg, state0Addr);

  // s1 ^= s0
  xor64(s0Reg, s1Reg);

  // s1 ^= s0 >> 26
  rshift64(Imm32(26), s0Reg);
  xor64(s0Reg, s1Reg);

  // mState[1] = s1;
  store64(s1Reg, state1Addr);

  // s1 += mState[0];
  load64(state0Addr, s0Reg);
  add64(s0Reg, s1Reg);

  // See comment in XorShift128PlusRNG::nextDouble().
  static constexpr int MantissaBits =
      mozilla::FloatingPoint<double>::kExponentShift + 1;
  static constexpr double ScaleInv = double(1) / (1ULL << MantissaBits);

  and64(Imm64((1ULL << MantissaBits) - 1), s1Reg);

  // Note: we know s1Reg isn't signed after the and64 so we can use the faster
  // convertInt64ToDouble instead of convertUInt64ToDouble.
  convertInt64ToDouble(s1Reg, dest);

  // dest *= ScaleInv
  mulDoublePtr(ImmPtr(&ScaleInv), s0Reg.scratchReg(), dest);
}
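// Scalar reference (not part of the build) for the sequence emitted above,
// mirroring the xorshift128+ step implemented by XorShift128PlusRNG:
//
//   uint64_t XorShift128PlusNext(uint64_t state[2]) {
//     uint64_t s1 = state[0];
//     const uint64_t s0 = state[1];
//     s1 ^= s1 << 23;
//     s1 ^= s1 >> 17;
//     state[0] = s0;
//     s1 ^= s0;
//     s1 ^= s0 >> 26;
//     state[1] = s1;
//     return state[0] + state[1];
//   }
//
// The result is then masked to the 53 mantissa bits and scaled by 1 / 2^53,
// yielding a uniform double in [0, 1).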
void MacroAssembler::sameValueDouble(FloatRegister left, FloatRegister right,
                                     FloatRegister temp, Register dest) {
  Label nonEqual, isSameValue, isNotSameValue;
  branchDouble(Assembler::DoubleNotEqualOrUnordered, left, right, &nonEqual);
  {
    // First, test for being equal to 0.0, which also includes -0.0.
    loadConstantDouble(0.0, temp);
    branchDouble(Assembler::DoubleNotEqual, left, temp, &isSameValue);

    // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
    // is -Infinity instead of Infinity.
    Label isNegInf;
    loadConstantDouble(1.0, temp);
    divDouble(left, temp);
    branchDouble(Assembler::DoubleLessThan, temp, left, &isNegInf);
    {
      loadConstantDouble(1.0, temp);
      divDouble(right, temp);
      branchDouble(Assembler::DoubleGreaterThan, temp, right, &isSameValue);
      jump(&isNotSameValue);
    }
    bind(&isNegInf);
    {
      loadConstantDouble(1.0, temp);
      divDouble(right, temp);
      branchDouble(Assembler::DoubleLessThan, temp, right, &isSameValue);
      jump(&isNotSameValue);
    }
  }
  bind(&nonEqual);
  {
    // Test if both values are NaN.
    branchDouble(Assembler::DoubleOrdered, left, left, &isNotSameValue);
    branchDouble(Assembler::DoubleOrdered, right, right, &isNotSameValue);
  }

  Label done;
  bind(&isSameValue);
  move32(Imm32(1), dest);
  jump(&done);

  bind(&isNotSameValue);
  move32(Imm32(0), dest);

  bind(&done);
}
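// Scalar equivalent (not part of the build) of the double case of the
// SameValue algorithm emitted above:
//
//   #include <cmath>
//   bool SameValueDouble(double x, double y) {
//     if (x == y) {
//       // x == y also holds for +0.0 == -0.0; compare signs when zero.
//       return x != 0.0 || std::signbit(x) == std::signbit(y);
//     }
//     return std::isnan(x) && std::isnan(y);  // NaN is SameValue to NaN
//   }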
void MacroAssembler::minMaxArrayInt32(Register array, Register result,
                                      Register temp1, Register temp2,
                                      Register temp3, bool isMax,
                                      Label* fail) {
  // array must be a packed array. Load its elements.
  Register elements = temp1;
  loadPtr(Address(array, NativeObject::offsetOfElements()), elements);

  // Load the length and guard that it is non-zero.
  Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp3);
  branchTest32(Assembler::Zero, temp3, temp3, fail);

  // Compute the address of the last element.
  Register elementsEnd = temp2;
  BaseObjectElementIndex elementsEndAddr(elements, temp3,
                                         -int32_t(sizeof(Value)));
  computeEffectiveAddress(elementsEndAddr, elementsEnd);

  // Load the first element into result.
  fallibleUnboxInt32(Address(elements, 0), result, fail);

  Label loop, done;
  bind(&loop);

  // Check whether we're done.
  branchPtr(Assembler::Equal, elements, elementsEnd, &done);

  // If not, advance to the next element and load it.
  addPtr(Imm32(sizeof(Value)), elements);
  fallibleUnboxInt32(Address(elements, 0), temp3, fail);

  // Update result if necessary.
  Assembler::Condition cond =
      isMax ? Assembler::GreaterThan : Assembler::LessThan;
  cmp32Move32(cond, temp3, result, temp3, result);

  jump(&loop);
  bind(&done);
}
void MacroAssembler::minMaxArrayNumber(Register array, FloatRegister result,
                                       FloatRegister floatTemp, Register temp1,
                                       Register temp2, bool isMax,
                                       Label* fail) {
  // array must be a packed array. Load its elements.
  Register elements = temp1;
  loadPtr(Address(array, NativeObject::offsetOfElements()), elements);

  // Load the length and check if the array is empty.
  Label isEmpty;
  Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp2);
  branchTest32(Assembler::Zero, temp2, temp2, &isEmpty);

  // Compute the address of the last element.
  Register elementsEnd = temp2;
  BaseObjectElementIndex elementsEndAddr(elements, temp2,
                                         -int32_t(sizeof(Value)));
  computeEffectiveAddress(elementsEndAddr, elementsEnd);

  // Load the first element into result.
  ensureDouble(Address(elements, 0), result, fail);

  Label loop, done;
  bind(&loop);

  // Check whether we're done.
  branchPtr(Assembler::Equal, elements, elementsEnd, &done);

  // If not, advance to the next element and load it into floatTemp.
  addPtr(Imm32(sizeof(Value)), elements);
  ensureDouble(Address(elements, 0), floatTemp, fail);

  // Update result if necessary.
  if (isMax) {
    maxDouble(floatTemp, result, /* handleNaN = */ true);
  } else {
    minDouble(floatTemp, result, /* handleNaN = */ true);
  }

  jump(&loop);

  // With no arguments, min/max return +Infinity/-Infinity respectively.
  bind(&isEmpty);
  if (isMax) {
    loadConstantDouble(mozilla::NegativeInfinity<double>(), result);
  } else {
    loadConstantDouble(mozilla::PositiveInfinity<double>(), result);
  }

  bind(&done);
}
void MacroAssembler::branchIfNotRegExpPrototypeOptimizable(Register proto,
                                                           Register temp,
                                                           Label* fail) {
  loadJSContext(temp);
  loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
  size_t offset = Realm::offsetOfRegExps() +
                  RegExpRealm::offsetOfOptimizableRegExpPrototypeShape();
  loadPtr(Address(temp, offset), temp);
  branchTestObjShapeUnsafe(Assembler::NotEqual, proto, temp, fail);
}
void MacroAssembler::branchIfNotRegExpInstanceOptimizable(Register regexp,
                                                          Register temp,
                                                          Label* label) {
  loadJSContext(temp);
  loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
  size_t offset = Realm::offsetOfRegExps() +
                  RegExpRealm::offsetOfOptimizableRegExpInstanceShape();
  loadPtr(Address(temp, offset), temp);
  branchTestObjShapeUnsafe(Assembler::NotEqual, regexp, temp, label);
}
void MacroAssembler::loadRegExpLastIndex(Register regexp, Register string,
                                         Register lastIndex,
                                         Label* notFoundZeroLastIndex) {
  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
  Address stringLength(string, JSString::offsetOfLength());

  Label notGlobalOrSticky, loadedLastIndex;

  branchTest32(Assembler::Zero, flagsSlot,
               Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
               &notGlobalOrSticky);
  {
    // It's a global or sticky regular expression. Emit the following code:
    //
    //   lastIndex = regexp.lastIndex
    //   if lastIndex > string.length:
    //     jump to notFoundZeroLastIndex (skip the regexp match/test operation)
    //
    // The `notFoundZeroLastIndex` code should set regexp.lastIndex to 0 and
    // treat this as a not-found result.
    //
    // See steps 5-8 in js::RegExpBuiltinExec.
    //
    // Earlier guards must have ensured regexp.lastIndex is a non-negative
    // int32.
#ifdef DEBUG
    {
      Label ok;
      branchTestInt32(Assembler::Equal, lastIndexSlot, &ok);
      assumeUnreachable("Expected int32 value for lastIndex");
      bind(&ok);
    }
#endif
    unboxInt32(lastIndexSlot, lastIndex);
#ifdef DEBUG
    {
      Label ok;
      branchTest32(Assembler::NotSigned, lastIndex, lastIndex, &ok);
      assumeUnreachable("Expected non-negative lastIndex");
      bind(&ok);
    }
#endif
    branch32(Assembler::Below, stringLength, lastIndex, notFoundZeroLastIndex);
    jump(&loadedLastIndex);
  }

  bind(&notGlobalOrSticky);
  move32(Imm32(0), lastIndex);

  bind(&loadedLastIndex);
}
// ===============================================================

void MacroAssembler::loadFunctionLength(Register func,
                                        Register funFlagsAndArgCount,
                                        Register output, Label* slowPath) {
#ifdef DEBUG
  {
    // These flags should already have been checked by the caller.
    Label ok;
    uint32_t FlagsToCheck =
        FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH;
    branchTest32(Assembler::Zero, funFlagsAndArgCount, Imm32(FlagsToCheck),
                 &ok);
    assumeUnreachable("The function flags should already have been checked.");
    bind(&ok);
  }
#endif

  // NOTE: `funFlagsAndArgCount` and `output` must be allowed to alias.

  // Load the target function's length.
  Label isInterpreted, lengthLoaded;
  branchTest32(Assembler::NonZero, funFlagsAndArgCount,
               Imm32(FunctionFlags::BASESCRIPT), &isInterpreted);
  {
    // The length property of a native function is stored with the flags.
    move32(funFlagsAndArgCount, output);
    rshift32(Imm32(JSFunction::ArgCountShift), output);
    jump(&lengthLoaded);
  }
  bind(&isInterpreted);
  {
    // Load the length property of an interpreted function.
    loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), output);
    loadPtr(Address(output, JSScript::offsetOfSharedData()), output);
    branchTestPtr(Assembler::Zero, output, output, slowPath);
    loadPtr(Address(output, SharedImmutableScriptData::offsetOfISD()), output);
    load16ZeroExtend(Address(output, ImmutableScriptData::offsetOfFunLength()),
                     output);
  }
  bind(&lengthLoaded);
}
void MacroAssembler::loadFunctionName(Register func, Register output,
                                      ImmGCPtr emptyString, Label* slowPath) {
  MOZ_ASSERT(func != output);

  // Get the JSFunction flags.
  load32(Address(func, JSFunction::offsetOfFlagsAndArgCount()), output);

  // If the name was previously resolved, the name property may be shadowed.
  branchTest32(Assembler::NonZero, output, Imm32(FunctionFlags::RESOLVED_NAME),
               slowPath);

  Label noName, done;
  branchTest32(Assembler::NonZero, output,
               Imm32(FunctionFlags::HAS_GUESSED_ATOM), &noName);

  Address atomAddr(func, JSFunction::offsetOfAtom());
  branchTestUndefined(Assembler::Equal, atomAddr, &noName);
  unboxString(atomAddr, output);
  jump(&done);

  {
    bind(&noName);

    // An absent name property defaults to the empty string.
    movePtr(emptyString, output);
  }

  bind(&done);
}
void MacroAssembler::assertFunctionIsExtended(Register func) {
#ifdef DEBUG
  Label extended;
  branchTestFunctionFlags(func, FunctionFlags::EXTENDED, Assembler::NonZero,
                          &extended);
  assumeUnreachable("Function is not extended");
  bind(&extended);
#endif
}
void MacroAssembler::branchTestType(Condition cond, Register tag,
                                    JSValueType type, Label* label) {
  switch (type) {
    case JSVAL_TYPE_DOUBLE:
      branchTestDouble(cond, tag, label);
      break;
    case JSVAL_TYPE_INT32:
      branchTestInt32(cond, tag, label);
      break;
    case JSVAL_TYPE_BOOLEAN:
      branchTestBoolean(cond, tag, label);
      break;
    case JSVAL_TYPE_UNDEFINED:
      branchTestUndefined(cond, tag, label);
      break;
    case JSVAL_TYPE_NULL:
      branchTestNull(cond, tag, label);
      break;
    case JSVAL_TYPE_MAGIC:
      branchTestMagic(cond, tag, label);
      break;
    case JSVAL_TYPE_STRING:
      branchTestString(cond, tag, label);
      break;
    case JSVAL_TYPE_SYMBOL:
      branchTestSymbol(cond, tag, label);
      break;
    case JSVAL_TYPE_BIGINT:
      branchTestBigInt(cond, tag, label);
      break;
    case JSVAL_TYPE_OBJECT:
      branchTestObject(cond, tag, label);
      break;
    default:
      MOZ_CRASH("Unexpected value type");
  }
}
void MacroAssembler::branchTestObjShapeList(
    Condition cond, Register obj, Register shapeElements,
    Register shapeScratch, Register endScratch, Register spectreScratch,
    Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

  bool needSpectreMitigations = spectreScratch != InvalidReg;

  Label done;
  Label* onMatch = cond == Assembler::Equal ? label : &done;

  // Load the object's shape pointer into shapeScratch, and prepare to compare
  // it with the shapes in the list. On 64-bit, we box the shape. On 32-bit,
  // we only have to compare the 32-bit payload.
#ifdef JS_PUNBOX64
  loadPtr(Address(obj, JSObject::offsetOfShape()), endScratch);
  tagValue(JSVAL_TYPE_PRIVATE_GCTHING, endScratch, ValueOperand(shapeScratch));
#else
  loadPtr(Address(obj, JSObject::offsetOfShape()), shapeScratch);
#endif

  // Compute end pointer.
  Address lengthAddr(shapeElements,
                     ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, endScratch);
  BaseObjectElementIndex endPtrAddr(shapeElements, endScratch);
  computeEffectiveAddress(endPtrAddr, endScratch);

  Label loop;
  bind(&loop);

  // Compare the object's shape with a shape from the list. Note that on
  // 64-bit this includes the tag bits, but on 32-bit we only compare the low
  // word of the value. This is fine because the list of shapes is never
  // exposed and the tag is guaranteed to be PrivateGCThing.
  if (needSpectreMitigations) {
    move32(Imm32(0), spectreScratch);
  }
  branchPtr(Assembler::Equal, Address(shapeElements, 0), shapeScratch,
            onMatch);
  if (needSpectreMitigations) {
    spectreMovePtr(Assembler::Equal, spectreScratch, obj);
  }

  // Advance to next shape and loop if not finished.
  addPtr(Imm32(sizeof(Value)), shapeElements);
  branchPtr(Assembler::Below, shapeElements, endScratch, &loop);

  if (cond == Assembler::NotEqual) {
    jump(label);
  }
  bind(&done);
}
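// The loop above is the jitted form of a simple linear scan (sketch, not
// part of the build; PrivateGCThingPayload is a hypothetical accessor for
// the boxed shape words stored in the list):
//
//   bool ShapeInList(const Value* shapes, uint32_t initLength, Shape* shape) {
//     for (uint32_t i = 0; i < initLength; i++) {
//       if (PrivateGCThingPayload(shapes[i]) == shape) {
//         return true;
//       }
//     }
//     return false;
//   }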
void MacroAssembler::branchTestObjCompartment(Condition cond, Register obj,
                                              const Address& compartment,
                                              Register scratch, Label* label) {
  MOZ_ASSERT(obj != scratch);
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
  loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
  branchPtr(cond, compartment, scratch, label);
}
void MacroAssembler::branchTestObjCompartment(
    Condition cond, Register obj, const JS::Compartment* compartment,
    Register scratch, Label* label) {
  MOZ_ASSERT(obj != scratch);
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
  loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
  branchPtr(cond, scratch, ImmPtr(compartment), label);
}
void MacroAssembler::branchIfNonNativeObj(Register obj, Register scratch,
                                          Label* label) {
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  branchTest32(Assembler::Zero,
               Address(scratch, Shape::offsetOfImmutableFlags()),
               Imm32(Shape::isNativeBit()), label);
}
void MacroAssembler::branchIfObjectNotExtensible(Register obj,
                                                 Register scratch,
                                                 Label* label) {
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);

  // Spectre-style checks are not needed here because we do not interpret data
  // based on this check.
  static_assert(sizeof(ObjectFlags) == sizeof(uint16_t));
  load16ZeroExtend(Address(scratch, Shape::offsetOfObjectFlags()), scratch);
  branchTest32(Assembler::NonZero, scratch,
               Imm32(uint32_t(ObjectFlag::NotExtensible)), label);
}
void MacroAssembler::wasmTrap(wasm::Trap trap,
                              wasm::BytecodeOffset bytecodeOffset) {
  uint32_t trapOffset = wasmTrapInstruction().offset();
  MOZ_ASSERT_IF(!oom(),
                currentOffset() - trapOffset == WasmTrapInstructionLength);

  append(trap, wasm::TrapSite(trapOffset, bytecodeOffset));
}
std::pair<CodeOffset, uint32_t> MacroAssembler::wasmReserveStackChecked(
    uint32_t amount, wasm::BytecodeOffset trapOffset) {
  if (amount > MAX_UNCHECKED_LEAF_FRAME_SIZE) {
    // The frame is large. Don't bump sp until after the stack limit check so
    // that the trap handler isn't called with a wild sp.
    Label ok;
    Register scratch = ABINonArgReg0;
    moveStackPtrTo(scratch);

    Label trap;
    branchPtr(Assembler::Below, scratch, Imm32(amount), &trap);
    subPtr(Imm32(amount), scratch);
    branchPtr(Assembler::Below,
              Address(InstanceReg, wasm::Instance::offsetOfStackLimit()),
              scratch, &ok);

    bind(&trap);
    wasmTrap(wasm::Trap::StackOverflow, trapOffset);
    CodeOffset trapInsnOffset = CodeOffset(currentOffset());

    bind(&ok);
    reserveStack(amount);
    return std::pair<CodeOffset, uint32_t>(trapInsnOffset, 0);
  }

  reserveStack(amount);
  Label ok;
  branchStackPtrRhs(Assembler::Below,
                    Address(InstanceReg, wasm::Instance::offsetOfStackLimit()),
                    &ok);
  wasmTrap(wasm::Trap::StackOverflow, trapOffset);
  CodeOffset trapInsnOffset = CodeOffset(currentOffset());
  bind(&ok);
  return std::pair<CodeOffset, uint32_t>(trapInsnOffset, amount);
}
CodeOffset MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc,
                                          const wasm::CalleeDesc& callee) {
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));

  // Load the callee, before the caller's registers are clobbered.
  uint32_t instanceDataOffset = callee.importInstanceDataOffset();
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               instanceDataOffset +
                               offsetof(wasm::FuncImportInstanceData, code))),
      ABINonArgReg0);

#if !defined(JS_CODEGEN_NONE) && !defined(JS_CODEGEN_WASM32)
  static_assert(ABINonArgReg0 != InstanceReg, "by constraint");
#endif

  // Switch to the callee's realm.
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               instanceDataOffset +
                               offsetof(wasm::FuncImportInstanceData, realm))),
      ABINonArgReg1);
  loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), ABINonArgReg2);
  storePtr(ABINonArgReg1, Address(ABINonArgReg2, JSContext::offsetOfRealm()));

  // Switch to the callee's instance and pinned registers and make the call.
  loadPtr(Address(InstanceReg,
                  wasm::Instance::offsetInData(
                      instanceDataOffset +
                      offsetof(wasm::FuncImportInstanceData, instance))),
          InstanceReg);

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
  loadWasmPinnedRegsFromInstance();

  return call(desc, ABINonArgReg0);
}
CodeOffset MacroAssembler::wasmCallBuiltinInstanceMethod(
    const wasm::CallSiteDesc& desc, const ABIArg& instanceArg,
    wasm::SymbolicAddress builtin, wasm::FailureMode failureMode) {
  MOZ_ASSERT(instanceArg != ABIArg());

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));

  if (instanceArg.kind() == ABIArg::GPR) {
    movePtr(InstanceReg, instanceArg.gpr());
  } else if (instanceArg.kind() == ABIArg::Stack) {
    storePtr(InstanceReg,
             Address(getStackPointer(), instanceArg.offsetFromArgBase()));
  } else {
    MOZ_CRASH("Unknown abi passing style for pointer");
  }

  CodeOffset ret = call(desc, builtin);

  if (failureMode != wasm::FailureMode::Infallible) {
    Label noTrap;
    switch (failureMode) {
      case wasm::FailureMode::Infallible:
        MOZ_CRASH();
      case wasm::FailureMode::FailOnNegI32:
        branchTest32(Assembler::NotSigned, ReturnReg, ReturnReg, &noTrap);
        break;
      case wasm::FailureMode::FailOnNullPtr:
        branchTestPtr(Assembler::NonZero, ReturnReg, ReturnReg, &noTrap);
        break;
      case wasm::FailureMode::FailOnInvalidRef:
        branchPtr(
            Assembler::NotEqual, ReturnReg,
            ImmWord(uintptr_t(wasm::AnyRef::invalid().forCompiledCode())),
            &noTrap);
        break;
    }
    wasmTrap(wasm::Trap::ThrowReported,
             wasm::BytecodeOffset(desc.lineOrBytecode()));
    bind(&noTrap);
  }

  return ret;
}
CodeOffset MacroAssembler::asmCallIndirect(const wasm::CallSiteDesc& desc,
                                           const wasm::CalleeDesc& callee) {
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::AsmJSTable);

  const Register scratch = WasmTableCallScratchReg0;
  const Register index = WasmTableCallIndexReg;

  // Optimization opportunity: when offsetof(FunctionTableElem, code) == 0, as
  // it is at present, we can probably generate better code here by folding
  // the address computation into the load.

  static_assert(sizeof(wasm::FunctionTableElem) == 8 ||
                    sizeof(wasm::FunctionTableElem) == 16,
                "elements of function tables are two words");

  // asm.js tables require no signature check, and have had their index
  // masked into range and thus need no bounds check.
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               callee.tableFunctionBaseInstanceDataOffset())),
      scratch);
  if (sizeof(wasm::FunctionTableElem) == 8) {
    computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
  } else {
    lshift32(Imm32(4), index);
    addPtr(index, scratch);
  }
  loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, code)), scratch);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
  return call(desc, scratch);
}
// In principle, call_indirect requires an expensive context switch to the
// callee's instance and realm before the call and an almost equally expensive
// switch back to the caller's instance and realm afterward. However, if the
// caller's instance is the same as the callee's instance then no context
// switch is required, and it only takes a compare-and-branch at run-time to
// test this - all values are in registers already. We therefore generate two
// call paths, one for the fast call without the context switch (which
// additionally avoids a null check) and one for the slow call with the
// context switch.

void MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc,
                                      const wasm::CalleeDesc& callee,
                                      Label* boundsCheckFailedLabel,
                                      Label* nullCheckFailedLabel,
                                      mozilla::Maybe<uint32_t> tableSize,
                                      CodeOffset* fastCallOffset,
                                      CodeOffset* slowCallOffset) {
  static_assert(sizeof(wasm::FunctionTableElem) == 2 * sizeof(void*),
                "Exactly two pointers or index scaling won't work correctly");
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);

  const int shift = sizeof(wasm::FunctionTableElem) == 8 ? 3 : 4;
  wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
  const Register calleeScratch = WasmTableCallScratchReg0;
  const Register index = WasmTableCallIndexReg;

  // Check the table index and throw if out-of-bounds.
  //
  // Frequently the table size is known, so optimize for that. Otherwise
  // compare with a memory operand when that's possible. (There's little sense
  // in hoisting the load of the bound into a register at a higher level and
  // reusing that register, because a hoisted value would either have to be
  // spilled and re-loaded before the next call_indirect, or would be
  // abandoned because we could not trust that a hoisted value would not have
  // changed.)

  if (boundsCheckFailedLabel) {
    if (tableSize.isSome()) {
      branch32(Assembler::Condition::AboveOrEqual, index, Imm32(*tableSize),
               boundsCheckFailedLabel);
    } else {
      branch32(
          Assembler::Condition::BelowOrEqual,
          Address(InstanceReg, wasm::Instance::offsetInData(
                                   callee.tableLengthInstanceDataOffset())),
          index, boundsCheckFailedLabel);
    }
  }

  // Write the functype-id into the ABI functype-id register.

  const wasm::CallIndirectId callIndirectId = callee.wasmTableSigId();
  switch (callIndirectId.kind()) {
    case wasm::CallIndirectIdKind::Global:
      loadPtr(Address(InstanceReg, wasm::Instance::offsetInData(
                                       callIndirectId.instanceDataOffset())),
              WasmTableCallSigReg);
      break;
    case wasm::CallIndirectIdKind::Immediate:
      move32(Imm32(callIndirectId.immediate()), WasmTableCallSigReg);
      break;
    case wasm::CallIndirectIdKind::AsmJS:
    case wasm::CallIndirectIdKind::None:
      break;
  }

  // Load the base pointer of the table and compute the address of the callee
  // in the table.

  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               callee.tableFunctionBaseInstanceDataOffset())),
      calleeScratch);
  shiftIndex32AndAdd(index, shift, calleeScratch);

  // Load the callee instance and decide whether to take the fast path or the
  // slow path.

  Label fastCall;
  Label done;
  const Register newInstanceTemp = WasmTableCallScratchReg1;
  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, instance)),
          newInstanceTemp);
  branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);

  // Slow path: Save context, check for null, setup new context, call, restore
  // context.
  //
  // TODO: The slow path could usefully be out-of-line and the test above
  // would just fall through to the fast path. This keeps the fast-path code
  // dense, and has correct static prediction for the branch (forward
  // conditional branches predicted not taken, normally).

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  movePtr(newInstanceTemp, InstanceReg);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));

#ifdef WASM_HAS_HEAPREG
  // Use the null pointer exception resulting from loading HeapReg from a null
  // instance to handle a call to a null slot.
  MOZ_ASSERT(nullCheckFailedLabel == nullptr);
  loadWasmPinnedRegsFromInstance(mozilla::Some(trapOffset));
#else
  MOZ_ASSERT(nullCheckFailedLabel != nullptr);
  branchTestPtr(Assembler::Zero, InstanceReg, InstanceReg,
                nullCheckFailedLabel);

  loadWasmPinnedRegsFromInstance();
#endif
  switchToWasmInstanceRealm(index, WasmTableCallScratchReg1);

  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
          calleeScratch);

  *slowCallOffset = call(desc, calleeScratch);

  // Restore registers and realm and join up with the fast path.

  loadPtr(Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
          InstanceReg);
  loadWasmPinnedRegsFromInstance();
  switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);

  jump(&done);

  // Fast path: just load the code pointer and go. The instance and heap
  // register are the same as in the caller, and nothing will be null.
  //
  // (In particular, the code pointer will not be null: if it were, the
  // instance would have been null, and then it would not have been equivalent
  // to our current instance. So no null check is needed on the fast path.)

  bind(&fastCall);

  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
          calleeScratch);

  // We use a different type of call site for the fast call since the instance
  // slots in the frame do not have valid values.

  wasm::CallSiteDesc newDesc(desc.lineOrBytecode(),
                             wasm::CallSiteDesc::IndirectFast);
  *fastCallOffset = call(newDesc, calleeScratch);

  bind(&done);
}
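// Condensed pseudocode (not part of the build) of the two call paths built
// above:
//
//   elem = table.functionBase[index];        // after the bounds check
//   if (elem.instance == InstanceReg) {
//     call elem.code;                        // fast path: no context switch
//   } else {
//     save caller instance;
//     InstanceReg = elem.instance;           // traps/branches if null
//     load pinned registers; switch realm;
//     call elem.code;
//     restore caller instance, pinned registers, and realm;
//   }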
void MacroAssembler::wasmCallRef(const wasm::CallSiteDesc& desc,
                                 const wasm::CalleeDesc& callee,
                                 CodeOffset* fastCallOffset,
                                 CodeOffset* slowCallOffset) {
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::FuncRef);
  const Register calleeScratch = WasmCallRefCallScratchReg0;
  const Register calleeFnObj = WasmCallRefReg;

  // Load from the function's WASM_INSTANCE_SLOT extended slot, and decide
  // whether to take the fast path or the slow path. Register this load
  // instruction as the source of a trap (the null pointer check).

  Label fastCall;
  Label done;
  const Register newInstanceTemp = WasmCallRefCallScratchReg1;
  size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_INSTANCE_SLOT);
  static_assert(FunctionExtended::WASM_INSTANCE_SLOT < wasm::NullPtrGuardSize);
  wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
  append(wasm::Trap::NullPointerDereference,
         wasm::TrapSite(currentOffset(), trapOffset));
  loadPtr(Address(calleeFnObj, instanceSlotOffset), newInstanceTemp);
  branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  movePtr(newInstanceTemp, InstanceReg);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));

  loadWasmPinnedRegsFromInstance();
  switchToWasmInstanceRealm(WasmCallRefCallScratchReg0,
                            WasmCallRefCallScratchReg1);

  // Get funcUncheckedCallEntry() from the function's
  // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
  size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
  loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);

  *slowCallOffset = call(desc, calleeScratch);

  // Restore registers and realm and back to this caller's.
  loadPtr(Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
          InstanceReg);
  loadWasmPinnedRegsFromInstance();
  switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);

  jump(&done);

  // Fast path: just load WASM_FUNC_UNCHECKED_ENTRY_SLOT value and go.
  // The instance and pinned registers are the same as in the caller.

  bind(&fastCall);

  loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);

  // We use a different type of call site for the fast call since the instance
  // slots in the frame do not have valid values.

  wasm::CallSiteDesc newDesc(desc.lineOrBytecode(),
                             wasm::CallSiteDesc::FuncRefFast);
  *fastCallOffset = call(newDesc, calleeScratch);

  bind(&done);
}
bool MacroAssembler::needScratch1ForBranchWasmGcRefType(wasm::RefType type) {
  MOZ_ASSERT(type.isValid());
  MOZ_ASSERT(type.isAnyHierarchy());
  return !type.isNone() && !type.isAny();
}

bool MacroAssembler::needScratch2ForBranchWasmGcRefType(wasm::RefType type) {
  MOZ_ASSERT(type.isValid());
  MOZ_ASSERT(type.isAnyHierarchy());
  return type.isTypeRef() &&
         type.typeDef()->subTypingDepth() >= wasm::MinSuperTypeVectorLength;
}

bool MacroAssembler::needSuperSuperTypeVectorForBranchWasmGcRefType(
    wasm::RefType type) {
  return type.isTypeRef();
}
void MacroAssembler::branchWasmGcObjectIsRefType(
    Register object, wasm::RefType sourceType, wasm::RefType destType,
    Label* label, bool onSuccess, Register superSuperTypeVector,
    Register scratch1, Register scratch2) {
  MOZ_ASSERT(sourceType.isValid());
  MOZ_ASSERT(destType.isValid());
  MOZ_ASSERT(sourceType.isAnyHierarchy());
  MOZ_ASSERT(destType.isAnyHierarchy());
  MOZ_ASSERT_IF(needScratch1ForBranchWasmGcRefType(destType),
                scratch1 != Register::Invalid());
  MOZ_ASSERT_IF(needScratch2ForBranchWasmGcRefType(destType),
                scratch2 != Register::Invalid());
  MOZ_ASSERT_IF(needSuperSuperTypeVectorForBranchWasmGcRefType(destType),
                superSuperTypeVector != Register::Invalid());

  Label fallthrough;
  Label* successLabel = onSuccess ? label : &fallthrough;
  Label* failLabel = onSuccess ? &fallthrough : label;
  Label* nullLabel = destType.isNullable() ? successLabel : failLabel;

  // Check for null.
  if (sourceType.isNullable()) {
    branchTestPtr(Assembler::Zero, object, object, nullLabel);
  }

  // The only value that can inhabit 'none' is null. So, early out if we got
  // not-null.
  if (destType.isNone()) {
    jump(failLabel);
    bind(&fallthrough);
    return;
  }

  if (destType.isAny()) {
    // No further checks for 'any'
    jump(successLabel);
    bind(&fallthrough);
    return;
  }

  // 'type' is now 'eq' or lower, which currently will always be a gc object.
  // Test for non-gc objects.
  MOZ_ASSERT(scratch1 != Register::Invalid());
  if (!wasm::RefType::isSubTypeOf(sourceType, wasm::RefType::eq())) {
    branchTestObjectIsWasmGcObject(false, object, scratch1, failLabel);
  }

  if (destType.isEq()) {
    // No further checks for 'eq'
    jump(successLabel);
    bind(&fallthrough);
    return;
  }

  // 'type' is now 'struct', 'array', or a concrete type. (Bottom types were
  // handled above.)
  //
  // Casting to a concrete type only requires a simple check on the
  // object's superTypeVector. Casting to an abstract type (struct, array)
  // requires loading the object's superTypeVector->typeDef->kind, and
  // checking that it is correct.

  loadPtr(Address(object, int32_t(WasmGcObject::offsetOfSuperTypeVector())),
          scratch1);
  if (destType.isTypeRef()) {
    // concrete type, do superTypeVector check
    branchWasmSuperTypeVectorIsSubtype(scratch1, superSuperTypeVector,
                                       scratch2,
                                       destType.typeDef()->subTypingDepth(),
                                       successLabel, true);
  } else {
    // abstract type, do kind check
    loadPtr(Address(scratch1,
                    int32_t(wasm::SuperTypeVector::offsetOfSelfTypeDef())),
            scratch1);
    load8ZeroExtend(Address(scratch1, int32_t(wasm::TypeDef::offsetOfKind())),
                    scratch1);
    branch32(Assembler::Equal, scratch1,
             Imm32(int32_t(destType.typeDefKind())), successLabel);
  }

  // The cast failed.
  jump(failLabel);
  bind(&fallthrough);
}
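// Scalar outline (not part of the build) of the cast test emitted above;
// helper names here are illustrative only:
//
//   bool RefTest(void* obj, wasm::RefType dest) {
//     if (!obj) return dest.isNullable();
//     if (dest.isNone()) return false;         // only null inhabits 'none'
//     if (dest.isAny()) return true;
//     if (!IsWasmGcObject(obj)) return false;  // unless source <= eq already
//     if (dest.isEq()) return true;
//     const wasm::SuperTypeVector* stv = SuperTypeVectorOf(obj);
//     return dest.isTypeRef()
//                ? IsSubtypeOf(stv, dest)      // see the helper below
//                : SelfTypeDefKind(stv) == dest.typeDefKind();
//   }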
void MacroAssembler::branchWasmSuperTypeVectorIsSubtype(
    Register subSuperTypeVector, Register superSuperTypeVector,
    Register scratch, uint32_t superTypeDepth, Label* label, bool onSuccess) {
  MOZ_ASSERT_IF(superTypeDepth >= wasm::MinSuperTypeVectorLength,
                scratch != Register::Invalid());

  // We generate just different enough code for 'is' subtype vs 'is not'
  // subtype that we handle them separately.
  if (onSuccess) {
    Label failed;

    // At this point, we could generate a fast success check which jumps to
    // `label` if `subSuperTypeVector == superSuperTypeVector`. However,
    // profiling of Barista-3 seems to show this is hardly worth anything,
    // whereas it is worth us generating smaller code and in particular one
    // fewer conditional branch. So it is omitted:
    //
    //   branchPtr(Assembler::Equal, subSuperTypeVector, superSuperTypeVector,
    //             label);

    // Emit a bounds check if the super type depth may be out-of-bounds.
    if (superTypeDepth >= wasm::MinSuperTypeVectorLength) {
      // Slowest path for having a bounds check of the super type vector
      load32(
          Address(subSuperTypeVector, wasm::SuperTypeVector::offsetOfLength()),
          scratch);
      branch32(Assembler::LessThanOrEqual, scratch, Imm32(superTypeDepth),
               &failed);
    }

    // Load the `superTypeDepth` entry from subSuperTypeVector. This
    // will be `superSuperTypeVector` if `subSuperTypeVector` is indeed a
    // subtype.
    loadPtr(
        Address(subSuperTypeVector,
                wasm::SuperTypeVector::offsetOfTypeDefInVector(superTypeDepth)),
        subSuperTypeVector);
    branchPtr(Assembler::Equal, subSuperTypeVector, superSuperTypeVector,
              label);

    // Fallthrough to the failed case
    bind(&failed);
    return;
  }

  // Emit a bounds check if the super type depth may be out-of-bounds.
  if (superTypeDepth >= wasm::MinSuperTypeVectorLength) {
    load32(Address(subSuperTypeVector, wasm::SuperTypeVector::offsetOfLength()),
           scratch);
    branch32(Assembler::LessThanOrEqual, scratch, Imm32(superTypeDepth),
             label);
  }

  // Load the `superTypeDepth` entry from subSuperTypeVector. This will be
  // `superSuperTypeVector` if `subSuperTypeVector` is indeed a subtype.
  loadPtr(
      Address(subSuperTypeVector,
              wasm::SuperTypeVector::offsetOfTypeDefInVector(superTypeDepth)),
      subSuperTypeVector);
  branchPtr(Assembler::NotEqual, subSuperTypeVector, superSuperTypeVector,
            label);

  // Fallthrough to the success case
}
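// The depth-indexed check above reduces subtyping to at most one bounds
// check plus one pointer compare, because a type's super type vector stores
// its supertype at depth d in slot d. Scalar sketch (not part of the build;
// accessor names are illustrative):
//
//   bool IsSubtypeOf(const wasm::SuperTypeVector* sub,
//                    const wasm::SuperTypeVector* super,
//                    uint32_t superDepth) {
//     // The bounds check is only emitted when superDepth may reach past
//     // MinSuperTypeVectorLength.
//     if (superDepth >= Length(sub)) return false;
//     return TypeDefInVector(sub, superDepth) == super;
//   }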
void MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc) {
  CodeOffset offset = nopPatchableToCall();
  append(desc, offset);
}
void MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type,
                                            Register temp1, Register temp2,
                                            Register temp3, Label* noBarrier) {
  MOZ_ASSERT(temp1 != PreBarrierReg);
  MOZ_ASSERT(temp2 != PreBarrierReg);
  MOZ_ASSERT(temp3 != PreBarrierReg);

  // Load the GC thing in temp1.
  if (type == MIRType::Value) {
    unboxGCThingForGCBarrier(Address(PreBarrierReg, 0), temp1);
  } else {
    MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
               type == MIRType::Shape);
    loadPtr(Address(PreBarrierReg, 0), temp1);
  }

#ifdef DEBUG
  // The caller should have checked for null pointers.
  Label nonZero;
  branchTestPtr(Assembler::NonZero, temp1, temp1, &nonZero);
  assumeUnreachable("JIT pre-barrier: unexpected nullptr");
  bind(&nonZero);
#endif

  // Load the chunk address in temp2.
  movePtr(temp1, temp2);
  andPtr(Imm32(int32_t(~gc::ChunkMask)), temp2);

  // If the GC thing is in the nursery, we don't need to barrier it.
  if (type == MIRType::Value || type == MIRType::Object ||
      type == MIRType::String) {
    branchPtr(Assembler::NotEqual, Address(temp2, gc::ChunkStoreBufferOffset),
              ImmWord(0), noBarrier);
  } else {
#ifdef DEBUG
    Label isTenured;
    branchPtr(Assembler::Equal, Address(temp2, gc::ChunkStoreBufferOffset),
              ImmWord(0), &isTenured);
    assumeUnreachable("JIT pre-barrier: unexpected nursery pointer");
    bind(&isTenured);
#endif
  }

  // Determine the bit index and store in temp1.
  //
  // bit = (addr & js::gc::ChunkMask) / js::gc::CellBytesPerMarkBit +
  //       static_cast<uint32_t>(colorBit);
  static_assert(gc::CellBytesPerMarkBit == 8,
                "Calculation below relies on this");
  static_assert(size_t(gc::ColorBit::BlackBit) == 0,
                "Calculation below relies on this");

  andPtr(Imm32(gc::ChunkMask), temp1);
  rshiftPtr(Imm32(3), temp1);

  static_assert(gc::MarkBitmapWordBits == JS_BITS_PER_WORD,
                "Calculation below relies on this");

  // Load the bitmap word in temp2.
  //
  // word = chunk.bitmap[bit / MarkBitmapWordBits];

  // Fold the adjustment for the fact that arenas don't start at the beginning
  // of the chunk into the offset to the chunk bitmap.
  const size_t firstArenaAdjustment = gc::FirstArenaAdjustmentBits / CHAR_BIT;
  const intptr_t offset =
      intptr_t(gc::ChunkMarkBitmapOffset) - intptr_t(firstArenaAdjustment);

  movePtr(temp1, temp3);
#if JS_BITS_PER_WORD == 64
  rshiftPtr(Imm32(6), temp1);
  loadPtr(BaseIndex(temp2, temp1, TimesEight, offset), temp2);
#else
  rshiftPtr(Imm32(5), temp1);
  loadPtr(BaseIndex(temp2, temp1, TimesFour, offset), temp2);
#endif

  // Load the mask in temp1.
  //
  // mask = uintptr_t(1) << (bit % MarkBitmapWordBits);
  andPtr(Imm32(gc::MarkBitmapWordBits - 1), temp3);
  move32(Imm32(1), temp1);
#ifdef JS_CODEGEN_X64
  MOZ_ASSERT(temp3 == rcx);
  shlq_cl(temp1);
#elif JS_CODEGEN_X86
  MOZ_ASSERT(temp3 == ecx);
  shll_cl(temp1);
#elif JS_CODEGEN_ARM
  ma_lsl(temp3, temp1, temp1);
#elif JS_CODEGEN_ARM64
  Lsl(ARMRegister(temp1, 64), ARMRegister(temp1, 64), ARMRegister(temp3, 64));
#elif JS_CODEGEN_MIPS32
  ma_sll(temp1, temp1, temp3);
#elif JS_CODEGEN_MIPS64
  ma_dsll(temp1, temp1, temp3);
#elif JS_CODEGEN_LOONG64
  as_sll_d(temp1, temp1, temp3);
#elif JS_CODEGEN_RISCV64
  sll(temp1, temp1, temp3);
#elif JS_CODEGEN_WASM32
  MOZ_CRASH();
#elif JS_CODEGEN_NONE
  MOZ_CRASH();
#else
#  error "Unknown architecture"
#endif

  // No barrier is needed if the bit is set, |word & mask != 0|.
  branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier);
}
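// Scalar form (not part of the build) of the mark-bit probe emitted above,
// using the same gc:: constants:
//
//   bool CellIsMarkedBlack(uintptr_t cell) {
//     uintptr_t chunk = cell & ~uintptr_t(gc::ChunkMask);
//     size_t bit = (cell & gc::ChunkMask) / gc::CellBytesPerMarkBit;
//     const uintptr_t* bitmap = reinterpret_cast<const uintptr_t*>(
//         chunk + gc::ChunkMarkBitmapOffset -
//         gc::FirstArenaAdjustmentBits / CHAR_BIT);
//     uintptr_t word = bitmap[bit / gc::MarkBitmapWordBits];
//     return word & (uintptr_t(1) << (bit % gc::MarkBitmapWordBits));
//   }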
// ========================================================================
// JS atomic operations.

void MacroAssembler::atomicIsLockFreeJS(Register value, Register output) {
  // Keep this in sync with isLockfreeJS() in jit/AtomicOperations.h.
  static_assert(AtomicOperations::isLockfreeJS(1));  // Implementation artifact
  static_assert(AtomicOperations::isLockfreeJS(2));  // Implementation artifact
  static_assert(AtomicOperations::isLockfreeJS(4));  // Spec requirement
  static_assert(AtomicOperations::isLockfreeJS(8));  // Implementation artifact

  Label done;
  move32(Imm32(1), output);
  branch32(Assembler::Equal, value, Imm32(8), &done);
  branch32(Assembler::Equal, value, Imm32(4), &done);
  branch32(Assembler::Equal, value, Imm32(2), &done);
  branch32(Assembler::Equal, value, Imm32(1), &done);
  move32(Imm32(0), output);
  bind(&done);
}
// ========================================================================
// Spectre Mitigations.

void MacroAssembler::spectreMaskIndex32(Register index, Register length,
                                        Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(length != output);
  MOZ_ASSERT(index != output);

  move32(Imm32(0), output);
  cmp32Move32(Assembler::Below, index, length, index, output);
}
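// Semantically (not part of the build), the cmp32Move32 above computes:
//
//   output = (index < length) ? index : 0;
//
// branchlessly, so a mis-speculated out-of-bounds index reads index 0 rather
// than attacker-controlled memory. The same shape applies to the three
// overloads that follow.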
void MacroAssembler::spectreMaskIndex32(Register index, const Address& length,
                                        Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != output);
  MOZ_ASSERT(index != output);

  move32(Imm32(0), output);
  cmp32Move32(Assembler::Below, index, length, index, output);
}
void MacroAssembler::spectreMaskIndexPtr(Register index, Register length,
                                         Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(length != output);
  MOZ_ASSERT(index != output);

  movePtr(ImmWord(0), output);
  cmpPtrMovePtr(Assembler::Below, index, length, index, output);
}
void MacroAssembler::spectreMaskIndexPtr(Register index,
                                         const Address& length,
                                         Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != output);
  MOZ_ASSERT(index != output);

  movePtr(ImmWord(0), output);
  cmpPtrMovePtr(Assembler::Below, index, length, index, output);
}
void MacroAssembler::boundsCheck32PowerOfTwo(Register index, uint32_t length,
                                             Label* failure) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(length));
  branch32(Assembler::AboveOrEqual, index, Imm32(length), failure);

  // Note: it's fine to clobber the input register, as this is a no-op: it
  // only affects speculative execution.
  if (JitOptions.spectreIndexMasking) {
    and32(Imm32(length - 1), index);
  }
}
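// With a power-of-two length the Spectre mask is a single AND; e.g. for
// length == 8 the code after the branch behaves like (not part of the
// build):
//
//   index &= 7;  // any speculatively out-of-bounds index stays in [0, 7]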
void MacroAssembler::loadWasmPinnedRegsFromInstance(
    mozilla::Maybe<wasm::BytecodeOffset> trapOffset) {
#ifdef WASM_HAS_HEAPREG
  static_assert(wasm::Instance::offsetOfMemoryBase() < 4096,
                "We count only on the low page being inaccessible");
  if (trapOffset) {
    append(wasm::Trap::IndirectCallToNull,
           wasm::TrapSite(currentOffset(), *trapOffset));
  }
  loadPtr(Address(InstanceReg, wasm::Instance::offsetOfMemoryBase()), HeapReg);
#else
  MOZ_ASSERT(!trapOffset);
#endif
}
5290 void MacroAssembler::debugAssertCanonicalInt32(Register r
) {
5292 if (!js::jit::JitOptions
.lessDebugCode
) {
5293 # if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
5295 branchPtr(Assembler::BelowOrEqual
, r
, ImmWord(UINT32_MAX
), &ok
);
5298 # elif defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
5300 ScratchRegisterScope
scratch(asMasm());
5301 move32SignExtendToPtr(r
, scratch
);
5302 branchPtr(Assembler::Equal
, r
, scratch
, &ok
);
5306 MOZ_CRASH("IMPLEMENT ME");
void MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
  memoryBarrier(sync.barrierBefore);
}

void MacroAssembler::memoryBarrierAfter(const Synchronization& sync) {
  memoryBarrier(sync.barrierAfter);
}
void MacroAssembler::debugAssertIsObject(const ValueOperand& val) {
#ifdef DEBUG
  Label ok;
  branchTestObject(Assembler::Equal, val, &ok);
  assumeUnreachable("Expected an object!");
  bind(&ok);
#endif
}
void MacroAssembler::debugAssertObjHasFixedSlots(Register obj,
                                                 Register scratch) {
#ifdef DEBUG
  Label hasFixedSlots;
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  branchTest32(Assembler::NonZero,
               Address(scratch, Shape::offsetOfImmutableFlags()),
               Imm32(NativeShape::fixedSlotsMask()), &hasFixedSlots);
  assumeUnreachable("Expected a fixed slot");
  bind(&hasFixedSlots);
#endif
}
void MacroAssembler::debugAssertObjectHasClass(Register obj, Register scratch,
                                               const JSClass* clasp) {
#ifdef DEBUG
  Label done;
  branchTestObjClassNoSpectreMitigations(Assembler::Equal, obj, clasp, scratch,
                                         &done);
  assumeUnreachable("Class check failed");
  bind(&done);
#endif
}
void MacroAssembler::branchArrayIsNotPacked(Register array, Register temp1,
                                            Register temp2, Label* label) {
  loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);

  // Test length == initializedLength.
  Address initLength(temp1, ObjectElements::offsetOfInitializedLength());
  load32(Address(temp1, ObjectElements::offsetOfLength()), temp2);
  branch32(Assembler::NotEqual, initLength, temp2, label);

  // Test the NON_PACKED flag.
  Address flags(temp1, ObjectElements::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::NON_PACKED),
               label);
}

void MacroAssembler::setIsPackedArray(Register obj, Register output,
                                      Register temp) {
  // Ensure it's an ArrayObject.
  Label notPackedArray;
  branchTestObjClass(Assembler::NotEqual, obj, &ArrayObject::class_, temp, obj,
                     &notPackedArray);

  branchArrayIsNotPacked(obj, temp, output, &notPackedArray);

  Label done;
  move32(Imm32(1), output);
  jump(&done);

  bind(&notPackedArray);
  move32(Imm32(0), output);

  bind(&done);
}

void MacroAssembler::packedArrayPop(Register array, ValueOperand output,
                                    Register temp1, Register temp2,
                                    Label* fail) {
  // Load obj->elements in temp1.
  loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);

  // Check flags.
  static constexpr uint32_t UnhandledFlags =
      ObjectElements::Flags::NON_PACKED |
      ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH |
      ObjectElements::Flags::NOT_EXTENSIBLE |
      ObjectElements::Flags::MAYBE_IN_ITERATION;
  Address flags(temp1, ObjectElements::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(UnhandledFlags), fail);

  // Load length in temp2. Ensure length == initializedLength.
  Address lengthAddr(temp1, ObjectElements::offsetOfLength());
  Address initLengthAddr(temp1, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp2);
  branch32(Assembler::NotEqual, initLengthAddr, temp2, fail);

  // Result is |undefined| if length == 0.
  Label notEmpty, done;
  branchTest32(Assembler::NonZero, temp2, temp2, &notEmpty);
  moveValue(UndefinedValue(), output);
  jump(&done);

  bind(&notEmpty);

  // Load the last element.
  sub32(Imm32(1), temp2);
  BaseObjectElementIndex elementAddr(temp1, temp2);
  loadValue(elementAddr, output);

  // Pre-barrier the element because we're removing it from the array.
  EmitPreBarrier(*this, elementAddr, MIRType::Value);

  // Update length and initializedLength.
  store32(temp2, lengthAddr);
  store32(temp2, initLengthAddr);

  bind(&done);
}

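// Note (editorial): packedArrayPop is, in effect, a jitted fast path for
// Array.prototype.pop restricted to packed arrays. Roughly:
//
//   if (elements->flags & UnhandledFlags) goto fail;
//   if (length != initializedLength) goto fail;
//   if (length == 0) return undefined;
//   value = elements[--length];  // with a pre-barrier on the vacated slot
//   elements->length = elements->initializedLength = length;
//   return value;
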
void MacroAssembler::packedArrayShift(Register array, ValueOperand output,
                                      Register temp1, Register temp2,
                                      LiveRegisterSet volatileRegs,
                                      Label* fail) {
  // Load obj->elements in temp1.
  loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);

  // Check flags.
  static constexpr uint32_t UnhandledFlags =
      ObjectElements::Flags::NON_PACKED |
      ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH |
      ObjectElements::Flags::NOT_EXTENSIBLE |
      ObjectElements::Flags::MAYBE_IN_ITERATION;
  Address flags(temp1, ObjectElements::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(UnhandledFlags), fail);

  // Load length in temp2. Ensure length == initializedLength.
  Address lengthAddr(temp1, ObjectElements::offsetOfLength());
  Address initLengthAddr(temp1, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp2);
  branch32(Assembler::NotEqual, initLengthAddr, temp2, fail);

  // Result is |undefined| if length == 0.
  Label notEmpty, done;
  branchTest32(Assembler::NonZero, temp2, temp2, &notEmpty);
  moveValue(UndefinedValue(), output);
  jump(&done);

  bind(&notEmpty);

  // Load the first element.
  Address elementAddr(temp1, 0);
  loadValue(elementAddr, output);

  // Move the other elements and update the initializedLength/length. This will
  // also trigger pre-barriers.
  {
    // Ensure output is in volatileRegs. Don't preserve temp1 and temp2.
    volatileRegs.takeUnchecked(temp1);
    volatileRegs.takeUnchecked(temp2);
    if (output.hasVolatileReg()) {
      volatileRegs.addUnchecked(output);
    }

    PushRegsInMask(volatileRegs);

    using Fn = void (*)(ArrayObject* arr);
    setupUnalignedABICall(temp1);
    passABIArg(array);
    callWithABI<Fn, ArrayShiftMoveElements>();

    PopRegsInMask(volatileRegs);
  }

  bind(&done);
}

void MacroAssembler::loadArgumentsObjectElement(Register obj, Register index,
                                                ValueOperand output,
                                                Register temp, Label* fail) {
  Register temp2 = output.scratchReg();

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Ensure no overridden elements.
  branchTest32(Assembler::NonZero, temp,
               Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);

  // Bounds check.
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
  spectreBoundsCheck32(index, temp, temp2, fail);

  // Load ArgumentsData.
  loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), temp);

  // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
  BaseValueIndex argValue(temp, index, ArgumentsData::offsetOfArgs());
  branchTestMagic(Assembler::Equal, argValue, fail);
  loadValue(argValue, output);
}

void MacroAssembler::loadArgumentsObjectElementHole(Register obj,
                                                    Register index,
                                                    ValueOperand output,
                                                    Register temp,
                                                    Label* fail) {
  Register temp2 = output.scratchReg();

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Ensure no overridden elements.
  branchTest32(Assembler::NonZero, temp,
               Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);

  // Bounds check.
  Label outOfBounds, done;
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
  spectreBoundsCheck32(index, temp, temp2, &outOfBounds);

  // Load ArgumentsData.
  loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), temp);

  // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
  BaseValueIndex argValue(temp, index, ArgumentsData::offsetOfArgs());
  branchTestMagic(Assembler::Equal, argValue, fail);
  loadValue(argValue, output);
  jump(&done);

  bind(&outOfBounds);
  branch32(Assembler::LessThan, index, Imm32(0), fail);
  moveValue(UndefinedValue(), output);

  bind(&done);
}

void MacroAssembler::loadArgumentsObjectElementExists(
    Register obj, Register index, Register output, Register temp, Label* fail) {
  // Ensure the index is non-negative.
  branch32(Assembler::LessThan, index, Imm32(0), fail);

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Ensure no overridden or deleted elements.
  branchTest32(Assembler::NonZero, temp,
               Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);

  // Compare index against the length.
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
  cmp32Set(Assembler::LessThan, index, temp, output);
}

void MacroAssembler::loadArgumentsObjectLength(Register obj, Register output,
                                               Label* fail) {
  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()),
             output);

  // Test if length has been overridden.
  branchTest32(Assembler::NonZero, output,
               Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT), fail);

  // Shift out arguments length and return it.
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), output);
}

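// Note (editorial): the initial-length slot of an ArgumentsObject packs
// |length << PACKED_BITS_COUNT| together with override/deleted flag bits in
// the low bits. That is why the readers above first test the flag bits and
// then shift by PACKED_BITS_COUNT to recover the actual length.
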
void MacroAssembler::branchTestArgumentsObjectFlags(Register obj, Register temp,
                                                    uint32_t flags,
                                                    Condition cond,
                                                    Label* label) {
  MOZ_ASSERT((flags & ~ArgumentsObject::PACKED_BITS_MASK) == 0);

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Test the flags.
  branchTest32(cond, temp, Imm32(flags), label);
}

static constexpr bool ValidateSizeRange(Scalar::Type from, Scalar::Type to) {
  for (Scalar::Type type = from; type < to; type = Scalar::Type(type + 1)) {
    if (TypedArrayElemSize(type) != TypedArrayElemSize(from)) {
      return false;
    }
  }
  return true;
}

void MacroAssembler::typedArrayElementSize(Register obj, Register output) {
  static_assert(Scalar::Int8 == 0, "Int8 is the first typed array class");
  static_assert(
      (Scalar::BigUint64 - Scalar::Int8) == Scalar::MaxTypedArrayViewType - 1,
      "BigUint64 is the last typed array class");

  Label one, two, four, eight, done;

  loadObjClassUnsafe(obj, output);

  static_assert(ValidateSizeRange(Scalar::Int8, Scalar::Int16),
                "element size is one in [Int8, Int16)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::Int16)), &one);

  static_assert(ValidateSizeRange(Scalar::Int16, Scalar::Int32),
                "element size is two in [Int16, Int32)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::Int32)), &two);

  static_assert(ValidateSizeRange(Scalar::Int32, Scalar::Float64),
                "element size is four in [Int32, Float64)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::Float64)), &four);

  static_assert(ValidateSizeRange(Scalar::Float64, Scalar::Uint8Clamped),
                "element size is eight in [Float64, Uint8Clamped)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::Uint8Clamped)),
            &eight);

  static_assert(ValidateSizeRange(Scalar::Uint8Clamped, Scalar::BigInt64),
                "element size is one in [Uint8Clamped, BigInt64)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::BigInt64)), &one);

  static_assert(
      ValidateSizeRange(Scalar::BigInt64, Scalar::MaxTypedArrayViewType),
      "element size is eight in [BigInt64, MaxTypedArrayViewType)");
  // Fall through for BigInt64 and BigUint64

  bind(&eight);
  move32(Imm32(8), output);
  jump(&done);

  bind(&four);
  move32(Imm32(4), output);
  jump(&done);

  bind(&two);
  move32(Imm32(2), output);
  jump(&done);

  bind(&one);
  move32(Imm32(1), output);

  bind(&done);
}

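// Note (editorial): this works because TypedArrayObject::classForType()
// returns pointers into one contiguous array of JSClasses ordered by
// Scalar::Type (the static_asserts above pin down that layout), so a single
// unsigned pointer comparison partitions the classes by element size.
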
void MacroAssembler::branchIfClassIsNotTypedArray(Register clasp,
                                                  Label* notTypedArray) {
  static_assert(Scalar::Int8 == 0, "Int8 is the first typed array class");
  const JSClass* firstTypedArrayClass =
      TypedArrayObject::classForType(Scalar::Int8);

  static_assert(
      (Scalar::BigUint64 - Scalar::Int8) == Scalar::MaxTypedArrayViewType - 1,
      "BigUint64 is the last typed array class");
  const JSClass* lastTypedArrayClass =
      TypedArrayObject::classForType(Scalar::BigUint64);

  branchPtr(Assembler::Below, clasp, ImmPtr(firstTypedArrayClass),
            notTypedArray);
  branchPtr(Assembler::Above, clasp, ImmPtr(lastTypedArrayClass),
            notTypedArray);
}

void MacroAssembler::branchIfHasDetachedArrayBuffer(Register obj, Register temp,
                                                    Label* fail) {
  // Inline implementation of ArrayBufferViewObject::hasDetachedBuffer().

  // Load obj->elements in temp.
  loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);

  // Shared buffers can't be detached.
  Label done;
  branchTest32(Assembler::NonZero,
               Address(temp, ObjectElements::offsetOfFlags()),
               Imm32(ObjectElements::SHARED_MEMORY), &done);

  // An ArrayBufferView with a null buffer has never had its buffer exposed to
  // content, so it can't have been detached.
  fallibleUnboxObject(Address(obj, ArrayBufferViewObject::bufferOffset()), temp,
                      &done);

  // Load the ArrayBuffer flags and branch if the detached flag is set.
  unboxInt32(Address(temp, ArrayBufferObject::offsetOfFlagsSlot()), temp);
  branchTest32(Assembler::NonZero, temp, Imm32(ArrayBufferObject::DETACHED),
               fail);

  bind(&done);
}

void MacroAssembler::branchIfNativeIteratorNotReusable(Register ni,
                                                       Label* notReusable) {
  // See NativeIterator::isReusable.
  Address flagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());

#ifdef DEBUG
  Label niIsInitialized;
  branchTest32(Assembler::NonZero, flagsAddr,
               Imm32(NativeIterator::Flags::Initialized), &niIsInitialized);
  assumeUnreachable(
      "Expected a NativeIterator that's been completely "
      "initialized");
  bind(&niIsInitialized);
#endif

  branchTest32(Assembler::NonZero, flagsAddr,
               Imm32(NativeIterator::Flags::NotReusable), notReusable);
}

void MacroAssembler::branchNativeIteratorIndices(Condition cond, Register ni,
                                                 Register temp,
                                                 NativeIteratorIndices kind,
                                                 Label* label) {
  Address iterFlagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());
  load32(iterFlagsAddr, temp);
  and32(Imm32(NativeIterator::IndicesMask), temp);
  uint32_t shiftedKind = uint32_t(kind) << NativeIterator::IndicesShift;
  branch32(cond, temp, Imm32(shiftedKind), label);
}

static void LoadNativeIterator(MacroAssembler& masm, Register obj,
                               Register dest) {
  MOZ_ASSERT(obj != dest);

#ifdef DEBUG
  // Assert we have a PropertyIteratorObject.
  Label ok;
  masm.branchTestObjClass(Assembler::Equal, obj,
                          &PropertyIteratorObject::class_, dest, obj, &ok);
  masm.assumeUnreachable("Expected PropertyIteratorObject!");
  masm.bind(&ok);
#endif

  // Load NativeIterator object.
  Address slotAddr(obj, PropertyIteratorObject::offsetOfIteratorSlot());
  masm.loadPrivate(slotAddr, dest);
}

// The ShapeCachePtr may be used to cache an iterator for for-in. Return that
// iterator in |dest| if:
// - the shape cache pointer exists and stores a native iterator
// - the iterator is reusable
// - the iterated object has no dense elements
// - the shapes of each object on the proto chain of |obj| match the cached
//   shapes
// - the proto chain has no dense elements
// Otherwise, jump to |failure|.
void MacroAssembler::maybeLoadIteratorFromShape(Register obj, Register dest,
                                                Register temp, Register temp2,
                                                Register temp3,
                                                Label* failure) {
  // Register usage:
  // obj: always contains the input object
  // temp: walks the obj->shape->baseshape->proto->shape->... chain
  // temp2: points to the native iterator. Incremented to walk the shapes array.
  // temp3: scratch space
  // dest: stores the resulting PropertyIteratorObject on success

  Label success;
  Register shapeAndProto = temp;
  Register nativeIterator = temp2;

  // Load ShapeCache from shape.
  loadPtr(Address(obj, JSObject::offsetOfShape()), shapeAndProto);
  loadPtr(Address(shapeAndProto, Shape::offsetOfCachePtr()), dest);

  // Check if it's an iterator.
  movePtr(dest, temp3);
  andPtr(Imm32(ShapeCachePtr::MASK), temp3);
  branch32(Assembler::NotEqual, temp3, Imm32(ShapeCachePtr::ITERATOR), failure);

  // If we've cached an iterator, |obj| must be a native object.
#ifdef DEBUG
  Label nonNative;
  branchIfNonNativeObj(obj, temp3, &nonNative);
#endif

  // Verify that |obj| has no dense elements.
  loadPtr(Address(obj, NativeObject::offsetOfElements()), temp3);
  branch32(Assembler::NotEqual,
           Address(temp3, ObjectElements::offsetOfInitializedLength()),
           Imm32(0), failure);

  // Clear tag bits from iterator object. |dest| is now valid.
  // Load the native iterator and verify that it's reusable.
  andPtr(Imm32(~ShapeCachePtr::MASK), dest);
  LoadNativeIterator(*this, dest, nativeIterator);
  branchIfNativeIteratorNotReusable(nativeIterator, failure);

  // We have to compare the shapes in the native iterator with the shapes on the
  // proto chain to ensure the cached iterator is still valid. The shape array
  // always starts at a fixed offset from the base of the NativeIterator, so
  // instead of using an instruction outside the loop to initialize a pointer to
  // the shapes array, we can bake it into the offset and reuse the pointer to
  // the NativeIterator. We add |sizeof(Shape*)| to start at the second shape.
  // (The first shape corresponds to the object itself. We don't have to check
  // it, because we got the iterator via the shape.)
  size_t nativeIteratorProtoShapeOffset =
      NativeIterator::offsetOfFirstShape() + sizeof(Shape*);

  // Loop over the proto chain. At the head of the loop, |shape| is the shape
  // of the current object, and |iteratorShapes| points to the expected shape
  // of its proto.
  Label protoLoop;
  bind(&protoLoop);

  // Load the proto. If the proto is null, then we're done.
  loadPtr(Address(shapeAndProto, Shape::offsetOfBaseShape()), shapeAndProto);
  loadPtr(Address(shapeAndProto, BaseShape::offsetOfProto()), shapeAndProto);
  branchPtr(Assembler::Equal, shapeAndProto, ImmPtr(nullptr), &success);

#ifdef DEBUG
  // We have guarded every shape up until this point, so we know that the proto
  // is a native object.
  branchIfNonNativeObj(shapeAndProto, temp3, &nonNative);
#endif

  // Verify that the proto has no dense elements.
  loadPtr(Address(shapeAndProto, NativeObject::offsetOfElements()), temp3);
  branch32(Assembler::NotEqual,
           Address(temp3, ObjectElements::offsetOfInitializedLength()),
           Imm32(0), failure);

  // Compare the shape of the proto to the expected shape.
  loadPtr(Address(shapeAndProto, JSObject::offsetOfShape()), shapeAndProto);
  loadPtr(Address(nativeIterator, nativeIteratorProtoShapeOffset), temp3);
  branchPtr(Assembler::NotEqual, shapeAndProto, temp3, failure);

  // Increment |iteratorShapes| and jump back to the top of the loop.
  addPtr(Imm32(sizeof(Shape*)), nativeIterator);
  jump(&protoLoop);

#ifdef DEBUG
  bind(&nonNative);
  assumeUnreachable("Expected NativeObject in maybeLoadIteratorFromShape");
#endif

  bind(&success);
}

void MacroAssembler::iteratorMore(Register obj, ValueOperand output,
                                  Register temp) {
  Label done;
  Register outputScratch = output.scratchReg();
  LoadNativeIterator(*this, obj, outputScratch);

  // If propertyCursor_ < propertiesEnd_, load the next string and advance
  // the cursor. Otherwise return MagicValue(JS_NO_ITER_VALUE).
  Label iterDone;
  Address cursorAddr(outputScratch, NativeIterator::offsetOfPropertyCursor());
  Address cursorEndAddr(outputScratch, NativeIterator::offsetOfPropertiesEnd());
  loadPtr(cursorAddr, temp);
  branchPtr(Assembler::BelowOrEqual, cursorEndAddr, temp, &iterDone);

  // Get next string.
  loadPtr(Address(temp, 0), temp);

  // Increase the cursor.
  addPtr(Imm32(sizeof(GCPtr<JSLinearString*>)), cursorAddr);

  tagValue(JSVAL_TYPE_STRING, temp, output);
  jump(&done);

  bind(&iterDone);
  moveValue(MagicValue(JS_NO_ITER_VALUE), output);

  bind(&done);
}

void MacroAssembler::iteratorClose(Register obj, Register temp1, Register temp2,
                                   Register temp3) {
  LoadNativeIterator(*this, obj, temp1);

  // The shared iterator used for for-in with null/undefined is immutable and
  // unlinked. See NativeIterator::isEmptyIteratorSingleton.
  Label done;
  branchTest32(Assembler::NonZero,
               Address(temp1, NativeIterator::offsetOfFlagsAndCount()),
               Imm32(NativeIterator::Flags::IsEmptyIteratorSingleton), &done);

  // Clear active bit.
  and32(Imm32(~NativeIterator::Flags::Active),
        Address(temp1, NativeIterator::offsetOfFlagsAndCount()));

  // Clear objectBeingIterated.
  Address iterObjAddr(temp1, NativeIterator::offsetOfObjectBeingIterated());
  guardedCallPreBarrierAnyZone(iterObjAddr, MIRType::Object, temp2);
  storePtr(ImmPtr(nullptr), iterObjAddr);

  // Reset property cursor.
  loadPtr(Address(temp1, NativeIterator::offsetOfShapesEnd()), temp2);
  storePtr(temp2, Address(temp1, NativeIterator::offsetOfPropertyCursor()));

  // Unlink from the iterator list.
  const Register next = temp2;
  const Register prev = temp3;
  loadPtr(Address(temp1, NativeIterator::offsetOfNext()), next);
  loadPtr(Address(temp1, NativeIterator::offsetOfPrev()), prev);
  storePtr(prev, Address(next, NativeIterator::offsetOfPrev()));
  storePtr(next, Address(prev, NativeIterator::offsetOfNext()));
#ifdef DEBUG
  storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfNext()));
  storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfPrev()));
#endif

  bind(&done);
}

void MacroAssembler::registerIterator(Register enumeratorsList, Register iter,
                                      Register temp) {
  // iter->next = list
  storePtr(enumeratorsList, Address(iter, NativeIterator::offsetOfNext()));

  // iter->prev = list->prev
  loadPtr(Address(enumeratorsList, NativeIterator::offsetOfPrev()), temp);
  storePtr(temp, Address(iter, NativeIterator::offsetOfPrev()));

  // list->prev->next = iter
  storePtr(iter, Address(temp, NativeIterator::offsetOfNext()));

  // list->prev = iter
  storePtr(iter, Address(enumeratorsList, NativeIterator::offsetOfPrev()));
}

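// Note (editorial): registerIterator is a plain doubly-linked-list insertion
// of |iter| just before |enumeratorsList| (the list head), i.e. at the tail
// of the circular enumerators list:
//
//   iter->next = list;
//   iter->prev = list->prev;
//   list->prev->next = iter;
//   list->prev = iter;
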
void MacroAssembler::toHashableNonGCThing(ValueOperand value,
                                          ValueOperand result,
                                          FloatRegister tempFloat) {
  // Inline implementation of |HashableValue::setValue()|.

#ifdef DEBUG
  Label ok;
  branchTestGCThing(Assembler::NotEqual, value, &ok);
  assumeUnreachable("Unexpected GC thing");
  bind(&ok);
#endif

  Label useInput, done;
  branchTestDouble(Assembler::NotEqual, value, &useInput);
  {
    Register int32 = result.scratchReg();
    unboxDouble(value, tempFloat);

    // Normalize int32-valued doubles to int32 and negative zero to +0.
    Label canonicalize;
    convertDoubleToInt32(tempFloat, int32, &canonicalize, false);
    {
      tagValue(JSVAL_TYPE_INT32, int32, result);
      jump(&done);
    }
    bind(&canonicalize);
    {
      // Normalize the sign bit of a NaN.
      branchDouble(Assembler::DoubleOrdered, tempFloat, tempFloat, &useInput);
      moveValue(JS::NaNValue(), result);
      jump(&done);
    }
  }

  bind(&useInput);
  moveValue(value, result);

  bind(&done);
}

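// Note (editorial): the canonicalization above ensures that doubles which
// compare equal also hash equal: int32-valued doubles (including -0.0, which
// converts to +0) are tagged as int32, and every NaN bit pattern collapses
// to the single canonical JS::NaNValue().
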
void MacroAssembler::toHashableValue(ValueOperand value, ValueOperand result,
                                     FloatRegister tempFloat,
                                     Label* atomizeString, Label* tagString) {
  // Inline implementation of |HashableValue::setValue()|.

  ScratchTagScope tag(*this, value);
  splitTagForTest(value, tag);

  Label notString, useInput, done;
  branchTestString(Assembler::NotEqual, tag, &notString);
  {
    ScratchTagScopeRelease _(&tag);

    Register str = result.scratchReg();
    unboxString(value, str);

    branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
                 Imm32(JSString::ATOM_BIT), &useInput);

    jump(atomizeString);
    bind(tagString);

    tagValue(JSVAL_TYPE_STRING, str, result);
    jump(&done);
  }
  bind(&notString);
  branchTestDouble(Assembler::NotEqual, tag, &useInput);
  {
    ScratchTagScopeRelease _(&tag);

    Register int32 = result.scratchReg();
    unboxDouble(value, tempFloat);

    Label canonicalize;
    convertDoubleToInt32(tempFloat, int32, &canonicalize, false);
    {
      tagValue(JSVAL_TYPE_INT32, int32, result);
      jump(&done);
    }
    bind(&canonicalize);
    {
      branchDouble(Assembler::DoubleOrdered, tempFloat, tempFloat, &useInput);
      moveValue(JS::NaNValue(), result);
      jump(&done);
    }
  }

  bind(&useInput);
  moveValue(value, result);

  bind(&done);
}

void MacroAssembler::scrambleHashCode(Register result) {
  // Inline implementation of |mozilla::ScrambleHashCode()|.

  mul32(Imm32(mozilla::kGoldenRatioU32), result);
}

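// Note (editorial): kGoldenRatioU32 is 0x9E3779B9, i.e. 2^32 divided by the
// golden ratio. Multiplying by it is the classic Fibonacci-hashing step: it
// spreads low-entropy input bits across the full 32-bit range, which matters
// because consumers such as the hash table below use the high bits.
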
void MacroAssembler::prepareHashNonGCThing(ValueOperand value, Register result,
                                           Register temp) {
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |mozilla::HashGeneric(v.asRawBits())|.

#ifdef DEBUG
  Label ok;
  branchTestGCThing(Assembler::NotEqual, value, &ok);
  assumeUnreachable("Unexpected GC thing");
  bind(&ok);
#endif

  // uint32_t v1 = static_cast<uint32_t>(aValue);
#ifdef JS_PUNBOX64
  move64To32(value.toRegister64(), result);
#else
  move32(value.payloadReg(), result);
#endif

  // uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32);
#ifdef JS_PUNBOX64
  auto r64 = Register64(temp);
  move64(value.toRegister64(), r64);
  rshift64Arithmetic(Imm32(32), r64);
#else
  // TODO: This seems like a bug in mozilla::detail::AddUintptrToHash().
  // The uint64_t input is first converted to uintptr_t and then back to
  // uint64_t. But |uint64_t(uintptr_t(bits))| actually only clears the high
  // bits, so this computation:
  //
  // aValue = uintptr_t(bits)
  // v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32)
  //
  // really just sets |v2 = 0|. And that means the xor-operation in AddU32ToHash
  // can be optimized away, because |x ^ 0 = x|.
  //
  // Filed as bug 1718516.
#endif

  // mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash) ^ aValue);
  // with |aHash = 0| and |aValue = v1|.
  mul32(Imm32(mozilla::kGoldenRatioU32), result);

  // mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash) ^ aValue);
  // with |aHash = <above hash>| and |aValue = v2|.
  rotateLeft(Imm32(5), result, result);
#ifdef JS_PUNBOX64
  xor32(temp, result);
#endif

  // Combine |mul32| and |scrambleHashCode| by directly multiplying with
  // |kGoldenRatioU32 * kGoldenRatioU32|.
  //
  // mul32(Imm32(mozilla::kGoldenRatioU32), result);
  //
  // scrambleHashCode(result);
  mul32(Imm32(mozilla::kGoldenRatioU32 * mozilla::kGoldenRatioU32), result);
}

void MacroAssembler::prepareHashString(Register str, Register result,
                                       Register temp) {
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |JSAtom::hash()|.

#ifdef DEBUG
  Label ok;
  branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
               Imm32(JSString::ATOM_BIT), &ok);
  assumeUnreachable("Unexpected non-atom string");
  bind(&ok);
#endif

  move32(Imm32(JSString::FAT_INLINE_MASK), temp);
  and32(Address(str, JSString::offsetOfFlags()), temp);

  // Set |result| to 1 for FatInlineAtoms.
  move32(Imm32(0), result);
  cmp32Set(Assembler::Equal, temp, Imm32(JSString::FAT_INLINE_MASK), result);

  // Use a computed load for branch-free code.

  static_assert(FatInlineAtom::offsetOfHash() > NormalAtom::offsetOfHash());

  constexpr size_t offsetDiff =
      FatInlineAtom::offsetOfHash() - NormalAtom::offsetOfHash();
  static_assert(mozilla::IsPowerOfTwo(offsetDiff));

  uint8_t shift = mozilla::FloorLog2Size(offsetDiff);
  if (IsShiftInScaleRange(shift)) {
    load32(
        BaseIndex(str, result, ShiftToScale(shift), NormalAtom::offsetOfHash()),
        result);
  } else {
    lshift32(Imm32(shift), result);
    load32(BaseIndex(str, result, TimesOne, NormalAtom::offsetOfHash()),
           result);
  }

  scrambleHashCode(result);
}

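// Note (editorial): the computed load above avoids a branch on the atom kind.
// With |result| holding 0 (NormalAtom) or 1 (FatInlineAtom), the scaled
// BaseIndex selects between the two hash offsets. For illustration, if
// offsetDiff were 16, the effective address would be
// |str + NormalAtom::offsetOfHash() + result * 16|.
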
void MacroAssembler::prepareHashSymbol(Register sym, Register result) {
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |Symbol::hash()|.

  load32(Address(sym, JS::Symbol::offsetOfHash()), result);

  scrambleHashCode(result);
}

void MacroAssembler::prepareHashBigInt(Register bigInt, Register result,
                                       Register temp1, Register temp2,
                                       Register temp3) {
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |BigInt::hash()|.

  // Inline implementation of |mozilla::AddU32ToHash()|.
  auto addU32ToHash = [&](auto toAdd) {
    rotateLeft(Imm32(5), result, result);
    xor32(toAdd, result);
    mul32(Imm32(mozilla::kGoldenRatioU32), result);
  };

  move32(Imm32(0), result);

  // Inline |mozilla::HashBytes()|.

  load32(Address(bigInt, BigInt::offsetOfLength()), temp1);
  loadBigIntDigits(bigInt, temp2);

  Label start, loop;
  jump(&start);
  bind(&loop);

  {
    // Compute |AddToHash(AddToHash(hash, data), sizeof(Digit))|.
#if defined(JS_CODEGEN_MIPS64)
    // Hash the lower 32-bits.
    addU32ToHash(Address(temp2, 0));

    // Hash the upper 32-bits.
    addU32ToHash(Address(temp2, sizeof(int32_t)));
#elif defined(JS_PUNBOX64)
    // Use a single 64-bit load on non-MIPS64 platforms.
    loadPtr(Address(temp2, 0), temp3);

    // Hash the lower 32-bits.
    addU32ToHash(temp3);

    // Hash the upper 32-bits.
    rshiftPtr(Imm32(32), temp3);
    addU32ToHash(temp3);
#else
    addU32ToHash(Address(temp2, 0));
#endif
  }
  addPtr(Imm32(sizeof(BigInt::Digit)), temp2);

  bind(&start);
  branchSub32(Assembler::NotSigned, Imm32(1), temp1, &loop);

  // Compute |mozilla::AddToHash(h, isNegative())|.
  {
    static_assert(mozilla::IsPowerOfTwo(BigInt::signBitMask()));

    load32(Address(bigInt, BigInt::offsetOfFlags()), temp1);
    and32(Imm32(BigInt::signBitMask()), temp1);
    rshift32(Imm32(mozilla::FloorLog2(BigInt::signBitMask())), temp1);

    addU32ToHash(temp1);
  }

  scrambleHashCode(result);
}

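// Note (editorial): the addU32ToHash lambda above mirrors
// mozilla::AddU32ToHash(), i.e. per 32-bit word:
//
//   hash = kGoldenRatioU32 * (RotateLeft(hash, 5) ^ word);
//
// applied over every BigInt digit and finally over the sign bit.
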
void MacroAssembler::prepareHashObject(Register setObj, ValueOperand value,
                                       Register result, Register temp1,
                                       Register temp2, Register temp3,
                                       Register temp4) {
#ifdef JS_PUNBOX64
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |HashCodeScrambler::scramble(v.asRawBits())|.

  // Load the |ValueSet| or |ValueMap|.
  static_assert(SetObject::getDataSlotOffset() ==
                MapObject::getDataSlotOffset());
  loadPrivate(Address(setObj, SetObject::getDataSlotOffset()), temp1);

  // Load |HashCodeScrambler::mK0| and |HashCodeScrambler::mK1|.
  static_assert(ValueSet::offsetOfImplHcsK0() == ValueMap::offsetOfImplHcsK0());
  static_assert(ValueSet::offsetOfImplHcsK1() == ValueMap::offsetOfImplHcsK1());
  auto k0 = Register64(temp1);
  auto k1 = Register64(temp2);
  load64(Address(temp1, ValueSet::offsetOfImplHcsK1()), k1);
  load64(Address(temp1, ValueSet::offsetOfImplHcsK0()), k0);

  // Hash numbers are 32-bit values, so only hash the lower double-word.
  static_assert(sizeof(mozilla::HashNumber) == 4);
  move32To64ZeroExtend(value.valueReg(), Register64(result));

  // Inline implementation of |SipHasher::sipHash()|.
  auto m = Register64(result);
  auto v0 = Register64(temp3);
  auto v1 = Register64(temp4);
  auto v2 = k0;
  auto v3 = k1;

  auto sipRound = [&]() {
    // mV0 = WrappingAdd(mV0, mV1);
    add64(v1, v0);

    // mV1 = RotateLeft(mV1, 13);
    rotateLeft64(Imm32(13), v1, v1, InvalidReg);

    // mV1 ^= mV0;
    xor64(v0, v1);

    // mV0 = RotateLeft(mV0, 32);
    rotateLeft64(Imm32(32), v0, v0, InvalidReg);

    // mV2 = WrappingAdd(mV2, mV3);
    add64(v3, v2);

    // mV3 = RotateLeft(mV3, 16);
    rotateLeft64(Imm32(16), v3, v3, InvalidReg);

    // mV3 ^= mV2;
    xor64(v2, v3);

    // mV0 = WrappingAdd(mV0, mV3);
    add64(v3, v0);

    // mV3 = RotateLeft(mV3, 21);
    rotateLeft64(Imm32(21), v3, v3, InvalidReg);

    // mV3 ^= mV0;
    xor64(v0, v3);

    // mV2 = WrappingAdd(mV2, mV1);
    add64(v1, v2);

    // mV1 = RotateLeft(mV1, 17);
    rotateLeft64(Imm32(17), v1, v1, InvalidReg);

    // mV1 ^= mV2;
    xor64(v2, v1);

    // mV2 = RotateLeft(mV2, 32);
    rotateLeft64(Imm32(32), v2, v2, InvalidReg);
  };

  // 1. Initialization.
  // mV0 = aK0 ^ UINT64_C(0x736f6d6570736575);
  move64(Imm64(0x736f6d6570736575), v0);
  xor64(k0, v0);

  // mV1 = aK1 ^ UINT64_C(0x646f72616e646f6d);
  move64(Imm64(0x646f72616e646f6d), v1);
  xor64(k1, v1);

  // mV2 = aK0 ^ UINT64_C(0x6c7967656e657261);
  MOZ_ASSERT(v2 == k0);
  xor64(Imm64(0x6c7967656e657261), v2);

  // mV3 = aK1 ^ UINT64_C(0x7465646279746573);
  MOZ_ASSERT(v3 == k1);
  xor64(Imm64(0x7465646279746573), v3);

  // 2. Compression.
  // mV3 ^= aM;
  xor64(m, v3);

  // sipRound();
  sipRound();

  // mV0 ^= aM;
  xor64(m, v0);

  // 3. Finalization.
  // mV2 ^= 0xff;
  xor64(Imm64(0xff), v2);

  // for (int i = 0; i < 3; i++) sipRound();
  for (int i = 0; i < 3; i++) {
    sipRound();
  }

  // return mV0 ^ mV1 ^ mV2 ^ mV3;
  xor64(v1, v0);
  xor64(v3, v2);
  xor64(v2, v0);

  move64To32(v0, result);

  scrambleHashCode(result);
#else
  MOZ_CRASH("Not implemented");
#endif
}

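// Note (editorial): the scrambler above is SipHash with one compression round
// and three finalization rounds (SipHash-1-3), matching
// mozilla::HashCodeScrambler. The initialization constants are the ASCII
// bytes of "somepseudorandomlygeneratedbytes" from the SipHash paper.
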
void MacroAssembler::prepareHashValue(Register setObj, ValueOperand value,
                                      Register result, Register temp1,
                                      Register temp2, Register temp3,
                                      Register temp4) {
  Label isString, isObject, isSymbol, isBigInt;
  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    branchTestString(Assembler::Equal, tag, &isString);
    branchTestObject(Assembler::Equal, tag, &isObject);
    branchTestSymbol(Assembler::Equal, tag, &isSymbol);
    branchTestBigInt(Assembler::Equal, tag, &isBigInt);
  }

  Label done;
  {
    prepareHashNonGCThing(value, result, temp1);
    jump(&done);
  }
  bind(&isString);
  {
    unboxString(value, temp1);
    prepareHashString(temp1, result, temp2);
    jump(&done);
  }
  bind(&isObject);
  {
    prepareHashObject(setObj, value, result, temp1, temp2, temp3, temp4);
    jump(&done);
  }
  bind(&isSymbol);
  {
    unboxSymbol(value, temp1);
    prepareHashSymbol(temp1, result);
    jump(&done);
  }
  bind(&isBigInt);
  {
    unboxBigInt(value, temp1);
    prepareHashBigInt(temp1, result, temp2, temp3, temp4);

    // Fallthrough to |done|.
  }

  bind(&done);
}

template <typename OrderedHashTable>
void MacroAssembler::orderedHashTableLookup(Register setOrMapObj,
                                            ValueOperand value, Register hash,
                                            Register entryTemp, Register temp1,
                                            Register temp2, Register temp3,
                                            Register temp4, Label* found,
                                            IsBigInt isBigInt) {
  // Inline implementation of |OrderedHashTable::lookup()|.

  MOZ_ASSERT_IF(isBigInt == IsBigInt::No, temp3 == InvalidReg);
  MOZ_ASSERT_IF(isBigInt == IsBigInt::No, temp4 == InvalidReg);

#ifdef DEBUG
  Label ok;
  if (isBigInt == IsBigInt::No) {
    branchTestBigInt(Assembler::NotEqual, value, &ok);
    assumeUnreachable("Unexpected BigInt");
  } else if (isBigInt == IsBigInt::Yes) {
    branchTestBigInt(Assembler::Equal, value, &ok);
    assumeUnreachable("Unexpected non-BigInt");
  }
  bind(&ok);
#endif

#ifdef DEBUG
  PushRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));

  pushValue(value);
  moveStackPtrTo(temp2);

  setupUnalignedABICall(temp1);
  loadJSContext(temp1);
  passABIArg(temp1);
  passABIArg(setOrMapObj);
  passABIArg(temp2);
  passABIArg(hash);

  if constexpr (std::is_same_v<OrderedHashTable, ValueSet>) {
    using Fn =
        void (*)(JSContext*, SetObject*, const Value*, mozilla::HashNumber);
    callWithABI<Fn, jit::AssertSetObjectHash>();
  } else {
    using Fn =
        void (*)(JSContext*, MapObject*, const Value*, mozilla::HashNumber);
    callWithABI<Fn, jit::AssertMapObjectHash>();
  }

  popValue(value);
  PopRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
#endif

  // Load the |ValueSet| or |ValueMap|.
  static_assert(SetObject::getDataSlotOffset() ==
                MapObject::getDataSlotOffset());
  loadPrivate(Address(setOrMapObj, SetObject::getDataSlotOffset()), temp1);

  // Load the bucket.
  move32(hash, entryTemp);
  load32(Address(temp1, OrderedHashTable::offsetOfImplHashShift()), temp2);
  flexibleRshift32(temp2, entryTemp);

  loadPtr(Address(temp1, OrderedHashTable::offsetOfImplHashTable()), temp2);
  loadPtr(BaseIndex(temp2, entryTemp, ScalePointer), entryTemp);

  // Search for a match in this bucket.
  Label start, loop;
  jump(&start);
  bind(&loop);
  {
    // Inline implementation of |HashableValue::operator==|.

    static_assert(OrderedHashTable::offsetOfImplDataElement() == 0,
                  "offsetof(Data, element) is 0");
    auto keyAddr = Address(entryTemp, OrderedHashTable::offsetOfEntryKey());

    if (isBigInt == IsBigInt::No) {
      // Two HashableValues are equal if they have equal bits.
      branch64(Assembler::Equal, keyAddr, value.toRegister64(), found);
    } else {
#ifdef JS_PUNBOX64
      auto key = ValueOperand(temp1);
#else
      auto key = ValueOperand(temp1, temp2);
#endif

      loadValue(keyAddr, key);

      // Two HashableValues are equal if they have equal bits.
      branch64(Assembler::Equal, key.toRegister64(), value.toRegister64(),
               found);

      // BigInt values are considered equal if they represent the same
      // mathematical value.
      Label next;
      fallibleUnboxBigInt(key, temp2, &next);
      if (isBigInt == IsBigInt::Yes) {
        unboxBigInt(value, temp1);
      } else {
        fallibleUnboxBigInt(value, temp1, &next);
      }
      equalBigInts(temp1, temp2, temp3, temp4, temp1, temp2, &next, &next,
                   &next);
      jump(found);
      bind(&next);
    }
  }
  loadPtr(Address(entryTemp, OrderedHashTable::offsetOfImplDataChain()),
          entryTemp);

  bind(&start);
  branchTestPtr(Assembler::NonZero, entryTemp, entryTemp, &loop);
}

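// Note (editorial): a rough C sketch of the lookup emitted above, mirroring
// OrderedHashTable::lookup():
//
//   bucket = hash >> hashShift;            // high bits select the bucket
//   for (Data* e = hashTable[bucket]; e; e = e->chain) {
//     if (/* HashableValue equality on e->element.key */) goto found;
//   }
//   // fall through: not found
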
void MacroAssembler::setObjectHas(Register setObj, ValueOperand value,
                                  Register hash, Register result,
                                  Register temp1, Register temp2,
                                  Register temp3, Register temp4,
                                  IsBigInt isBigInt) {
  Label found;
  orderedHashTableLookup<ValueSet>(setObj, value, hash, result, temp1, temp2,
                                   temp3, temp4, &found, isBigInt);

  Label done;
  move32(Imm32(0), result);
  jump(&done);

  bind(&found);
  move32(Imm32(1), result);

  bind(&done);
}

void MacroAssembler::mapObjectHas(Register mapObj, ValueOperand value,
                                  Register hash, Register result,
                                  Register temp1, Register temp2,
                                  Register temp3, Register temp4,
                                  IsBigInt isBigInt) {
  Label found;
  orderedHashTableLookup<ValueMap>(mapObj, value, hash, result, temp1, temp2,
                                   temp3, temp4, &found, isBigInt);

  Label done;
  move32(Imm32(0), result);
  jump(&done);

  bind(&found);
  move32(Imm32(1), result);

  bind(&done);
}

void MacroAssembler::mapObjectGet(Register mapObj, ValueOperand value,
                                  Register hash, ValueOperand result,
                                  Register temp1, Register temp2,
                                  Register temp3, Register temp4,
                                  Register temp5, IsBigInt isBigInt) {
  Label found;
  orderedHashTableLookup<ValueMap>(mapObj, value, hash, temp1, temp2, temp3,
                                   temp4, temp5, &found, isBigInt);

  Label done;
  moveValue(UndefinedValue(), result);
  jump(&done);

  // |temp1| holds the found entry.
  bind(&found);
  loadValue(Address(temp1, ValueMap::Entry::offsetOfValue()), result);

  bind(&done);
}

template <typename OrderedHashTable>
void MacroAssembler::loadOrderedHashTableCount(Register setOrMapObj,
                                               Register result) {
  // Inline implementation of |OrderedHashTable::count()|.

  // Load the |ValueSet| or |ValueMap|.
  static_assert(SetObject::getDataSlotOffset() ==
                MapObject::getDataSlotOffset());
  loadPrivate(Address(setOrMapObj, SetObject::getDataSlotOffset()), result);

  // Load the live count.
  load32(Address(result, OrderedHashTable::offsetOfImplLiveCount()), result);
}

void MacroAssembler::loadSetObjectSize(Register setObj, Register result) {
  loadOrderedHashTableCount<ValueSet>(setObj, result);
}

void MacroAssembler::loadMapObjectSize(Register mapObj, Register result) {
  loadOrderedHashTableCount<ValueMap>(mapObj, result);
}

// Can't push large frames blindly on windows, so we must touch frame memory
// incrementally, with no more than 4096 - 1 bytes between touches.
//
// This is used across all platforms for simplicity.
void MacroAssembler::touchFrameValues(Register numStackValues,
                                      Register scratch1, Register scratch2) {
  const size_t FRAME_TOUCH_INCREMENT = 2048;
  static_assert(FRAME_TOUCH_INCREMENT < 4096 - 1,
                "Frame increment is too large");

  moveStackPtrTo(scratch2);

  mov(numStackValues, scratch1);
  lshiftPtr(Imm32(3), scratch1);
  subPtr(scratch1, scratch2);

  moveStackPtrTo(scratch1);
  subPtr(Imm32(FRAME_TOUCH_INCREMENT), scratch1);

  Label touchFrameLoop;
  Label touchFrameLoopEnd;
  bind(&touchFrameLoop);
  branchPtr(Assembler::Below, scratch1, scratch2, &touchFrameLoopEnd);
  store32(Imm32(0), Address(scratch1, 0));
  subPtr(Imm32(FRAME_TOUCH_INCREMENT), scratch1);
  jump(&touchFrameLoop);
  bind(&touchFrameLoopEnd);
}

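// Note (editorial): e.g. with numStackValues == 1000 the frame spans 8000
// bytes, so the loop stores at 2048, 4096, and 6144 bytes below the current
// stack pointer. Touching at least once per 4 KiB page keeps the OS stack
// guard-page mechanism working as the frame grows.
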
#ifdef JS_DEBUG
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(
    MacroAssembler& masm, RegisterType reg)
    : RegisterType(reg), masm_(masm), released_(false) {
  masm.debugTrackedRegisters_.add(reg);
}

template AutoGenericRegisterScope<Register>::AutoGenericRegisterScope(
    MacroAssembler& masm, Register reg);
template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(
    MacroAssembler& masm, FloatRegister reg);
#endif  // JS_DEBUG

#ifdef JS_DEBUG
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope() {
  if (!released_) {
    release();
  }
}

template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();

template <class RegisterType>
void AutoGenericRegisterScope<RegisterType>::release() {
  MOZ_ASSERT(!released_);
  released_ = true;
  const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
  masm_.debugTrackedRegisters_.take(reg);
}

template void AutoGenericRegisterScope<Register>::release();
template void AutoGenericRegisterScope<FloatRegister>::release();

template <class RegisterType>
void AutoGenericRegisterScope<RegisterType>::reacquire() {
  MOZ_ASSERT(released_);
  released_ = false;
  const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
  masm_.debugTrackedRegisters_.add(reg);
}

template void AutoGenericRegisterScope<Register>::reacquire();
template void AutoGenericRegisterScope<FloatRegister>::reacquire();
#endif  // JS_DEBUG