1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/MacroAssembler-inl.h"
9 #include "mozilla/FloatingPoint.h"
10 #include "mozilla/MathAlgorithms.h"
11 #include "mozilla/XorShift128PlusRNG.h"
16 #include "jit/AtomicOp.h"
17 #include "jit/AtomicOperations.h"
18 #include "jit/Bailouts.h"
19 #include "jit/BaselineFrame.h"
20 #include "jit/BaselineJIT.h"
21 #include "jit/JitFrames.h"
22 #include "jit/JitOptions.h"
23 #include "jit/JitRuntime.h"
24 #include "jit/JitScript.h"
25 #include "jit/MoveEmitter.h"
26 #include "jit/ReciprocalMulConstants.h"
27 #include "jit/SharedICHelpers.h"
28 #include "jit/SharedICRegisters.h"
29 #include "jit/Simulator.h"
30 #include "jit/VMFunctions.h"
31 #include "js/Conversions.h"
32 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
33 #include "js/ScalarType.h" // js::Scalar::Type
34 #include "vm/ArgumentsObject.h"
35 #include "vm/ArrayBufferViewObject.h"
36 #include "vm/BoundFunctionObject.h"
37 #include "vm/FunctionFlags.h" // js::FunctionFlags
38 #include "vm/Iteration.h"
39 #include "vm/JSContext.h"
40 #include "vm/TypedArrayObject.h"
41 #include "wasm/WasmBuiltins.h"
42 #include "wasm/WasmCodegenConstants.h"
43 #include "wasm/WasmCodegenTypes.h"
44 #include "wasm/WasmGcObject.h"
45 #include "wasm/WasmInstanceData.h"
46 #include "wasm/WasmMemory.h"
47 #include "wasm/WasmTypeDef.h"
48 #include "wasm/WasmValidate.h"
50 #include "jit/TemplateObject-inl.h"
51 #include "vm/BytecodeUtil-inl.h"
52 #include "vm/Interpreter-inl.h"
53 #include "vm/JSObject-inl.h"
56 using namespace js::jit
;
61 using mozilla::CheckedInt
;
63 TrampolinePtr
MacroAssembler::preBarrierTrampoline(MIRType type
) {
64 const JitRuntime
* rt
= runtime()->jitRuntime();
65 return rt
->preBarrier(type
);
68 template <typename S
, typename T
>
69 static void StoreToTypedFloatArray(MacroAssembler
& masm
, int arrayType
,
70 const S
& value
, const T
& dest
) {
73 masm
.storeFloat32(value
, dest
);
76 masm
.storeDouble(value
, dest
);
79 MOZ_CRASH("Invalid typed array type");
83 void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType
,
85 const BaseIndex
& dest
) {
86 StoreToTypedFloatArray(*this, arrayType
, value
, dest
);
88 void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType
,
90 const Address
& dest
) {
91 StoreToTypedFloatArray(*this, arrayType
, value
, dest
);
94 template <typename S
, typename T
>
95 static void StoreToTypedBigIntArray(MacroAssembler
& masm
,
96 Scalar::Type arrayType
, const S
& value
,
98 MOZ_ASSERT(Scalar::isBigIntType(arrayType
));
99 masm
.store64(value
, dest
);
102 void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType
,
104 const BaseIndex
& dest
) {
105 StoreToTypedBigIntArray(*this, arrayType
, value
, dest
);
107 void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType
,
109 const Address
& dest
) {
110 StoreToTypedBigIntArray(*this, arrayType
, value
, dest
);
113 void MacroAssembler::boxUint32(Register source
, ValueOperand dest
,
114 Uint32Mode mode
, Label
* fail
) {
116 // Fail if the value does not fit in an int32.
117 case Uint32Mode::FailOnDouble
: {
118 branchTest32(Assembler::Signed
, source
, source
, fail
);
119 tagValue(JSVAL_TYPE_INT32
, source
, dest
);
122 case Uint32Mode::ForceDouble
: {
123 // Always convert the value to double.
124 ScratchDoubleScope
fpscratch(*this);
125 convertUInt32ToDouble(source
, fpscratch
);
126 boxDouble(fpscratch
, dest
, fpscratch
);
132 template <typename T
>
133 void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType
, const T
& src
,
134 AnyRegister dest
, Register temp
,
138 load8SignExtend(src
, dest
.gpr());
141 case Scalar::Uint8Clamped
:
142 load8ZeroExtend(src
, dest
.gpr());
145 load16SignExtend(src
, dest
.gpr());
148 load16ZeroExtend(src
, dest
.gpr());
151 load32(src
, dest
.gpr());
154 if (dest
.isFloat()) {
156 convertUInt32ToDouble(temp
, dest
.fpu());
158 load32(src
, dest
.gpr());
160 // Bail out if the value doesn't fit into a signed int32 value. This
161 // is what allows MLoadUnboxedScalar to have a type() of
162 // MIRType::Int32 for UInt32 array loads.
163 branchTest32(Assembler::Signed
, dest
.gpr(), dest
.gpr(), fail
);
166 case Scalar::Float32
:
167 loadFloat32(src
, dest
.fpu());
168 canonicalizeFloat(dest
.fpu());
170 case Scalar::Float64
:
171 loadDouble(src
, dest
.fpu());
172 canonicalizeDouble(dest
.fpu());
174 case Scalar::BigInt64
:
175 case Scalar::BigUint64
:
177 MOZ_CRASH("Invalid typed array type");
181 template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType
,
184 Register temp
, Label
* fail
);
185 template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType
,
186 const BaseIndex
& src
,
188 Register temp
, Label
* fail
);
190 template <typename T
>
191 void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType
, const T
& src
,
192 const ValueOperand
& dest
,
193 Uint32Mode uint32Mode
, Register temp
,
198 case Scalar::Uint8Clamped
:
202 loadFromTypedArray(arrayType
, src
, AnyRegister(dest
.scratchReg()),
203 InvalidReg
, nullptr);
204 tagValue(JSVAL_TYPE_INT32
, dest
.scratchReg(), dest
);
207 // Don't clobber dest when we could fail, instead use temp.
209 boxUint32(temp
, dest
, uint32Mode
, fail
);
211 case Scalar::Float32
: {
212 ScratchDoubleScope
dscratch(*this);
213 FloatRegister fscratch
= dscratch
.asSingle();
214 loadFromTypedArray(arrayType
, src
, AnyRegister(fscratch
),
215 dest
.scratchReg(), nullptr);
216 convertFloat32ToDouble(fscratch
, dscratch
);
217 boxDouble(dscratch
, dest
, dscratch
);
220 case Scalar::Float64
: {
221 ScratchDoubleScope
fpscratch(*this);
222 loadFromTypedArray(arrayType
, src
, AnyRegister(fpscratch
),
223 dest
.scratchReg(), nullptr);
224 boxDouble(fpscratch
, dest
, fpscratch
);
227 case Scalar::BigInt64
:
228 case Scalar::BigUint64
:
230 MOZ_CRASH("Invalid typed array type");
234 template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType
,
236 const ValueOperand
& dest
,
237 Uint32Mode uint32Mode
,
238 Register temp
, Label
* fail
);
239 template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType
,
240 const BaseIndex
& src
,
241 const ValueOperand
& dest
,
242 Uint32Mode uint32Mode
,
243 Register temp
, Label
* fail
);
245 template <typename T
>
246 void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType
,
247 const T
& src
, Register bigInt
,
249 MOZ_ASSERT(Scalar::isBigIntType(arrayType
));
252 initializeBigInt64(arrayType
, bigInt
, temp
);
255 template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType
,
259 template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType
,
260 const BaseIndex
& src
,
264 // Inlined version of gc::CheckAllocatorState that checks the bare essentials
265 // and bails for anything that cannot be handled with our jit allocators.
266 void MacroAssembler::checkAllocatorState(Label
* fail
) {
267 // Don't execute the inline path if GC probes are built in.
273 // Don't execute the inline path if gc zeal or tracing are active.
274 const uint32_t* ptrZealModeBits
= runtime()->addressOfGCZealModeBits();
275 branch32(Assembler::NotEqual
, AbsoluteAddress(ptrZealModeBits
), Imm32(0),
279 // Don't execute the inline path if the realm has an object metadata callback,
280 // as the metadata to use for the object may vary between executions of the
282 if (realm()->hasAllocationMetadataBuilder()) {
287 bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind
,
288 gc::Heap initialHeap
) {
289 // Note that Ion elides barriers on writes to objects known to be in the
290 // nursery, so any allocation that can be made into the nursery must be made
291 // into the nursery, even if the nursery is disabled. At runtime these will
292 // take the out-of-line path, which is required to insert a barrier for the
293 // initializing writes.
294 return IsNurseryAllocable(allocKind
) && initialHeap
!= gc::Heap::Tenured
;
297 // Inline version of Nursery::allocateObject. If the object has dynamic slots,
298 // this fills in the slots_ pointer.
299 void MacroAssembler::nurseryAllocateObject(Register result
, Register temp
,
300 gc::AllocKind allocKind
,
301 size_t nDynamicSlots
, Label
* fail
,
302 const AllocSiteInput
& allocSite
) {
303 MOZ_ASSERT(IsNurseryAllocable(allocKind
));
305 // Currently the JIT does not nursery allocate foreground finalized
306 // objects. This is allowed for objects that support this and have the
307 // JSCLASS_SKIP_NURSERY_FINALIZE class flag set. It's hard to assert that here
308 // though so disallow all foreground finalized objects for now.
309 MOZ_ASSERT(!IsForegroundFinalized(allocKind
));
311 // We still need to allocate in the nursery, per the comment in
312 // shouldNurseryAllocate; however, we need to insert into the
313 // mallocedBuffers set, so bail to do the nursery allocation in the
315 if (nDynamicSlots
>= Nursery::MaxNurseryBufferSize
/ sizeof(Value
)) {
320 // Check whether this allocation site needs pretenuring. This dynamic check
321 // only happens for baseline code.
322 if (allocSite
.is
<Register
>()) {
323 Register site
= allocSite
.as
<Register
>();
324 branchTestPtr(Assembler::NonZero
,
325 Address(site
, gc::AllocSite::offsetOfScriptAndState()),
326 Imm32(gc::AllocSite::LONG_LIVED_BIT
), fail
);
329 // No explicit check for nursery.isEnabled() is needed, as the comparison
330 // with the nursery's end will always fail in such cases.
331 CompileZone
* zone
= realm()->zone();
332 size_t thingSize
= gc::Arena::thingSize(allocKind
);
333 size_t totalSize
= thingSize
;
335 totalSize
+= ObjectSlots::allocSize(nDynamicSlots
);
337 MOZ_ASSERT(totalSize
< INT32_MAX
);
338 MOZ_ASSERT(totalSize
% gc::CellAlignBytes
== 0);
340 bumpPointerAllocate(result
, temp
, fail
, zone
, JS::TraceKind::Object
,
341 totalSize
, allocSite
);
344 store32(Imm32(nDynamicSlots
),
345 Address(result
, thingSize
+ ObjectSlots::offsetOfCapacity()));
348 Address(result
, thingSize
+ ObjectSlots::offsetOfDictionarySlotSpan()));
349 store64(Imm64(ObjectSlots::NoUniqueIdInDynamicSlots
),
350 Address(result
, thingSize
+ ObjectSlots::offsetOfMaybeUniqueId()));
351 computeEffectiveAddress(
352 Address(result
, thingSize
+ ObjectSlots::offsetOfSlots()), temp
);
353 storePtr(temp
, Address(result
, NativeObject::offsetOfSlots()));
357 // Inlined version of FreeSpan::allocate. This does not fill in slots_.
358 void MacroAssembler::freeListAllocate(Register result
, Register temp
,
359 gc::AllocKind allocKind
, Label
* fail
) {
360 CompileZone
* zone
= realm()->zone();
361 int thingSize
= int(gc::Arena::thingSize(allocKind
));
366 // Load the first and last offsets of |zone|'s free list for |allocKind|.
367 // If there is no room remaining in the span, fall back to get the next one.
368 gc::FreeSpan
** ptrFreeList
= zone
->addressOfFreeList(allocKind
);
369 loadPtr(AbsoluteAddress(ptrFreeList
), temp
);
370 load16ZeroExtend(Address(temp
, js::gc::FreeSpan::offsetOfFirst()), result
);
371 load16ZeroExtend(Address(temp
, js::gc::FreeSpan::offsetOfLast()), temp
);
372 branch32(Assembler::AboveOrEqual
, result
, temp
, &fallback
);
374 // Bump the offset for the next allocation.
375 add32(Imm32(thingSize
), result
);
376 loadPtr(AbsoluteAddress(ptrFreeList
), temp
);
377 store16(result
, Address(temp
, js::gc::FreeSpan::offsetOfFirst()));
378 sub32(Imm32(thingSize
), result
);
379 addPtr(temp
, result
); // Turn the offset into a pointer.
383 // If there are no free spans left, we bail to finish the allocation. The
384 // interpreter will call the GC allocator to set up a new arena to allocate
385 // from, after which we can resume allocating in the jit.
386 branchTest32(Assembler::Zero
, result
, result
, fail
);
387 loadPtr(AbsoluteAddress(ptrFreeList
), temp
);
388 addPtr(temp
, result
); // Turn the offset into a pointer.
390 // Update the free list to point to the next span (which may be empty).
391 load32(Address(result
, 0), result
);
392 store32(result
, Address(temp
, js::gc::FreeSpan::offsetOfFirst()));
397 if (runtime()->geckoProfiler().enabled()) {
398 uint32_t* countAddress
= zone
->addressOfTenuredAllocCount();
399 movePtr(ImmPtr(countAddress
), temp
);
400 add32(Imm32(1), Address(temp
, 0));
404 void MacroAssembler::callFreeStub(Register slots
) {
405 // This register must match the one in JitRuntime::generateFreeStub.
406 const Register regSlots
= CallTempReg0
;
409 movePtr(slots
, regSlots
);
410 call(runtime()->jitRuntime()->freeStub());
414 // Inlined equivalent of gc::AllocateObject, without failure case handling.
415 void MacroAssembler::allocateObject(Register result
, Register temp
,
416 gc::AllocKind allocKind
,
417 uint32_t nDynamicSlots
,
418 gc::Heap initialHeap
, Label
* fail
,
419 const AllocSiteInput
& allocSite
) {
420 MOZ_ASSERT(gc::IsObjectAllocKind(allocKind
));
422 checkAllocatorState(fail
);
424 if (shouldNurseryAllocate(allocKind
, initialHeap
)) {
425 MOZ_ASSERT(initialHeap
== gc::Heap::Default
);
426 return nurseryAllocateObject(result
, temp
, allocKind
, nDynamicSlots
, fail
,
430 // Fall back to calling into the VM to allocate objects in the tenured heap
431 // that have dynamic slots.
437 return freeListAllocate(result
, temp
, allocKind
, fail
);
440 void MacroAssembler::createGCObject(Register obj
, Register temp
,
441 const TemplateObject
& templateObj
,
442 gc::Heap initialHeap
, Label
* fail
,
443 bool initContents
/* = true */) {
444 gc::AllocKind allocKind
= templateObj
.getAllocKind();
445 MOZ_ASSERT(gc::IsObjectAllocKind(allocKind
));
447 uint32_t nDynamicSlots
= 0;
448 if (templateObj
.isNativeObject()) {
449 const TemplateNativeObject
& ntemplate
=
450 templateObj
.asTemplateNativeObject();
451 nDynamicSlots
= ntemplate
.numDynamicSlots();
454 allocateObject(obj
, temp
, allocKind
, nDynamicSlots
, initialHeap
, fail
);
455 initGCThing(obj
, temp
, templateObj
, initContents
);
458 void MacroAssembler::createPlainGCObject(
459 Register result
, Register shape
, Register temp
, Register temp2
,
460 uint32_t numFixedSlots
, uint32_t numDynamicSlots
, gc::AllocKind allocKind
,
461 gc::Heap initialHeap
, Label
* fail
, const AllocSiteInput
& allocSite
,
462 bool initContents
/* = true */) {
463 MOZ_ASSERT(gc::IsObjectAllocKind(allocKind
));
464 MOZ_ASSERT(shape
!= temp
, "shape can overlap with temp2, but not temp");
467 allocateObject(result
, temp
, allocKind
, numDynamicSlots
, initialHeap
, fail
,
470 // Initialize shape field.
471 storePtr(shape
, Address(result
, JSObject::offsetOfShape()));
473 // If the object has dynamic slots, allocateObject will initialize
474 // the slots field. If not, we must initialize it now.
475 if (numDynamicSlots
== 0) {
476 storePtr(ImmPtr(emptyObjectSlots
),
477 Address(result
, NativeObject::offsetOfSlots()));
480 // Initialize elements field.
481 storePtr(ImmPtr(emptyObjectElements
),
482 Address(result
, NativeObject::offsetOfElements()));
484 // Initialize fixed slots.
486 fillSlotsWithUndefined(Address(result
, NativeObject::getFixedSlotOffset(0)),
487 temp
, 0, numFixedSlots
);
490 // Initialize dynamic slots.
491 if (numDynamicSlots
> 0) {
492 loadPtr(Address(result
, NativeObject::offsetOfSlots()), temp2
);
493 fillSlotsWithUndefined(Address(temp2
, 0), temp
, 0, numDynamicSlots
);
497 void MacroAssembler::createArrayWithFixedElements(
498 Register result
, Register shape
, Register temp
, uint32_t arrayLength
,
499 uint32_t arrayCapacity
, gc::AllocKind allocKind
, gc::Heap initialHeap
,
500 Label
* fail
, const AllocSiteInput
& allocSite
) {
501 MOZ_ASSERT(gc::IsObjectAllocKind(allocKind
));
502 MOZ_ASSERT(shape
!= temp
, "shape can overlap with temp2, but not temp");
503 MOZ_ASSERT(result
!= temp
);
505 // This only supports allocating arrays with fixed elements and does not
506 // support any dynamic slots or elements.
507 MOZ_ASSERT(arrayCapacity
>= arrayLength
);
508 MOZ_ASSERT(gc::GetGCKindSlots(allocKind
) >=
509 arrayCapacity
+ ObjectElements::VALUES_PER_HEADER
);
512 allocateObject(result
, temp
, allocKind
, 0, initialHeap
, fail
, allocSite
);
514 // Initialize shape field.
515 storePtr(shape
, Address(result
, JSObject::offsetOfShape()));
517 // There are no dynamic slots.
518 storePtr(ImmPtr(emptyObjectSlots
),
519 Address(result
, NativeObject::offsetOfSlots()));
521 // Initialize elements pointer for fixed (inline) elements.
522 computeEffectiveAddress(
523 Address(result
, NativeObject::offsetOfFixedElements()), temp
);
524 storePtr(temp
, Address(result
, NativeObject::offsetOfElements()));
526 // Initialize elements header.
527 store32(Imm32(ObjectElements::FIXED
),
528 Address(temp
, ObjectElements::offsetOfFlags()));
529 store32(Imm32(0), Address(temp
, ObjectElements::offsetOfInitializedLength()));
530 store32(Imm32(arrayCapacity
),
531 Address(temp
, ObjectElements::offsetOfCapacity()));
532 store32(Imm32(arrayLength
), Address(temp
, ObjectElements::offsetOfLength()));
535 // Inline version of Nursery::allocateString.
536 void MacroAssembler::nurseryAllocateString(Register result
, Register temp
,
537 gc::AllocKind allocKind
,
539 MOZ_ASSERT(IsNurseryAllocable(allocKind
));
541 // No explicit check for nursery.isEnabled() is needed, as the comparison
542 // with the nursery's end will always fail in such cases.
544 CompileZone
* zone
= realm()->zone();
545 size_t thingSize
= gc::Arena::thingSize(allocKind
);
546 bumpPointerAllocate(result
, temp
, fail
, zone
, JS::TraceKind::String
,
550 // Inline version of Nursery::allocateBigInt.
551 void MacroAssembler::nurseryAllocateBigInt(Register result
, Register temp
,
553 MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT
));
555 // No explicit check for nursery.isEnabled() is needed, as the comparison
556 // with the nursery's end will always fail in such cases.
558 CompileZone
* zone
= realm()->zone();
559 size_t thingSize
= gc::Arena::thingSize(gc::AllocKind::BIGINT
);
561 bumpPointerAllocate(result
, temp
, fail
, zone
, JS::TraceKind::BigInt
,
565 static bool IsNurseryAllocEnabled(CompileZone
* zone
, JS::TraceKind kind
) {
567 case JS::TraceKind::Object
:
568 return zone
->allocNurseryObjects();
569 case JS::TraceKind::String
:
570 return zone
->allocNurseryStrings();
571 case JS::TraceKind::BigInt
:
572 return zone
->allocNurseryBigInts();
574 MOZ_CRASH("Bad nursery allocation kind");
578 void MacroAssembler::bumpPointerAllocate(Register result
, Register temp
,
579 Label
* fail
, CompileZone
* zone
,
580 JS::TraceKind traceKind
, uint32_t size
,
581 const AllocSiteInput
& allocSite
) {
582 MOZ_ASSERT(size
>= gc::MinCellSize
);
584 uint32_t totalSize
= size
+ Nursery::nurseryCellHeaderSize();
585 MOZ_ASSERT(totalSize
< INT32_MAX
, "Nursery allocation too large");
586 MOZ_ASSERT(totalSize
% gc::CellAlignBytes
== 0);
588 // We know statically whether nursery allocation is enable for a particular
589 // kind because we discard JIT code when this changes.
590 if (!IsNurseryAllocEnabled(zone
, traceKind
)) {
595 // Use a relative 32 bit offset to the Nursery position_ to currentEnd_ to
596 // avoid 64-bit immediate loads.
597 void* posAddr
= zone
->addressOfNurseryPosition();
598 int32_t endOffset
= Nursery::offsetOfCurrentEndFromPosition();
600 movePtr(ImmPtr(posAddr
), temp
);
601 loadPtr(Address(temp
, 0), result
);
602 addPtr(Imm32(totalSize
), result
);
603 branchPtr(Assembler::Below
, Address(temp
, endOffset
), result
, fail
);
604 storePtr(result
, Address(temp
, 0));
605 subPtr(Imm32(size
), result
);
607 if (allocSite
.is
<gc::CatchAllAllocSite
>()) {
608 // No allocation site supplied. This is the case when called from Warp, or
609 // from places that don't support pretenuring.
610 gc::CatchAllAllocSite siteKind
= allocSite
.as
<gc::CatchAllAllocSite
>();
611 gc::AllocSite
* site
= zone
->catchAllAllocSite(traceKind
, siteKind
);
612 uintptr_t headerWord
= gc::NurseryCellHeader::MakeValue(site
, traceKind
);
613 storePtr(ImmWord(headerWord
),
614 Address(result
, -js::Nursery::nurseryCellHeaderSize()));
616 // Update the catch all allocation site for strings or if the profiler is
617 // enabled. This is used to calculate the nursery allocation count. The
618 // string data is used to determine whether to disable nursery string
620 if (traceKind
== JS::TraceKind::String
||
621 runtime()->geckoProfiler().enabled()) {
622 uint32_t* countAddress
= site
->nurseryAllocCountAddress();
623 CheckedInt
<int32_t> counterOffset
=
624 (CheckedInt
<uintptr_t>(uintptr_t(countAddress
)) -
625 CheckedInt
<uintptr_t>(uintptr_t(posAddr
)))
626 .toChecked
<int32_t>();
627 if (counterOffset
.isValid()) {
628 add32(Imm32(1), Address(temp
, counterOffset
.value()));
630 movePtr(ImmPtr(countAddress
), temp
);
631 add32(Imm32(1), Address(temp
, 0));
635 // Update allocation site and store pointer in the nursery cell header. This
636 // is only used from baseline.
637 Register site
= allocSite
.as
<Register
>();
638 updateAllocSite(temp
, result
, zone
, site
);
639 // See NurseryCellHeader::MakeValue.
640 orPtr(Imm32(int32_t(traceKind
)), site
);
641 storePtr(site
, Address(result
, -js::Nursery::nurseryCellHeaderSize()));
645 // Update the allocation site in the same way as Nursery::allocateCell.
646 void MacroAssembler::updateAllocSite(Register temp
, Register result
,
647 CompileZone
* zone
, Register site
) {
650 add32(Imm32(1), Address(site
, gc::AllocSite::offsetOfNurseryAllocCount()));
652 branch32(Assembler::NotEqual
,
653 Address(site
, gc::AllocSite::offsetOfNurseryAllocCount()), Imm32(1),
656 loadPtr(AbsoluteAddress(zone
->addressOfNurseryAllocatedSites()), temp
);
657 storePtr(temp
, Address(site
, gc::AllocSite::offsetOfNextNurseryAllocated()));
658 storePtr(site
, AbsoluteAddress(zone
->addressOfNurseryAllocatedSites()));
663 // Inlined equivalent of gc::AllocateString, jumping to fail if nursery
664 // allocation requested but unsuccessful.
665 void MacroAssembler::allocateString(Register result
, Register temp
,
666 gc::AllocKind allocKind
,
667 gc::Heap initialHeap
, Label
* fail
) {
668 MOZ_ASSERT(allocKind
== gc::AllocKind::STRING
||
669 allocKind
== gc::AllocKind::FAT_INLINE_STRING
);
671 checkAllocatorState(fail
);
673 if (shouldNurseryAllocate(allocKind
, initialHeap
)) {
674 MOZ_ASSERT(initialHeap
== gc::Heap::Default
);
675 return nurseryAllocateString(result
, temp
, allocKind
, fail
);
678 freeListAllocate(result
, temp
, allocKind
, fail
);
681 void MacroAssembler::newGCString(Register result
, Register temp
,
682 gc::Heap initialHeap
, Label
* fail
) {
683 allocateString(result
, temp
, js::gc::AllocKind::STRING
, initialHeap
, fail
);
686 void MacroAssembler::newGCFatInlineString(Register result
, Register temp
,
687 gc::Heap initialHeap
, Label
* fail
) {
688 allocateString(result
, temp
, js::gc::AllocKind::FAT_INLINE_STRING
,
692 void MacroAssembler::newGCBigInt(Register result
, Register temp
,
693 gc::Heap initialHeap
, Label
* fail
) {
694 checkAllocatorState(fail
);
696 if (shouldNurseryAllocate(gc::AllocKind::BIGINT
, initialHeap
)) {
697 MOZ_ASSERT(initialHeap
== gc::Heap::Default
);
698 return nurseryAllocateBigInt(result
, temp
, fail
);
701 freeListAllocate(result
, temp
, gc::AllocKind::BIGINT
, fail
);
704 void MacroAssembler::copySlotsFromTemplate(
705 Register obj
, const TemplateNativeObject
& templateObj
, uint32_t start
,
707 uint32_t nfixed
= std::min(templateObj
.numFixedSlots(), end
);
708 for (unsigned i
= start
; i
< nfixed
; i
++) {
709 // Template objects are not exposed to script and therefore immutable.
710 // However, regexp template objects are sometimes used directly (when
711 // the cloning is not observable), and therefore we can end up with a
712 // non-zero lastIndex. Detect this case here and just substitute 0, to
713 // avoid racing with the main thread updating this slot.
715 if (templateObj
.isRegExpObject() && i
== RegExpObject::lastIndexSlot()) {
718 v
= templateObj
.getSlot(i
);
720 storeValue(v
, Address(obj
, NativeObject::getFixedSlotOffset(i
)));
724 void MacroAssembler::fillSlotsWithConstantValue(Address base
, Register temp
,
725 uint32_t start
, uint32_t end
,
727 MOZ_ASSERT(v
.isUndefined() || IsUninitializedLexical(v
));
734 // We only have a single spare register, so do the initialization as two
735 // strided writes of the tag and body.
737 move32(Imm32(v
.toNunboxPayload()), temp
);
738 for (unsigned i
= start
; i
< end
; ++i
, addr
.offset
+= sizeof(GCPtr
<Value
>)) {
739 store32(temp
, ToPayload(addr
));
743 move32(Imm32(v
.toNunboxTag()), temp
);
744 for (unsigned i
= start
; i
< end
; ++i
, addr
.offset
+= sizeof(GCPtr
<Value
>)) {
745 store32(temp
, ToType(addr
));
748 moveValue(v
, ValueOperand(temp
));
749 for (uint32_t i
= start
; i
< end
; ++i
, base
.offset
+= sizeof(GCPtr
<Value
>)) {
750 storePtr(temp
, base
);
755 void MacroAssembler::fillSlotsWithUndefined(Address base
, Register temp
,
756 uint32_t start
, uint32_t end
) {
757 fillSlotsWithConstantValue(base
, temp
, start
, end
, UndefinedValue());
760 void MacroAssembler::fillSlotsWithUninitialized(Address base
, Register temp
,
761 uint32_t start
, uint32_t end
) {
762 fillSlotsWithConstantValue(base
, temp
, start
, end
,
763 MagicValue(JS_UNINITIALIZED_LEXICAL
));
766 static std::pair
<uint32_t, uint32_t> FindStartOfUninitializedAndUndefinedSlots(
767 const TemplateNativeObject
& templateObj
, uint32_t nslots
) {
768 MOZ_ASSERT(nslots
== templateObj
.slotSpan());
769 MOZ_ASSERT(nslots
> 0);
771 uint32_t first
= nslots
;
772 for (; first
!= 0; --first
) {
773 if (templateObj
.getSlot(first
- 1) != UndefinedValue()) {
777 uint32_t startOfUndefined
= first
;
779 if (first
!= 0 && IsUninitializedLexical(templateObj
.getSlot(first
- 1))) {
780 for (; first
!= 0; --first
) {
781 if (!IsUninitializedLexical(templateObj
.getSlot(first
- 1))) {
786 uint32_t startOfUninitialized
= first
;
788 return {startOfUninitialized
, startOfUndefined
};
791 void MacroAssembler::initTypedArraySlots(Register obj
, Register temp
,
793 LiveRegisterSet liveRegs
, Label
* fail
,
794 TypedArrayObject
* templateObj
,
795 TypedArrayLength lengthKind
) {
796 MOZ_ASSERT(!templateObj
->hasBuffer());
798 constexpr size_t dataSlotOffset
= ArrayBufferViewObject::dataOffset();
799 constexpr size_t dataOffset
= dataSlotOffset
+ sizeof(HeapSlot
);
802 TypedArrayObject::FIXED_DATA_START
== TypedArrayObject::DATA_SLOT
+ 1,
803 "fixed inline element data assumed to begin after the data slot");
806 TypedArrayObject::INLINE_BUFFER_LIMIT
==
807 JSObject::MAX_BYTE_SIZE
- dataOffset
,
808 "typed array inline buffer is limited by the maximum object byte size");
810 // Initialise data elements to zero.
811 size_t length
= templateObj
->length();
812 MOZ_ASSERT(length
<= INT32_MAX
,
813 "Template objects are only created for int32 lengths");
814 size_t nbytes
= length
* templateObj
->bytesPerElement();
816 if (lengthKind
== TypedArrayLength::Fixed
&&
817 nbytes
<= TypedArrayObject::INLINE_BUFFER_LIMIT
) {
818 MOZ_ASSERT(dataOffset
+ nbytes
<= templateObj
->tenuredSizeOfThis());
820 // Store data elements inside the remaining JSObject slots.
821 computeEffectiveAddress(Address(obj
, dataOffset
), temp
);
822 storePrivateValue(temp
, Address(obj
, dataSlotOffset
));
824 // Write enough zero pointers into fixed data to zero every
825 // element. (This zeroes past the end of a byte count that's
826 // not a multiple of pointer size. That's okay, because fixed
827 // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
828 // and we won't inline unless the desired memory fits in that
830 static_assert(sizeof(HeapSlot
) == 8, "Assumed 8 bytes alignment");
832 size_t numZeroPointers
= ((nbytes
+ 7) & ~0x7) / sizeof(char*);
833 for (size_t i
= 0; i
< numZeroPointers
; i
++) {
834 storePtr(ImmWord(0), Address(obj
, dataOffset
+ i
* sizeof(char*)));
836 MOZ_ASSERT(nbytes
> 0, "Zero-length TypedArrays need ZeroLengthArrayData");
838 if (lengthKind
== TypedArrayLength::Fixed
) {
839 move32(Imm32(length
), lengthReg
);
842 // Ensure volatile |obj| is saved across the call.
843 if (obj
.volatile_()) {
844 liveRegs
.addUnchecked(obj
);
847 // Allocate a buffer on the heap to store the data elements.
848 PushRegsInMask(liveRegs
);
849 using Fn
= void (*)(JSContext
* cx
, TypedArrayObject
* obj
, int32_t count
);
850 setupUnalignedABICall(temp
);
854 passABIArg(lengthReg
);
855 callWithABI
<Fn
, AllocateAndInitTypedArrayBuffer
>();
856 PopRegsInMask(liveRegs
);
858 // Fail when data slot is UndefinedValue.
859 branchTestUndefined(Assembler::Equal
, Address(obj
, dataSlotOffset
), fail
);
863 void MacroAssembler::initGCSlots(Register obj
, Register temp
,
864 const TemplateNativeObject
& templateObj
) {
865 MOZ_ASSERT(!templateObj
.isArrayObject());
867 // Slots of non-array objects are required to be initialized.
868 // Use the values currently in the template object.
869 uint32_t nslots
= templateObj
.slotSpan();
874 uint32_t nfixed
= templateObj
.numUsedFixedSlots();
875 uint32_t ndynamic
= templateObj
.numDynamicSlots();
877 // Attempt to group slot writes such that we minimize the amount of
878 // duplicated data we need to embed in code and load into registers. In
879 // general, most template object slots will be undefined except for any
880 // reserved slots. Since reserved slots come first, we split the object
881 // logically into independent non-UndefinedValue writes to the head and
882 // duplicated writes of UndefinedValue to the tail. For the majority of
883 // objects, the "tail" will be the entire slot range.
885 // The template object may be a CallObject, in which case we need to
886 // account for uninitialized lexical slots as well as undefined
887 // slots. Uninitialized lexical slots appears in CallObjects if the function
888 // has parameter expressions, in which case closed over parameters have
889 // TDZ. Uninitialized slots come before undefined slots in CallObjects.
890 auto [startOfUninitialized
, startOfUndefined
] =
891 FindStartOfUninitializedAndUndefinedSlots(templateObj
, nslots
);
892 MOZ_ASSERT(startOfUninitialized
<= nfixed
); // Reserved slots must be fixed.
893 MOZ_ASSERT(startOfUndefined
>= startOfUninitialized
);
894 MOZ_ASSERT_IF(!templateObj
.isCallObject() &&
895 !templateObj
.isBlockLexicalEnvironmentObject(),
896 startOfUninitialized
== startOfUndefined
);
898 // Copy over any preserved reserved slots.
899 copySlotsFromTemplate(obj
, templateObj
, 0, startOfUninitialized
);
901 // Fill the rest of the fixed slots with undefined and uninitialized.
902 size_t offset
= NativeObject::getFixedSlotOffset(startOfUninitialized
);
903 fillSlotsWithUninitialized(Address(obj
, offset
), temp
, startOfUninitialized
,
904 std::min(startOfUndefined
, nfixed
));
906 if (startOfUndefined
< nfixed
) {
907 offset
= NativeObject::getFixedSlotOffset(startOfUndefined
);
908 fillSlotsWithUndefined(Address(obj
, offset
), temp
, startOfUndefined
,
913 // We are short one register to do this elegantly. Borrow the obj
914 // register briefly for our slots base address.
916 loadPtr(Address(obj
, NativeObject::offsetOfSlots()), obj
);
918 // Fill uninitialized slots if necessary. Otherwise initialize all
919 // slots to undefined.
920 if (startOfUndefined
> nfixed
) {
921 MOZ_ASSERT(startOfUninitialized
!= startOfUndefined
);
922 fillSlotsWithUninitialized(Address(obj
, 0), temp
, 0,
923 startOfUndefined
- nfixed
);
924 size_t offset
= (startOfUndefined
- nfixed
) * sizeof(Value
);
925 fillSlotsWithUndefined(Address(obj
, offset
), temp
,
926 startOfUndefined
- nfixed
, ndynamic
);
928 fillSlotsWithUndefined(Address(obj
, 0), temp
, 0, ndynamic
);
935 void MacroAssembler::initGCThing(Register obj
, Register temp
,
936 const TemplateObject
& templateObj
,
938 // Fast initialization of an empty object returned by allocateObject().
940 storePtr(ImmGCPtr(templateObj
.shape()),
941 Address(obj
, JSObject::offsetOfShape()));
943 if (templateObj
.isNativeObject()) {
944 const TemplateNativeObject
& ntemplate
=
945 templateObj
.asTemplateNativeObject();
946 MOZ_ASSERT(!ntemplate
.hasDynamicElements());
948 // If the object has dynamic slots, the slots member has already been
950 if (ntemplate
.numDynamicSlots() == 0) {
951 storePtr(ImmPtr(emptyObjectSlots
),
952 Address(obj
, NativeObject::offsetOfSlots()));
955 if (ntemplate
.isArrayObject()) {
956 // Can't skip initializing reserved slots.
957 MOZ_ASSERT(initContents
);
959 int elementsOffset
= NativeObject::offsetOfFixedElements();
961 computeEffectiveAddress(Address(obj
, elementsOffset
), temp
);
962 storePtr(temp
, Address(obj
, NativeObject::offsetOfElements()));
964 // Fill in the elements header.
966 Imm32(ntemplate
.getDenseCapacity()),
967 Address(obj
, elementsOffset
+ ObjectElements::offsetOfCapacity()));
968 store32(Imm32(ntemplate
.getDenseInitializedLength()),
969 Address(obj
, elementsOffset
+
970 ObjectElements::offsetOfInitializedLength()));
971 store32(Imm32(ntemplate
.getArrayLength()),
972 Address(obj
, elementsOffset
+ ObjectElements::offsetOfLength()));
973 store32(Imm32(ObjectElements::FIXED
),
974 Address(obj
, elementsOffset
+ ObjectElements::offsetOfFlags()));
975 } else if (ntemplate
.isArgumentsObject()) {
976 // The caller will initialize the reserved slots.
977 MOZ_ASSERT(!initContents
);
978 storePtr(ImmPtr(emptyObjectElements
),
979 Address(obj
, NativeObject::offsetOfElements()));
981 // If the target type could be a TypedArray that maps shared memory
982 // then this would need to store emptyObjectElementsShared in that case.
983 MOZ_ASSERT(!ntemplate
.isSharedMemory());
985 // Can't skip initializing reserved slots.
986 MOZ_ASSERT(initContents
);
988 storePtr(ImmPtr(emptyObjectElements
),
989 Address(obj
, NativeObject::offsetOfElements()));
991 initGCSlots(obj
, temp
, ntemplate
);
994 MOZ_CRASH("Unknown object");
998 AllocatableRegisterSet
regs(RegisterSet::Volatile());
999 LiveRegisterSet
save(regs
.asLiveSet());
1000 PushRegsInMask(save
);
1002 regs
.takeUnchecked(obj
);
1003 Register temp2
= regs
.takeAnyGeneral();
1005 using Fn
= void (*)(JSObject
* obj
);
1006 setupUnalignedABICall(temp2
);
1008 callWithABI
<Fn
, TraceCreateObject
>();
1010 PopRegsInMask(save
);
1014 void MacroAssembler::compareStrings(JSOp op
, Register left
, Register right
,
1015 Register result
, Label
* fail
) {
1016 MOZ_ASSERT(left
!= result
);
1017 MOZ_ASSERT(right
!= result
);
1018 MOZ_ASSERT(IsEqualityOp(op
) || IsRelationalOp(op
));
1020 Label notPointerEqual
;
1021 // If operands point to the same instance, the strings are trivially equal.
1022 branchPtr(Assembler::NotEqual
, left
, right
,
1023 IsEqualityOp(op
) ? ¬PointerEqual
: fail
);
1024 move32(Imm32(op
== JSOp::Eq
|| op
== JSOp::StrictEq
|| op
== JSOp::Le
||
1028 if (IsEqualityOp(op
)) {
1032 bind(¬PointerEqual
);
1034 Label leftIsNotAtom
;
1035 Label setNotEqualResult
;
1036 // Atoms cannot be equal to each other if they point to different strings.
1037 Imm32
atomBit(JSString::ATOM_BIT
);
1038 branchTest32(Assembler::Zero
, Address(left
, JSString::offsetOfFlags()),
1039 atomBit
, &leftIsNotAtom
);
1040 branchTest32(Assembler::NonZero
, Address(right
, JSString::offsetOfFlags()),
1041 atomBit
, &setNotEqualResult
);
1043 bind(&leftIsNotAtom
);
1044 // Strings of different length can never be equal.
1045 loadStringLength(left
, result
);
1046 branch32(Assembler::Equal
, Address(right
, JSString::offsetOfLength()),
1049 bind(&setNotEqualResult
);
1050 move32(Imm32(op
== JSOp::Ne
|| op
== JSOp::StrictNe
), result
);
1056 void MacroAssembler::loadStringChars(Register str
, Register dest
,
1057 CharEncoding encoding
) {
1058 MOZ_ASSERT(str
!= dest
);
1060 if (JitOptions
.spectreStringMitigations
) {
1061 if (encoding
== CharEncoding::Latin1
) {
1062 // If the string is a rope, zero the |str| register. The code below
1063 // depends on str->flags so this should block speculative execution.
1064 movePtr(ImmWord(0), dest
);
1065 test32MovePtr(Assembler::Zero
, Address(str
, JSString::offsetOfFlags()),
1066 Imm32(JSString::LINEAR_BIT
), dest
, str
);
1068 // If we're loading TwoByte chars, there's an additional risk:
1069 // if the string has Latin1 chars, we could read out-of-bounds. To
1070 // prevent this, we check both the Linear and Latin1 bits. We don't
1071 // have a scratch register, so we use these flags also to block
1072 // speculative execution, similar to the use of 0 above.
1073 MOZ_ASSERT(encoding
== CharEncoding::TwoByte
);
1074 static constexpr uint32_t Mask
=
1075 JSString::LINEAR_BIT
| JSString::LATIN1_CHARS_BIT
;
1076 static_assert(Mask
< 1024,
1077 "Mask should be a small, near-null value to ensure we "
1078 "block speculative execution when it's used as string "
1080 move32(Imm32(Mask
), dest
);
1081 and32(Address(str
, JSString::offsetOfFlags()), dest
);
1082 cmp32MovePtr(Assembler::NotEqual
, dest
, Imm32(JSString::LINEAR_BIT
), dest
,
1087 // Load the inline chars.
1088 computeEffectiveAddress(Address(str
, JSInlineString::offsetOfInlineStorage()),
1091 // If it's not an inline string, load the non-inline chars. Use a
1092 // conditional move to prevent speculative execution.
1093 test32LoadPtr(Assembler::Zero
, Address(str
, JSString::offsetOfFlags()),
1094 Imm32(JSString::INLINE_CHARS_BIT
),
1095 Address(str
, JSString::offsetOfNonInlineChars()), dest
);
1098 void MacroAssembler::loadNonInlineStringChars(Register str
, Register dest
,
1099 CharEncoding encoding
) {
1100 MOZ_ASSERT(str
!= dest
);
1102 if (JitOptions
.spectreStringMitigations
) {
1103 // If the string is a rope, has inline chars, or has a different
1104 // character encoding, set str to a near-null value to prevent
1105 // speculative execution below (when reading str->nonInlineChars).
1107 static constexpr uint32_t Mask
= JSString::LINEAR_BIT
|
1108 JSString::INLINE_CHARS_BIT
|
1109 JSString::LATIN1_CHARS_BIT
;
1110 static_assert(Mask
< 1024,
1111 "Mask should be a small, near-null value to ensure we "
1112 "block speculative execution when it's used as string "
1115 uint32_t expectedBits
= JSString::LINEAR_BIT
;
1116 if (encoding
== CharEncoding::Latin1
) {
1117 expectedBits
|= JSString::LATIN1_CHARS_BIT
;
1120 move32(Imm32(Mask
), dest
);
1121 and32(Address(str
, JSString::offsetOfFlags()), dest
);
1123 cmp32MovePtr(Assembler::NotEqual
, dest
, Imm32(expectedBits
), dest
, str
);
1126 loadPtr(Address(str
, JSString::offsetOfNonInlineChars()), dest
);
1129 void MacroAssembler::storeNonInlineStringChars(Register chars
, Register str
) {
1130 MOZ_ASSERT(chars
!= str
);
1131 storePtr(chars
, Address(str
, JSString::offsetOfNonInlineChars()));
1134 void MacroAssembler::loadInlineStringCharsForStore(Register str
,
1136 computeEffectiveAddress(Address(str
, JSInlineString::offsetOfInlineStorage()),
1140 void MacroAssembler::loadInlineStringChars(Register str
, Register dest
,
1141 CharEncoding encoding
) {
1142 MOZ_ASSERT(str
!= dest
);
1144 if (JitOptions
.spectreStringMitigations
) {
1145 // Making this Spectre-safe is a bit complicated: using
1146 // computeEffectiveAddress and then zeroing the output register if
1147 // non-inline is not sufficient: when the index is very large, it would
1148 // allow reading |nullptr + index|. Just fall back to loadStringChars
1150 loadStringChars(str
, dest
, encoding
);
1152 computeEffectiveAddress(
1153 Address(str
, JSInlineString::offsetOfInlineStorage()), dest
);
1157 void MacroAssembler::loadRopeLeftChild(Register str
, Register dest
) {
1158 MOZ_ASSERT(str
!= dest
);
1160 if (JitOptions
.spectreStringMitigations
) {
1161 // Zero the output register if the input was not a rope.
1162 movePtr(ImmWord(0), dest
);
1163 test32LoadPtr(Assembler::Zero
, Address(str
, JSString::offsetOfFlags()),
1164 Imm32(JSString::LINEAR_BIT
),
1165 Address(str
, JSRope::offsetOfLeft()), dest
);
1167 loadPtr(Address(str
, JSRope::offsetOfLeft()), dest
);
1171 void MacroAssembler::loadRopeRightChild(Register str
, Register dest
) {
1172 MOZ_ASSERT(str
!= dest
);
1174 if (JitOptions
.spectreStringMitigations
) {
1175 // Zero the output register if the input was not a rope.
1176 movePtr(ImmWord(0), dest
);
1177 test32LoadPtr(Assembler::Zero
, Address(str
, JSString::offsetOfFlags()),
1178 Imm32(JSString::LINEAR_BIT
),
1179 Address(str
, JSRope::offsetOfRight()), dest
);
1181 loadPtr(Address(str
, JSRope::offsetOfRight()), dest
);
1185 void MacroAssembler::storeRopeChildren(Register left
, Register right
,
1187 storePtr(left
, Address(str
, JSRope::offsetOfLeft()));
1188 storePtr(right
, Address(str
, JSRope::offsetOfRight()));
1191 void MacroAssembler::loadDependentStringBase(Register str
, Register dest
) {
1192 MOZ_ASSERT(str
!= dest
);
1194 if (JitOptions
.spectreStringMitigations
) {
1195 // If the string is not a dependent string, zero the |str| register.
1196 // The code below loads str->base so this should block speculative
1198 movePtr(ImmWord(0), dest
);
1199 test32MovePtr(Assembler::Zero
, Address(str
, JSString::offsetOfFlags()),
1200 Imm32(JSString::DEPENDENT_BIT
), dest
, str
);
1203 loadPtr(Address(str
, JSDependentString::offsetOfBase()), dest
);
1206 void MacroAssembler::storeDependentStringBase(Register base
, Register str
) {
1207 storePtr(base
, Address(str
, JSDependentString::offsetOfBase()));
1210 void MacroAssembler::loadRopeChild(Register str
, Register index
,
1211 Register output
, Label
* isLinear
) {
1212 // This follows JSString::getChar.
1213 branchIfNotRope(str
, isLinear
);
1215 loadRopeLeftChild(str
, output
);
1217 // Check if the index is contained in the leftChild.
1219 branch32(Assembler::Above
, Address(output
, JSString::offsetOfLength()), index
,
1222 // The index must be in the rightChild.
1223 loadRopeRightChild(str
, output
);
1228 void MacroAssembler::branchIfCanLoadStringChar(Register str
, Register index
,
1229 Register scratch
, Label
* label
) {
1230 loadRopeChild(str
, index
, scratch
, label
);
1232 // Branch if the left resp. right side is linear.
1233 branchIfNotRope(scratch
, label
);
1236 void MacroAssembler::branchIfNotCanLoadStringChar(Register str
, Register index
,
1240 loadRopeChild(str
, index
, scratch
, &done
);
1242 // Branch if the left or right side is another rope.
1243 branchIfRope(scratch
, label
);
1248 void MacroAssembler::loadStringChar(Register str
, Register index
,
1249 Register output
, Register scratch1
,
1250 Register scratch2
, Label
* fail
) {
1251 MOZ_ASSERT(str
!= output
);
1252 MOZ_ASSERT(str
!= index
);
1253 MOZ_ASSERT(index
!= output
);
1254 MOZ_ASSERT(output
!= scratch1
);
1255 MOZ_ASSERT(output
!= scratch2
);
1257 // Use scratch1 for the index (adjusted below).
1258 move32(index
, scratch1
);
1259 movePtr(str
, output
);
1261 // This follows JSString::getChar.
1263 branchIfNotRope(str
, ¬Rope
);
1265 loadRopeLeftChild(str
, output
);
1267 // Check if the index is contained in the leftChild.
1268 Label loadedChild
, notInLeft
;
1269 spectreBoundsCheck32(scratch1
, Address(output
, JSString::offsetOfLength()),
1270 scratch2
, ¬InLeft
);
1273 // The index must be in the rightChild.
1274 // index -= rope->leftChild()->length()
1276 sub32(Address(output
, JSString::offsetOfLength()), scratch1
);
1277 loadRopeRightChild(str
, output
);
1279 // If the left or right side is another rope, give up.
1281 branchIfRope(output
, fail
);
1285 Label isLatin1
, done
;
1286 // We have to check the left/right side for ropes,
1287 // because a TwoByte rope might have a Latin1 child.
1288 branchLatin1String(output
, &isLatin1
);
1289 loadStringChars(output
, scratch2
, CharEncoding::TwoByte
);
1290 loadChar(scratch2
, scratch1
, output
, CharEncoding::TwoByte
);
1294 loadStringChars(output
, scratch2
, CharEncoding::Latin1
);
1295 loadChar(scratch2
, scratch1
, output
, CharEncoding::Latin1
);
1300 void MacroAssembler::loadStringIndexValue(Register str
, Register dest
,
1302 MOZ_ASSERT(str
!= dest
);
1304 load32(Address(str
, JSString::offsetOfFlags()), dest
);
1306 // Does not have a cached index value.
1307 branchTest32(Assembler::Zero
, dest
, Imm32(JSString::INDEX_VALUE_BIT
), fail
);
1309 // Extract the index.
1310 rshift32(Imm32(JSString::INDEX_VALUE_SHIFT
), dest
);
1313 void MacroAssembler::loadChar(Register chars
, Register index
, Register dest
,
1314 CharEncoding encoding
, int32_t offset
/* = 0 */) {
1315 if (encoding
== CharEncoding::Latin1
) {
1316 loadChar(BaseIndex(chars
, index
, TimesOne
, offset
), dest
, encoding
);
1318 loadChar(BaseIndex(chars
, index
, TimesTwo
, offset
), dest
, encoding
);
1322 void MacroAssembler::addToCharPtr(Register chars
, Register index
,
1323 CharEncoding encoding
) {
1324 if (encoding
== CharEncoding::Latin1
) {
1325 static_assert(sizeof(char) == 1,
1326 "Latin-1 string index shouldn't need scaling");
1327 addPtr(index
, chars
);
1329 computeEffectiveAddress(BaseIndex(chars
, index
, TimesTwo
), chars
);
1333 void MacroAssembler::loadStringFromUnit(Register unit
, Register dest
,
1334 const StaticStrings
& staticStrings
) {
1335 movePtr(ImmPtr(&staticStrings
.unitStaticTable
), dest
);
1336 loadPtr(BaseIndex(dest
, unit
, ScalePointer
), dest
);
1339 void MacroAssembler::loadLengthTwoString(Register c1
, Register c2
,
1341 const StaticStrings
& staticStrings
) {
1342 // Compute (toSmallCharTable[c1] << SMALL_CHAR_BITS) + toSmallCharTable[c2]
1343 // to obtain the index into `StaticStrings::length2StaticTable`.
1344 static_assert(sizeof(StaticStrings::SmallChar
) == 1);
1346 movePtr(ImmPtr(&StaticStrings::toSmallCharTable
.storage
), dest
);
1347 load8ZeroExtend(BaseIndex(dest
, c1
, Scale::TimesOne
), c1
);
1348 load8ZeroExtend(BaseIndex(dest
, c2
, Scale::TimesOne
), c2
);
1350 lshift32(Imm32(StaticStrings::SMALL_CHAR_BITS
), c1
);
1353 // Look up the string from the computed index.
1354 movePtr(ImmPtr(&staticStrings
.length2StaticTable
), dest
);
1355 loadPtr(BaseIndex(dest
, c1
, ScalePointer
), dest
);
1358 void MacroAssembler::loadInt32ToStringWithBase(
1359 Register input
, Register base
, Register dest
, Register scratch1
,
1360 Register scratch2
, const StaticStrings
& staticStrings
,
1361 const LiveRegisterSet
& volatileRegs
, Label
* fail
) {
1363 Label baseBad
, baseOk
;
1364 branch32(Assembler::LessThan
, base
, Imm32(2), &baseBad
);
1365 branch32(Assembler::LessThanOrEqual
, base
, Imm32(36), &baseOk
);
1367 assumeUnreachable("base must be in range [2, 36]");
1371 // Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
1372 auto toChar
= [this, base
](Register r
) {
1375 branch32(Assembler::Below
, r
, base
, &ok
);
1376 assumeUnreachable("bad digit");
1379 // Silence unused lambda capture warning.
1384 add32(Imm32('0'), r
);
1385 branch32(Assembler::BelowOrEqual
, r
, Imm32('9'), &done
);
1386 add32(Imm32('a' - '0' - 10), r
);
1390 // Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
1391 Label lengthTwo
, done
;
1392 branch32(Assembler::AboveOrEqual
, input
, base
, &lengthTwo
);
1394 move32(input
, scratch1
);
1397 loadStringFromUnit(scratch1
, dest
, staticStrings
);
1403 // Compute |base * base|.
1404 move32(base
, scratch1
);
1405 mul32(scratch1
, scratch1
);
1407 // Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
1408 branch32(Assembler::AboveOrEqual
, input
, scratch1
, fail
);
1410 // Compute |scratch1 = input / base| and |scratch2 = input % base|.
1411 move32(input
, scratch1
);
1412 flexibleDivMod32(base
, scratch1
, scratch2
, true, volatileRegs
);
1414 // Compute the digits of the divisor and remainder.
1418 // Look up the 2-character digit string in the small-char table.
1419 loadLengthTwoString(scratch1
, scratch2
, dest
, staticStrings
);
1424 void MacroAssembler::loadInt32ToStringWithBase(
1425 Register input
, int32_t base
, Register dest
, Register scratch1
,
1426 Register scratch2
, const StaticStrings
& staticStrings
, Label
* fail
) {
1427 MOZ_ASSERT(2 <= base
&& base
<= 36, "base must be in range [2, 36]");
1429 // Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
1430 auto toChar
= [this, base
](Register r
) {
1433 branch32(Assembler::Below
, r
, Imm32(base
), &ok
);
1434 assumeUnreachable("bad digit");
1439 add32(Imm32('0'), r
);
1442 add32(Imm32('0'), r
);
1443 branch32(Assembler::BelowOrEqual
, r
, Imm32('9'), &done
);
1444 add32(Imm32('a' - '0' - 10), r
);
1449 // Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
1450 Label lengthTwo
, done
;
1451 branch32(Assembler::AboveOrEqual
, input
, Imm32(base
), &lengthTwo
);
1453 move32(input
, scratch1
);
1456 loadStringFromUnit(scratch1
, dest
, staticStrings
);
1462 // Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
1463 branch32(Assembler::AboveOrEqual
, input
, Imm32(base
* base
), fail
);
1465 // Compute |scratch1 = input / base| and |scratch2 = input % base|.
1466 if (mozilla::IsPowerOfTwo(uint32_t(base
))) {
1467 uint32_t shift
= mozilla::FloorLog2(base
);
1469 move32(input
, scratch1
);
1470 rshift32(Imm32(shift
), scratch1
);
1472 move32(input
, scratch2
);
1473 and32(Imm32((uint32_t(1) << shift
) - 1), scratch2
);
1475 // The following code matches CodeGenerator::visitUDivOrModConstant()
1476 // for x86-shared. Also see Hacker's Delight 2nd edition, chapter 10-8
1477 // "Unsigned Division by 7" for the case when |rmc.multiplier| exceeds
1478 // UINT32_MAX and we need to adjust the shift amount.
1480 auto rmc
= ReciprocalMulConstants::computeUnsignedDivisionConstants(base
);
1482 // We first compute |q = (M * n) >> 32), where M = rmc.multiplier.
1483 mulHighUnsigned32(Imm32(rmc
.multiplier
), input
, scratch1
);
1485 if (rmc
.multiplier
> UINT32_MAX
) {
1486 // M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
1487 // ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
1488 // contradicting the proof of correctness in computeDivisionConstants.
1489 MOZ_ASSERT(rmc
.shiftAmount
> 0);
1490 MOZ_ASSERT(rmc
.multiplier
< (int64_t(1) << 33));
1492 // Compute |t = (n - q) / 2|.
1493 move32(input
, scratch2
);
1494 sub32(scratch1
, scratch2
);
1495 rshift32(Imm32(1), scratch2
);
1497 // Compute |t = (n - q) / 2 + q = (n + q) / 2|.
1498 add32(scratch2
, scratch1
);
1500 // Finish the computation |q = floor(n / d)|.
1501 rshift32(Imm32(rmc
.shiftAmount
- 1), scratch1
);
1503 rshift32(Imm32(rmc
.shiftAmount
), scratch1
);
1506 // Compute the remainder from |r = n - q * d|.
1507 move32(scratch1
, dest
);
1508 mul32(Imm32(base
), dest
);
1509 move32(input
, scratch2
);
1510 sub32(dest
, scratch2
);
1513 // Compute the digits of the divisor and remainder.
1517 // Look up the 2-character digit string in the small-char table.
1518 loadLengthTwoString(scratch1
, scratch2
, dest
, staticStrings
);
1523 void MacroAssembler::loadBigIntDigits(Register bigInt
, Register digits
) {
1524 MOZ_ASSERT(digits
!= bigInt
);
1526 // Load the inline digits.
1527 computeEffectiveAddress(Address(bigInt
, BigInt::offsetOfInlineDigits()),
1530 // If inline digits aren't used, load the heap digits. Use a conditional move
1531 // to prevent speculative execution.
1532 cmp32LoadPtr(Assembler::Above
, Address(bigInt
, BigInt::offsetOfLength()),
1533 Imm32(int32_t(BigInt::inlineDigitsLength())),
1534 Address(bigInt
, BigInt::offsetOfHeapDigits()), digits
);
1537 void MacroAssembler::loadBigInt64(Register bigInt
, Register64 dest
) {
1538 // This code follows the implementation of |BigInt::toUint64()|. We're also
1539 // using it for inline callers of |BigInt::toInt64()|, which works, because
1540 // all supported Jit architectures use a two's complement representation for
1541 // int64 values, which means the WrapToSigned call in toInt64() is a no-op.
1543 Label done
, nonZero
;
1545 branchIfBigIntIsNonZero(bigInt
, &nonZero
);
1547 move64(Imm64(0), dest
);
1553 Register digits
= dest
.reg
;
1555 Register digits
= dest
.high
;
1558 loadBigIntDigits(bigInt
, digits
);
1561 // Load the first digit into the destination register.
1562 load64(Address(digits
, 0), dest
);
1564 // Load the first digit into the destination register's low value.
1565 load32(Address(digits
, 0), dest
.low
);
1567 // And conditionally load the second digit into the high value register.
1568 Label twoDigits
, digitsDone
;
1569 branch32(Assembler::Above
, Address(bigInt
, BigInt::offsetOfLength()),
1570 Imm32(1), &twoDigits
);
1572 move32(Imm32(0), dest
.high
);
1577 load32(Address(digits
, sizeof(BigInt::Digit
)), dest
.high
);
1582 branchTest32(Assembler::Zero
, Address(bigInt
, BigInt::offsetOfFlags()),
1583 Imm32(BigInt::signBitMask()), &done
);
1589 void MacroAssembler::loadFirstBigIntDigitOrZero(Register bigInt
,
1591 Label done
, nonZero
;
1592 branchIfBigIntIsNonZero(bigInt
, &nonZero
);
1594 movePtr(ImmWord(0), dest
);
1599 loadBigIntDigits(bigInt
, dest
);
1601 // Load the first digit into the destination register.
1602 loadPtr(Address(dest
, 0), dest
);
1607 void MacroAssembler::loadBigInt(Register bigInt
, Register dest
, Label
* fail
) {
1608 Label done
, nonZero
;
1609 branchIfBigIntIsNonZero(bigInt
, &nonZero
);
1611 movePtr(ImmWord(0), dest
);
1616 loadBigIntNonZero(bigInt
, dest
, fail
);
1621 void MacroAssembler::loadBigIntNonZero(Register bigInt
, Register dest
,
1623 MOZ_ASSERT(bigInt
!= dest
);
1627 branchIfBigIntIsNonZero(bigInt
, &nonZero
);
1628 assumeUnreachable("Unexpected zero BigInt");
1632 branch32(Assembler::Above
, Address(bigInt
, BigInt::offsetOfLength()),
1635 static_assert(BigInt::inlineDigitsLength() > 0,
1636 "Single digit BigInts use inline storage");
1638 // Load the first inline digit into the destination register.
1639 loadPtr(Address(bigInt
, BigInt::offsetOfInlineDigits()), dest
);
1641 // Return as a signed pointer.
1642 bigIntDigitToSignedPtr(bigInt
, dest
, fail
);
1645 void MacroAssembler::bigIntDigitToSignedPtr(Register bigInt
, Register digit
,
1647 // BigInt digits are stored as absolute numbers. Take the failure path when
1648 // the digit can't be stored in intptr_t.
1649 branchTestPtr(Assembler::Signed
, digit
, digit
, fail
);
1651 // Negate |dest| when the BigInt is negative.
1653 branchIfBigIntIsNonNegative(bigInt
, &nonNegative
);
1658 void MacroAssembler::loadBigIntAbsolute(Register bigInt
, Register dest
,
1660 MOZ_ASSERT(bigInt
!= dest
);
1662 branch32(Assembler::Above
, Address(bigInt
, BigInt::offsetOfLength()),
1665 static_assert(BigInt::inlineDigitsLength() > 0,
1666 "Single digit BigInts use inline storage");
1668 // Load the first inline digit into the destination register.
1669 movePtr(ImmWord(0), dest
);
1670 cmp32LoadPtr(Assembler::NotEqual
, Address(bigInt
, BigInt::offsetOfLength()),
1671 Imm32(0), Address(bigInt
, BigInt::offsetOfInlineDigits()), dest
);
1674 void MacroAssembler::initializeBigInt64(Scalar::Type type
, Register bigInt
,
1676 MOZ_ASSERT(Scalar::isBigIntType(type
));
1678 store32(Imm32(0), Address(bigInt
, BigInt::offsetOfFlags()));
1680 Label done
, nonZero
;
1681 branch64(Assembler::NotEqual
, val
, Imm64(0), &nonZero
);
1683 store32(Imm32(0), Address(bigInt
, BigInt::offsetOfLength()));
1688 if (type
== Scalar::BigInt64
) {
1689 // Set the sign-bit for negative values and then continue with the two's
1692 branch64(Assembler::GreaterThan
, val
, Imm64(0), &isPositive
);
1694 store32(Imm32(BigInt::signBitMask()),
1695 Address(bigInt
, BigInt::offsetOfFlags()));
1701 store32(Imm32(1), Address(bigInt
, BigInt::offsetOfLength()));
1703 static_assert(sizeof(BigInt::Digit
) == sizeof(uintptr_t),
1704 "BigInt Digit size matches uintptr_t, so there's a single "
1705 "store on 64-bit and up to two stores on 32-bit");
1709 branchTest32(Assembler::Zero
, val
.high
, val
.high
, &singleDigit
);
1710 store32(Imm32(2), Address(bigInt
, BigInt::offsetOfLength()));
1713 // We can perform a single store64 on 32-bit platforms, because inline
1714 // storage can store at least two 32-bit integers.
1715 static_assert(BigInt::inlineDigitsLength() >= 2,
1716 "BigInt inline storage can store at least two digits");
1719 store64(val
, Address(bigInt
, js::BigInt::offsetOfInlineDigits()));
1724 void MacroAssembler::initializeBigInt(Register bigInt
, Register val
) {
1725 store32(Imm32(0), Address(bigInt
, BigInt::offsetOfFlags()));
1727 Label done
, nonZero
;
1728 branchTestPtr(Assembler::NonZero
, val
, val
, &nonZero
);
1730 store32(Imm32(0), Address(bigInt
, BigInt::offsetOfLength()));
1735 // Set the sign-bit for negative values and then continue with the two's
1738 branchTestPtr(Assembler::NotSigned
, val
, val
, &isPositive
);
1740 store32(Imm32(BigInt::signBitMask()),
1741 Address(bigInt
, BigInt::offsetOfFlags()));
1746 store32(Imm32(1), Address(bigInt
, BigInt::offsetOfLength()));
1748 static_assert(sizeof(BigInt::Digit
) == sizeof(uintptr_t),
1749 "BigInt Digit size matches uintptr_t");
1751 storePtr(val
, Address(bigInt
, js::BigInt::offsetOfInlineDigits()));
1756 void MacroAssembler::initializeBigIntAbsolute(Register bigInt
, Register val
) {
1757 store32(Imm32(0), Address(bigInt
, BigInt::offsetOfFlags()));
1759 Label done
, nonZero
;
1760 branchTestPtr(Assembler::NonZero
, val
, val
, &nonZero
);
1762 store32(Imm32(0), Address(bigInt
, BigInt::offsetOfLength()));
1767 store32(Imm32(1), Address(bigInt
, BigInt::offsetOfLength()));
1769 static_assert(sizeof(BigInt::Digit
) == sizeof(uintptr_t),
1770 "BigInt Digit size matches uintptr_t");
1772 storePtr(val
, Address(bigInt
, js::BigInt::offsetOfInlineDigits()));
1777 void MacroAssembler::copyBigIntWithInlineDigits(Register src
, Register dest
,
1779 gc::Heap initialHeap
,
1781 branch32(Assembler::Above
, Address(src
, BigInt::offsetOfLength()),
1782 Imm32(int32_t(BigInt::inlineDigitsLength())), fail
);
1784 newGCBigInt(dest
, temp
, initialHeap
, fail
);
1786 // Copy the sign-bit, but not any of the other bits used by the GC.
1787 load32(Address(src
, BigInt::offsetOfFlags()), temp
);
1788 and32(Imm32(BigInt::signBitMask()), temp
);
1789 store32(temp
, Address(dest
, BigInt::offsetOfFlags()));
1792 load32(Address(src
, BigInt::offsetOfLength()), temp
);
1793 store32(temp
, Address(dest
, BigInt::offsetOfLength()));
1796 Address
srcDigits(src
, js::BigInt::offsetOfInlineDigits());
1797 Address
destDigits(dest
, js::BigInt::offsetOfInlineDigits());
1799 for (size_t i
= 0; i
< BigInt::inlineDigitsLength(); i
++) {
1800 static_assert(sizeof(BigInt::Digit
) == sizeof(uintptr_t),
1801 "BigInt Digit size matches uintptr_t");
1803 loadPtr(srcDigits
, temp
);
1804 storePtr(temp
, destDigits
);
1806 srcDigits
= Address(src
, srcDigits
.offset
+ sizeof(BigInt::Digit
));
1807 destDigits
= Address(dest
, destDigits
.offset
+ sizeof(BigInt::Digit
));
1811 void MacroAssembler::compareBigIntAndInt32(JSOp op
, Register bigInt
,
1812 Register int32
, Register scratch1
,
1813 Register scratch2
, Label
* ifTrue
,
1815 MOZ_ASSERT(IsLooseEqualityOp(op
) || IsRelationalOp(op
));
1817 static_assert(std::is_same_v
<BigInt::Digit
, uintptr_t>,
1818 "BigInt digit can be loaded in a pointer-sized register");
1819 static_assert(sizeof(BigInt::Digit
) >= sizeof(uint32_t),
1820 "BigInt digit stores at least an uint32");
1822 // Test for too large numbers.
1824 // If the absolute value of the BigInt can't be expressed in an uint32/uint64,
1825 // the result of the comparison is a constant.
1826 if (op
== JSOp::Eq
|| op
== JSOp::Ne
) {
1827 Label
* tooLarge
= op
== JSOp::Eq
? ifFalse
: ifTrue
;
1828 branch32(Assembler::GreaterThan
,
1829 Address(bigInt
, BigInt::offsetOfDigitLength()), Imm32(1),
1833 branch32(Assembler::LessThanOrEqual
,
1834 Address(bigInt
, BigInt::offsetOfDigitLength()), Imm32(1),
1837 // Still need to take the sign-bit into account for relational operations.
1838 if (op
== JSOp::Lt
|| op
== JSOp::Le
) {
1839 branchIfBigIntIsNegative(bigInt
, ifTrue
);
1842 branchIfBigIntIsNegative(bigInt
, ifFalse
);
1849 // Test for mismatched signs and, if the signs are equal, load |abs(x)| in
1850 // |scratch1| and |abs(y)| in |scratch2| and then compare the absolute numbers
1851 // against each other.
1853 // Jump to |ifTrue| resp. |ifFalse| if the BigInt is strictly less than
1854 // resp. strictly greater than the int32 value, depending on the comparison
1858 if (op
== JSOp::Eq
) {
1859 greaterThan
= ifFalse
;
1861 } else if (op
== JSOp::Ne
) {
1862 greaterThan
= ifTrue
;
1864 } else if (op
== JSOp::Lt
|| op
== JSOp::Le
) {
1865 greaterThan
= ifFalse
;
1868 MOZ_ASSERT(op
== JSOp::Gt
|| op
== JSOp::Ge
);
1869 greaterThan
= ifTrue
;
1873 // BigInt digits are always stored as an absolute number.
1874 loadFirstBigIntDigitOrZero(bigInt
, scratch1
);
1876 // Load the int32 into |scratch2| and negate it for negative numbers.
1877 move32(int32
, scratch2
);
1879 Label isNegative
, doCompare
;
1880 branchIfBigIntIsNegative(bigInt
, &isNegative
);
1881 branch32(Assembler::LessThan
, int32
, Imm32(0), greaterThan
);
1884 // We rely on |neg32(INT32_MIN)| staying INT32_MIN, because we're using an
1885 // unsigned comparison below.
1887 branch32(Assembler::GreaterThanOrEqual
, int32
, Imm32(0), lessThan
);
1890 // Not all supported platforms (e.g. MIPS64) zero-extend 32-bit operations,
1891 // so we need to explicitly clear any high 32-bits.
1892 move32ZeroExtendToPtr(scratch2
, scratch2
);
1894 // Reverse the relational comparator for negative numbers.
1895 // |-x < -y| <=> |+x > +y|.
1896 // |-x ≤ -y| <=> |+x ≥ +y|.
1897 // |-x > -y| <=> |+x < +y|.
1898 // |-x ≥ -y| <=> |+x ≤ +y|.
1899 JSOp reversed
= ReverseCompareOp(op
);
1900 if (reversed
!= op
) {
1901 branchPtr(JSOpToCondition(reversed
, /* isSigned = */ false), scratch1
,
1907 branchPtr(JSOpToCondition(op
, /* isSigned = */ false), scratch1
, scratch2
,
1912 void MacroAssembler::equalBigInts(Register left
, Register right
, Register temp1
,
1913 Register temp2
, Register temp3
,
1914 Register temp4
, Label
* notSameSign
,
1915 Label
* notSameLength
, Label
* notSameDigit
) {
1916 MOZ_ASSERT(left
!= temp1
);
1917 MOZ_ASSERT(right
!= temp1
);
1918 MOZ_ASSERT(right
!= temp2
);
1920 // Jump to |notSameSign| when the sign aren't the same.
1921 load32(Address(left
, BigInt::offsetOfFlags()), temp1
);
1922 xor32(Address(right
, BigInt::offsetOfFlags()), temp1
);
1923 branchTest32(Assembler::NonZero
, temp1
, Imm32(BigInt::signBitMask()),
1926 // Jump to |notSameLength| when the digits length is different.
1927 load32(Address(right
, BigInt::offsetOfLength()), temp1
);
1928 branch32(Assembler::NotEqual
, Address(left
, BigInt::offsetOfLength()), temp1
,
1931 // Both BigInts have the same sign and the same number of digits. Loop
1932 // over each digit, starting with the left-most one, and break from the
1933 // loop when the first non-matching digit was found.
1935 loadBigIntDigits(left
, temp2
);
1936 loadBigIntDigits(right
, temp3
);
1938 static_assert(sizeof(BigInt::Digit
) == sizeof(void*),
1939 "BigInt::Digit is pointer sized");
1941 computeEffectiveAddress(BaseIndex(temp2
, temp1
, ScalePointer
), temp2
);
1942 computeEffectiveAddress(BaseIndex(temp3
, temp1
, ScalePointer
), temp3
);
1948 subPtr(Imm32(sizeof(BigInt::Digit
)), temp2
);
1949 subPtr(Imm32(sizeof(BigInt::Digit
)), temp3
);
1951 loadPtr(Address(temp3
, 0), temp4
);
1952 branchPtr(Assembler::NotEqual
, Address(temp2
, 0), temp4
, notSameDigit
);
1955 branchSub32(Assembler::NotSigned
, Imm32(1), temp1
, &loop
);
1957 // No different digits were found, both BigInts are equal to each other.
1960 void MacroAssembler::typeOfObject(Register obj
, Register scratch
, Label
* slow
,
1961 Label
* isObject
, Label
* isCallable
,
1962 Label
* isUndefined
) {
1963 loadObjClassUnsafe(obj
, scratch
);
1965 // Proxies can emulate undefined and have complex isCallable behavior.
1966 branchTestClassIsProxy(true, scratch
, slow
);
1968 // JSFunctions are always callable.
1969 branchTestClassIsFunction(Assembler::Equal
, scratch
, isCallable
);
1971 // Objects that emulate undefined.
1972 Address
flags(scratch
, JSClass::offsetOfFlags());
1973 branchTest32(Assembler::NonZero
, flags
, Imm32(JSCLASS_EMULATES_UNDEFINED
),
1976 // Handle classes with a call hook.
1977 branchPtr(Assembler::Equal
, Address(scratch
, offsetof(JSClass
, cOps
)),
1978 ImmPtr(nullptr), isObject
);
1980 loadPtr(Address(scratch
, offsetof(JSClass
, cOps
)), scratch
);
1981 branchPtr(Assembler::Equal
, Address(scratch
, offsetof(JSClassOps
, call
)),
1982 ImmPtr(nullptr), isObject
);
1987 void MacroAssembler::isCallableOrConstructor(bool isCallable
, Register obj
,
1988 Register output
, Label
* isProxy
) {
1989 MOZ_ASSERT(obj
!= output
);
1991 Label notFunction
, hasCOps
, done
;
1992 loadObjClassUnsafe(obj
, output
);
1994 // An object is callable iff:
1995 // is<JSFunction>() || (getClass()->cOps && getClass()->cOps->call).
1996 // An object is constructor iff:
1997 // ((is<JSFunction>() && as<JSFunction>().isConstructor) ||
1998 // (getClass()->cOps && getClass()->cOps->construct)).
1999 branchTestClassIsFunction(Assembler::NotEqual
, output
, ¬Function
);
2001 move32(Imm32(1), output
);
2003 static_assert(mozilla::IsPowerOfTwo(uint32_t(FunctionFlags::CONSTRUCTOR
)),
2004 "FunctionFlags::CONSTRUCTOR has only one bit set");
2006 load32(Address(obj
, JSFunction::offsetOfFlagsAndArgCount()), output
);
2007 rshift32(Imm32(mozilla::FloorLog2(uint32_t(FunctionFlags::CONSTRUCTOR
))),
2009 and32(Imm32(1), output
);
2016 // For bound functions, we need to check the isConstructor flag.
2017 Label notBoundFunction
;
2018 branchPtr(Assembler::NotEqual
, output
, ImmPtr(&BoundFunctionObject::class_
),
2021 static_assert(BoundFunctionObject::IsConstructorFlag
== 0b1,
2022 "AND operation results in boolean value");
2023 unboxInt32(Address(obj
, BoundFunctionObject::offsetOfFlagsSlot()), output
);
2024 and32(Imm32(BoundFunctionObject::IsConstructorFlag
), output
);
2027 bind(¬BoundFunction
);
2030 // Just skim proxies off. Their notion of isCallable()/isConstructor() is
2031 // more complicated.
2032 branchTestClassIsProxy(true, output
, isProxy
);
2034 branchPtr(Assembler::NonZero
, Address(output
, offsetof(JSClass
, cOps
)),
2035 ImmPtr(nullptr), &hasCOps
);
2036 move32(Imm32(0), output
);
2040 loadPtr(Address(output
, offsetof(JSClass
, cOps
)), output
);
2042 isCallable
? offsetof(JSClassOps
, call
) : offsetof(JSClassOps
, construct
);
2043 cmpPtrSet(Assembler::NonZero
, Address(output
, opsOffset
), ImmPtr(nullptr),
2049 void MacroAssembler::loadJSContext(Register dest
) {
2050 movePtr(ImmPtr(runtime()->mainContextPtr()), dest
);
2053 static const uint8_t* ContextRealmPtr(CompileRuntime
* rt
) {
2054 return (static_cast<const uint8_t*>(rt
->mainContextPtr()) +
2055 JSContext::offsetOfRealm());
2058 void MacroAssembler::switchToRealm(Register realm
) {
2059 storePtr(realm
, AbsoluteAddress(ContextRealmPtr(runtime())));
2062 void MacroAssembler::switchToRealm(const void* realm
, Register scratch
) {
2065 movePtr(ImmPtr(realm
), scratch
);
2066 switchToRealm(scratch
);
2069 void MacroAssembler::switchToObjectRealm(Register obj
, Register scratch
) {
2070 loadPtr(Address(obj
, JSObject::offsetOfShape()), scratch
);
2071 loadPtr(Address(scratch
, Shape::offsetOfBaseShape()), scratch
);
2072 loadPtr(Address(scratch
, BaseShape::offsetOfRealm()), scratch
);
2073 switchToRealm(scratch
);
2076 void MacroAssembler::switchToBaselineFrameRealm(Register scratch
) {
2077 Address
envChain(FramePointer
,
2078 BaselineFrame::reverseOffsetOfEnvironmentChain());
2079 loadPtr(envChain
, scratch
);
2080 switchToObjectRealm(scratch
, scratch
);
2083 void MacroAssembler::switchToWasmInstanceRealm(Register scratch1
,
2084 Register scratch2
) {
2085 loadPtr(Address(InstanceReg
, wasm::Instance::offsetOfCx()), scratch1
);
2086 loadPtr(Address(InstanceReg
, wasm::Instance::offsetOfRealm()), scratch2
);
2087 storePtr(scratch2
, Address(scratch1
, JSContext::offsetOfRealm()));
2090 void MacroAssembler::debugAssertContextRealm(const void* realm
,
2094 movePtr(ImmPtr(realm
), scratch
);
2095 branchPtr(Assembler::Equal
, AbsoluteAddress(ContextRealmPtr(runtime())),
2097 assumeUnreachable("Unexpected context realm");
2102 void MacroAssembler::setIsCrossRealmArrayConstructor(Register obj
,
2106 branchTestObjectIsProxy(false, obj
, output
, ¬Proxy
);
2107 assumeUnreachable("Unexpected proxy in setIsCrossRealmArrayConstructor");
2111 // The object's realm must not be cx->realm.
2112 Label isFalse
, done
;
2113 loadPtr(Address(obj
, JSObject::offsetOfShape()), output
);
2114 loadPtr(Address(output
, Shape::offsetOfBaseShape()), output
);
2115 loadPtr(Address(output
, BaseShape::offsetOfRealm()), output
);
2116 branchPtr(Assembler::Equal
, AbsoluteAddress(ContextRealmPtr(runtime())),
2119 // The object must be a function.
2120 branchTestObjIsFunction(Assembler::NotEqual
, obj
, output
, obj
, &isFalse
);
2122 // The function must be the ArrayConstructor native.
2123 branchPtr(Assembler::NotEqual
,
2124 Address(obj
, JSFunction::offsetOfNativeOrEnv()),
2125 ImmPtr(js::ArrayConstructor
), &isFalse
);
2127 move32(Imm32(1), output
);
2131 move32(Imm32(0), output
);
2136 void MacroAssembler::setIsDefinitelyTypedArrayConstructor(Register obj
,
2138 Label isFalse
, isTrue
, done
;
2140 // The object must be a function. (Wrappers are not supported.)
2141 branchTestObjIsFunction(Assembler::NotEqual
, obj
, output
, obj
, &isFalse
);
2143 // Load the native into |output|.
2144 loadPtr(Address(obj
, JSFunction::offsetOfNativeOrEnv()), output
);
2146 auto branchIsTypedArrayCtor
= [&](Scalar::Type type
) {
2147 // The function must be a TypedArrayConstructor native (from any realm).
2148 JSNative constructor
= TypedArrayConstructorNative(type
);
2149 branchPtr(Assembler::Equal
, output
, ImmPtr(constructor
), &isTrue
);
2152 #define TYPED_ARRAY_CONSTRUCTOR_NATIVE(_, T, N) \
2153 branchIsTypedArrayCtor(Scalar::N);
2154 JS_FOR_EACH_TYPED_ARRAY(TYPED_ARRAY_CONSTRUCTOR_NATIVE
)
2155 #undef TYPED_ARRAY_CONSTRUCTOR_NATIVE
2157 // Falls through to the false case.
2160 move32(Imm32(0), output
);
2164 move32(Imm32(1), output
);
2169 void MacroAssembler::loadMegamorphicCache(Register dest
) {
2170 movePtr(ImmPtr(runtime()->addressOfMegamorphicCache()), dest
);
2172 void MacroAssembler::loadMegamorphicSetPropCache(Register dest
) {
2173 movePtr(ImmPtr(runtime()->addressOfMegamorphicSetPropCache()), dest
);
2176 void MacroAssembler::loadStringToAtomCacheLastLookups(Register dest
) {
2177 uintptr_t cachePtr
= uintptr_t(runtime()->addressOfStringToAtomCache());
2178 void* offset
= (void*)(cachePtr
+ StringToAtomCache::offsetOfLastLookups());
2179 movePtr(ImmPtr(offset
), dest
);
2182 void MacroAssembler::loadAtomHash(Register id
, Register outHash
, Label
* done
) {
2183 Label doneInner
, fatInline
;
2187 move32(Imm32(JSString::FAT_INLINE_MASK
), outHash
);
2188 and32(Address(id
, JSString::offsetOfFlags()), outHash
);
2190 branch32(Assembler::Equal
, outHash
, Imm32(JSString::FAT_INLINE_MASK
),
2192 load32(Address(id
, NormalAtom::offsetOfHash()), outHash
);
2195 load32(Address(id
, FatInlineAtom::offsetOfHash()), outHash
);
2200 void MacroAssembler::loadAtomOrSymbolAndHash(ValueOperand value
, Register outId
,
2203 Label isString
, isSymbol
, isNull
, isUndefined
, done
, nonAtom
, atom
,
2207 ScratchTagScope
tag(*this, value
);
2208 splitTagForTest(value
, tag
);
2209 branchTestString(Assembler::Equal
, tag
, &isString
);
2210 branchTestSymbol(Assembler::Equal
, tag
, &isSymbol
);
2211 branchTestNull(Assembler::Equal
, tag
, &isNull
);
2212 branchTestUndefined(Assembler::NotEqual
, tag
, cacheMiss
);
2215 const JSAtomState
& names
= runtime()->names();
2216 movePropertyKey(PropertyKey::NonIntAtom(names
.undefined
), outId
);
2217 move32(Imm32(names
.undefined
->hash()), outHash
);
2221 movePropertyKey(PropertyKey::NonIntAtom(names
.null
), outId
);
2222 move32(Imm32(names
.null
->hash()), outHash
);
2226 unboxSymbol(value
, outId
);
2227 load32(Address(outId
, JS::Symbol::offsetOfHash()), outHash
);
2228 orPtr(Imm32(PropertyKey::SymbolTypeTag
), outId
);
2232 unboxString(value
, outId
);
2233 branchTest32(Assembler::Zero
, Address(outId
, JSString::offsetOfFlags()),
2234 Imm32(JSString::ATOM_BIT
), &nonAtom
);
2237 loadAtomHash(outId
, outHash
, &done
);
2240 loadStringToAtomCacheLastLookups(outHash
);
2242 // Compare each entry in the StringToAtomCache's lastLookups_ array
2243 size_t stringOffset
= StringToAtomCache::LastLookup::offsetOfString();
2244 branchPtr(Assembler::Equal
, Address(outHash
, stringOffset
), outId
,
2246 for (size_t i
= 0; i
< StringToAtomCache::NumLastLookups
- 1; ++i
) {
2247 addPtr(Imm32(sizeof(StringToAtomCache::LastLookup
)), outHash
);
2248 branchPtr(Assembler::Equal
, Address(outHash
, stringOffset
), outId
,
2252 // Couldn't find us in the cache, so fall back to the C++ call
2255 // We found a hit in the lastLookups_ array! Load the associated atom
2256 // and jump back up to our usual atom handling code
2257 bind(&lastLookupAtom
);
2258 size_t atomOffset
= StringToAtomCache::LastLookup::offsetOfAtom();
2259 loadPtr(Address(outHash
, atomOffset
), outId
);
2265 void MacroAssembler::emitExtractValueFromMegamorphicCacheEntry(
2266 Register obj
, Register entry
, Register scratch1
, Register scratch2
,
2267 ValueOperand output
, Label
* cacheHit
, Label
* cacheMiss
) {
2268 Label isMissing
, dynamicSlot
, protoLoopHead
, protoLoopTail
;
2270 // scratch2 = entry->numHops_
2271 load8ZeroExtend(Address(entry
, MegamorphicCache::Entry::offsetOfNumHops()),
2273 // if (scratch2 == NumHopsForMissingOwnProperty) goto cacheMiss
2274 branch32(Assembler::Equal
, scratch2
,
2275 Imm32(MegamorphicCache::Entry::NumHopsForMissingOwnProperty
),
2277 // if (scratch2 == NumHopsForMissingProperty) goto isMissing
2278 branch32(Assembler::Equal
, scratch2
,
2279 Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty
),
2282 // NOTE: Where this is called, `output` can actually alias `obj`, and before
2283 // the last cacheMiss branch above we can't write to `obj`, so we can't
2284 // use `output`'s scratch register there. However a cache miss is impossible
2285 // now, so we're free to use `output` as we like.
2286 Register outputScratch
= output
.scratchReg();
2287 if (!outputScratch
.aliases(obj
)) {
2288 // We're okay with paying this very slight extra cost to avoid a potential
2289 // footgun of writing to what callers understand as only an input register.
2290 movePtr(obj
, outputScratch
);
2292 branchTest32(Assembler::Zero
, scratch2
, scratch2
, &protoLoopTail
);
2293 bind(&protoLoopHead
);
2294 loadObjProto(outputScratch
, outputScratch
);
2295 branchSub32(Assembler::NonZero
, Imm32(1), scratch2
, &protoLoopHead
);
2296 bind(&protoLoopTail
);
2298 // scratch1 = entry->slotOffset()
2299 load32(Address(entry
, MegamorphicCacheEntry::offsetOfSlotOffset()), scratch1
);
2301 // scratch2 = slotOffset.offset()
2302 move32(scratch1
, scratch2
);
2303 rshift32(Imm32(TaggedSlotOffset::OffsetShift
), scratch2
);
2305 // if (!slotOffset.isFixedSlot()) goto dynamicSlot
2306 branchTest32(Assembler::Zero
, scratch1
,
2307 Imm32(TaggedSlotOffset::IsFixedSlotFlag
), &dynamicSlot
);
2308 // output = outputScratch[scratch2]
2309 loadValue(BaseIndex(outputScratch
, scratch2
, TimesOne
), output
);
2313 // output = outputScratch->slots_[scratch2]
2314 loadPtr(Address(outputScratch
, NativeObject::offsetOfSlots()), outputScratch
);
2315 loadValue(BaseIndex(outputScratch
, scratch2
, TimesOne
), output
);
2319 // output = undefined
2320 moveValue(UndefinedValue(), output
);
2324 template <typename IdOperandType
>
2325 void MacroAssembler::emitMegamorphicCacheLookupByValueCommon(
2326 IdOperandType id
, Register obj
, Register scratch1
, Register scratch2
,
2327 Register outEntryPtr
, Label
* cacheMiss
, Label
* cacheMissWithEntry
) {
2328 // A lot of this code is shared with emitMegamorphicCacheLookup. It would
2329 // be nice to be able to avoid the duplication here, but due to a few
2330 // differences like taking the id in a ValueOperand instead of being able
2331 // to bake it in as an immediate, and only needing a Register for the output
2332 // value, it seemed more awkward to read once it was deduplicated.
2334 // outEntryPtr = obj->shape()
2335 loadPtr(Address(obj
, JSObject::offsetOfShape()), outEntryPtr
);
2337 movePtr(outEntryPtr
, scratch2
);
2339 // outEntryPtr = (outEntryPtr >> 3) ^ (outEntryPtr >> 13) + idHash
2340 rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1
), outEntryPtr
);
2341 rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2
), scratch2
);
2342 xorPtr(scratch2
, outEntryPtr
);
2344 if constexpr (std::is_same
<IdOperandType
, ValueOperand
>::value
) {
2345 loadAtomOrSymbolAndHash(id
, scratch1
, scratch2
, cacheMiss
);
2347 static_assert(std::is_same
<IdOperandType
, Register
>::value
);
2348 movePtr(id
, scratch1
);
2349 loadAtomHash(scratch1
, scratch2
, nullptr);
2351 addPtr(scratch2
, outEntryPtr
);
2353 // outEntryPtr %= MegamorphicCache::NumEntries
2354 constexpr size_t cacheSize
= MegamorphicCache::NumEntries
;
2355 static_assert(mozilla::IsPowerOfTwo(cacheSize
));
2356 size_t cacheMask
= cacheSize
- 1;
2357 and32(Imm32(cacheMask
), outEntryPtr
);
2359 loadMegamorphicCache(scratch2
);
2360 // outEntryPtr = &scratch2->entries_[outEntryPtr]
2361 constexpr size_t entrySize
= sizeof(MegamorphicCache::Entry
);
2362 static_assert(sizeof(void*) == 4 || entrySize
== 24);
2363 if constexpr (sizeof(void*) == 4) {
2364 mul32(Imm32(entrySize
), outEntryPtr
);
2365 computeEffectiveAddress(BaseIndex(scratch2
, outEntryPtr
, TimesOne
,
2366 MegamorphicCache::offsetOfEntries()),
2369 computeEffectiveAddress(BaseIndex(outEntryPtr
, outEntryPtr
, TimesTwo
),
2371 computeEffectiveAddress(BaseIndex(scratch2
, outEntryPtr
, TimesEight
,
2372 MegamorphicCache::offsetOfEntries()),
2376 // if (outEntryPtr->key_ != scratch1) goto cacheMissWithEntry
2377 branchPtr(Assembler::NotEqual
,
2378 Address(outEntryPtr
, MegamorphicCache::Entry::offsetOfKey()),
2379 scratch1
, cacheMissWithEntry
);
2380 loadPtr(Address(obj
, JSObject::offsetOfShape()), scratch1
);
2382 // if (outEntryPtr->shape_ != scratch1) goto cacheMissWithEntry
2383 branchPtr(Assembler::NotEqual
,
2384 Address(outEntryPtr
, MegamorphicCache::Entry::offsetOfShape()),
2385 scratch1
, cacheMissWithEntry
);
2387 // scratch2 = scratch2->generation_
2388 load16ZeroExtend(Address(scratch2
, MegamorphicCache::offsetOfGeneration()),
2391 Address(outEntryPtr
, MegamorphicCache::Entry::offsetOfGeneration()),
2393 // if (outEntryPtr->generation_ != scratch2) goto cacheMissWithEntry
2394 branch32(Assembler::NotEqual
, scratch1
, scratch2
, cacheMissWithEntry
);
2397 void MacroAssembler::emitMegamorphicCacheLookup(
2398 PropertyKey id
, Register obj
, Register scratch1
, Register scratch2
,
2399 Register outEntryPtr
, ValueOperand output
, Label
* cacheHit
) {
2400 Label cacheMiss
, isMissing
, dynamicSlot
, protoLoopHead
, protoLoopTail
;
2402 // scratch1 = obj->shape()
2403 loadPtr(Address(obj
, JSObject::offsetOfShape()), scratch1
);
2405 movePtr(scratch1
, outEntryPtr
);
2406 movePtr(scratch1
, scratch2
);
2408 // outEntryPtr = (scratch1 >> 3) ^ (scratch1 >> 13) + hash(id)
2409 rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1
), outEntryPtr
);
2410 rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2
), scratch2
);
2411 xorPtr(scratch2
, outEntryPtr
);
2412 addPtr(Imm32(HashAtomOrSymbolPropertyKey(id
)), outEntryPtr
);
2414 // outEntryPtr %= MegamorphicCache::NumEntries
2415 constexpr size_t cacheSize
= MegamorphicCache::NumEntries
;
2416 static_assert(mozilla::IsPowerOfTwo(cacheSize
));
2417 size_t cacheMask
= cacheSize
- 1;
2418 and32(Imm32(cacheMask
), outEntryPtr
);
2420 loadMegamorphicCache(scratch2
);
2421 // outEntryPtr = &scratch2->entries_[outEntryPtr]
2422 constexpr size_t entrySize
= sizeof(MegamorphicCache::Entry
);
2423 static_assert(sizeof(void*) == 4 || entrySize
== 24);
2424 if constexpr (sizeof(void*) == 4) {
2425 mul32(Imm32(entrySize
), outEntryPtr
);
2426 computeEffectiveAddress(BaseIndex(scratch2
, outEntryPtr
, TimesOne
,
2427 MegamorphicCache::offsetOfEntries()),
2430 computeEffectiveAddress(BaseIndex(outEntryPtr
, outEntryPtr
, TimesTwo
),
2432 computeEffectiveAddress(BaseIndex(scratch2
, outEntryPtr
, TimesEight
,
2433 MegamorphicCache::offsetOfEntries()),
2437 // if (outEntryPtr->shape_ != scratch1) goto cacheMiss
2438 branchPtr(Assembler::NotEqual
,
2439 Address(outEntryPtr
, MegamorphicCache::Entry::offsetOfShape()),
2440 scratch1
, &cacheMiss
);
2442 // if (outEntryPtr->key_ != id) goto cacheMiss
2443 movePropertyKey(id
, scratch1
);
2444 branchPtr(Assembler::NotEqual
,
2445 Address(outEntryPtr
, MegamorphicCache::Entry::offsetOfKey()),
2446 scratch1
, &cacheMiss
);
2448 // scratch2 = scratch2->generation_
2449 load16ZeroExtend(Address(scratch2
, MegamorphicCache::offsetOfGeneration()),
2452 Address(outEntryPtr
, MegamorphicCache::Entry::offsetOfGeneration()),
2454 // if (outEntryPtr->generation_ != scratch2) goto cacheMiss
2455 branch32(Assembler::NotEqual
, scratch1
, scratch2
, &cacheMiss
);
2457 emitExtractValueFromMegamorphicCacheEntry(
2458 obj
, outEntryPtr
, scratch1
, scratch2
, output
, cacheHit
, &cacheMiss
);
2463 template <typename IdOperandType
>
2464 void MacroAssembler::emitMegamorphicCacheLookupByValue(
2465 IdOperandType id
, Register obj
, Register scratch1
, Register scratch2
,
2466 Register outEntryPtr
, ValueOperand output
, Label
* cacheHit
) {
2467 Label cacheMiss
, cacheMissWithEntry
;
2468 emitMegamorphicCacheLookupByValueCommon(id
, obj
, scratch1
, scratch2
,
2469 outEntryPtr
, &cacheMiss
,
2470 &cacheMissWithEntry
);
2471 emitExtractValueFromMegamorphicCacheEntry(obj
, outEntryPtr
, scratch1
,
2472 scratch2
, output
, cacheHit
,
2473 &cacheMissWithEntry
);
2475 xorPtr(outEntryPtr
, outEntryPtr
);
2476 bind(&cacheMissWithEntry
);
2479 template void MacroAssembler::emitMegamorphicCacheLookupByValue
<ValueOperand
>(
2480 ValueOperand id
, Register obj
, Register scratch1
, Register scratch2
,
2481 Register outEntryPtr
, ValueOperand output
, Label
* cacheHit
);
2483 template void MacroAssembler::emitMegamorphicCacheLookupByValue
<Register
>(
2484 Register id
, Register obj
, Register scratch1
, Register scratch2
,
2485 Register outEntryPtr
, ValueOperand output
, Label
* cacheHit
);
2487 void MacroAssembler::emitMegamorphicCacheLookupExists(
2488 ValueOperand id
, Register obj
, Register scratch1
, Register scratch2
,
2489 Register outEntryPtr
, Register output
, Label
* cacheHit
, bool hasOwn
) {
2490 Label cacheMiss
, cacheMissWithEntry
, cacheHitFalse
;
2491 emitMegamorphicCacheLookupByValueCommon(id
, obj
, scratch1
, scratch2
,
2492 outEntryPtr
, &cacheMiss
,
2493 &cacheMissWithEntry
);
2495 // scratch1 = outEntryPtr->numHops_
2497 Address(outEntryPtr
, MegamorphicCache::Entry::offsetOfNumHops()),
2500 branch32(Assembler::Equal
, scratch1
,
2501 Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty
),
2505 branch32(Assembler::NotEqual
, scratch1
, Imm32(0), &cacheHitFalse
);
2507 branch32(Assembler::Equal
, scratch1
,
2508 Imm32(MegamorphicCache::Entry::NumHopsForMissingOwnProperty
),
2509 &cacheMissWithEntry
);
2512 move32(Imm32(1), output
);
2515 bind(&cacheHitFalse
);
2516 xor32(output
, output
);
2520 xorPtr(outEntryPtr
, outEntryPtr
);
2521 bind(&cacheMissWithEntry
);
2524 void MacroAssembler::extractCurrentIndexAndKindFromIterator(Register iterator
,
2527 // Load iterator object
2528 Address
nativeIterAddr(iterator
,
2529 PropertyIteratorObject::offsetOfIteratorSlot());
2530 loadPrivate(nativeIterAddr
, outIndex
);
2532 // Compute offset of propertyCursor_ from propertiesBegin()
2533 loadPtr(Address(outIndex
, NativeIterator::offsetOfPropertyCursor()), outKind
);
2534 subPtr(Address(outIndex
, NativeIterator::offsetOfShapesEnd()), outKind
);
2536 // Compute offset of current index from indicesBegin(). Note that because
2537 // propertyCursor has already been incremented, this is actually the offset
2538 // of the next index. We adjust accordingly below.
2539 size_t indexAdjustment
=
2540 sizeof(GCPtr
<JSLinearString
*>) / sizeof(PropertyIndex
);
2541 if (indexAdjustment
!= 1) {
2542 MOZ_ASSERT(indexAdjustment
== 2);
2543 rshift32(Imm32(1), outKind
);
2546 // Load current index.
2547 loadPtr(Address(outIndex
, NativeIterator::offsetOfPropertiesEnd()), outIndex
);
2548 load32(BaseIndex(outIndex
, outKind
, Scale::TimesOne
,
2549 -int32_t(sizeof(PropertyIndex
))),
2553 move32(outIndex
, outKind
);
2554 rshift32(Imm32(PropertyIndex::KindShift
), outKind
);
2557 and32(Imm32(PropertyIndex::IndexMask
), outIndex
);
2560 template <typename IdType
>
2561 void MacroAssembler::emitMegamorphicCachedSetSlot(
2562 IdType id
, Register obj
, Register scratch1
,
2563 #ifndef JS_CODEGEN_X86 // See MegamorphicSetElement in LIROps.yaml
2564 Register scratch2
, Register scratch3
,
2566 ValueOperand value
, Label
* cacheHit
,
2567 void (*emitPreBarrier
)(MacroAssembler
&, const Address
&, MIRType
)) {
2568 Label cacheMiss
, dynamicSlot
, doAdd
, doSet
, doAddDynamic
, doSetDynamic
;
2570 #ifdef JS_CODEGEN_X86
2572 Register scratch2
= value
.typeReg();
2573 Register scratch3
= value
.payloadReg();
2576 // outEntryPtr = obj->shape()
2577 loadPtr(Address(obj
, JSObject::offsetOfShape()), scratch3
);
2579 movePtr(scratch3
, scratch2
);
2581 // scratch3 = (scratch3 >> 3) ^ (scratch3 >> 13) + idHash
2582 rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift1
), scratch3
);
2583 rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift2
), scratch2
);
2584 xorPtr(scratch2
, scratch3
);
2586 if constexpr (std::is_same
<IdType
, ValueOperand
>::value
) {
2587 loadAtomOrSymbolAndHash(id
, scratch1
, scratch2
, &cacheMiss
);
2588 addPtr(scratch2
, scratch3
);
2590 static_assert(std::is_same
<IdType
, PropertyKey
>::value
);
2591 addPtr(Imm32(HashAtomOrSymbolPropertyKey(id
)), scratch3
);
2592 movePropertyKey(id
, scratch1
);
2595 // scratch3 %= MegamorphicSetPropCache::NumEntries
2596 constexpr size_t cacheSize
= MegamorphicSetPropCache::NumEntries
;
2597 static_assert(mozilla::IsPowerOfTwo(cacheSize
));
2598 size_t cacheMask
= cacheSize
- 1;
2599 and32(Imm32(cacheMask
), scratch3
);
2601 loadMegamorphicSetPropCache(scratch2
);
2602 // scratch3 = &scratch2->entries_[scratch3]
2603 constexpr size_t entrySize
= sizeof(MegamorphicSetPropCache::Entry
);
2604 mul32(Imm32(entrySize
), scratch3
);
2605 computeEffectiveAddress(BaseIndex(scratch2
, scratch3
, TimesOne
,
2606 MegamorphicSetPropCache::offsetOfEntries()),
2609 // if (scratch3->key_ != scratch1) goto cacheMiss
2610 branchPtr(Assembler::NotEqual
,
2611 Address(scratch3
, MegamorphicSetPropCache::Entry::offsetOfKey()),
2612 scratch1
, &cacheMiss
);
2614 loadPtr(Address(obj
, JSObject::offsetOfShape()), scratch1
);
2615 // if (scratch3->shape_ != scratch1) goto cacheMiss
2616 branchPtr(Assembler::NotEqual
,
2617 Address(scratch3
, MegamorphicSetPropCache::Entry::offsetOfShape()),
2618 scratch1
, &cacheMiss
);
2620 // scratch2 = scratch2->generation_
2622 Address(scratch2
, MegamorphicSetPropCache::offsetOfGeneration()),
2625 Address(scratch3
, MegamorphicSetPropCache::Entry::offsetOfGeneration()),
2627 // if (scratch3->generation_ != scratch2) goto cacheMiss
2628 branch32(Assembler::NotEqual
, scratch1
, scratch2
, &cacheMiss
);
2630 // scratch2 = entry->slotOffset()
2632 Address(scratch3
, MegamorphicSetPropCache::Entry::offsetOfSlotOffset()),
2635 // scratch1 = slotOffset.offset()
2636 move32(scratch2
, scratch1
);
2637 rshift32(Imm32(TaggedSlotOffset::OffsetShift
), scratch1
);
2639 Address
afterShapePtr(scratch3
,
2640 MegamorphicSetPropCache::Entry::offsetOfAfterShape());
2642 // if (!slotOffset.isFixedSlot()) goto dynamicSlot
2643 branchTest32(Assembler::Zero
, scratch2
,
2644 Imm32(TaggedSlotOffset::IsFixedSlotFlag
), &dynamicSlot
);
2646 // Calculate slot address in scratch1. Jump to doSet if scratch3 == nullptr,
2647 // else jump (or fall-through) to doAdd.
2648 addPtr(obj
, scratch1
);
2649 branchPtr(Assembler::Equal
, afterShapePtr
, ImmPtr(nullptr), &doSet
);
2653 branchPtr(Assembler::Equal
, afterShapePtr
, ImmPtr(nullptr), &doSetDynamic
);
2655 Address
slotAddr(scratch1
, 0);
2657 // If entry->newCapacity_ is nonzero, we need to grow the slots on the
2658 // object. Otherwise just jump straight to a dynamic add.
2660 Address(scratch3
, MegamorphicSetPropCache::Entry::offsetOfNewCapacity()),
2662 branchTest32(Assembler::Zero
, scratch2
, scratch2
, &doAddDynamic
);
2664 AllocatableRegisterSet
regs(RegisterSet::Volatile());
2665 LiveRegisterSet
save(regs
.asLiveSet());
2667 PushRegsInMask(save
);
2669 regs
.takeUnchecked(scratch2
);
2671 if (regs
.has(obj
)) {
2672 regs
.takeUnchecked(obj
);
2673 tmp
= regs
.takeAnyGeneral();
2674 regs
.addUnchecked(obj
);
2676 tmp
= regs
.takeAnyGeneral();
2679 using Fn
= bool (*)(JSContext
* cx
, NativeObject
* obj
, uint32_t newCount
);
2680 setupUnalignedABICall(tmp
);
2684 passABIArg(scratch2
);
2685 callWithABI
<Fn
, NativeObject::growSlotsPure
>();
2686 storeCallPointerResult(scratch2
);
2687 PopRegsInMask(save
);
2689 branchIfFalseBool(scratch2
, &cacheMiss
);
2691 bind(&doAddDynamic
);
2692 addPtr(Address(obj
, NativeObject::offsetOfSlots()), scratch1
);
2695 // scratch3 = entry->afterShape()
2697 Address(scratch3
, MegamorphicSetPropCache::Entry::offsetOfAfterShape()),
2700 storeObjShape(scratch3
, obj
,
2701 [emitPreBarrier
](MacroAssembler
& masm
, const Address
& addr
) {
2702 emitPreBarrier(masm
, addr
, MIRType::Shape
);
2704 #ifdef JS_CODEGEN_X86
2707 storeValue(value
, slotAddr
);
2710 bind(&doSetDynamic
);
2711 addPtr(Address(obj
, NativeObject::offsetOfSlots()), scratch1
);
2713 guardedCallPreBarrier(slotAddr
, MIRType::Value
);
2715 #ifdef JS_CODEGEN_X86
2718 storeValue(value
, slotAddr
);
2722 #ifdef JS_CODEGEN_X86
2727 template void MacroAssembler::emitMegamorphicCachedSetSlot
<PropertyKey
>(
2728 PropertyKey id
, Register obj
, Register scratch1
,
2729 #ifndef JS_CODEGEN_X86 // See MegamorphicSetElement in LIROps.yaml
2730 Register scratch2
, Register scratch3
,
2732 ValueOperand value
, Label
* cacheHit
,
2733 void (*emitPreBarrier
)(MacroAssembler
&, const Address
&, MIRType
));
2735 template void MacroAssembler::emitMegamorphicCachedSetSlot
<ValueOperand
>(
2736 ValueOperand id
, Register obj
, Register scratch1
,
2737 #ifndef JS_CODEGEN_X86 // See MegamorphicSetElement in LIROps.yaml
2738 Register scratch2
, Register scratch3
,
2740 ValueOperand value
, Label
* cacheHit
,
2741 void (*emitPreBarrier
)(MacroAssembler
&, const Address
&, MIRType
));
2743 void MacroAssembler::guardNonNegativeIntPtrToInt32(Register reg
, Label
* fail
) {
2746 branchPtr(Assembler::NotSigned
, reg
, reg
, &ok
);
2747 assumeUnreachable("Unexpected negative value");
2752 branchPtr(Assembler::Above
, reg
, Imm32(INT32_MAX
), fail
);
2756 void MacroAssembler::loadArrayBufferByteLengthIntPtr(Register obj
,
2758 Address
slotAddr(obj
, ArrayBufferObject::offsetOfByteLengthSlot());
2759 loadPrivate(slotAddr
, output
);
2762 void MacroAssembler::loadArrayBufferViewByteOffsetIntPtr(Register obj
,
2764 Address
slotAddr(obj
, ArrayBufferViewObject::byteOffsetOffset());
2765 loadPrivate(slotAddr
, output
);
2768 void MacroAssembler::loadArrayBufferViewLengthIntPtr(Register obj
,
2770 Address
slotAddr(obj
, ArrayBufferViewObject::lengthOffset());
2771 loadPrivate(slotAddr
, output
);
2774 void MacroAssembler::loadDOMExpandoValueGuardGeneration(
2775 Register obj
, ValueOperand output
,
2776 JS::ExpandoAndGeneration
* expandoAndGeneration
, uint64_t generation
,
2778 loadPtr(Address(obj
, ProxyObject::offsetOfReservedSlots()),
2779 output
.scratchReg());
2780 loadValue(Address(output
.scratchReg(),
2781 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
2784 // Guard the ExpandoAndGeneration* matches the proxy's ExpandoAndGeneration
2786 branchTestValue(Assembler::NotEqual
, output
,
2787 PrivateValue(expandoAndGeneration
), fail
);
2789 // Guard expandoAndGeneration->generation matches the expected generation.
2790 Address
generationAddr(output
.payloadOrValueReg(),
2791 JS::ExpandoAndGeneration::offsetOfGeneration());
2792 branch64(Assembler::NotEqual
, generationAddr
, Imm64(generation
), fail
);
2794 // Load expandoAndGeneration->expando into the output Value register.
2795 loadValue(Address(output
.payloadOrValueReg(),
2796 JS::ExpandoAndGeneration::offsetOfExpando()),
2800 void MacroAssembler::loadJitActivation(Register dest
) {
2801 loadJSContext(dest
);
2802 loadPtr(Address(dest
, offsetof(JSContext
, activation_
)), dest
);
2805 void MacroAssembler::guardSpecificAtom(Register str
, JSAtom
* atom
,
2807 const LiveRegisterSet
& volatileRegs
,
2810 branchPtr(Assembler::Equal
, str
, ImmGCPtr(atom
), &done
);
2812 // The pointers are not equal, so if the input string is also an atom it
2813 // must be a different string.
2814 branchTest32(Assembler::NonZero
, Address(str
, JSString::offsetOfFlags()),
2815 Imm32(JSString::ATOM_BIT
), fail
);
2817 // Check the length.
2818 branch32(Assembler::NotEqual
, Address(str
, JSString::offsetOfLength()),
2819 Imm32(atom
->length()), fail
);
2821 // We have a non-atomized string with the same length. Call a helper
2822 // function to do the comparison.
2823 PushRegsInMask(volatileRegs
);
2825 using Fn
= bool (*)(JSString
* str1
, JSString
* str2
);
2826 setupUnalignedABICall(scratch
);
2827 movePtr(ImmGCPtr(atom
), scratch
);
2828 passABIArg(scratch
);
2830 callWithABI
<Fn
, EqualStringsHelperPure
>();
2831 storeCallPointerResult(scratch
);
2833 MOZ_ASSERT(!volatileRegs
.has(scratch
));
2834 PopRegsInMask(volatileRegs
);
2835 branchIfFalseBool(scratch
, fail
);
2840 void MacroAssembler::guardStringToInt32(Register str
, Register output
,
2842 LiveRegisterSet volatileRegs
,
2845 // Use indexed value as fast path if possible.
2846 loadStringIndexValue(str
, output
, &vmCall
);
2851 // Reserve space for holding the result int32_t of the call. Use
2852 // pointer-size to avoid misaligning the stack on 64-bit platforms.
2853 reserveStack(sizeof(uintptr_t));
2854 moveStackPtrTo(output
);
2856 volatileRegs
.takeUnchecked(scratch
);
2857 if (output
.volatile_()) {
2858 volatileRegs
.addUnchecked(output
);
2860 PushRegsInMask(volatileRegs
);
2862 using Fn
= bool (*)(JSContext
* cx
, JSString
* str
, int32_t* result
);
2863 setupUnalignedABICall(scratch
);
2864 loadJSContext(scratch
);
2865 passABIArg(scratch
);
2868 callWithABI
<Fn
, GetInt32FromStringPure
>();
2869 storeCallPointerResult(scratch
);
2871 PopRegsInMask(volatileRegs
);
2874 branchIfTrueBool(scratch
, &ok
);
2876 // OOM path, recovered by GetInt32FromStringPure.
2878 // Use addToStackPtr instead of freeStack as freeStack tracks stack height
2879 // flow-insensitively, and using it twice would confuse the stack height
2881 addToStackPtr(Imm32(sizeof(uintptr_t)));
2885 load32(Address(output
, 0), output
);
2886 freeStack(sizeof(uintptr_t));
2891 void MacroAssembler::generateBailoutTail(Register scratch
,
2892 Register bailoutInfo
) {
2893 Label bailoutFailed
;
2894 branchIfFalseBool(ReturnReg
, &bailoutFailed
);
2896 // Finish bailing out to Baseline.
2898 // Prepare a register set for use in this case.
2899 AllocatableGeneralRegisterSet
regs(GeneralRegisterSet::All());
2900 MOZ_ASSERT_IF(!IsHiddenSP(getStackPointer()),
2901 !regs
.has(AsRegister(getStackPointer())));
2902 regs
.take(bailoutInfo
);
2904 Register temp
= regs
.takeAny();
2907 // Assert the stack pointer points to the JitFrameLayout header. Copying
2910 loadPtr(Address(bailoutInfo
, offsetof(BaselineBailoutInfo
, incomingStack
)),
2912 branchStackPtr(Assembler::Equal
, temp
, &ok
);
2913 assumeUnreachable("Unexpected stack pointer value");
2917 Register copyCur
= regs
.takeAny();
2918 Register copyEnd
= regs
.takeAny();
2920 // Copy data onto stack.
2921 loadPtr(Address(bailoutInfo
, offsetof(BaselineBailoutInfo
, copyStackTop
)),
2924 Address(bailoutInfo
, offsetof(BaselineBailoutInfo
, copyStackBottom
)),
2930 branchPtr(Assembler::BelowOrEqual
, copyCur
, copyEnd
, &endOfCopy
);
2931 subPtr(Imm32(sizeof(uintptr_t)), copyCur
);
2932 subFromStackPtr(Imm32(sizeof(uintptr_t)));
2933 loadPtr(Address(copyCur
, 0), temp
);
2934 storePtr(temp
, Address(getStackPointer(), 0));
2939 loadPtr(Address(bailoutInfo
, offsetof(BaselineBailoutInfo
, resumeFramePtr
)),
2942 // Enter exit frame for the FinishBailoutToBaseline call.
2943 pushFrameDescriptor(FrameType::BaselineJS
);
2944 push(Address(bailoutInfo
, offsetof(BaselineBailoutInfo
, resumeAddr
)));
2946 // No GC things to mark on the stack, push a bare token.
2947 loadJSContext(scratch
);
2948 enterFakeExitFrame(scratch
, scratch
, ExitFrameType::Bare
);
2950 // Save needed values onto stack temporarily.
2951 push(Address(bailoutInfo
, offsetof(BaselineBailoutInfo
, resumeAddr
)));
2953 // Call a stub to free allocated memory and create arguments objects.
2954 using Fn
= bool (*)(BaselineBailoutInfo
* bailoutInfoArg
);
2955 setupUnalignedABICall(temp
);
2956 passABIArg(bailoutInfo
);
2957 callWithABI
<Fn
, FinishBailoutToBaseline
>(
2958 MoveOp::GENERAL
, CheckUnsafeCallWithABI::DontCheckHasExitFrame
);
2959 branchIfFalseBool(ReturnReg
, exceptionLabel());
2961 // Restore values where they need to be and resume execution.
2962 AllocatableGeneralRegisterSet
enterRegs(GeneralRegisterSet::All());
2963 MOZ_ASSERT(!enterRegs
.has(FramePointer
));
2964 Register jitcodeReg
= enterRegs
.takeAny();
2968 // Discard exit frame.
2969 addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
2974 bind(&bailoutFailed
);
2976 // jit::Bailout or jit::InvalidationBailout failed and returned false. The
2977 // Ion frame has already been discarded and the stack pointer points to the
2978 // JitFrameLayout header. Turn it into an ExitFrameLayout, similar to
2979 // EnsureUnwoundJitExitFrame, and call the exception handler.
2980 loadJSContext(scratch
);
2981 enterFakeExitFrame(scratch
, scratch
, ExitFrameType::UnwoundJit
);
2982 jump(exceptionLabel());
2986 void MacroAssembler::loadJitCodeRaw(Register func
, Register dest
) {
2987 static_assert(BaseScript::offsetOfJitCodeRaw() ==
2988 SelfHostedLazyScript::offsetOfJitCodeRaw(),
2989 "SelfHostedLazyScript and BaseScript must use same layout for "
2992 BaseScript::offsetOfJitCodeRaw() == wasm::JumpTableJitEntryOffset
,
2993 "Wasm exported functions jit entries must use same layout for "
2995 loadPrivate(Address(func
, JSFunction::offsetOfJitInfoOrScript()), dest
);
2996 loadPtr(Address(dest
, BaseScript::offsetOfJitCodeRaw()), dest
);
2999 void MacroAssembler::loadBaselineJitCodeRaw(Register func
, Register dest
,
3002 loadPrivate(Address(func
, JSFunction::offsetOfJitInfoOrScript()), dest
);
3004 branchIfScriptHasNoJitScript(dest
, failure
);
3006 loadJitScript(dest
, dest
);
3008 // Load BaselineScript
3009 loadPtr(Address(dest
, JitScript::offsetOfBaselineScript()), dest
);
3011 static_assert(BaselineDisabledScript
== 0x1);
3012 branchPtr(Assembler::BelowOrEqual
, dest
, ImmWord(BaselineDisabledScript
),
3016 // Load Baseline jitcode
3017 loadPtr(Address(dest
, BaselineScript::offsetOfMethod()), dest
);
3018 loadPtr(Address(dest
, JitCode::offsetOfCode()), dest
);
3021 void MacroAssembler::loadBaselineFramePtr(Register framePtr
, Register dest
) {
3022 if (framePtr
!= dest
) {
3023 movePtr(framePtr
, dest
);
3025 subPtr(Imm32(BaselineFrame::Size()), dest
);
3028 static const uint8_t* ContextInlinedICScriptPtr(CompileRuntime
* rt
) {
3029 return (static_cast<const uint8_t*>(rt
->mainContextPtr()) +
3030 JSContext::offsetOfInlinedICScript());
3033 void MacroAssembler::storeICScriptInJSContext(Register icScript
) {
3034 storePtr(icScript
, AbsoluteAddress(ContextInlinedICScriptPtr(runtime())));
3037 void MacroAssembler::handleFailure() {
3038 // Re-entry code is irrelevant because the exception will leave the
3039 // running function and never come back
3040 TrampolinePtr excTail
= runtime()->jitRuntime()->getExceptionTail();
3044 void MacroAssembler::assumeUnreachable(const char* output
) {
3045 #ifdef JS_MASM_VERBOSE
3046 if (!IsCompilingWasm()) {
3047 AllocatableRegisterSet
regs(RegisterSet::Volatile());
3048 LiveRegisterSet
save(regs
.asLiveSet());
3049 PushRegsInMask(save
);
3050 Register temp
= regs
.takeAnyGeneral();
3052 using Fn
= void (*)(const char* output
);
3053 setupUnalignedABICall(temp
);
3054 movePtr(ImmPtr(output
), temp
);
3056 callWithABI
<Fn
, AssumeUnreachable
>(MoveOp::GENERAL
,
3057 CheckUnsafeCallWithABI::DontCheckOther
);
3059 PopRegsInMask(save
);
3066 void MacroAssembler::printf(const char* output
) {
3067 #ifdef JS_MASM_VERBOSE
3068 AllocatableRegisterSet
regs(RegisterSet::Volatile());
3069 LiveRegisterSet
save(regs
.asLiveSet());
3070 PushRegsInMask(save
);
3072 Register temp
= regs
.takeAnyGeneral();
3074 using Fn
= void (*)(const char* output
);
3075 setupUnalignedABICall(temp
);
3076 movePtr(ImmPtr(output
), temp
);
3078 callWithABI
<Fn
, Printf0
>();
3080 PopRegsInMask(save
);
3084 void MacroAssembler::printf(const char* output
, Register value
) {
3085 #ifdef JS_MASM_VERBOSE
3086 AllocatableRegisterSet
regs(RegisterSet::Volatile());
3087 LiveRegisterSet
save(regs
.asLiveSet());
3088 PushRegsInMask(save
);
3090 regs
.takeUnchecked(value
);
3092 Register temp
= regs
.takeAnyGeneral();
3094 using Fn
= void (*)(const char* output
, uintptr_t value
);
3095 setupUnalignedABICall(temp
);
3096 movePtr(ImmPtr(output
), temp
);
3099 callWithABI
<Fn
, Printf1
>();
3101 PopRegsInMask(save
);
3105 void MacroAssembler::convertInt32ValueToDouble(ValueOperand val
) {
3107 branchTestInt32(Assembler::NotEqual
, val
, &done
);
3108 unboxInt32(val
, val
.scratchReg());
3109 ScratchDoubleScope
fpscratch(*this);
3110 convertInt32ToDouble(val
.scratchReg(), fpscratch
);
3111 boxDouble(fpscratch
, val
, fpscratch
);
3115 void MacroAssembler::convertValueToFloatingPoint(ValueOperand value
,
3116 FloatRegister output
,
3118 MIRType outputType
) {
3119 Label isDouble
, isInt32
, isBool
, isNull
, done
;
3122 ScratchTagScope
tag(*this, value
);
3123 splitTagForTest(value
, tag
);
3125 branchTestDouble(Assembler::Equal
, tag
, &isDouble
);
3126 branchTestInt32(Assembler::Equal
, tag
, &isInt32
);
3127 branchTestBoolean(Assembler::Equal
, tag
, &isBool
);
3128 branchTestNull(Assembler::Equal
, tag
, &isNull
);
3129 branchTestUndefined(Assembler::NotEqual
, tag
, fail
);
3132 // fall-through: undefined
3133 loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output
,
3138 loadConstantFloatingPoint(0.0, 0.0f
, output
, outputType
);
3142 boolValueToFloatingPoint(value
, output
, outputType
);
3146 int32ValueToFloatingPoint(value
, output
, outputType
);
3149 // On some non-multiAlias platforms, unboxDouble may use the scratch register,
3150 // so do not merge code paths here.
3152 if (outputType
== MIRType::Float32
&& hasMultiAlias()) {
3153 ScratchDoubleScope
tmp(*this);
3154 unboxDouble(value
, tmp
);
3155 convertDoubleToFloat32(tmp
, output
);
3157 FloatRegister tmp
= output
.asDouble();
3158 unboxDouble(value
, tmp
);
3159 if (outputType
== MIRType::Float32
) {
3160 convertDoubleToFloat32(tmp
, output
);
3167 void MacroAssembler::outOfLineTruncateSlow(FloatRegister src
, Register dest
,
3168 bool widenFloatToDouble
,
3170 wasm::BytecodeOffset callOffset
) {
3171 if (compilingWasm
) {
3174 int32_t framePushedAfterInstance
= framePushed();
3176 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
3177 defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
3178 defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
3179 ScratchDoubleScope
fpscratch(*this);
3180 if (widenFloatToDouble
) {
3181 convertFloat32ToDouble(src
, fpscratch
);
3184 #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
3185 FloatRegister srcSingle
;
3186 if (widenFloatToDouble
) {
3187 MOZ_ASSERT(src
.isSingle());
3189 src
= src
.asDouble();
3191 convertFloat32ToDouble(srcSingle
, src
);
3195 MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
3198 MOZ_ASSERT(src
.isDouble());
3200 if (compilingWasm
) {
3201 int32_t instanceOffset
= framePushed() - framePushedAfterInstance
;
3203 passABIArg(src
, MoveOp::DOUBLE
);
3204 callWithABI(callOffset
, wasm::SymbolicAddress::ToInt32
,
3205 mozilla::Some(instanceOffset
));
3207 using Fn
= int32_t (*)(double);
3208 setupUnalignedABICall(dest
);
3209 passABIArg(src
, MoveOp::DOUBLE
);
3210 callWithABI
<Fn
, JS::ToInt32
>(MoveOp::GENERAL
,
3211 CheckUnsafeCallWithABI::DontCheckOther
);
3213 storeCallInt32Result(dest
);
3215 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
3216 defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
3217 defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
3219 #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
3220 if (widenFloatToDouble
) {
3224 MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
3227 if (compilingWasm
) {
3232 void MacroAssembler::convertDoubleToInt(FloatRegister src
, Register output
,
3233 FloatRegister temp
, Label
* truncateFail
,
3235 IntConversionBehavior behavior
) {
3237 case IntConversionBehavior::Normal
:
3238 case IntConversionBehavior::NegativeZeroCheck
:
3239 convertDoubleToInt32(
3241 behavior
== IntConversionBehavior::NegativeZeroCheck
);
3243 case IntConversionBehavior::Truncate
:
3244 branchTruncateDoubleMaybeModUint32(src
, output
,
3245 truncateFail
? truncateFail
: fail
);
3247 case IntConversionBehavior::ClampToUint8
:
3248 // Clamping clobbers the input register, so use a temp.
3250 moveDouble(src
, temp
);
3252 clampDoubleToUint8(temp
, output
);
3257 void MacroAssembler::convertValueToInt(
3258 ValueOperand value
, Label
* handleStringEntry
, Label
* handleStringRejoin
,
3259 Label
* truncateDoubleSlow
, Register stringReg
, FloatRegister temp
,
3260 Register output
, Label
* fail
, IntConversionBehavior behavior
,
3261 IntConversionInputKind conversion
) {
3262 Label done
, isInt32
, isBool
, isDouble
, isNull
, isString
;
3264 bool handleStrings
= (behavior
== IntConversionBehavior::Truncate
||
3265 behavior
== IntConversionBehavior::ClampToUint8
) &&
3266 handleStringEntry
&& handleStringRejoin
;
3268 MOZ_ASSERT_IF(handleStrings
, conversion
== IntConversionInputKind::Any
);
3271 ScratchTagScope
tag(*this, value
);
3272 splitTagForTest(value
, tag
);
3274 branchTestInt32(Equal
, tag
, &isInt32
);
3275 if (conversion
== IntConversionInputKind::Any
||
3276 conversion
== IntConversionInputKind::NumbersOrBoolsOnly
) {
3277 branchTestBoolean(Equal
, tag
, &isBool
);
3279 branchTestDouble(Equal
, tag
, &isDouble
);
3281 if (conversion
== IntConversionInputKind::Any
) {
3282 // If we are not truncating, we fail for anything that's not
3283 // null. Otherwise we might be able to handle strings and undefined.
3285 case IntConversionBehavior::Normal
:
3286 case IntConversionBehavior::NegativeZeroCheck
:
3287 branchTestNull(Assembler::NotEqual
, tag
, fail
);
3290 case IntConversionBehavior::Truncate
:
3291 case IntConversionBehavior::ClampToUint8
:
3292 branchTestNull(Equal
, tag
, &isNull
);
3293 if (handleStrings
) {
3294 branchTestString(Equal
, tag
, &isString
);
3296 branchTestUndefined(Assembler::NotEqual
, tag
, fail
);
3304 // The value is null or undefined in truncation contexts - just emit 0.
3305 if (conversion
== IntConversionInputKind::Any
) {
3306 if (isNull
.used()) {
3309 mov(ImmWord(0), output
);
3313 // |output| needs to be different from |stringReg| to load string indices.
3314 bool handleStringIndices
= handleStrings
&& output
!= stringReg
;
3316 // First try loading a string index. If that fails, try converting a string
3317 // into a double, then jump to the double case.
3318 Label handleStringIndex
;
3319 if (handleStrings
) {
3321 unboxString(value
, stringReg
);
3322 if (handleStringIndices
) {
3323 loadStringIndexValue(stringReg
, output
, handleStringEntry
);
3324 jump(&handleStringIndex
);
3326 jump(handleStringEntry
);
3330 // Try converting double into integer.
3331 if (isDouble
.used() || handleStrings
) {
3332 if (isDouble
.used()) {
3334 unboxDouble(value
, temp
);
3337 if (handleStrings
) {
3338 bind(handleStringRejoin
);
3341 convertDoubleToInt(temp
, output
, temp
, truncateDoubleSlow
, fail
, behavior
);
3345 // Just unbox a bool, the result is 0 or 1.
3346 if (isBool
.used()) {
3348 unboxBoolean(value
, output
);
3352 // Integers can be unboxed.
3353 if (isInt32
.used() || handleStringIndices
) {
3354 if (isInt32
.used()) {
3356 unboxInt32(value
, output
);
3359 if (handleStringIndices
) {
3360 bind(&handleStringIndex
);
3363 if (behavior
== IntConversionBehavior::ClampToUint8
) {
3364 clampIntToUint8(output
);
3371 void MacroAssembler::finish() {
3372 if (failureLabel_
.used()) {
3373 bind(&failureLabel_
);
3377 MacroAssemblerSpecific::finish();
3380 size() <= MaxCodeBytesPerProcess
,
3381 "AssemblerBuffer should ensure we don't exceed MaxCodeBytesPerProcess");
3383 if (bytesNeeded() > MaxCodeBytesPerProcess
) {
3388 void MacroAssembler::link(JitCode
* code
) {
3390 linkProfilerCallSites(code
);
3393 MacroAssembler::AutoProfilerCallInstrumentation::
3394 AutoProfilerCallInstrumentation(MacroAssembler
& masm
) {
3395 if (!masm
.emitProfilingInstrumentation_
) {
3399 Register reg
= CallTempReg0
;
3400 Register reg2
= CallTempReg1
;
3404 CodeOffset label
= masm
.movWithPatch(ImmWord(uintptr_t(-1)), reg
);
3405 masm
.loadJSContext(reg2
);
3406 masm
.loadPtr(Address(reg2
, offsetof(JSContext
, profilingActivation_
)), reg2
);
3408 Address(reg2
, JitActivation::offsetOfLastProfilingCallSite()));
3410 masm
.appendProfilerCallSite(label
);
3416 void MacroAssembler::linkProfilerCallSites(JitCode
* code
) {
3417 for (size_t i
= 0; i
< profilerCallSites_
.length(); i
++) {
3418 CodeOffset offset
= profilerCallSites_
[i
];
3419 CodeLocationLabel
location(code
, offset
);
3420 PatchDataWithValueCheck(location
, ImmPtr(location
.raw()),
3425 void MacroAssembler::alignJitStackBasedOnNArgs(Register nargs
,
3426 bool countIncludesThis
) {
3427 // The stack should already be aligned to the size of a value.
3428 assertStackAlignment(sizeof(Value
), 0);
3430 static_assert(JitStackValueAlignment
== 1 || JitStackValueAlignment
== 2,
3431 "JitStackValueAlignment is either 1 or 2.");
3432 if (JitStackValueAlignment
== 1) {
3435 // A jit frame is composed of the following:
3437 // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
3438 // \________JitFrameLayout_________/
3439 // (The stack grows this way --->)
3441 // We want to ensure that |raddr|, the return address, is 16-byte aligned.
3442 // (Note: if 8-byte alignment was sufficient, we would have already
3445 // JitFrameLayout does not affect the alignment, so we can ignore it.
3446 static_assert(sizeof(JitFrameLayout
) % JitStackAlignment
== 0,
3447 "JitFrameLayout doesn't affect stack alignment");
3449 // Therefore, we need to ensure that |this| is aligned.
3450 // This implies that |argN| must be aligned if N is even,
3451 // and offset by |sizeof(Value)| if N is odd.
3453 // Depending on the context of the caller, it may be easier to pass in a
3454 // register that has already been modified to include |this|. If that is the
3455 // case, we want to flip the direction of the test.
3456 Assembler::Condition condition
=
3457 countIncludesThis
? Assembler::NonZero
: Assembler::Zero
;
3459 Label alignmentIsOffset
, end
;
3460 branchTestPtr(condition
, nargs
, Imm32(1), &alignmentIsOffset
);
3462 // |argN| should be aligned to 16 bytes.
3463 andToStackPtr(Imm32(~(JitStackAlignment
- 1)));
3466 // |argN| should be offset by 8 bytes from 16-byte alignment.
3467 // We already know that it is 8-byte aligned, so the only possibilities are:
3468 // a) It is 16-byte aligned, and we must offset it by 8 bytes.
3469 // b) It is not 16-byte aligned, and therefore already has the right offset.
3470 // Therefore, we test to see if it is 16-byte aligned, and adjust it if it is.
3471 bind(&alignmentIsOffset
);
3472 branchTestStackPtr(Assembler::NonZero
, Imm32(JitStackAlignment
- 1), &end
);
3473 subFromStackPtr(Imm32(sizeof(Value
)));
3478 void MacroAssembler::alignJitStackBasedOnNArgs(uint32_t argc
,
3479 bool countIncludesThis
) {
3480 // The stack should already be aligned to the size of a value.
3481 assertStackAlignment(sizeof(Value
), 0);
3483 static_assert(JitStackValueAlignment
== 1 || JitStackValueAlignment
== 2,
3484 "JitStackValueAlignment is either 1 or 2.");
3485 if (JitStackValueAlignment
== 1) {
3489 // See above for full explanation.
3490 uint32_t nArgs
= argc
+ !countIncludesThis
;
3491 if (nArgs
% 2 == 0) {
3492 // |argN| should be 16-byte aligned
3493 andToStackPtr(Imm32(~(JitStackAlignment
- 1)));
3495 // |argN| must be 16-byte aligned if argc is even,
3496 // and offset by 8 if argc is odd.
3498 branchTestStackPtr(Assembler::NonZero
, Imm32(JitStackAlignment
- 1), &end
);
3499 subFromStackPtr(Imm32(sizeof(Value
)));
3501 assertStackAlignment(JitStackAlignment
, sizeof(Value
));
3505 // ===============================================================
3507 MacroAssembler::MacroAssembler(TempAllocator
& alloc
,
3508 CompileRuntime
* maybeRuntime
,
3509 CompileRealm
* maybeRealm
)
3510 : maybeRuntime_(maybeRuntime
),
3511 maybeRealm_(maybeRealm
),
3512 wasmMaxOffsetGuardLimit_(0),
3517 dynamicAlignment_(false),
3518 emitProfilingInstrumentation_(false) {
3519 moveResolver_
.setAllocator(alloc
);
3522 StackMacroAssembler::StackMacroAssembler(JSContext
* cx
, TempAllocator
& alloc
)
3523 : MacroAssembler(alloc
, CompileRuntime::get(cx
->runtime()),
3524 CompileRealm::get(cx
->realm())) {}
3526 IonHeapMacroAssembler::IonHeapMacroAssembler(TempAllocator
& alloc
,
3527 CompileRealm
* realm
)
3528 : MacroAssembler(alloc
, realm
->runtime(), realm
) {
3529 MOZ_ASSERT(CurrentThreadIsIonCompiling());
3532 WasmMacroAssembler::WasmMacroAssembler(TempAllocator
& alloc
, bool limitedSize
)
3533 : MacroAssembler(alloc
) {
3534 #if defined(JS_CODEGEN_ARM64)
3535 // Stubs + builtins + the baseline compiler all require the native SP,
3537 SetStackPointer64(sp
);
3540 setUnlimitedBuffer();
3544 WasmMacroAssembler::WasmMacroAssembler(TempAllocator
& alloc
,
3545 const wasm::ModuleEnvironment
& env
,
3547 : MacroAssembler(alloc
) {
3548 #if defined(JS_CODEGEN_ARM64)
3549 // Stubs + builtins + the baseline compiler all require the native SP,
3551 SetStackPointer64(sp
);
3553 setWasmMaxOffsetGuardLimit(
3554 wasm::GetMaxOffsetGuardLimit(env
.hugeMemoryEnabled()));
3556 setUnlimitedBuffer();
3560 bool MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr
,
3561 AutoSaveLiveRegisters
& save
) {
3562 return buildOOLFakeExitFrame(fakeReturnAddr
);
3565 #ifndef JS_CODEGEN_ARM64
3566 void MacroAssembler::subFromStackPtr(Register reg
) {
3567 subPtr(reg
, getStackPointer());
3569 #endif // JS_CODEGEN_ARM64
3571 //{{{ check_macroassembler_style
3572 // ===============================================================
3573 // Stack manipulation functions.
3575 void MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set
) {
3576 PushRegsInMask(LiveRegisterSet(set
.set(), FloatRegisterSet()));
3579 void MacroAssembler::PopRegsInMask(LiveRegisterSet set
) {
3580 PopRegsInMaskIgnore(set
, LiveRegisterSet());
3583 void MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set
) {
3584 PopRegsInMask(LiveRegisterSet(set
.set(), FloatRegisterSet()));
3587 void MacroAssembler::Push(PropertyKey key
, Register scratchReg
) {
3588 if (key
.isGCThing()) {
3589 // If we're pushing a gcthing, then we can't just push the tagged key
3590 // value since the GC won't have any idea that the push instruction
3591 // carries a reference to a gcthing. Need to unpack the pointer,
3592 // push it using ImmGCPtr, and then rematerialize the PropertyKey at
3595 if (key
.isString()) {
3596 JSString
* str
= key
.toString();
3597 MOZ_ASSERT((uintptr_t(str
) & PropertyKey::TypeMask
) == 0);
3598 static_assert(PropertyKey::StringTypeTag
== 0,
3599 "need to orPtr StringTypeTag if it's not 0");
3600 Push(ImmGCPtr(str
));
3602 MOZ_ASSERT(key
.isSymbol());
3603 movePropertyKey(key
, scratchReg
);
3607 MOZ_ASSERT(key
.isInt());
3608 Push(ImmWord(key
.asRawBits()));
3612 void MacroAssembler::movePropertyKey(PropertyKey key
, Register dest
) {
3613 if (key
.isGCThing()) {
3614 // See comment in |Push(PropertyKey, ...)| above for an explanation.
3615 if (key
.isString()) {
3616 JSString
* str
= key
.toString();
3617 MOZ_ASSERT((uintptr_t(str
) & PropertyKey::TypeMask
) == 0);
3618 static_assert(PropertyKey::StringTypeTag
== 0,
3619 "need to orPtr JSID_TYPE_STRING tag if it's not 0");
3620 movePtr(ImmGCPtr(str
), dest
);
3622 MOZ_ASSERT(key
.isSymbol());
3623 JS::Symbol
* sym
= key
.toSymbol();
3624 movePtr(ImmGCPtr(sym
), dest
);
3625 orPtr(Imm32(PropertyKey::SymbolTypeTag
), dest
);
3628 MOZ_ASSERT(key
.isInt());
3629 movePtr(ImmWord(key
.asRawBits()), dest
);
3633 void MacroAssembler::Push(TypedOrValueRegister v
) {
3636 } else if (IsFloatingPointType(v
.type())) {
3637 FloatRegister reg
= v
.typedReg().fpu();
3638 if (v
.type() == MIRType::Float32
) {
3639 ScratchDoubleScope
fpscratch(*this);
3640 convertFloat32ToDouble(reg
, fpscratch
);
3641 PushBoxed(fpscratch
);
3646 Push(ValueTypeFromMIRType(v
.type()), v
.typedReg().gpr());
3650 void MacroAssembler::Push(const ConstantOrRegister
& v
) {
3658 void MacroAssembler::Push(const Address
& addr
) {
3660 framePushed_
+= sizeof(uintptr_t);
3663 void MacroAssembler::Push(const ValueOperand
& val
) {
3665 framePushed_
+= sizeof(Value
);
3668 void MacroAssembler::Push(const Value
& val
) {
3670 framePushed_
+= sizeof(Value
);
3673 void MacroAssembler::Push(JSValueType type
, Register reg
) {
3674 pushValue(type
, reg
);
3675 framePushed_
+= sizeof(Value
);
3678 void MacroAssembler::Push(const Register64 reg
) {
3679 #if JS_BITS_PER_WORD == 64
3682 MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Big-endian not supported.");
3688 void MacroAssembler::PushEmptyRooted(VMFunctionData::RootType rootType
) {
3690 case VMFunctionData::RootNone
:
3691 MOZ_CRASH("Handle must have root type");
3692 case VMFunctionData::RootObject
:
3693 case VMFunctionData::RootString
:
3694 case VMFunctionData::RootCell
:
3695 case VMFunctionData::RootBigInt
:
3696 Push(ImmPtr(nullptr));
3698 case VMFunctionData::RootValue
:
3699 Push(UndefinedValue());
3701 case VMFunctionData::RootId
:
3702 Push(ImmWord(JS::PropertyKey::Void().asRawBits()));
3707 void MacroAssembler::popRooted(VMFunctionData::RootType rootType
,
3708 Register cellReg
, const ValueOperand
& valueReg
) {
3710 case VMFunctionData::RootNone
:
3711 MOZ_CRASH("Handle must have root type");
3712 case VMFunctionData::RootObject
:
3713 case VMFunctionData::RootString
:
3714 case VMFunctionData::RootCell
:
3715 case VMFunctionData::RootId
:
3716 case VMFunctionData::RootBigInt
:
3719 case VMFunctionData::RootValue
:
3725 void MacroAssembler::adjustStack(int amount
) {
3728 } else if (amount
< 0) {
3729 reserveStack(-amount
);
3733 void MacroAssembler::freeStack(uint32_t amount
) {
3734 MOZ_ASSERT(amount
<= framePushed_
);
3736 addToStackPtr(Imm32(amount
));
3738 framePushed_
-= amount
;
3741 void MacroAssembler::freeStack(Register amount
) { addToStackPtr(amount
); }
3743 // ===============================================================
3744 // ABI function calls.
3745 template <class ABIArgGeneratorT
>
3746 void MacroAssembler::setupABICallHelper() {
3748 MOZ_ASSERT(!inCall_
);
3756 // Reinitialize the ABIArg generator.
3757 abiArgs_
= ABIArgGeneratorT();
3759 #if defined(JS_CODEGEN_ARM)
3760 // On ARM, we need to know what ABI we are using, either in the
3761 // simulator, or based on the configure flags.
3762 # if defined(JS_SIMULATOR_ARM)
3763 abiArgs_
.setUseHardFp(UseHardFpABI());
3764 # elif defined(JS_CODEGEN_ARM_HARDFP)
3765 abiArgs_
.setUseHardFp(true);
3767 abiArgs_
.setUseHardFp(false);
3771 #if defined(JS_CODEGEN_MIPS32)
3772 // On MIPS, the system ABI use general registers pairs to encode double
3773 // arguments, after one or 2 integer-like arguments. Unfortunately, the
3774 // Lowering phase is not capable to express it at the moment. So we enforce
3775 // the system ABI here.
3776 abiArgs_
.enforceO32ABI();
3780 void MacroAssembler::setupNativeABICall() {
3781 setupABICallHelper
<ABIArgGenerator
>();
3784 void MacroAssembler::setupWasmABICall() {
3785 MOZ_ASSERT(IsCompilingWasm(), "non-wasm should use setupAlignedABICall");
3786 setupABICallHelper
<WasmABIArgGenerator
>();
3788 #if defined(JS_CODEGEN_ARM)
3789 // The builtin thunk does the FP -> GPR moving on soft-FP, so
3790 // use hard fp unconditionally.
3791 abiArgs_
.setUseHardFp(true);
3793 dynamicAlignment_
= false;
3796 void MacroAssembler::setupAlignedABICall() {
3797 MOZ_ASSERT(!IsCompilingWasm(), "wasm should use setupWasmABICall");
3798 setupNativeABICall();
3799 dynamicAlignment_
= false;
3802 void MacroAssembler::passABIArg(const MoveOperand
& from
, MoveOp::Type type
) {
3803 MOZ_ASSERT(inCall_
);
3804 appendSignatureType(type
);
3808 case MoveOp::FLOAT32
:
3809 arg
= abiArgs_
.next(MIRType::Float32
);
3811 case MoveOp::DOUBLE
:
3812 arg
= abiArgs_
.next(MIRType::Double
);
3814 case MoveOp::GENERAL
:
3815 arg
= abiArgs_
.next(MIRType::Pointer
);
3818 MOZ_CRASH("Unexpected argument type");
3821 MoveOperand
to(*this, arg
);
3829 propagateOOM(moveResolver_
.addMove(from
, to
, type
));
3832 void MacroAssembler::callWithABINoProfiler(void* fun
, MoveOp::Type result
,
3833 CheckUnsafeCallWithABI check
) {
3834 appendSignatureType(result
);
3836 fun
= Simulator::RedirectNativeFunction(fun
, signature());
3839 uint32_t stackAdjust
;
3840 callWithABIPre(&stackAdjust
);
3843 if (check
== CheckUnsafeCallWithABI::Check
) {
3845 loadJSContext(ReturnReg
);
3846 Address
flagAddr(ReturnReg
, JSContext::offsetOfInUnsafeCallWithABI());
3847 store32(Imm32(1), flagAddr
);
3849 // On arm64, SP may be < PSP now (that's OK).
3850 // eg testcase: tests/bug1375074.js
3856 callWithABIPost(stackAdjust
, result
);
3859 if (check
== CheckUnsafeCallWithABI::Check
) {
3862 loadJSContext(ReturnReg
);
3863 Address
flagAddr(ReturnReg
, JSContext::offsetOfInUnsafeCallWithABI());
3864 branch32(Assembler::Equal
, flagAddr
, Imm32(0), &ok
);
3865 assumeUnreachable("callWithABI: callee did not use AutoUnsafeCallWithABI");
3868 // On arm64, SP may be < PSP now (that's OK).
3869 // eg testcase: tests/bug1375074.js
3874 CodeOffset
MacroAssembler::callWithABI(wasm::BytecodeOffset bytecode
,
3875 wasm::SymbolicAddress imm
,
3876 mozilla::Maybe
<int32_t> instanceOffset
,
3877 MoveOp::Type result
) {
3878 MOZ_ASSERT(wasm::NeedsBuiltinThunk(imm
));
3880 uint32_t stackAdjust
;
3881 callWithABIPre(&stackAdjust
, /* callFromWasm = */ true);
3883 // The instance register is used in builtin thunks and must be set.
3884 if (instanceOffset
) {
3885 loadPtr(Address(getStackPointer(), *instanceOffset
+ stackAdjust
),
3888 MOZ_CRASH("instanceOffset is Nothing only for unsupported abi calls.");
3890 CodeOffset raOffset
= call(
3891 wasm::CallSiteDesc(bytecode
.offset(), wasm::CallSite::Symbolic
), imm
);
3893 callWithABIPost(stackAdjust
, result
, /* callFromWasm = */ true);
3898 void MacroAssembler::callDebugWithABI(wasm::SymbolicAddress imm
,
3899 MoveOp::Type result
) {
3900 MOZ_ASSERT(!wasm::NeedsBuiltinThunk(imm
));
3901 uint32_t stackAdjust
;
3902 callWithABIPre(&stackAdjust
, /* callFromWasm = */ false);
3904 callWithABIPost(stackAdjust
, result
, /* callFromWasm = */ false);
3907 // ===============================================================
3908 // Exit frame footer.
3910 void MacroAssembler::linkExitFrame(Register cxreg
, Register scratch
) {
3911 loadPtr(Address(cxreg
, JSContext::offsetOfActivation()), scratch
);
3912 storeStackPtr(Address(scratch
, JitActivation::offsetOfPackedExitFP()));
3915 // ===============================================================
3916 // Simple value-shuffling helpers, to hide MoveResolver verbosity
3919 void MacroAssembler::moveRegPair(Register src0
, Register src1
, Register dst0
,
3920 Register dst1
, MoveOp::Type type
) {
3921 MoveResolver
& moves
= moveResolver();
3923 propagateOOM(moves
.addMove(MoveOperand(src0
), MoveOperand(dst0
), type
));
3926 propagateOOM(moves
.addMove(MoveOperand(src1
), MoveOperand(dst1
), type
));
3928 propagateOOM(moves
.resolve());
3933 MoveEmitter
emitter(*this);
3934 emitter
.emit(moves
);
3938 // ===============================================================
3939 // Arithmetic functions
3941 void MacroAssembler::pow32(Register base
, Register power
, Register dest
,
3942 Register temp1
, Register temp2
, Label
* onOver
) {
3943 // Inline int32-specialized implementation of js::powi with overflow
3946 move32(Imm32(1), dest
); // result = 1
3948 // x^y where x == 1 returns 1 for any y.
3950 branch32(Assembler::Equal
, base
, Imm32(1), &done
);
3952 move32(base
, temp1
); // runningSquare = x
3953 move32(power
, temp2
); // n = y
3955 // x^y where y < 0 returns a non-int32 value for any x != 1. Except when y is
3956 // large enough so that the result is no longer representable as a double with
3957 // fractional parts. We can't easily determine when y is too large, so we bail
3959 // Note: it's important for this condition to match the code in CacheIR.cpp
3960 // (CanAttachInt32Pow) to prevent failure loops.
3962 branchTest32(Assembler::NotSigned
, power
, power
, &start
);
3968 // runningSquare *= runningSquare
3969 branchMul32(Assembler::Overflow
, temp1
, temp1
, onOver
);
3973 // if ((n & 1) != 0) result *= runningSquare
3975 branchTest32(Assembler::Zero
, temp2
, Imm32(1), &even
);
3976 branchMul32(Assembler::Overflow
, temp1
, dest
, onOver
);
3980 // if (n == 0) return result
3981 branchRshift32(Assembler::NonZero
, Imm32(1), temp2
, &loop
);
3986 void MacroAssembler::signInt32(Register input
, Register output
) {
3987 MOZ_ASSERT(input
!= output
);
3990 move32(input
, output
);
3991 rshift32Arithmetic(Imm32(31), output
);
3992 branch32(Assembler::LessThanOrEqual
, input
, Imm32(0), &done
);
3993 move32(Imm32(1), output
);
3997 void MacroAssembler::signDouble(FloatRegister input
, FloatRegister output
) {
3998 MOZ_ASSERT(input
!= output
);
4000 Label done
, zeroOrNaN
, negative
;
4001 loadConstantDouble(0.0, output
);
4002 branchDouble(Assembler::DoubleEqualOrUnordered
, input
, output
, &zeroOrNaN
);
4003 branchDouble(Assembler::DoubleLessThan
, input
, output
, &negative
);
4005 loadConstantDouble(1.0, output
);
4009 loadConstantDouble(-1.0, output
);
4013 moveDouble(input
, output
);
4018 void MacroAssembler::signDoubleToInt32(FloatRegister input
, Register output
,
4019 FloatRegister temp
, Label
* fail
) {
4020 MOZ_ASSERT(input
!= temp
);
4022 Label done
, zeroOrNaN
, negative
;
4023 loadConstantDouble(0.0, temp
);
4024 branchDouble(Assembler::DoubleEqualOrUnordered
, input
, temp
, &zeroOrNaN
);
4025 branchDouble(Assembler::DoubleLessThan
, input
, temp
, &negative
);
4027 move32(Imm32(1), output
);
4031 move32(Imm32(-1), output
);
4034 // Fail for NaN and negative zero.
4036 branchDouble(Assembler::DoubleUnordered
, input
, input
, fail
);
4038 // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
4039 // is -Infinity instead of Infinity.
4040 loadConstantDouble(1.0, temp
);
4041 divDouble(input
, temp
);
4042 branchDouble(Assembler::DoubleLessThan
, temp
, input
, fail
);
4043 move32(Imm32(0), output
);
4048 void MacroAssembler::randomDouble(Register rng
, FloatRegister dest
,
4049 Register64 temp0
, Register64 temp1
) {
4050 using mozilla::non_crypto::XorShift128PlusRNG
;
4053 sizeof(XorShift128PlusRNG
) == 2 * sizeof(uint64_t),
4054 "Code below assumes XorShift128PlusRNG contains two uint64_t values");
4056 Address
state0Addr(rng
, XorShift128PlusRNG::offsetOfState0());
4057 Address
state1Addr(rng
, XorShift128PlusRNG::offsetOfState1());
4059 Register64 s0Reg
= temp0
;
4060 Register64 s1Reg
= temp1
;
4062 // uint64_t s1 = mState[0];
4063 load64(state0Addr
, s1Reg
);
4066 move64(s1Reg
, s0Reg
);
4067 lshift64(Imm32(23), s1Reg
);
4068 xor64(s0Reg
, s1Reg
);
4071 move64(s1Reg
, s0Reg
);
4072 rshift64(Imm32(17), s1Reg
);
4073 xor64(s0Reg
, s1Reg
);
4075 // const uint64_t s0 = mState[1];
4076 load64(state1Addr
, s0Reg
);
4079 store64(s0Reg
, state0Addr
);
4082 xor64(s0Reg
, s1Reg
);
4085 rshift64(Imm32(26), s0Reg
);
4086 xor64(s0Reg
, s1Reg
);
4089 store64(s1Reg
, state1Addr
);
4092 load64(state0Addr
, s0Reg
);
4093 add64(s0Reg
, s1Reg
);
4095 // See comment in XorShift128PlusRNG::nextDouble().
4096 static constexpr int MantissaBits
=
4097 mozilla::FloatingPoint
<double>::kExponentShift
+ 1;
4098 static constexpr double ScaleInv
= double(1) / (1ULL << MantissaBits
);
4100 and64(Imm64((1ULL << MantissaBits
) - 1), s1Reg
);
4102 // Note: we know s1Reg isn't signed after the and64 so we can use the faster
4103 // convertInt64ToDouble instead of convertUInt64ToDouble.
4104 convertInt64ToDouble(s1Reg
, dest
);
4107 mulDoublePtr(ImmPtr(&ScaleInv
), s0Reg
.scratchReg(), dest
);
4110 void MacroAssembler::sameValueDouble(FloatRegister left
, FloatRegister right
,
4111 FloatRegister temp
, Register dest
) {
4112 Label nonEqual
, isSameValue
, isNotSameValue
;
4113 branchDouble(Assembler::DoubleNotEqualOrUnordered
, left
, right
, &nonEqual
);
4115 // First, test for being equal to 0.0, which also includes -0.0.
4116 loadConstantDouble(0.0, temp
);
4117 branchDouble(Assembler::DoubleNotEqual
, left
, temp
, &isSameValue
);
4119 // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
4120 // is -Infinity instead of Infinity.
4122 loadConstantDouble(1.0, temp
);
4123 divDouble(left
, temp
);
4124 branchDouble(Assembler::DoubleLessThan
, temp
, left
, &isNegInf
);
4126 loadConstantDouble(1.0, temp
);
4127 divDouble(right
, temp
);
4128 branchDouble(Assembler::DoubleGreaterThan
, temp
, right
, &isSameValue
);
4129 jump(&isNotSameValue
);
4133 loadConstantDouble(1.0, temp
);
4134 divDouble(right
, temp
);
4135 branchDouble(Assembler::DoubleLessThan
, temp
, right
, &isSameValue
);
4136 jump(&isNotSameValue
);
4141 // Test if both values are NaN.
4142 branchDouble(Assembler::DoubleOrdered
, left
, left
, &isNotSameValue
);
4143 branchDouble(Assembler::DoubleOrdered
, right
, right
, &isNotSameValue
);
4148 move32(Imm32(1), dest
);
4151 bind(&isNotSameValue
);
4152 move32(Imm32(0), dest
);
4157 void MacroAssembler::minMaxArrayInt32(Register array
, Register result
,
4158 Register temp1
, Register temp2
,
4159 Register temp3
, bool isMax
, Label
* fail
) {
4160 // array must be a packed array. Load its elements.
4161 Register elements
= temp1
;
4162 loadPtr(Address(array
, NativeObject::offsetOfElements()), elements
);
4164 // Load the length and guard that it is non-zero.
4165 Address
lengthAddr(elements
, ObjectElements::offsetOfInitializedLength());
4166 load32(lengthAddr
, temp3
);
4167 branchTest32(Assembler::Zero
, temp3
, temp3
, fail
);
4169 // Compute the address of the last element.
4170 Register elementsEnd
= temp2
;
4171 BaseObjectElementIndex
elementsEndAddr(elements
, temp3
,
4172 -int32_t(sizeof(Value
)));
4173 computeEffectiveAddress(elementsEndAddr
, elementsEnd
);
4175 // Load the first element into result.
4176 fallibleUnboxInt32(Address(elements
, 0), result
, fail
);
4181 // Check whether we're done.
4182 branchPtr(Assembler::Equal
, elements
, elementsEnd
, &done
);
4184 // If not, advance to the next element and load it.
4185 addPtr(Imm32(sizeof(Value
)), elements
);
4186 fallibleUnboxInt32(Address(elements
, 0), temp3
, fail
);
4188 // Update result if necessary.
4189 Assembler::Condition cond
=
4190 isMax
? Assembler::GreaterThan
: Assembler::LessThan
;
4191 cmp32Move32(cond
, temp3
, result
, temp3
, result
);
4197 void MacroAssembler::minMaxArrayNumber(Register array
, FloatRegister result
,
4198 FloatRegister floatTemp
, Register temp1
,
4199 Register temp2
, bool isMax
,
4201 // array must be a packed array. Load its elements.
4202 Register elements
= temp1
;
4203 loadPtr(Address(array
, NativeObject::offsetOfElements()), elements
);
4205 // Load the length and check if the array is empty.
4207 Address
lengthAddr(elements
, ObjectElements::offsetOfInitializedLength());
4208 load32(lengthAddr
, temp2
);
4209 branchTest32(Assembler::Zero
, temp2
, temp2
, &isEmpty
);
4211 // Compute the address of the last element.
4212 Register elementsEnd
= temp2
;
4213 BaseObjectElementIndex
elementsEndAddr(elements
, temp2
,
4214 -int32_t(sizeof(Value
)));
4215 computeEffectiveAddress(elementsEndAddr
, elementsEnd
);
4217 // Load the first element into result.
4218 ensureDouble(Address(elements
, 0), result
, fail
);
4223 // Check whether we're done.
4224 branchPtr(Assembler::Equal
, elements
, elementsEnd
, &done
);
4226 // If not, advance to the next element and load it into floatTemp.
4227 addPtr(Imm32(sizeof(Value
)), elements
);
4228 ensureDouble(Address(elements
, 0), floatTemp
, fail
);
4230 // Update result if necessary.
4232 maxDouble(floatTemp
, result
, /* handleNaN = */ true);
4234 minDouble(floatTemp
, result
, /* handleNaN = */ true);
4238 // With no arguments, min/max return +Infinity/-Infinity respectively.
4241 loadConstantDouble(mozilla::NegativeInfinity
<double>(), result
);
4243 loadConstantDouble(mozilla::PositiveInfinity
<double>(), result
);
4249 void MacroAssembler::branchIfNotRegExpPrototypeOptimizable(Register proto
,
4252 loadJSContext(temp
);
4253 loadPtr(Address(temp
, JSContext::offsetOfRealm()), temp
);
4254 size_t offset
= Realm::offsetOfRegExps() +
4255 RegExpRealm::offsetOfOptimizableRegExpPrototypeShape();
4256 loadPtr(Address(temp
, offset
), temp
);
4257 branchTestObjShapeUnsafe(Assembler::NotEqual
, proto
, temp
, fail
);
4260 void MacroAssembler::branchIfNotRegExpInstanceOptimizable(Register regexp
,
4263 loadJSContext(temp
);
4264 loadPtr(Address(temp
, JSContext::offsetOfRealm()), temp
);
4265 size_t offset
= Realm::offsetOfRegExps() +
4266 RegExpRealm::offsetOfOptimizableRegExpInstanceShape();
4267 loadPtr(Address(temp
, offset
), temp
);
4268 branchTestObjShapeUnsafe(Assembler::NotEqual
, regexp
, temp
, label
);
4271 void MacroAssembler::loadRegExpLastIndex(Register regexp
, Register string
,
4273 Label
* notFoundZeroLastIndex
) {
4274 Address
flagsSlot(regexp
, RegExpObject::offsetOfFlags());
4275 Address
lastIndexSlot(regexp
, RegExpObject::offsetOfLastIndex());
4276 Address
stringLength(string
, JSString::offsetOfLength());
4278 Label notGlobalOrSticky
, loadedLastIndex
;
4280 branchTest32(Assembler::Zero
, flagsSlot
,
4281 Imm32(JS::RegExpFlag::Global
| JS::RegExpFlag::Sticky
),
4282 ¬GlobalOrSticky
);
4284 // It's a global or sticky regular expression. Emit the following code:
4286 // lastIndex = regexp.lastIndex
4287 // if lastIndex > string.length:
4288 // jump to notFoundZeroLastIndex (skip the regexp match/test operation)
4290 // The `notFoundZeroLastIndex` code should set regexp.lastIndex to 0 and
4291 // treat this as a not-found result.
4293 // See steps 5-8 in js::RegExpBuiltinExec.
4295 // Earlier guards must have ensured regexp.lastIndex is a non-negative
4300 branchTestInt32(Assembler::Equal
, lastIndexSlot
, &ok
);
4301 assumeUnreachable("Expected int32 value for lastIndex");
4305 unboxInt32(lastIndexSlot
, lastIndex
);
4309 branchTest32(Assembler::NotSigned
, lastIndex
, lastIndex
, &ok
);
4310 assumeUnreachable("Expected non-negative lastIndex");
4314 branch32(Assembler::Below
, stringLength
, lastIndex
, notFoundZeroLastIndex
);
4315 jump(&loadedLastIndex
);
4318 bind(¬GlobalOrSticky
);
4319 move32(Imm32(0), lastIndex
);
4321 bind(&loadedLastIndex
);
4324 // ===============================================================
4327 void MacroAssembler::loadFunctionLength(Register func
,
4328 Register funFlagsAndArgCount
,
4329 Register output
, Label
* slowPath
) {
4332 // These flags should already have been checked by caller.
4334 uint32_t FlagsToCheck
=
4335 FunctionFlags::SELFHOSTLAZY
| FunctionFlags::RESOLVED_LENGTH
;
4336 branchTest32(Assembler::Zero
, funFlagsAndArgCount
, Imm32(FlagsToCheck
),
4338 assumeUnreachable("The function flags should already have been checked.");
4343 // NOTE: `funFlagsAndArgCount` and `output` must be allowed to alias.
4345 // Load the target function's length.
4346 Label isInterpreted
, lengthLoaded
;
4347 branchTest32(Assembler::NonZero
, funFlagsAndArgCount
,
4348 Imm32(FunctionFlags::BASESCRIPT
), &isInterpreted
);
4350 // The length property of a native function stored with the flags.
4351 move32(funFlagsAndArgCount
, output
);
4352 rshift32(Imm32(JSFunction::ArgCountShift
), output
);
4353 jump(&lengthLoaded
);
4355 bind(&isInterpreted
);
4357 // Load the length property of an interpreted function.
4358 loadPrivate(Address(func
, JSFunction::offsetOfJitInfoOrScript()), output
);
4359 loadPtr(Address(output
, JSScript::offsetOfSharedData()), output
);
4360 branchTestPtr(Assembler::Zero
, output
, output
, slowPath
);
4361 loadPtr(Address(output
, SharedImmutableScriptData::offsetOfISD()), output
);
4362 load16ZeroExtend(Address(output
, ImmutableScriptData::offsetOfFunLength()),
4365 bind(&lengthLoaded
);
4368 void MacroAssembler::loadFunctionName(Register func
, Register output
,
4369 ImmGCPtr emptyString
, Label
* slowPath
) {
4370 MOZ_ASSERT(func
!= output
);
4372 // Get the JSFunction flags.
4373 load32(Address(func
, JSFunction::offsetOfFlagsAndArgCount()), output
);
4375 // If the name was previously resolved, the name property may be shadowed.
4376 branchTest32(Assembler::NonZero
, output
, Imm32(FunctionFlags::RESOLVED_NAME
),
4380 branchTest32(Assembler::NonZero
, output
,
4381 Imm32(FunctionFlags::HAS_GUESSED_ATOM
), &noName
);
4383 Address
atomAddr(func
, JSFunction::offsetOfAtom());
4384 branchTestUndefined(Assembler::Equal
, atomAddr
, &noName
);
4385 unboxString(atomAddr
, output
);
4391 // An absent name property defaults to the empty string.
4392 movePtr(emptyString
, output
);
4398 void MacroAssembler::assertFunctionIsExtended(Register func
) {
4401 branchTestFunctionFlags(func
, FunctionFlags::EXTENDED
, Assembler::NonZero
,
4403 assumeUnreachable("Function is not extended");
4408 void MacroAssembler::branchTestType(Condition cond
, Register tag
,
4409 JSValueType type
, Label
* label
) {
4411 case JSVAL_TYPE_DOUBLE
:
4412 branchTestDouble(cond
, tag
, label
);
4414 case JSVAL_TYPE_INT32
:
4415 branchTestInt32(cond
, tag
, label
);
4417 case JSVAL_TYPE_BOOLEAN
:
4418 branchTestBoolean(cond
, tag
, label
);
4420 case JSVAL_TYPE_UNDEFINED
:
4421 branchTestUndefined(cond
, tag
, label
);
4423 case JSVAL_TYPE_NULL
:
4424 branchTestNull(cond
, tag
, label
);
4426 case JSVAL_TYPE_MAGIC
:
4427 branchTestMagic(cond
, tag
, label
);
4429 case JSVAL_TYPE_STRING
:
4430 branchTestString(cond
, tag
, label
);
4432 case JSVAL_TYPE_SYMBOL
:
4433 branchTestSymbol(cond
, tag
, label
);
4435 case JSVAL_TYPE_BIGINT
:
4436 branchTestBigInt(cond
, tag
, label
);
4438 case JSVAL_TYPE_OBJECT
:
4439 branchTestObject(cond
, tag
, label
);
4442 MOZ_CRASH("Unexpected value type");
4446 void MacroAssembler::branchTestObjShapeList(
4447 Condition cond
, Register obj
, Register shapeElements
, Register shapeScratch
,
4448 Register endScratch
, Register spectreScratch
, Label
* label
) {
4449 MOZ_ASSERT(cond
== Assembler::Equal
|| cond
== Assembler::NotEqual
);
4451 bool needSpectreMitigations
= spectreScratch
!= InvalidReg
;
4454 Label
* onMatch
= cond
== Assembler::Equal
? label
: &done
;
4456 // Load the object's shape pointer into shapeScratch, and prepare to compare
4457 // it with the shapes in the list. The shapes are stored as private values so
4458 // we can compare directly.
4459 loadPtr(Address(obj
, JSObject::offsetOfShape()), shapeScratch
);
4461 // Compute end pointer.
4462 Address
lengthAddr(shapeElements
,
4463 ObjectElements::offsetOfInitializedLength());
4464 load32(lengthAddr
, endScratch
);
4465 BaseObjectElementIndex
endPtrAddr(shapeElements
, endScratch
);
4466 computeEffectiveAddress(endPtrAddr
, endScratch
);
4471 // Compare the object's shape with a shape from the list. Note that on 64-bit
4472 // this includes the tag bits, but on 32-bit we only compare the low word of
4473 // the value. This is fine because the list of shapes is never exposed and the
4474 // tag is guaranteed to be PrivateGCThing.
4475 if (needSpectreMitigations
) {
4476 move32(Imm32(0), spectreScratch
);
4478 branchPtr(Assembler::Equal
, Address(shapeElements
, 0), shapeScratch
, onMatch
);
4479 if (needSpectreMitigations
) {
4480 spectreMovePtr(Assembler::Equal
, spectreScratch
, obj
);
4483 // Advance to next shape and loop if not finished.
4484 addPtr(Imm32(sizeof(Value
)), shapeElements
);
4485 branchPtr(Assembler::Below
, shapeElements
, endScratch
, &loop
);
4487 if (cond
== Assembler::NotEqual
) {
4493 void MacroAssembler::branchTestObjCompartment(Condition cond
, Register obj
,
4494 const Address
& compartment
,
4495 Register scratch
, Label
* label
) {
4496 MOZ_ASSERT(obj
!= scratch
);
4497 loadPtr(Address(obj
, JSObject::offsetOfShape()), scratch
);
4498 loadPtr(Address(scratch
, Shape::offsetOfBaseShape()), scratch
);
4499 loadPtr(Address(scratch
, BaseShape::offsetOfRealm()), scratch
);
4500 loadPtr(Address(scratch
, Realm::offsetOfCompartment()), scratch
);
4501 branchPtr(cond
, compartment
, scratch
, label
);
4504 void MacroAssembler::branchTestObjCompartment(
4505 Condition cond
, Register obj
, const JS::Compartment
* compartment
,
4506 Register scratch
, Label
* label
) {
4507 MOZ_ASSERT(obj
!= scratch
);
4508 loadPtr(Address(obj
, JSObject::offsetOfShape()), scratch
);
4509 loadPtr(Address(scratch
, Shape::offsetOfBaseShape()), scratch
);
4510 loadPtr(Address(scratch
, BaseShape::offsetOfRealm()), scratch
);
4511 loadPtr(Address(scratch
, Realm::offsetOfCompartment()), scratch
);
4512 branchPtr(cond
, scratch
, ImmPtr(compartment
), label
);
4515 void MacroAssembler::branchIfNonNativeObj(Register obj
, Register scratch
,
4517 loadPtr(Address(obj
, JSObject::offsetOfShape()), scratch
);
4518 branchTest32(Assembler::Zero
,
4519 Address(scratch
, Shape::offsetOfImmutableFlags()),
4520 Imm32(Shape::isNativeBit()), label
);
4523 void MacroAssembler::branchIfObjectNotExtensible(Register obj
, Register scratch
,
4525 loadPtr(Address(obj
, JSObject::offsetOfShape()), scratch
);
4527 // Spectre-style checks are not needed here because we do not interpret data
4528 // based on this check.
4529 static_assert(sizeof(ObjectFlags
) == sizeof(uint16_t));
4530 load16ZeroExtend(Address(scratch
, Shape::offsetOfObjectFlags()), scratch
);
4531 branchTest32(Assembler::NonZero
, scratch
,
4532 Imm32(uint32_t(ObjectFlag::NotExtensible
)), label
);
4535 void MacroAssembler::wasmTrap(wasm::Trap trap
,
4536 wasm::BytecodeOffset bytecodeOffset
) {
4537 uint32_t trapOffset
= wasmTrapInstruction().offset();
4538 MOZ_ASSERT_IF(!oom(),
4539 currentOffset() - trapOffset
== WasmTrapInstructionLength
);
4541 append(trap
, wasm::TrapSite(trapOffset
, bytecodeOffset
));
4544 std::pair
<CodeOffset
, uint32_t> MacroAssembler::wasmReserveStackChecked(
4545 uint32_t amount
, wasm::BytecodeOffset trapOffset
) {
4546 if (amount
> MAX_UNCHECKED_LEAF_FRAME_SIZE
) {
4547 // The frame is large. Don't bump sp until after the stack limit check so
4548 // that the trap handler isn't called with a wild sp.
4550 Register scratch
= ABINonArgReg0
;
4551 moveStackPtrTo(scratch
);
4554 branchPtr(Assembler::Below
, scratch
, Imm32(amount
), &trap
);
4555 subPtr(Imm32(amount
), scratch
);
4556 branchPtr(Assembler::Below
,
4557 Address(InstanceReg
, wasm::Instance::offsetOfStackLimit()),
4561 wasmTrap(wasm::Trap::StackOverflow
, trapOffset
);
4562 CodeOffset trapInsnOffset
= CodeOffset(currentOffset());
4565 reserveStack(amount
);
4566 return std::pair
<CodeOffset
, uint32_t>(trapInsnOffset
, 0);
4569 reserveStack(amount
);
4571 branchStackPtrRhs(Assembler::Below
,
4572 Address(InstanceReg
, wasm::Instance::offsetOfStackLimit()),
4574 wasmTrap(wasm::Trap::StackOverflow
, trapOffset
);
4575 CodeOffset trapInsnOffset
= CodeOffset(currentOffset());
4577 return std::pair
<CodeOffset
, uint32_t>(trapInsnOffset
, amount
);
4580 CodeOffset
MacroAssembler::wasmCallImport(const wasm::CallSiteDesc
& desc
,
4581 const wasm::CalleeDesc
& callee
) {
4582 storePtr(InstanceReg
,
4583 Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall
));
4585 // Load the callee, before the caller's registers are clobbered.
4586 uint32_t instanceDataOffset
= callee
.importInstanceDataOffset();
4588 Address(InstanceReg
, wasm::Instance::offsetInData(
4589 instanceDataOffset
+
4590 offsetof(wasm::FuncImportInstanceData
, code
))),
4593 #if !defined(JS_CODEGEN_NONE) && !defined(JS_CODEGEN_WASM32)
4594 static_assert(ABINonArgReg0
!= InstanceReg
, "by constraint");
4597 // Switch to the callee's realm.
4599 Address(InstanceReg
, wasm::Instance::offsetInData(
4600 instanceDataOffset
+
4601 offsetof(wasm::FuncImportInstanceData
, realm
))),
4603 loadPtr(Address(InstanceReg
, wasm::Instance::offsetOfCx()), ABINonArgReg2
);
4604 storePtr(ABINonArgReg1
, Address(ABINonArgReg2
, JSContext::offsetOfRealm()));
4606 // Switch to the callee's instance and pinned registers and make the call.
4607 loadPtr(Address(InstanceReg
,
4608 wasm::Instance::offsetInData(
4609 instanceDataOffset
+
4610 offsetof(wasm::FuncImportInstanceData
, instance
))),
4613 storePtr(InstanceReg
,
4614 Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall
));
4615 loadWasmPinnedRegsFromInstance();
4617 return call(desc
, ABINonArgReg0
);
4620 CodeOffset
MacroAssembler::wasmCallBuiltinInstanceMethod(
4621 const wasm::CallSiteDesc
& desc
, const ABIArg
& instanceArg
,
4622 wasm::SymbolicAddress builtin
, wasm::FailureMode failureMode
) {
4623 MOZ_ASSERT(instanceArg
!= ABIArg());
4625 storePtr(InstanceReg
,
4626 Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall
));
4627 storePtr(InstanceReg
,
4628 Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall
));
4630 if (instanceArg
.kind() == ABIArg::GPR
) {
4631 movePtr(InstanceReg
, instanceArg
.gpr());
4632 } else if (instanceArg
.kind() == ABIArg::Stack
) {
4633 storePtr(InstanceReg
,
4634 Address(getStackPointer(), instanceArg
.offsetFromArgBase()));
4636 MOZ_CRASH("Unknown abi passing style for pointer");
4639 CodeOffset ret
= call(desc
, builtin
);
4641 if (failureMode
!= wasm::FailureMode::Infallible
) {
4643 switch (failureMode
) {
4644 case wasm::FailureMode::Infallible
:
4646 case wasm::FailureMode::FailOnNegI32
:
4647 branchTest32(Assembler::NotSigned
, ReturnReg
, ReturnReg
, &noTrap
);
4649 case wasm::FailureMode::FailOnNullPtr
:
4650 branchTestPtr(Assembler::NonZero
, ReturnReg
, ReturnReg
, &noTrap
);
4652 case wasm::FailureMode::FailOnInvalidRef
:
4653 branchPtr(Assembler::NotEqual
, ReturnReg
,
4654 ImmWord(uintptr_t(wasm::AnyRef::invalid().forCompiledCode())),
4658 wasmTrap(wasm::Trap::ThrowReported
,
4659 wasm::BytecodeOffset(desc
.lineOrBytecode()));
4666 CodeOffset
MacroAssembler::asmCallIndirect(const wasm::CallSiteDesc
& desc
,
4667 const wasm::CalleeDesc
& callee
) {
4668 MOZ_ASSERT(callee
.which() == wasm::CalleeDesc::AsmJSTable
);
4670 const Register scratch
= WasmTableCallScratchReg0
;
4671 const Register index
= WasmTableCallIndexReg
;
4673 // Optimization opportunity: when offsetof(FunctionTableElem, code) == 0, as
4674 // it is at present, we can probably generate better code here by folding
4675 // the address computation into the load.
4677 static_assert(sizeof(wasm::FunctionTableElem
) == 8 ||
4678 sizeof(wasm::FunctionTableElem
) == 16,
4679 "elements of function tables are two words");
4681 // asm.js tables require no signature check, and have had their index
4682 // masked into range and thus need no bounds check.
4684 Address(InstanceReg
, wasm::Instance::offsetInData(
4685 callee
.tableFunctionBaseInstanceDataOffset())),
4687 if (sizeof(wasm::FunctionTableElem
) == 8) {
4688 computeEffectiveAddress(BaseIndex(scratch
, index
, TimesEight
), scratch
);
4690 lshift32(Imm32(4), index
);
4691 addPtr(index
, scratch
);
4693 loadPtr(Address(scratch
, offsetof(wasm::FunctionTableElem
, code
)), scratch
);
4694 storePtr(InstanceReg
,
4695 Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall
));
4696 storePtr(InstanceReg
,
4697 Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall
));
4698 return call(desc
, scratch
);
4701 // In principle, call_indirect requires an expensive context switch to the
4702 // callee's instance and realm before the call and an almost equally expensive
4703 // switch back to the caller's ditto after. However, if the caller's instance
4704 // is the same as the callee's instance then no context switch is required, and
4705 // it only takes a compare-and-branch at run-time to test this - all values are
4706 // in registers already. We therefore generate two call paths, one for the fast
4707 // call without the context switch (which additionally avoids a null check) and
4708 // one for the slow call with the context switch.
4710 void MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc
& desc
,
4711 const wasm::CalleeDesc
& callee
,
4712 Label
* boundsCheckFailedLabel
,
4713 Label
* nullCheckFailedLabel
,
4714 mozilla::Maybe
<uint32_t> tableSize
,
4715 CodeOffset
* fastCallOffset
,
4716 CodeOffset
* slowCallOffset
) {
4717 static_assert(sizeof(wasm::FunctionTableElem
) == 2 * sizeof(void*),
4718 "Exactly two pointers or index scaling won't work correctly");
4719 MOZ_ASSERT(callee
.which() == wasm::CalleeDesc::WasmTable
);
4721 const int shift
= sizeof(wasm::FunctionTableElem
) == 8 ? 3 : 4;
4722 wasm::BytecodeOffset
trapOffset(desc
.lineOrBytecode());
4723 const Register calleeScratch
= WasmTableCallScratchReg0
;
4724 const Register index
= WasmTableCallIndexReg
;
4726 // Check the table index and throw if out-of-bounds.
4728 // Frequently the table size is known, so optimize for that. Otherwise
4729 // compare with a memory operand when that's possible. (There's little sense
4730 // in hoisting the load of the bound into a register at a higher level and
4731 // reusing that register, because a hoisted value would either have to be
4732 // spilled and re-loaded before the next call_indirect, or would be abandoned
4733 // because we could not trust that a hoisted value would not have changed.)
4735 if (boundsCheckFailedLabel
) {
4736 if (tableSize
.isSome()) {
4737 branch32(Assembler::Condition::AboveOrEqual
, index
, Imm32(*tableSize
),
4738 boundsCheckFailedLabel
);
4741 Assembler::Condition::BelowOrEqual
,
4742 Address(InstanceReg
, wasm::Instance::offsetInData(
4743 callee
.tableLengthInstanceDataOffset())),
4744 index
, boundsCheckFailedLabel
);
4748 // Write the functype-id into the ABI functype-id register.
4750 const wasm::CallIndirectId callIndirectId
= callee
.wasmTableSigId();
4751 switch (callIndirectId
.kind()) {
4752 case wasm::CallIndirectIdKind::Global
:
4753 loadPtr(Address(InstanceReg
, wasm::Instance::offsetInData(
4754 callIndirectId
.instanceDataOffset())),
4755 WasmTableCallSigReg
);
4757 case wasm::CallIndirectIdKind::Immediate
:
4758 move32(Imm32(callIndirectId
.immediate()), WasmTableCallSigReg
);
4760 case wasm::CallIndirectIdKind::AsmJS
:
4761 case wasm::CallIndirectIdKind::None
:
4765 // Load the base pointer of the table and compute the address of the callee in
4769 Address(InstanceReg
, wasm::Instance::offsetInData(
4770 callee
.tableFunctionBaseInstanceDataOffset())),
4772 shiftIndex32AndAdd(index
, shift
, calleeScratch
);
4774 // Load the callee instance and decide whether to take the fast path or the
4779 const Register newInstanceTemp
= WasmTableCallScratchReg1
;
4780 loadPtr(Address(calleeScratch
, offsetof(wasm::FunctionTableElem
, instance
)),
4782 branchPtr(Assembler::Equal
, InstanceReg
, newInstanceTemp
, &fastCall
);
4784 // Slow path: Save context, check for null, setup new context, call, restore
4787 // TODO: The slow path could usefully be out-of-line and the test above would
4788 // just fall through to the fast path. This keeps the fast-path code dense,
4789 // and has correct static prediction for the branch (forward conditional
4790 // branches predicted not taken, normally).
4792 storePtr(InstanceReg
,
4793 Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall
));
4794 movePtr(newInstanceTemp
, InstanceReg
);
4795 storePtr(InstanceReg
,
4796 Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall
));
4798 #ifdef WASM_HAS_HEAPREG
4799 // Use the null pointer exception resulting from loading HeapReg from a null
4800 // instance to handle a call to a null slot.
4801 MOZ_ASSERT(nullCheckFailedLabel
== nullptr);
4802 loadWasmPinnedRegsFromInstance(mozilla::Some(trapOffset
));
4804 MOZ_ASSERT(nullCheckFailedLabel
!= nullptr);
4805 branchTestPtr(Assembler::Zero
, InstanceReg
, InstanceReg
,
4806 nullCheckFailedLabel
);
4808 loadWasmPinnedRegsFromInstance();
4810 switchToWasmInstanceRealm(index
, WasmTableCallScratchReg1
);
4812 loadPtr(Address(calleeScratch
, offsetof(wasm::FunctionTableElem
, code
)),
4815 *slowCallOffset
= call(desc
, calleeScratch
);
4817 // Restore registers and realm and join up with the fast path.
4819 loadPtr(Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall
),
4821 loadWasmPinnedRegsFromInstance();
4822 switchToWasmInstanceRealm(ABINonArgReturnReg0
, ABINonArgReturnReg1
);
4825 // Fast path: just load the code pointer and go. The instance and heap
4826 // register are the same as in the caller, and nothing will be null.
4828 // (In particular, the code pointer will not be null: if it were, the instance
4829 // would have been null, and then it would not have been equivalent to our
4830 // current instance. So no null check is needed on the fast path.)
4834 loadPtr(Address(calleeScratch
, offsetof(wasm::FunctionTableElem
, code
)),
4837 // We use a different type of call site for the fast call since the instance
4838 // slots in the frame do not have valid values.
4840 wasm::CallSiteDesc
newDesc(desc
.lineOrBytecode(),
4841 wasm::CallSiteDesc::IndirectFast
);
4842 *fastCallOffset
= call(newDesc
, calleeScratch
);
4847 void MacroAssembler::wasmCallRef(const wasm::CallSiteDesc
& desc
,
4848 const wasm::CalleeDesc
& callee
,
4849 CodeOffset
* fastCallOffset
,
4850 CodeOffset
* slowCallOffset
) {
4851 MOZ_ASSERT(callee
.which() == wasm::CalleeDesc::FuncRef
);
4852 const Register calleeScratch
= WasmCallRefCallScratchReg0
;
4853 const Register calleeFnObj
= WasmCallRefReg
;
4855 // Load from the function's WASM_INSTANCE_SLOT extended slot, and decide
4856 // whether to take the fast path or the slow path. Register this load
4857 // instruction to be source of a trap -- null pointer check.
4861 const Register newInstanceTemp
= WasmCallRefCallScratchReg1
;
4862 size_t instanceSlotOffset
= FunctionExtended::offsetOfExtendedSlot(
4863 FunctionExtended::WASM_INSTANCE_SLOT
);
4864 static_assert(FunctionExtended::WASM_INSTANCE_SLOT
< wasm::NullPtrGuardSize
);
4865 wasm::BytecodeOffset
trapOffset(desc
.lineOrBytecode());
4866 append(wasm::Trap::NullPointerDereference
,
4867 wasm::TrapSite(currentOffset(), trapOffset
));
4868 loadPtr(Address(calleeFnObj
, instanceSlotOffset
), newInstanceTemp
);
4869 branchPtr(Assembler::Equal
, InstanceReg
, newInstanceTemp
, &fastCall
);
4871 storePtr(InstanceReg
,
4872 Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall
));
4873 movePtr(newInstanceTemp
, InstanceReg
);
4874 storePtr(InstanceReg
,
4875 Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall
));
4877 loadWasmPinnedRegsFromInstance();
4878 switchToWasmInstanceRealm(WasmCallRefCallScratchReg0
,
4879 WasmCallRefCallScratchReg1
);
4881 // Get funcUncheckedCallEntry() from the function's
4882 // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
4883 size_t uncheckedEntrySlotOffset
= FunctionExtended::offsetOfExtendedSlot(
4884 FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT
);
4885 loadPtr(Address(calleeFnObj
, uncheckedEntrySlotOffset
), calleeScratch
);
4887 *slowCallOffset
= call(desc
, calleeScratch
);
4889 // Restore registers and realm and back to this caller's.
4890 loadPtr(Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall
),
4892 loadWasmPinnedRegsFromInstance();
4893 switchToWasmInstanceRealm(ABINonArgReturnReg0
, ABINonArgReturnReg1
);
4896 // Fast path: just load WASM_FUNC_UNCHECKED_ENTRY_SLOT value and go.
4897 // The instance and pinned registers are the same as in the caller.
4901 loadPtr(Address(calleeFnObj
, uncheckedEntrySlotOffset
), calleeScratch
);
4903 // We use a different type of call site for the fast call since the instance
4904 // slots in the frame do not have valid values.
4906 wasm::CallSiteDesc
newDesc(desc
.lineOrBytecode(),
4907 wasm::CallSiteDesc::FuncRefFast
);
4908 *fastCallOffset
= call(newDesc
, calleeScratch
);
4913 bool MacroAssembler::needScratch1ForBranchWasmGcRefType(wasm::RefType type
) {
4914 MOZ_ASSERT(type
.isValid());
4915 MOZ_ASSERT(type
.isAnyHierarchy());
4916 return !type
.isNone() && !type
.isAny();
4919 bool MacroAssembler::needScratch2ForBranchWasmGcRefType(wasm::RefType type
) {
4920 MOZ_ASSERT(type
.isValid());
4921 MOZ_ASSERT(type
.isAnyHierarchy());
4922 return type
.isTypeRef() &&
4923 type
.typeDef()->subTypingDepth() >= wasm::MinSuperTypeVectorLength
;
4926 bool MacroAssembler::needSuperSuperTypeVectorForBranchWasmGcRefType(
4927 wasm::RefType type
) {
4928 return type
.isTypeRef();
4931 void MacroAssembler::branchWasmGcObjectIsRefType(
4932 Register object
, wasm::RefType sourceType
, wasm::RefType destType
,
4933 Label
* label
, bool onSuccess
, Register superSuperTypeVector
,
4934 Register scratch1
, Register scratch2
) {
4935 MOZ_ASSERT(sourceType
.isValid());
4936 MOZ_ASSERT(destType
.isValid());
4937 MOZ_ASSERT(sourceType
.isAnyHierarchy());
4938 MOZ_ASSERT(destType
.isAnyHierarchy());
4939 MOZ_ASSERT_IF(needScratch1ForBranchWasmGcRefType(destType
),
4940 scratch1
!= Register::Invalid());
4941 MOZ_ASSERT_IF(needScratch2ForBranchWasmGcRefType(destType
),
4942 scratch2
!= Register::Invalid());
4943 MOZ_ASSERT_IF(needSuperSuperTypeVectorForBranchWasmGcRefType(destType
),
4944 superSuperTypeVector
!= Register::Invalid());
4947 Label
* successLabel
= onSuccess
? label
: &fallthrough
;
4948 Label
* failLabel
= onSuccess
? &fallthrough
: label
;
4949 Label
* nullLabel
= destType
.isNullable() ? successLabel
: failLabel
;
4952 if (sourceType
.isNullable()) {
4953 branchTestPtr(Assembler::Zero
, object
, object
, nullLabel
);
4956 // The only value that can inhabit 'none' is null. So, early out if we got
4958 if (destType
.isNone()) {
4964 if (destType
.isAny()) {
4965 // No further checks for 'any'
4971 // 'type' is now 'eq' or lower, which currently will always be a gc object.
4972 // Test for non-gc objects.
4973 MOZ_ASSERT(scratch1
!= Register::Invalid());
4974 if (!wasm::RefType::isSubTypeOf(sourceType
, wasm::RefType::eq())) {
4975 branchTestObjectIsWasmGcObject(false, object
, scratch1
, failLabel
);
4978 if (destType
.isEq()) {
4979 // No further checks for 'eq'
4985 // 'type' is now 'struct', 'array', or a concrete type. (Bottom types were
4988 // Casting to a concrete type only requires a simple check on the
4989 // object's superTypeVector. Casting to an abstract type (struct, array)
4990 // requires loading the object's superTypeVector->typeDef->kind, and checking
4991 // that it is correct.
4993 loadPtr(Address(object
, int32_t(WasmGcObject::offsetOfSuperTypeVector())),
4995 if (destType
.isTypeRef()) {
4996 // concrete type, do superTypeVector check
4997 branchWasmSuperTypeVectorIsSubtype(scratch1
, superSuperTypeVector
, scratch2
,
4998 destType
.typeDef()->subTypingDepth(),
4999 successLabel
, true);
5001 // abstract type, do kind check
5002 loadPtr(Address(scratch1
,
5003 int32_t(wasm::SuperTypeVector::offsetOfSelfTypeDef())),
5005 load8ZeroExtend(Address(scratch1
, int32_t(wasm::TypeDef::offsetOfKind())),
5007 branch32(Assembler::Equal
, scratch1
, Imm32(int32_t(destType
.typeDefKind())),
5016 void MacroAssembler::branchWasmSuperTypeVectorIsSubtype(
5017 Register subSuperTypeVector
, Register superSuperTypeVector
,
5018 Register scratch
, uint32_t superTypeDepth
, Label
* label
, bool onSuccess
) {
5019 MOZ_ASSERT_IF(superTypeDepth
>= wasm::MinSuperTypeVectorLength
,
5020 scratch
!= Register::Invalid());
5022 // We generate just different enough code for 'is' subtype vs 'is not'
5023 // subtype that we handle them separately.
5027 // At this point, we could generate a fast success check which jumps to
5028 // `label` if `subSuperTypeVector == superSuperTypeVector`. However,
5029 // profiling of Barista-3 seems to show this is hardly worth anything,
5030 // whereas it is worth us generating smaller code and in particular one
5031 // fewer conditional branch. So it is omitted:
5033 // branchPtr(Assembler::Equal, subSuperTypeVector, superSuperTypeVector,
5036 // Emit a bounds check if the super type depth may be out-of-bounds.
5037 if (superTypeDepth
>= wasm::MinSuperTypeVectorLength
) {
5038 // Slowest path for having a bounds check of the super type vector
5040 Address(subSuperTypeVector
, wasm::SuperTypeVector::offsetOfLength()),
5042 branch32(Assembler::LessThanOrEqual
, scratch
, Imm32(superTypeDepth
),
5046 // Load the `superTypeDepth` entry from subSuperTypeVector. This
5047 // will be `superSuperTypeVector` if `subSuperTypeVector` is indeed a
5050 Address(subSuperTypeVector
,
5051 wasm::SuperTypeVector::offsetOfTypeDefInVector(superTypeDepth
)),
5052 subSuperTypeVector
);
5053 branchPtr(Assembler::Equal
, subSuperTypeVector
, superSuperTypeVector
,
5056 // Fallthrough to the failed case
5061 // Emit a bounds check if the super type depth may be out-of-bounds.
5062 if (superTypeDepth
>= wasm::MinSuperTypeVectorLength
) {
5063 load32(Address(subSuperTypeVector
, wasm::SuperTypeVector::offsetOfLength()),
5065 branch32(Assembler::LessThanOrEqual
, scratch
, Imm32(superTypeDepth
), label
);
5068 // Load the `superTypeDepth` entry from subSuperTypeVector. This will be
5069 // `superSuperTypeVector` if `subSuperTypeVector` is indeed a subtype.
5071 Address(subSuperTypeVector
,
5072 wasm::SuperTypeVector::offsetOfTypeDefInVector(superTypeDepth
)),
5073 subSuperTypeVector
);
5074 branchPtr(Assembler::NotEqual
, subSuperTypeVector
, superSuperTypeVector
,
5076 // Fallthrough to the success case
5079 void MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc
& desc
) {
5080 CodeOffset offset
= nopPatchableToCall();
5081 append(desc
, offset
);
5084 void MacroAssembler::emitPreBarrierFastPath(JSRuntime
* rt
, MIRType type
,
5085 Register temp1
, Register temp2
,
5086 Register temp3
, Label
* noBarrier
) {
5087 MOZ_ASSERT(temp1
!= PreBarrierReg
);
5088 MOZ_ASSERT(temp2
!= PreBarrierReg
);
5089 MOZ_ASSERT(temp3
!= PreBarrierReg
);
5091 // Load the GC thing in temp1.
5092 if (type
== MIRType::Value
) {
5093 unboxGCThingForGCBarrier(Address(PreBarrierReg
, 0), temp1
);
5095 MOZ_ASSERT(type
== MIRType::Object
|| type
== MIRType::String
||
5096 type
== MIRType::Shape
);
5097 loadPtr(Address(PreBarrierReg
, 0), temp1
);
5101 // The caller should have checked for null pointers.
5103 branchTestPtr(Assembler::NonZero
, temp1
, temp1
, &nonZero
);
5104 assumeUnreachable("JIT pre-barrier: unexpected nullptr");
5108 // Load the chunk address in temp2.
5109 movePtr(temp1
, temp2
);
5110 andPtr(Imm32(int32_t(~gc::ChunkMask
)), temp2
);
5112 // If the GC thing is in the nursery, we don't need to barrier it.
5113 if (type
== MIRType::Value
|| type
== MIRType::Object
||
5114 type
== MIRType::String
) {
5115 branchPtr(Assembler::NotEqual
, Address(temp2
, gc::ChunkStoreBufferOffset
),
5116 ImmWord(0), noBarrier
);
5120 branchPtr(Assembler::Equal
, Address(temp2
, gc::ChunkStoreBufferOffset
),
5121 ImmWord(0), &isTenured
);
5122 assumeUnreachable("JIT pre-barrier: unexpected nursery pointer");
5127 // Determine the bit index and store in temp1.
5129 // bit = (addr & js::gc::ChunkMask) / js::gc::CellBytesPerMarkBit +
5130 // static_cast<uint32_t>(colorBit);
5131 static_assert(gc::CellBytesPerMarkBit
== 8,
5132 "Calculation below relies on this");
5133 static_assert(size_t(gc::ColorBit::BlackBit
) == 0,
5134 "Calculation below relies on this");
5135 andPtr(Imm32(gc::ChunkMask
), temp1
);
5136 rshiftPtr(Imm32(3), temp1
);
5138 static_assert(gc::MarkBitmapWordBits
== JS_BITS_PER_WORD
,
5139 "Calculation below relies on this");
5141 // Load the bitmap word in temp2.
5143 // word = chunk.bitmap[bit / MarkBitmapWordBits];
5145 // Fold the adjustment for the fact that arenas don't start at the beginning
5146 // of the chunk into the offset to the chunk bitmap.
5147 const size_t firstArenaAdjustment
= gc::FirstArenaAdjustmentBits
/ CHAR_BIT
;
5148 const intptr_t offset
=
5149 intptr_t(gc::ChunkMarkBitmapOffset
) - intptr_t(firstArenaAdjustment
);
5151 movePtr(temp1
, temp3
);
5152 #if JS_BITS_PER_WORD == 64
5153 rshiftPtr(Imm32(6), temp1
);
5154 loadPtr(BaseIndex(temp2
, temp1
, TimesEight
, offset
), temp2
);
5156 rshiftPtr(Imm32(5), temp1
);
5157 loadPtr(BaseIndex(temp2
, temp1
, TimesFour
, offset
), temp2
);
5160 // Load the mask in temp1.
5162 // mask = uintptr_t(1) << (bit % MarkBitmapWordBits);
5163 andPtr(Imm32(gc::MarkBitmapWordBits
- 1), temp3
);
5164 move32(Imm32(1), temp1
);
5165 #ifdef JS_CODEGEN_X64
5166 MOZ_ASSERT(temp3
== rcx
);
5168 #elif JS_CODEGEN_X86
5169 MOZ_ASSERT(temp3
== ecx
);
5171 #elif JS_CODEGEN_ARM
5172 ma_lsl(temp3
, temp1
, temp1
);
5173 #elif JS_CODEGEN_ARM64
5174 Lsl(ARMRegister(temp1
, 64), ARMRegister(temp1
, 64), ARMRegister(temp3
, 64));
5175 #elif JS_CODEGEN_MIPS32
5176 ma_sll(temp1
, temp1
, temp3
);
5177 #elif JS_CODEGEN_MIPS64
5178 ma_dsll(temp1
, temp1
, temp3
);
5179 #elif JS_CODEGEN_LOONG64
5180 as_sll_d(temp1
, temp1
, temp3
);
5181 #elif JS_CODEGEN_RISCV64
5182 sll(temp1
, temp1
, temp3
);
5183 #elif JS_CODEGEN_WASM32
5185 #elif JS_CODEGEN_NONE
5188 # error "Unknown architecture"
5191 // No barrier is needed if the bit is set, |word & mask != 0|.
5192 branchTestPtr(Assembler::NonZero
, temp2
, temp1
, noBarrier
);
5195 // ========================================================================
5196 // JS atomic operations.
5198 void MacroAssembler::atomicIsLockFreeJS(Register value
, Register output
) {
5199 // Keep this in sync with isLockfreeJS() in jit/AtomicOperations.h.
5200 static_assert(AtomicOperations::isLockfreeJS(1)); // Implementation artifact
5201 static_assert(AtomicOperations::isLockfreeJS(2)); // Implementation artifact
5202 static_assert(AtomicOperations::isLockfreeJS(4)); // Spec requirement
5203 static_assert(AtomicOperations::isLockfreeJS(8)); // Implementation artifact
5206 move32(Imm32(1), output
);
5207 branch32(Assembler::Equal
, value
, Imm32(8), &done
);
5208 branch32(Assembler::Equal
, value
, Imm32(4), &done
);
5209 branch32(Assembler::Equal
, value
, Imm32(2), &done
);
5210 branch32(Assembler::Equal
, value
, Imm32(1), &done
);
5211 move32(Imm32(0), output
);
5215 // ========================================================================
5216 // Spectre Mitigations.
5218 void MacroAssembler::spectreMaskIndex32(Register index
, Register length
,
5220 MOZ_ASSERT(JitOptions
.spectreIndexMasking
);
5221 MOZ_ASSERT(length
!= output
);
5222 MOZ_ASSERT(index
!= output
);
5224 move32(Imm32(0), output
);
5225 cmp32Move32(Assembler::Below
, index
, length
, index
, output
);
5228 void MacroAssembler::spectreMaskIndex32(Register index
, const Address
& length
,
5230 MOZ_ASSERT(JitOptions
.spectreIndexMasking
);
5231 MOZ_ASSERT(index
!= length
.base
);
5232 MOZ_ASSERT(length
.base
!= output
);
5233 MOZ_ASSERT(index
!= output
);
5235 move32(Imm32(0), output
);
5236 cmp32Move32(Assembler::Below
, index
, length
, index
, output
);
5239 void MacroAssembler::spectreMaskIndexPtr(Register index
, Register length
,
5241 MOZ_ASSERT(JitOptions
.spectreIndexMasking
);
5242 MOZ_ASSERT(length
!= output
);
5243 MOZ_ASSERT(index
!= output
);
5245 movePtr(ImmWord(0), output
);
5246 cmpPtrMovePtr(Assembler::Below
, index
, length
, index
, output
);
5249 void MacroAssembler::spectreMaskIndexPtr(Register index
, const Address
& length
,
5251 MOZ_ASSERT(JitOptions
.spectreIndexMasking
);
5252 MOZ_ASSERT(index
!= length
.base
);
5253 MOZ_ASSERT(length
.base
!= output
);
5254 MOZ_ASSERT(index
!= output
);
5256 movePtr(ImmWord(0), output
);
5257 cmpPtrMovePtr(Assembler::Below
, index
, length
, index
, output
);
5260 void MacroAssembler::boundsCheck32PowerOfTwo(Register index
, uint32_t length
,
5262 MOZ_ASSERT(mozilla::IsPowerOfTwo(length
));
5263 branch32(Assembler::AboveOrEqual
, index
, Imm32(length
), failure
);
5265 // Note: it's fine to clobber the input register, as this is a no-op: it
5266 // only affects speculative execution.
5267 if (JitOptions
.spectreIndexMasking
) {
5268 and32(Imm32(length
- 1), index
);
5272 void MacroAssembler::loadWasmPinnedRegsFromInstance(
5273 mozilla::Maybe
<wasm::BytecodeOffset
> trapOffset
) {
5274 #ifdef WASM_HAS_HEAPREG
5275 static_assert(wasm::Instance::offsetOfMemoryBase() < 4096,
5276 "We count only on the low page being inaccessible");
5278 append(wasm::Trap::IndirectCallToNull
,
5279 wasm::TrapSite(currentOffset(), *trapOffset
));
5281 loadPtr(Address(InstanceReg
, wasm::Instance::offsetOfMemoryBase()), HeapReg
);
5283 MOZ_ASSERT(!trapOffset
);
5287 //}}} check_macroassembler_style
5290 void MacroAssembler::debugAssertCanonicalInt32(Register r
) {
5292 if (!js::jit::JitOptions
.lessDebugCode
) {
5293 # if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
5295 branchPtr(Assembler::BelowOrEqual
, r
, ImmWord(UINT32_MAX
), &ok
);
5298 # elif defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
5300 ScratchRegisterScope
scratch(asMasm());
5301 move32SignExtendToPtr(r
, scratch
);
5302 branchPtr(Assembler::Equal
, r
, scratch
, &ok
);
5306 MOZ_CRASH("IMPLEMENT ME");
5313 void MacroAssembler::memoryBarrierBefore(const Synchronization
& sync
) {
5314 memoryBarrier(sync
.barrierBefore
);
5317 void MacroAssembler::memoryBarrierAfter(const Synchronization
& sync
) {
5318 memoryBarrier(sync
.barrierAfter
);
5321 void MacroAssembler::debugAssertIsObject(const ValueOperand
& val
) {
5324 branchTestObject(Assembler::Equal
, val
, &ok
);
5325 assumeUnreachable("Expected an object!");
5330 void MacroAssembler::debugAssertObjHasFixedSlots(Register obj
,
5333 Label hasFixedSlots
;
5334 loadPtr(Address(obj
, JSObject::offsetOfShape()), scratch
);
5335 branchTest32(Assembler::NonZero
,
5336 Address(scratch
, Shape::offsetOfImmutableFlags()),
5337 Imm32(NativeShape::fixedSlotsMask()), &hasFixedSlots
);
5338 assumeUnreachable("Expected a fixed slot");
5339 bind(&hasFixedSlots
);
5343 void MacroAssembler::debugAssertObjectHasClass(Register obj
, Register scratch
,
5344 const JSClass
* clasp
) {
5347 branchTestObjClassNoSpectreMitigations(Assembler::Equal
, obj
, clasp
, scratch
,
5349 assumeUnreachable("Class check failed");
5354 void MacroAssembler::branchArrayIsNotPacked(Register array
, Register temp1
,
5355 Register temp2
, Label
* label
) {
5356 loadPtr(Address(array
, NativeObject::offsetOfElements()), temp1
);
5358 // Test length == initializedLength.
5359 Address
initLength(temp1
, ObjectElements::offsetOfInitializedLength());
5360 load32(Address(temp1
, ObjectElements::offsetOfLength()), temp2
);
5361 branch32(Assembler::NotEqual
, initLength
, temp2
, label
);
5363 // Test the NON_PACKED flag.
5364 Address
flags(temp1
, ObjectElements::offsetOfFlags());
5365 branchTest32(Assembler::NonZero
, flags
, Imm32(ObjectElements::NON_PACKED
),
5369 void MacroAssembler::setIsPackedArray(Register obj
, Register output
,
5371 // Ensure it's an ArrayObject.
5372 Label notPackedArray
;
5373 branchTestObjClass(Assembler::NotEqual
, obj
, &ArrayObject::class_
, temp
, obj
,
5376 branchArrayIsNotPacked(obj
, temp
, output
, ¬PackedArray
);
5379 move32(Imm32(1), output
);
5382 bind(¬PackedArray
);
5383 move32(Imm32(0), output
);
5388 void MacroAssembler::packedArrayPop(Register array
, ValueOperand output
,
5389 Register temp1
, Register temp2
,
5391 // Load obj->elements in temp1.
5392 loadPtr(Address(array
, NativeObject::offsetOfElements()), temp1
);
5395 static constexpr uint32_t UnhandledFlags
=
5396 ObjectElements::Flags::NON_PACKED
|
5397 ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH
|
5398 ObjectElements::Flags::NOT_EXTENSIBLE
|
5399 ObjectElements::Flags::MAYBE_IN_ITERATION
;
5400 Address
flags(temp1
, ObjectElements::offsetOfFlags());
5401 branchTest32(Assembler::NonZero
, flags
, Imm32(UnhandledFlags
), fail
);
5403 // Load length in temp2. Ensure length == initializedLength.
5404 Address
lengthAddr(temp1
, ObjectElements::offsetOfLength());
5405 Address
initLengthAddr(temp1
, ObjectElements::offsetOfInitializedLength());
5406 load32(lengthAddr
, temp2
);
5407 branch32(Assembler::NotEqual
, initLengthAddr
, temp2
, fail
);
5409 // Result is |undefined| if length == 0.
5410 Label notEmpty
, done
;
5411 branchTest32(Assembler::NonZero
, temp2
, temp2
, ¬Empty
);
5413 moveValue(UndefinedValue(), output
);
5419 // Load the last element.
5420 sub32(Imm32(1), temp2
);
5421 BaseObjectElementIndex
elementAddr(temp1
, temp2
);
5422 loadValue(elementAddr
, output
);
5424 // Pre-barrier the element because we're removing it from the array.
5425 EmitPreBarrier(*this, elementAddr
, MIRType::Value
);
5427 // Update length and initializedLength.
5428 store32(temp2
, lengthAddr
);
5429 store32(temp2
, initLengthAddr
);
5434 void MacroAssembler::packedArrayShift(Register array
, ValueOperand output
,
5435 Register temp1
, Register temp2
,
5436 LiveRegisterSet volatileRegs
,
5438 // Load obj->elements in temp1.
5439 loadPtr(Address(array
, NativeObject::offsetOfElements()), temp1
);
5442 static constexpr uint32_t UnhandledFlags
=
5443 ObjectElements::Flags::NON_PACKED
|
5444 ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH
|
5445 ObjectElements::Flags::NOT_EXTENSIBLE
|
5446 ObjectElements::Flags::MAYBE_IN_ITERATION
;
5447 Address
flags(temp1
, ObjectElements::offsetOfFlags());
5448 branchTest32(Assembler::NonZero
, flags
, Imm32(UnhandledFlags
), fail
);
5450 // Load length in temp2. Ensure length == initializedLength.
5451 Address
lengthAddr(temp1
, ObjectElements::offsetOfLength());
5452 Address
initLengthAddr(temp1
, ObjectElements::offsetOfInitializedLength());
5453 load32(lengthAddr
, temp2
);
5454 branch32(Assembler::NotEqual
, initLengthAddr
, temp2
, fail
);
5456 // Result is |undefined| if length == 0.
5457 Label notEmpty
, done
;
5458 branchTest32(Assembler::NonZero
, temp2
, temp2
, ¬Empty
);
5460 moveValue(UndefinedValue(), output
);
5466 // Load the first element.
5467 Address
elementAddr(temp1
, 0);
5468 loadValue(elementAddr
, output
);
5470 // Move the other elements and update the initializedLength/length. This will
5471 // also trigger pre-barriers.
5473 // Ensure output is in volatileRegs. Don't preserve temp1 and temp2.
5474 volatileRegs
.takeUnchecked(temp1
);
5475 volatileRegs
.takeUnchecked(temp2
);
5476 if (output
.hasVolatileReg()) {
5477 volatileRegs
.addUnchecked(output
);
5480 PushRegsInMask(volatileRegs
);
5482 using Fn
= void (*)(ArrayObject
* arr
);
5483 setupUnalignedABICall(temp1
);
5485 callWithABI
<Fn
, ArrayShiftMoveElements
>();
5487 PopRegsInMask(volatileRegs
);
5493 void MacroAssembler::loadArgumentsObjectElement(Register obj
, Register index
,
5494 ValueOperand output
,
5495 Register temp
, Label
* fail
) {
5496 Register temp2
= output
.scratchReg();
5498 // Get initial length value.
5499 unboxInt32(Address(obj
, ArgumentsObject::getInitialLengthSlotOffset()), temp
);
5501 // Ensure no overridden elements.
5502 branchTest32(Assembler::NonZero
, temp
,
5503 Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT
), fail
);
5506 rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT
), temp
);
5507 spectreBoundsCheck32(index
, temp
, temp2
, fail
);
5509 // Load ArgumentsData.
5510 loadPrivate(Address(obj
, ArgumentsObject::getDataSlotOffset()), temp
);
5512 // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
5513 BaseValueIndex
argValue(temp
, index
, ArgumentsData::offsetOfArgs());
5514 branchTestMagic(Assembler::Equal
, argValue
, fail
);
5515 loadValue(argValue
, output
);
5518 void MacroAssembler::loadArgumentsObjectElementHole(Register obj
,
5520 ValueOperand output
,
5523 Register temp2
= output
.scratchReg();
5525 // Get initial length value.
5526 unboxInt32(Address(obj
, ArgumentsObject::getInitialLengthSlotOffset()), temp
);
5528 // Ensure no overridden elements.
5529 branchTest32(Assembler::NonZero
, temp
,
5530 Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT
), fail
);
5533 Label outOfBounds
, done
;
5534 rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT
), temp
);
5535 spectreBoundsCheck32(index
, temp
, temp2
, &outOfBounds
);
5537 // Load ArgumentsData.
5538 loadPrivate(Address(obj
, ArgumentsObject::getDataSlotOffset()), temp
);
5540 // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
5541 BaseValueIndex
argValue(temp
, index
, ArgumentsData::offsetOfArgs());
5542 branchTestMagic(Assembler::Equal
, argValue
, fail
);
5543 loadValue(argValue
, output
);
5547 branch32(Assembler::LessThan
, index
, Imm32(0), fail
);
5548 moveValue(UndefinedValue(), output
);
5553 void MacroAssembler::loadArgumentsObjectElementExists(
5554 Register obj
, Register index
, Register output
, Register temp
, Label
* fail
) {
5555 // Ensure the index is non-negative.
5556 branch32(Assembler::LessThan
, index
, Imm32(0), fail
);
5558 // Get initial length value.
5559 unboxInt32(Address(obj
, ArgumentsObject::getInitialLengthSlotOffset()), temp
);
5561 // Ensure no overridden or deleted elements.
5562 branchTest32(Assembler::NonZero
, temp
,
5563 Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT
), fail
);
5565 // Compare index against the length.
5566 rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT
), temp
);
5567 cmp32Set(Assembler::LessThan
, index
, temp
, output
);
5570 void MacroAssembler::loadArgumentsObjectLength(Register obj
, Register output
,
5572 // Get initial length value.
5573 unboxInt32(Address(obj
, ArgumentsObject::getInitialLengthSlotOffset()),
5576 // Test if length has been overridden.
5577 branchTest32(Assembler::NonZero
, output
,
5578 Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT
), fail
);
5580 // Shift out arguments length and return it.
5581 rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT
), output
);
5584 void MacroAssembler::branchTestArgumentsObjectFlags(Register obj
, Register temp
,
5588 MOZ_ASSERT((flags
& ~ArgumentsObject::PACKED_BITS_MASK
) == 0);
5590 // Get initial length value.
5591 unboxInt32(Address(obj
, ArgumentsObject::getInitialLengthSlotOffset()), temp
);
5594 branchTest32(cond
, temp
, Imm32(flags
), label
);
5597 static constexpr bool ValidateSizeRange(Scalar::Type from
, Scalar::Type to
) {
5598 for (Scalar::Type type
= from
; type
< to
; type
= Scalar::Type(type
+ 1)) {
5599 if (TypedArrayElemSize(type
) != TypedArrayElemSize(from
)) {
5606 void MacroAssembler::typedArrayElementSize(Register obj
, Register output
) {
5607 static_assert(Scalar::Int8
== 0, "Int8 is the first typed array class");
5609 (Scalar::BigUint64
- Scalar::Int8
) == Scalar::MaxTypedArrayViewType
- 1,
5610 "BigUint64 is the last typed array class");
5612 Label one
, two
, four
, eight
, done
;
5614 loadObjClassUnsafe(obj
, output
);
5616 static_assert(ValidateSizeRange(Scalar::Int8
, Scalar::Int16
),
5617 "element size is one in [Int8, Int16)");
5618 branchPtr(Assembler::Below
, output
,
5619 ImmPtr(TypedArrayObject::classForType(Scalar::Int16
)), &one
);
5621 static_assert(ValidateSizeRange(Scalar::Int16
, Scalar::Int32
),
5622 "element size is two in [Int16, Int32)");
5623 branchPtr(Assembler::Below
, output
,
5624 ImmPtr(TypedArrayObject::classForType(Scalar::Int32
)), &two
);
5626 static_assert(ValidateSizeRange(Scalar::Int32
, Scalar::Float64
),
5627 "element size is four in [Int32, Float64)");
5628 branchPtr(Assembler::Below
, output
,
5629 ImmPtr(TypedArrayObject::classForType(Scalar::Float64
)), &four
);
5631 static_assert(ValidateSizeRange(Scalar::Float64
, Scalar::Uint8Clamped
),
5632 "element size is eight in [Float64, Uint8Clamped)");
5633 branchPtr(Assembler::Below
, output
,
5634 ImmPtr(TypedArrayObject::classForType(Scalar::Uint8Clamped
)),
5637 static_assert(ValidateSizeRange(Scalar::Uint8Clamped
, Scalar::BigInt64
),
5638 "element size is one in [Uint8Clamped, BigInt64)");
5639 branchPtr(Assembler::Below
, output
,
5640 ImmPtr(TypedArrayObject::classForType(Scalar::BigInt64
)), &one
);
5643 ValidateSizeRange(Scalar::BigInt64
, Scalar::MaxTypedArrayViewType
),
5644 "element size is eight in [BigInt64, MaxTypedArrayViewType)");
5645 // Fall through for BigInt64 and BigUint64
5648 move32(Imm32(8), output
);
5652 move32(Imm32(4), output
);
5656 move32(Imm32(2), output
);
5660 move32(Imm32(1), output
);
5665 void MacroAssembler::branchIfClassIsNotTypedArray(Register clasp
,
5666 Label
* notTypedArray
) {
5667 static_assert(Scalar::Int8
== 0, "Int8 is the first typed array class");
5668 const JSClass
* firstTypedArrayClass
=
5669 TypedArrayObject::classForType(Scalar::Int8
);
5672 (Scalar::BigUint64
- Scalar::Int8
) == Scalar::MaxTypedArrayViewType
- 1,
5673 "BigUint64 is the last typed array class");
5674 const JSClass
* lastTypedArrayClass
=
5675 TypedArrayObject::classForType(Scalar::BigUint64
);
5677 branchPtr(Assembler::Below
, clasp
, ImmPtr(firstTypedArrayClass
),
5679 branchPtr(Assembler::Above
, clasp
, ImmPtr(lastTypedArrayClass
),
5683 void MacroAssembler::branchIfHasDetachedArrayBuffer(Register obj
, Register temp
,
5685 // Inline implementation of ArrayBufferViewObject::hasDetachedBuffer().
5687 // Load obj->elements in temp.
5688 loadPtr(Address(obj
, NativeObject::offsetOfElements()), temp
);
5690 // Shared buffers can't be detached.
5692 branchTest32(Assembler::NonZero
,
5693 Address(temp
, ObjectElements::offsetOfFlags()),
5694 Imm32(ObjectElements::SHARED_MEMORY
), &done
);
5696 // An ArrayBufferView with a null buffer has never had its buffer exposed to
5698 fallibleUnboxObject(Address(obj
, ArrayBufferViewObject::bufferOffset()), temp
,
5701 // Load the ArrayBuffer flags and branch if the detached flag is set.
5702 unboxInt32(Address(temp
, ArrayBufferObject::offsetOfFlagsSlot()), temp
);
5703 branchTest32(Assembler::NonZero
, temp
, Imm32(ArrayBufferObject::DETACHED
),
5709 void MacroAssembler::branchIfNativeIteratorNotReusable(Register ni
,
5710 Label
* notReusable
) {
5711 // See NativeIterator::isReusable.
5712 Address
flagsAddr(ni
, NativeIterator::offsetOfFlagsAndCount());
5715 Label niIsInitialized
;
5716 branchTest32(Assembler::NonZero
, flagsAddr
,
5717 Imm32(NativeIterator::Flags::Initialized
), &niIsInitialized
);
5719 "Expected a NativeIterator that's been completely "
5721 bind(&niIsInitialized
);
5724 branchTest32(Assembler::NonZero
, flagsAddr
,
5725 Imm32(NativeIterator::Flags::NotReusable
), notReusable
);
5728 void MacroAssembler::branchNativeIteratorIndices(Condition cond
, Register ni
,
5730 NativeIteratorIndices kind
,
5732 Address
iterFlagsAddr(ni
, NativeIterator::offsetOfFlagsAndCount());
5733 load32(iterFlagsAddr
, temp
);
5734 and32(Imm32(NativeIterator::IndicesMask
), temp
);
5735 uint32_t shiftedKind
= uint32_t(kind
) << NativeIterator::IndicesShift
;
5736 branch32(cond
, temp
, Imm32(shiftedKind
), label
);
5739 static void LoadNativeIterator(MacroAssembler
& masm
, Register obj
,
5741 MOZ_ASSERT(obj
!= dest
);
5744 // Assert we have a PropertyIteratorObject.
5746 masm
.branchTestObjClass(Assembler::Equal
, obj
,
5747 &PropertyIteratorObject::class_
, dest
, obj
, &ok
);
5748 masm
.assumeUnreachable("Expected PropertyIteratorObject!");
5752 // Load NativeIterator object.
5753 Address
slotAddr(obj
, PropertyIteratorObject::offsetOfIteratorSlot());
5754 masm
.loadPrivate(slotAddr
, dest
);
5757 // The ShapeCachePtr may be used to cache an iterator for for-in. Return that
5758 // iterator in |dest| if:
5759 // - the shape cache pointer exists and stores a native iterator
5760 // - the iterator is reusable
5761 // - the iterated object has no dense elements
5762 // - the shapes of each object on the proto chain of |obj| match the cached
5764 // - the proto chain has no dense elements
5765 // Otherwise, jump to |failure|.
5766 void MacroAssembler::maybeLoadIteratorFromShape(Register obj
, Register dest
,
5767 Register temp
, Register temp2
,
5771 // obj: always contains the input object
5772 // temp: walks the obj->shape->baseshape->proto->shape->... chain
5773 // temp2: points to the native iterator. Incremented to walk the shapes array.
5774 // temp3: scratch space
5775 // dest: stores the resulting PropertyIteratorObject on success
5778 Register shapeAndProto
= temp
;
5779 Register nativeIterator
= temp2
;
5781 // Load ShapeCache from shape.
5782 loadPtr(Address(obj
, JSObject::offsetOfShape()), shapeAndProto
);
5783 loadPtr(Address(shapeAndProto
, Shape::offsetOfCachePtr()), dest
);
5785 // Check if it's an iterator.
5786 movePtr(dest
, temp3
);
5787 andPtr(Imm32(ShapeCachePtr::MASK
), temp3
);
5788 branch32(Assembler::NotEqual
, temp3
, Imm32(ShapeCachePtr::ITERATOR
), failure
);
5790 // If we've cached an iterator, |obj| must be a native object.
5793 branchIfNonNativeObj(obj
, temp3
, &nonNative
);
5796 // Verify that |obj| has no dense elements.
5797 loadPtr(Address(obj
, NativeObject::offsetOfElements()), temp3
);
5798 branch32(Assembler::NotEqual
,
5799 Address(temp3
, ObjectElements::offsetOfInitializedLength()),
5802 // Clear tag bits from iterator object. |dest| is now valid.
5803 // Load the native iterator and verify that it's reusable.
5804 andPtr(Imm32(~ShapeCachePtr::MASK
), dest
);
5805 LoadNativeIterator(*this, dest
, nativeIterator
);
5806 branchIfNativeIteratorNotReusable(nativeIterator
, failure
);
5808 // We have to compare the shapes in the native iterator with the shapes on the
5809 // proto chain to ensure the cached iterator is still valid. The shape array
5810 // always starts at a fixed offset from the base of the NativeIterator, so
5811 // instead of using an instruction outside the loop to initialize a pointer to
5812 // the shapes array, we can bake it into the offset and reuse the pointer to
5813 // the NativeIterator. We add |sizeof(Shape*)| to start at the second shape.
5814 // (The first shape corresponds to the object itself. We don't have to check
5815 // it, because we got the iterator via the shape.)
5816 size_t nativeIteratorProtoShapeOffset
=
5817 NativeIterator::offsetOfFirstShape() + sizeof(Shape
*);
5819 // Loop over the proto chain. At the head of the loop, |shape| is the shape of
5820 // the current object, and |iteratorShapes| points to the expected shape of
5825 // Load the proto. If the proto is null, then we're done.
5826 loadPtr(Address(shapeAndProto
, Shape::offsetOfBaseShape()), shapeAndProto
);
5827 loadPtr(Address(shapeAndProto
, BaseShape::offsetOfProto()), shapeAndProto
);
5828 branchPtr(Assembler::Equal
, shapeAndProto
, ImmPtr(nullptr), &success
);
5831 // We have guarded every shape up until this point, so we know that the proto
5832 // is a native object.
5833 branchIfNonNativeObj(shapeAndProto
, temp3
, &nonNative
);
5836 // Verify that the proto has no dense elements.
5837 loadPtr(Address(shapeAndProto
, NativeObject::offsetOfElements()), temp3
);
5838 branch32(Assembler::NotEqual
,
5839 Address(temp3
, ObjectElements::offsetOfInitializedLength()),
5842 // Compare the shape of the proto to the expected shape.
5843 loadPtr(Address(shapeAndProto
, JSObject::offsetOfShape()), shapeAndProto
);
5844 loadPtr(Address(nativeIterator
, nativeIteratorProtoShapeOffset
), temp3
);
5845 branchPtr(Assembler::NotEqual
, shapeAndProto
, temp3
, failure
);
5847 // Increment |iteratorShapes| and jump back to the top of the loop.
5848 addPtr(Imm32(sizeof(Shape
*)), nativeIterator
);
5853 assumeUnreachable("Expected NativeObject in maybeLoadIteratorFromShape");
5859 void MacroAssembler::iteratorMore(Register obj
, ValueOperand output
,
5862 Register outputScratch
= output
.scratchReg();
5863 LoadNativeIterator(*this, obj
, outputScratch
);
5865 // If propertyCursor_ < propertiesEnd_, load the next string and advance
5866 // the cursor. Otherwise return MagicValue(JS_NO_ITER_VALUE).
5868 Address
cursorAddr(outputScratch
, NativeIterator::offsetOfPropertyCursor());
5869 Address
cursorEndAddr(outputScratch
, NativeIterator::offsetOfPropertiesEnd());
5870 loadPtr(cursorAddr
, temp
);
5871 branchPtr(Assembler::BelowOrEqual
, cursorEndAddr
, temp
, &iterDone
);
5874 loadPtr(Address(temp
, 0), temp
);
5876 // Increase the cursor.
5877 addPtr(Imm32(sizeof(GCPtr
<JSLinearString
*>)), cursorAddr
);
5879 tagValue(JSVAL_TYPE_STRING
, temp
, output
);
5883 moveValue(MagicValue(JS_NO_ITER_VALUE
), output
);
5888 void MacroAssembler::iteratorClose(Register obj
, Register temp1
, Register temp2
,
5890 LoadNativeIterator(*this, obj
, temp1
);
5892 // The shared iterator used for for-in with null/undefined is immutable and
5893 // unlinked. See NativeIterator::isEmptyIteratorSingleton.
5895 branchTest32(Assembler::NonZero
,
5896 Address(temp1
, NativeIterator::offsetOfFlagsAndCount()),
5897 Imm32(NativeIterator::Flags::IsEmptyIteratorSingleton
), &done
);
5899 // Clear active bit.
5900 and32(Imm32(~NativeIterator::Flags::Active
),
5901 Address(temp1
, NativeIterator::offsetOfFlagsAndCount()));
5903 // Clear objectBeingIterated.
5904 Address
iterObjAddr(temp1
, NativeIterator::offsetOfObjectBeingIterated());
5905 guardedCallPreBarrierAnyZone(iterObjAddr
, MIRType::Object
, temp2
);
5906 storePtr(ImmPtr(nullptr), iterObjAddr
);
5908 // Reset property cursor.
5909 loadPtr(Address(temp1
, NativeIterator::offsetOfShapesEnd()), temp2
);
5910 storePtr(temp2
, Address(temp1
, NativeIterator::offsetOfPropertyCursor()));
5912 // Unlink from the iterator list.
5913 const Register next
= temp2
;
5914 const Register prev
= temp3
;
5915 loadPtr(Address(temp1
, NativeIterator::offsetOfNext()), next
);
5916 loadPtr(Address(temp1
, NativeIterator::offsetOfPrev()), prev
);
5917 storePtr(prev
, Address(next
, NativeIterator::offsetOfPrev()));
5918 storePtr(next
, Address(prev
, NativeIterator::offsetOfNext()));
5920 storePtr(ImmPtr(nullptr), Address(temp1
, NativeIterator::offsetOfNext()));
5921 storePtr(ImmPtr(nullptr), Address(temp1
, NativeIterator::offsetOfPrev()));
5927 void MacroAssembler::registerIterator(Register enumeratorsList
, Register iter
,
5929 // iter->next = list
5930 storePtr(enumeratorsList
, Address(iter
, NativeIterator::offsetOfNext()));
5932 // iter->prev = list->prev
5933 loadPtr(Address(enumeratorsList
, NativeIterator::offsetOfPrev()), temp
);
5934 storePtr(temp
, Address(iter
, NativeIterator::offsetOfPrev()));
5936 // list->prev->next = iter
5937 storePtr(iter
, Address(temp
, NativeIterator::offsetOfNext()));
5939 // list->prev = iter
5940 storePtr(iter
, Address(enumeratorsList
, NativeIterator::offsetOfPrev()));
5943 void MacroAssembler::toHashableNonGCThing(ValueOperand value
,
5944 ValueOperand result
,
5945 FloatRegister tempFloat
) {
5946 // Inline implementation of |HashableValue::setValue()|.
5950 branchTestGCThing(Assembler::NotEqual
, value
, &ok
);
5951 assumeUnreachable("Unexpected GC thing");
5955 Label useInput
, done
;
5956 branchTestDouble(Assembler::NotEqual
, value
, &useInput
);
5958 Register int32
= result
.scratchReg();
5959 unboxDouble(value
, tempFloat
);
5961 // Normalize int32-valued doubles to int32 and negative zero to +0.
5963 convertDoubleToInt32(tempFloat
, int32
, &canonicalize
, false);
5965 tagValue(JSVAL_TYPE_INT32
, int32
, result
);
5968 bind(&canonicalize
);
5970 // Normalize the sign bit of a NaN.
5971 branchDouble(Assembler::DoubleOrdered
, tempFloat
, tempFloat
, &useInput
);
5972 moveValue(JS::NaNValue(), result
);
5978 moveValue(value
, result
);
5983 void MacroAssembler::toHashableValue(ValueOperand value
, ValueOperand result
,
5984 FloatRegister tempFloat
,
5985 Label
* atomizeString
, Label
* tagString
) {
5986 // Inline implementation of |HashableValue::setValue()|.
5988 ScratchTagScope
tag(*this, value
);
5989 splitTagForTest(value
, tag
);
5991 Label notString
, useInput
, done
;
5992 branchTestString(Assembler::NotEqual
, tag
, ¬String
);
5994 ScratchTagScopeRelease
_(&tag
);
5996 Register str
= result
.scratchReg();
5997 unboxString(value
, str
);
5999 branchTest32(Assembler::NonZero
, Address(str
, JSString::offsetOfFlags()),
6000 Imm32(JSString::ATOM_BIT
), &useInput
);
6002 jump(atomizeString
);
6005 tagValue(JSVAL_TYPE_STRING
, str
, result
);
6009 branchTestDouble(Assembler::NotEqual
, tag
, &useInput
);
6011 ScratchTagScopeRelease
_(&tag
);
6013 Register int32
= result
.scratchReg();
6014 unboxDouble(value
, tempFloat
);
6017 convertDoubleToInt32(tempFloat
, int32
, &canonicalize
, false);
6019 tagValue(JSVAL_TYPE_INT32
, int32
, result
);
6022 bind(&canonicalize
);
6024 branchDouble(Assembler::DoubleOrdered
, tempFloat
, tempFloat
, &useInput
);
6025 moveValue(JS::NaNValue(), result
);
6031 moveValue(value
, result
);
6036 void MacroAssembler::scrambleHashCode(Register result
) {
6037 // Inline implementation of |mozilla::ScrambleHashCode()|.
6039 mul32(Imm32(mozilla::kGoldenRatioU32
), result
);
6042 void MacroAssembler::prepareHashNonGCThing(ValueOperand value
, Register result
,
6044 // Inline implementation of |OrderedHashTable::prepareHash()| and
6045 // |mozilla::HashGeneric(v.asRawBits())|.
6049 branchTestGCThing(Assembler::NotEqual
, value
, &ok
);
6050 assumeUnreachable("Unexpected GC thing");
6054 // uint32_t v1 = static_cast<uint32_t>(aValue);
6056 move64To32(value
.toRegister64(), result
);
6058 move32(value
.payloadReg(), result
);
6061 // uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32);
6063 auto r64
= Register64(temp
);
6064 move64(value
.toRegister64(), r64
);
6065 rshift64Arithmetic(Imm32(32), r64
);
6067 // TODO: This seems like a bug in mozilla::detail::AddUintptrToHash().
6068 // The uint64_t input is first converted to uintptr_t and then back to
6069 // uint64_t. But |uint64_t(uintptr_t(bits))| actually only clears the high
6070 // bits, so this computation:
6072 // aValue = uintptr_t(bits)
6073 // v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32)
6075 // really just sets |v2 = 0|. And that means the xor-operation in AddU32ToHash
6076 // can be optimized away, because |x ^ 0 = x|.
6078 // Filed as bug 1718516.
6081 // mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash) ^ aValue);
6082 // with |aHash = 0| and |aValue = v1|.
6083 mul32(Imm32(mozilla::kGoldenRatioU32
), result
);
6085 // mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash) ^ aValue);
6086 // with |aHash = <above hash>| and |aValue = v2|.
6087 rotateLeft(Imm32(5), result
, result
);
6089 xor32(temp
, result
);
6092 // Combine |mul32| and |scrambleHashCode| by directly multiplying with
6093 // |kGoldenRatioU32 * kGoldenRatioU32|.
6095 // mul32(Imm32(mozilla::kGoldenRatioU32), result);
6097 // scrambleHashCode(result);
6098 mul32(Imm32(mozilla::kGoldenRatioU32
* mozilla::kGoldenRatioU32
), result
);
6101 void MacroAssembler::prepareHashString(Register str
, Register result
,
6103 // Inline implementation of |OrderedHashTable::prepareHash()| and
6104 // |JSAtom::hash()|.
6108 branchTest32(Assembler::NonZero
, Address(str
, JSString::offsetOfFlags()),
6109 Imm32(JSString::ATOM_BIT
), &ok
);
6110 assumeUnreachable("Unexpected non-atom string");
6114 move32(Imm32(JSString::FAT_INLINE_MASK
), temp
);
6115 and32(Address(str
, JSString::offsetOfFlags()), temp
);
6117 // Set |result| to 1 for FatInlineAtoms.
6118 move32(Imm32(0), result
);
6119 cmp32Set(Assembler::Equal
, temp
, Imm32(JSString::FAT_INLINE_MASK
), result
);
6121 // Use a computed load for branch-free code.
6123 static_assert(FatInlineAtom::offsetOfHash() > NormalAtom::offsetOfHash());
6125 constexpr size_t offsetDiff
=
6126 FatInlineAtom::offsetOfHash() - NormalAtom::offsetOfHash();
6127 static_assert(mozilla::IsPowerOfTwo(offsetDiff
));
6129 uint8_t shift
= mozilla::FloorLog2Size(offsetDiff
);
6130 if (IsShiftInScaleRange(shift
)) {
6132 BaseIndex(str
, result
, ShiftToScale(shift
), NormalAtom::offsetOfHash()),
6135 lshift32(Imm32(shift
), result
);
6136 load32(BaseIndex(str
, result
, TimesOne
, NormalAtom::offsetOfHash()),
6140 scrambleHashCode(result
);
6143 void MacroAssembler::prepareHashSymbol(Register sym
, Register result
) {
6144 // Inline implementation of |OrderedHashTable::prepareHash()| and
6145 // |Symbol::hash()|.
6147 load32(Address(sym
, JS::Symbol::offsetOfHash()), result
);
6149 scrambleHashCode(result
);
6152 void MacroAssembler::prepareHashBigInt(Register bigInt
, Register result
,
6153 Register temp1
, Register temp2
,
6155 // Inline implementation of |OrderedHashTable::prepareHash()| and
6156 // |BigInt::hash()|.
6158 // Inline implementation of |mozilla::AddU32ToHash()|.
6159 auto addU32ToHash
= [&](auto toAdd
) {
6160 rotateLeft(Imm32(5), result
, result
);
6161 xor32(toAdd
, result
);
6162 mul32(Imm32(mozilla::kGoldenRatioU32
), result
);
6165 move32(Imm32(0), result
);
6167 // Inline |mozilla::HashBytes()|.
6169 load32(Address(bigInt
, BigInt::offsetOfLength()), temp1
);
6170 loadBigIntDigits(bigInt
, temp2
);
6177 // Compute |AddToHash(AddToHash(hash, data), sizeof(Digit))|.
6178 #if defined(JS_CODEGEN_MIPS64)
6179 // Hash the lower 32-bits.
6180 addU32ToHash(Address(temp2
, 0));
6182 // Hash the upper 32-bits.
6183 addU32ToHash(Address(temp2
, sizeof(int32_t)));
6185 // Use a single 64-bit load on non-MIPS64 platforms.
6186 loadPtr(Address(temp2
, 0), temp3
);
6188 // Hash the lower 32-bits.
6189 addU32ToHash(temp3
);
6191 // Hash the upper 32-bits.
6192 rshiftPtr(Imm32(32), temp3
);
6193 addU32ToHash(temp3
);
6195 addU32ToHash(Address(temp2
, 0));
6198 addPtr(Imm32(sizeof(BigInt::Digit
)), temp2
);
6201 branchSub32(Assembler::NotSigned
, Imm32(1), temp1
, &loop
);
6203 // Compute |mozilla::AddToHash(h, isNegative())|.
6205 static_assert(mozilla::IsPowerOfTwo(BigInt::signBitMask()));
6207 load32(Address(bigInt
, BigInt::offsetOfFlags()), temp1
);
6208 and32(Imm32(BigInt::signBitMask()), temp1
);
6209 rshift32(Imm32(mozilla::FloorLog2(BigInt::signBitMask())), temp1
);
6211 addU32ToHash(temp1
);
6214 scrambleHashCode(result
);
6217 void MacroAssembler::prepareHashObject(Register setObj
, ValueOperand value
,
6218 Register result
, Register temp1
,
6219 Register temp2
, Register temp3
,
6222 // Inline implementation of |OrderedHashTable::prepareHash()| and
6223 // |HashCodeScrambler::scramble(v.asRawBits())|.
6225 // Load the |ValueSet| or |ValueMap|.
6226 static_assert(SetObject::getDataSlotOffset() ==
6227 MapObject::getDataSlotOffset());
6228 loadPrivate(Address(setObj
, SetObject::getDataSlotOffset()), temp1
);
6230 // Load |HashCodeScrambler::mK0| and |HashCodeScrambler::mK0|.
6231 static_assert(ValueSet::offsetOfImplHcsK0() == ValueMap::offsetOfImplHcsK0());
6232 static_assert(ValueSet::offsetOfImplHcsK1() == ValueMap::offsetOfImplHcsK1());
6233 auto k0
= Register64(temp1
);
6234 auto k1
= Register64(temp2
);
6235 load64(Address(temp1
, ValueSet::offsetOfImplHcsK1()), k1
);
6236 load64(Address(temp1
, ValueSet::offsetOfImplHcsK0()), k0
);
6238 // Hash numbers are 32-bit values, so only hash the lower double-word.
6239 static_assert(sizeof(mozilla::HashNumber
) == 4);
6240 move32To64ZeroExtend(value
.valueReg(), Register64(result
));
6242 // Inline implementation of |SipHasher::sipHash()|.
6243 auto m
= Register64(result
);
6244 auto v0
= Register64(temp3
);
6245 auto v1
= Register64(temp4
);
6249 auto sipRound
= [&]() {
6250 // mV0 = WrappingAdd(mV0, mV1);
6253 // mV1 = RotateLeft(mV1, 13);
6254 rotateLeft64(Imm32(13), v1
, v1
, InvalidReg
);
6259 // mV0 = RotateLeft(mV0, 32);
6260 rotateLeft64(Imm32(32), v0
, v0
, InvalidReg
);
6262 // mV2 = WrappingAdd(mV2, mV3);
6265 // mV3 = RotateLeft(mV3, 16);
6266 rotateLeft64(Imm32(16), v3
, v3
, InvalidReg
);
6271 // mV0 = WrappingAdd(mV0, mV3);
6274 // mV3 = RotateLeft(mV3, 21);
6275 rotateLeft64(Imm32(21), v3
, v3
, InvalidReg
);
6280 // mV2 = WrappingAdd(mV2, mV1);
6283 // mV1 = RotateLeft(mV1, 17);
6284 rotateLeft64(Imm32(17), v1
, v1
, InvalidReg
);
6289 // mV2 = RotateLeft(mV2, 32);
6290 rotateLeft64(Imm32(32), v2
, v2
, InvalidReg
);
6293 // 1. Initialization.
6294 // mV0 = aK0 ^ UINT64_C(0x736f6d6570736575);
6295 move64(Imm64(0x736f6d6570736575), v0
);
6298 // mV1 = aK1 ^ UINT64_C(0x646f72616e646f6d);
6299 move64(Imm64(0x646f72616e646f6d), v1
);
6302 // mV2 = aK0 ^ UINT64_C(0x6c7967656e657261);
6303 MOZ_ASSERT(v2
== k0
);
6304 xor64(Imm64(0x6c7967656e657261), v2
);
6306 // mV3 = aK1 ^ UINT64_C(0x7465646279746573);
6307 MOZ_ASSERT(v3
== k1
);
6308 xor64(Imm64(0x7465646279746573), v3
);
6322 xor64(Imm64(0xff), v2
);
6324 // for (int i = 0; i < 3; i++) sipRound();
6325 for (int i
= 0; i
< 3; i
++) {
6329 // return mV0 ^ mV1 ^ mV2 ^ mV3;
6334 move64To32(v0
, result
);
6336 scrambleHashCode(result
);
6338 MOZ_CRASH("Not implemented");
6342 void MacroAssembler::prepareHashValue(Register setObj
, ValueOperand value
,
6343 Register result
, Register temp1
,
6344 Register temp2
, Register temp3
,
6346 Label isString
, isObject
, isSymbol
, isBigInt
;
6348 ScratchTagScope
tag(*this, value
);
6349 splitTagForTest(value
, tag
);
6351 branchTestString(Assembler::Equal
, tag
, &isString
);
6352 branchTestObject(Assembler::Equal
, tag
, &isObject
);
6353 branchTestSymbol(Assembler::Equal
, tag
, &isSymbol
);
6354 branchTestBigInt(Assembler::Equal
, tag
, &isBigInt
);
6359 prepareHashNonGCThing(value
, result
, temp1
);
6364 unboxString(value
, temp1
);
6365 prepareHashString(temp1
, result
, temp2
);
6370 prepareHashObject(setObj
, value
, result
, temp1
, temp2
, temp3
, temp4
);
6375 unboxSymbol(value
, temp1
);
6376 prepareHashSymbol(temp1
, result
);
6381 unboxBigInt(value
, temp1
);
6382 prepareHashBigInt(temp1
, result
, temp2
, temp3
, temp4
);
6384 // Fallthrough to |done|.
6390 template <typename OrderedHashTable
>
6391 void MacroAssembler::orderedHashTableLookup(Register setOrMapObj
,
6392 ValueOperand value
, Register hash
,
6393 Register entryTemp
, Register temp1
,
6394 Register temp2
, Register temp3
,
6395 Register temp4
, Label
* found
,
6396 IsBigInt isBigInt
) {
6397 // Inline implementation of |OrderedHashTable::lookup()|.
6399 MOZ_ASSERT_IF(isBigInt
== IsBigInt::No
, temp3
== InvalidReg
);
6400 MOZ_ASSERT_IF(isBigInt
== IsBigInt::No
, temp4
== InvalidReg
);
6404 if (isBigInt
== IsBigInt::No
) {
6405 branchTestBigInt(Assembler::NotEqual
, value
, &ok
);
6406 assumeUnreachable("Unexpected BigInt");
6407 } else if (isBigInt
== IsBigInt::Yes
) {
6408 branchTestBigInt(Assembler::Equal
, value
, &ok
);
6409 assumeUnreachable("Unexpected non-BigInt");
6415 PushRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
6418 moveStackPtrTo(temp2
);
6420 setupUnalignedABICall(temp1
);
6421 loadJSContext(temp1
);
6423 passABIArg(setOrMapObj
);
6427 if constexpr (std::is_same_v
<OrderedHashTable
, ValueSet
>) {
6429 void (*)(JSContext
*, SetObject
*, const Value
*, mozilla::HashNumber
);
6430 callWithABI
<Fn
, jit::AssertSetObjectHash
>();
6433 void (*)(JSContext
*, MapObject
*, const Value
*, mozilla::HashNumber
);
6434 callWithABI
<Fn
, jit::AssertMapObjectHash
>();
6438 PopRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
6441 // Load the |ValueSet| or |ValueMap|.
6442 static_assert(SetObject::getDataSlotOffset() ==
6443 MapObject::getDataSlotOffset());
6444 loadPrivate(Address(setOrMapObj
, SetObject::getDataSlotOffset()), temp1
);
6447 move32(hash
, entryTemp
);
6448 load32(Address(temp1
, OrderedHashTable::offsetOfImplHashShift()), temp2
);
6449 flexibleRshift32(temp2
, entryTemp
);
6451 loadPtr(Address(temp1
, OrderedHashTable::offsetOfImplHashTable()), temp2
);
6452 loadPtr(BaseIndex(temp2
, entryTemp
, ScalePointer
), entryTemp
);
6454 // Search for a match in this bucket.
6459 // Inline implementation of |HashableValue::operator==|.
6461 static_assert(OrderedHashTable::offsetOfImplDataElement() == 0,
6462 "offsetof(Data, element) is 0");
6463 auto keyAddr
= Address(entryTemp
, OrderedHashTable::offsetOfEntryKey());
6465 if (isBigInt
== IsBigInt::No
) {
6466 // Two HashableValues are equal if they have equal bits.
6467 branch64(Assembler::Equal
, keyAddr
, value
.toRegister64(), found
);
6470 auto key
= ValueOperand(temp1
);
6472 auto key
= ValueOperand(temp1
, temp2
);
6475 loadValue(keyAddr
, key
);
6477 // Two HashableValues are equal if they have equal bits.
6478 branch64(Assembler::Equal
, key
.toRegister64(), value
.toRegister64(),
6481 // BigInt values are considered equal if they represent the same
6482 // mathematical value.
6484 fallibleUnboxBigInt(key
, temp2
, &next
);
6485 if (isBigInt
== IsBigInt::Yes
) {
6486 unboxBigInt(value
, temp1
);
6488 fallibleUnboxBigInt(value
, temp1
, &next
);
6490 equalBigInts(temp1
, temp2
, temp3
, temp4
, temp1
, temp2
, &next
, &next
,
6496 loadPtr(Address(entryTemp
, OrderedHashTable::offsetOfImplDataChain()),
6499 branchTestPtr(Assembler::NonZero
, entryTemp
, entryTemp
, &loop
);
6502 void MacroAssembler::setObjectHas(Register setObj
, ValueOperand value
,
6503 Register hash
, Register result
,
6504 Register temp1
, Register temp2
,
6505 Register temp3
, Register temp4
,
6506 IsBigInt isBigInt
) {
6508 orderedHashTableLookup
<ValueSet
>(setObj
, value
, hash
, result
, temp1
, temp2
,
6509 temp3
, temp4
, &found
, isBigInt
);
6512 move32(Imm32(0), result
);
6516 move32(Imm32(1), result
);
6520 void MacroAssembler::mapObjectHas(Register mapObj
, ValueOperand value
,
6521 Register hash
, Register result
,
6522 Register temp1
, Register temp2
,
6523 Register temp3
, Register temp4
,
6524 IsBigInt isBigInt
) {
6526 orderedHashTableLookup
<ValueMap
>(mapObj
, value
, hash
, result
, temp1
, temp2
,
6527 temp3
, temp4
, &found
, isBigInt
);
6530 move32(Imm32(0), result
);
6534 move32(Imm32(1), result
);
6538 void MacroAssembler::mapObjectGet(Register mapObj
, ValueOperand value
,
6539 Register hash
, ValueOperand result
,
6540 Register temp1
, Register temp2
,
6541 Register temp3
, Register temp4
,
6542 Register temp5
, IsBigInt isBigInt
) {
6544 orderedHashTableLookup
<ValueMap
>(mapObj
, value
, hash
, temp1
, temp2
, temp3
,
6545 temp4
, temp5
, &found
, isBigInt
);
6548 moveValue(UndefinedValue(), result
);
6551 // |temp1| holds the found entry.
6553 loadValue(Address(temp1
, ValueMap::Entry::offsetOfValue()), result
);
6558 template <typename OrderedHashTable
>
6559 void MacroAssembler::loadOrderedHashTableCount(Register setOrMapObj
,
6561 // Inline implementation of |OrderedHashTable::count()|.
6563 // Load the |ValueSet| or |ValueMap|.
6564 static_assert(SetObject::getDataSlotOffset() ==
6565 MapObject::getDataSlotOffset());
6566 loadPrivate(Address(setOrMapObj
, SetObject::getDataSlotOffset()), result
);
6568 // Load the live count.
6569 load32(Address(result
, OrderedHashTable::offsetOfImplLiveCount()), result
);
6572 void MacroAssembler::loadSetObjectSize(Register setObj
, Register result
) {
6573 loadOrderedHashTableCount
<ValueSet
>(setObj
, result
);
6576 void MacroAssembler::loadMapObjectSize(Register mapObj
, Register result
) {
6577 loadOrderedHashTableCount
<ValueMap
>(mapObj
, result
);
6580 // Can't push large frames blindly on windows, so we must touch frame memory
6581 // incrementally, with no more than 4096 - 1 bytes between touches.
6583 // This is used across all platforms for simplicity.
6584 void MacroAssembler::touchFrameValues(Register numStackValues
,
6585 Register scratch1
, Register scratch2
) {
6586 const size_t FRAME_TOUCH_INCREMENT
= 2048;
6587 static_assert(FRAME_TOUCH_INCREMENT
< 4096 - 1,
6588 "Frame increment is too large");
6590 moveStackPtrTo(scratch2
);
6592 mov(numStackValues
, scratch1
);
6593 lshiftPtr(Imm32(3), scratch1
);
6595 // Note: this loop needs to update the stack pointer register because older
6596 // Linux kernels check the distance between the touched address and RSP.
6597 // See bug 1839669 comment 47.
6598 Label touchFrameLoop
;
6599 Label touchFrameLoopEnd
;
6600 bind(&touchFrameLoop
);
6601 branchSub32(Assembler::Signed
, Imm32(FRAME_TOUCH_INCREMENT
), scratch1
,
6602 &touchFrameLoopEnd
);
6603 subFromStackPtr(Imm32(FRAME_TOUCH_INCREMENT
));
6604 store32(Imm32(0), Address(getStackPointer(), 0));
6605 jump(&touchFrameLoop
);
6606 bind(&touchFrameLoopEnd
);
6609 moveToStackPtr(scratch2
);
#ifdef DEBUG
// Debug-only register-ownership tracking: entering a scope records the
// register in the assembler's tracked set.
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(
    MacroAssembler& masm, RegisterType reg)
    : RegisterType(reg), masm_(masm), released_(false) {
  masm.debugTrackedRegisters_.add(reg);
}

template AutoGenericRegisterScope<Register>::AutoGenericRegisterScope(
    MacroAssembler& masm, Register reg);
template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(
    MacroAssembler& masm, FloatRegister reg);
#endif  // DEBUG
#ifdef DEBUG
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope() {
  // NOTE(review): body reconstructed — an un-released scope must return its
  // register to the tracked set on destruction; confirm against upstream.
  if (!released_) {
    release();
  }
}

template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();
#endif  // DEBUG
#ifdef DEBUG
// Temporarily give up ownership of the register (it leaves the tracked set
// until |reacquire()| is called).
template <class RegisterType>
void AutoGenericRegisterScope<RegisterType>::release() {
  MOZ_ASSERT(!released_);
  released_ = true;
  const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
  masm_.debugTrackedRegisters_.take(reg);
}

template void AutoGenericRegisterScope<Register>::release();
template void AutoGenericRegisterScope<FloatRegister>::release();
#endif  // DEBUG
#ifdef DEBUG
// Re-take ownership of a previously |release()|d register.
template <class RegisterType>
void AutoGenericRegisterScope<RegisterType>::reacquire() {
  MOZ_ASSERT(released_);
  released_ = false;
  const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
  masm_.debugTrackedRegisters_.add(reg);
}

template void AutoGenericRegisterScope<Register>::reacquire();
template void AutoGenericRegisterScope<FloatRegister>::reacquire();
#endif  // DEBUG