/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/MacroAssembler-inl.h"

#include "mozilla/FloatingPoint.h"
#include "mozilla/Latin1.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/XorShift128PlusRNG.h"

#include <algorithm>
#include <utility>

#include "jit/AtomicOp.h"
#include "jit/AtomicOperations.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineJIT.h"
#include "jit/JitFrames.h"
#include "jit/JitOptions.h"
#include "jit/JitRuntime.h"
#include "jit/JitScript.h"
#include "jit/MoveEmitter.h"
#include "jit/ReciprocalMulConstants.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/Simulator.h"
#include "jit/VMFunctions.h"
#include "js/Conversions.h"
#include "js/friend/DOMProxy.h"  // JS::ExpandoAndGeneration
#include "js/GCAPI.h"            // JS::AutoCheckCannotGC
#include "js/ScalarType.h"       // js::Scalar::Type
#include "util/Unicode.h"
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BoundFunctionObject.h"
#include "vm/FunctionFlags.h"  // js::FunctionFlags
#include "vm/Iteration.h"
#include "vm/JSContext.h"
#include "vm/JSFunction.h"
#include "vm/StringType.h"
#include "vm/TypedArrayObject.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCodegenConstants.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmGcObject.h"
#include "wasm/WasmInstanceData.h"
#include "wasm/WasmMemory.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValidate.h"

#include "jit/TemplateObject-inl.h"
#include "vm/BytecodeUtil-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/JSObject-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::CheckedInt;

TrampolinePtr MacroAssembler::preBarrierTrampoline(MIRType type) {
  const JitRuntime* rt = runtime()->jitRuntime();
  return rt->preBarrier(type);
}

template <typename S, typename T>
static void StoreToTypedFloatArray(MacroAssembler& masm, int arrayType,
                                   const S& value, const T& dest) {
  switch (arrayType) {
    case Scalar::Float32:
      masm.storeFloat32(value, dest);
      break;
    case Scalar::Float64:
      masm.storeDouble(value, dest);
      break;
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}

void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const BaseIndex& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}

void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const Address& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}

template <typename S, typename T>
static void StoreToTypedBigIntArray(MacroAssembler& masm,
                                    Scalar::Type arrayType, const S& value,
                                    const T& dest) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));
  masm.store64(value, dest);
}

void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const BaseIndex& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}

void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const Address& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}

void MacroAssembler::boxUint32(Register source, ValueOperand dest,
                               Uint32Mode mode, Label* fail) {
  switch (mode) {
    // Fail if the value does not fit in an int32.
    case Uint32Mode::FailOnDouble: {
      branchTest32(Assembler::Signed, source, source, fail);
      tagValue(JSVAL_TYPE_INT32, source, dest);
      break;
    }
    case Uint32Mode::ForceDouble: {
      // Always convert the value to double.
      ScratchDoubleScope fpscratch(*this);
      convertUInt32ToDouble(source, fpscratch);
      boxDouble(fpscratch, dest, fpscratch);
      break;
    }
  }
}
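
// Illustrative note (added for exposition; not in the original source):
// given source = 0x7FFFFFFF, FailOnDouble tags the value as the int32
// 2147483647, while ForceDouble boxes it as the double 2147483647.0. Given
// source = 0x80000000 (2147483648, too large for an int32), the sign-bit
// test makes FailOnDouble branch to |fail|, while ForceDouble boxes
// 2147483648.0.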

template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        AnyRegister dest, Register temp,
                                        Label* fail) {
  switch (arrayType) {
    case Scalar::Int8:
      load8SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
      load8ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int16:
      load16SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint16:
      load16ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int32:
      load32(src, dest.gpr());
      break;
    case Scalar::Uint32:
      if (dest.isFloat()) {
        load32(src, temp);
        convertUInt32ToDouble(temp, dest.fpu());
      } else {
        load32(src, dest.gpr());

        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadUnboxedScalar to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
      }
      break;
    case Scalar::Float32:
      loadFloat32(src, dest.fpu());
      canonicalizeFloat(dest.fpu());
      break;
    case Scalar::Float64:
      loadDouble(src, dest.fpu());
      canonicalizeDouble(dest.fpu());
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}

template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail);

template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        const ValueOperand& dest,
                                        Uint32Mode uint32Mode, Register temp,
                                        Label* fail) {
  switch (arrayType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
                         InvalidReg, nullptr);
      tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
      break;
    case Scalar::Uint32:
      // Don't clobber dest when we could fail, instead use temp.
      load32(src, temp);
      boxUint32(temp, dest, uint32Mode, fail);
      break;
    case Scalar::Float32: {
      ScratchDoubleScope dscratch(*this);
      FloatRegister fscratch = dscratch.asSingle();
      loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
                         dest.scratchReg(), nullptr);
      convertFloat32ToDouble(fscratch, dscratch);
      boxDouble(dscratch, dest, dscratch);
      break;
    }
    case Scalar::Float64: {
      ScratchDoubleScope fpscratch(*this);
      loadFromTypedArray(arrayType, src, AnyRegister(fpscratch),
                         dest.scratchReg(), nullptr);
      boxDouble(fpscratch, dest, fpscratch);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}

template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 const ValueOperand& dest,
                                                 Uint32Mode uint32Mode,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 const ValueOperand& dest,
                                                 Uint32Mode uint32Mode,
                                                 Register temp, Label* fail);

template <typename T>
void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                              const T& src, Register bigInt,
                                              Register64 temp) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));

  load64(src, temp);
  initializeBigInt64(arrayType, bigInt, temp);
}

template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const Address& src,
                                                       Register bigInt,
                                                       Register64 temp);
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const BaseIndex& src,
                                                       Register bigInt,
                                                       Register64 temp);

// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
void MacroAssembler::checkAllocatorState(Register temp,
                                         gc::AllocKind allocKind, Label* fail) {
  // Don't execute the inline path if GC probes are built in.
#ifdef JS_GC_PROBES
  jump(fail);
#endif

#ifdef JS_GC_ZEAL
  // Don't execute the inline path if gc zeal or tracing are active.
  const uint32_t* ptrZealModeBits = runtime()->addressOfGCZealModeBits();
  branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
           fail);
#endif

  // If the zone has a realm with an object allocation metadata hook, emit a
  // guard for this. Note that IC stubs and some other trampolines can be shared
  // across realms, so we don't bake in a realm pointer.
  if (gc::IsObjectAllocKind(allocKind) &&
      realm()->zone()->hasRealmWithAllocMetadataBuilder()) {
    loadJSContext(temp);
    loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
    branchPtr(Assembler::NotEqual,
              Address(temp, Realm::offsetOfAllocationMetadataBuilder()),
              ImmWord(0), fail);
  }
}

bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
                                           gc::Heap initialHeap) {
  // Note that Ion elides barriers on writes to objects known to be in the
  // nursery, so any allocation that can be made into the nursery must be made
  // into the nursery, even if the nursery is disabled. At runtime these will
  // take the out-of-line path, which is required to insert a barrier for the
  // initializing writes.
  return IsNurseryAllocable(allocKind) && initialHeap != gc::Heap::Tenured;
}
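
// Illustrative note (added for exposition; not in the original source): the
// predicate above is deliberately independent of whether the nursery is
// currently enabled. A gc::Heap::Default allocation of a nursery-allocable
// kind still answers true while the nursery is disabled; the inline path
// then fails its nursery-end comparison and the out-of-line path performs
// the allocation along with the barrier bookkeeping described above.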

// Inline version of Nursery::allocateObject. If the object has dynamic slots,
// this fills in the slots_ pointer.
void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           size_t nDynamicSlots, Label* fail,
                                           const AllocSiteInput& allocSite) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // Currently the JIT does not nursery allocate foreground finalized
  // objects. This is allowed for objects that support this and have the
  // JSCLASS_SKIP_NURSERY_FINALIZE class flag set. It's hard to assert that here
  // though so disallow all foreground finalized objects for now.
  MOZ_ASSERT(!IsForegroundFinalized(allocKind));

  // We still need to allocate in the nursery, per the comment in
  // shouldNurseryAllocate; however, we need to insert into the
  // mallocedBuffers set, so bail to do the nursery allocation in the
  // interpreter.
  if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
    jump(fail);
    return;
  }

  // Check whether this allocation site needs pretenuring. This dynamic check
  // only happens for baseline code.
  if (allocSite.is<Register>()) {
    Register site = allocSite.as<Register>();
    branchTestPtr(Assembler::NonZero,
                  Address(site, gc::AllocSite::offsetOfScriptAndState()),
                  Imm32(gc::AllocSite::LONG_LIVED_BIT), fail);
  }

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.
  CompileZone* zone = realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);
  size_t totalSize = thingSize;
  if (nDynamicSlots) {
    totalSize += ObjectSlots::allocSize(nDynamicSlots);
  }
  MOZ_ASSERT(totalSize < INT32_MAX);
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::Object,
                      totalSize, allocSite);

  if (nDynamicSlots) {
    store32(Imm32(nDynamicSlots),
            Address(result, thingSize + ObjectSlots::offsetOfCapacity()));
    store32(
        Imm32(0),
        Address(result, thingSize + ObjectSlots::offsetOfDictionarySlotSpan()));
    store64(Imm64(ObjectSlots::NoUniqueIdInDynamicSlots),
            Address(result, thingSize + ObjectSlots::offsetOfMaybeUniqueId()));
    computeEffectiveAddress(
        Address(result, thingSize + ObjectSlots::offsetOfSlots()), temp);
    storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
  }
}
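
// Illustrative sketch (added for exposition; not in the original source) of
// the nursery layout produced above when nDynamicSlots != 0:
//
//   [cell header][object: thingSize bytes][ObjectSlots header][dynamic slots]
//                ^result                  ^result + thingSize
//
// The capacity, dictionary slot span, and unique-id words form the
// ObjectSlots header, and NativeObject::slots_ is pointed just past it, at
// the first dynamic slot.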

// Inlined version of FreeSpan::allocate. This does not fill in slots_.
void MacroAssembler::freeListAllocate(Register result, Register temp,
                                      gc::AllocKind allocKind, Label* fail) {
  CompileZone* zone = realm()->zone();
  int thingSize = int(gc::Arena::thingSize(allocKind));

  Label fallback;
  Label success;

  // Load the first and last offsets of |zone|'s free list for |allocKind|.
  // If there is no room remaining in the span, fall back to get the next one.
  gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
  branch32(Assembler::AboveOrEqual, result, temp, &fallback);

  // Bump the offset for the next allocation.
  add32(Imm32(thingSize), result);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  sub32(Imm32(thingSize), result);
  addPtr(temp, result);  // Turn the offset into a pointer.
  jump(&success);

  bind(&fallback);
  // If there are no free spans left, we bail to finish the allocation. The
  // interpreter will call the GC allocator to set up a new arena to allocate
  // from, after which we can resume allocating in the jit.
  branchTest32(Assembler::Zero, result, result, fail);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  addPtr(temp, result);  // Turn the offset into a pointer.
  Push(result);
  // Update the free list to point to the next span (which may be empty).
  load32(Address(result, 0), result);
  store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  Pop(result);

  bind(&success);

  if (runtime()->geckoProfiler().enabled()) {
    uint32_t* countAddress = zone->addressOfTenuredAllocCount();
    movePtr(ImmPtr(countAddress), temp);
    add32(Imm32(1), Address(temp, 0));
  }
}
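
// Illustrative note (added for exposition; not in the original source): the
// free span holds 16-bit offsets within the arena. With first = 256,
// last = 512, and thingSize = 32, the fast path above returns |span + 256|
// and bumps the stored first offset to 288. Once first >= last the fallback
// runs: a zero offset means no spans remain (bail to |fail|); otherwise the
// first word of the exhausted span links to the next span's offsets, which
// become the new list head.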

void MacroAssembler::callFreeStub(Register slots) {
  // This register must match the one in JitRuntime::generateFreeStub.
  const Register regSlots = CallTempReg0;

  push(regSlots);
  movePtr(slots, regSlots);
  call(runtime()->jitRuntime()->freeStub());
  pop(regSlots);
}

// Inlined equivalent of gc::AllocateObject, without failure case handling.
void MacroAssembler::allocateObject(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    uint32_t nDynamicSlots,
                                    gc::Heap initialHeap, Label* fail,
                                    const AllocSiteInput& allocSite) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  checkAllocatorState(temp, allocKind, fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::Heap::Default);
    return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail,
                                 allocSite);
  }

  // Fall back to calling into the VM to allocate objects in the tenured heap
  // that have dynamic slots.
  if (nDynamicSlots) {
    jump(fail);
    return;
  }

  return freeListAllocate(result, temp, allocKind, fail);
}

void MacroAssembler::createGCObject(Register obj, Register temp,
                                    const TemplateObject& templateObj,
                                    gc::Heap initialHeap, Label* fail,
                                    bool initContents /* = true */) {
  gc::AllocKind allocKind = templateObj.getAllocKind();
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  uint32_t nDynamicSlots = 0;
  if (templateObj.isNativeObject()) {
    const TemplateNativeObject& ntemplate =
        templateObj.asTemplateNativeObject();
    nDynamicSlots = ntemplate.numDynamicSlots();
  }

  allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
  initGCThing(obj, temp, templateObj, initContents);
}

void MacroAssembler::createPlainGCObject(
    Register result, Register shape, Register temp, Register temp2,
    uint32_t numFixedSlots, uint32_t numDynamicSlots, gc::AllocKind allocKind,
    gc::Heap initialHeap, Label* fail, const AllocSiteInput& allocSite,
    bool initContents /* = true */) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
  MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");

  // Allocate object.
  allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
                 allocSite);

  // Initialize shape field.
  storePtr(shape, Address(result, JSObject::offsetOfShape()));

  // If the object has dynamic slots, allocateObject will initialize
  // the slots field. If not, we must initialize it now.
  if (numDynamicSlots == 0) {
    storePtr(ImmPtr(emptyObjectSlots),
             Address(result, NativeObject::offsetOfSlots()));
  }

  // Initialize elements field.
  storePtr(ImmPtr(emptyObjectElements),
           Address(result, NativeObject::offsetOfElements()));

  // Initialize fixed slots.
  if (initContents) {
    fillSlotsWithUndefined(Address(result, NativeObject::getFixedSlotOffset(0)),
                           temp, 0, numFixedSlots);
  }

  // Initialize dynamic slots.
  if (numDynamicSlots > 0) {
    loadPtr(Address(result, NativeObject::offsetOfSlots()), temp2);
    fillSlotsWithUndefined(Address(temp2, 0), temp, 0, numDynamicSlots);
  }
}

void MacroAssembler::createArrayWithFixedElements(
    Register result, Register shape, Register temp, Register dynamicSlotsTemp,
    uint32_t arrayLength, uint32_t arrayCapacity, uint32_t numUsedDynamicSlots,
    uint32_t numDynamicSlots, gc::AllocKind allocKind, gc::Heap initialHeap,
    Label* fail, const AllocSiteInput& allocSite) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
  MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");
  MOZ_ASSERT(result != temp);

  // This only supports allocating arrays with fixed elements and does not
  // support any dynamic elements.
  MOZ_ASSERT(arrayCapacity >= arrayLength);
  MOZ_ASSERT(gc::GetGCKindSlots(allocKind) >=
             arrayCapacity + ObjectElements::VALUES_PER_HEADER);

  MOZ_ASSERT(numUsedDynamicSlots <= numDynamicSlots);

  // Allocate object.
  allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
                 allocSite);

  // Initialize shape field.
  storePtr(shape, Address(result, JSObject::offsetOfShape()));

  // If the object has dynamic slots, allocateObject will initialize
  // the slots field. If not, we must initialize it now.
  if (numDynamicSlots == 0) {
    storePtr(ImmPtr(emptyObjectSlots),
             Address(result, NativeObject::offsetOfSlots()));
  }

  // Initialize elements pointer for fixed (inline) elements.
  computeEffectiveAddress(
      Address(result, NativeObject::offsetOfFixedElements()), temp);
  storePtr(temp, Address(result, NativeObject::offsetOfElements()));

  // Initialize elements header.
  store32(Imm32(ObjectElements::FIXED),
          Address(temp, ObjectElements::offsetOfFlags()));
  store32(Imm32(0), Address(temp, ObjectElements::offsetOfInitializedLength()));
  store32(Imm32(arrayCapacity),
          Address(temp, ObjectElements::offsetOfCapacity()));
  store32(Imm32(arrayLength), Address(temp, ObjectElements::offsetOfLength()));

  // Initialize dynamic slots.
  if (numUsedDynamicSlots > 0) {
    MOZ_ASSERT(dynamicSlotsTemp != temp);
    MOZ_ASSERT(dynamicSlotsTemp != InvalidReg);
    loadPtr(Address(result, NativeObject::offsetOfSlots()), dynamicSlotsTemp);
    fillSlotsWithUndefined(Address(dynamicSlotsTemp, 0), temp, 0,
                           numUsedDynamicSlots);
  }
}

// Inline version of Nursery::allocateString.
void MacroAssembler::nurseryAllocateString(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);
  bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::String,
                      thingSize);
}

// Inline version of Nursery::allocateBigInt.
void MacroAssembler::nurseryAllocateBigInt(Register result, Register temp,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = realm()->zone();
  size_t thingSize = gc::Arena::thingSize(gc::AllocKind::BIGINT);

  bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::BigInt,
                      thingSize);
}

static bool IsNurseryAllocEnabled(CompileZone* zone, JS::TraceKind kind) {
  switch (kind) {
    case JS::TraceKind::Object:
      return zone->allocNurseryObjects();
    case JS::TraceKind::String:
      return zone->allocNurseryStrings();
    case JS::TraceKind::BigInt:
      return zone->allocNurseryBigInts();
    default:
      MOZ_CRASH("Bad nursery allocation kind");
  }
}

// This function handles nursery allocations for JS. For wasm, see
// MacroAssembler::wasmBumpPointerAllocate.
void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
                                         Label* fail, CompileZone* zone,
                                         JS::TraceKind traceKind, uint32_t size,
                                         const AllocSiteInput& allocSite) {
  MOZ_ASSERT(size >= gc::MinCellSize);

  uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
  MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  // We know statically whether nursery allocation is enabled for a particular
  // kind because we discard JIT code when this changes.
  if (!IsNurseryAllocEnabled(zone, traceKind)) {
    jump(fail);
    return;
  }

  // Use a relative 32 bit offset to the Nursery position_ to currentEnd_ to
  // avoid 64-bit immediate loads.
  void* posAddr = zone->addressOfNurseryPosition();
  int32_t endOffset = Nursery::offsetOfCurrentEndFromPosition();

  movePtr(ImmPtr(posAddr), temp);
  loadPtr(Address(temp, 0), result);
  addPtr(Imm32(totalSize), result);
  branchPtr(Assembler::Below, Address(temp, endOffset), result, fail);
  storePtr(result, Address(temp, 0));
  subPtr(Imm32(size), result);

  if (allocSite.is<gc::CatchAllAllocSite>()) {
    // No allocation site supplied. This is the case when called from Warp, or
    // from places that don't support pretenuring.
    gc::CatchAllAllocSite siteKind = allocSite.as<gc::CatchAllAllocSite>();
    gc::AllocSite* site = zone->catchAllAllocSite(traceKind, siteKind);
    uintptr_t headerWord = gc::NurseryCellHeader::MakeValue(site, traceKind);
    storePtr(ImmWord(headerWord),
             Address(result, -js::Nursery::nurseryCellHeaderSize()));

    if (traceKind != JS::TraceKind::Object ||
        runtime()->geckoProfiler().enabled()) {
      // Update the catch all allocation site, which is used to calculate
      // nursery allocation counts so we can determine whether to disable
      // nursery allocation of strings and bigints.
      uint32_t* countAddress = site->nurseryAllocCountAddress();
      CheckedInt<int32_t> counterOffset =
          (CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
           CheckedInt<uintptr_t>(uintptr_t(posAddr)))
              .toChecked<int32_t>();
      if (counterOffset.isValid()) {
        add32(Imm32(1), Address(temp, counterOffset.value()));
      } else {
        movePtr(ImmPtr(countAddress), temp);
        add32(Imm32(1), Address(temp, 0));
      }
    }
  } else {
    // Update allocation site and store pointer in the nursery cell header.
    // This is only used from baseline.
    Register site = allocSite.as<Register>();
    updateAllocSite(temp, result, zone, site);
    // See NurseryCellHeader::MakeValue.
    orPtr(Imm32(int32_t(traceKind)), site);
    storePtr(site, Address(result, -js::Nursery::nurseryCellHeaderSize()));
  }
}
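
// Illustrative note (added for exposition; not in the original source): for a
// 32-byte cell the fast path reserves 32 + Nursery::nurseryCellHeaderSize()
// bytes, branches to |fail| if the bumped position_ would pass currentEnd_,
// and leaves |result| pointing at the cell itself, with the AllocSite /
// TraceKind header word stored at |result - nurseryCellHeaderSize()|.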

// Update the allocation site in the same way as Nursery::allocateCell.
void MacroAssembler::updateAllocSite(Register temp, Register result,
                                     CompileZone* zone, Register site) {
  Label done;

  add32(Imm32(1), Address(site, gc::AllocSite::offsetOfNurseryAllocCount()));

  branch32(Assembler::NotEqual,
           Address(site, gc::AllocSite::offsetOfNurseryAllocCount()), Imm32(1),
           &done);

  loadPtr(AbsoluteAddress(zone->addressOfNurseryAllocatedSites()), temp);
  storePtr(temp, Address(site, gc::AllocSite::offsetOfNextNurseryAllocated()));
  storePtr(site, AbsoluteAddress(zone->addressOfNurseryAllocatedSites()));

  bind(&done);
}

// Inlined equivalent of gc::AllocateString, jumping to fail if nursery
// allocation requested but unsuccessful.
void MacroAssembler::allocateString(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    gc::Heap initialHeap, Label* fail) {
  MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
             allocKind == gc::AllocKind::FAT_INLINE_STRING);

  checkAllocatorState(temp, allocKind, fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::Heap::Default);
    return nurseryAllocateString(result, temp, allocKind, fail);
  }

  freeListAllocate(result, temp, allocKind, fail);
}

void MacroAssembler::newGCString(Register result, Register temp,
                                 gc::Heap initialHeap, Label* fail) {
  allocateString(result, temp, js::gc::AllocKind::STRING, initialHeap, fail);
}

void MacroAssembler::newGCFatInlineString(Register result, Register temp,
                                          gc::Heap initialHeap, Label* fail) {
  allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
                 initialHeap, fail);
}

void MacroAssembler::newGCBigInt(Register result, Register temp,
                                 gc::Heap initialHeap, Label* fail) {
  constexpr gc::AllocKind allocKind = gc::AllocKind::BIGINT;

  checkAllocatorState(temp, allocKind, fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::Heap::Default);
    return nurseryAllocateBigInt(result, temp, fail);
  }

  freeListAllocate(result, temp, allocKind, fail);
}

void MacroAssembler::copySlotsFromTemplate(
    Register obj, const TemplateNativeObject& templateObj, uint32_t start,
    uint32_t end) {
  uint32_t nfixed = std::min(templateObj.numFixedSlots(), end);
  for (unsigned i = start; i < nfixed; i++) {
    // Template objects are not exposed to script and therefore immutable.
    // However, regexp template objects are sometimes used directly (when
    // the cloning is not observable), and therefore we can end up with a
    // non-zero lastIndex. Detect this case here and just substitute 0, to
    // avoid racing with the main thread updating this slot.
    Value v;
    if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot()) {
      v = Int32Value(0);
    } else {
      v = templateObj.getSlot(i);
    }
    storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
  }
}

void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
                                                uint32_t start, uint32_t end,
                                                const Value& v) {
  MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));

  if (start >= end) {
    return;
  }

#ifdef JS_NUNBOX32
  // We only have a single spare register, so do the initialization as two
  // strided writes of the tag and body.
  Address addr = base;
  move32(Imm32(v.toNunboxPayload()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
    store32(temp, ToPayload(addr));
  }

  addr = base;
  move32(Imm32(v.toNunboxTag()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
    store32(temp, ToType(addr));
  }
#else
  moveValue(v, ValueOperand(temp));
  for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtr<Value>)) {
    storePtr(temp, base);
  }
#endif
}

void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
                                            uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
}

void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
                                                uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end,
                             MagicValue(JS_UNINITIALIZED_LEXICAL));
}

static std::pair<uint32_t, uint32_t> FindStartOfUninitializedAndUndefinedSlots(
    const TemplateNativeObject& templateObj, uint32_t nslots) {
  MOZ_ASSERT(nslots == templateObj.slotSpan());
  MOZ_ASSERT(nslots > 0);

  uint32_t first = nslots;
  for (; first != 0; --first) {
    if (templateObj.getSlot(first - 1) != UndefinedValue()) {
      break;
    }
  }

  uint32_t startOfUndefined = first;

  if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
    for (; first != 0; --first) {
      if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
        break;
      }
    }
  }

  uint32_t startOfUninitialized = first;

  return {startOfUninitialized, startOfUndefined};
}
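
// Illustrative example (added for exposition; not in the original source):
// for a template object whose slots are
//   [boxed value, uninitialized-lexical, uninitialized-lexical,
//    undefined, undefined]
// the backwards scans above yield {startOfUninitialized = 1,
// startOfUndefined = 3}.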

void MacroAssembler::initTypedArraySlots(
    Register obj, Register temp, Register lengthReg, LiveRegisterSet liveRegs,
    Label* fail, FixedLengthTypedArrayObject* templateObj,
    TypedArrayLength lengthKind) {
  MOZ_ASSERT(!templateObj->hasBuffer());

  constexpr size_t dataSlotOffset = ArrayBufferViewObject::dataOffset();
  constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);

  static_assert(
      FixedLengthTypedArrayObject::FIXED_DATA_START ==
          FixedLengthTypedArrayObject::DATA_SLOT + 1,
      "fixed inline element data assumed to begin after the data slot");

  static_assert(
      FixedLengthTypedArrayObject::INLINE_BUFFER_LIMIT ==
          JSObject::MAX_BYTE_SIZE - dataOffset,
      "typed array inline buffer is limited by the maximum object byte size");

  // Initialise data elements to zero.
  size_t length = templateObj->length();
  MOZ_ASSERT(length <= INT32_MAX,
             "Template objects are only created for int32 lengths");
  size_t nbytes = length * templateObj->bytesPerElement();

  if (lengthKind == TypedArrayLength::Fixed &&
      nbytes <= FixedLengthTypedArrayObject::INLINE_BUFFER_LIMIT) {
    MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());

    // Store data elements inside the remaining JSObject slots.
    computeEffectiveAddress(Address(obj, dataOffset), temp);
    storePrivateValue(temp, Address(obj, dataSlotOffset));

    // Write enough zero pointers into fixed data to zero every
    // element. (This zeroes past the end of a byte count that's
    // not a multiple of pointer size. That's okay, because fixed
    // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
    // and we won't inline unless the desired memory fits in that
    // space.)
    static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");

    size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
    for (size_t i = 0; i < numZeroPointers; i++) {
      storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
    }
    MOZ_ASSERT(nbytes > 0, "Zero-length TypedArrays need ZeroLengthArrayData");
  } else {
    if (lengthKind == TypedArrayLength::Fixed) {
      move32(Imm32(length), lengthReg);
    }

    // Ensure volatile |obj| is saved across the call.
    if (obj.volatile_()) {
      liveRegs.addUnchecked(obj);
    }

    // Allocate a buffer on the heap to store the data elements.
    PushRegsInMask(liveRegs);
    using Fn = void (*)(JSContext* cx, TypedArrayObject* obj, int32_t count);
    setupUnalignedABICall(temp);
    loadJSContext(temp);
    passABIArg(temp);
    passABIArg(obj);
    passABIArg(lengthReg);
    callWithABI<Fn, AllocateAndInitTypedArrayBuffer>();
    PopRegsInMask(liveRegs);

    // Fail when data slot is UndefinedValue.
    branchTestUndefined(Assembler::Equal, Address(obj, dataSlotOffset), fail);
  }
}
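
// Illustrative note (added for exposition; not in the original source): the
// inline-zeroing loop above rounds the byte count up to whole pointer-sized
// words. On a 64-bit target, nbytes = 5 gives
// numZeroPointers = ((5 + 7) & ~0x7) / 8 = 1, so one zero word clears all
// five data bytes plus three padding bytes that still lie inside the inline
// buffer.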

void MacroAssembler::initGCSlots(Register obj, Register temp,
                                 const TemplateNativeObject& templateObj) {
  MOZ_ASSERT(!templateObj.isArrayObject());

  // Slots of non-array objects are required to be initialized.
  // Use the values currently in the template object.
  uint32_t nslots = templateObj.slotSpan();
  if (nslots == 0) {
    return;
  }

  uint32_t nfixed = templateObj.numUsedFixedSlots();
  uint32_t ndynamic = templateObj.numDynamicSlots();

  // Attempt to group slot writes such that we minimize the amount of
  // duplicated data we need to embed in code and load into registers. In
  // general, most template object slots will be undefined except for any
  // reserved slots. Since reserved slots come first, we split the object
  // logically into independent non-UndefinedValue writes to the head and
  // duplicated writes of UndefinedValue to the tail. For the majority of
  // objects, the "tail" will be the entire slot range.
  //
  // The template object may be a CallObject, in which case we need to
  // account for uninitialized lexical slots as well as undefined
  // slots. Uninitialized lexical slots appears in CallObjects if the function
  // has parameter expressions, in which case closed over parameters have
  // TDZ. Uninitialized slots come before undefined slots in CallObjects.
  auto [startOfUninitialized, startOfUndefined] =
      FindStartOfUninitializedAndUndefinedSlots(templateObj, nslots);
  MOZ_ASSERT(startOfUninitialized <= nfixed);  // Reserved slots must be fixed.
  MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
  MOZ_ASSERT_IF(!templateObj.isCallObject() &&
                    !templateObj.isBlockLexicalEnvironmentObject(),
                startOfUninitialized == startOfUndefined);

  // Copy over any preserved reserved slots.
  copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);

  // Fill the rest of the fixed slots with undefined and uninitialized.
  size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
  fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
                             std::min(startOfUndefined, nfixed));

  if (startOfUndefined < nfixed) {
    offset = NativeObject::getFixedSlotOffset(startOfUndefined);
    fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
                           nfixed);
  }

  if (ndynamic) {
    // We are short one register to do this elegantly. Borrow the obj
    // register briefly for our slots base address.
    push(obj);
    loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);

    // Fill uninitialized slots if necessary. Otherwise initialize all
    // slots to undefined.
    if (startOfUndefined > nfixed) {
      MOZ_ASSERT(startOfUninitialized != startOfUndefined);
      fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
                                 startOfUndefined - nfixed);
      size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
      fillSlotsWithUndefined(Address(obj, offset), temp,
                             startOfUndefined - nfixed, ndynamic);
    } else {
      fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
    }

    pop(obj);
  }
}

void MacroAssembler::initGCThing(Register obj, Register temp,
                                 const TemplateObject& templateObj,
                                 bool initContents) {
  // Fast initialization of an empty object returned by allocateObject().

  storePtr(ImmGCPtr(templateObj.shape()),
           Address(obj, JSObject::offsetOfShape()));

  if (templateObj.isNativeObject()) {
    const TemplateNativeObject& ntemplate =
        templateObj.asTemplateNativeObject();
    MOZ_ASSERT(!ntemplate.hasDynamicElements());

    // If the object has dynamic slots, the slots member has already been
    // filled in.
    if (ntemplate.numDynamicSlots() == 0) {
      storePtr(ImmPtr(emptyObjectSlots),
               Address(obj, NativeObject::offsetOfSlots()));
    }

    if (ntemplate.isArrayObject()) {
      // Can't skip initializing reserved slots.
      MOZ_ASSERT(initContents);

      int elementsOffset = NativeObject::offsetOfFixedElements();

      computeEffectiveAddress(Address(obj, elementsOffset), temp);
      storePtr(temp, Address(obj, NativeObject::offsetOfElements()));

      // Fill in the elements header.
      store32(
          Imm32(ntemplate.getDenseCapacity()),
          Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
      store32(Imm32(ntemplate.getDenseInitializedLength()),
              Address(obj, elementsOffset +
                               ObjectElements::offsetOfInitializedLength()));
      store32(Imm32(ntemplate.getArrayLength()),
              Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
      store32(Imm32(ObjectElements::FIXED),
              Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
    } else if (ntemplate.isArgumentsObject()) {
      // The caller will initialize the reserved slots.
      MOZ_ASSERT(!initContents);
      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));
    } else {
      // If the target type could be a TypedArray that maps shared memory
      // then this would need to store emptyObjectElementsShared in that case.
      MOZ_ASSERT(!ntemplate.isSharedMemory());

      // Can't skip initializing reserved slots.
      MOZ_ASSERT(initContents);

      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));

      initGCSlots(obj, temp, ntemplate);
    }
  } else {
    MOZ_CRASH("Unknown object");
  }

#ifdef JS_GC_PROBES
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  regs.takeUnchecked(obj);
  Register temp2 = regs.takeAnyGeneral();

  using Fn = void (*)(JSObject* obj);
  setupUnalignedABICall(temp2);
  passABIArg(obj);
  callWithABI<Fn, TraceCreateObject>();

  PopRegsInMask(save);
#endif
}

static size_t StringCharsByteLength(const JSLinearString* linear) {
  CharEncoding encoding =
      linear->hasLatin1Chars() ? CharEncoding::Latin1 : CharEncoding::TwoByte;
  size_t encodingSize = encoding == CharEncoding::Latin1
                            ? sizeof(JS::Latin1Char)
                            : sizeof(char16_t);
  return linear->length() * encodingSize;
}

bool MacroAssembler::canCompareStringCharsInline(const JSLinearString* linear) {
  // Limit the number of inline instructions used for character comparisons. Use
  // the same instruction limit for both encodings, i.e. two-byte uses half the
  // limit of Latin-1 strings.
  constexpr size_t ByteLengthCompareCutoff = 32;

  size_t byteLength = StringCharsByteLength(linear);
  return 0 < byteLength && byteLength <= ByteLengthCompareCutoff;
}
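
// Illustrative note (added for exposition; not in the original source): with
// the 32-byte cutoff above, search strings of up to 32 Latin-1 characters or
// 16 two-byte characters are compared inline; anything longer (or empty)
// takes the out-of-line path.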

template <typename T, typename CharT>
static inline T CopyCharacters(const CharT* chars) {
  T value = 0;
  std::memcpy(&value, chars, sizeof(T));
  return value;
}
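
// Illustrative note (added for exposition; not in the original source):
// the memcpy form reads several characters as one integer without unaligned
// or strict-aliasing UB, and optimizing compilers lower it to a single load.
// On a little-endian target, CopyCharacters<uint32_t> over the Latin-1 chars
// "exam" produces 0x6D617865.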

template <typename T>
static inline T CopyCharacters(const JSLinearString* linear, size_t index) {
  JS::AutoCheckCannotGC nogc;

  if (linear->hasLatin1Chars()) {
    MOZ_ASSERT(index + sizeof(T) / sizeof(JS::Latin1Char) <= linear->length());
    return CopyCharacters<T>(linear->latin1Chars(nogc) + index);
  }

  MOZ_ASSERT(sizeof(T) >= sizeof(char16_t));
  MOZ_ASSERT(index + sizeof(T) / sizeof(char16_t) <= linear->length());
  return CopyCharacters<T>(linear->twoByteChars(nogc) + index);
}

void MacroAssembler::branchIfNotStringCharsEquals(Register stringChars,
                                                  const JSLinearString* linear,
                                                  Label* label) {
  CharEncoding encoding =
      linear->hasLatin1Chars() ? CharEncoding::Latin1 : CharEncoding::TwoByte;
  size_t encodingSize = encoding == CharEncoding::Latin1
                            ? sizeof(JS::Latin1Char)
                            : sizeof(char16_t);
  size_t byteLength = StringCharsByteLength(linear);

  size_t pos = 0;
  for (size_t stride : {8, 4, 2, 1}) {
    while (byteLength >= stride) {
      Address addr(stringChars, pos * encodingSize);
      switch (stride) {
        case 8: {
          auto x = CopyCharacters<uint64_t>(linear, pos);
          branch64(Assembler::NotEqual, addr, Imm64(x), label);
          break;
        }
        case 4: {
          auto x = CopyCharacters<uint32_t>(linear, pos);
          branch32(Assembler::NotEqual, addr, Imm32(x), label);
          break;
        }
        case 2: {
          auto x = CopyCharacters<uint16_t>(linear, pos);
          branch16(Assembler::NotEqual, addr, Imm32(x), label);
          break;
        }
        case 1: {
          auto x = CopyCharacters<uint8_t>(linear, pos);
          branch8(Assembler::NotEqual, addr, Imm32(x), label);
          break;
        }
      }

      byteLength -= stride;
      pos += stride / encodingSize;
    }

    // Prefer a single comparison for trailing bytes instead of doing
    // multiple consecutive comparisons.
    //
    // For example when comparing against the string "example", emit two
    // four-byte comparisons against "exam" and "mple" instead of doing
    // three comparisons against "exam", "pl", and finally "e".
    if (pos > 0 && byteLength > stride / 2) {
      MOZ_ASSERT(stride == 8 || stride == 4);

      size_t prev = pos - (stride - byteLength) / encodingSize;
      Address addr(stringChars, prev * encodingSize);
      switch (stride) {
        case 8: {
          auto x = CopyCharacters<uint64_t>(linear, prev);
          branch64(Assembler::NotEqual, addr, Imm64(x), label);
          break;
        }
        case 4: {
          auto x = CopyCharacters<uint32_t>(linear, prev);
          branch32(Assembler::NotEqual, addr, Imm32(x), label);
          break;
        }
      }

      // Break from the loop, because we've finished the complete string.
      break;
    }
  }
}
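
// Illustrative walk-through (added for exposition; not in the original
// source): for the Latin-1 string "example" (7 bytes) the stride-8 pass is
// skipped, the stride-4 pass emits one branch32 against "exam" at offset 0,
// and the remaining 3 bytes satisfy |byteLength > stride / 2|, so a single
// overlapping branch32 against "mple" is emitted at offset 3 instead of
// separate two- and one-byte comparisons.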

void MacroAssembler::loadStringCharsForCompare(Register input,
                                               const JSLinearString* linear,
                                               Register stringChars,
                                               Label* fail) {
  CharEncoding encoding =
      linear->hasLatin1Chars() ? CharEncoding::Latin1 : CharEncoding::TwoByte;

  // Take the slow path when the string is a rope or has a different character
  // representation.
  branchIfRope(input, fail);
  if (encoding == CharEncoding::Latin1) {
    branchTwoByteString(input, fail);
  } else {
    JS::AutoCheckCannotGC nogc;
    if (mozilla::IsUtf16Latin1(linear->twoByteRange(nogc))) {
      branchLatin1String(input, fail);
    } else {
      // This case was already handled in the caller.
#ifdef DEBUG
      Label ok;
      branchTwoByteString(input, &ok);
      assumeUnreachable("Unexpected Latin-1 string");
      bind(&ok);
#endif
    }
  }

#ifdef DEBUG
  {
    size_t length = linear->length();
    MOZ_ASSERT(length > 0);

    Label ok;
    branch32(Assembler::AboveOrEqual,
             Address(input, JSString::offsetOfLength()), Imm32(length), &ok);
    assumeUnreachable("Input mustn't be smaller than search string");
    bind(&ok);
  }
#endif

  // Load the input string's characters.
  loadStringChars(input, stringChars, encoding);
}

void MacroAssembler::compareStringChars(JSOp op, Register stringChars,
                                        const JSLinearString* linear,
                                        Register output) {
  MOZ_ASSERT(IsEqualityOp(op));

  size_t byteLength = StringCharsByteLength(linear);

  // Prefer a single compare-and-set instruction if possible.
  if (byteLength == 1 || byteLength == 2 || byteLength == 4 ||
      byteLength == 8) {
    auto cond = JSOpToCondition(op, /* isSigned = */ false);

    Address addr(stringChars, 0);
    switch (byteLength) {
      case 8: {
        auto x = CopyCharacters<uint64_t>(linear, 0);
        cmp64Set(cond, addr, Imm64(x), output);
        break;
      }
      case 4: {
        auto x = CopyCharacters<uint32_t>(linear, 0);
        cmp32Set(cond, addr, Imm32(x), output);
        break;
      }
      case 2: {
        auto x = CopyCharacters<uint16_t>(linear, 0);
        cmp16Set(cond, addr, Imm32(x), output);
        break;
      }
      case 1: {
        auto x = CopyCharacters<uint8_t>(linear, 0);
        cmp8Set(cond, addr, Imm32(x), output);
        break;
      }
    }
  } else {
    Label setNotEqualResult;
    branchIfNotStringCharsEquals(stringChars, linear, &setNotEqualResult);

    // Falls through if both strings are equal.

    Label done;
    move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
    jump(&done);

    bind(&setNotEqualResult);
    move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);

    bind(&done);
  }
}

void MacroAssembler::compareStrings(JSOp op, Register left, Register right,
                                    Register result, Label* fail) {
  MOZ_ASSERT(left != result);
  MOZ_ASSERT(right != result);
  MOZ_ASSERT(IsEqualityOp(op) || IsRelationalOp(op));

  Label notPointerEqual;
  // If operands point to the same instance, the strings are trivially equal.
  branchPtr(Assembler::NotEqual, left, right,
            IsEqualityOp(op) ? &notPointerEqual : fail);
  move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
               op == JSOp::Ge),
         result);

  if (IsEqualityOp(op)) {
    Label done;
    jump(&done);

    bind(&notPointerEqual);

    Label leftIsNotAtom;
    Label setNotEqualResult;
    // Atoms cannot be equal to each other if they point to different strings.
    Imm32 atomBit(JSString::ATOM_BIT);
    branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()),
                 atomBit, &leftIsNotAtom);
    branchTest32(Assembler::NonZero, Address(right, JSString::offsetOfFlags()),
                 atomBit, &setNotEqualResult);

    bind(&leftIsNotAtom);
    // Strings of different length can never be equal.
    loadStringLength(left, result);
    branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()),
             result, fail);

    bind(&setNotEqualResult);
    move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), result);

    bind(&done);
  }
}

void MacroAssembler::loadStringChars(Register str, Register dest,
                                     CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    if (encoding == CharEncoding::Latin1) {
      // If the string is a rope, zero the |str| register. The code below
      // depends on str->flags so this should block speculative execution.
      movePtr(ImmWord(0), dest);
      test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::LINEAR_BIT), dest, str);
    } else {
      // If we're loading TwoByte chars, there's an additional risk:
      // if the string has Latin1 chars, we could read out-of-bounds. To
      // prevent this, we check both the Linear and Latin1 bits. We don't
      // have a scratch register, so we use these flags also to block
      // speculative execution, similar to the use of 0 above.
      MOZ_ASSERT(encoding == CharEncoding::TwoByte);
      static constexpr uint32_t Mask =
          JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
      static_assert(Mask < 1024,
                    "Mask should be a small, near-null value to ensure we "
                    "block speculative execution when it's used as string "
                    "pointer");
      move32(Imm32(Mask), dest);
      and32(Address(str, JSString::offsetOfFlags()), dest);
      cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT), dest,
                   str);
    }
  }

  // Load the inline chars.
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);

  // If it's not an inline string, load the non-inline chars. Use a
  // conditional move to prevent speculative execution.
  test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                Imm32(JSString::INLINE_CHARS_BIT),
                Address(str, JSString::offsetOfNonInlineChars()), dest);
}

void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
                                              CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is a rope, has inline chars, or has a different
    // character encoding, set str to a near-null value to prevent
    // speculative execution below (when reading str->nonInlineChars).

    static constexpr uint32_t Mask = JSString::LINEAR_BIT |
                                     JSString::INLINE_CHARS_BIT |
                                     JSString::LATIN1_CHARS_BIT;
    static_assert(Mask < 1024,
                  "Mask should be a small, near-null value to ensure we "
                  "block speculative execution when it's used as string "
                  "pointer");

    uint32_t expectedBits = JSString::LINEAR_BIT;
    if (encoding == CharEncoding::Latin1) {
      expectedBits |= JSString::LATIN1_CHARS_BIT;
    }

    move32(Imm32(Mask), dest);
    and32(Address(str, JSString::offsetOfFlags()), dest);

    cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits), dest, str);
  }

  loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
}

void MacroAssembler::storeNonInlineStringChars(Register chars, Register str) {
  MOZ_ASSERT(chars != str);
  storePtr(chars, Address(str, JSString::offsetOfNonInlineChars()));
}

void MacroAssembler::loadInlineStringCharsForStore(Register str,
                                                   Register dest) {
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);
}

void MacroAssembler::loadInlineStringChars(Register str, Register dest,
                                           CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Making this Spectre-safe is a bit complicated: using
    // computeEffectiveAddress and then zeroing the output register if
    // non-inline is not sufficient: when the index is very large, it would
    // allow reading |nullptr + index|. Just fall back to loadStringChars
    // for now.
    loadStringChars(str, dest, encoding);
  } else {
    computeEffectiveAddress(
        Address(str, JSInlineString::offsetOfInlineStorage()), dest);
  }
}

void MacroAssembler::loadRopeLeftChild(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Zero the output register if the input was not a rope.
    movePtr(ImmWord(0), dest);
    test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::LINEAR_BIT),
                  Address(str, JSRope::offsetOfLeft()), dest);
  } else {
    loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
  }
}

void MacroAssembler::loadRopeRightChild(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Zero the output register if the input was not a rope.
    movePtr(ImmWord(0), dest);
    test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::LINEAR_BIT),
                  Address(str, JSRope::offsetOfRight()), dest);
  } else {
    loadPtr(Address(str, JSRope::offsetOfRight()), dest);
  }
}

void MacroAssembler::storeRopeChildren(Register left, Register right,
                                       Register str) {
  storePtr(left, Address(str, JSRope::offsetOfLeft()));
  storePtr(right, Address(str, JSRope::offsetOfRight()));
}

void MacroAssembler::loadDependentStringBase(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is not a dependent string, zero the |str| register.
    // The code below loads str->base so this should block speculative
    // execution.
    movePtr(ImmWord(0), dest);
    test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::DEPENDENT_BIT), dest, str);
  }

  loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
}

void MacroAssembler::storeDependentStringBase(Register base, Register str) {
  storePtr(base, Address(str, JSDependentString::offsetOfBase()));
}

void MacroAssembler::branchIfMaybeSplitSurrogatePair(Register leftChild,
                                                     Register index,
                                                     Register scratch,
                                                     Label* maybeSplit,
                                                     Label* notSplit) {
  // If |index| is the last character of the left child and the left child
  // is a two-byte string, it's possible that a surrogate pair is split
  // between the left and right child of a rope.

  // Can't be a split surrogate when the left child is a Latin-1 string.
  branchLatin1String(leftChild, notSplit);

  // Can't be a split surrogate when |index + 1| is in the left child.
  add32(Imm32(1), index, scratch);
  branch32(Assembler::Above, Address(leftChild, JSString::offsetOfLength()),
           scratch, notSplit);

  // Load the character at |index|.
  loadStringChars(leftChild, scratch, CharEncoding::TwoByte);
  loadChar(scratch, index, scratch, CharEncoding::TwoByte);

  // Jump to |maybeSplit| if the last character is a lead surrogate.
  branchIfLeadSurrogate(scratch, scratch, maybeSplit);
}

void MacroAssembler::loadRopeChild(CharKind kind, Register str, Register index,
                                   Register output, Register maybeScratch,
                                   Label* isLinear, Label* splitSurrogate) {
  // This follows JSString::getChar.
  branchIfNotRope(str, isLinear);

  loadRopeLeftChild(str, output);

  Label loadedChild;
  if (kind == CharKind::CharCode) {
    // Check if |index| is contained in the left child.
    branch32(Assembler::Above, Address(output, JSString::offsetOfLength()),
             index, &loadedChild);
  } else {
    MOZ_ASSERT(maybeScratch != InvalidReg);

    // Check if |index| is contained in the left child.
    Label loadRight;
    branch32(Assembler::BelowOrEqual,
             Address(output, JSString::offsetOfLength()), index, &loadRight);

    // Handle possible split surrogate pairs.
    branchIfMaybeSplitSurrogatePair(output, index, maybeScratch,
                                    splitSurrogate, &loadedChild);
    jump(&loadedChild);

    bind(&loadRight);
  }

  // The index must be in the rightChild.
  loadRopeRightChild(str, output);

  bind(&loadedChild);
}

void MacroAssembler::branchIfCanLoadStringChar(CharKind kind, Register str,
                                               Register index, Register scratch,
                                               Register maybeScratch,
                                               Label* label) {
  Label splitSurrogate;
  loadRopeChild(kind, str, index, scratch, maybeScratch, label,
                &splitSurrogate);

  // Branch if the left resp. right side is linear.
  branchIfNotRope(scratch, label);

  if (kind == CharKind::CodePoint) {
    bind(&splitSurrogate);
  }
}

void MacroAssembler::branchIfNotCanLoadStringChar(CharKind kind, Register str,
                                                  Register index,
                                                  Register scratch,
                                                  Register maybeScratch,
                                                  Label* label) {
  Label done;
  loadRopeChild(kind, str, index, scratch, maybeScratch, &done, label);

  // Branch if the left or right side is another rope.
  branchIfRope(scratch, label);

  bind(&done);
}

void MacroAssembler::loadStringChar(CharKind kind, Register str, Register index,
                                    Register output, Register scratch1,
                                    Register scratch2, Label* fail) {
  MOZ_ASSERT(str != output);
  MOZ_ASSERT(str != index);
  MOZ_ASSERT(index != output);
  MOZ_ASSERT_IF(kind == CharKind::CodePoint, index != scratch1);
  MOZ_ASSERT(output != scratch1);
  MOZ_ASSERT(output != scratch2);

  // Use scratch1 for the index (adjusted below).
  if (index != scratch1) {
    move32(index, scratch1);
  }
  movePtr(str, output);

  // This follows JSString::getChar.
  Label notRope;
  branchIfNotRope(str, &notRope);

  loadRopeLeftChild(str, output);

  // Check if the index is contained in the leftChild.
  Label loadedChild, notInLeft;
  spectreBoundsCheck32(scratch1, Address(output, JSString::offsetOfLength()),
                       scratch2, &notInLeft);
  if (kind == CharKind::CodePoint) {
    branchIfMaybeSplitSurrogatePair(output, scratch1, scratch2, fail,
                                    &loadedChild);
  }
  jump(&loadedChild);

  // The index must be in the rightChild.
  // index -= rope->leftChild()->length()
  bind(&notInLeft);
  sub32(Address(output, JSString::offsetOfLength()), scratch1);
  loadRopeRightChild(str, output);

  // If the left or right side is another rope, give up.
  bind(&loadedChild);
  branchIfRope(output, fail);

  bind(&notRope);

  Label isLatin1, done;
  branchLatin1String(output, &isLatin1);

  loadStringChars(output, scratch2, CharEncoding::TwoByte);

  if (kind == CharKind::CharCode) {
    loadChar(scratch2, scratch1, output, CharEncoding::TwoByte);
  } else {
    // Load the first character.
    addToCharPtr(scratch2, scratch1, CharEncoding::TwoByte);
    loadChar(Address(scratch2, 0), output, CharEncoding::TwoByte);

    // If the first character isn't a lead surrogate, go to |done|.
    branchIfNotLeadSurrogate(output, &done);

    // branchIfMaybeSplitSurrogatePair ensures that the surrogate pair can't
    // split between two rope children. So if |index + 1 < str.length|, then
    // |index| and |index + 1| are in the same rope child.
    //
    // NB: We use the non-adjusted |index| and |str| inputs, because |output|
    // was overwritten and no longer contains the rope child.

    // If |index + 1| is a valid index into |str|.
    add32(Imm32(1), index, scratch1);
    spectreBoundsCheck32(scratch1, Address(str, JSString::offsetOfLength()),
                         scratch2, &done);

    // Then load the next character at |scratch2 + sizeof(char16_t)|.
    loadChar(Address(scratch2, sizeof(char16_t)), scratch1,
             CharEncoding::TwoByte);

    // If the next character isn't a trail surrogate, go to |done|.
    branchIfNotTrailSurrogate(scratch1, scratch2, &done);

    // Inlined unicode::UTF16Decode(char16_t, char16_t).
    lshift32(Imm32(10), output);
    add32(Imm32(unicode::NonBMPMin - (unicode::LeadSurrogateMin << 10) -
                unicode::TrailSurrogateMin),
          scratch1);
    add32(scratch1, output);
  }

  jump(&done);

  bind(&isLatin1);
  loadStringChars(output, scratch2, CharEncoding::Latin1);
  loadChar(scratch2, scratch1, output, CharEncoding::Latin1);

  bind(&done);
}

void MacroAssembler::loadStringChar(Register str, int32_t index,
                                    Register output, Register scratch1,
                                    Register scratch2, Label* fail) {
  MOZ_ASSERT(str != output);
  MOZ_ASSERT(output != scratch1);
  MOZ_ASSERT(output != scratch2);

  if (index == 0) {
    movePtr(str, scratch1);

    // This follows JSString::getChar.
    Label notRope;
    branchIfNotRope(str, &notRope);

    loadRopeLeftChild(str, scratch1);

    // Rope children can't be empty, so the index can't be in the right side.

    // If the left side is another rope, give up.
    branchIfRope(scratch1, fail);

    bind(&notRope);

    Label isLatin1, done;
    branchLatin1String(scratch1, &isLatin1);
    loadStringChars(scratch1, scratch2, CharEncoding::TwoByte);
    loadChar(Address(scratch2, 0), output, CharEncoding::TwoByte);
    jump(&done);

    bind(&isLatin1);
    loadStringChars(scratch1, scratch2, CharEncoding::Latin1);
    loadChar(Address(scratch2, 0), output, CharEncoding::Latin1);

    bind(&done);
  } else {
    move32(Imm32(index), scratch1);
    loadStringChar(str, scratch1, output, scratch1, scratch2, fail);
  }
}

void MacroAssembler::loadStringIndexValue(Register str, Register dest,
                                          Label* fail) {
  MOZ_ASSERT(str != dest);

  load32(Address(str, JSString::offsetOfFlags()), dest);

  // Does not have a cached index value.
  branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);

  // Extract the index.
  rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
}

void MacroAssembler::loadChar(Register chars, Register index, Register dest,
                              CharEncoding encoding, int32_t offset /* = 0 */) {
  if (encoding == CharEncoding::Latin1) {
    loadChar(BaseIndex(chars, index, TimesOne, offset), dest, encoding);
  } else {
    loadChar(BaseIndex(chars, index, TimesTwo, offset), dest, encoding);
  }
}

void MacroAssembler::addToCharPtr(Register chars, Register index,
                                  CharEncoding encoding) {
  if (encoding == CharEncoding::Latin1) {
    static_assert(sizeof(char) == 1,
                  "Latin-1 string index shouldn't need scaling");
    addPtr(index, chars);
  } else {
    computeEffectiveAddress(BaseIndex(chars, index, TimesTwo), chars);
  }
}

void MacroAssembler::branchIfNotLeadSurrogate(Register src, Label* label) {
  branch32(Assembler::Below, src, Imm32(unicode::LeadSurrogateMin), label);
  branch32(Assembler::Above, src, Imm32(unicode::LeadSurrogateMax), label);
}
void MacroAssembler::branchSurrogate(Assembler::Condition cond, Register src,
                                     Register scratch, Label* label,
                                     SurrogateChar surrogateChar) {
  // For TrailSurrogateMin ≤ x ≤ TrailSurrogateMax and
  // LeadSurrogateMin ≤ x ≤ LeadSurrogateMax, the following equations hold.
  //
  //    SurrogateMin ≤ x ≤ SurrogateMax
  // <> SurrogateMin ≤ x ≤ SurrogateMin + 2^10 - 1
  // <> ((x - SurrogateMin) >>> 10) = 0, where >>> is an unsigned shift.
  //
  // See Hacker's Delight, section 4-1 for details.
  //
  //    ((x - SurrogateMin) >>> 10) = 0
  // <> floor((x - SurrogateMin) / 1024) = 0
  // <> floor((x / 1024) - (SurrogateMin / 1024)) = 0
  // <> floor(x / 1024) = SurrogateMin / 1024
  // <> floor(x / 1024) * 1024 = SurrogateMin
  // <> (x >>> 10) << 10 = SurrogateMin
  // <> x & ~(2^10 - 1) = SurrogateMin

  constexpr char16_t SurrogateMask = 0xFC00;
  char16_t SurrogateMin = surrogateChar == SurrogateChar::Lead
                              ? unicode::LeadSurrogateMin
                              : unicode::TrailSurrogateMin;

  if (src != scratch) {
    move32(src, scratch);
  }

  and32(Imm32(SurrogateMask), scratch);
  branch32(cond, scratch, Imm32(SurrogateMin), label);
}

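// Editorial example for the mask test above: U+1F600 is encoded as the
// surrogate pair <0xD83D, 0xDE00>. Masking each half with 0xFC00 recovers the
// start of its surrogate range, which is exactly what branchSurrogate
// compares against. The arithmetic can be checked at compile time:
static_assert((0xD83D & 0xFC00) == 0xD800,
              "lead surrogate masks to LeadSurrogateMin (0xD800)");
static_assert((0xDE00 & 0xFC00) == 0xDC00,
              "trail surrogate masks to TrailSurrogateMin (0xDC00)");
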
void MacroAssembler::loadStringFromUnit(Register unit, Register dest,
                                        const StaticStrings& staticStrings) {
  movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
  loadPtr(BaseIndex(dest, unit, ScalePointer), dest);
}

void MacroAssembler::loadLengthTwoString(Register c1, Register c2,
                                         Register dest,
                                         const StaticStrings& staticStrings) {
  // Compute (toSmallCharTable[c1] << SMALL_CHAR_BITS) + toSmallCharTable[c2]
  // to obtain the index into `StaticStrings::length2StaticTable`.
  static_assert(sizeof(StaticStrings::SmallChar) == 1);

  movePtr(ImmPtr(&StaticStrings::toSmallCharTable.storage), dest);
  load8ZeroExtend(BaseIndex(dest, c1, Scale::TimesOne), c1);
  load8ZeroExtend(BaseIndex(dest, c2, Scale::TimesOne), c2);

  lshift32(Imm32(StaticStrings::SMALL_CHAR_BITS), c1);
  add32(c2, c1);

  // Look up the string from the computed index.
  movePtr(ImmPtr(&staticStrings.length2StaticTable), dest);
  loadPtr(BaseIndex(dest, c1, ScalePointer), dest);
}

void MacroAssembler::lookupStaticString(Register ch, Register dest,
                                        const StaticStrings& staticStrings) {
  MOZ_ASSERT(ch != dest);

  movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
  loadPtr(BaseIndex(dest, ch, ScalePointer), dest);
}

void MacroAssembler::lookupStaticString(Register ch, Register dest,
                                        const StaticStrings& staticStrings,
                                        Label* fail) {
  MOZ_ASSERT(ch != dest);

  boundsCheck32PowerOfTwo(ch, StaticStrings::UNIT_STATIC_LIMIT, fail);
  movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
  loadPtr(BaseIndex(dest, ch, ScalePointer), dest);
}

void MacroAssembler::lookupStaticString(Register ch1, Register ch2,
                                        Register dest,
                                        const StaticStrings& staticStrings,
                                        Label* fail) {
  MOZ_ASSERT(ch1 != dest);
  MOZ_ASSERT(ch2 != dest);

  branch32(Assembler::AboveOrEqual, ch1,
           Imm32(StaticStrings::SMALL_CHAR_TABLE_SIZE), fail);
  branch32(Assembler::AboveOrEqual, ch2,
           Imm32(StaticStrings::SMALL_CHAR_TABLE_SIZE), fail);

  movePtr(ImmPtr(&StaticStrings::toSmallCharTable.storage), dest);
  load8ZeroExtend(BaseIndex(dest, ch1, Scale::TimesOne), ch1);
  load8ZeroExtend(BaseIndex(dest, ch2, Scale::TimesOne), ch2);

  branch32(Assembler::Equal, ch1, Imm32(StaticStrings::INVALID_SMALL_CHAR),
           fail);
  branch32(Assembler::Equal, ch2, Imm32(StaticStrings::INVALID_SMALL_CHAR),
           fail);

  lshift32(Imm32(StaticStrings::SMALL_CHAR_BITS), ch1);
  add32(ch2, ch1);

  // Look up the string from the computed index.
  movePtr(ImmPtr(&staticStrings.length2StaticTable), dest);
  loadPtr(BaseIndex(dest, ch1, ScalePointer), dest);
}

void MacroAssembler::lookupStaticIntString(Register integer, Register dest,
                                           Register scratch,
                                           const StaticStrings& staticStrings,
                                           Label* fail) {
  MOZ_ASSERT(integer != scratch);

  boundsCheck32PowerOfTwo(integer, StaticStrings::INT_STATIC_LIMIT, fail);
  movePtr(ImmPtr(&staticStrings.intStaticTable), scratch);
  loadPtr(BaseIndex(scratch, integer, ScalePointer), dest);
}

void MacroAssembler::loadInt32ToStringWithBase(
    Register input, Register base, Register dest, Register scratch1,
    Register scratch2, const StaticStrings& staticStrings,
    const LiveRegisterSet& volatileRegs, bool lowerCase, Label* fail) {
#ifdef DEBUG
  Label baseBad, baseOk;
  branch32(Assembler::LessThan, base, Imm32(2), &baseBad);
  branch32(Assembler::LessThanOrEqual, base, Imm32(36), &baseOk);

  bind(&baseBad);
  assumeUnreachable("base must be in range [2, 36]");
  bind(&baseOk);
#endif

  // Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
  auto toChar = [this, base, lowerCase](Register r) {
#ifdef DEBUG
    Label ok;
    branch32(Assembler::Below, r, base, &ok);
    assumeUnreachable("bad digit");
    bind(&ok);
#else
    // Silence unused lambda capture warning.
    (void)base;
#endif

    Label done;
    add32(Imm32('0'), r);
    branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
    add32(Imm32((lowerCase ? 'a' : 'A') - '0' - 10), r);
    bind(&done);
  };

  // Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
  Label lengthTwo, done;
  branch32(Assembler::AboveOrEqual, input, base, &lengthTwo);
  {
    move32(input, scratch1);
    toChar(scratch1);

    loadStringFromUnit(scratch1, dest, staticStrings);

    jump(&done);
  }
  bind(&lengthTwo);

  // Compute |base * base|.
  move32(base, scratch1);
  mul32(scratch1, scratch1);

  // Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
  branch32(Assembler::AboveOrEqual, input, scratch1, fail);

  // Compute |scratch1 = input / base| and |scratch2 = input % base|.
  move32(input, scratch1);
  flexibleDivMod32(base, scratch1, scratch2, true, volatileRegs);

  // Compute the digits of the quotient and remainder.
  toChar(scratch1);
  toChar(scratch2);

  // Look up the 2-character digit string in the small-char table.
  loadLengthTwoString(scratch1, scratch2, dest, staticStrings);

  bind(&done);
}

void MacroAssembler::loadInt32ToStringWithBase(
    Register input, int32_t base, Register dest, Register scratch1,
    Register scratch2, const StaticStrings& staticStrings, bool lowerCase,
    Label* fail) {
  MOZ_ASSERT(2 <= base && base <= 36, "base must be in range [2, 36]");

  // Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
  auto toChar = [this, base, lowerCase](Register r) {
#ifdef DEBUG
    Label ok;
    branch32(Assembler::Below, r, Imm32(base), &ok);
    assumeUnreachable("bad digit");
    bind(&ok);
#endif

    if (base <= 10) {
      add32(Imm32('0'), r);
    } else {
      Label done;
      add32(Imm32('0'), r);
      branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
      add32(Imm32((lowerCase ? 'a' : 'A') - '0' - 10), r);
      bind(&done);
    }
  };

  // Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
  Label lengthTwo, done;
  branch32(Assembler::AboveOrEqual, input, Imm32(base), &lengthTwo);
  {
    move32(input, scratch1);
    toChar(scratch1);

    loadStringFromUnit(scratch1, dest, staticStrings);

    jump(&done);
  }
  bind(&lengthTwo);

  // Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
  branch32(Assembler::AboveOrEqual, input, Imm32(base * base), fail);

  // Compute |scratch1 = input / base| and |scratch2 = input % base|.
  if (mozilla::IsPowerOfTwo(uint32_t(base))) {
    uint32_t shift = mozilla::FloorLog2(base);

    move32(input, scratch1);
    rshift32(Imm32(shift), scratch1);

    move32(input, scratch2);
    and32(Imm32((uint32_t(1) << shift) - 1), scratch2);
  } else {
    // The following code matches CodeGenerator::visitUDivOrModConstant()
    // for x86-shared. Also see Hacker's Delight 2nd edition, chapter 10-8
    // "Unsigned Division by 7" for the case when |rmc.multiplier| exceeds
    // UINT32_MAX and we need to adjust the shift amount.

    auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(base);

    // We first compute |q = (M * n) >> 32|, where M = rmc.multiplier.
    mulHighUnsigned32(Imm32(rmc.multiplier), input, scratch1);

    if (rmc.multiplier > UINT32_MAX) {
      // M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
      // ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
      // contradicting the proof of correctness in computeDivisionConstants.
      MOZ_ASSERT(rmc.shiftAmount > 0);
      MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));

      // Compute |t = (n - q) / 2|.
      move32(input, scratch2);
      sub32(scratch1, scratch2);
      rshift32(Imm32(1), scratch2);

      // Compute |t = (n - q) / 2 + q = (n + q) / 2|.
      add32(scratch2, scratch1);

      // Finish the computation |q = floor(n / d)|.
      rshift32(Imm32(rmc.shiftAmount - 1), scratch1);
    } else {
      rshift32(Imm32(rmc.shiftAmount), scratch1);
    }

    // Compute the remainder from |r = n - q * d|.
    move32(scratch1, dest);
    mul32(Imm32(base), dest);
    move32(input, scratch2);
    sub32(dest, scratch2);
  }

  // Compute the digits of the quotient and remainder.
  toChar(scratch1);
  toChar(scratch2);

  // Look up the 2-character digit string in the small-char table.
  loadLengthTwoString(scratch1, scratch2, dest, staticStrings);

  bind(&done);
}

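// Editorial worked example of the reciprocal-multiplication path above. The
// exact constants come from computeUnsignedDivisionConstants; for division by
// 10 they are the well-known pair M = 0xCCCCCCCD with shiftAmount = 3, and
// M <= UINT32_MAX, so the simple branch applies:
//
//   n = 77:  q = (M * n) >> (32 + 3) = floor(264569985449 / 2^35) = 7
//            r = n - q * 10 = 7
//
// giving the digit pair '7','7' and the static string "77".
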
void MacroAssembler::loadBigIntDigits(Register bigInt, Register digits) {
  MOZ_ASSERT(digits != bigInt);

  // Load the inline digits.
  computeEffectiveAddress(Address(bigInt, BigInt::offsetOfInlineDigits()),
                          digits);

  // If inline digits aren't used, load the heap digits. Use a conditional move
  // to prevent speculative execution.
  cmp32LoadPtr(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
               Imm32(int32_t(BigInt::inlineDigitsLength())),
               Address(bigInt, BigInt::offsetOfHeapDigits()), digits);
}

void MacroAssembler::loadBigInt64(Register bigInt, Register64 dest) {
  // This code follows the implementation of |BigInt::toUint64()|. We're also
  // using it for inline callers of |BigInt::toInt64()|, which works, because
  // all supported Jit architectures use a two's complement representation for
  // int64 values, which means the WrapToSigned call in toInt64() is a no-op.

  Label done, nonZero;

  branchIfBigIntIsNonZero(bigInt, &nonZero);
  {
    move64(Imm64(0), dest);
    jump(&done);
  }
  bind(&nonZero);

#ifdef JS_PUNBOX64
  Register digits = dest.reg;
#else
  Register digits = dest.high;
#endif

  loadBigIntDigits(bigInt, digits);

#ifdef JS_PUNBOX64
  // Load the first digit into the destination register.
  load64(Address(digits, 0), dest);
#else
  // Load the first digit into the destination register's low value.
  load32(Address(digits, 0), dest.low);

  // And conditionally load the second digit into the high value register.
  Label twoDigits, digitsDone;
  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), &twoDigits);
  {
    move32(Imm32(0), dest.high);
    jump(&digitsDone);
  }
  bind(&twoDigits);
  load32(Address(digits, sizeof(BigInt::Digit)), dest.high);

  bind(&digitsDone);
#endif

  branchTest32(Assembler::Zero, Address(bigInt, BigInt::offsetOfFlags()),
               Imm32(BigInt::signBitMask()), &done);
  neg64(dest);

  bind(&done);
}

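// Editorial note on the digit layout assumed above: digits store the absolute
// value, least-significant digit first, one uintptr_t each. The value
// 2^32 + 1 is therefore a single digit {0x100000001} on 64-bit targets, but
// two digits {0x1, 0x1} on 32-bit targets, which is why the 32-bit path loads
// |dest.low| from digit 0 and |dest.high| from digit 1.
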
void MacroAssembler::loadFirstBigIntDigitOrZero(Register bigInt,
                                                Register dest) {
  Label done, nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  {
    movePtr(ImmWord(0), dest);
    jump(&done);
  }
  bind(&nonZero);

  loadBigIntDigits(bigInt, dest);

  // Load the first digit into the destination register.
  loadPtr(Address(dest, 0), dest);

  bind(&done);
}

void MacroAssembler::loadBigInt(Register bigInt, Register dest, Label* fail) {
  Label done, nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  {
    movePtr(ImmWord(0), dest);
    jump(&done);
  }
  bind(&nonZero);

  loadBigIntNonZero(bigInt, dest, fail);

  bind(&done);
}

void MacroAssembler::loadBigIntNonZero(Register bigInt, Register dest,
                                       Label* fail) {
  MOZ_ASSERT(bigInt != dest);

#ifdef DEBUG
  Label nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  assumeUnreachable("Unexpected zero BigInt");
  bind(&nonZero);
#endif

  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), fail);

  static_assert(BigInt::inlineDigitsLength() > 0,
                "Single digit BigInts use inline storage");

  // Load the first inline digit into the destination register.
  loadPtr(Address(bigInt, BigInt::offsetOfInlineDigits()), dest);

  // Return as a signed pointer.
  bigIntDigitToSignedPtr(bigInt, dest, fail);
}

void MacroAssembler::bigIntDigitToSignedPtr(Register bigInt, Register digit,
                                            Label* fail) {
  // BigInt digits are stored as absolute numbers. Take the failure path when
  // the digit can't be stored in intptr_t.
  branchTestPtr(Assembler::Signed, digit, digit, fail);

  // Negate |digit| when the BigInt is negative.
  Label nonNegative;
  branchIfBigIntIsNonNegative(bigInt, &nonNegative);
  negPtr(digit);
  bind(&nonNegative);
}

void MacroAssembler::loadBigIntAbsolute(Register bigInt, Register dest,
                                        Label* fail) {
  MOZ_ASSERT(bigInt != dest);

  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), fail);

  static_assert(BigInt::inlineDigitsLength() > 0,
                "Single digit BigInts use inline storage");

  // Load the first inline digit into the destination register.
  movePtr(ImmWord(0), dest);
  cmp32LoadPtr(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
               Imm32(0), Address(bigInt, BigInt::offsetOfInlineDigits()), dest);
}

void MacroAssembler::initializeBigInt64(Scalar::Type type, Register bigInt,
                                        Register64 val) {
  MOZ_ASSERT(Scalar::isBigIntType(type));

  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  Label done, nonZero;
  branch64(Assembler::NotEqual, val, Imm64(0), &nonZero);
  {
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  if (type == Scalar::BigInt64) {
    // Set the sign-bit for negative values and then continue with the two's
    // complement.
    Label isPositive;
    branch64(Assembler::GreaterThan, val, Imm64(0), &isPositive);
    {
      store32(Imm32(BigInt::signBitMask()),
              Address(bigInt, BigInt::offsetOfFlags()));
      neg64(val);
    }
    bind(&isPositive);
  }

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t, so there's a single "
                "store on 64-bit and up to two stores on 32-bit");

#ifndef JS_PUNBOX64
  Label singleDigit;
  branchTest32(Assembler::Zero, val.high, val.high, &singleDigit);
  store32(Imm32(2), Address(bigInt, BigInt::offsetOfLength()));
  bind(&singleDigit);

  // We can perform a single store64 on 32-bit platforms, because inline
  // storage can store at least two 32-bit integers.
  static_assert(BigInt::inlineDigitsLength() >= 2,
                "BigInt inline storage can store at least two digits");
#endif

  store64(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}

void MacroAssembler::initializeBigInt(Register bigInt, Register val) {
  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  Label done, nonZero;
  branchTestPtr(Assembler::NonZero, val, val, &nonZero);
  {
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  // Set the sign-bit for negative values and then continue with the two's
  // complement.
  Label isPositive;
  branchTestPtr(Assembler::NotSigned, val, val, &isPositive);
  {
    store32(Imm32(BigInt::signBitMask()),
            Address(bigInt, BigInt::offsetOfFlags()));
    negPtr(val);
  }
  bind(&isPositive);

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t");

  storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}

void MacroAssembler::initializeBigIntAbsolute(Register bigInt, Register val) {
  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  Label done, nonZero;
  branchTestPtr(Assembler::NonZero, val, val, &nonZero);
  {
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t");

  storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}

void MacroAssembler::copyBigIntWithInlineDigits(Register src, Register dest,
                                                Register temp,
                                                gc::Heap initialHeap,
                                                Label* fail) {
  branch32(Assembler::Above, Address(src, BigInt::offsetOfLength()),
           Imm32(int32_t(BigInt::inlineDigitsLength())), fail);

  newGCBigInt(dest, temp, initialHeap, fail);

  // Copy the sign-bit, but not any of the other bits used by the GC.
  load32(Address(src, BigInt::offsetOfFlags()), temp);
  and32(Imm32(BigInt::signBitMask()), temp);
  store32(temp, Address(dest, BigInt::offsetOfFlags()));

  // Copy the length.
  load32(Address(src, BigInt::offsetOfLength()), temp);
  store32(temp, Address(dest, BigInt::offsetOfLength()));

  // Copy the digits.
  Address srcDigits(src, js::BigInt::offsetOfInlineDigits());
  Address destDigits(dest, js::BigInt::offsetOfInlineDigits());

  for (size_t i = 0; i < BigInt::inlineDigitsLength(); i++) {
    static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                  "BigInt Digit size matches uintptr_t");

    loadPtr(srcDigits, temp);
    storePtr(temp, destDigits);

    srcDigits = Address(src, srcDigits.offset + sizeof(BigInt::Digit));
    destDigits = Address(dest, destDigits.offset + sizeof(BigInt::Digit));
  }
}

void MacroAssembler::compareBigIntAndInt32(JSOp op, Register bigInt,
                                           Register int32, Register scratch1,
                                           Register scratch2, Label* ifTrue,
                                           Label* ifFalse) {
  MOZ_ASSERT(IsLooseEqualityOp(op) || IsRelationalOp(op));

  static_assert(std::is_same_v<BigInt::Digit, uintptr_t>,
                "BigInt digit can be loaded in a pointer-sized register");
  static_assert(sizeof(BigInt::Digit) >= sizeof(uint32_t),
                "BigInt digit stores at least an uint32");

  // Test for too large numbers.
  //
  // If the absolute value of the BigInt can't be expressed in an
  // uint32/uint64, the result of the comparison is a constant.
  if (op == JSOp::Eq || op == JSOp::Ne) {
    Label* tooLarge = op == JSOp::Eq ? ifFalse : ifTrue;
    branch32(Assembler::GreaterThan,
             Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
             tooLarge);
  } else {
    Label doCompare;
    branch32(Assembler::LessThanOrEqual,
             Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
             &doCompare);

    // Still need to take the sign-bit into account for relational operations.
    if (op == JSOp::Lt || op == JSOp::Le) {
      branchIfBigIntIsNegative(bigInt, ifTrue);
      jump(ifFalse);
    } else {
      MOZ_ASSERT(op == JSOp::Gt || op == JSOp::Ge);
      branchIfBigIntIsNegative(bigInt, ifFalse);
      jump(ifTrue);
    }

    bind(&doCompare);
  }

  // Test for mismatched signs and, if the signs are equal, load |abs(x)| in
  // |scratch1| and |abs(y)| in |scratch2| and then compare the absolute
  // numbers against each other.
  {
    // Jump to |ifTrue| resp. |ifFalse| if the BigInt is strictly less than
    // resp. strictly greater than the int32 value, depending on the
    // comparison operator.
    Label* greaterThan;
    Label* lessThan;
    if (op == JSOp::Eq) {
      greaterThan = ifFalse;
      lessThan = ifFalse;
    } else if (op == JSOp::Ne) {
      greaterThan = ifTrue;
      lessThan = ifTrue;
    } else if (op == JSOp::Lt || op == JSOp::Le) {
      greaterThan = ifFalse;
      lessThan = ifTrue;
    } else {
      MOZ_ASSERT(op == JSOp::Gt || op == JSOp::Ge);
      greaterThan = ifTrue;
      lessThan = ifFalse;
    }

    // BigInt digits are always stored as an absolute number.
    loadFirstBigIntDigitOrZero(bigInt, scratch1);

    // Load the int32 into |scratch2| and negate it for negative numbers.
    move32(int32, scratch2);

    Label isNegative, doCompare;
    branchIfBigIntIsNegative(bigInt, &isNegative);
    branch32(Assembler::LessThan, int32, Imm32(0), greaterThan);
    jump(&doCompare);

    // We rely on |neg32(INT32_MIN)| staying INT32_MIN, because we're using an
    // unsigned comparison below.
    bind(&isNegative);
    branch32(Assembler::GreaterThanOrEqual, int32, Imm32(0), lessThan);
    neg32(scratch2);

    // Not all supported platforms (e.g. MIPS64) zero-extend 32-bit operations,
    // so we need to explicitly clear any high 32-bits.
    move32ZeroExtendToPtr(scratch2, scratch2);

    // Reverse the relational comparator for negative numbers.
    // |-x < -y| <=> |+x > +y|.
    // |-x ≤ -y| <=> |+x ≥ +y|.
    // |-x > -y| <=> |+x < +y|.
    // |-x ≥ -y| <=> |+x ≤ +y|.
    JSOp reversed = ReverseCompareOp(op);
    if (reversed != op) {
      branchPtr(JSOpToCondition(reversed, /* isSigned = */ false), scratch1,
                scratch2, ifTrue);
      jump(ifFalse);
    }

    bind(&doCompare);
    branchPtr(JSOpToCondition(op, /* isSigned = */ false), scratch1, scratch2,
              ifTrue);
    jump(ifFalse);
  }
}

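// Editorial worked example: evaluating |-5n < -3| with op == JSOp::Lt. Both
// operands are negative, so the mismatched-sign branches are not taken;
// scratch1 = |x| = 5 and scratch2 = |y| = 3. ReverseCompareOp(Lt) is Gt, and
// the unsigned comparison 5 > 3 holds, so control reaches |ifTrue|, matching
// the expected result, since -5 < -3.
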
void MacroAssembler::equalBigInts(Register left, Register right,
                                  Register temp1, Register temp2,
                                  Register temp3, Register temp4,
                                  Label* notSameSign, Label* notSameLength,
                                  Label* notSameDigit) {
  MOZ_ASSERT(left != temp1);
  MOZ_ASSERT(right != temp1);
  MOZ_ASSERT(right != temp2);

  // Jump to |notSameSign| when the signs aren't the same.
  load32(Address(left, BigInt::offsetOfFlags()), temp1);
  xor32(Address(right, BigInt::offsetOfFlags()), temp1);
  branchTest32(Assembler::NonZero, temp1, Imm32(BigInt::signBitMask()),
               notSameSign);

  // Jump to |notSameLength| when the digits length is different.
  load32(Address(right, BigInt::offsetOfLength()), temp1);
  branch32(Assembler::NotEqual, Address(left, BigInt::offsetOfLength()), temp1,
           notSameLength);

  // Both BigInts have the same sign and the same number of digits. Loop
  // over each digit, starting with the left-most one, and break from the
  // loop when the first non-matching digit was found.

  loadBigIntDigits(left, temp2);
  loadBigIntDigits(right, temp3);

  static_assert(sizeof(BigInt::Digit) == sizeof(void*),
                "BigInt::Digit is pointer sized");

  computeEffectiveAddress(BaseIndex(temp2, temp1, ScalePointer), temp2);
  computeEffectiveAddress(BaseIndex(temp3, temp1, ScalePointer), temp3);

  Label start, loop;
  jump(&start);
  bind(&loop);

  subPtr(Imm32(sizeof(BigInt::Digit)), temp2);
  subPtr(Imm32(sizeof(BigInt::Digit)), temp3);

  loadPtr(Address(temp3, 0), temp4);
  branchPtr(Assembler::NotEqual, Address(temp2, 0), temp4, notSameDigit);

  bind(&start);
  branchSub32(Assembler::NotSigned, Imm32(1), temp1, &loop);

  // No different digits were found, both BigInts are equal to each other.
}

void MacroAssembler::typeOfObject(Register obj, Register scratch, Label* slow,
                                  Label* isObject, Label* isCallable,
                                  Label* isUndefined) {
  loadObjClassUnsafe(obj, scratch);

  // Proxies can emulate undefined and have complex isCallable behavior.
  branchTestClassIsProxy(true, scratch, slow);

  // JSFunctions are always callable.
  branchTestClassIsFunction(Assembler::Equal, scratch, isCallable);

  // Objects that emulate undefined.
  Address flags(scratch, JSClass::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_EMULATES_UNDEFINED),
               isUndefined);

  // Handle classes with a call hook.
  branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClass, cOps)),
            ImmPtr(nullptr), isObject);

  loadPtr(Address(scratch, offsetof(JSClass, cOps)), scratch);
  branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClassOps, call)),
            ImmPtr(nullptr), isObject);

  jump(isCallable);
}

void MacroAssembler::isCallableOrConstructor(bool isCallable, Register obj,
                                             Register output, Label* isProxy) {
  MOZ_ASSERT(obj != output);

  Label notFunction, hasCOps, done;
  loadObjClassUnsafe(obj, output);

  // An object is callable iff:
  //   is<JSFunction>() || (getClass()->cOps && getClass()->cOps->call).
  // An object is a constructor iff:
  //   ((is<JSFunction>() && as<JSFunction>().isConstructor) ||
  //    (getClass()->cOps && getClass()->cOps->construct)).
  branchTestClassIsFunction(Assembler::NotEqual, output, &notFunction);
  if (isCallable) {
    move32(Imm32(1), output);
  } else {
    static_assert(mozilla::IsPowerOfTwo(uint32_t(FunctionFlags::CONSTRUCTOR)),
                  "FunctionFlags::CONSTRUCTOR has only one bit set");

    load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), output);
    rshift32(Imm32(mozilla::FloorLog2(uint32_t(FunctionFlags::CONSTRUCTOR))),
             output);
    and32(Imm32(1), output);
  }
  jump(&done);

  bind(&notFunction);

  if (!isCallable) {
    // For bound functions, we need to check the isConstructor flag.
    Label notBoundFunction;
    branchPtr(Assembler::NotEqual, output, ImmPtr(&BoundFunctionObject::class_),
              &notBoundFunction);

    static_assert(BoundFunctionObject::IsConstructorFlag == 0b1,
                  "AND operation results in boolean value");
    unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()), output);
    and32(Imm32(BoundFunctionObject::IsConstructorFlag), output);
    jump(&done);

    bind(&notBoundFunction);
  }

  // Just skim proxies off. Their notion of isCallable()/isConstructor() is
  // more complicated.
  branchTestClassIsProxy(true, output, isProxy);

  branchPtr(Assembler::NonZero, Address(output, offsetof(JSClass, cOps)),
            ImmPtr(nullptr), &hasCOps);
  move32(Imm32(0), output);
  jump(&done);

  bind(&hasCOps);
  loadPtr(Address(output, offsetof(JSClass, cOps)), output);
  size_t opsOffset =
      isCallable ? offsetof(JSClassOps, call) : offsetof(JSClassOps, construct);
  cmpPtrSet(Assembler::NonZero, Address(output, opsOffset), ImmPtr(nullptr),
            output);

  bind(&done);
}

void MacroAssembler::loadJSContext(Register dest) {
  movePtr(ImmPtr(runtime()->mainContextPtr()), dest);
}

static const uint8_t* ContextRealmPtr(CompileRuntime* rt) {
  return (static_cast<const uint8_t*>(rt->mainContextPtr()) +
          JSContext::offsetOfRealm());
}

void MacroAssembler::loadGlobalObjectData(Register dest) {
  loadPtr(AbsoluteAddress(ContextRealmPtr(runtime())), dest);
  loadPtr(Address(dest, Realm::offsetOfActiveGlobal()), dest);
  loadPrivate(Address(dest, GlobalObject::offsetOfGlobalDataSlot()), dest);
}

void MacroAssembler::switchToRealm(Register realm) {
  storePtr(realm, AbsoluteAddress(ContextRealmPtr(runtime())));
}

void MacroAssembler::loadRealmFuse(RealmFuses::FuseIndex index,
                                   Register dest) {
  // Load the Realm pointer, then the fuse word relative to it.
  loadPtr(AbsoluteAddress(ContextRealmPtr(runtime())), dest);
  loadPtr(Address(dest, RealmFuses::offsetOfFuseWordRelativeToRealm(index)),
          dest);
}

void MacroAssembler::switchToRealm(const void* realm, Register scratch) {
  movePtr(ImmPtr(realm), scratch);
  switchToRealm(scratch);
}

void MacroAssembler::switchToObjectRealm(Register obj, Register scratch) {
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
  switchToRealm(scratch);
}

void MacroAssembler::switchToBaselineFrameRealm(Register scratch) {
  Address envChain(FramePointer,
                   BaselineFrame::reverseOffsetOfEnvironmentChain());
  loadPtr(envChain, scratch);
  switchToObjectRealm(scratch, scratch);
}

void MacroAssembler::switchToWasmInstanceRealm(Register scratch1,
                                               Register scratch2) {
  loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), scratch1);
  loadPtr(Address(InstanceReg, wasm::Instance::offsetOfRealm()), scratch2);
  storePtr(scratch2, Address(scratch1, JSContext::offsetOfRealm()));
}

void MacroAssembler::debugAssertContextRealm(const void* realm,
                                             Register scratch) {
#ifdef DEBUG
  Label ok;
  movePtr(ImmPtr(realm), scratch);
  branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr(runtime())),
            scratch, &ok);
  assumeUnreachable("Unexpected context realm");
  bind(&ok);
#endif
}

void MacroAssembler::setIsCrossRealmArrayConstructor(Register obj,
                                                     Register output) {
#ifdef DEBUG
  Label notProxy;
  branchTestObjectIsProxy(false, obj, output, &notProxy);
  assumeUnreachable("Unexpected proxy in setIsCrossRealmArrayConstructor");
  bind(&notProxy);
#endif

  // The object's realm must not be cx->realm.
  Label isFalse, done;
  loadPtr(Address(obj, JSObject::offsetOfShape()), output);
  loadPtr(Address(output, Shape::offsetOfBaseShape()), output);
  loadPtr(Address(output, BaseShape::offsetOfRealm()), output);
  branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr(runtime())),
            output, &isFalse);

  // The object must be a function.
  branchTestObjIsFunction(Assembler::NotEqual, obj, output, obj, &isFalse);

  // The function must be the ArrayConstructor native.
  branchPtr(Assembler::NotEqual,
            Address(obj, JSFunction::offsetOfNativeOrEnv()),
            ImmPtr(js::ArrayConstructor), &isFalse);

  move32(Imm32(1), output);
  jump(&done);

  bind(&isFalse);
  move32(Imm32(0), output);

  bind(&done);
}

void MacroAssembler::setIsDefinitelyTypedArrayConstructor(Register obj,
                                                          Register output) {
  Label isFalse, isTrue, done;

  // The object must be a function. (Wrappers are not supported.)
  branchTestObjIsFunction(Assembler::NotEqual, obj, output, obj, &isFalse);

  // Load the native into |output|.
  loadPtr(Address(obj, JSFunction::offsetOfNativeOrEnv()), output);

  auto branchIsTypedArrayCtor = [&](Scalar::Type type) {
    // The function must be a TypedArrayConstructor native (from any realm).
    JSNative constructor = TypedArrayConstructorNative(type);
    branchPtr(Assembler::Equal, output, ImmPtr(constructor), &isTrue);
  };

#define TYPED_ARRAY_CONSTRUCTOR_NATIVE(_, T, N) \
  branchIsTypedArrayCtor(Scalar::N);
  JS_FOR_EACH_TYPED_ARRAY(TYPED_ARRAY_CONSTRUCTOR_NATIVE)
#undef TYPED_ARRAY_CONSTRUCTOR_NATIVE

  // Falls through to the false case.

  bind(&isFalse);
  move32(Imm32(0), output);
  jump(&done);

  bind(&isTrue);
  move32(Imm32(1), output);

  bind(&done);
}

void MacroAssembler::loadMegamorphicCache(Register dest) {
  movePtr(ImmPtr(runtime()->addressOfMegamorphicCache()), dest);
}

void MacroAssembler::loadMegamorphicSetPropCache(Register dest) {
  movePtr(ImmPtr(runtime()->addressOfMegamorphicSetPropCache()), dest);
}

void MacroAssembler::loadStringToAtomCacheLastLookups(Register dest) {
  uintptr_t cachePtr = uintptr_t(runtime()->addressOfStringToAtomCache());
  void* offset = (void*)(cachePtr + StringToAtomCache::offsetOfLastLookups());
  movePtr(ImmPtr(offset), dest);
}

void MacroAssembler::loadAtomHash(Register id, Register outHash, Label* done) {
  Label doneInner, fatInline;
  if (!done) {
    done = &doneInner;
  }

  move32(Imm32(JSString::FAT_INLINE_MASK), outHash);
  and32(Address(id, JSString::offsetOfFlags()), outHash);

  branch32(Assembler::Equal, outHash, Imm32(JSString::FAT_INLINE_MASK),
           &fatInline);
  load32(Address(id, NormalAtom::offsetOfHash()), outHash);
  jump(done);

  bind(&fatInline);
  load32(Address(id, FatInlineAtom::offsetOfHash()), outHash);
  jump(done);

  bind(&doneInner);
}

void MacroAssembler::loadAtomOrSymbolAndHash(ValueOperand value,
                                             Register outId, Register outHash,
                                             Label* cacheMiss) {
  Label isString, isSymbol, isNull, isUndefined, done, nonAtom, atom,
      lastLookupAtom;

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);
    branchTestString(Assembler::Equal, tag, &isString);
    branchTestSymbol(Assembler::Equal, tag, &isSymbol);
    branchTestNull(Assembler::Equal, tag, &isNull);
    branchTestUndefined(Assembler::NotEqual, tag, cacheMiss);
  }

  const JSAtomState& names = runtime()->names();
  movePropertyKey(PropertyKey::NonIntAtom(names.undefined), outId);
  move32(Imm32(names.undefined->hash()), outHash);
  jump(&done);

  bind(&isNull);
  movePropertyKey(PropertyKey::NonIntAtom(names.null), outId);
  move32(Imm32(names.null->hash()), outHash);
  jump(&done);

  bind(&isSymbol);
  unboxSymbol(value, outId);
  load32(Address(outId, JS::Symbol::offsetOfHash()), outHash);
  orPtr(Imm32(PropertyKey::SymbolTypeTag), outId);
  jump(&done);

  bind(&isString);
  unboxString(value, outId);
  branchTest32(Assembler::Zero, Address(outId, JSString::offsetOfFlags()),
               Imm32(JSString::ATOM_BIT), &nonAtom);

  bind(&atom);
  loadAtomHash(outId, outHash, &done);

  bind(&nonAtom);
  loadStringToAtomCacheLastLookups(outHash);

  // Compare each entry in the StringToAtomCache's lastLookups_ array.
  size_t stringOffset = StringToAtomCache::LastLookup::offsetOfString();
  branchPtr(Assembler::Equal, Address(outHash, stringOffset), outId,
            &lastLookupAtom);
  for (size_t i = 0; i < StringToAtomCache::NumLastLookups - 1; ++i) {
    addPtr(Imm32(sizeof(StringToAtomCache::LastLookup)), outHash);
    branchPtr(Assembler::Equal, Address(outHash, stringOffset), outId,
              &lastLookupAtom);
  }

  // Couldn't find us in the cache, so fall back to the C++ call.
  jump(cacheMiss);

  // We found a hit in the lastLookups_ array! Load the associated atom
  // and jump back up to our usual atom handling code.
  bind(&lastLookupAtom);
  size_t atomOffset = StringToAtomCache::LastLookup::offsetOfAtom();
  loadPtr(Address(outHash, atomOffset), outId);
  jump(&atom);

  bind(&done);
}

void MacroAssembler::emitExtractValueFromMegamorphicCacheEntry(
    Register obj, Register entry, Register scratch1, Register scratch2,
    ValueOperand output, Label* cacheHit, Label* cacheMiss) {
  Label isMissing, dynamicSlot, protoLoopHead, protoLoopTail;

  // scratch2 = entry->numHops_
  load8ZeroExtend(Address(entry, MegamorphicCache::Entry::offsetOfNumHops()),
                  scratch2);
  // if (scratch2 == NumHopsForMissingOwnProperty) goto cacheMiss
  branch32(Assembler::Equal, scratch2,
           Imm32(MegamorphicCache::Entry::NumHopsForMissingOwnProperty),
           cacheMiss);
  // if (scratch2 == NumHopsForMissingProperty) goto isMissing
  branch32(Assembler::Equal, scratch2,
           Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty),
           &isMissing);

  // NOTE: Where this is called, `output` can actually alias `obj`, and before
  // the last cacheMiss branch above we can't write to `obj`, so we can't
  // use `output`'s scratch register there. However a cache miss is impossible
  // now, so we're free to use `output` as we like.
  Register outputScratch = output.scratchReg();
  if (!outputScratch.aliases(obj)) {
    // We're okay with paying this very slight extra cost to avoid a potential
    // footgun of writing to what callers understand as only an input register.
    movePtr(obj, outputScratch);
  }
  branchTest32(Assembler::Zero, scratch2, scratch2, &protoLoopTail);
  bind(&protoLoopHead);
  loadObjProto(outputScratch, outputScratch);
  branchSub32(Assembler::NonZero, Imm32(1), scratch2, &protoLoopHead);
  bind(&protoLoopTail);

  // scratch1 = entry->slotOffset()
  load32(Address(entry, MegamorphicCacheEntry::offsetOfSlotOffset()),
         scratch1);

  // scratch2 = slotOffset.offset()
  move32(scratch1, scratch2);
  rshift32(Imm32(TaggedSlotOffset::OffsetShift), scratch2);

  // if (!slotOffset.isFixedSlot()) goto dynamicSlot
  branchTest32(Assembler::Zero, scratch1,
               Imm32(TaggedSlotOffset::IsFixedSlotFlag), &dynamicSlot);
  // output = outputScratch[scratch2]
  loadValue(BaseIndex(outputScratch, scratch2, TimesOne), output);
  jump(cacheHit);

  bind(&dynamicSlot);
  // output = outputScratch->slots_[scratch2]
  loadPtr(Address(outputScratch, NativeObject::offsetOfSlots()), outputScratch);
  loadValue(BaseIndex(outputScratch, scratch2, TimesOne), output);
  jump(cacheHit);

  bind(&isMissing);
  // output = undefined
  moveValue(UndefinedValue(), output);
  jump(cacheHit);
}

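// Editorial note: TaggedSlotOffset packs a byte offset together with an
// is-fixed-slot flag, roughly |(offset << OffsetShift) | isFixedSlot|. The
// shifted value recovered in scratch2 above is therefore already a byte
// offset, which is why both loadValue calls use BaseIndex(..., TimesOne)
// instead of scaling by the slot size.
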
template <typename IdOperandType>
void MacroAssembler::emitMegamorphicCacheLookupByValueCommon(
    IdOperandType id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, Label* cacheMiss, Label* cacheMissWithEntry) {
  // A lot of this code is shared with emitMegamorphicCacheLookup. It would
  // be nice to be able to avoid the duplication here, but due to a few
  // differences like taking the id in a ValueOperand instead of being able
  // to bake it in as an immediate, and only needing a Register for the output
  // value, it seemed more awkward to read once it was deduplicated.

  // outEntryPtr = obj->shape()
  loadPtr(Address(obj, JSObject::offsetOfShape()), outEntryPtr);

  movePtr(outEntryPtr, scratch2);

  // outEntryPtr = (outEntryPtr >> 3) ^ (outEntryPtr >> 13) + idHash
  rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1), outEntryPtr);
  rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2), scratch2);
  xorPtr(scratch2, outEntryPtr);

  if constexpr (std::is_same<IdOperandType, ValueOperand>::value) {
    loadAtomOrSymbolAndHash(id, scratch1, scratch2, cacheMiss);
  } else {
    static_assert(std::is_same<IdOperandType, Register>::value);
    movePtr(id, scratch1);
    loadAtomHash(scratch1, scratch2, nullptr);
  }
  addPtr(scratch2, outEntryPtr);

  // outEntryPtr %= MegamorphicCache::NumEntries
  constexpr size_t cacheSize = MegamorphicCache::NumEntries;
  static_assert(mozilla::IsPowerOfTwo(cacheSize));
  size_t cacheMask = cacheSize - 1;
  and32(Imm32(cacheMask), outEntryPtr);

  loadMegamorphicCache(scratch2);
  // outEntryPtr = &scratch2->entries_[outEntryPtr]
  constexpr size_t entrySize = sizeof(MegamorphicCache::Entry);
  static_assert(sizeof(void*) == 4 || entrySize == 24);
  if constexpr (sizeof(void*) == 4) {
    mul32(Imm32(entrySize), outEntryPtr);
    computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesOne,
                                      MegamorphicCache::offsetOfEntries()),
                            outEntryPtr);
  } else {
    computeEffectiveAddress(BaseIndex(outEntryPtr, outEntryPtr, TimesTwo),
                            outEntryPtr);
    computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesEight,
                                      MegamorphicCache::offsetOfEntries()),
                            outEntryPtr);
  }

  // if (outEntryPtr->key_ != scratch1) goto cacheMissWithEntry
  branchPtr(Assembler::NotEqual,
            Address(outEntryPtr, MegamorphicCache::Entry::offsetOfKey()),
            scratch1, cacheMissWithEntry);

  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);

  // if (outEntryPtr->shape_ != scratch1) goto cacheMissWithEntry
  branchPtr(Assembler::NotEqual,
            Address(outEntryPtr, MegamorphicCache::Entry::offsetOfShape()),
            scratch1, cacheMissWithEntry);

  // scratch2 = scratch2->generation_
  load16ZeroExtend(Address(scratch2, MegamorphicCache::offsetOfGeneration()),
                   scratch2);
  load16ZeroExtend(
      Address(outEntryPtr, MegamorphicCache::Entry::offsetOfGeneration()),
      scratch1);
  // if (outEntryPtr->generation_ != scratch2) goto cacheMissWithEntry
  branch32(Assembler::NotEqual, scratch1, scratch2, cacheMissWithEntry);
}

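// Editorial sketch of the bucket computation emitted above, as plain C++.
// The shift amounts are written as 3 and 13 because that is what the code's
// own comments state ShapeHashShift1/2 currently are; treat them as
// illustrative:
//
//   uintptr_t shape = uintptr_t(obj->shape());
//   size_t bucket = ((shape >> 3) ^ (shape >> 13)) + idHash;
//   bucket &= MegamorphicCache::NumEntries - 1;  // NumEntries is a power of 2
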
void MacroAssembler::emitMegamorphicCacheLookup(
    PropertyKey id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, ValueOperand output, Label* cacheHit) {
  Label cacheMiss, isMissing, dynamicSlot, protoLoopHead, protoLoopTail;

  // scratch1 = obj->shape()
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);

  movePtr(scratch1, outEntryPtr);
  movePtr(scratch1, scratch2);

  // outEntryPtr = (scratch1 >> 3) ^ (scratch1 >> 13) + hash(id)
  rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1), outEntryPtr);
  rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2), scratch2);
  xorPtr(scratch2, outEntryPtr);
  addPtr(Imm32(HashAtomOrSymbolPropertyKey(id)), outEntryPtr);

  // outEntryPtr %= MegamorphicCache::NumEntries
  constexpr size_t cacheSize = MegamorphicCache::NumEntries;
  static_assert(mozilla::IsPowerOfTwo(cacheSize));
  size_t cacheMask = cacheSize - 1;
  and32(Imm32(cacheMask), outEntryPtr);

  loadMegamorphicCache(scratch2);
  // outEntryPtr = &scratch2->entries_[outEntryPtr]
  constexpr size_t entrySize = sizeof(MegamorphicCache::Entry);
  static_assert(sizeof(void*) == 4 || entrySize == 24);
  if constexpr (sizeof(void*) == 4) {
    mul32(Imm32(entrySize), outEntryPtr);
    computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesOne,
                                      MegamorphicCache::offsetOfEntries()),
                            outEntryPtr);
  } else {
    computeEffectiveAddress(BaseIndex(outEntryPtr, outEntryPtr, TimesTwo),
                            outEntryPtr);
    computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesEight,
                                      MegamorphicCache::offsetOfEntries()),
                            outEntryPtr);
  }

  // if (outEntryPtr->shape_ != scratch1) goto cacheMiss
  branchPtr(Assembler::NotEqual,
            Address(outEntryPtr, MegamorphicCache::Entry::offsetOfShape()),
            scratch1, &cacheMiss);

  // if (outEntryPtr->key_ != id) goto cacheMiss
  movePropertyKey(id, scratch1);
  branchPtr(Assembler::NotEqual,
            Address(outEntryPtr, MegamorphicCache::Entry::offsetOfKey()),
            scratch1, &cacheMiss);

  // scratch2 = scratch2->generation_
  load16ZeroExtend(Address(scratch2, MegamorphicCache::offsetOfGeneration()),
                   scratch2);
  load16ZeroExtend(
      Address(outEntryPtr, MegamorphicCache::Entry::offsetOfGeneration()),
      scratch1);
  // if (outEntryPtr->generation_ != scratch2) goto cacheMiss
  branch32(Assembler::NotEqual, scratch1, scratch2, &cacheMiss);

  emitExtractValueFromMegamorphicCacheEntry(
      obj, outEntryPtr, scratch1, scratch2, output, cacheHit, &cacheMiss);

  bind(&cacheMiss);
}

template <typename IdOperandType>
void MacroAssembler::emitMegamorphicCacheLookupByValue(
    IdOperandType id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, ValueOperand output, Label* cacheHit) {
  Label cacheMiss, cacheMissWithEntry;
  emitMegamorphicCacheLookupByValueCommon(id, obj, scratch1, scratch2,
                                          outEntryPtr, &cacheMiss,
                                          &cacheMissWithEntry);
  emitExtractValueFromMegamorphicCacheEntry(obj, outEntryPtr, scratch1,
                                            scratch2, output, cacheHit,
                                            &cacheMissWithEntry);
  bind(&cacheMiss);
  xorPtr(outEntryPtr, outEntryPtr);
  bind(&cacheMissWithEntry);
}

template void MacroAssembler::emitMegamorphicCacheLookupByValue<ValueOperand>(
    ValueOperand id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, ValueOperand output, Label* cacheHit);

template void MacroAssembler::emitMegamorphicCacheLookupByValue<Register>(
    Register id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, ValueOperand output, Label* cacheHit);

void MacroAssembler::emitMegamorphicCacheLookupExists(
    ValueOperand id, Register obj, Register scratch1, Register scratch2,
    Register outEntryPtr, Register output, Label* cacheHit, bool hasOwn) {
  Label cacheMiss, cacheMissWithEntry, cacheHitFalse;
  emitMegamorphicCacheLookupByValueCommon(id, obj, scratch1, scratch2,
                                          outEntryPtr, &cacheMiss,
                                          &cacheMissWithEntry);

  // scratch1 = outEntryPtr->numHops_
  load8ZeroExtend(
      Address(outEntryPtr, MegamorphicCache::Entry::offsetOfNumHops()),
      scratch1);

  branch32(Assembler::Equal, scratch1,
           Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty),
           &cacheHitFalse);

  if (hasOwn) {
    branch32(Assembler::NotEqual, scratch1, Imm32(0), &cacheHitFalse);
  } else {
    branch32(Assembler::Equal, scratch1,
             Imm32(MegamorphicCache::Entry::NumHopsForMissingOwnProperty),
             &cacheMissWithEntry);
  }

  move32(Imm32(1), output);
  jump(cacheHit);

  bind(&cacheHitFalse);
  xor32(output, output);
  jump(cacheHit);

  bind(&cacheMiss);
  xorPtr(outEntryPtr, outEntryPtr);
  bind(&cacheMissWithEntry);
}

void MacroAssembler::extractCurrentIndexAndKindFromIterator(Register iterator,
                                                            Register outIndex,
                                                            Register outKind) {
  // Load iterator object.
  Address nativeIterAddr(iterator,
                         PropertyIteratorObject::offsetOfIteratorSlot());
  loadPrivate(nativeIterAddr, outIndex);

  // Compute offset of propertyCursor_ from propertiesBegin().
  loadPtr(Address(outIndex, NativeIterator::offsetOfPropertyCursor()), outKind);
  subPtr(Address(outIndex, NativeIterator::offsetOfShapesEnd()), outKind);

  // Compute offset of current index from indicesBegin(). Note that because
  // propertyCursor has already been incremented, this is actually the offset
  // of the next index. We adjust accordingly below.
  size_t indexAdjustment =
      sizeof(GCPtr<JSLinearString*>) / sizeof(PropertyIndex);
  if (indexAdjustment != 1) {
    MOZ_ASSERT(indexAdjustment == 2);
    rshift32(Imm32(1), outKind);
  }

  // Load current index.
  loadPtr(Address(outIndex, NativeIterator::offsetOfPropertiesEnd()), outIndex);
  load32(BaseIndex(outIndex, outKind, Scale::TimesOne,
                   -int32_t(sizeof(PropertyIndex))),
         outIndex);

  // Extract kind.
  move32(outIndex, outKind);
  rshift32(Imm32(PropertyIndex::KindShift), outKind);

  // Extract index.
  and32(Imm32(PropertyIndex::IndexMask), outIndex);
}

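// Editorial note: each PropertyIndex packs its kind and index into a single
// 32-bit word, so the two outputs above are recovered as
//   outKind  = word >> PropertyIndex::KindShift
//   outIndex = word & PropertyIndex::IndexMask
// The earlier rshift32 by 1 compensates for property names and indices
// having different element sizes in the iterator's parallel arrays.
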
template <typename IdType>
void MacroAssembler::emitMegamorphicCachedSetSlot(
    IdType id, Register obj, Register scratch1,
#ifndef JS_CODEGEN_X86  // See MegamorphicSetElement in LIROps.yaml
    Register scratch2, Register scratch3,
#endif
    ValueOperand value, Label* cacheHit,
    void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType)) {
  Label cacheMiss, dynamicSlot, doAdd, doSet, doAddDynamic, doSetDynamic;

#ifdef JS_CODEGEN_X86
  pushValue(value);
  Register scratch2 = value.typeReg();
  Register scratch3 = value.payloadReg();
#endif

  // scratch3 = obj->shape()
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch3);

  movePtr(scratch3, scratch2);

  // scratch3 = (scratch3 >> 3) ^ (scratch3 >> 13) + idHash
  rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift1), scratch3);
  rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift2), scratch2);
  xorPtr(scratch2, scratch3);

  if constexpr (std::is_same<IdType, ValueOperand>::value) {
    loadAtomOrSymbolAndHash(id, scratch1, scratch2, &cacheMiss);
    addPtr(scratch2, scratch3);
  } else {
    static_assert(std::is_same<IdType, PropertyKey>::value);
    addPtr(Imm32(HashAtomOrSymbolPropertyKey(id)), scratch3);
    movePropertyKey(id, scratch1);
  }

  // scratch3 %= MegamorphicSetPropCache::NumEntries
  constexpr size_t cacheSize = MegamorphicSetPropCache::NumEntries;
  static_assert(mozilla::IsPowerOfTwo(cacheSize));
  size_t cacheMask = cacheSize - 1;
  and32(Imm32(cacheMask), scratch3);

  loadMegamorphicSetPropCache(scratch2);
  // scratch3 = &scratch2->entries_[scratch3]
  constexpr size_t entrySize = sizeof(MegamorphicSetPropCache::Entry);
  mul32(Imm32(entrySize), scratch3);
  computeEffectiveAddress(BaseIndex(scratch2, scratch3, TimesOne,
                                    MegamorphicSetPropCache::offsetOfEntries()),
                          scratch3);

  // if (scratch3->key_ != scratch1) goto cacheMiss
  branchPtr(Assembler::NotEqual,
            Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfKey()),
            scratch1, &cacheMiss);

  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);
  // if (scratch3->shape_ != scratch1) goto cacheMiss
  branchPtr(Assembler::NotEqual,
            Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfShape()),
            scratch1, &cacheMiss);

  // scratch2 = scratch2->generation_
  load16ZeroExtend(
      Address(scratch2, MegamorphicSetPropCache::offsetOfGeneration()),
      scratch2);
  load16ZeroExtend(
      Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfGeneration()),
      scratch1);
  // if (scratch3->generation_ != scratch2) goto cacheMiss
  branch32(Assembler::NotEqual, scratch1, scratch2, &cacheMiss);

  // scratch2 = entry->slotOffset()
  load32(
      Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfSlotOffset()),
      scratch2);

  // scratch1 = slotOffset.offset()
  move32(scratch2, scratch1);
  rshift32(Imm32(TaggedSlotOffset::OffsetShift), scratch1);

  Address afterShapePtr(scratch3,
                        MegamorphicSetPropCache::Entry::offsetOfAfterShape());

  // if (!slotOffset.isFixedSlot()) goto dynamicSlot
  branchTest32(Assembler::Zero, scratch2,
               Imm32(TaggedSlotOffset::IsFixedSlotFlag), &dynamicSlot);

  // Calculate slot address in scratch1. Jump to doSet if scratch3 == nullptr,
  // else jump (or fall-through) to doAdd.
  addPtr(obj, scratch1);
  branchPtr(Assembler::Equal, afterShapePtr, ImmPtr(nullptr), &doSet);
  jump(&doAdd);

  bind(&dynamicSlot);
  branchPtr(Assembler::Equal, afterShapePtr, ImmPtr(nullptr), &doSetDynamic);

  Address slotAddr(scratch1, 0);

  // If entry->newCapacity_ is nonzero, we need to grow the slots on the
  // object. Otherwise just jump straight to a dynamic add.
  load16ZeroExtend(
      Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfNewCapacity()),
      scratch2);
  branchTest32(Assembler::Zero, scratch2, scratch2, &doAddDynamic);

  AllocatableRegisterSet regs(RegisterSet::Volatile());
  regs.takeUnchecked(scratch2);

  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  Register tmp;
  if (regs.has(obj)) {
    regs.takeUnchecked(obj);
    tmp = regs.takeAnyGeneral();
    regs.addUnchecked(obj);
  } else {
    tmp = regs.takeAnyGeneral();
  }

  using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
  setupUnalignedABICall(tmp);
  loadJSContext(tmp);
  passABIArg(tmp);
  passABIArg(obj);
  passABIArg(scratch2);
  callWithABI<Fn, NativeObject::growSlotsPure>();
  storeCallPointerResult(scratch2);
  PopRegsInMask(save);

  branchIfFalseBool(scratch2, &cacheMiss);

  bind(&doAddDynamic);
  addPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);

  bind(&doAdd);
  // scratch3 = entry->afterShape()
  loadPtr(
      Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfAfterShape()),
      scratch3);

  storeObjShape(scratch3, obj,
                [emitPreBarrier](MacroAssembler& masm, const Address& addr) {
                  emitPreBarrier(masm, addr, MIRType::Shape);
                });
#ifdef JS_CODEGEN_X86
  popValue(value);
#endif
  storeValue(value, slotAddr);
  jump(cacheHit);

  bind(&doSetDynamic);
  addPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);

  bind(&doSet);
  guardedCallPreBarrier(slotAddr, MIRType::Value);

#ifdef JS_CODEGEN_X86
  popValue(value);
#endif
  storeValue(value, slotAddr);
  jump(cacheHit);

  bind(&cacheMiss);
#ifdef JS_CODEGEN_X86
  popValue(value);
#endif
}

template void MacroAssembler::emitMegamorphicCachedSetSlot<PropertyKey>(
    PropertyKey id, Register obj, Register scratch1,
#ifndef JS_CODEGEN_X86  // See MegamorphicSetElement in LIROps.yaml
    Register scratch2, Register scratch3,
#endif
    ValueOperand value, Label* cacheHit,
    void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));

template void MacroAssembler::emitMegamorphicCachedSetSlot<ValueOperand>(
    ValueOperand id, Register obj, Register scratch1,
#ifndef JS_CODEGEN_X86  // See MegamorphicSetElement in LIROps.yaml
    Register scratch2, Register scratch3,
#endif
    ValueOperand value, Label* cacheHit,
    void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));

void MacroAssembler::guardNonNegativeIntPtrToInt32(Register reg, Label* fail) {
#ifdef DEBUG
  Label ok;
  branchPtr(Assembler::NotSigned, reg, reg, &ok);
  assumeUnreachable("Unexpected negative value");
  bind(&ok);
#endif

#ifdef JS_64BIT
  branchPtr(Assembler::Above, reg, Imm32(INT32_MAX), fail);
#endif
}

void MacroAssembler::loadArrayBufferByteLengthIntPtr(Register obj,
                                                     Register output) {
  Address slotAddr(obj, ArrayBufferObject::offsetOfByteLengthSlot());
  loadPrivate(slotAddr, output);
}

void MacroAssembler::loadArrayBufferViewByteOffsetIntPtr(Register obj,
                                                         Register output) {
  Address slotAddr(obj, ArrayBufferViewObject::byteOffsetOffset());
  loadPrivate(slotAddr, output);
}

void MacroAssembler::loadArrayBufferViewLengthIntPtr(Register obj,
                                                     Register output) {
  Address slotAddr(obj, ArrayBufferViewObject::lengthOffset());
  loadPrivate(slotAddr, output);
}

void MacroAssembler::loadDOMExpandoValueGuardGeneration(
    Register obj, ValueOperand output,
    JS::ExpandoAndGeneration* expandoAndGeneration, uint64_t generation,
    Label* fail) {
  loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
          output.scratchReg());
  loadValue(Address(output.scratchReg(),
                    js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
            output);

  // Guard the ExpandoAndGeneration* matches the proxy's ExpandoAndGeneration
  // private slot.
  branchTestValue(Assembler::NotEqual, output,
                  PrivateValue(expandoAndGeneration), fail);

  // Guard expandoAndGeneration->generation matches the expected generation.
  Address generationAddr(output.payloadOrValueReg(),
                         JS::ExpandoAndGeneration::offsetOfGeneration());
  branch64(Assembler::NotEqual, generationAddr, Imm64(generation), fail);

  // Load expandoAndGeneration->expando into the output Value register.
  loadValue(Address(output.payloadOrValueReg(),
                    JS::ExpandoAndGeneration::offsetOfExpando()),
            output);
}

void MacroAssembler::loadJitActivation(Register dest) {
  loadJSContext(dest);
  loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
}

void MacroAssembler::guardSpecificAtom(Register str, JSAtom* atom,
                                       Register scratch,
                                       const LiveRegisterSet& volatileRegs,
                                       Label* fail) {
  Label done;
  branchPtr(Assembler::Equal, str, ImmGCPtr(atom), &done);

  // The pointers are not equal, so if the input string is also an atom it
  // must be a different string.
  branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
               Imm32(JSString::ATOM_BIT), fail);

  // Check the length.
  branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
           Imm32(atom->length()), fail);

  // Compare short atoms using inline assembly.
  if (canCompareStringCharsInline(atom)) {
    // Pure two-byte strings can't be equal to Latin-1 strings.
    if (atom->hasTwoByteChars()) {
      JS::AutoCheckCannotGC nogc;
      if (!mozilla::IsUtf16Latin1(atom->twoByteRange(nogc))) {
        branchLatin1String(str, fail);
      }
    }

    // Call into the VM when the input is a rope or has a different encoding.
    Label vmCall;

    // Load the input string's characters.
    Register stringChars = scratch;
    loadStringCharsForCompare(str, atom, stringChars, &vmCall);

    // Start comparing character by character.
    branchIfNotStringCharsEquals(stringChars, atom, fail);

    // Falls through if both strings are equal.
    jump(&done);

    bind(&vmCall);
  }

  // We have a non-atomized string with the same length. Call a helper
  // function to do the comparison.
  PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSString* str1, JSString* str2);
  setupUnalignedABICall(scratch);
  movePtr(ImmGCPtr(atom), scratch);
  passABIArg(scratch);
  passABIArg(str);
  callWithABI<Fn, EqualStringsHelperPure>();
  storeCallPointerResult(scratch);

  MOZ_ASSERT(!volatileRegs.has(scratch));
  PopRegsInMask(volatileRegs);
  branchIfFalseBool(scratch, fail);

  bind(&done);
}

void MacroAssembler::guardStringToInt32(Register str, Register output,
                                        Register scratch,
                                        LiveRegisterSet volatileRegs,
                                        Label* fail) {
  Label vmCall, done;
  // Use indexed value as fast path if possible.
  loadStringIndexValue(str, output, &vmCall);
  jump(&done);

  bind(&vmCall);

  // Reserve space for holding the result int32_t of the call. Use
  // pointer-size to avoid misaligning the stack on 64-bit platforms.
  reserveStack(sizeof(uintptr_t));
  moveStackPtrTo(output);

  volatileRegs.takeUnchecked(scratch);
  if (output.volatile_()) {
    volatileRegs.addUnchecked(output);
  }
  PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSString* str, int32_t* result);
  setupUnalignedABICall(scratch);
  loadJSContext(scratch);
  passABIArg(scratch);
  passABIArg(str);
  passABIArg(output);
  callWithABI<Fn, GetInt32FromStringPure>();
  storeCallPointerResult(scratch);

  PopRegsInMask(volatileRegs);

  Label ok;
  branchIfTrueBool(scratch, &ok);
  {
    // OOM path, recovered by GetInt32FromStringPure.
    //
    // Use addToStackPtr instead of freeStack as freeStack tracks stack height
    // flow-insensitively, and using it twice would confuse the stack height
    // tracking.
    addToStackPtr(Imm32(sizeof(uintptr_t)));
    jump(fail);
  }
  bind(&ok);
  load32(Address(output, 0), output);
  freeStack(sizeof(uintptr_t));

  bind(&done);
}

void MacroAssembler::generateBailoutTail(Register scratch,
                                         Register bailoutInfo) {
  Label bailoutFailed;
  branchIfFalseBool(ReturnReg, &bailoutFailed);

  // Finish bailing out to Baseline.
  {
    // Prepare a register set for use in this case.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    MOZ_ASSERT_IF(!IsHiddenSP(getStackPointer()),
                  !regs.has(AsRegister(getStackPointer())));
    regs.take(bailoutInfo);

    Register temp = regs.takeAny();

#ifdef DEBUG
    // Assert the stack pointer points to the JitFrameLayout header. Copying
    // starts here.
    Label ok;
    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)),
            temp);
    branchStackPtr(Assembler::Equal, temp, &ok);
    assumeUnreachable("Unexpected stack pointer value");
    bind(&ok);
#endif

    Register copyCur = regs.takeAny();
    Register copyEnd = regs.takeAny();

    // Copy data onto stack.
    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)),
            copyCur);
    loadPtr(
        Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)),
        copyEnd);
    {
      Label copyLoop;
      Label endOfCopy;
      bind(&copyLoop);
      branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
      subPtr(Imm32(sizeof(uintptr_t)), copyCur);
      subFromStackPtr(Imm32(sizeof(uintptr_t)));
      loadPtr(Address(copyCur, 0), temp);
      storePtr(temp, Address(getStackPointer(), 0));
      jump(&copyLoop);
      bind(&endOfCopy);
    }

    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)),
            FramePointer);

    // Enter exit frame for the FinishBailoutToBaseline call.
    pushFrameDescriptor(FrameType::BaselineJS);
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
    push(FramePointer);

    // No GC things to mark on the stack, push a bare token.
    loadJSContext(scratch);
    enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);

    // Save needed values onto stack temporarily.
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));

    // Call a stub to free allocated memory and create arguments objects.
    using Fn = bool (*)(BaselineBailoutInfo* bailoutInfoArg);
    setupUnalignedABICall(temp);
    passABIArg(bailoutInfo);
    callWithABI<Fn, FinishBailoutToBaseline>(
        ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
    branchIfFalseBool(ReturnReg, exceptionLabel());

    // Restore values where they need to be and resume execution.
    AllocatableGeneralRegisterSet enterRegs(GeneralRegisterSet::All());
    MOZ_ASSERT(!enterRegs.has(FramePointer));
    Register jitcodeReg = enterRegs.takeAny();

    pop(jitcodeReg);

    // Discard exit frame.
    addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));

    jump(jitcodeReg);
  }

  bind(&bailoutFailed);
  {
    // jit::Bailout or jit::InvalidationBailout failed and returned false. The
    // Ion frame has already been discarded and the stack pointer points to the
    // JitFrameLayout header. Turn it into an ExitFrameLayout, similar to
    // EnsureUnwoundJitExitFrame, and call the exception handler.
    loadJSContext(scratch);
    enterFakeExitFrame(scratch, scratch, ExitFrameType::UnwoundJit);
    jump(exceptionLabel());
  }
}

void MacroAssembler::loadJitCodeRaw(Register func, Register dest) {
  static_assert(BaseScript::offsetOfJitCodeRaw() ==
                    SelfHostedLazyScript::offsetOfJitCodeRaw(),
                "SelfHostedLazyScript and BaseScript must use same layout for "
                "jitCodeRaw_");
  static_assert(
      BaseScript::offsetOfJitCodeRaw() == wasm::JumpTableJitEntryOffset,
      "Wasm exported functions jit entries must use same layout for "
      "jitCodeRaw_");
  loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), dest);
  loadPtr(Address(dest, BaseScript::offsetOfJitCodeRaw()), dest);
}

void MacroAssembler::loadBaselineJitCodeRaw(Register func, Register dest,
                                            Label* failure) {
  // Load JitScript.
  loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), dest);
  if (failure) {
    branchIfScriptHasNoJitScript(dest, failure);
  }
  loadJitScript(dest, dest);

  // Load BaselineScript.
  loadPtr(Address(dest, JitScript::offsetOfBaselineScript()), dest);
  if (failure) {
    static_assert(BaselineDisabledScript == 0x1);
    branchPtr(Assembler::BelowOrEqual, dest, ImmWord(BaselineDisabledScript),
              failure);
  }

  // Load Baseline jitcode.
  loadPtr(Address(dest, BaselineScript::offsetOfMethod()), dest);
  loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
}

void MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest) {
  if (framePtr != dest) {
    movePtr(framePtr, dest);
  }
  subPtr(Imm32(BaselineFrame::Size()), dest);
}

static const uint8_t* ContextInlinedICScriptPtr(CompileRuntime* rt) {
  return (static_cast<const uint8_t*>(rt->mainContextPtr()) +
          JSContext::offsetOfInlinedICScript());
}

void MacroAssembler::storeICScriptInJSContext(Register icScript) {
  storePtr(icScript, AbsoluteAddress(ContextInlinedICScriptPtr(runtime())));
}

void MacroAssembler::handleFailure() {
  // Re-entry code is irrelevant because the exception will leave the
  // running function and never come back.
  TrampolinePtr excTail = runtime()->jitRuntime()->getExceptionTail();
  jump(excTail);
}

void MacroAssembler::assumeUnreachable(const char* output) {
#ifdef JS_MASM_VERBOSE
  if (!IsCompilingWasm()) {
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    Register temp = regs.takeAnyGeneral();

    using Fn = void (*)(const char* output);
    setupUnalignedABICall(temp);
    movePtr(ImmPtr(output), temp);
    passABIArg(temp);
    callWithABI<Fn, AssumeUnreachable>(ABIType::General,
                                       CheckUnsafeCallWithABI::DontCheckOther);

    PopRegsInMask(save);
  }
#endif

  breakpoint();
}

void MacroAssembler::printf(const char* output) {
#ifdef JS_MASM_VERBOSE
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(const char* output);
  setupUnalignedABICall(temp);
  movePtr(ImmPtr(output), temp);
  passABIArg(temp);
  callWithABI<Fn, Printf0>();

  PopRegsInMask(save);
#endif
}

void MacroAssembler::printf(const char* output, Register value) {
#ifdef JS_MASM_VERBOSE
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  regs.takeUnchecked(value);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(const char* output, uintptr_t value);
  setupUnalignedABICall(temp);
  movePtr(ImmPtr(output), temp);
  passABIArg(temp);
  passABIArg(value);
  callWithABI<Fn, Printf1>();

  PopRegsInMask(save);
#endif
}

void MacroAssembler::convertInt32ValueToDouble(ValueOperand val) {
  Label done;
  branchTestInt32(Assembler::NotEqual, val, &done);
  unboxInt32(val, val.scratchReg());
  ScratchDoubleScope fpscratch(*this);
  convertInt32ToDouble(val.scratchReg(), fpscratch);
  boxDouble(fpscratch, val, fpscratch);
  bind(&done);
}

void MacroAssembler::convertValueToFloatingPoint(ValueOperand value,
                                                 FloatRegister output,
                                                 Label* fail,
                                                 MIRType outputType) {
  Label isDouble, isInt32, isBool, isNull, done;

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    branchTestDouble(Assembler::Equal, tag, &isDouble);
    branchTestInt32(Assembler::Equal, tag, &isInt32);
    branchTestBoolean(Assembler::Equal, tag, &isBool);
    branchTestNull(Assembler::Equal, tag, &isNull);
    branchTestUndefined(Assembler::NotEqual, tag, fail);
  }

  // fall-through: undefined
  loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output,
                            outputType);
  jump(&done);

  bind(&isNull);
  loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
  jump(&done);

  bind(&isBool);
  boolValueToFloatingPoint(value, output, outputType);
  jump(&done);

  bind(&isInt32);
  int32ValueToFloatingPoint(value, output, outputType);
  jump(&done);

  // On some non-multiAlias platforms, unboxDouble may use the scratch register,
  // so do not merge code paths here.
  bind(&isDouble);
  if (outputType == MIRType::Float32 && hasMultiAlias()) {
    ScratchDoubleScope tmp(*this);
    unboxDouble(value, tmp);
    convertDoubleToFloat32(tmp, output);
  } else {
    FloatRegister tmp = output.asDouble();
    unboxDouble(value, tmp);
    if (outputType == MIRType::Float32) {
      convertDoubleToFloat32(tmp, output);
    }
  }

  bind(&done);
}

void MacroAssembler::outOfLineTruncateSlow(FloatRegister src, Register dest,
                                           bool widenFloatToDouble,
                                           bool compilingWasm,
                                           wasm::BytecodeOffset callOffset) {
  if (compilingWasm) {
    Push(InstanceReg);
  }
  int32_t framePushedAfterInstance = framePushed();

#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) ||     \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  ScratchDoubleScope fpscratch(*this);
  if (widenFloatToDouble) {
    convertFloat32ToDouble(src, fpscratch);
    src = fpscratch;
  }
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
  FloatRegister srcSingle;
  if (widenFloatToDouble) {
    MOZ_ASSERT(src.isSingle());
    srcSingle = src;
    src = src.asDouble();
    Push(srcSingle);
    convertFloat32ToDouble(srcSingle, src);
  }
#else
  MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
#endif

  MOZ_ASSERT(src.isDouble());

  if (compilingWasm) {
    int32_t instanceOffset = framePushed() - framePushedAfterInstance;
    setupWasmABICall();
    passABIArg(src, ABIType::Float64);
    callWithABI(callOffset, wasm::SymbolicAddress::ToInt32,
                mozilla::Some(instanceOffset));
  } else {
    using Fn = int32_t (*)(double);
    setupUnalignedABICall(dest);
    passABIArg(src, ABIType::Float64);
    callWithABI<Fn, JS::ToInt32>(ABIType::General,
                                 CheckUnsafeCallWithABI::DontCheckOther);
  }
  storeCallInt32Result(dest);

#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) ||     \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  // Nothing to restore.
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
  if (widenFloatToDouble) {
    Pop(srcSingle);
  }
#else
  MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
#endif

  if (compilingWasm) {
    Pop(InstanceReg);
  }
}

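// In effect, the slow path above computes (a sketch; the float32 input is
// widened first because both callees take a double argument):
//
//   int32_t result = JS::ToInt32(double(src));  // ECMA-262 ToInt32
//   dest = result;
//
// with the wasm variant routed through a builtin thunk that expects the
// instance register to be set.
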
void MacroAssembler::convertDoubleToInt(FloatRegister src, Register output,
                                        FloatRegister temp, Label* truncateFail,
                                        Label* fail,
                                        IntConversionBehavior behavior) {
  switch (behavior) {
    case IntConversionBehavior::Normal:
    case IntConversionBehavior::NegativeZeroCheck:
      convertDoubleToInt32(
          src, output, fail,
          behavior == IntConversionBehavior::NegativeZeroCheck);
      break;
    case IntConversionBehavior::Truncate:
      branchTruncateDoubleMaybeModUint32(src, output,
                                         truncateFail ? truncateFail : fail);
      break;
    case IntConversionBehavior::ClampToUint8:
      // Clamping clobbers the input register, so use a temp.
      if (src != temp) {
        moveDouble(src, temp);
      }
      clampDoubleToUint8(temp, output);
      break;
  }
}

void MacroAssembler::convertValueToInt(
    ValueOperand value, Label* handleStringEntry, Label* handleStringRejoin,
    Label* truncateDoubleSlow, Register stringReg, FloatRegister temp,
    Register output, Label* fail, IntConversionBehavior behavior,
    IntConversionInputKind conversion) {
  Label done, isInt32, isBool, isDouble, isNull, isString;

  bool handleStrings = (behavior == IntConversionBehavior::Truncate ||
                        behavior == IntConversionBehavior::ClampToUint8) &&
                       handleStringEntry && handleStringRejoin;

  MOZ_ASSERT_IF(handleStrings, conversion == IntConversionInputKind::Any);

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    branchTestInt32(Equal, tag, &isInt32);
    if (conversion == IntConversionInputKind::Any ||
        conversion == IntConversionInputKind::NumbersOrBoolsOnly) {
      branchTestBoolean(Equal, tag, &isBool);
    }
    branchTestDouble(Equal, tag, &isDouble);

    if (conversion == IntConversionInputKind::Any) {
      // If we are not truncating, we fail for anything that's not
      // null. Otherwise we might be able to handle strings and undefined.
      switch (behavior) {
        case IntConversionBehavior::Normal:
        case IntConversionBehavior::NegativeZeroCheck:
          branchTestNull(Assembler::NotEqual, tag, fail);
          break;

        case IntConversionBehavior::Truncate:
        case IntConversionBehavior::ClampToUint8:
          branchTestNull(Equal, tag, &isNull);
          if (handleStrings) {
            branchTestString(Equal, tag, &isString);
          }
          branchTestUndefined(Assembler::NotEqual, tag, fail);
          break;
      }
    } else {
      jump(fail);
    }
  }

  // The value is null or undefined in truncation contexts - just emit 0.
  if (conversion == IntConversionInputKind::Any) {
    if (isNull.used()) {
      bind(&isNull);
    }
    mov(ImmWord(0), output);
    jump(&done);
  }

  // |output| needs to be different from |stringReg| to load string indices.
  bool handleStringIndices = handleStrings && output != stringReg;

  // First try loading a string index. If that fails, try converting a string
  // into a double, then jump to the double case.
  Label handleStringIndex;
  if (handleStrings) {
    bind(&isString);
    unboxString(value, stringReg);
    if (handleStringIndices) {
      loadStringIndexValue(stringReg, output, handleStringEntry);
      jump(&handleStringIndex);
    } else {
      jump(handleStringEntry);
    }
  }

  // Try converting double into integer.
  if (isDouble.used() || handleStrings) {
    if (isDouble.used()) {
      bind(&isDouble);
      unboxDouble(value, temp);
    }

    if (handleStrings) {
      bind(handleStringRejoin);
    }

    convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
    jump(&done);
  }

  // Just unbox a bool, the result is 0 or 1.
  if (isBool.used()) {
    bind(&isBool);
    unboxBoolean(value, output);
    jump(&done);
  }

  // Integers can be unboxed.
  if (isInt32.used() || handleStringIndices) {
    if (isInt32.used()) {
      bind(&isInt32);
      unboxInt32(value, output);
    }

    if (handleStringIndices) {
      bind(&handleStringIndex);
    }

    if (behavior == IntConversionBehavior::ClampToUint8) {
      clampIntToUint8(output);
    }
  }

  bind(&done);
}

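// In summary, the dispatch above implements this conversion table (a sketch;
// |behavior| selects checked int32 conversion, truncation, or uint8
// clamping):
//
//   int32          -> unboxed as-is
//   bool           -> 0 or 1
//   double         -> via convertDoubleToInt
//   null/undefined -> 0 (truncating behaviors only)
//   string         -> string-index fast path if possible, else an
//                     out-of-line string-to-double call that rejoins the
//                     double case
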
void MacroAssembler::finish() {
  if (failureLabel_.used()) {
    bind(&failureLabel_);
    handleFailure();
  }

  MacroAssemblerSpecific::finish();

  MOZ_RELEASE_ASSERT(
      size() <= MaxCodeBytesPerProcess,
      "AssemblerBuffer should ensure we don't exceed MaxCodeBytesPerProcess");

  if (bytesNeeded() > MaxCodeBytesPerProcess) {
    setOOM();
  }
}

void MacroAssembler::link(JitCode* code) {
  MOZ_ASSERT(!oom());
  linkProfilerCallSites(code);
}

MacroAssembler::AutoProfilerCallInstrumentation::
    AutoProfilerCallInstrumentation(MacroAssembler& masm) {
  if (!masm.emitProfilingInstrumentation_) {
    return;
  }

  Register reg = CallTempReg0;
  Register reg2 = CallTempReg1;
  masm.push(reg);
  masm.push(reg2);

  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
  masm.loadJSContext(reg2);
  masm.loadPtr(Address(reg2, offsetof(JSContext, profilingActivation_)), reg2);
  masm.storePtr(reg,
                Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));

  masm.appendProfilerCallSite(label);

  masm.pop(reg2);
  masm.pop(reg);
}

void MacroAssembler::linkProfilerCallSites(JitCode* code) {
  for (size_t i = 0; i < profilerCallSites_.length(); i++) {
    CodeOffset offset = profilerCallSites_[i];
    CodeLocationLabel location(code, offset);
    PatchDataWithValueCheck(location, ImmPtr(location.raw()),
                            ImmPtr((void*)-1));
  }
}

void MacroAssembler::alignJitStackBasedOnNArgs(Register nargs,
                                               bool countIncludesThis) {
  // The stack should already be aligned to the size of a value.
  assertStackAlignment(sizeof(Value), 0);

  static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
                "JitStackValueAlignment is either 1 or 2.");
  if (JitStackValueAlignment == 1) {
    return;
  }

  // A jit frame is composed of the following:
  //
  // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
  //                                    \________JitFrameLayout_________/
  // (The stack grows this way --->)
  //
  // We want to ensure that |raddr|, the return address, is 16-byte aligned.
  // (Note: if 8-byte alignment was sufficient, we would have already
  // returned.)

  // JitFrameLayout does not affect the alignment, so we can ignore it.
  static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
                "JitFrameLayout doesn't affect stack alignment");

  // Therefore, we need to ensure that |this| is aligned.
  // This implies that |argN| must be aligned if N is even,
  // and offset by |sizeof(Value)| if N is odd.

  // Depending on the context of the caller, it may be easier to pass in a
  // register that has already been modified to include |this|. If that is the
  // case, we want to flip the direction of the test.
  Assembler::Condition condition =
      countIncludesThis ? Assembler::NonZero : Assembler::Zero;

  Label alignmentIsOffset, end;
  branchTestPtr(condition, nargs, Imm32(1), &alignmentIsOffset);

  // |argN| should be aligned to 16 bytes.
  andToStackPtr(Imm32(~(JitStackAlignment - 1)));
  jump(&end);

  // |argN| should be offset by 8 bytes from 16-byte alignment.
  // We already know that it is 8-byte aligned, so the only possibilities are:
  // a) It is 16-byte aligned, and we must offset it by 8 bytes.
  // b) It is not 16-byte aligned, and therefore already has the right offset.
  // Therefore, we test to see if it is 16-byte aligned, and adjust it if it is.
  bind(&alignmentIsOffset);
  branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
  subFromStackPtr(Imm32(sizeof(Value)));

  bind(&end);
}

void MacroAssembler::alignJitStackBasedOnNArgs(uint32_t argc,
                                               bool countIncludesThis) {
  // The stack should already be aligned to the size of a value.
  assertStackAlignment(sizeof(Value), 0);

  static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
                "JitStackValueAlignment is either 1 or 2.");
  if (JitStackValueAlignment == 1) {
    return;
  }

  // See above for full explanation.
  uint32_t nArgs = argc + !countIncludesThis;
  if (nArgs % 2 == 0) {
    // |argN| should be 16-byte aligned.
    andToStackPtr(Imm32(~(JitStackAlignment - 1)));
  } else {
    // |argN| must be 16-byte aligned if argc is even,
    // and offset by 8 if argc is odd.
    Label end;
    branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
    subFromStackPtr(Imm32(sizeof(Value)));
    bind(&end);
    assertStackAlignment(JitStackAlignment, sizeof(Value));
  }
}

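// Worked example for the parity logic above, assuming 8-byte Values and a
// 16-byte JitStackAlignment: with argc == 2 and countIncludesThis == false,
// nArgs == 3 is odd, so the stack pointer must end up 8 bytes past a 16-byte
// boundary; with argc == 3, nArgs == 4 is even and the stack pointer is
// simply rounded down to a 16-byte boundary.
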
// ===============================================================
// MacroAssembler high-level usage.

MacroAssembler::MacroAssembler(TempAllocator& alloc,
                               CompileRuntime* maybeRuntime,
                               CompileRealm* maybeRealm)
    : maybeRuntime_(maybeRuntime),
      maybeRealm_(maybeRealm),
      framePushed_(0),
#ifdef DEBUG
      inCall_(false),
#endif
      dynamicAlignment_(false),
      emitProfilingInstrumentation_(false) {
  moveResolver_.setAllocator(alloc);
}

StackMacroAssembler::StackMacroAssembler(JSContext* cx, TempAllocator& alloc)
    : MacroAssembler(alloc, CompileRuntime::get(cx->runtime()),
                     CompileRealm::get(cx->realm())) {}

IonHeapMacroAssembler::IonHeapMacroAssembler(TempAllocator& alloc,
                                             CompileRealm* realm)
    : MacroAssembler(alloc, realm->runtime(), realm) {
  MOZ_ASSERT(CurrentThreadIsIonCompiling());
}

WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc, bool limitedSize)
    : MacroAssembler(alloc) {
#if defined(JS_CODEGEN_ARM64)
  // Stubs + builtins + the baseline compiler all require the native SP,
  // not the PSP.
  SetStackPointer64(sp);
#endif
  if (!limitedSize) {
    setUnlimitedBuffer();
  }
}

WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc,
                                       const wasm::ModuleEnvironment& env,
                                       bool limitedSize)
    : MacroAssembler(alloc) {
#if defined(JS_CODEGEN_ARM64)
  // Stubs + builtins + the baseline compiler all require the native SP,
  // not the PSP.
  SetStackPointer64(sp);
#endif
  if (!limitedSize) {
    setUnlimitedBuffer();
  }
}

bool MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr,
                                             AutoSaveLiveRegisters& save) {
  return buildOOLFakeExitFrame(fakeReturnAddr);
}

#ifndef JS_CODEGEN_ARM64
void MacroAssembler::subFromStackPtr(Register reg) {
  subPtr(reg, getStackPointer());
}
#endif  // JS_CODEGEN_ARM64

//{{{ check_macroassembler_style
// ===============================================================
// Stack manipulation functions.

void MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set) {
  PushRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}

void MacroAssembler::PopRegsInMask(LiveRegisterSet set) {
  PopRegsInMaskIgnore(set, LiveRegisterSet());
}

void MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set) {
  PopRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}

void MacroAssembler::Push(PropertyKey key, Register scratchReg) {
  if (key.isGCThing()) {
    // If we're pushing a gcthing, then we can't just push the tagged key
    // value since the GC won't have any idea that the push instruction
    // carries a reference to a gcthing. Need to unpack the pointer,
    // push it using ImmGCPtr, and then rematerialize the PropertyKey at
    // runtime.

    if (key.isString()) {
      JSString* str = key.toString();
      MOZ_ASSERT((uintptr_t(str) & PropertyKey::TypeMask) == 0);
      static_assert(PropertyKey::StringTypeTag == 0,
                    "need to orPtr StringTypeTag if it's not 0");
      Push(ImmGCPtr(str));
    } else {
      MOZ_ASSERT(key.isSymbol());
      movePropertyKey(key, scratchReg);
      Push(scratchReg);
    }
  } else {
    MOZ_ASSERT(key.isInt());
    Push(ImmWord(key.asRawBits()));
  }
}

void MacroAssembler::movePropertyKey(PropertyKey key, Register dest) {
  if (key.isGCThing()) {
    // See comment in |Push(PropertyKey, ...)| above for an explanation.
    if (key.isString()) {
      JSString* str = key.toString();
      MOZ_ASSERT((uintptr_t(str) & PropertyKey::TypeMask) == 0);
      static_assert(PropertyKey::StringTypeTag == 0,
                    "need to orPtr StringTypeTag tag if it's not 0");
      movePtr(ImmGCPtr(str), dest);
    } else {
      MOZ_ASSERT(key.isSymbol());
      JS::Symbol* sym = key.toSymbol();
      movePtr(ImmGCPtr(sym), dest);
      orPtr(Imm32(PropertyKey::SymbolTypeTag), dest);
    }
  } else {
    MOZ_ASSERT(key.isInt());
    movePtr(ImmWord(key.asRawBits()), dest);
  }
}

void MacroAssembler::Push(TypedOrValueRegister v) {
  if (v.hasValue()) {
    Push(v.valueReg());
  } else if (IsFloatingPointType(v.type())) {
    FloatRegister reg = v.typedReg().fpu();
    if (v.type() == MIRType::Float32) {
      ScratchDoubleScope fpscratch(*this);
      convertFloat32ToDouble(reg, fpscratch);
      PushBoxed(fpscratch);
    } else {
      PushBoxed(reg);
    }
  } else {
    Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
  }
}

void MacroAssembler::Push(const ConstantOrRegister& v) {
  if (v.constant()) {
    Push(v.value());
  } else {
    Push(v.reg());
  }
}

void MacroAssembler::Push(const Address& addr) {
  push(addr);
  framePushed_ += sizeof(uintptr_t);
}

void MacroAssembler::Push(const ValueOperand& val) {
  pushValue(val);
  framePushed_ += sizeof(Value);
}

void MacroAssembler::Push(const Value& val) {
  pushValue(val);
  framePushed_ += sizeof(Value);
}

void MacroAssembler::Push(JSValueType type, Register reg) {
  pushValue(type, reg);
  framePushed_ += sizeof(Value);
}

void MacroAssembler::Push(const Register64 reg) {
#if JS_BITS_PER_WORD == 64
  Push(reg.reg);
#else
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Big-endian not supported.");
  Push(reg.high);
  Push(reg.low);
#endif
}

void MacroAssembler::PushEmptyRooted(VMFunctionData::RootType rootType) {
  switch (rootType) {
    case VMFunctionData::RootNone:
      MOZ_CRASH("Handle must have root type");
    case VMFunctionData::RootObject:
    case VMFunctionData::RootString:
    case VMFunctionData::RootCell:
    case VMFunctionData::RootBigInt:
      Push(ImmPtr(nullptr));
      break;
    case VMFunctionData::RootValue:
      Push(UndefinedValue());
      break;
    case VMFunctionData::RootId:
      Push(ImmWord(JS::PropertyKey::Void().asRawBits()));
      break;
  }
}

void MacroAssembler::adjustStack(int amount) {
  if (amount > 0) {
    freeStack(amount);
  } else if (amount < 0) {
    reserveStack(-amount);
  }
}

void MacroAssembler::freeStack(uint32_t amount) {
  MOZ_ASSERT(amount <= framePushed_);
  if (amount) {
    addToStackPtr(Imm32(amount));
  }
  framePushed_ -= amount;
}

void MacroAssembler::freeStack(Register amount) { addToStackPtr(amount); }

void MacroAssembler::reserveVMFunctionOutParamSpace(const VMFunctionData& f) {
  switch (f.outParam) {
    case Type_Handle:
      PushEmptyRooted(f.outParamRootType);
      break;

    case Type_Value:
    case Type_Int32:
    case Type_Bool:
    case Type_Pointer:
    case Type_Double:
      reserveStack(f.sizeOfOutParamStackSlot());
      break;

    case Type_Void:
      break;

    default:
      MOZ_CRASH("Unexpected outparam type");
  }
}

void MacroAssembler::loadVMFunctionOutParam(const VMFunctionData& f,
                                            const Address& addr) {
  switch (f.outParam) {
    case Type_Handle:
      switch (f.outParamRootType) {
        case VMFunctionData::RootNone:
          MOZ_CRASH("Handle must have root type");
        case VMFunctionData::RootObject:
        case VMFunctionData::RootString:
        case VMFunctionData::RootCell:
        case VMFunctionData::RootBigInt:
        case VMFunctionData::RootId:
          loadPtr(addr, ReturnReg);
          break;
        case VMFunctionData::RootValue:
          loadValue(addr, JSReturnOperand);
          break;
      }
      break;

    case Type_Value:
      loadValue(addr, JSReturnOperand);
      break;

    case Type_Int32:
      load32(addr, ReturnReg);
      break;

    case Type_Bool:
      load8ZeroExtend(addr, ReturnReg);
      break;

    case Type_Double:
      loadDouble(addr, ReturnDoubleReg);
      break;

    case Type_Pointer:
      loadPtr(addr, ReturnReg);
      break;

    case Type_Void:
      break;

    default:
      MOZ_CRASH("Unexpected outparam type");
  }
}

// ===============================================================
// ABI function calls.
template <class ABIArgGeneratorT>
void MacroAssembler::setupABICallHelper() {
#ifdef DEBUG
  MOZ_ASSERT(!inCall_);
  inCall_ = true;
#endif

  // Reinitialize the ABIArg generator.
  abiArgs_ = ABIArgGeneratorT();

#if defined(JS_CODEGEN_ARM)
  // On ARM, we need to know what ABI we are using, either in the
  // simulator, or based on the configure flags.
#  if defined(JS_SIMULATOR_ARM)
  abiArgs_.setUseHardFp(UseHardFpABI());
#  elif defined(JS_CODEGEN_ARM_HARDFP)
  abiArgs_.setUseHardFp(true);
#  else
  abiArgs_.setUseHardFp(false);
#  endif
#endif

#if defined(JS_CODEGEN_MIPS32)
  // On MIPS, the system ABI uses general register pairs to encode double
  // arguments, after one or two integer-like arguments. Unfortunately, the
  // Lowering phase cannot express this at the moment, so we enforce the
  // system ABI here.
  abiArgs_.enforceO32ABI();
#endif
}

void MacroAssembler::setupNativeABICall() {
  setupABICallHelper<ABIArgGenerator>();
}

void MacroAssembler::setupWasmABICall() {
  MOZ_ASSERT(IsCompilingWasm(), "non-wasm should use setupAlignedABICall");
  setupABICallHelper<WasmABIArgGenerator>();

#if defined(JS_CODEGEN_ARM)
  // The builtin thunk does the FP -> GPR moving on soft-FP, so
  // use hard fp unconditionally.
  abiArgs_.setUseHardFp(true);
#endif
  dynamicAlignment_ = false;
}

void MacroAssembler::setupUnalignedABICallDontSaveRestoreSP() {
  andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  setFramePushed(0);  // Required for aligned callWithABI.
  setupAlignedABICall();
}

void MacroAssembler::setupAlignedABICall() {
  MOZ_ASSERT(!IsCompilingWasm(), "wasm should use setupWasmABICall");
  setupNativeABICall();
  dynamicAlignment_ = false;
}

void MacroAssembler::passABIArg(const MoveOperand& from, ABIType type) {
  MOZ_ASSERT(inCall_);
  appendSignatureType(type);

  ABIArg arg;
  MoveOp::Type moveType;
  switch (type) {
    case ABIType::Float32:
      arg = abiArgs_.next(MIRType::Float32);
      moveType = MoveOp::FLOAT32;
      break;
    case ABIType::Float64:
      arg = abiArgs_.next(MIRType::Double);
      moveType = MoveOp::DOUBLE;
      break;
    case ABIType::General:
      arg = abiArgs_.next(MIRType::Pointer);
      moveType = MoveOp::GENERAL;
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }

  MoveOperand to(*this, arg);
  if (from == to) {
    // Nothing to do; the argument is already in the right place.
    return;
  }

  propagateOOM(moveResolver_.addMove(from, to, moveType));
}

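// A typical call through this ABI interface looks like the following (a
// sketch mirroring the printf helpers above; |SomeCallee| stands for any
// C++ function with a matching signature):
//
//   using Fn = void (*)(const char*, uintptr_t);
//   setupUnalignedABICall(temp);
//   passABIArg(strReg);    // pointer-sized arguments use ABIType::General
//   passABIArg(valueReg);
//   callWithABI<Fn, SomeCallee>();
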
void MacroAssembler::callWithABINoProfiler(void* fun, ABIType result,
                                           CheckUnsafeCallWithABI check) {
  appendSignatureType(result);
#ifdef JS_SIMULATOR
  fun = Simulator::RedirectNativeFunction(fun, signature());
#endif

  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);

#ifdef DEBUG
  if (check == CheckUnsafeCallWithABI::Check) {
    push(ReturnReg);
    loadJSContext(ReturnReg);
    Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
    store32(Imm32(1), flagAddr);
    pop(ReturnReg);
    // On arm64, SP may be < PSP now (that's OK).
    // eg testcase: tests/bug1375074.js
  }
#endif

  call(ImmPtr(fun));

  callWithABIPost(stackAdjust, result);

#ifdef DEBUG
  if (check == CheckUnsafeCallWithABI::Check) {
    Label ok;
    push(ReturnReg);
    loadJSContext(ReturnReg);
    Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
    branch32(Assembler::Equal, flagAddr, Imm32(0), &ok);
    assumeUnreachable("callWithABI: callee did not use AutoUnsafeCallWithABI");
    bind(&ok);
    pop(ReturnReg);
    // On arm64, SP may be < PSP now (that's OK).
    // eg testcase: tests/bug1375074.js
  }
#endif
}

CodeOffset MacroAssembler::callWithABI(wasm::BytecodeOffset bytecode,
                                       wasm::SymbolicAddress imm,
                                       mozilla::Maybe<int32_t> instanceOffset,
                                       ABIType result) {
  MOZ_ASSERT(wasm::NeedsBuiltinThunk(imm));

  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust, /* callFromWasm = */ true);

  // The instance register is used in builtin thunks and must be set.
  if (instanceOffset) {
    loadPtr(Address(getStackPointer(), *instanceOffset + stackAdjust),
            InstanceReg);
  } else {
    MOZ_CRASH("instanceOffset is Nothing only for unsupported abi calls.");
  }
  CodeOffset raOffset = call(
      wasm::CallSiteDesc(bytecode.offset(), wasm::CallSite::Symbolic), imm);

  callWithABIPost(stackAdjust, result, /* callFromWasm = */ true);

  return raOffset;
}

void MacroAssembler::callDebugWithABI(wasm::SymbolicAddress imm,
                                      ABIType result) {
  MOZ_ASSERT(!wasm::NeedsBuiltinThunk(imm));
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust, /* callFromWasm = */ false);
  call(imm);
  callWithABIPost(stackAdjust, result, /* callFromWasm = */ false);
}

// ===============================================================
// Exit frame footer.

void MacroAssembler::linkExitFrame(Register cxreg, Register scratch) {
  loadPtr(Address(cxreg, JSContext::offsetOfActivation()), scratch);
  storeStackPtr(Address(scratch, JitActivation::offsetOfPackedExitFP()));
}

// ===============================================================
// Simple value-shuffling helpers, to hide MoveResolver verbosity
// in common cases.

void MacroAssembler::moveRegPair(Register src0, Register src1, Register dst0,
                                 Register dst1, MoveOp::Type type) {
  MoveResolver& moves = moveResolver();

  propagateOOM(moves.addMove(MoveOperand(src0), MoveOperand(dst0), type));
  propagateOOM(moves.addMove(MoveOperand(src1), MoveOperand(dst1), type));
  propagateOOM(moves.resolve());
  if (oom()) {
    return;
  }

  MoveEmitter emitter(*this);
  emitter.emit(moves);
  emitter.finish();
}

// ===============================================================
// Arithmetic functions

void MacroAssembler::pow32(Register base, Register power, Register dest,
                           Register temp1, Register temp2, Label* onOver) {
  // Inline int32-specialized implementation of js::powi with overflow
  // detection.

  move32(Imm32(1), dest);  // result = 1

  // x^y where x == 1 returns 1 for any y.
  Label done;
  branch32(Assembler::Equal, base, Imm32(1), &done);

  move32(base, temp1);   // runningSquare = x
  move32(power, temp2);  // n = y

  // x^y where y < 0 returns a non-int32 value for any x != 1. Except when y is
  // large enough so that the result is no longer representable as a double
  // with fractional parts. We can't easily determine when y is too large, so
  // we bail here.
  // Note: it's important for this condition to match the code in CacheIR.cpp
  // (CanAttachInt32Pow) to prevent failure loops.
  Label start;
  branchTest32(Assembler::NotSigned, power, power, &start);
  jump(onOver);

  Label loop;
  bind(&loop);

  // runningSquare *= runningSquare
  branchMul32(Assembler::Overflow, temp1, temp1, onOver);

  bind(&start);

  // if ((n & 1) != 0) result *= runningSquare
  Label even;
  branchTest32(Assembler::Zero, temp2, Imm32(1), &even);
  branchMul32(Assembler::Overflow, temp1, dest, onOver);
  bind(&even);

  // n >>= 1
  // if (n == 0) return result
  branchRshift32(Assembler::NonZero, Imm32(1), temp2, &loop);

  bind(&done);
}

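// For reference, the loop above mirrors this square-and-multiply scheme
// (a sketch; every multiplication branches to |onOver| on int32 overflow):
//
//   int32_t result = 1;
//   int32_t runningSquare = x;
//   for (int32_t n = y; ; ) {
//     if (n & 1) result *= runningSquare;
//     n >>= 1;
//     if (n == 0) break;
//     runningSquare *= runningSquare;
//   }
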
void MacroAssembler::signInt32(Register input, Register output) {
  MOZ_ASSERT(input != output);

  move32(input, output);
  rshift32Arithmetic(Imm32(31), output);
  or32(Imm32(1), output);
  cmp32Move32(Assembler::Equal, input, Imm32(0), input, output);
}

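// A sketch of the computation above: an arithmetic shift of a 32-bit input
// by 31 yields 0 for non-negative and -1 for negative values, so
//
//   output = (input >> 31) | 1;        // 1 if input >= 0, -1 if input < 0
//   if (input == 0) output = input;    // sign(0) == 0, via cmp32Move32
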
void MacroAssembler::signDouble(FloatRegister input, FloatRegister output) {
  MOZ_ASSERT(input != output);

  Label done, zeroOrNaN, negative;
  loadConstantDouble(0.0, output);
  branchDouble(Assembler::DoubleEqualOrUnordered, input, output, &zeroOrNaN);
  branchDouble(Assembler::DoubleLessThan, input, output, &negative);

  loadConstantDouble(1.0, output);
  jump(&done);

  bind(&negative);
  loadConstantDouble(-1.0, output);
  jump(&done);

  bind(&zeroOrNaN);
  moveDouble(input, output);

  bind(&done);
}

void MacroAssembler::signDoubleToInt32(FloatRegister input, Register output,
                                       FloatRegister temp, Label* fail) {
  MOZ_ASSERT(input != temp);

  Label done, zeroOrNaN, negative;
  loadConstantDouble(0.0, temp);
  branchDouble(Assembler::DoubleEqualOrUnordered, input, temp, &zeroOrNaN);
  branchDouble(Assembler::DoubleLessThan, input, temp, &negative);

  move32(Imm32(1), output);
  jump(&done);

  bind(&negative);
  move32(Imm32(-1), output);
  jump(&done);

  // Fail for NaN and negative zero.
  bind(&zeroOrNaN);
  branchDouble(Assembler::DoubleUnordered, input, input, fail);

  // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
  // is -Infinity instead of Infinity.
  loadConstantDouble(1.0, temp);
  divDouble(input, temp);
  branchDouble(Assembler::DoubleLessThan, temp, input, fail);
  move32(Imm32(0), output);

  bind(&done);
}

void MacroAssembler::randomDouble(Register rng, FloatRegister dest,
                                  Register64 temp0, Register64 temp1) {
  using mozilla::non_crypto::XorShift128PlusRNG;

  static_assert(
      sizeof(XorShift128PlusRNG) == 2 * sizeof(uint64_t),
      "Code below assumes XorShift128PlusRNG contains two uint64_t values");

  Address state0Addr(rng, XorShift128PlusRNG::offsetOfState0());
  Address state1Addr(rng, XorShift128PlusRNG::offsetOfState1());

  Register64 s0Reg = temp0;
  Register64 s1Reg = temp1;

  // uint64_t s1 = mState[0];
  load64(state0Addr, s1Reg);

  // s1 ^= s1 << 23;
  move64(s1Reg, s0Reg);
  lshift64(Imm32(23), s1Reg);
  xor64(s0Reg, s1Reg);

  // s1 ^= s1 >> 17;
  move64(s1Reg, s0Reg);
  rshift64(Imm32(17), s1Reg);
  xor64(s0Reg, s1Reg);

  // const uint64_t s0 = mState[1];
  load64(state1Addr, s0Reg);

  // mState[0] = s0;
  store64(s0Reg, state0Addr);

  // s1 ^= s0;
  xor64(s0Reg, s1Reg);

  // s1 ^= s0 >> 26;
  rshift64(Imm32(26), s0Reg);
  xor64(s0Reg, s1Reg);

  // mState[1] = s1;
  store64(s1Reg, state1Addr);

  // s1 += mState[0];
  load64(state0Addr, s0Reg);
  add64(s0Reg, s1Reg);

  // See comment in XorShift128PlusRNG::nextDouble().
  static constexpr int MantissaBits =
      mozilla::FloatingPoint<double>::kExponentShift + 1;
  static constexpr double ScaleInv = double(1) / (1ULL << MantissaBits);

  and64(Imm64((1ULL << MantissaBits) - 1), s1Reg);

  // Note: we know s1Reg isn't signed after the and64 so we can use the faster
  // convertInt64ToDouble instead of convertUInt64ToDouble.
  convertInt64ToDouble(s1Reg, dest);

  // dest *= ScaleInv
  mulDoublePtr(ImmPtr(&ScaleInv), s0Reg.scratchReg(), dest);
}

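// For reference, the sequence above is xorshift128+ (matching
// mozilla::non_crypto::XorShift128PlusRNG) followed by mantissa scaling
// (a sketch):
//
//   uint64_t s1 = state[0];
//   const uint64_t s0 = state[1];
//   state[0] = s0;
//   s1 ^= s1 << 23;
//   s1 ^= s1 >> 17;
//   s1 ^= s0;
//   s1 ^= s0 >> 26;
//   state[1] = s1;
//   uint64_t r = state[0] + state[1];
//   // Keep the low 53 bits (double mantissa width plus the implicit bit)
//   // and scale into [0, 1):
//   return (r & ((1ULL << 53) - 1)) * (1.0 / (1ULL << 53));
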
void MacroAssembler::sameValueDouble(FloatRegister left, FloatRegister right,
                                     FloatRegister temp, Register dest) {
  Label nonEqual, isSameValue, isNotSameValue;
  branchDouble(Assembler::DoubleNotEqualOrUnordered, left, right, &nonEqual);
  {
    // First, test for being equal to 0.0, which also includes -0.0.
    loadConstantDouble(0.0, temp);
    branchDouble(Assembler::DoubleNotEqual, left, temp, &isSameValue);

    // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
    // is -Infinity instead of Infinity.
    Label isNegInf;
    loadConstantDouble(1.0, temp);
    divDouble(left, temp);
    branchDouble(Assembler::DoubleLessThan, temp, left, &isNegInf);
    {
      loadConstantDouble(1.0, temp);
      divDouble(right, temp);
      branchDouble(Assembler::DoubleGreaterThan, temp, right, &isSameValue);
      jump(&isNotSameValue);
    }
    bind(&isNegInf);
    {
      loadConstantDouble(1.0, temp);
      divDouble(right, temp);
      branchDouble(Assembler::DoubleLessThan, temp, right, &isSameValue);
      jump(&isNotSameValue);
    }
  }
  bind(&nonEqual);
  {
    // Test if both values are NaN.
    branchDouble(Assembler::DoubleOrdered, left, left, &isNotSameValue);
    branchDouble(Assembler::DoubleOrdered, right, right, &isNotSameValue);
  }

  Label done;
  bind(&isSameValue);
  move32(Imm32(1), dest);
  jump(&done);

  bind(&isNotSameValue);
  move32(Imm32(0), dest);

  bind(&done);
}

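// For reference, the code above computes the SameValue relation restricted
// to doubles (a sketch):
//
//   bool SameValueDouble(double x, double y) {
//     if (x == y) {
//       // +0 and -0 compare equal; 1/x tells them apart via the sign of
//       // the resulting infinity.
//       return x != 0.0 || std::signbit(x) == std::signbit(y);
//     }
//     return std::isnan(x) && std::isnan(y);  // NaN is SameValue as NaN
//   }
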
void MacroAssembler::minMaxArrayInt32(Register array, Register result,
                                      Register temp1, Register temp2,
                                      Register temp3, bool isMax, Label* fail) {
  // array must be a packed array. Load its elements.
  Register elements = temp1;
  loadPtr(Address(array, NativeObject::offsetOfElements()), elements);

  // Load the length and guard that it is non-zero.
  Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp3);
  branchTest32(Assembler::Zero, temp3, temp3, fail);

  // Compute the address of the last element.
  Register elementsEnd = temp2;
  BaseObjectElementIndex elementsEndAddr(elements, temp3,
                                         -int32_t(sizeof(Value)));
  computeEffectiveAddress(elementsEndAddr, elementsEnd);

  // Load the first element into result.
  fallibleUnboxInt32(Address(elements, 0), result, fail);

  Label loop, done;
  bind(&loop);

  // Check whether we're done.
  branchPtr(Assembler::Equal, elements, elementsEnd, &done);

  // If not, advance to the next element and load it.
  addPtr(Imm32(sizeof(Value)), elements);
  fallibleUnboxInt32(Address(elements, 0), temp3, fail);

  // Update result if necessary.
  Assembler::Condition cond =
      isMax ? Assembler::GreaterThan : Assembler::LessThan;
  cmp32Move32(cond, temp3, result, temp3, result);

  jump(&loop);
  bind(&done);
}

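// Equivalent scalar logic for the loop above (a sketch; the emitted code
// bails to |fail| on empty arrays and on elements that are not int32):
//
//   int32_t result = elements[0];
//   for (size_t i = 1; i < initializedLength; i++) {
//     int32_t v = elements[i];
//     if (isMax ? v > result : v < result) {
//       result = v;
//     }
//   }
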
void MacroAssembler::minMaxArrayNumber(Register array, FloatRegister result,
                                       FloatRegister floatTemp, Register temp1,
                                       Register temp2, bool isMax,
                                       Label* fail) {
  // array must be a packed array. Load its elements.
  Register elements = temp1;
  loadPtr(Address(array, NativeObject::offsetOfElements()), elements);

  // Load the length and check if the array is empty.
  Label isEmpty;
  Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp2);
  branchTest32(Assembler::Zero, temp2, temp2, &isEmpty);

  // Compute the address of the last element.
  Register elementsEnd = temp2;
  BaseObjectElementIndex elementsEndAddr(elements, temp2,
                                         -int32_t(sizeof(Value)));
  computeEffectiveAddress(elementsEndAddr, elementsEnd);

  // Load the first element into result.
  ensureDouble(Address(elements, 0), result, fail);

  Label loop, done;
  bind(&loop);

  // Check whether we're done.
  branchPtr(Assembler::Equal, elements, elementsEnd, &done);

  // If not, advance to the next element and load it into floatTemp.
  addPtr(Imm32(sizeof(Value)), elements);
  ensureDouble(Address(elements, 0), floatTemp, fail);

  // Update result if necessary.
  if (isMax) {
    maxDouble(floatTemp, result, /* handleNaN = */ true);
  } else {
    minDouble(floatTemp, result, /* handleNaN = */ true);
  }
  jump(&loop);

  // With no arguments, min/max return +Infinity/-Infinity respectively.
  bind(&isEmpty);
  if (isMax) {
    loadConstantDouble(mozilla::NegativeInfinity<double>(), result);
  } else {
    loadConstantDouble(mozilla::PositiveInfinity<double>(), result);
  }

  bind(&done);
}

void MacroAssembler::branchIfNotRegExpPrototypeOptimizable(
    Register proto, Register temp, const GlobalObject* maybeGlobal,
    Label* fail) {
  if (maybeGlobal) {
    movePtr(ImmGCPtr(maybeGlobal), temp);
    loadPrivate(Address(temp, GlobalObject::offsetOfGlobalDataSlot()), temp);
  } else {
    loadGlobalObjectData(temp);
  }
  size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                  RegExpRealm::offsetOfOptimizableRegExpPrototypeShape();
  loadPtr(Address(temp, offset), temp);
  branchTestObjShapeUnsafe(Assembler::NotEqual, proto, temp, fail);
}

void MacroAssembler::branchIfNotRegExpInstanceOptimizable(
    Register regexp, Register temp, const GlobalObject* maybeGlobal,
    Label* label) {
  if (maybeGlobal) {
    movePtr(ImmGCPtr(maybeGlobal), temp);
    loadPrivate(Address(temp, GlobalObject::offsetOfGlobalDataSlot()), temp);
  } else {
    loadGlobalObjectData(temp);
  }
  size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                  RegExpRealm::offsetOfOptimizableRegExpInstanceShape();
  loadPtr(Address(temp, offset), temp);
  branchTestObjShapeUnsafe(Assembler::NotEqual, regexp, temp, label);
}

void MacroAssembler::loadRegExpLastIndex(Register regexp, Register string,
                                         Register lastIndex,
                                         Label* notFoundZeroLastIndex) {
  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
  Address stringLength(string, JSString::offsetOfLength());

  Label notGlobalOrSticky, loadedLastIndex;

  branchTest32(Assembler::Zero, flagsSlot,
               Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
               &notGlobalOrSticky);
  {
    // It's a global or sticky regular expression. Emit the following code:
    //
    //   lastIndex = regexp.lastIndex
    //   if lastIndex > string.length:
    //     jump to notFoundZeroLastIndex (skip the regexp match/test operation)
    //
    // The `notFoundZeroLastIndex` code should set regexp.lastIndex to 0 and
    // treat this as a not-found result.
    //
    // See steps 5-8 in js::RegExpBuiltinExec.
    //
    // Earlier guards must have ensured regexp.lastIndex is a non-negative
    // int32.
#ifdef DEBUG
    {
      Label ok;
      branchTestInt32(Assembler::Equal, lastIndexSlot, &ok);
      assumeUnreachable("Expected int32 value for lastIndex");
      bind(&ok);
    }
#endif
    unboxInt32(lastIndexSlot, lastIndex);
#ifdef DEBUG
    {
      Label ok;
      branchTest32(Assembler::NotSigned, lastIndex, lastIndex, &ok);
      assumeUnreachable("Expected non-negative lastIndex");
      bind(&ok);
    }
#endif
    branch32(Assembler::Below, stringLength, lastIndex, notFoundZeroLastIndex);
    jump(&loadedLastIndex);
  }

  bind(&notGlobalOrSticky);
  move32(Imm32(0), lastIndex);

  bind(&loadedLastIndex);
}

void MacroAssembler::loadAndClearRegExpSearcherLastLimit(Register result,
                                                         Register scratch) {
  MOZ_ASSERT(result != scratch);

  loadJSContext(scratch);

  Address limitField(scratch, JSContext::offsetOfRegExpSearcherLastLimit());
  load32(limitField, result);

#ifdef DEBUG
  Label ok;
  branch32(Assembler::NotEqual, result, Imm32(RegExpSearcherLastLimitSentinel),
           &ok);
  assumeUnreachable("Unexpected sentinel for regExpSearcherLastLimit");
  bind(&ok);
#endif
  store32(Imm32(RegExpSearcherLastLimitSentinel), limitField);
}

void MacroAssembler::loadParsedRegExpShared(Register regexp, Register result,
                                            Label* unparsed) {
  Address sharedSlot(regexp, RegExpObject::offsetOfShared());
  branchTestUndefined(Assembler::Equal, sharedSlot, unparsed);
  unboxNonDouble(sharedSlot, result, JSVAL_TYPE_PRIVATE_GCTHING);

  static_assert(sizeof(RegExpShared::Kind) == sizeof(uint32_t));
  branch32(Assembler::Equal, Address(result, RegExpShared::offsetOfKind()),
           Imm32(int32_t(RegExpShared::Kind::Unparsed)), unparsed);
}

// ===============================================================

void MacroAssembler::loadFunctionLength(Register func,
                                        Register funFlagsAndArgCount,
                                        Register output, Label* slowPath) {
#ifdef DEBUG
  {
    // These flags should already have been checked by caller.
    Label ok;
    uint32_t FlagsToCheck =
        FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH;
    branchTest32(Assembler::Zero, funFlagsAndArgCount, Imm32(FlagsToCheck),
                 &ok);
    assumeUnreachable("The function flags should already have been checked.");
    bind(&ok);
  }
#endif  // DEBUG

  // NOTE: `funFlagsAndArgCount` and `output` must be allowed to alias.

  // Load the target function's length.
  Label isInterpreted, lengthLoaded;
  branchTest32(Assembler::NonZero, funFlagsAndArgCount,
               Imm32(FunctionFlags::BASESCRIPT), &isInterpreted);
  {
    // The length property of a native function is stored with the flags.
    move32(funFlagsAndArgCount, output);
    rshift32(Imm32(JSFunction::ArgCountShift), output);
    jump(&lengthLoaded);
  }
  bind(&isInterpreted);
  {
    // Load the length property of an interpreted function.
    loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), output);
    loadPtr(Address(output, JSScript::offsetOfSharedData()), output);
    branchTestPtr(Assembler::Zero, output, output, slowPath);
    loadPtr(Address(output, SharedImmutableScriptData::offsetOfISD()), output);
    load16ZeroExtend(Address(output, ImmutableScriptData::offsetOfFunLength()),
                     output);
  }
  bind(&lengthLoaded);
}

void MacroAssembler::loadFunctionName(Register func, Register output,
                                      ImmGCPtr emptyString, Label* slowPath) {
  MOZ_ASSERT(func != output);

  // Get the JSFunction flags.
  load32(Address(func, JSFunction::offsetOfFlagsAndArgCount()), output);

  // If the name was previously resolved, the name property may be shadowed.
  // If the function is an accessor with lazy name, AtomSlot contains the
  // unprefixed name.
  branchTest32(
      Assembler::NonZero, output,
      Imm32(FunctionFlags::RESOLVED_NAME | FunctionFlags::LAZY_ACCESSOR_NAME),
      slowPath);

  Label noName, done;
  branchTest32(Assembler::NonZero, output,
               Imm32(FunctionFlags::HAS_GUESSED_ATOM), &noName);

  Address atomAddr(func, JSFunction::offsetOfAtom());
  branchTestUndefined(Assembler::Equal, atomAddr, &noName);
  unboxString(atomAddr, output);
  jump(&done);

  {
    bind(&noName);

    // An absent name property defaults to the empty string.
    movePtr(emptyString, output);
  }

  bind(&done);
}

void MacroAssembler::assertFunctionIsExtended(Register func) {
#ifdef DEBUG
  Label extended;
  branchTestFunctionFlags(func, FunctionFlags::EXTENDED, Assembler::NonZero,
                          &extended);
  assumeUnreachable("Function is not extended");
  bind(&extended);
#endif
}

void MacroAssembler::branchTestType(Condition cond, Register tag,
                                    JSValueType type, Label* label) {
  switch (type) {
    case JSVAL_TYPE_DOUBLE:
      branchTestDouble(cond, tag, label);
      break;
    case JSVAL_TYPE_INT32:
      branchTestInt32(cond, tag, label);
      break;
    case JSVAL_TYPE_BOOLEAN:
      branchTestBoolean(cond, tag, label);
      break;
    case JSVAL_TYPE_UNDEFINED:
      branchTestUndefined(cond, tag, label);
      break;
    case JSVAL_TYPE_NULL:
      branchTestNull(cond, tag, label);
      break;
    case JSVAL_TYPE_MAGIC:
      branchTestMagic(cond, tag, label);
      break;
    case JSVAL_TYPE_STRING:
      branchTestString(cond, tag, label);
      break;
    case JSVAL_TYPE_SYMBOL:
      branchTestSymbol(cond, tag, label);
      break;
    case JSVAL_TYPE_BIGINT:
      branchTestBigInt(cond, tag, label);
      break;
    case JSVAL_TYPE_OBJECT:
      branchTestObject(cond, tag, label);
      break;
    default:
      MOZ_CRASH("Unexpected value type");
  }
}

void MacroAssembler::branchTestObjShapeList(
    Condition cond, Register obj, Register shapeElements, Register shapeScratch,
    Register endScratch, Register spectreScratch, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

  bool needSpectreMitigations = spectreScratch != InvalidReg;

  Label done;
  Label* onMatch = cond == Assembler::Equal ? label : &done;
  Label* onNoMatch = cond == Assembler::Equal ? &done : label;

  // Load the object's shape pointer into shapeScratch, and prepare to compare
  // it with the shapes in the list. The shapes are stored as private values so
  // we can compare directly.
  loadPtr(Address(obj, JSObject::offsetOfShape()), shapeScratch);

  // Compute end pointer.
  Address lengthAddr(shapeElements,
                     ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, endScratch);
  branch32(Assembler::Equal, endScratch, Imm32(0), onNoMatch);
  BaseObjectElementIndex endPtrAddr(shapeElements, endScratch);
  computeEffectiveAddress(endPtrAddr, endScratch);

  Label loop;
  bind(&loop);

  // Compare the object's shape with a shape from the list. Note that on 64-bit
  // this includes the tag bits, but on 32-bit we only compare the low word of
  // the value. This is fine because the list of shapes is never exposed and the
  // tag is guaranteed to be PrivateGCThing.
  if (needSpectreMitigations) {
    move32(Imm32(0), spectreScratch);
  }
  branchPtr(Assembler::Equal, Address(shapeElements, 0), shapeScratch, onMatch);
  if (needSpectreMitigations) {
    spectreMovePtr(Assembler::Equal, spectreScratch, obj);
  }

  // Advance to next shape and loop if not finished.
  addPtr(Imm32(sizeof(Value)), shapeElements);
  branchPtr(Assembler::Below, shapeElements, endScratch, &loop);

  if (cond == Assembler::NotEqual) {
    jump(label);
  }
  bind(&done);
}

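// In effect, the loop above is a linear scan over a shape list stored as
// the elements of an array object (a sketch; shapes are compared as raw
// words):
//
//   for (Value* cur = shapeElements; cur < end; cur++) {
//     if (obj->shape() == *cur) goto match;
//   }
//   goto noMatch;
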
void MacroAssembler::branchTestObjCompartment(Condition cond, Register obj,
                                              const Address& compartment,
                                              Register scratch, Label* label) {
  MOZ_ASSERT(obj != scratch);
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
  loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
  branchPtr(cond, compartment, scratch, label);
}

void MacroAssembler::branchTestObjCompartment(
    Condition cond, Register obj, const JS::Compartment* compartment,
    Register scratch, Label* label) {
  MOZ_ASSERT(obj != scratch);
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
  loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
  branchPtr(cond, scratch, ImmPtr(compartment), label);
}

void MacroAssembler::branchIfNonNativeObj(Register obj, Register scratch,
                                          Label* label) {
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  branchTest32(Assembler::Zero,
               Address(scratch, Shape::offsetOfImmutableFlags()),
               Imm32(Shape::isNativeBit()), label);
}

void MacroAssembler::branchIfObjectNotExtensible(Register obj, Register scratch,
                                                 Label* label) {
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);

  // Spectre-style checks are not needed here because we do not interpret data
  // based on this check.
  static_assert(sizeof(ObjectFlags) == sizeof(uint16_t));
  load16ZeroExtend(Address(scratch, Shape::offsetOfObjectFlags()), scratch);
  branchTest32(Assembler::NonZero, scratch,
               Imm32(uint32_t(ObjectFlag::NotExtensible)), label);
}

void MacroAssembler::branchTestObjectNeedsProxyResultValidation(
    Condition cond, Register obj, Register scratch, Label* label) {
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);

  Label done;
  Label* doValidation = cond == NonZero ? label : &done;
  Label* skipValidation = cond == NonZero ? &done : label;

  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  branchTest32(Assembler::Zero,
               Address(scratch, Shape::offsetOfImmutableFlags()),
               Imm32(Shape::isNativeBit()), doValidation);
  static_assert(sizeof(ObjectFlags) == sizeof(uint16_t));
  load16ZeroExtend(Address(scratch, Shape::offsetOfObjectFlags()), scratch);
  branchTest32(Assembler::NonZero, scratch,
               Imm32(uint32_t(ObjectFlag::NeedsProxyGetSetResultValidation)),
               doValidation);

  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
  loadPtr(Address(scratch, offsetof(JSClass, cOps)), scratch);
  branchTestPtr(Assembler::Zero, scratch, scratch, skipValidation);
  loadPtr(Address(scratch, offsetof(JSClassOps, resolve)), scratch);
  branchTestPtr(Assembler::NonZero, scratch, scratch, doValidation);
  bind(&done);
}

void MacroAssembler::wasmTrap(wasm::Trap trap,
                              wasm::BytecodeOffset bytecodeOffset) {
  FaultingCodeOffset fco = wasmTrapInstruction();
  MOZ_ASSERT_IF(!oom(),
                currentOffset() - fco.get() == WasmTrapInstructionLength);
  append(trap, wasm::TrapSite(wasm::TrapMachineInsn::OfficialUD, fco,
                              bytecodeOffset));
}

std::pair<CodeOffset, uint32_t> MacroAssembler::wasmReserveStackChecked(
    uint32_t amount, wasm::BytecodeOffset trapOffset) {
  if (amount > MAX_UNCHECKED_LEAF_FRAME_SIZE) {
    // The frame is large. Don't bump sp until after the stack limit check so
    // that the trap handler isn't called with a wild sp.
    Label ok;
    Register scratch = ABINonArgReg0;
    moveStackPtrTo(scratch);

    Label trap;
    branchPtr(Assembler::Below, scratch, Imm32(amount), &trap);
    subPtr(Imm32(amount), scratch);
    branchPtr(Assembler::Below,
              Address(InstanceReg, wasm::Instance::offsetOfStackLimit()),
              scratch, &ok);

    bind(&trap);
    wasmTrap(wasm::Trap::StackOverflow, trapOffset);
    CodeOffset trapInsnOffset = CodeOffset(currentOffset());

    bind(&ok);
    reserveStack(amount);
    return std::pair<CodeOffset, uint32_t>(trapInsnOffset, 0);
  }

  reserveStack(amount);
  Label ok;
  branchStackPtrRhs(Assembler::Below,
                    Address(InstanceReg, wasm::Instance::offsetOfStackLimit()),
                    &ok);
  wasmTrap(wasm::Trap::StackOverflow, trapOffset);
  CodeOffset trapInsnOffset = CodeOffset(currentOffset());
  bind(&ok);
  return std::pair<CodeOffset, uint32_t>(trapInsnOffset, amount);
}

#ifdef ENABLE_WASM_TAIL_CALLS
static void MoveDataBlock(MacroAssembler& masm, Register base, int32_t from,
                          int32_t to, uint32_t size) {
  MOZ_ASSERT(base != masm.getStackPointer());
  if (from == to || size == 0) {
    return;  // noop
  }

#  ifdef JS_CODEGEN_ARM64
  vixl::UseScratchRegisterScope temps(&masm);
  const Register scratch = temps.AcquireX().asUnsized();
#  elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_X86)
  static constexpr Register scratch = ABINonArgReg0;
  masm.push(scratch);
#  elif defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_MIPS64) || \
      defined(JS_CODEGEN_RISCV64)
  ScratchRegisterScope scratch(masm);
#  elif !defined(JS_CODEGEN_NONE)
  const Register scratch = ScratchReg;
#  else
  const Register scratch = InvalidReg;
#  endif

  if (to < from) {
    // Copy forwards.
    for (uint32_t i = 0; i < size; i += sizeof(void*)) {
      masm.loadPtr(Address(base, from + i), scratch);
      masm.storePtr(scratch, Address(base, to + i));
    }
  } else {
    // Copy backwards.
    for (uint32_t i = size; i > 0;) {
      i -= sizeof(void*);
      masm.loadPtr(Address(base, from + i), scratch);
      masm.storePtr(scratch, Address(base, to + i));
    }
  }

#  if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_X86)
  masm.pop(scratch);
#  endif
}

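// MoveDataBlock above behaves like memmove for potentially overlapping
// ranges: it copies forward when the destination is below the source and
// backward otherwise (a sketch):
//
//   if (to < from) {
//     for (i = 0; i < size; i += sizeof(void*)) dst[i] = src[i];
//   } else {
//     for (i = size; i > 0;) { i -= sizeof(void*); dst[i] = src[i]; }
//   }
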
struct ReturnCallTrampolineData {
#  ifdef JS_CODEGEN_ARM
  uint32_t trampolineOffset;
#  else
  CodeLabel trampoline;
#  endif
};

static ReturnCallTrampolineData MakeReturnCallTrampoline(MacroAssembler& masm) {
  uint32_t savedPushed = masm.framePushed();

  // Build simple trampoline code: load the instance slot from the frame,
  // restore FP, and return to previous caller.
  ReturnCallTrampolineData data;
#  ifdef JS_CODEGEN_ARM
  data.trampolineOffset = masm.currentOffset();
#  else
  masm.bind(&data.trampoline);
#  endif

  masm.setFramePushed(
      AlignBytes(wasm::FrameWithInstances::sizeOfInstanceFieldsAndShadowStack(),
                 WasmStackAlignment));

#  ifdef ENABLE_WASM_TAIL_CALLS
  masm.wasmMarkSlowCall();
#  endif

  masm.loadPtr(
      Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
      InstanceReg);
  masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
  masm.moveToStackPtr(FramePointer);
#  ifdef JS_CODEGEN_ARM64
  masm.pop(FramePointer, lr);
  masm.append(wasm::CodeRangeUnwindInfo::UseFpLr, masm.currentOffset());
  masm.Mov(PseudoStackPointer64, vixl::sp);
  masm.abiret();
#  else
  masm.pop(FramePointer);
  masm.append(wasm::CodeRangeUnwindInfo::UseFp, masm.currentOffset());
  masm.ret();
#  endif

  masm.append(wasm::CodeRangeUnwindInfo::Normal, masm.currentOffset());
  masm.setFramePushed(savedPushed);
  return data;
}

// CollapseWasmFrame methods merge frame fields: callee parameters, instance
// slots, and caller RA. See the diagram below. The C0 is the previous caller,
// the C1 is the caller of the return call, and the C2 is the callee.
//
//    +-------------------+          +--------------------+
//    |C0 instance slots  |          |C0 instance slots   |
//    +-------------------+ -+       +--------------------+ -+
//    |   RA              |  |       |   RA               |  |
//    +-------------------+  | C0    +--------------------+  |C0
//    |   FP              |  v       |   FP               |  v
//    +-------------------+          +--------------------+
//    |C0 private frame   |          |C0 private frame    |
//    +-------------------+          +--------------------+
//    |C1 results area    |          |C1/C2 results area  |
//    +-------------------+          +--------------------+
//    |C1 parameters      |          |?  trampoline frame |
//    +-------------------+          +--------------------+
//    |C1 instance slots  |          |C2 parameters       |
//    +-------------------+ -+       +--------------------+
//    |C0 RA              |  |       |C2 instance slots'  |
//    +-------------------+  | C1    +--------------------+ -+
//    |C0 FP              |  v       |C0 RA'              |  |
//    +-------------------+          +--------------------+  | C2
//    |C1 private frame   |          |C0 FP'              |  v
//    +-------------------+          +--------------------+ <= start of C2
//    |C2 parameters      |
//    +-------------------+
//    |C2 instance slots  |
//    +-------------------+ <= call C2
//
// The C2 parameters are moved in place of the C1 parameters, and the
// C1 frame data is removed. The instance slots, return address, and
// frame pointer to the C0 callsite are saved or adjusted.
//
// For cross-instance calls, the trampoline frame will be introduced
// if the C0 callsite has no ability to restore instance registers and realm.

5346 static void CollapseWasmFrameFast(MacroAssembler
& masm
,
5347 const ReturnCallAdjustmentInfo
& retCallInfo
) {
5348 uint32_t framePushedAtStart
= masm
.framePushed();
5349 static_assert(sizeof(wasm::Frame
) == 2 * sizeof(void*));
5351 // The instance slots + stack arguments are expected to be padded and
5352 // aligned to the WasmStackAlignment boundary. There is no data expected
5353 // in the padded region, such as results stack area or locals, to avoid
5354 // unwanted stack growth.
5355 uint32_t newSlotsAndStackArgBytes
=
5356 AlignBytes(retCallInfo
.newSlotsAndStackArgBytes
, WasmStackAlignment
);
5357 uint32_t oldSlotsAndStackArgBytes
=
5358 AlignBytes(retCallInfo
.oldSlotsAndStackArgBytes
, WasmStackAlignment
);
5360 static constexpr Register tempForCaller
= WasmTailCallInstanceScratchReg
;
5361 static constexpr Register tempForFP
= WasmTailCallFPScratchReg
;
5362 static constexpr Register tempForRA
= WasmTailCallRAScratchReg
;
5363 # ifndef JS_USE_LINK_REGISTER
5364 masm
.push(tempForRA
);
5367 // Load the FP, RA, and instance slots into registers to preserve them while
5368 // the new frame is collapsed over the current one.
5369 masm
.loadPtr(Address(FramePointer
, wasm::Frame::callerFPOffset()), tempForFP
);
5370 masm
.loadPtr(Address(FramePointer
, wasm::Frame::returnAddressOffset()),
5372 masm
.append(wasm::CodeRangeUnwindInfo::RestoreFpRa
, masm
.currentOffset());
5373 bool copyCallerSlot
= oldSlotsAndStackArgBytes
!= newSlotsAndStackArgBytes
;
5374 if (copyCallerSlot
) {
5376 Address(FramePointer
, wasm::FrameWithInstances::callerInstanceOffset()),
5380 // Copy parameters data, ignoring shadow data and instance slots.
5381 // Make all offsets relative to the FramePointer.
5382 int32_t newArgSrc
= -framePushedAtStart
;
5383 int32_t newArgDest
=
5384 sizeof(wasm::Frame
) + oldSlotsAndStackArgBytes
- newSlotsAndStackArgBytes
;
5385 const uint32_t SlotsSize
=
5386 wasm::FrameWithInstances::sizeOfInstanceFieldsAndShadowStack();
5387 MoveDataBlock(masm
, FramePointer
, newArgSrc
+ SlotsSize
,
5388 newArgDest
+ SlotsSize
,
5389 retCallInfo
.newSlotsAndStackArgBytes
- SlotsSize
);
5391 // Copy caller instance slots from the current frame.
5392 if (copyCallerSlot
) {
5395 Address(FramePointer
, newArgDest
+ WasmCallerInstanceOffsetBeforeCall
));
5398 // Store current instance as the new callee instance slot.
5401 Address(FramePointer
, newArgDest
+ WasmCalleeInstanceOffsetBeforeCall
));
5403 # ifdef JS_USE_LINK_REGISTER
5404 // RA is already in its place, just move stack.
5405 masm
.addToStackPtr(Imm32(framePushedAtStart
+ newArgDest
));
5407 // Push RA to new frame: store RA, restore temp, and move stack.
5408 int32_t newFrameOffset
= newArgDest
- sizeof(wasm::Frame
);
5409 masm
.storePtr(tempForRA
,
5410 Address(FramePointer
,
5411 newFrameOffset
+ wasm::Frame::returnAddressOffset()));
5412 // Restore tempForRA, but keep RA on top of the stack.
5413 // There is no non-locking exchange instruction between register and memory.
5414 // Using tempForCaller as scratch register.
5415 masm
.loadPtr(Address(masm
.getStackPointer(), 0), tempForCaller
);
5416 masm
.storePtr(tempForRA
, Address(masm
.getStackPointer(), 0));
5417 masm
.mov(tempForCaller
, tempForRA
);
5418 masm
.append(wasm::CodeRangeUnwindInfo::RestoreFp
, masm
.currentOffset());
5419 masm
.addToStackPtr(Imm32(framePushedAtStart
+ newFrameOffset
+
5420 wasm::Frame::returnAddressOffset() + sizeof(void*)));
5423 masm
.movePtr(tempForFP
, FramePointer
);
5424 // Setting framePushed to pre-collapse state, to properly set that in the
5426 masm
.setFramePushed(framePushedAtStart
);
static void CollapseWasmFrameSlow(MacroAssembler& masm,
                                  const ReturnCallAdjustmentInfo& retCallInfo,
                                  wasm::CallSiteDesc desc,
                                  ReturnCallTrampolineData data) {
  uint32_t framePushedAtStart = masm.framePushed();
  static constexpr Register tempForCaller = WasmTailCallInstanceScratchReg;
  static constexpr Register tempForFP = WasmTailCallFPScratchReg;
  static constexpr Register tempForRA = WasmTailCallRAScratchReg;

  static_assert(sizeof(wasm::Frame) == 2 * sizeof(void*));

  // The hidden frame will "break" after the wasm::Frame data fields.
  // Calculate the sum of the wasm stack alignment before and after the break
  // as the size to reserve.
  const uint32_t HiddenFrameAfterSize =
      AlignBytes(wasm::FrameWithInstances::sizeOfInstanceFieldsAndShadowStack(),
                 WasmStackAlignment);
  const uint32_t HiddenFrameSize =
      AlignBytes(sizeof(wasm::Frame), WasmStackAlignment) +
      HiddenFrameAfterSize;

  // On this path we prepare two frames: a regular wasm frame, and a hidden
  // one. The hidden frame contains the instance slots needed to unwind and
  // to recover the pinned registers.
  // The instance slots + stack arguments are expected to be padded and
  // aligned to the WasmStackAlignment boundary. There is no data expected
  // in the padded region, such as a results stack area or locals, to avoid
  // unwanted stack growth.
  // The hidden frame will be inserted under this constraint too.
  uint32_t newSlotsAndStackArgBytes =
      AlignBytes(retCallInfo.newSlotsAndStackArgBytes, WasmStackAlignment);
  uint32_t oldSlotsAndStackArgBytes =
      AlignBytes(retCallInfo.oldSlotsAndStackArgBytes, WasmStackAlignment);

  // Make all offsets relative to the FramePointer.
  int32_t newArgSrc = -framePushedAtStart;
  int32_t newArgDest = sizeof(wasm::Frame) + oldSlotsAndStackArgBytes -
                       HiddenFrameSize - newSlotsAndStackArgBytes;
  int32_t hiddenFrameArgsDest =
      sizeof(wasm::Frame) + oldSlotsAndStackArgBytes - HiddenFrameAfterSize;

  // The added hidden frame could overwrite data on the top of the stack, so
  // reserve the needed space.
  uint32_t reserved = newArgDest - int32_t(sizeof(void*)) < newArgSrc
                          ? newArgSrc - newArgDest + sizeof(void*)
                          : 0;
  masm.reserveStack(reserved);

# ifndef JS_USE_LINK_REGISTER
  masm.push(tempForRA);
# endif

  // Load FP, RA and instance slots to preserve them from being overwritten.
  masm.loadPtr(Address(FramePointer, wasm::Frame::callerFPOffset()), tempForFP);
  masm.loadPtr(Address(FramePointer, wasm::Frame::returnAddressOffset()),
               tempForRA);
  masm.append(wasm::CodeRangeUnwindInfo::RestoreFpRa, masm.currentOffset());
  masm.loadPtr(
      Address(FramePointer, newArgSrc + WasmCallerInstanceOffsetBeforeCall),
      tempForCaller);

  // Copy parameters data, ignoring shadow data and instance slots.
  const uint32_t SlotsSize =
      wasm::FrameWithInstances::sizeOfInstanceFieldsAndShadowStack();
  MoveDataBlock(masm, FramePointer, newArgSrc + SlotsSize,
                newArgDest + SlotsSize,
                retCallInfo.newSlotsAndStackArgBytes - SlotsSize);

  // Form the hidden frame for the trampoline.
  int32_t newFPOffset = hiddenFrameArgsDest - sizeof(wasm::Frame);
  masm.storePtr(
      tempForRA,
      Address(FramePointer, newFPOffset + wasm::Frame::returnAddressOffset()));

  // Copy original FP.
  masm.storePtr(
      tempForFP,
      Address(FramePointer, newFPOffset + wasm::Frame::callerFPOffset()));

  // Set up instance slots.
  masm.storePtr(
      tempForCaller,
      Address(FramePointer,
              newFPOffset + wasm::FrameWithInstances::calleeInstanceOffset()));
  masm.storePtr(
      tempForCaller,
      Address(FramePointer, newArgDest + WasmCallerInstanceOffsetBeforeCall));
  masm.storePtr(
      InstanceReg,
      Address(FramePointer, newArgDest + WasmCalleeInstanceOffsetBeforeCall));

# ifdef JS_CODEGEN_ARM
  // ARM has no CodeLabel -- calculate the PC directly.
  masm.mov(pc, tempForRA);
  masm.computeEffectiveAddress(
      Address(tempForRA,
              int32_t(data.trampolineOffset - masm.currentOffset() - 4)),
      tempForRA);
  masm.append(desc, CodeOffset(data.trampolineOffset));
# else
  masm.mov(&data.trampoline, tempForRA);

  masm.addCodeLabel(data.trampoline);
  // Add the slow trampoline callsite description, to be annotated in
  // stack/frame iterators.
  masm.append(desc, *data.trampoline.target());
# endif

# ifdef JS_USE_LINK_REGISTER
  masm.freeStack(reserved);
  // RA is already in its place, just move the stack.
  masm.addToStackPtr(Imm32(framePushedAtStart + newArgDest));
# else
  // Push RA to the new frame: store RA, restore the temp, and move the stack.
  int32_t newFrameOffset = newArgDest - sizeof(wasm::Frame);
  masm.storePtr(tempForRA,
                Address(FramePointer,
                        newFrameOffset + wasm::Frame::returnAddressOffset()));
  // Restore tempForRA, but keep RA on top of the stack.
  // There is no non-locking exchange instruction between a register and
  // memory, so use tempForCaller as a scratch register.
  masm.loadPtr(Address(masm.getStackPointer(), 0), tempForCaller);
  masm.storePtr(tempForRA, Address(masm.getStackPointer(), 0));
  masm.mov(tempForCaller, tempForRA);
  masm.append(wasm::CodeRangeUnwindInfo::RestoreFp, masm.currentOffset());
  masm.addToStackPtr(Imm32(framePushedAtStart + newFrameOffset +
                           wasm::Frame::returnAddressOffset() + reserved +
                           sizeof(void*)));
# endif

  // Point FramePointer to the hidden frame.
  masm.computeEffectiveAddress(Address(FramePointer, newFPOffset),
                               FramePointer);
  // Set framePushed to the pre-collapse state, so the following code sees the
  // correct value.
  masm.setFramePushed(framePushedAtStart);
}
void MacroAssembler::wasmCollapseFrameFast(
    const ReturnCallAdjustmentInfo& retCallInfo) {
  CollapseWasmFrameFast(*this, retCallInfo);
}

void MacroAssembler::wasmCollapseFrameSlow(
    const ReturnCallAdjustmentInfo& retCallInfo, wasm::CallSiteDesc desc) {
  static constexpr Register temp1 = ABINonArgReg1;
  static constexpr Register temp2 = ABINonArgReg3;

  // Check if the RA has the slow-call marker. If there is no marker, generate
  // a trampoline frame to restore register state when this tail call returns.
  Label slow, done;
  loadPtr(Address(FramePointer, wasm::Frame::returnAddressOffset()), temp1);
  wasmCheckSlowCallsite(temp1, &slow, temp1, temp2);
  CollapseWasmFrameFast(*this, retCallInfo);
  jump(&done);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());

  ReturnCallTrampolineData data = MakeReturnCallTrampoline(*this);

  bind(&slow);
  CollapseWasmFrameSlow(*this, retCallInfo, desc, data);

  bind(&done);
}
#endif  // ENABLE_WASM_TAIL_CALLS
CodeOffset MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc,
                                          const wasm::CalleeDesc& callee) {
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));

  // Load the callee, before the caller's registers are clobbered.
  uint32_t instanceDataOffset = callee.importInstanceDataOffset();
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               instanceDataOffset +
                               offsetof(wasm::FuncImportInstanceData, code))),
      ABINonArgReg0);

#if !defined(JS_CODEGEN_NONE) && !defined(JS_CODEGEN_WASM32)
  static_assert(ABINonArgReg0 != InstanceReg, "by constraint");
#endif

  // Switch to the callee's realm.
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               instanceDataOffset +
                               offsetof(wasm::FuncImportInstanceData, realm))),
      ABINonArgReg1);
  loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), ABINonArgReg2);
  storePtr(ABINonArgReg1, Address(ABINonArgReg2, JSContext::offsetOfRealm()));

  // Switch to the callee's instance and pinned registers and make the call.
  loadPtr(Address(InstanceReg,
                  wasm::Instance::offsetInData(
                      instanceDataOffset +
                      offsetof(wasm::FuncImportInstanceData, instance))),
          InstanceReg);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
  loadWasmPinnedRegsFromInstance();

  CodeOffset res = call(desc, ABINonArgReg0);
#ifdef ENABLE_WASM_TAIL_CALLS
  wasmMarkSlowCall();
#endif
  return res;
}
#ifdef ENABLE_WASM_TAIL_CALLS
CodeOffset MacroAssembler::wasmReturnCallImport(
    const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee,
    const ReturnCallAdjustmentInfo& retCallInfo) {
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));

  // Load the callee, before the caller's registers are clobbered.
  uint32_t instanceDataOffset = callee.importInstanceDataOffset();
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               instanceDataOffset +
                               offsetof(wasm::FuncImportInstanceData, code))),
      ABINonArgReg0);

# if !defined(JS_CODEGEN_NONE) && !defined(JS_CODEGEN_WASM32)
  static_assert(ABINonArgReg0 != InstanceReg, "by constraint");
# endif

  // Switch to the callee's realm.
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               instanceDataOffset +
                               offsetof(wasm::FuncImportInstanceData, realm))),
      ABINonArgReg1);
  loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), ABINonArgReg2);
  storePtr(ABINonArgReg1, Address(ABINonArgReg2, JSContext::offsetOfRealm()));

  // Switch to the callee's instance and pinned registers and make the call.
  loadPtr(Address(InstanceReg,
                  wasm::Instance::offsetInData(
                      instanceDataOffset +
                      offsetof(wasm::FuncImportInstanceData, instance))),
          InstanceReg);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
  loadWasmPinnedRegsFromInstance();

  wasm::CallSiteDesc stubDesc(desc.lineOrBytecode(),
                              wasm::CallSiteDesc::ReturnStub);
  wasmCollapseFrameSlow(retCallInfo, stubDesc);
  jump(ABINonArgReg0);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());
  return CodeOffset(currentOffset());
}
CodeOffset MacroAssembler::wasmReturnCall(
    const wasm::CallSiteDesc& desc, uint32_t funcDefIndex,
    const ReturnCallAdjustmentInfo& retCallInfo) {
  wasmCollapseFrameFast(retCallInfo);
  CodeOffset offset = farJumpWithPatch();
  append(desc, offset, funcDefIndex);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());
  return offset;
}
#endif  // ENABLE_WASM_TAIL_CALLS
CodeOffset MacroAssembler::wasmCallBuiltinInstanceMethod(
    const wasm::CallSiteDesc& desc, const ABIArg& instanceArg,
    wasm::SymbolicAddress builtin, wasm::FailureMode failureMode) {
  MOZ_ASSERT(instanceArg != ABIArg());

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));

  if (instanceArg.kind() == ABIArg::GPR) {
    movePtr(InstanceReg, instanceArg.gpr());
  } else if (instanceArg.kind() == ABIArg::Stack) {
    storePtr(InstanceReg,
             Address(getStackPointer(), instanceArg.offsetFromArgBase()));
  } else {
    MOZ_CRASH("Unknown abi passing style for pointer");
  }

  CodeOffset ret = call(desc, builtin);

  if (failureMode != wasm::FailureMode::Infallible) {
    Label noTrap;
    switch (failureMode) {
      case wasm::FailureMode::Infallible:
        MOZ_CRASH();
      case wasm::FailureMode::FailOnNegI32:
        branchTest32(Assembler::NotSigned, ReturnReg, ReturnReg, &noTrap);
        break;
      case wasm::FailureMode::FailOnMaxI32:
        branchPtr(Assembler::NotEqual, ReturnReg, ImmWord(uintptr_t(INT32_MAX)),
                  &noTrap);
        break;
      case wasm::FailureMode::FailOnNullPtr:
        branchTestPtr(Assembler::NonZero, ReturnReg, ReturnReg, &noTrap);
        break;
      case wasm::FailureMode::FailOnInvalidRef:
        branchPtr(Assembler::NotEqual, ReturnReg,
                  ImmWord(uintptr_t(wasm::AnyRef::invalid().forCompiledCode())),
                  &noTrap);
        break;
    }
    wasmTrap(wasm::Trap::ThrowReported,
             wasm::BytecodeOffset(desc.lineOrBytecode()));
    bind(&noTrap);
  }

  return ret;
}
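
// The failure checks above rely on each fallible builtin signalling failure
// through its return value: a FailOnNegI32 builtin is assumed to return a
// non-negative i32 on success and a negative value on failure (only the sign
// bit is tested), a FailOnNullPtr builtin to return nullptr on failure, and
// so on. The exact sentinel each builtin uses is a convention of the builtin
// itself, not something this code inspects beyond the comparisons above.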
CodeOffset MacroAssembler::asmCallIndirect(const wasm::CallSiteDesc& desc,
                                           const wasm::CalleeDesc& callee) {
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::AsmJSTable);

  const Register scratch = WasmTableCallScratchReg0;
  const Register index = WasmTableCallIndexReg;

  // Optimization opportunity: when offsetof(FunctionTableElem, code) == 0, as
  // it is at present, we can probably generate better code here by folding
  // the address computation into the load.

  static_assert(sizeof(wasm::FunctionTableElem) == 8 ||
                    sizeof(wasm::FunctionTableElem) == 16,
                "elements of function tables are two words");

  // asm.js tables require no signature check, and have had their index
  // masked into range and thus need no bounds check.
  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               callee.tableFunctionBaseInstanceDataOffset())),
      scratch);
  if (sizeof(wasm::FunctionTableElem) == 8) {
    computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
  } else {
    lshift32(Imm32(4), index);
    addPtr(index, scratch);
  }
  loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, code)), scratch);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
  return call(desc, scratch);
}
// In principle, call_indirect requires an expensive context switch to the
// callee's instance and realm before the call and an almost equally expensive
// switch back to the caller's ditto after. However, if the caller's instance
// is the same as the callee's instance then no context switch is required, and
// it only takes a compare-and-branch at run-time to test this - all values are
// in registers already. We therefore generate two call paths, one for the fast
// call without the context switch (which additionally avoids a null check) and
// one for the slow call with the context switch.

void MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc,
                                      const wasm::CalleeDesc& callee,
                                      Label* boundsCheckFailedLabel,
                                      Label* nullCheckFailedLabel,
                                      mozilla::Maybe<uint32_t> tableSize,
                                      CodeOffset* fastCallOffset,
                                      CodeOffset* slowCallOffset) {
  static_assert(sizeof(wasm::FunctionTableElem) == 2 * sizeof(void*),
                "Exactly two pointers or index scaling won't work correctly");
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);

  const int shift = sizeof(wasm::FunctionTableElem) == 8 ? 3 : 4;
  wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
  const Register calleeScratch = WasmTableCallScratchReg0;
  const Register index = WasmTableCallIndexReg;

  // Check the table index and throw if out-of-bounds.
  //
  // Frequently the table size is known, so optimize for that. Otherwise
  // compare with a memory operand when that's possible. (There's little sense
  // in hoisting the load of the bound into a register at a higher level and
  // reusing that register, because a hoisted value would either have to be
  // spilled and re-loaded before the next call_indirect, or would be abandoned
  // because we could not trust that a hoisted value would not have changed.)

  if (boundsCheckFailedLabel) {
    if (tableSize.isSome()) {
      branch32(Assembler::Condition::AboveOrEqual, index, Imm32(*tableSize),
               boundsCheckFailedLabel);
    } else {
      branch32(
          Assembler::Condition::BelowOrEqual,
          Address(InstanceReg, wasm::Instance::offsetInData(
                                   callee.tableLengthInstanceDataOffset())),
          index, boundsCheckFailedLabel);
    }
  }

  // Write the functype-id into the ABI functype-id register.

  const wasm::CallIndirectId callIndirectId = callee.wasmTableSigId();
  switch (callIndirectId.kind()) {
    case wasm::CallIndirectIdKind::Global:
      loadPtr(Address(InstanceReg, wasm::Instance::offsetInData(
                                       callIndirectId.instanceDataOffset() +
                                       offsetof(wasm::TypeDefInstanceData,
                                                superTypeVector))),
              WasmTableCallSigReg);
      break;
    case wasm::CallIndirectIdKind::Immediate:
      move32(Imm32(callIndirectId.immediate()), WasmTableCallSigReg);
      break;
    case wasm::CallIndirectIdKind::AsmJS:
    case wasm::CallIndirectIdKind::None:
      break;
  }

  // Load the base pointer of the table and compute the address of the callee
  // in the table.

  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               callee.tableFunctionBaseInstanceDataOffset())),
      calleeScratch);
  shiftIndex32AndAdd(index, shift, calleeScratch);

  // Load the callee instance and decide whether to take the fast path or the
  // slow path.

  Label fastCall;
  Label done;
  const Register newInstanceTemp = WasmTableCallScratchReg1;
  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, instance)),
          newInstanceTemp);
  branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);

  // Slow path: Save context, check for null, setup new context, call, restore
  // context.
  //
  // TODO: The slow path could usefully be out-of-line and the test above would
  // just fall through to the fast path. This keeps the fast-path code dense,
  // and has correct static prediction for the branch (forward conditional
  // branches predicted not taken, normally).

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  movePtr(newInstanceTemp, InstanceReg);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));

#ifdef WASM_HAS_HEAPREG
  // Use the null pointer exception resulting from loading HeapReg from a null
  // instance to handle a call to a null slot.
  MOZ_ASSERT(nullCheckFailedLabel == nullptr);
  loadWasmPinnedRegsFromInstance(mozilla::Some(trapOffset));
#else
  MOZ_ASSERT(nullCheckFailedLabel != nullptr);
  branchTestPtr(Assembler::Zero, InstanceReg, InstanceReg,
                nullCheckFailedLabel);

  loadWasmPinnedRegsFromInstance();
#endif
  switchToWasmInstanceRealm(index, WasmTableCallScratchReg1);

  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
          calleeScratch);

  *slowCallOffset = call(desc, calleeScratch);
#ifdef ENABLE_WASM_TAIL_CALLS
  wasmMarkSlowCall();
#endif

  // Restore registers and realm and join up with the fast path.

  loadPtr(Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
          InstanceReg);
  loadWasmPinnedRegsFromInstance();
  switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
  jump(&done);

  // Fast path: just load the code pointer and go. The instance and heap
  // register are the same as in the caller, and nothing will be null.
  //
  // (In particular, the code pointer will not be null: if it were, the
  // instance would have been null, and then it would not have been equivalent
  // to our current instance. So no null check is needed on the fast path.)

  bind(&fastCall);

  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
          calleeScratch);

  // We use a different type of call site for the fast call since the instance
  // slots in the frame do not have valid values.

  wasm::CallSiteDesc newDesc(desc.lineOrBytecode(),
                             wasm::CallSiteDesc::IndirectFast);
  *fastCallOffset = call(newDesc, calleeScratch);

  bind(&done);
}
#ifdef ENABLE_WASM_TAIL_CALLS
void MacroAssembler::wasmReturnCallIndirect(
    const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee,
    Label* boundsCheckFailedLabel, Label* nullCheckFailedLabel,
    mozilla::Maybe<uint32_t> tableSize,
    const ReturnCallAdjustmentInfo& retCallInfo) {
  static_assert(sizeof(wasm::FunctionTableElem) == 2 * sizeof(void*),
                "Exactly two pointers or index scaling won't work correctly");
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);

  const int shift = sizeof(wasm::FunctionTableElem) == 8 ? 3 : 4;
  wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
  const Register calleeScratch = WasmTableCallScratchReg0;
  const Register index = WasmTableCallIndexReg;

  // Check the table index and throw if out-of-bounds.
  //
  // Frequently the table size is known, so optimize for that. Otherwise
  // compare with a memory operand when that's possible. (There's little sense
  // in hoisting the load of the bound into a register at a higher level and
  // reusing that register, because a hoisted value would either have to be
  // spilled and re-loaded before the next call_indirect, or would be abandoned
  // because we could not trust that a hoisted value would not have changed.)

  if (boundsCheckFailedLabel) {
    if (tableSize.isSome()) {
      branch32(Assembler::Condition::AboveOrEqual, index, Imm32(*tableSize),
               boundsCheckFailedLabel);
    } else {
      branch32(
          Assembler::Condition::BelowOrEqual,
          Address(InstanceReg, wasm::Instance::offsetInData(
                                   callee.tableLengthInstanceDataOffset())),
          index, boundsCheckFailedLabel);
    }
  }

  // Write the functype-id into the ABI functype-id register.

  const wasm::CallIndirectId callIndirectId = callee.wasmTableSigId();
  switch (callIndirectId.kind()) {
    case wasm::CallIndirectIdKind::Global:
      loadPtr(Address(InstanceReg, wasm::Instance::offsetInData(
                                       callIndirectId.instanceDataOffset() +
                                       offsetof(wasm::TypeDefInstanceData,
                                                superTypeVector))),
              WasmTableCallSigReg);
      break;
    case wasm::CallIndirectIdKind::Immediate:
      move32(Imm32(callIndirectId.immediate()), WasmTableCallSigReg);
      break;
    case wasm::CallIndirectIdKind::AsmJS:
    case wasm::CallIndirectIdKind::None:
      break;
  }

  // Load the base pointer of the table and compute the address of the callee
  // in the table.

  loadPtr(
      Address(InstanceReg, wasm::Instance::offsetInData(
                               callee.tableFunctionBaseInstanceDataOffset())),
      calleeScratch);
  shiftIndex32AndAdd(index, shift, calleeScratch);

  // Load the callee instance and decide whether to take the fast path or the
  // slow path.

  Label fastCall;
  const Register newInstanceTemp = WasmTableCallScratchReg1;
  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, instance)),
          newInstanceTemp);
  branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);

  // Slow path: Save context, check for null, setup new context.

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  movePtr(newInstanceTemp, InstanceReg);

# ifdef WASM_HAS_HEAPREG
  // Use the null pointer exception resulting from loading HeapReg from a null
  // instance to handle a call to a null slot.
  MOZ_ASSERT(nullCheckFailedLabel == nullptr);
  loadWasmPinnedRegsFromInstance(mozilla::Some(trapOffset));
# else
  MOZ_ASSERT(nullCheckFailedLabel != nullptr);
  branchTestPtr(Assembler::Zero, InstanceReg, InstanceReg,
                nullCheckFailedLabel);

  loadWasmPinnedRegsFromInstance();
# endif
  switchToWasmInstanceRealm(index, WasmTableCallScratchReg1);

  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
          calleeScratch);

  wasm::CallSiteDesc stubDesc(desc.lineOrBytecode(),
                              wasm::CallSiteDesc::ReturnStub);
  wasmCollapseFrameSlow(retCallInfo, stubDesc);
  jump(calleeScratch);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());

  // Fast path: just load the code pointer and go.

  bind(&fastCall);

  loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
          calleeScratch);

  wasmCollapseFrameFast(retCallInfo);
  jump(calleeScratch);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());
}
#endif  // ENABLE_WASM_TAIL_CALLS
void MacroAssembler::wasmCallRef(const wasm::CallSiteDesc& desc,
                                 const wasm::CalleeDesc& callee,
                                 CodeOffset* fastCallOffset,
                                 CodeOffset* slowCallOffset) {
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::FuncRef);
  const Register calleeScratch = WasmCallRefCallScratchReg0;
  const Register calleeFnObj = WasmCallRefReg;

  // Load from the function's WASM_INSTANCE_SLOT extended slot, and decide
  // whether to take the fast path or the slow path. Register this load
  // instruction as the source of a trap -- a null pointer check.

  Label fastCall;
  Label done;
  const Register newInstanceTemp = WasmCallRefCallScratchReg1;
  size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_INSTANCE_SLOT);
  static_assert(FunctionExtended::WASM_INSTANCE_SLOT < wasm::NullPtrGuardSize);
  wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
  FaultingCodeOffset fco =
      loadPtr(Address(calleeFnObj, instanceSlotOffset), newInstanceTemp);
  append(wasm::Trap::NullPointerDereference,
         wasm::TrapSite(wasm::TrapMachineInsnForLoadWord(), fco, trapOffset));
  branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  movePtr(newInstanceTemp, InstanceReg);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));

  loadWasmPinnedRegsFromInstance();
  switchToWasmInstanceRealm(WasmCallRefCallScratchReg0,
                            WasmCallRefCallScratchReg1);

  // Get funcUncheckedCallEntry() from the function's
  // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
  size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
  loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);

  *slowCallOffset = call(desc, calleeScratch);
#ifdef ENABLE_WASM_TAIL_CALLS
  wasmMarkSlowCall();
#endif

  // Restore registers and realm, switching back to this caller's.
  loadPtr(Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
          InstanceReg);
  loadWasmPinnedRegsFromInstance();
  switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
  jump(&done);

  // Fast path: just load the WASM_FUNC_UNCHECKED_ENTRY_SLOT value and go.
  // The instance and pinned registers are the same as in the caller.

  bind(&fastCall);

  loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);

  // We use a different type of call site for the fast call since the instance
  // slots in the frame do not have valid values.

  wasm::CallSiteDesc newDesc(desc.lineOrBytecode(),
                             wasm::CallSiteDesc::FuncRefFast);
  *fastCallOffset = call(newDesc, calleeScratch);

  bind(&done);
}
#ifdef ENABLE_WASM_TAIL_CALLS
void MacroAssembler::wasmReturnCallRef(
    const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee,
    const ReturnCallAdjustmentInfo& retCallInfo) {
  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::FuncRef);
  const Register calleeScratch = WasmCallRefCallScratchReg0;
  const Register calleeFnObj = WasmCallRefReg;

  // Load from the function's WASM_INSTANCE_SLOT extended slot, and decide
  // whether to take the fast path or the slow path. Register this load
  // instruction as the source of a trap -- a null pointer check.

  Label fastCall;
  const Register newInstanceTemp = WasmCallRefCallScratchReg1;
  size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_INSTANCE_SLOT);
  static_assert(FunctionExtended::WASM_INSTANCE_SLOT < wasm::NullPtrGuardSize);
  wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
  FaultingCodeOffset fco =
      loadPtr(Address(calleeFnObj, instanceSlotOffset), newInstanceTemp);
  append(wasm::Trap::NullPointerDereference,
         wasm::TrapSite(wasm::TrapMachineInsnForLoadWord(), fco, trapOffset));
  branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);

  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
  movePtr(newInstanceTemp, InstanceReg);
  storePtr(InstanceReg,
           Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));

  loadWasmPinnedRegsFromInstance();
  switchToWasmInstanceRealm(WasmCallRefCallScratchReg0,
                            WasmCallRefCallScratchReg1);

  // Get funcUncheckedCallEntry() from the function's
  // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
  size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
  loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);

  wasm::CallSiteDesc stubDesc(desc.lineOrBytecode(),
                              wasm::CallSiteDesc::ReturnStub);
  wasmCollapseFrameSlow(retCallInfo, stubDesc);
  jump(calleeScratch);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());

  // Fast path: just load the WASM_FUNC_UNCHECKED_ENTRY_SLOT value and go.
  // The instance and pinned registers are the same as in the caller.

  bind(&fastCall);

  loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);

  wasmCollapseFrameFast(retCallInfo);
  jump(calleeScratch);
  append(wasm::CodeRangeUnwindInfo::Normal, currentOffset());
}
#endif  // ENABLE_WASM_TAIL_CALLS
void MacroAssembler::wasmBoundsCheckRange32(
    Register index, Register length, Register limit, Register tmp,
    wasm::BytecodeOffset bytecodeOffset) {
  Label ok;
  Label fail;

  mov(index, tmp);
  branchAdd32(Assembler::CarrySet, length, tmp, &fail);
  branch32(Assembler::Above, tmp, limit, &fail);
  jump(&ok);

  bind(&fail);
  wasmTrap(wasm::Trap::OutOfBounds, bytecodeOffset);

  bind(&ok);
}
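
// Example of the range check above: with index == 10, length == 20 and
// limit == 25, tmp becomes 30, which is above the limit, so the code traps.
// The CarrySet branch handles 32-bit wraparound: index == 0xFFFFFFF0 with
// length == 0x20 overflows the addition and must also trap, even though the
// wrapped sum 0x10 would otherwise compare below the limit.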
bool MacroAssembler::needScratch1ForBranchWasmRefIsSubtypeAny(
    wasm::RefType type) {
  MOZ_ASSERT(type.isValid());
  MOZ_ASSERT(type.isAnyHierarchy());
  return !type.isNone() && !type.isAny();
}

bool MacroAssembler::needScratch2ForBranchWasmRefIsSubtypeAny(
    wasm::RefType type) {
  MOZ_ASSERT(type.isValid());
  MOZ_ASSERT(type.isAnyHierarchy());
  return type.isTypeRef() &&
         type.typeDef()->subTypingDepth() >= wasm::MinSuperTypeVectorLength;
}

bool MacroAssembler::needSuperSTVForBranchWasmRefIsSubtypeAny(
    wasm::RefType type) {
  MOZ_ASSERT(type.isValid());
  MOZ_ASSERT(type.isAnyHierarchy());
  return type.isTypeRef();
}
void MacroAssembler::branchWasmRefIsSubtypeAny(
    Register ref, wasm::RefType sourceType, wasm::RefType destType,
    Label* label, bool onSuccess, Register superSTV, Register scratch1,
    Register scratch2) {
  MOZ_ASSERT(sourceType.isValid());
  MOZ_ASSERT(destType.isValid());
  MOZ_ASSERT(sourceType.isAnyHierarchy());
  MOZ_ASSERT(destType.isAnyHierarchy());
  MOZ_ASSERT_IF(needScratch1ForBranchWasmRefIsSubtypeAny(destType),
                scratch1 != Register::Invalid());
  MOZ_ASSERT_IF(needScratch2ForBranchWasmRefIsSubtypeAny(destType),
                scratch2 != Register::Invalid());
  MOZ_ASSERT_IF(needSuperSTVForBranchWasmRefIsSubtypeAny(destType),
                superSTV != Register::Invalid());

  Label fallthrough;
  Label* successLabel = onSuccess ? label : &fallthrough;
  Label* failLabel = onSuccess ? &fallthrough : label;
  Label* nullLabel = destType.isNullable() ? successLabel : failLabel;

  // Check for null.
  if (sourceType.isNullable()) {
    branchWasmAnyRefIsNull(true, ref, nullLabel);
  }

  // The only value that can inhabit 'none' is null. So, early out if we got
  // a non-null value.
  if (destType.isNone()) {
    jump(failLabel);
    bind(&fallthrough);
    return;
  }

  if (destType.isAny()) {
    // No further checks for 'any'
    jump(successLabel);
    bind(&fallthrough);
    return;
  }

  // 'type' is now 'eq' or lower, which currently will either be a gc object
  // or an i31.

  // Check first for i31 values, and get them out of the way. i31 values are
  // valid when casting to i31 or eq, and invalid otherwise.
  if (destType.isI31() || destType.isEq()) {
    branchWasmAnyRefIsI31(true, ref, successLabel);

    if (destType.isI31()) {
      // No further checks for 'i31'
      jump(failLabel);
      bind(&fallthrough);
      return;
    }
  }

  // Then check for any kind of gc object.
  MOZ_ASSERT(scratch1 != Register::Invalid());
  if (!wasm::RefType::isSubTypeOf(sourceType, wasm::RefType::struct_()) &&
      !wasm::RefType::isSubTypeOf(sourceType, wasm::RefType::array())) {
    branchWasmAnyRefIsObjectOrNull(false, ref, failLabel);
    branchObjectIsWasmGcObject(false, ref, scratch1, failLabel);
  }

  if (destType.isEq()) {
    // No further checks for 'eq'
    jump(successLabel);
    bind(&fallthrough);
    return;
  }

  // 'type' is now 'struct', 'array', or a concrete type. (Bottom types and
  // i31 were handled above.)
  //
  // Casting to a concrete type only requires a simple check on the
  // object's super type vector. Casting to an abstract type (struct, array)
  // requires loading the object's superTypeVector->typeDef->kind, and checking
  // that it is correct.

  loadPtr(Address(ref, int32_t(WasmGcObject::offsetOfSuperTypeVector())),
          scratch1);
  if (destType.isTypeRef()) {
    // concrete type, do the superTypeVector check
    branchWasmSTVIsSubtype(scratch1, superSTV, scratch2,
                           destType.typeDef()->subTypingDepth(), successLabel,
                           true);
  } else {
    // abstract type, do the kind check
    loadPtr(Address(scratch1,
                    int32_t(wasm::SuperTypeVector::offsetOfSelfTypeDef())),
            scratch1);
    load8ZeroExtend(Address(scratch1, int32_t(wasm::TypeDef::offsetOfKind())),
                    scratch1);
    branch32(Assembler::Equal, scratch1, Imm32(int32_t(destType.typeDefKind())),
             successLabel);
  }

  // If we didn't branch away, the cast failed.
  jump(failLabel);
  bind(&fallthrough);
}
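
// The checks above compose into a ladder. For instance, a cast from
// (ref null any) to (ref struct) emits, in order: a null check (null fails,
// since the destination is non-nullable), an i31 filter (i31 fails), a
// gc-object check, and finally a kind check that the object's
// superTypeVector->typeDef->kind matches the struct kind. Casting to a
// concrete (ref $t) instead ends with the super type vector comparison in
// branchWasmSTVIsSubtype below.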
bool MacroAssembler::needSuperSTVAndScratch1ForBranchWasmRefIsSubtypeFunc(
    wasm::RefType type) {
  MOZ_ASSERT(type.isValid());
  MOZ_ASSERT(type.isFuncHierarchy());
  return type.isTypeRef();
}

bool MacroAssembler::needScratch2ForBranchWasmRefIsSubtypeFunc(
    wasm::RefType type) {
  MOZ_ASSERT(type.isValid());
  MOZ_ASSERT(type.isFuncHierarchy());
  return type.isTypeRef() &&
         type.typeDef()->subTypingDepth() >= wasm::MinSuperTypeVectorLength;
}
void MacroAssembler::branchWasmRefIsSubtypeFunc(
    Register ref, wasm::RefType sourceType, wasm::RefType destType,
    Label* label, bool onSuccess, Register superSTV, Register scratch1,
    Register scratch2) {
  MOZ_ASSERT(sourceType.isValid());
  MOZ_ASSERT(destType.isValid());
  MOZ_ASSERT(sourceType.isFuncHierarchy());
  MOZ_ASSERT(destType.isFuncHierarchy());
  MOZ_ASSERT_IF(
      needSuperSTVAndScratch1ForBranchWasmRefIsSubtypeFunc(destType),
      superSTV != Register::Invalid() && scratch1 != Register::Invalid());
  MOZ_ASSERT_IF(needScratch2ForBranchWasmRefIsSubtypeFunc(destType),
                scratch2 != Register::Invalid());

  Label fallthrough;
  Label* successLabel = onSuccess ? label : &fallthrough;
  Label* failLabel = onSuccess ? &fallthrough : label;
  Label* nullLabel = destType.isNullable() ? successLabel : failLabel;

  // Check for null.
  if (sourceType.isNullable()) {
    branchTestPtr(Assembler::Zero, ref, ref, nullLabel);
  }

  // The only value that can inhabit 'nofunc' is null. So, early out if we got
  // a non-null value.
  if (destType.isNoFunc()) {
    jump(failLabel);
    bind(&fallthrough);
    return;
  }

  if (destType.isFunc()) {
    // No further checks for 'func' (any func)
    jump(successLabel);
    bind(&fallthrough);
    return;
  }

  // In the func hierarchy, a supertype vector check is now sufficient for all
  // remaining cases.
  loadPrivate(Address(ref, int32_t(FunctionExtended::offsetOfWasmSTV())),
              scratch1);
  branchWasmSTVIsSubtype(scratch1, superSTV, scratch2,
                         destType.typeDef()->subTypingDepth(), successLabel,
                         true);

  // If we didn't branch away, the cast failed.
  jump(failLabel);
  bind(&fallthrough);
}
void MacroAssembler::branchWasmRefIsSubtypeExtern(Register ref,
                                                  wasm::RefType sourceType,
                                                  wasm::RefType destType,
                                                  Label* label,
                                                  bool onSuccess) {
  MOZ_ASSERT(sourceType.isValid());
  MOZ_ASSERT(destType.isValid());
  MOZ_ASSERT(sourceType.isExternHierarchy());
  MOZ_ASSERT(destType.isExternHierarchy());

  Label fallthrough;
  Label* successLabel = onSuccess ? label : &fallthrough;
  Label* failLabel = onSuccess ? &fallthrough : label;
  Label* nullLabel = destType.isNullable() ? successLabel : failLabel;

  // Check for null.
  if (sourceType.isNullable()) {
    branchTestPtr(Assembler::Zero, ref, ref, nullLabel);
  }

  // The only value that can inhabit 'noextern' is null. So, early out if we
  // got a non-null value.
  if (destType.isNoExtern()) {
    jump(failLabel);
    bind(&fallthrough);
    return;
  }

  // There are no other possible types except externref, so succeed!
  jump(successLabel);
  bind(&fallthrough);
}
void MacroAssembler::branchWasmSTVIsSubtype(Register subSTV, Register superSTV,
                                            Register scratch,
                                            uint32_t superDepth, Label* label,
                                            bool onSuccess) {
  MOZ_ASSERT_IF(superDepth >= wasm::MinSuperTypeVectorLength,
                scratch != Register::Invalid());
  Label fallthrough;
  Label* failed = onSuccess ? &fallthrough : label;

  // At this point, we could generate a fast success check which jumps to
  // `success` if `subSTV == superSTV`. However, profiling of Barista-3 seems
  // to show this is hardly worth anything, whereas it is worth us generating
  // smaller code and in particular one fewer conditional branch.

  // Emit a bounds check if the super type depth may be out-of-bounds.
  if (superDepth >= wasm::MinSuperTypeVectorLength) {
    load32(Address(subSTV, wasm::SuperTypeVector::offsetOfLength()), scratch);
    branch32(Assembler::BelowOrEqual, scratch, Imm32(superDepth), failed);
  }

  // Load the `superTypeDepth` entry from subSTV. This will be `superSTV` if
  // `subSTV` is indeed a subtype.
  loadPtr(
      Address(subSTV, wasm::SuperTypeVector::offsetOfSTVInVector(superDepth)),
      subSTV);

  // We succeed iff the entries are equal.
  branchPtr(onSuccess ? Assembler::Equal : Assembler::NotEqual, subSTV,
            superSTV, label);

  bind(&fallthrough);
}
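
// Example of the super type vector (STV) check: given a chain $C <: $B <: $A
// with subtyping depths $A == 0, $B == 1 and $C == 2, $C's vector is laid out
// as [STV($A), STV($B), STV($C), ...]. To test "is X a subtype of $B", the
// code above loads entry 1 of X's vector and compares it against $B's vector:
// they are equal exactly when $B is on X's supertype chain. The bounds check
// is only needed when superDepth may exceed the minimum length that every
// vector is padded to.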
void MacroAssembler::branchWasmSTVIsSubtypeDynamicDepth(
    Register subSTV, Register superSTV, Register superDepth, Register scratch,
    Label* label, bool onSuccess) {
  Label fallthrough;
  Label* failed = onSuccess ? &fallthrough : label;

  // Bounds check of the super type vector.
  load32(Address(subSTV, wasm::SuperTypeVector::offsetOfLength()), scratch);
  branch32(Assembler::BelowOrEqual, scratch, superDepth, failed);

  // Load `subSTV[superTypeDepth]`. This will be `superSTV` if `subSTV` is
  // indeed a subtype.
  loadPtr(BaseIndex(subSTV, superDepth, ScalePointer,
                    offsetof(wasm::SuperTypeVector, types_)),
          subSTV);

  // We succeed iff the entries are equal.
  branchPtr(onSuccess ? Assembler::Equal : Assembler::NotEqual, subSTV,
            superSTV, label);

  bind(&fallthrough);
}
void MacroAssembler::branchWasmAnyRefIsNull(bool isNull, Register src,
                                            Label* label) {
  branchTestPtr(isNull ? Assembler::Zero : Assembler::NonZero, src, src,
                label);
}

void MacroAssembler::branchWasmAnyRefIsI31(bool isI31, Register src,
                                           Label* label) {
  branchTestPtr(isI31 ? Assembler::NonZero : Assembler::Zero, src,
                Imm32(int32_t(wasm::AnyRefTag::I31)), label);
}

void MacroAssembler::branchWasmAnyRefIsObjectOrNull(bool isObject, Register src,
                                                    Label* label) {
  branchTestPtr(isObject ? Assembler::Zero : Assembler::NonZero, src,
                Imm32(int32_t(wasm::AnyRef::TagMask)), label);
}

void MacroAssembler::branchWasmAnyRefIsGCThing(bool isGCThing, Register src,
                                               Label* label) {
  Label fallthrough;
  Label* isGCThingLabel = isGCThing ? label : &fallthrough;
  Label* isNotGCThingLabel = isGCThing ? &fallthrough : label;

  // A null value or an i31 value is not a GC thing.
  branchWasmAnyRefIsNull(true, src, isNotGCThingLabel);
  branchWasmAnyRefIsI31(true, src, isNotGCThingLabel);
  jump(isGCThingLabel);

  bind(&fallthrough);
}

void MacroAssembler::branchWasmAnyRefIsNurseryCell(bool isNurseryCell,
                                                   Register src, Register temp,
                                                   Label* label) {
  Label done;
  branchWasmAnyRefIsGCThing(false, src, isNurseryCell ? &done : label);

  getWasmAnyRefGCThingChunk(src, temp);
  branchPtr(isNurseryCell ? Assembler::NotEqual : Assembler::Equal,
            Address(temp, gc::ChunkStoreBufferOffset), ImmWord(0), label);

  bind(&done);
}
void MacroAssembler::truncate32ToWasmI31Ref(Register src, Register dest) {
  // This will either zero-extend or sign-extend the high 32 bits on 64-bit
  // platforms (see comments on invariants in MacroAssembler.h). Either case
  // is fine, as we won't use these bits.
  move32(src, dest);
  // Move the payload of the integer over by 1 to make room for the tag. This
  // will perform the truncation required by the spec.
  lshift32(Imm32(1), dest);
  // Add the i31 tag to the integer.
  orPtr(Imm32(int32_t(wasm::AnyRefTag::I31)), dest);
}

void MacroAssembler::convertWasmI31RefTo32Signed(Register src, Register dest) {
  // This will either zero-extend or sign-extend the high 32 bits on 64-bit
  // platforms (see comments on invariants in MacroAssembler.h). Either case
  // is fine, as we won't use these bits.
  move32(src, dest);
  // Shift the payload back (clobbering the tag). This will sign-extend, giving
  // us the signed behavior we want.
  rshift32Arithmetic(Imm32(1), dest);
}

void MacroAssembler::convertWasmI31RefTo32Unsigned(Register src,
                                                   Register dest) {
  // This will either zero-extend or sign-extend the high 32 bits on 64-bit
  // platforms (see comments on invariants in MacroAssembler.h). Either case
  // is fine, as we won't use these bits.
  move32(src, dest);
  // Shift the payload back (clobbering the tag). This will zero-extend, giving
  // us the unsigned behavior we want.
  rshift32(Imm32(1), dest);
}
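
// Bit-level example of the i31 encoding used above (this assumes the i31 tag
// occupies the low bit, i.e. int32_t(wasm::AnyRefTag::I31) == 1, which is
// what the shift-by-one layout implies):
//   truncate32ToWasmI31Ref:   5 -> (5 << 1) | 1 == 0x0B
//   convertWasmI31RefTo32Signed(0x0B):   0x0B >> 1 (arithmetic) == 5
//   convertWasmI31RefTo32Unsigned(0x0B): 0x0B >> 1 (logical)    == 5
// For negative payloads the arithmetic and logical shifts differ, which is
// exactly the signed/unsigned distinction between the two conversions.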
void MacroAssembler::branchValueConvertsToWasmAnyRefInline(
    ValueOperand src, Register scratchInt, FloatRegister scratchFloat,
    Label* label) {
  // We can convert objects, strings, 31-bit integers and null without boxing.
  Label checkInt32;
  Label checkDouble;
  Label fallthrough;
  ScratchTagScope tag(*this, src);
  splitTagForTest(src, tag);
  branchTestObject(Assembler::Equal, tag, label);
  branchTestString(Assembler::Equal, tag, label);
  branchTestNull(Assembler::Equal, tag, label);
  branchTestInt32(Assembler::Equal, tag, &checkInt32);
  branchTestDouble(Assembler::Equal, tag, &checkDouble);
  jump(&fallthrough);

  bind(&checkInt32);
  unboxInt32(src, scratchInt);
  branch32(Assembler::GreaterThan, scratchInt, Imm32(wasm::AnyRef::MaxI31Value),
           &fallthrough);
  branch32(Assembler::LessThan, scratchInt, Imm32(wasm::AnyRef::MinI31Value),
           &fallthrough);
  jump(label);

  bind(&checkDouble);
  {
    ScratchTagScopeRelease _(&tag);
    convertValueToInt32(src, scratchFloat, scratchInt, &fallthrough, true,
                        IntConversionInputKind::NumbersOnly);
  }
  branch32(Assembler::GreaterThan, scratchInt, Imm32(wasm::AnyRef::MaxI31Value),
           &fallthrough);
  branch32(Assembler::LessThan, scratchInt, Imm32(wasm::AnyRef::MinI31Value),
           &fallthrough);
  jump(label);

  bind(&fallthrough);
}
void MacroAssembler::convertValueToWasmAnyRef(ValueOperand src, Register dest,
                                              FloatRegister scratchFloat,
                                              Label* oolConvert) {
  Label doubleValue, int32Value, nullValue, stringValue, objectValue, done;
  {
    ScratchTagScope tag(*this, src);
    splitTagForTest(src, tag);
    branchTestObject(Assembler::Equal, tag, &objectValue);
    branchTestString(Assembler::Equal, tag, &stringValue);
    branchTestNull(Assembler::Equal, tag, &nullValue);
    branchTestInt32(Assembler::Equal, tag, &int32Value);
    branchTestDouble(Assembler::Equal, tag, &doubleValue);
    jump(oolConvert);
  }

  bind(&doubleValue);
  convertValueToInt32(src, scratchFloat, dest, oolConvert, true,
                      IntConversionInputKind::NumbersOnly);
  branch32(Assembler::GreaterThan, dest, Imm32(wasm::AnyRef::MaxI31Value),
           oolConvert);
  branch32(Assembler::LessThan, dest, Imm32(wasm::AnyRef::MinI31Value),
           oolConvert);
  lshiftPtr(Imm32(1), dest);
  orPtr(Imm32((int32_t)wasm::AnyRefTag::I31), dest);
  jump(&done);

  bind(&int32Value);
  unboxInt32(src, dest);
  branch32(Assembler::GreaterThan, dest, Imm32(wasm::AnyRef::MaxI31Value),
           oolConvert);
  branch32(Assembler::LessThan, dest, Imm32(wasm::AnyRef::MinI31Value),
           oolConvert);
  lshiftPtr(Imm32(1), dest);
  orPtr(Imm32((int32_t)wasm::AnyRefTag::I31), dest);
  jump(&done);

  bind(&nullValue);
  static_assert(wasm::AnyRef::NullRefValue == 0);
  xorPtr(dest, dest);
  jump(&done);

  bind(&stringValue);
  unboxString(src, dest);
  orPtr(Imm32((int32_t)wasm::AnyRefTag::String), dest);
  jump(&done);

  bind(&objectValue);
  unboxObject(src, dest);

  bind(&done);
}
void MacroAssembler::convertObjectToWasmAnyRef(Register src, Register dest) {
  // JS objects are represented without any tagging.
  movePtr(src, dest);
}

void MacroAssembler::convertStringToWasmAnyRef(Register src, Register dest) {
  // JS strings require a tag.
  movePtr(src, dest);
  orPtr(Imm32(int32_t(wasm::AnyRefTag::String)), dest);
}
void MacroAssembler::branchObjectIsWasmGcObject(bool isGcObject, Register src,
                                                Register scratch,
                                                Label* label) {
  constexpr uint32_t ShiftedMask = (Shape::kindMask() << Shape::kindShift());
  constexpr uint32_t ShiftedKind =
      (uint32_t(Shape::Kind::WasmGC) << Shape::kindShift());
  MOZ_ASSERT(src != scratch);

  loadPtr(Address(src, JSObject::offsetOfShape()), scratch);
  load32(Address(scratch, Shape::offsetOfImmutableFlags()), scratch);
  and32(Imm32(ShiftedMask), scratch);
  branch32(isGcObject ? Assembler::Equal : Assembler::NotEqual, scratch,
           Imm32(ShiftedKind), label);
}
void MacroAssembler::wasmNewStructObject(Register instance, Register result,
                                         Register typeDefData, Register temp1,
                                         Register temp2, Label* fail,
                                         gc::AllocKind allocKind,
                                         bool zeroFields) {
  // Don't execute the inline path if GC probes are built in.
#ifdef JS_GC_PROBES
  jump(fail);
#endif

#ifdef JS_GC_ZEAL
  // Don't execute the inline path if gc zeal or tracing are active.
  loadPtr(Address(instance, wasm::Instance::offsetOfAddressOfGCZealModeBits()),
          temp1);
  loadPtr(Address(temp1, 0), temp1);
  branch32(Assembler::NotEqual, temp1, Imm32(0), fail);
#endif

  // If the alloc site is long lived, immediately fall back to the OOL path,
  // which will handle that.
  loadPtr(Address(typeDefData, wasm::TypeDefInstanceData::offsetOfAllocSite()),
          temp1);
  branchTestPtr(Assembler::NonZero,
                Address(temp1, gc::AllocSite::offsetOfScriptAndState()),
                Imm32(gc::AllocSite::LONG_LIVED_BIT), fail);

  size_t sizeBytes = gc::Arena::thingSize(allocKind);
  wasmBumpPointerAllocate(instance, result, typeDefData, temp1, temp2, fail,
                          sizeBytes);
  loadPtr(Address(typeDefData, wasm::TypeDefInstanceData::offsetOfShape()),
          temp1);
  loadPtr(Address(typeDefData,
                  wasm::TypeDefInstanceData::offsetOfSuperTypeVector()),
          temp2);
  storePtr(temp1, Address(result, WasmStructObject::offsetOfShape()));
  storePtr(temp2, Address(result, WasmStructObject::offsetOfSuperTypeVector()));
  storePtr(ImmWord(0),
           Address(result, WasmStructObject::offsetOfOutlineData()));

  if (zeroFields) {
    MOZ_ASSERT(sizeBytes % sizeof(void*) == 0);
    for (size_t i = WasmStructObject::offsetOfInlineData(); i < sizeBytes;
         i += sizeof(void*)) {
      storePtr(ImmWord(0), Address(result, i));
    }
  }
}
// This function handles nursery allocations for wasm. For JS, see
// MacroAssembler::bumpPointerAllocate.
void MacroAssembler::wasmBumpPointerAllocate(Register instance, Register result,
                                             Register typeDefData,
                                             Register temp1, Register temp2,
                                             Label* fail, uint32_t size) {
  MOZ_ASSERT(size >= gc::MinCellSize);

  uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
  MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  int32_t endOffset = Nursery::offsetOfCurrentEndFromPosition();

  // Bail to OOL code if the alloc site needs to be initialized. Keep
  // allocCount in temp2 for later.
  computeEffectiveAddress(
      Address(typeDefData, wasm::TypeDefInstanceData::offsetOfAllocSite()),
      temp1);
  load32(Address(temp1, gc::AllocSite::offsetOfNurseryAllocCount()), temp2);
  branch32(Assembler::Equal, temp2, Imm32(0), fail);

  // Bump allocate in the nursery, bailing if there is not enough room.
  loadPtr(Address(instance, wasm::Instance::offsetOfAddressOfNurseryPosition()),
          temp1);
  loadPtr(Address(temp1, 0), result);
  addPtr(Imm32(totalSize), result);
  branchPtr(Assembler::Below, Address(temp1, endOffset), result, fail);
  storePtr(result, Address(temp1, 0));
  subPtr(Imm32(size), result);

  // Increment the alloc count in the allocation site and store the pointer in
  // the nursery cell header. See NurseryCellHeader::MakeValue.
  computeEffectiveAddress(
      Address(typeDefData, wasm::TypeDefInstanceData::offsetOfAllocSite()),
      temp1);
  add32(Imm32(1), temp2);
  store32(temp2, Address(temp1, gc::AllocSite::offsetOfNurseryAllocCount()));
  // Because JS::TraceKind::Object is zero, there is no need to explicitly set
  // it in the nursery cell header.
  static_assert(int(JS::TraceKind::Object) == 0);
  storePtr(temp1, Address(result, -js::Nursery::nurseryCellHeaderSize()));
}
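
// In C-like pseudocode, the bump allocation above is roughly (names are
// descriptive, not actual fields, and the header is assumed to be one word):
//
//   char* cursor = *nurseryPosition;
//   char* newCursor = cursor + totalSize;           // header + object
//   if (*nurseryCurrentEnd < newCursor) goto fail;  // not enough room
//   *nurseryPosition = newCursor;
//   result = newCursor - size;                      // start of the object
//   ((uintptr_t*)result)[-1] = allocSiteAddress;    // nursery cell header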
// Unboxing is branchy and contorted because of Spectre mitigations - we don't
// have enough scratch registers. Were it not for the spectre mitigations in
// branchTestObjClass, the branch nest below would be restructured
// significantly by inverting branches and using fewer registers.

// Unbox an anyref in src (clobbering src in the process) and then re-box it as
// a Value in *dst. See the definition of AnyRef for a discussion of pointer
// representation.
void MacroAssembler::convertWasmAnyRefToValue(Register instance, Register src,
                                              ValueOperand dst,
                                              Register scratch) {
  MOZ_ASSERT(src != scratch);
#if JS_BITS_PER_WORD == 32
  MOZ_ASSERT(dst.typeReg() != scratch);
  MOZ_ASSERT(dst.payloadReg() != scratch);
#else
  MOZ_ASSERT(dst.valueReg() != scratch);
#endif

  Label isI31, isObjectOrNull, isObject, isWasmValueBox, done;

  // Check first whether this is an i31 value.
  branchTestPtr(Assembler::NonZero, src, Imm32(int32_t(wasm::AnyRefTag::I31)),
                &isI31);
  // Then check for the object or null tag.
  branchTestPtr(Assembler::Zero, src, Imm32(wasm::AnyRef::TagMask),
                &isObjectOrNull);

  // If we're not i31, object, or null, we must be a string.
  rshiftPtr(Imm32(wasm::AnyRef::TagShift), src);
  lshiftPtr(Imm32(wasm::AnyRef::TagShift), src);
  moveValue(TypedOrValueRegister(MIRType::String, AnyRegister(src)), dst);
  jump(&done);

  // This is an i31 value, convert it to an int32 JS value.
  bind(&isI31);
  convertWasmI31RefTo32Signed(src, src);
  moveValue(TypedOrValueRegister(MIRType::Int32, AnyRegister(src)), dst);
  jump(&done);

  // Check for the null value.
  bind(&isObjectOrNull);
  branchTestPtr(Assembler::NonZero, src, src, &isObject);
  moveValue(NullValue(), dst);
  jump(&done);

  // Otherwise we must be a non-null object. Next we check whether it's
  // storing a boxed value.
  bind(&isObject);
  // The type test will clear src if the test fails, so store early.
  moveValue(TypedOrValueRegister(MIRType::Object, AnyRegister(src)), dst);
  // Spectre mitigations: see the comment above about efficiency.
  branchTestObjClass(Assembler::Equal, src,
                     Address(instance, wasm::Instance::offsetOfValueBoxClass()),
                     scratch, src, &isWasmValueBox);
  jump(&done);

  // This is a boxed JS value, unbox it.
  bind(&isWasmValueBox);
  loadValue(Address(src, wasm::AnyRef::valueBoxOffsetOfValue()), dst);

  bind(&done);
}
void MacroAssembler::convertWasmAnyRefToValue(Register instance, Register src,
                                              const Address& dst,
                                              Register scratch) {
  MOZ_ASSERT(src != scratch);

  Label isI31, isObjectOrNull, isObject, isWasmValueBox, done;

  // Check first whether this is an i31 value.
  branchTestPtr(Assembler::NonZero, src, Imm32(int32_t(wasm::AnyRefTag::I31)),
                &isI31);
  // Then check for the object or null tag.
  branchTestPtr(Assembler::Zero, src, Imm32(wasm::AnyRef::TagMask),
                &isObjectOrNull);

  // If we're not i31, object, or null, we must be a string.
  rshiftPtr(Imm32(wasm::AnyRef::TagShift), src);
  lshiftPtr(Imm32(wasm::AnyRef::TagShift), src);
  storeValue(JSVAL_TYPE_STRING, src, dst);
  jump(&done);

  // This is an i31 value, convert it to an int32 JS value.
  bind(&isI31);
  convertWasmI31RefTo32Signed(src, src);
  storeValue(JSVAL_TYPE_INT32, src, dst);
  jump(&done);

  // Check for the null value.
  bind(&isObjectOrNull);
  branchTestPtr(Assembler::NonZero, src, src, &isObject);
  storeValue(NullValue(), dst);
  jump(&done);

  // Otherwise we must be a non-null object. Next we check whether it's
  // storing a boxed value.
  bind(&isObject);
  // The type test will clear src if the test fails, so store early.
  storeValue(JSVAL_TYPE_OBJECT, src, dst);
  // Spectre mitigations: see the comment above about efficiency.
  branchTestObjClass(Assembler::Equal, src,
                     Address(instance, wasm::Instance::offsetOfValueBoxClass()),
                     scratch, src, &isWasmValueBox);
  jump(&done);

  // This is a boxed JS value, unbox it.
  bind(&isWasmValueBox);
  copy64(Address(src, wasm::AnyRef::valueBoxOffsetOfValue()), dst, scratch);

  bind(&done);
}
void MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc) {
  CodeOffset offset = nopPatchableToCall();
  append(desc, offset);
}
void MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type,
                                            Register temp1, Register temp2,
                                            Register temp3, Label* noBarrier) {
  MOZ_ASSERT(temp1 != PreBarrierReg);
  MOZ_ASSERT(temp2 != PreBarrierReg);
  MOZ_ASSERT(temp3 != PreBarrierReg);

  // Load the GC thing in temp1.
  if (type == MIRType::Value) {
    unboxGCThingForGCBarrier(Address(PreBarrierReg, 0), temp1);
  } else if (type == MIRType::WasmAnyRef) {
    unboxWasmAnyRefGCThingForGCBarrier(Address(PreBarrierReg, 0), temp1);
  } else {
    MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
               type == MIRType::Shape);
    loadPtr(Address(PreBarrierReg, 0), temp1);
  }

#ifdef DEBUG
  // The caller should have checked for null pointers.
  Label nonZero;
  branchTestPtr(Assembler::NonZero, temp1, temp1, &nonZero);
  assumeUnreachable("JIT pre-barrier: unexpected nullptr");
  bind(&nonZero);
#endif

  // Load the chunk address in temp2.
  movePtr(temp1, temp2);
  andPtr(Imm32(int32_t(~gc::ChunkMask)), temp2);

  // If the GC thing is in the nursery, we don't need to barrier it.
  if (type == MIRType::Value || type == MIRType::Object ||
      type == MIRType::String || type == MIRType::WasmAnyRef) {
    branchPtr(Assembler::NotEqual, Address(temp2, gc::ChunkStoreBufferOffset),
              ImmWord(0), noBarrier);
  } else {
#ifdef DEBUG
    Label isTenured;
    branchPtr(Assembler::Equal, Address(temp2, gc::ChunkStoreBufferOffset),
              ImmWord(0), &isTenured);
    assumeUnreachable("JIT pre-barrier: unexpected nursery pointer");
    bind(&isTenured);
#endif
  }

  // Determine the bit index and store it in temp1.
  //
  // bit = (addr & js::gc::ChunkMask) / js::gc::CellBytesPerMarkBit +
  //       static_cast<uint32_t>(colorBit);
  static_assert(gc::CellBytesPerMarkBit == 8,
                "Calculation below relies on this");
  static_assert(size_t(gc::ColorBit::BlackBit) == 0,
                "Calculation below relies on this");
  andPtr(Imm32(gc::ChunkMask), temp1);
  rshiftPtr(Imm32(3), temp1);

  static_assert(gc::MarkBitmapWordBits == JS_BITS_PER_WORD,
                "Calculation below relies on this");

  // Load the bitmap word in temp2.
  //
  // word = chunk.bitmap[bit / MarkBitmapWordBits];

  // Fold the adjustment for the fact that arenas don't start at the beginning
  // of the chunk into the offset to the chunk bitmap.
  const size_t firstArenaAdjustment = gc::FirstArenaAdjustmentBits / CHAR_BIT;
  const intptr_t offset =
      intptr_t(gc::ChunkMarkBitmapOffset) - intptr_t(firstArenaAdjustment);

  movePtr(temp1, temp3);
#if JS_BITS_PER_WORD == 64
  rshiftPtr(Imm32(6), temp1);
  loadPtr(BaseIndex(temp2, temp1, TimesEight, offset), temp2);
#else
  rshiftPtr(Imm32(5), temp1);
  loadPtr(BaseIndex(temp2, temp1, TimesFour, offset), temp2);
#endif

  // Load the mask in temp1.
  //
  // mask = uintptr_t(1) << (bit % MarkBitmapWordBits);
  andPtr(Imm32(gc::MarkBitmapWordBits - 1), temp3);
  move32(Imm32(1), temp1);
#ifdef JS_CODEGEN_X64
  MOZ_ASSERT(temp3 == rcx);
  shlq_cl(temp1);
#elif JS_CODEGEN_X86
  MOZ_ASSERT(temp3 == ecx);
  shll_cl(temp1);
#elif JS_CODEGEN_ARM
  ma_lsl(temp3, temp1, temp1);
#elif JS_CODEGEN_ARM64
  Lsl(ARMRegister(temp1, 64), ARMRegister(temp1, 64), ARMRegister(temp3, 64));
#elif JS_CODEGEN_MIPS32
  ma_sll(temp1, temp1, temp3);
#elif JS_CODEGEN_MIPS64
  ma_dsll(temp1, temp1, temp3);
#elif JS_CODEGEN_LOONG64
  as_sll_d(temp1, temp1, temp3);
#elif JS_CODEGEN_RISCV64
  sll(temp1, temp1, temp3);
#elif JS_CODEGEN_WASM32
  MOZ_CRASH();
#elif JS_CODEGEN_NONE
  MOZ_CRASH();
#else
# error "Unknown architecture"
#endif

  // No barrier is needed if the bit is set, |word & mask != 0|.
  branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier);
}
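
// Worked example of the mark bit lookup above, on a 64-bit platform: if the
// GC thing sits at chunk offset 0x4008, then
//   bit  = 0x4008 / 8        == 0x801  (CellBytesPerMarkBit == 8)
//   word = bit / 64          == 0x20   (index into the chunk's mark bitmap)
//   mask = 1 << (bit % 64)   == 1 << 1
// and the barrier is skipped iff bitmap[word] & mask is non-zero, i.e. the
// cell is already marked.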
// ========================================================================
// JS atomic operations.

void MacroAssembler::atomicIsLockFreeJS(Register value, Register output) {
  // Keep this in sync with isLockfreeJS() in jit/AtomicOperations.h.
  static_assert(AtomicOperations::isLockfreeJS(1));  // Implementation artifact
  static_assert(AtomicOperations::isLockfreeJS(2));  // Implementation artifact
  static_assert(AtomicOperations::isLockfreeJS(4));  // Spec requirement
  static_assert(AtomicOperations::isLockfreeJS(8));  // Implementation artifact

  Label done;
  move32(Imm32(1), output);
  branch32(Assembler::Equal, value, Imm32(8), &done);
  branch32(Assembler::Equal, value, Imm32(4), &done);
  branch32(Assembler::Equal, value, Imm32(2), &done);
  branch32(Assembler::Equal, value, Imm32(1), &done);
  move32(Imm32(0), output);

  bind(&done);
}
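
// This implements the JS Atomics.isLockFree(n) predicate: the output is 1 for
// n in {1, 2, 4, 8} and 0 otherwise, matching the static_asserts above. Only
// isLockFree(4) is required to be true by the spec; the other three sizes are
// true here as an implementation artifact.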
7015 // ========================================================================
7016 // Spectre Mitigations.
7018 void MacroAssembler::spectreMaskIndex32(Register index
, Register length
,
7020 MOZ_ASSERT(JitOptions
.spectreIndexMasking
);
7021 MOZ_ASSERT(length
!= output
);
7022 MOZ_ASSERT(index
!= output
);
7024 move32(Imm32(0), output
);
7025 cmp32Move32(Assembler::Below
, index
, length
, index
, output
);

void MacroAssembler::spectreMaskIndex32(Register index, const Address& length,
                                        Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != output);
  MOZ_ASSERT(index != output);

  move32(Imm32(0), output);
  cmp32Move32(Assembler::Below, index, length, index, output);
}

void MacroAssembler::spectreMaskIndexPtr(Register index, Register length,
                                         Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(length != output);
  MOZ_ASSERT(index != output);

  movePtr(ImmWord(0), output);
  cmpPtrMovePtr(Assembler::Below, index, length, index, output);
}

void MacroAssembler::spectreMaskIndexPtr(Register index, const Address& length,
                                         Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != output);
  MOZ_ASSERT(index != output);

  movePtr(ImmWord(0), output);
  cmpPtrMovePtr(Assembler::Below, index, length, index, output);
}

void MacroAssembler::boundsCheck32PowerOfTwo(Register index, uint32_t length,
                                             Label* failure) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(length));
  branch32(Assembler::AboveOrEqual, index, Imm32(length), failure);

  // Note: it's fine to clobber the input register, as this is a no-op: it
  // only affects speculative execution.
  if (JitOptions.spectreIndexMasking) {
    and32(Imm32(length - 1), index);
  }
}
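// Worked example (illustrative): with |length| == 8 the mask is |index & 7|.
// Architecturally the preceding branch has already rejected index >= 8, so
// the mask changes nothing; under misspeculation it pins the index into
// [0, 7].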

void MacroAssembler::loadWasmPinnedRegsFromInstance(
    mozilla::Maybe<wasm::BytecodeOffset> trapOffset) {
#ifdef WASM_HAS_HEAPREG
  static_assert(wasm::Instance::offsetOfMemory0Base() < 4096,
                "We count only on the low page being inaccessible");
  FaultingCodeOffset fco = loadPtr(
      Address(InstanceReg, wasm::Instance::offsetOfMemory0Base()), HeapReg);
  if (trapOffset) {
    append(
        wasm::Trap::IndirectCallToNull,
        wasm::TrapSite(wasm::TrapMachineInsnForLoadWord(), fco, *trapOffset));
  }
#else
  MOZ_ASSERT(!trapOffset);
#endif
}

//}}} check_macroassembler_style

void MacroAssembler::debugAssertCanonicalInt32(Register r) {
#ifdef DEBUG
  if (!js::jit::JitOptions.lessDebugCode) {
#  if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
    Label ok;
    branchPtr(Assembler::BelowOrEqual, r, ImmWord(UINT32_MAX), &ok);
    breakpoint();
    bind(&ok);
#  elif defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
    Label ok;
    ScratchRegisterScope scratch(asMasm());
    move32SignExtendToPtr(r, scratch);
    branchPtr(Assembler::Equal, r, scratch, &ok);
    breakpoint();
    bind(&ok);
#  else
    MOZ_CRASH("IMPLEMENT ME");
#  endif
  }
#endif
}

void MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
  memoryBarrier(sync.barrierBefore);
}

void MacroAssembler::memoryBarrierAfter(const Synchronization& sync) {
  memoryBarrier(sync.barrierAfter);
}

void MacroAssembler::debugAssertIsObject(const ValueOperand& val) {
#ifdef DEBUG
  Label ok;
  branchTestObject(Assembler::Equal, val, &ok);
  assumeUnreachable("Expected an object!");
  bind(&ok);
#endif
}

void MacroAssembler::debugAssertObjHasFixedSlots(Register obj,
                                                 Register scratch) {
#ifdef DEBUG
  Label hasFixedSlots;
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  branchTest32(Assembler::NonZero,
               Address(scratch, Shape::offsetOfImmutableFlags()),
               Imm32(NativeShape::fixedSlotsMask()), &hasFixedSlots);
  assumeUnreachable("Expected a fixed slot");
  bind(&hasFixedSlots);
#endif
}

void MacroAssembler::debugAssertObjectHasClass(Register obj, Register scratch,
                                               const JSClass* clasp) {
#ifdef DEBUG
  Label done;
  branchTestObjClassNoSpectreMitigations(Assembler::Equal, obj, clasp, scratch,
                                         &done);
  assumeUnreachable("Class check failed");
  bind(&done);
#endif
}

void MacroAssembler::debugAssertGCThingIsTenured(Register ptr, Register temp) {
#ifdef DEBUG
  Label done;
  branchPtrInNurseryChunk(Assembler::NotEqual, ptr, temp, &done);
  assumeUnreachable("Expected a tenured pointer");
  bind(&done);
#endif
}

void MacroAssembler::branchArrayIsNotPacked(Register array, Register temp1,
                                            Register temp2, Label* label) {
  loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);

  // Test length == initializedLength.
  Address initLength(temp1, ObjectElements::offsetOfInitializedLength());
  load32(Address(temp1, ObjectElements::offsetOfLength()), temp2);
  branch32(Assembler::NotEqual, initLength, temp2, label);

  // Test the NON_PACKED flag.
  Address flags(temp1, ObjectElements::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::NON_PACKED),
               label);
}

void MacroAssembler::setIsPackedArray(Register obj, Register output,
                                      Register temp) {
  // Ensure it's an ArrayObject.
  Label notPackedArray;
  branchTestObjClass(Assembler::NotEqual, obj, &ArrayObject::class_, temp, obj,
                     &notPackedArray);

  branchArrayIsNotPacked(obj, temp, output, &notPackedArray);

  Label done;
  move32(Imm32(1), output);
  jump(&done);

  bind(&notPackedArray);
  move32(Imm32(0), output);

  bind(&done);
}

void MacroAssembler::packedArrayPop(Register array, ValueOperand output,
                                    Register temp1, Register temp2,
                                    Label* fail) {
  // Load obj->elements in temp1.
  loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);

  static constexpr uint32_t UnhandledFlags =
      ObjectElements::Flags::NON_PACKED |
      ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH |
      ObjectElements::Flags::NOT_EXTENSIBLE |
      ObjectElements::Flags::MAYBE_IN_ITERATION;
  Address flags(temp1, ObjectElements::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(UnhandledFlags), fail);

  // Load length in temp2. Ensure length == initializedLength.
  Address lengthAddr(temp1, ObjectElements::offsetOfLength());
  Address initLengthAddr(temp1, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp2);
  branch32(Assembler::NotEqual, initLengthAddr, temp2, fail);

  // Result is |undefined| if length == 0.
  Label notEmpty, done;
  branchTest32(Assembler::NonZero, temp2, temp2, &notEmpty);
  {
    moveValue(UndefinedValue(), output);
    jump(&done);
  }
  bind(&notEmpty);

  // Load the last element.
  sub32(Imm32(1), temp2);
  BaseObjectElementIndex elementAddr(temp1, temp2);
  loadValue(elementAddr, output);

  // Pre-barrier the element because we're removing it from the array.
  EmitPreBarrier(*this, elementAddr, MIRType::Value);

  // Update length and initializedLength.
  store32(temp2, lengthAddr);
  store32(temp2, initLengthAddr);

  bind(&done);
}
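// Illustrative trace (not emitted code): popping from a packed array [1,2,3]
// passes both guards, decrements temp2 from 3 to 2, loads element 2 (the
// value 3) into |output|, and stores 2 back into both length and
// initializedLength -- the Array.prototype.pop() fast path without a VM call.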

void MacroAssembler::packedArrayShift(Register array, ValueOperand output,
                                      Register temp1, Register temp2,
                                      LiveRegisterSet volatileRegs,
                                      Label* fail) {
  // Load obj->elements in temp1.
  loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);

  static constexpr uint32_t UnhandledFlags =
      ObjectElements::Flags::NON_PACKED |
      ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH |
      ObjectElements::Flags::NOT_EXTENSIBLE |
      ObjectElements::Flags::MAYBE_IN_ITERATION;
  Address flags(temp1, ObjectElements::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(UnhandledFlags), fail);

  // Load length in temp2. Ensure length == initializedLength.
  Address lengthAddr(temp1, ObjectElements::offsetOfLength());
  Address initLengthAddr(temp1, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp2);
  branch32(Assembler::NotEqual, initLengthAddr, temp2, fail);

  // Result is |undefined| if length == 0.
  Label notEmpty, done;
  branchTest32(Assembler::NonZero, temp2, temp2, &notEmpty);
  {
    moveValue(UndefinedValue(), output);
    jump(&done);
  }
  bind(&notEmpty);

  // Load the first element.
  Address elementAddr(temp1, 0);
  loadValue(elementAddr, output);

  // Move the other elements and update the initializedLength/length. This will
  // also trigger pre-barriers.
  {
    // Ensure output is in volatileRegs. Don't preserve temp1 and temp2.
    volatileRegs.takeUnchecked(temp1);
    volatileRegs.takeUnchecked(temp2);
    if (output.hasVolatileReg()) {
      volatileRegs.addUnchecked(output);
    }

    PushRegsInMask(volatileRegs);

    using Fn = void (*)(ArrayObject* arr);
    setupUnalignedABICall(temp1);
    passABIArg(array);
    callWithABI<Fn, ArrayShiftMoveElements>();

    PopRegsInMask(volatileRegs);
  }

  bind(&done);
}

void MacroAssembler::loadArgumentsObjectElement(Register obj, Register index,
                                                ValueOperand output,
                                                Register temp, Label* fail) {
  Register temp2 = output.scratchReg();

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Ensure no overridden elements.
  branchTest32(Assembler::NonZero, temp,
               Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);

  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
  spectreBoundsCheck32(index, temp, temp2, fail);

  // Load ArgumentsData.
  loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), temp);

  // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
  BaseValueIndex argValue(temp, index, ArgumentsData::offsetOfArgs());
  branchTestMagic(Assembler::Equal, argValue, fail);
  loadValue(argValue, output);
}

void MacroAssembler::loadArgumentsObjectElementHole(Register obj,
                                                    Register index,
                                                    ValueOperand output,
                                                    Register temp,
                                                    Label* fail) {
  Register temp2 = output.scratchReg();

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Ensure no overridden elements.
  branchTest32(Assembler::NonZero, temp,
               Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);

  Label outOfBounds, done;
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
  spectreBoundsCheck32(index, temp, temp2, &outOfBounds);

  // Load ArgumentsData.
  loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), temp);

  // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
  BaseValueIndex argValue(temp, index, ArgumentsData::offsetOfArgs());
  branchTestMagic(Assembler::Equal, argValue, fail);
  loadValue(argValue, output);
  jump(&done);

  bind(&outOfBounds);
  branch32(Assembler::LessThan, index, Imm32(0), fail);
  moveValue(UndefinedValue(), output);

  bind(&done);
}

void MacroAssembler::loadArgumentsObjectElementExists(
    Register obj, Register index, Register output, Register temp,
    Label* fail) {
  // Ensure the index is non-negative.
  branch32(Assembler::LessThan, index, Imm32(0), fail);

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Ensure no overridden or deleted elements.
  branchTest32(Assembler::NonZero, temp,
               Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);

  // Compare index against the length.
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
  cmp32Set(Assembler::LessThan, index, temp, output);
}

void MacroAssembler::loadArgumentsObjectLength(Register obj, Register output,
                                               Label* fail) {
  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()),
             output);

  // Test if length has been overridden.
  branchTest32(Assembler::NonZero, output,
               Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT), fail);

  // Shift out arguments length and return it.
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), output);
}
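// Illustrative encoding note (see ArgumentsObject for the authoritative
// layout): the initial-length slot stores
// |length << ArgumentsObject::PACKED_BITS_COUNT| with the override bits in
// the low bits, so an untouched 3-argument object decodes as 3 after the
// shift above.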

void MacroAssembler::branchTestArgumentsObjectFlags(Register obj, Register temp,
                                                    uint32_t flags,
                                                    Condition cond,
                                                    Label* label) {
  MOZ_ASSERT((flags & ~ArgumentsObject::PACKED_BITS_MASK) == 0);

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  branchTest32(cond, temp, Imm32(flags), label);
}

static constexpr bool ValidateSizeRange(Scalar::Type from, Scalar::Type to) {
  for (Scalar::Type type = from; type < to; type = Scalar::Type(type + 1)) {
    if (TypedArrayElemSize(type) != TypedArrayElemSize(from)) {
      return false;
    }
  }
  return true;
}

void MacroAssembler::typedArrayElementSize(Register obj, Register output) {
  loadObjClassUnsafe(obj, output);

  // Map resizable to fixed-length TypedArray classes.
  Label fixedLength;
  branchPtr(Assembler::Below, output,
            ImmPtr(std::end(TypedArrayObject::fixedLengthClasses)),
            &fixedLength);
  {
    MOZ_ASSERT(std::end(TypedArrayObject::fixedLengthClasses) ==
                   std::begin(TypedArrayObject::resizableClasses),
               "TypedArray classes are in contiguous memory");

    const auto* firstFixedLengthTypedArrayClass =
        std::begin(TypedArrayObject::fixedLengthClasses);
    const auto* firstResizableTypedArrayClass =
        std::begin(TypedArrayObject::resizableClasses);

    MOZ_ASSERT(firstFixedLengthTypedArrayClass < firstResizableTypedArrayClass);
    ptrdiff_t diff =
        firstResizableTypedArrayClass - firstFixedLengthTypedArrayClass;

    mozilla::CheckedInt<int32_t> checked = diff;
    checked *= sizeof(JSClass);
    MOZ_ASSERT(checked.isValid(), "pointer difference fits in int32");

    subPtr(Imm32(int32_t(checked.value())), output);
  }
  bind(&fixedLength);

#ifdef DEBUG
  Label invalidClass, validClass;
  branchPtr(Assembler::Below, output,
            ImmPtr(std::begin(TypedArrayObject::fixedLengthClasses)),
            &invalidClass);
  branchPtr(Assembler::Below, output,
            ImmPtr(std::end(TypedArrayObject::fixedLengthClasses)),
            &validClass);
  bind(&invalidClass);
  assumeUnreachable("value isn't a valid FixedLengthTypedArray class");
  bind(&validClass);
#endif

  auto classForType = [](Scalar::Type type) {
    MOZ_ASSERT(type < Scalar::MaxTypedArrayViewType);
    return &TypedArrayObject::fixedLengthClasses[type];
  };

  Label one, two, four, eight, done;

  static_assert(ValidateSizeRange(Scalar::Int8, Scalar::Int16),
                "element size is one in [Int8, Int16)");
  branchPtr(Assembler::Below, output, ImmPtr(classForType(Scalar::Int16)),
            &one);

  static_assert(ValidateSizeRange(Scalar::Int16, Scalar::Int32),
                "element size is two in [Int16, Int32)");
  branchPtr(Assembler::Below, output, ImmPtr(classForType(Scalar::Int32)),
            &two);

  static_assert(ValidateSizeRange(Scalar::Int32, Scalar::Float64),
                "element size is four in [Int32, Float64)");
  branchPtr(Assembler::Below, output, ImmPtr(classForType(Scalar::Float64)),
            &four);

  static_assert(ValidateSizeRange(Scalar::Float64, Scalar::Uint8Clamped),
                "element size is eight in [Float64, Uint8Clamped)");
  branchPtr(Assembler::Below, output,
            ImmPtr(classForType(Scalar::Uint8Clamped)), &eight);

  static_assert(ValidateSizeRange(Scalar::Uint8Clamped, Scalar::BigInt64),
                "element size is one in [Uint8Clamped, BigInt64)");
  branchPtr(Assembler::Below, output, ImmPtr(classForType(Scalar::BigInt64)),
            &one);

  static_assert(
      ValidateSizeRange(Scalar::BigInt64, Scalar::MaxTypedArrayViewType),
      "element size is eight in [BigInt64, MaxTypedArrayViewType)");
  // Fall through for BigInt64 and BigUint64

  bind(&eight);
  move32(Imm32(8), output);
  jump(&done);

  bind(&four);
  move32(Imm32(4), output);
  jump(&done);

  bind(&two);
  move32(Imm32(2), output);
  jump(&done);

  bind(&one);
  move32(Imm32(1), output);

  bind(&done);
}

void MacroAssembler::branchIfClassIsNotTypedArray(Register clasp,
                                                  Label* notTypedArray) {
  // Inline implementation of IsTypedArrayClass().

  const auto* firstTypedArrayClass =
      std::begin(TypedArrayObject::fixedLengthClasses);
  const auto* lastTypedArrayClass =
      std::prev(std::end(TypedArrayObject::resizableClasses));
  MOZ_ASSERT(std::end(TypedArrayObject::fixedLengthClasses) ==
                 std::begin(TypedArrayObject::resizableClasses),
             "TypedArray classes are in contiguous memory");

  branchPtr(Assembler::Below, clasp, ImmPtr(firstTypedArrayClass),
            notTypedArray);
  branchPtr(Assembler::Above, clasp, ImmPtr(lastTypedArrayClass),
            notTypedArray);
}

void MacroAssembler::branchIfClassIsNotFixedLengthTypedArray(
    Register clasp, Label* notTypedArray) {
  // Inline implementation of IsFixedLengthTypedArrayClass().

  const auto* firstTypedArrayClass =
      std::begin(TypedArrayObject::fixedLengthClasses);
  const auto* lastTypedArrayClass =
      std::prev(std::end(TypedArrayObject::fixedLengthClasses));

  branchPtr(Assembler::Below, clasp, ImmPtr(firstTypedArrayClass),
            notTypedArray);
  branchPtr(Assembler::Above, clasp, ImmPtr(lastTypedArrayClass),
            notTypedArray);
}

void MacroAssembler::branchIfHasDetachedArrayBuffer(Register obj, Register temp,
                                                    Label* label) {
  // Inline implementation of ArrayBufferViewObject::hasDetachedBuffer().

  // Load obj->elements in temp.
  loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);

  // Shared buffers can't be detached.
  Label done;
  branchTest32(Assembler::NonZero,
               Address(temp, ObjectElements::offsetOfFlags()),
               Imm32(ObjectElements::SHARED_MEMORY), &done);

  // An ArrayBufferView with a null/true buffer has never had its buffer
  // exposed, so nothing can possibly detach it.
  fallibleUnboxObject(Address(obj, ArrayBufferViewObject::bufferOffset()), temp,
                      &done);

  // Load the ArrayBuffer flags and branch if the detached flag is set.
  unboxInt32(Address(temp, ArrayBufferObject::offsetOfFlagsSlot()), temp);
  branchTest32(Assembler::NonZero, temp, Imm32(ArrayBufferObject::DETACHED),
               label);

  bind(&done);
}

void MacroAssembler::branchIfNativeIteratorNotReusable(Register ni,
                                                       Label* notReusable) {
  // See NativeIterator::isReusable.
  Address flagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());

#ifdef DEBUG
  Label niIsInitialized;
  branchTest32(Assembler::NonZero, flagsAddr,
               Imm32(NativeIterator::Flags::Initialized), &niIsInitialized);
  assumeUnreachable(
      "Expected a NativeIterator that's been completely "
      "initialized");
  bind(&niIsInitialized);
#endif

  branchTest32(Assembler::NonZero, flagsAddr,
               Imm32(NativeIterator::Flags::NotReusable), notReusable);
}

void MacroAssembler::branchNativeIteratorIndices(Condition cond, Register ni,
                                                 Register temp,
                                                 NativeIteratorIndices kind,
                                                 Label* label) {
  Address iterFlagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());
  load32(iterFlagsAddr, temp);
  and32(Imm32(NativeIterator::IndicesMask), temp);
  uint32_t shiftedKind = uint32_t(kind) << NativeIterator::IndicesShift;
  branch32(cond, temp, Imm32(shiftedKind), label);
}

static void LoadNativeIterator(MacroAssembler& masm, Register obj,
                               Register dest) {
  MOZ_ASSERT(obj != dest);

#ifdef DEBUG
  // Assert we have a PropertyIteratorObject.
  Label ok;
  masm.branchTestObjClass(Assembler::Equal, obj,
                          &PropertyIteratorObject::class_, dest, obj, &ok);
  masm.assumeUnreachable("Expected PropertyIteratorObject!");
  masm.bind(&ok);
#endif

  // Load NativeIterator object.
  Address slotAddr(obj, PropertyIteratorObject::offsetOfIteratorSlot());
  masm.loadPrivate(slotAddr, dest);
}

// The ShapeCachePtr may be used to cache an iterator for for-in. Return that
// iterator in |dest| if:
// - the shape cache pointer exists and stores a native iterator
// - the iterator is reusable
// - the iterated object has no dense elements
// - the shapes of each object on the proto chain of |obj| match the cached
//   shapes
// - the proto chain has no dense elements
// Otherwise, jump to |failure|.
void MacroAssembler::maybeLoadIteratorFromShape(Register obj, Register dest,
                                                Register temp, Register temp2,
                                                Register temp3,
                                                Label* failure) {
  // Register usage:
  // obj: always contains the input object
  // temp: walks the obj->shape->baseshape->proto->shape->... chain
  // temp2: points to the native iterator. Incremented to walk the shapes array.
  // temp3: scratch space
  // dest: stores the resulting PropertyIteratorObject on success

  Label success;
  Register shapeAndProto = temp;
  Register nativeIterator = temp2;

  // Load ShapeCache from shape.
  loadPtr(Address(obj, JSObject::offsetOfShape()), shapeAndProto);
  loadPtr(Address(shapeAndProto, Shape::offsetOfCachePtr()), dest);

  // Check if it's an iterator.
  movePtr(dest, temp3);
  andPtr(Imm32(ShapeCachePtr::MASK), temp3);
  branch32(Assembler::NotEqual, temp3, Imm32(ShapeCachePtr::ITERATOR), failure);

  // If we've cached an iterator, |obj| must be a native object.
#ifdef DEBUG
  Label nonNative;
  branchIfNonNativeObj(obj, temp3, &nonNative);
#endif

  // Verify that |obj| has no dense elements.
  loadPtr(Address(obj, NativeObject::offsetOfElements()), temp3);
  branch32(Assembler::NotEqual,
           Address(temp3, ObjectElements::offsetOfInitializedLength()),
           Imm32(0), failure);

  // Clear tag bits from iterator object. |dest| is now valid.
  // Load the native iterator and verify that it's reusable.
  andPtr(Imm32(~ShapeCachePtr::MASK), dest);
  LoadNativeIterator(*this, dest, nativeIterator);
  branchIfNativeIteratorNotReusable(nativeIterator, failure);

  // We have to compare the shapes in the native iterator with the shapes on the
  // proto chain to ensure the cached iterator is still valid. The shape array
  // always starts at a fixed offset from the base of the NativeIterator, so
  // instead of using an instruction outside the loop to initialize a pointer to
  // the shapes array, we can bake it into the offset and reuse the pointer to
  // the NativeIterator. We add |sizeof(Shape*)| to start at the second shape.
  // (The first shape corresponds to the object itself. We don't have to check
  // it, because we got the iterator via the shape.)
  size_t nativeIteratorProtoShapeOffset =
      NativeIterator::offsetOfFirstShape() + sizeof(Shape*);

  // Loop over the proto chain. At the head of the loop, |shape| is the shape of
  // the current object, and |iteratorShapes| points to the expected shape of
  // its proto.
  Label protoLoop;
  bind(&protoLoop);

  // Load the proto. If the proto is null, then we're done.
  loadPtr(Address(shapeAndProto, Shape::offsetOfBaseShape()), shapeAndProto);
  loadPtr(Address(shapeAndProto, BaseShape::offsetOfProto()), shapeAndProto);
  branchPtr(Assembler::Equal, shapeAndProto, ImmPtr(nullptr), &success);

#ifdef DEBUG
  // We have guarded every shape up until this point, so we know that the proto
  // is a native object.
  branchIfNonNativeObj(shapeAndProto, temp3, &nonNative);
#endif

  // Verify that the proto has no dense elements.
  loadPtr(Address(shapeAndProto, NativeObject::offsetOfElements()), temp3);
  branch32(Assembler::NotEqual,
           Address(temp3, ObjectElements::offsetOfInitializedLength()),
           Imm32(0), failure);

  // Compare the shape of the proto to the expected shape.
  loadPtr(Address(shapeAndProto, JSObject::offsetOfShape()), shapeAndProto);
  loadPtr(Address(nativeIterator, nativeIteratorProtoShapeOffset), temp3);
  branchPtr(Assembler::NotEqual, shapeAndProto, temp3, failure);

  // Increment |iteratorShapes| and jump back to the top of the loop.
  addPtr(Imm32(sizeof(Shape*)), nativeIterator);
  jump(&protoLoop);

#ifdef DEBUG
  bind(&nonNative);
  assumeUnreachable("Expected NativeObject in maybeLoadIteratorFromShape");
#endif

  bind(&success);
}

void MacroAssembler::iteratorMore(Register obj, ValueOperand output,
                                  Register temp) {
  Label done;
  Register outputScratch = output.scratchReg();
  LoadNativeIterator(*this, obj, outputScratch);

  // If propertyCursor_ < propertiesEnd_, load the next string and advance
  // the cursor. Otherwise return MagicValue(JS_NO_ITER_VALUE).
  Label iterDone;
  Address cursorAddr(outputScratch, NativeIterator::offsetOfPropertyCursor());
  Address cursorEndAddr(outputScratch, NativeIterator::offsetOfPropertiesEnd());
  loadPtr(cursorAddr, temp);
  branchPtr(Assembler::BelowOrEqual, cursorEndAddr, temp, &iterDone);

  loadPtr(Address(temp, 0), temp);

  // Increase the cursor.
  addPtr(Imm32(sizeof(GCPtr<JSLinearString*>)), cursorAddr);

  tagValue(JSVAL_TYPE_STRING, temp, output);
  jump(&done);

  bind(&iterDone);
  moveValue(MagicValue(JS_NO_ITER_VALUE), output);

  bind(&done);
}

void MacroAssembler::iteratorClose(Register obj, Register temp1, Register temp2,
                                   Register temp3) {
  LoadNativeIterator(*this, obj, temp1);

  // The shared iterator used for for-in with null/undefined is immutable and
  // unlinked. See NativeIterator::isEmptyIteratorSingleton.
  Label done;
  branchTest32(Assembler::NonZero,
               Address(temp1, NativeIterator::offsetOfFlagsAndCount()),
               Imm32(NativeIterator::Flags::IsEmptyIteratorSingleton), &done);

  // Clear active bit.
  and32(Imm32(~NativeIterator::Flags::Active),
        Address(temp1, NativeIterator::offsetOfFlagsAndCount()));

  // Clear objectBeingIterated.
  Address iterObjAddr(temp1, NativeIterator::offsetOfObjectBeingIterated());
  guardedCallPreBarrierAnyZone(iterObjAddr, MIRType::Object, temp2);
  storePtr(ImmPtr(nullptr), iterObjAddr);

  // Reset property cursor.
  loadPtr(Address(temp1, NativeIterator::offsetOfShapesEnd()), temp2);
  storePtr(temp2, Address(temp1, NativeIterator::offsetOfPropertyCursor()));

  // Unlink from the iterator list.
  const Register next = temp2;
  const Register prev = temp3;
  loadPtr(Address(temp1, NativeIterator::offsetOfNext()), next);
  loadPtr(Address(temp1, NativeIterator::offsetOfPrev()), prev);
  storePtr(prev, Address(next, NativeIterator::offsetOfPrev()));
  storePtr(next, Address(prev, NativeIterator::offsetOfNext()));
#ifdef DEBUG
  storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfNext()));
  storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfPrev()));
#endif

  bind(&done);
}
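// The unlink step above is plain doubly-linked-list removal (illustrative):
//
//   next->prev = prev;
//   prev->next = next;
//
// with the stale next/prev pointers nulled out in debug builds only.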

void MacroAssembler::registerIterator(Register enumeratorsList, Register iter,
                                      Register temp) {
  // iter->next = list
  storePtr(enumeratorsList, Address(iter, NativeIterator::offsetOfNext()));

  // iter->prev = list->prev
  loadPtr(Address(enumeratorsList, NativeIterator::offsetOfPrev()), temp);
  storePtr(temp, Address(iter, NativeIterator::offsetOfPrev()));

  // list->prev->next = iter
  storePtr(iter, Address(temp, NativeIterator::offsetOfNext()));

  // list->prev = iter
  storePtr(iter, Address(enumeratorsList, NativeIterator::offsetOfPrev()));
}

void MacroAssembler::toHashableNonGCThing(ValueOperand value,
                                          ValueOperand result,
                                          FloatRegister tempFloat) {
  // Inline implementation of |HashableValue::setValue()|.

#ifdef DEBUG
  Label ok;
  branchTestGCThing(Assembler::NotEqual, value, &ok);
  assumeUnreachable("Unexpected GC thing");
  bind(&ok);
#endif

  Label useInput, done;
  branchTestDouble(Assembler::NotEqual, value, &useInput);
  {
    Register int32 = result.scratchReg();
    unboxDouble(value, tempFloat);

    // Normalize int32-valued doubles to int32 and negative zero to +0.
    Label canonicalize;
    convertDoubleToInt32(tempFloat, int32, &canonicalize, false);

    tagValue(JSVAL_TYPE_INT32, int32, result);
    jump(&done);

    bind(&canonicalize);
    {
      // Normalize the sign bit of a NaN.
      branchDouble(Assembler::DoubleOrdered, tempFloat, tempFloat, &useInput);
      moveValue(JS::NaNValue(), result);
      jump(&done);
    }
  }

  bind(&useInput);
  moveValue(value, result);

  bind(&done);
}

void MacroAssembler::toHashableValue(ValueOperand value, ValueOperand result,
                                     FloatRegister tempFloat,
                                     Label* atomizeString, Label* tagString) {
  // Inline implementation of |HashableValue::setValue()|.

  ScratchTagScope tag(*this, value);
  splitTagForTest(value, tag);

  Label notString, useInput, done;
  branchTestString(Assembler::NotEqual, tag, &notString);
  {
    ScratchTagScopeRelease _(&tag);

    Register str = result.scratchReg();
    unboxString(value, str);

    branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
                 Imm32(JSString::ATOM_BIT), &useInput);

    jump(atomizeString);
    bind(tagString);

    tagValue(JSVAL_TYPE_STRING, str, result);
    jump(&done);
  }
  bind(&notString);
  branchTestDouble(Assembler::NotEqual, tag, &useInput);
  {
    ScratchTagScopeRelease _(&tag);

    Register int32 = result.scratchReg();
    unboxDouble(value, tempFloat);

    Label canonicalize;
    convertDoubleToInt32(tempFloat, int32, &canonicalize, false);

    tagValue(JSVAL_TYPE_INT32, int32, result);
    jump(&done);

    bind(&canonicalize);
    {
      branchDouble(Assembler::DoubleOrdered, tempFloat, tempFloat, &useInput);
      moveValue(JS::NaNValue(), result);
      jump(&done);
    }
  }

  bind(&useInput);
  moveValue(value, result);

  bind(&done);
}

void MacroAssembler::scrambleHashCode(Register result) {
  // Inline implementation of |mozilla::ScrambleHashCode()|.

  mul32(Imm32(mozilla::kGoldenRatioU32), result);
}
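// Background note: mozilla::kGoldenRatioU32 is 0x9E3779B9 (2^32 divided by
// the golden ratio), so this single wrapping multiply spreads low-entropy
// hash codes into the high bits before they are truncated to a table index.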

void MacroAssembler::prepareHashNonGCThing(ValueOperand value, Register result,
                                           Register temp) {
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |mozilla::HashGeneric(v.asRawBits())|.

#ifdef DEBUG
  Label ok;
  branchTestGCThing(Assembler::NotEqual, value, &ok);
  assumeUnreachable("Unexpected GC thing");
  bind(&ok);
#endif

  // uint32_t v1 = static_cast<uint32_t>(aValue);
#ifdef JS_PUNBOX64
  move64To32(value.toRegister64(), result);
#else
  move32(value.payloadReg(), result);
#endif

  // uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32);
#ifdef JS_PUNBOX64
  auto r64 = Register64(temp);
  move64(value.toRegister64(), r64);
  rshift64Arithmetic(Imm32(32), r64);
#else
  move32(value.typeReg(), temp);
#endif

  // mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash) ^ aValue);
  // with |aHash = 0| and |aValue = v1|.
  mul32(Imm32(mozilla::kGoldenRatioU32), result);

  // mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash) ^ aValue);
  // with |aHash = <above hash>| and |aValue = v2|.
  rotateLeft(Imm32(5), result, result);
  xor32(temp, result);

  // Combine |mul32| and |scrambleHashCode| by directly multiplying with
  // |kGoldenRatioU32 * kGoldenRatioU32|.
  //
  // mul32(Imm32(mozilla::kGoldenRatioU32), result);
  //
  // scrambleHashCode(result);
  mul32(Imm32(mozilla::kGoldenRatioU32 * mozilla::kGoldenRatioU32), result);
}
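// Illustrative arithmetic note: folding the two multiplies is valid because
// multiplication is associative modulo 2^32:
//
//   (h * kGoldenRatioU32) * kGoldenRatioU32
//     == h * (kGoldenRatioU32 * kGoldenRatioU32)
//
// where the constant product is evaluated at compile time, also mod 2^32.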

void MacroAssembler::prepareHashString(Register str, Register result,
                                       Register temp) {
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |JSAtom::hash()|.

#ifdef DEBUG
  Label ok;
  branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
               Imm32(JSString::ATOM_BIT), &ok);
  assumeUnreachable("Unexpected non-atom string");
  bind(&ok);
#endif

#ifdef JS_64BIT
  static_assert(FatInlineAtom::offsetOfHash() == NormalAtom::offsetOfHash());
  load32(Address(str, NormalAtom::offsetOfHash()), result);
#else
  move32(Imm32(JSString::FAT_INLINE_MASK), temp);
  and32(Address(str, JSString::offsetOfFlags()), temp);

  // Set |result| to 1 for FatInlineAtoms.
  move32(Imm32(0), result);
  cmp32Set(Assembler::Equal, temp, Imm32(JSString::FAT_INLINE_MASK), result);

  // Use a computed load for branch-free code.

  static_assert(FatInlineAtom::offsetOfHash() > NormalAtom::offsetOfHash());

  constexpr size_t offsetDiff =
      FatInlineAtom::offsetOfHash() - NormalAtom::offsetOfHash();
  static_assert(mozilla::IsPowerOfTwo(offsetDiff));

  uint8_t shift = mozilla::FloorLog2Size(offsetDiff);
  if (IsShiftInScaleRange(shift)) {
    load32(
        BaseIndex(str, result, ShiftToScale(shift), NormalAtom::offsetOfHash()),
        result);
  } else {
    lshift32(Imm32(shift), result);
    load32(BaseIndex(str, result, TimesOne, NormalAtom::offsetOfHash()),
           result);
  }
#endif

  scrambleHashCode(result);
}

void MacroAssembler::prepareHashSymbol(Register sym, Register result) {
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |Symbol::hash()|.

  load32(Address(sym, JS::Symbol::offsetOfHash()), result);

  scrambleHashCode(result);
}

void MacroAssembler::prepareHashBigInt(Register bigInt, Register result,
                                       Register temp1, Register temp2,
                                       Register temp3) {
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |BigInt::hash()|.

  // Inline implementation of |mozilla::AddU32ToHash()|.
  auto addU32ToHash = [&](auto toAdd) {
    rotateLeft(Imm32(5), result, result);
    xor32(toAdd, result);
    mul32(Imm32(mozilla::kGoldenRatioU32), result);
  };

  move32(Imm32(0), result);

  // Inline |mozilla::HashBytes()|.

  load32(Address(bigInt, BigInt::offsetOfLength()), temp1);
  loadBigIntDigits(bigInt, temp2);

  Label start, loop;
  jump(&start);
  bind(&loop);

  {
    // Compute |AddToHash(AddToHash(hash, data), sizeof(Digit))|.
#if defined(JS_CODEGEN_MIPS64)
    // Hash the lower 32-bits.
    addU32ToHash(Address(temp2, 0));

    // Hash the upper 32-bits.
    addU32ToHash(Address(temp2, sizeof(int32_t)));
#elif JS_PUNBOX64
    // Use a single 64-bit load on non-MIPS64 platforms.
    loadPtr(Address(temp2, 0), temp3);

    // Hash the lower 32-bits.
    addU32ToHash(temp3);

    // Hash the upper 32-bits.
    rshiftPtr(Imm32(32), temp3);
    addU32ToHash(temp3);
#else
    addU32ToHash(Address(temp2, 0));
#endif
  }
  addPtr(Imm32(sizeof(BigInt::Digit)), temp2);

  bind(&start);
  branchSub32(Assembler::NotSigned, Imm32(1), temp1, &loop);

  // Compute |mozilla::AddToHash(h, isNegative())|.
  {
    static_assert(mozilla::IsPowerOfTwo(BigInt::signBitMask()));

    load32(Address(bigInt, BigInt::offsetOfFlags()), temp1);
    and32(Imm32(BigInt::signBitMask()), temp1);
    rshift32(Imm32(mozilla::FloorLog2(BigInt::signBitMask())), temp1);

    addU32ToHash(temp1);
  }

  scrambleHashCode(result);
}
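// Illustrative loop-shape note: |temp1| starts at the digit count, and
// branchSub32(NotSigned, ...) keeps looping while the decremented count is
// still non-negative, so an n-digit BigInt hashes exactly n digits; the
// initial jump(&start) lets a zero-digit BigInt skip the body entirely.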

void MacroAssembler::prepareHashObject(Register setObj, ValueOperand value,
                                       Register result, Register temp1,
                                       Register temp2, Register temp3,
                                       Register temp4) {
#ifdef JS_PUNBOX64
  // Inline implementation of |OrderedHashTable::prepareHash()| and
  // |HashCodeScrambler::scramble(v.asRawBits())|.

  // Load the |ValueSet| or |ValueMap|.
  static_assert(SetObject::getDataSlotOffset() ==
                MapObject::getDataSlotOffset());
  loadPrivate(Address(setObj, SetObject::getDataSlotOffset()), temp1);

  // Load |HashCodeScrambler::mK0| and |HashCodeScrambler::mK1|.
  static_assert(ValueSet::offsetOfImplHcsK0() == ValueMap::offsetOfImplHcsK0());
  static_assert(ValueSet::offsetOfImplHcsK1() == ValueMap::offsetOfImplHcsK1());
  auto k0 = Register64(temp1);
  auto k1 = Register64(temp2);
  load64(Address(temp1, ValueSet::offsetOfImplHcsK1()), k1);
  load64(Address(temp1, ValueSet::offsetOfImplHcsK0()), k0);

  // Hash numbers are 32-bit values, so only hash the lower double-word.
  static_assert(sizeof(mozilla::HashNumber) == 4);
  move32To64ZeroExtend(value.valueReg(), Register64(result));

  // Inline implementation of |SipHasher::sipHash()|.
  auto m = Register64(result);
  auto v0 = Register64(temp3);
  auto v1 = Register64(temp4);
  auto v2 = k0;
  auto v3 = k1;

  auto sipRound = [&]() {
    // mV0 = WrappingAdd(mV0, mV1);
    add64(v1, v0);

    // mV1 = RotateLeft(mV1, 13);
    rotateLeft64(Imm32(13), v1, v1, InvalidReg);

    // mV1 ^= mV0;
    xor64(v0, v1);

    // mV0 = RotateLeft(mV0, 32);
    rotateLeft64(Imm32(32), v0, v0, InvalidReg);

    // mV2 = WrappingAdd(mV2, mV3);
    add64(v3, v2);

    // mV3 = RotateLeft(mV3, 16);
    rotateLeft64(Imm32(16), v3, v3, InvalidReg);

    // mV3 ^= mV2;
    xor64(v2, v3);

    // mV0 = WrappingAdd(mV0, mV3);
    add64(v3, v0);

    // mV3 = RotateLeft(mV3, 21);
    rotateLeft64(Imm32(21), v3, v3, InvalidReg);

    // mV3 ^= mV0;
    xor64(v0, v3);

    // mV2 = WrappingAdd(mV2, mV1);
    add64(v1, v2);

    // mV1 = RotateLeft(mV1, 17);
    rotateLeft64(Imm32(17), v1, v1, InvalidReg);

    // mV1 ^= mV2;
    xor64(v2, v1);

    // mV2 = RotateLeft(mV2, 32);
    rotateLeft64(Imm32(32), v2, v2, InvalidReg);
  };

  // 1. Initialization.
  // mV0 = aK0 ^ UINT64_C(0x736f6d6570736575);
  move64(Imm64(0x736f6d6570736575), v0);
  xor64(k0, v0);

  // mV1 = aK1 ^ UINT64_C(0x646f72616e646f6d);
  move64(Imm64(0x646f72616e646f6d), v1);
  xor64(k1, v1);

  // mV2 = aK0 ^ UINT64_C(0x6c7967656e657261);
  MOZ_ASSERT(v2 == k0);
  xor64(Imm64(0x6c7967656e657261), v2);

  // mV3 = aK1 ^ UINT64_C(0x7465646279746573);
  MOZ_ASSERT(v3 == k1);
  xor64(Imm64(0x7465646279746573), v3);

  // 2. Compression.
  // mV3 ^= aM;
  xor64(m, v3);

  // sipRound();
  sipRound();

  // mV0 ^= aM;
  xor64(m, v0);

  // 3. Finalization.
  // mV2 ^= 0xff;
  xor64(Imm64(0xff), v2);

  // for (int i = 0; i < 3; i++) sipRound();
  for (int i = 0; i < 3; i++) {
    sipRound();
  }

  // return mV0 ^ mV1 ^ mV2 ^ mV3;
  xor64(v1, v0);
  xor64(v3, v2);
  xor64(v2, v0);

  move64To32(v0, result);

  scrambleHashCode(result);
#else
  MOZ_CRASH("Not implemented");
#endif
}
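// Background note: the rounds above implement SipHash-1-3 (one compression
// round, three finalization rounds) over a single 64-bit message word, as in
// mozilla::HashCodeScrambler; the four 64-bit constants are the standard
// SipHash initialization string "somepseudorandomlygeneratedbytes".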

void MacroAssembler::prepareHashValue(Register setObj, ValueOperand value,
                                      Register result, Register temp1,
                                      Register temp2, Register temp3,
                                      Register temp4) {
  Label isString, isObject, isSymbol, isBigInt;
  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    branchTestString(Assembler::Equal, tag, &isString);
    branchTestObject(Assembler::Equal, tag, &isObject);
    branchTestSymbol(Assembler::Equal, tag, &isSymbol);
    branchTestBigInt(Assembler::Equal, tag, &isBigInt);
  }

  Label done;
  {
    prepareHashNonGCThing(value, result, temp1);
    jump(&done);
  }
  bind(&isString);
  {
    unboxString(value, temp1);
    prepareHashString(temp1, result, temp2);
    jump(&done);
  }
  bind(&isObject);
  {
    prepareHashObject(setObj, value, result, temp1, temp2, temp3, temp4);
    jump(&done);
  }
  bind(&isSymbol);
  {
    unboxSymbol(value, temp1);
    prepareHashSymbol(temp1, result);
    jump(&done);
  }
  bind(&isBigInt);
  {
    unboxBigInt(value, temp1);
    prepareHashBigInt(temp1, result, temp2, temp3, temp4);

    // Fallthrough to |done|.
  }

  bind(&done);
}

template <typename OrderedHashTable>
void MacroAssembler::orderedHashTableLookup(Register setOrMapObj,
                                            ValueOperand value, Register hash,
                                            Register entryTemp, Register temp1,
                                            Register temp2, Register temp3,
                                            Register temp4, Label* found,
                                            IsBigInt isBigInt) {
  // Inline implementation of |OrderedHashTable::lookup()|.

  MOZ_ASSERT_IF(isBigInt == IsBigInt::No, temp3 == InvalidReg);
  MOZ_ASSERT_IF(isBigInt == IsBigInt::No, temp4 == InvalidReg);

#ifdef DEBUG
  Label ok;
  if (isBigInt == IsBigInt::No) {
    branchTestBigInt(Assembler::NotEqual, value, &ok);
    assumeUnreachable("Unexpected BigInt");
  } else if (isBigInt == IsBigInt::Yes) {
    branchTestBigInt(Assembler::Equal, value, &ok);
    assumeUnreachable("Unexpected non-BigInt");
  }
  bind(&ok);
#endif

#ifdef DEBUG
  PushRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));

  pushValue(value);
  moveStackPtrTo(temp2);

  setupUnalignedABICall(temp1);
  loadJSContext(temp1);
  passABIArg(temp1);
  passABIArg(setOrMapObj);
  passABIArg(temp2);
  passABIArg(hash);

  if constexpr (std::is_same_v<OrderedHashTable, ValueSet>) {
    using Fn =
        void (*)(JSContext*, SetObject*, const Value*, mozilla::HashNumber);
    callWithABI<Fn, jit::AssertSetObjectHash>();
  } else {
    using Fn =
        void (*)(JSContext*, MapObject*, const Value*, mozilla::HashNumber);
    callWithABI<Fn, jit::AssertMapObjectHash>();
  }

  popValue(value);
  PopRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
#endif

  // Load the |ValueSet| or |ValueMap|.
  static_assert(SetObject::getDataSlotOffset() ==
                MapObject::getDataSlotOffset());
  loadPrivate(Address(setOrMapObj, SetObject::getDataSlotOffset()), temp1);

  // Compute the bucket for |hash|.
  move32(hash, entryTemp);
  load32(Address(temp1, OrderedHashTable::offsetOfImplHashShift()), temp2);
  flexibleRshift32(temp2, entryTemp);

  loadPtr(Address(temp1, OrderedHashTable::offsetOfImplHashTable()), temp2);
  loadPtr(BaseIndex(temp2, entryTemp, ScalePointer), entryTemp);

  // Search for a match in this bucket.
  Label loop;
  bind(&loop);
  {
    // Inline implementation of |HashableValue::operator==|.

    static_assert(OrderedHashTable::offsetOfImplDataElement() == 0,
                  "offsetof(Data, element) is 0");
    auto keyAddr = Address(entryTemp, OrderedHashTable::offsetOfEntryKey());

    if (isBigInt == IsBigInt::No) {
      // Two HashableValues are equal if they have equal bits.
      branch64(Assembler::Equal, keyAddr, value.toRegister64(), found);
    } else {
#ifdef JS_PUNBOX64
      auto key = ValueOperand(temp1);
#else
      auto key = ValueOperand(temp1, temp2);
#endif

      loadValue(keyAddr, key);

      // Two HashableValues are equal if they have equal bits.
      branch64(Assembler::Equal, key.toRegister64(), value.toRegister64(),
               found);

      // BigInt values are considered equal if they represent the same
      // mathematical value.
      Label next;
      fallibleUnboxBigInt(key, temp2, &next);
      if (isBigInt == IsBigInt::Yes) {
        unboxBigInt(value, temp1);
      } else {
        fallibleUnboxBigInt(value, temp1, &next);
      }
      equalBigInts(temp1, temp2, temp3, temp4, temp1, temp2, &next, &next,
                   &next);
      jump(found);
      bind(&next);
    }
  }
  loadPtr(Address(entryTemp, OrderedHashTable::offsetOfImplDataChain()),
          entryTemp);
  branchTestPtr(Assembler::NonZero, entryTemp, entryTemp, &loop);
}
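// Illustrative bucket-selection note: the table stores |hashShift| so that
//
//   bucket = hash >> hashShift;
//
// maps the high bits of the 32-bit hash onto the power-of-two hash table,
// and each bucket is a chain of entries linked through the Data chain
// pointer walked by the loop above.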

void MacroAssembler::setObjectHas(Register setObj, ValueOperand value,
                                  Register hash, Register result,
                                  Register temp1, Register temp2,
                                  Register temp3, Register temp4,
                                  IsBigInt isBigInt) {
  Label found;
  orderedHashTableLookup<ValueSet>(setObj, value, hash, result, temp1, temp2,
                                   temp3, temp4, &found, isBigInt);

  Label done;
  move32(Imm32(0), result);
  jump(&done);

  bind(&found);
  move32(Imm32(1), result);
  bind(&done);
}

void MacroAssembler::mapObjectHas(Register mapObj, ValueOperand value,
                                  Register hash, Register result,
                                  Register temp1, Register temp2,
                                  Register temp3, Register temp4,
                                  IsBigInt isBigInt) {
  Label found;
  orderedHashTableLookup<ValueMap>(mapObj, value, hash, result, temp1, temp2,
                                   temp3, temp4, &found, isBigInt);

  Label done;
  move32(Imm32(0), result);
  jump(&done);

  bind(&found);
  move32(Imm32(1), result);
  bind(&done);
}

void MacroAssembler::mapObjectGet(Register mapObj, ValueOperand value,
                                  Register hash, ValueOperand result,
                                  Register temp1, Register temp2,
                                  Register temp3, Register temp4,
                                  Register temp5, IsBigInt isBigInt) {
  Label found;
  orderedHashTableLookup<ValueMap>(mapObj, value, hash, temp1, temp2, temp3,
                                   temp4, temp5, &found, isBigInt);

  Label done;
  moveValue(UndefinedValue(), result);
  jump(&done);

  // |temp1| holds the found entry.
  bind(&found);
  loadValue(Address(temp1, ValueMap::Entry::offsetOfValue()), result);

  bind(&done);
}

template <typename OrderedHashTable>
void MacroAssembler::loadOrderedHashTableCount(Register setOrMapObj,
                                               Register result) {
  // Inline implementation of |OrderedHashTable::count()|.

  // Load the |ValueSet| or |ValueMap|.
  static_assert(SetObject::getDataSlotOffset() ==
                MapObject::getDataSlotOffset());
  loadPrivate(Address(setOrMapObj, SetObject::getDataSlotOffset()), result);

  // Load the live count.
  load32(Address(result, OrderedHashTable::offsetOfImplLiveCount()), result);
}

void MacroAssembler::loadSetObjectSize(Register setObj, Register result) {
  loadOrderedHashTableCount<ValueSet>(setObj, result);
}

void MacroAssembler::loadMapObjectSize(Register mapObj, Register result) {
  loadOrderedHashTableCount<ValueMap>(mapObj, result);
}

// Can't push large frames blindly on windows, so we must touch frame memory
// incrementally, with no more than 4096 - 1 bytes between touches.
//
// This is used across all platforms for simplicity.
void MacroAssembler::touchFrameValues(Register numStackValues,
                                      Register scratch1, Register scratch2) {
  const size_t FRAME_TOUCH_INCREMENT = 2048;
  static_assert(FRAME_TOUCH_INCREMENT < 4096 - 1,
                "Frame increment is too large");

  moveStackPtrTo(scratch2);

  mov(numStackValues, scratch1);
  lshiftPtr(Imm32(3), scratch1);
  {
    // Note: this loop needs to update the stack pointer register because older
    // Linux kernels check the distance between the touched address and RSP.
    // See bug 1839669 comment 47.
    Label touchFrameLoop;
    Label touchFrameLoopEnd;
    bind(&touchFrameLoop);
    branchSub32(Assembler::Signed, Imm32(FRAME_TOUCH_INCREMENT), scratch1,
                &touchFrameLoopEnd);
    subFromStackPtr(Imm32(FRAME_TOUCH_INCREMENT));
    store32(Imm32(0), Address(getStackPointer(), 0));
    jump(&touchFrameLoop);
    bind(&touchFrameLoopEnd);
  }

  moveToStackPtr(scratch2);
}
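// Worked example (illustrative): numStackValues == 1000 gives a frame of
// 1000 << 3 == 8000 bytes; the loop touches one word every 2048 bytes while
// dragging the stack pointer along, keeping successive touches within one
// 4096-byte guard page of each other.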

namespace js {
namespace jit {

#ifdef DEBUG
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(
    MacroAssembler& masm, RegisterType reg)
    : RegisterType(reg), masm_(masm), released_(false) {
  masm.debugTrackedRegisters_.add(reg);
}

template AutoGenericRegisterScope<Register>::AutoGenericRegisterScope(
    MacroAssembler& masm, Register reg);
template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(
    MacroAssembler& masm, FloatRegister reg);
#endif  // DEBUG

#ifdef DEBUG
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope() {
  if (!released_) {
    release();
  }
}

template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();

template <class RegisterType>
void AutoGenericRegisterScope<RegisterType>::release() {
  MOZ_ASSERT(!released_);
  released_ = true;
  const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
  masm_.debugTrackedRegisters_.take(reg);
}

template void AutoGenericRegisterScope<Register>::release();
template void AutoGenericRegisterScope<FloatRegister>::release();

template <class RegisterType>
void AutoGenericRegisterScope<RegisterType>::reacquire() {
  MOZ_ASSERT(released_);
  released_ = false;
  const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
  masm_.debugTrackedRegisters_.add(reg);
}

template void AutoGenericRegisterScope<Register>::reacquire();
template void AutoGenericRegisterScope<FloatRegister>::reacquire();
#endif  // DEBUG

}  // namespace jit
}  // namespace js