Bug 1837620 - Part 6: Make edges for multiple shape guard weak too r=sfink
[gecko.git] js/src/jit/MacroAssembler.cpp (blob 2883deac4d3adb9382a298334bbbb51a157c53b0)
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/MacroAssembler-inl.h"
9 #include "mozilla/FloatingPoint.h"
10 #include "mozilla/MathAlgorithms.h"
11 #include "mozilla/XorShift128PlusRNG.h"
13 #include <algorithm>
14 #include <utility>
16 #include "jit/AtomicOp.h"
17 #include "jit/AtomicOperations.h"
18 #include "jit/Bailouts.h"
19 #include "jit/BaselineFrame.h"
20 #include "jit/BaselineJIT.h"
21 #include "jit/JitFrames.h"
22 #include "jit/JitOptions.h"
23 #include "jit/JitRuntime.h"
24 #include "jit/JitScript.h"
25 #include "jit/MoveEmitter.h"
26 #include "jit/ReciprocalMulConstants.h"
27 #include "jit/SharedICHelpers.h"
28 #include "jit/SharedICRegisters.h"
29 #include "jit/Simulator.h"
30 #include "jit/VMFunctions.h"
31 #include "js/Conversions.h"
32 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
33 #include "js/ScalarType.h" // js::Scalar::Type
34 #include "vm/ArgumentsObject.h"
35 #include "vm/ArrayBufferViewObject.h"
36 #include "vm/BoundFunctionObject.h"
37 #include "vm/FunctionFlags.h" // js::FunctionFlags
38 #include "vm/Iteration.h"
39 #include "vm/JSContext.h"
40 #include "vm/TypedArrayObject.h"
41 #include "wasm/WasmBuiltins.h"
42 #include "wasm/WasmCodegenConstants.h"
43 #include "wasm/WasmCodegenTypes.h"
44 #include "wasm/WasmGcObject.h"
45 #include "wasm/WasmInstanceData.h"
46 #include "wasm/WasmMemory.h"
47 #include "wasm/WasmTypeDef.h"
48 #include "wasm/WasmValidate.h"
50 #include "jit/TemplateObject-inl.h"
51 #include "vm/BytecodeUtil-inl.h"
52 #include "vm/Interpreter-inl.h"
53 #include "vm/JSObject-inl.h"
55 using namespace js;
56 using namespace js::jit;
58 using JS::GenericNaN;
59 using JS::ToInt32;
61 using mozilla::CheckedInt;
63 TrampolinePtr MacroAssembler::preBarrierTrampoline(MIRType type) {
64 const JitRuntime* rt = runtime()->jitRuntime();
65 return rt->preBarrier(type);
68 template <typename S, typename T>
69 static void StoreToTypedFloatArray(MacroAssembler& masm, int arrayType,
70 const S& value, const T& dest) {
71 switch (arrayType) {
72 case Scalar::Float32:
73 masm.storeFloat32(value, dest);
74 break;
75 case Scalar::Float64:
76 masm.storeDouble(value, dest);
77 break;
78 default:
79 MOZ_CRASH("Invalid typed array type");
83 void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
84 FloatRegister value,
85 const BaseIndex& dest) {
86 StoreToTypedFloatArray(*this, arrayType, value, dest);
88 void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
89 FloatRegister value,
90 const Address& dest) {
91 StoreToTypedFloatArray(*this, arrayType, value, dest);
94 template <typename S, typename T>
95 static void StoreToTypedBigIntArray(MacroAssembler& masm,
96 Scalar::Type arrayType, const S& value,
97 const T& dest) {
98 MOZ_ASSERT(Scalar::isBigIntType(arrayType));
99 masm.store64(value, dest);
102 void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
103 Register64 value,
104 const BaseIndex& dest) {
105 StoreToTypedBigIntArray(*this, arrayType, value, dest);
107 void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
108 Register64 value,
109 const Address& dest) {
110 StoreToTypedBigIntArray(*this, arrayType, value, dest);
113 void MacroAssembler::boxUint32(Register source, ValueOperand dest,
114 Uint32Mode mode, Label* fail) {
115 switch (mode) {
116 // Fail if the value does not fit in an int32.
117 case Uint32Mode::FailOnDouble: {
118 branchTest32(Assembler::Signed, source, source, fail);
119 tagValue(JSVAL_TYPE_INT32, source, dest);
120 break;
122 case Uint32Mode::ForceDouble: {
123 // Always convert the value to double.
124 ScratchDoubleScope fpscratch(*this);
125 convertUInt32ToDouble(source, fpscratch);
126 boxDouble(fpscratch, dest, fpscratch);
127 break;
132 template <typename T>
133 void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
134 AnyRegister dest, Register temp,
135 Label* fail) {
136 switch (arrayType) {
137 case Scalar::Int8:
138 load8SignExtend(src, dest.gpr());
139 break;
140 case Scalar::Uint8:
141 case Scalar::Uint8Clamped:
142 load8ZeroExtend(src, dest.gpr());
143 break;
144 case Scalar::Int16:
145 load16SignExtend(src, dest.gpr());
146 break;
147 case Scalar::Uint16:
148 load16ZeroExtend(src, dest.gpr());
149 break;
150 case Scalar::Int32:
151 load32(src, dest.gpr());
152 break;
153 case Scalar::Uint32:
154 if (dest.isFloat()) {
155 load32(src, temp);
156 convertUInt32ToDouble(temp, dest.fpu());
157 } else {
158 load32(src, dest.gpr());
160 // Bail out if the value doesn't fit into a signed int32 value. This
161 // is what allows MLoadUnboxedScalar to have a type() of
162 // MIRType::Int32 for UInt32 array loads.
163 branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
165 break;
166 case Scalar::Float32:
167 loadFloat32(src, dest.fpu());
168 canonicalizeFloat(dest.fpu());
169 break;
170 case Scalar::Float64:
171 loadDouble(src, dest.fpu());
172 canonicalizeDouble(dest.fpu());
173 break;
174 case Scalar::BigInt64:
175 case Scalar::BigUint64:
176 default:
177 MOZ_CRASH("Invalid typed array type");
181 template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
182 const Address& src,
183 AnyRegister dest,
184 Register temp, Label* fail);
185 template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
186 const BaseIndex& src,
187 AnyRegister dest,
188 Register temp, Label* fail);
190 template <typename T>
191 void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
192 const ValueOperand& dest,
193 Uint32Mode uint32Mode, Register temp,
194 Label* fail) {
195 switch (arrayType) {
196 case Scalar::Int8:
197 case Scalar::Uint8:
198 case Scalar::Uint8Clamped:
199 case Scalar::Int16:
200 case Scalar::Uint16:
201 case Scalar::Int32:
202 loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
203 InvalidReg, nullptr);
204 tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
205 break;
206 case Scalar::Uint32:
207 // Don't clobber dest when we could fail; instead use temp.
208 load32(src, temp);
209 boxUint32(temp, dest, uint32Mode, fail);
210 break;
211 case Scalar::Float32: {
212 ScratchDoubleScope dscratch(*this);
213 FloatRegister fscratch = dscratch.asSingle();
214 loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
215 dest.scratchReg(), nullptr);
216 convertFloat32ToDouble(fscratch, dscratch);
217 boxDouble(dscratch, dest, dscratch);
218 break;
220 case Scalar::Float64: {
221 ScratchDoubleScope fpscratch(*this);
222 loadFromTypedArray(arrayType, src, AnyRegister(fpscratch),
223 dest.scratchReg(), nullptr);
224 boxDouble(fpscratch, dest, fpscratch);
225 break;
227 case Scalar::BigInt64:
228 case Scalar::BigUint64:
229 default:
230 MOZ_CRASH("Invalid typed array type");
234 template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
235 const Address& src,
236 const ValueOperand& dest,
237 Uint32Mode uint32Mode,
238 Register temp, Label* fail);
239 template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
240 const BaseIndex& src,
241 const ValueOperand& dest,
242 Uint32Mode uint32Mode,
243 Register temp, Label* fail);
245 template <typename T>
246 void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
247 const T& src, Register bigInt,
248 Register64 temp) {
249 MOZ_ASSERT(Scalar::isBigIntType(arrayType));
251 load64(src, temp);
252 initializeBigInt64(arrayType, bigInt, temp);
255 template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
256 const Address& src,
257 Register bigInt,
258 Register64 temp);
259 template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
260 const BaseIndex& src,
261 Register bigInt,
262 Register64 temp);
264 // Inlined version of gc::CheckAllocatorState that checks the bare essentials
265 // and bails for anything that cannot be handled with our jit allocators.
266 void MacroAssembler::checkAllocatorState(Label* fail) {
267 // Don't execute the inline path if GC probes are built in.
268 #ifdef JS_GC_PROBES
269 jump(fail);
270 #endif
272 #ifdef JS_GC_ZEAL
273 // Don't execute the inline path if gc zeal or tracing are active.
274 const uint32_t* ptrZealModeBits = runtime()->addressOfGCZealModeBits();
275 branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
276 fail);
277 #endif
279 // Don't execute the inline path if the realm has an object metadata callback,
280 // as the metadata to use for the object may vary between executions of the
281 // op.
282 if (realm()->hasAllocationMetadataBuilder()) {
283 jump(fail);
287 bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
288 gc::Heap initialHeap) {
289 // Note that Ion elides barriers on writes to objects known to be in the
290 // nursery, so any allocation that can be made into the nursery must be made
291 // into the nursery, even if the nursery is disabled. At runtime these will
292 // take the out-of-line path, which is required to insert a barrier for the
293 // initializing writes.
294 return IsNurseryAllocable(allocKind) && initialHeap != gc::Heap::Tenured;
297 // Inline version of Nursery::allocateObject. If the object has dynamic slots,
298 // this fills in the slots_ pointer.
299 void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
300 gc::AllocKind allocKind,
301 size_t nDynamicSlots, Label* fail,
302 const AllocSiteInput& allocSite) {
303 MOZ_ASSERT(IsNurseryAllocable(allocKind));
305 // Currently the JIT does not nursery allocate foreground finalized
306 // objects. This is allowed for objects that support this and have the
307 // JSCLASS_SKIP_NURSERY_FINALIZE class flag set. It's hard to assert that here,
308 // though, so disallow all foreground finalized objects for now.
309 MOZ_ASSERT(!IsForegroundFinalized(allocKind));
311 // We still need to allocate in the nursery, per the comment in
312 // shouldNurseryAllocate; however, we need to insert into the
313 // mallocedBuffers set, so bail to do the nursery allocation in the
314 // interpreter.
315 if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
316 jump(fail);
317 return;
320 // Check whether this allocation site needs pretenuring. This dynamic check
321 // only happens for baseline code.
322 if (allocSite.is<Register>()) {
323 Register site = allocSite.as<Register>();
324 branchTestPtr(Assembler::NonZero,
325 Address(site, gc::AllocSite::offsetOfScriptAndState()),
326 Imm32(gc::AllocSite::LONG_LIVED_BIT), fail);
329 // No explicit check for nursery.isEnabled() is needed, as the comparison
330 // with the nursery's end will always fail in such cases.
331 CompileZone* zone = realm()->zone();
332 size_t thingSize = gc::Arena::thingSize(allocKind);
333 size_t totalSize = thingSize;
334 if (nDynamicSlots) {
335 totalSize += ObjectSlots::allocSize(nDynamicSlots);
337 MOZ_ASSERT(totalSize < INT32_MAX);
338 MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
340 bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::Object,
341 totalSize, allocSite);
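// If dynamic slots were requested, they are part of this same nursery
// allocation: the ObjectSlots header starts directly after the object, and the
// object's slots_ pointer is set to the first slot following that header.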
343 if (nDynamicSlots) {
344 store32(Imm32(nDynamicSlots),
345 Address(result, thingSize + ObjectSlots::offsetOfCapacity()));
346 store32(
347 Imm32(0),
348 Address(result, thingSize + ObjectSlots::offsetOfDictionarySlotSpan()));
349 store64(Imm64(ObjectSlots::NoUniqueIdInDynamicSlots),
350 Address(result, thingSize + ObjectSlots::offsetOfMaybeUniqueId()));
351 computeEffectiveAddress(
352 Address(result, thingSize + ObjectSlots::offsetOfSlots()), temp);
353 storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
357 // Inlined version of FreeSpan::allocate. This does not fill in slots_.
358 void MacroAssembler::freeListAllocate(Register result, Register temp,
359 gc::AllocKind allocKind, Label* fail) {
360 CompileZone* zone = realm()->zone();
361 int thingSize = int(gc::Arena::thingSize(allocKind));
363 Label fallback;
364 Label success;
366 // Load the first and last offsets of |zone|'s free list for |allocKind|.
367 // If there is no room remaining in the span, fall back to get the next one.
368 gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
369 loadPtr(AbsoluteAddress(ptrFreeList), temp);
370 load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
371 load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
372 branch32(Assembler::AboveOrEqual, result, temp, &fallback);
374 // Bump the offset for the next allocation.
375 add32(Imm32(thingSize), result);
376 loadPtr(AbsoluteAddress(ptrFreeList), temp);
377 store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
378 sub32(Imm32(thingSize), result);
379 addPtr(temp, result); // Turn the offset into a pointer.
380 jump(&success);
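// The fallback below handles a span with at most one free cell remaining: it
// either bails out (a first offset of zero means no cells are left) or
// allocates the final cell and advances the free list to the next span.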
382 bind(&fallback);
383 // If there are no free spans left, we bail to finish the allocation. The
384 // interpreter will call the GC allocator to set up a new arena to allocate
385 // from, after which we can resume allocating in the jit.
386 branchTest32(Assembler::Zero, result, result, fail);
387 loadPtr(AbsoluteAddress(ptrFreeList), temp);
388 addPtr(temp, result); // Turn the offset into a pointer.
389 Push(result);
390 // Update the free list to point to the next span (which may be empty).
391 load32(Address(result, 0), result);
392 store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
393 Pop(result);
395 bind(&success);
397 if (runtime()->geckoProfiler().enabled()) {
398 uint32_t* countAddress = zone->addressOfTenuredAllocCount();
399 movePtr(ImmPtr(countAddress), temp);
400 add32(Imm32(1), Address(temp, 0));
404 void MacroAssembler::callFreeStub(Register slots) {
405 // This register must match the one in JitRuntime::generateFreeStub.
406 const Register regSlots = CallTempReg0;
408 push(regSlots);
409 movePtr(slots, regSlots);
410 call(runtime()->jitRuntime()->freeStub());
411 pop(regSlots);
414 // Inlined equivalent of gc::AllocateObject, without failure case handling.
415 void MacroAssembler::allocateObject(Register result, Register temp,
416 gc::AllocKind allocKind,
417 uint32_t nDynamicSlots,
418 gc::Heap initialHeap, Label* fail,
419 const AllocSiteInput& allocSite) {
420 MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
422 checkAllocatorState(fail);
424 if (shouldNurseryAllocate(allocKind, initialHeap)) {
425 MOZ_ASSERT(initialHeap == gc::Heap::Default);
426 return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail,
427 allocSite);
430 // Fall back to calling into the VM to allocate objects in the tenured heap
431 // that have dynamic slots.
432 if (nDynamicSlots) {
433 jump(fail);
434 return;
437 return freeListAllocate(result, temp, allocKind, fail);
440 void MacroAssembler::createGCObject(Register obj, Register temp,
441 const TemplateObject& templateObj,
442 gc::Heap initialHeap, Label* fail,
443 bool initContents /* = true */) {
444 gc::AllocKind allocKind = templateObj.getAllocKind();
445 MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
447 uint32_t nDynamicSlots = 0;
448 if (templateObj.isNativeObject()) {
449 const TemplateNativeObject& ntemplate =
450 templateObj.asTemplateNativeObject();
451 nDynamicSlots = ntemplate.numDynamicSlots();
454 allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
455 initGCThing(obj, temp, templateObj, initContents);
458 void MacroAssembler::createPlainGCObject(
459 Register result, Register shape, Register temp, Register temp2,
460 uint32_t numFixedSlots, uint32_t numDynamicSlots, gc::AllocKind allocKind,
461 gc::Heap initialHeap, Label* fail, const AllocSiteInput& allocSite,
462 bool initContents /* = true */) {
463 MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
464 MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");
466 // Allocate object.
467 allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
468 allocSite);
470 // Initialize shape field.
471 storePtr(shape, Address(result, JSObject::offsetOfShape()));
473 // If the object has dynamic slots, allocateObject will initialize
474 // the slots field. If not, we must initialize it now.
475 if (numDynamicSlots == 0) {
476 storePtr(ImmPtr(emptyObjectSlots),
477 Address(result, NativeObject::offsetOfSlots()));
480 // Initialize elements field.
481 storePtr(ImmPtr(emptyObjectElements),
482 Address(result, NativeObject::offsetOfElements()));
484 // Initialize fixed slots.
485 if (initContents) {
486 fillSlotsWithUndefined(Address(result, NativeObject::getFixedSlotOffset(0)),
487 temp, 0, numFixedSlots);
490 // Initialize dynamic slots.
491 if (numDynamicSlots > 0) {
492 loadPtr(Address(result, NativeObject::offsetOfSlots()), temp2);
493 fillSlotsWithUndefined(Address(temp2, 0), temp, 0, numDynamicSlots);
497 void MacroAssembler::createArrayWithFixedElements(
498 Register result, Register shape, Register temp, uint32_t arrayLength,
499 uint32_t arrayCapacity, gc::AllocKind allocKind, gc::Heap initialHeap,
500 Label* fail, const AllocSiteInput& allocSite) {
501 MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
502 MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");
503 MOZ_ASSERT(result != temp);
505 // This only supports allocating arrays with fixed elements and does not
506 // support any dynamic slots or elements.
507 MOZ_ASSERT(arrayCapacity >= arrayLength);
508 MOZ_ASSERT(gc::GetGCKindSlots(allocKind) >=
509 arrayCapacity + ObjectElements::VALUES_PER_HEADER);
511 // Allocate object.
512 allocateObject(result, temp, allocKind, 0, initialHeap, fail, allocSite);
514 // Initialize shape field.
515 storePtr(shape, Address(result, JSObject::offsetOfShape()));
517 // There are no dynamic slots.
518 storePtr(ImmPtr(emptyObjectSlots),
519 Address(result, NativeObject::offsetOfSlots()));
521 // Initialize elements pointer for fixed (inline) elements.
522 computeEffectiveAddress(
523 Address(result, NativeObject::offsetOfFixedElements()), temp);
524 storePtr(temp, Address(result, NativeObject::offsetOfElements()));
526 // Initialize elements header.
527 store32(Imm32(ObjectElements::FIXED),
528 Address(temp, ObjectElements::offsetOfFlags()));
529 store32(Imm32(0), Address(temp, ObjectElements::offsetOfInitializedLength()));
530 store32(Imm32(arrayCapacity),
531 Address(temp, ObjectElements::offsetOfCapacity()));
532 store32(Imm32(arrayLength), Address(temp, ObjectElements::offsetOfLength()));
535 // Inline version of Nursery::allocateString.
536 void MacroAssembler::nurseryAllocateString(Register result, Register temp,
537 gc::AllocKind allocKind,
538 Label* fail) {
539 MOZ_ASSERT(IsNurseryAllocable(allocKind));
541 // No explicit check for nursery.isEnabled() is needed, as the comparison
542 // with the nursery's end will always fail in such cases.
544 CompileZone* zone = realm()->zone();
545 size_t thingSize = gc::Arena::thingSize(allocKind);
546 bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::String,
547 thingSize);
550 // Inline version of Nursery::allocateBigInt.
551 void MacroAssembler::nurseryAllocateBigInt(Register result, Register temp,
552 Label* fail) {
553 MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT));
555 // No explicit check for nursery.isEnabled() is needed, as the comparison
556 // with the nursery's end will always fail in such cases.
558 CompileZone* zone = realm()->zone();
559 size_t thingSize = gc::Arena::thingSize(gc::AllocKind::BIGINT);
561 bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::BigInt,
562 thingSize);
565 static bool IsNurseryAllocEnabled(CompileZone* zone, JS::TraceKind kind) {
566 switch (kind) {
567 case JS::TraceKind::Object:
568 return zone->allocNurseryObjects();
569 case JS::TraceKind::String:
570 return zone->allocNurseryStrings();
571 case JS::TraceKind::BigInt:
572 return zone->allocNurseryBigInts();
573 default:
574 MOZ_CRASH("Bad nursery allocation kind");
578 void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
579 Label* fail, CompileZone* zone,
580 JS::TraceKind traceKind, uint32_t size,
581 const AllocSiteInput& allocSite) {
582 MOZ_ASSERT(size >= gc::MinCellSize);
584 uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
585 MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
586 MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
588 // We know statically whether nursery allocation is enabled for a particular
589 // kind because we discard JIT code when this changes.
590 if (!IsNurseryAllocEnabled(zone, traceKind)) {
591 jump(fail);
592 return;
595 // Use a relative 32-bit offset from the Nursery position_ to currentEnd_ to
596 // avoid 64-bit immediate loads.
597 void* posAddr = zone->addressOfNurseryPosition();
598 int32_t endOffset = Nursery::offsetOfCurrentEndFromPosition();
600 movePtr(ImmPtr(posAddr), temp);
601 loadPtr(Address(temp, 0), result);
602 addPtr(Imm32(totalSize), result);
603 branchPtr(Assembler::Below, Address(temp, endOffset), result, fail);
604 storePtr(result, Address(temp, 0));
605 subPtr(Imm32(size), result);
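// The nursery position now points past the new cell; subtracting |size| leaves
// |result| pointing at the start of the cell itself, directly after the cell
// header that is written below at |result - nurseryCellHeaderSize()|.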
607 if (allocSite.is<gc::CatchAllAllocSite>()) {
608 // No allocation site supplied. This is the case when called from Warp, or
609 // from places that don't support pretenuring.
610 gc::CatchAllAllocSite siteKind = allocSite.as<gc::CatchAllAllocSite>();
611 gc::AllocSite* site = zone->catchAllAllocSite(traceKind, siteKind);
612 uintptr_t headerWord = gc::NurseryCellHeader::MakeValue(site, traceKind);
613 storePtr(ImmWord(headerWord),
614 Address(result, -js::Nursery::nurseryCellHeaderSize()));
616 // Update the catch all allocation site for strings or if the profiler is
617 // enabled. This is used to calculate the nursery allocation count. The
618 // string data is used to determine whether to disable nursery string
619 // allocation.
620 if (traceKind == JS::TraceKind::String ||
621 runtime()->geckoProfiler().enabled()) {
622 uint32_t* countAddress = site->nurseryAllocCountAddress();
623 CheckedInt<int32_t> counterOffset =
624 (CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
625 CheckedInt<uintptr_t>(uintptr_t(posAddr)))
626 .toChecked<int32_t>();
627 if (counterOffset.isValid()) {
628 add32(Imm32(1), Address(temp, counterOffset.value()));
629 } else {
630 movePtr(ImmPtr(countAddress), temp);
631 add32(Imm32(1), Address(temp, 0));
634 } else {
635 // Update allocation site and store pointer in the nursery cell header. This
636 // is only used from baseline.
637 Register site = allocSite.as<Register>();
638 updateAllocSite(temp, result, zone, site);
639 // See NurseryCellHeader::MakeValue.
640 orPtr(Imm32(int32_t(traceKind)), site);
641 storePtr(site, Address(result, -js::Nursery::nurseryCellHeaderSize()));
645 // Update the allocation site in the same way as Nursery::allocateCell.
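// Only when the allocation count has just become 1 (the first nursery
// allocation recorded for this site) is the site linked into the zone's list
// of nursery-allocated sites.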
646 void MacroAssembler::updateAllocSite(Register temp, Register result,
647 CompileZone* zone, Register site) {
648 Label done;
650 add32(Imm32(1), Address(site, gc::AllocSite::offsetOfNurseryAllocCount()));
652 branch32(Assembler::NotEqual,
653 Address(site, gc::AllocSite::offsetOfNurseryAllocCount()), Imm32(1),
654 &done);
656 loadPtr(AbsoluteAddress(zone->addressOfNurseryAllocatedSites()), temp);
657 storePtr(temp, Address(site, gc::AllocSite::offsetOfNextNurseryAllocated()));
658 storePtr(site, AbsoluteAddress(zone->addressOfNurseryAllocatedSites()));
660 bind(&done);
663 // Inlined equivalent of gc::AllocateString, jumping to fail if nursery
664 // allocation requested but unsuccessful.
665 void MacroAssembler::allocateString(Register result, Register temp,
666 gc::AllocKind allocKind,
667 gc::Heap initialHeap, Label* fail) {
668 MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
669 allocKind == gc::AllocKind::FAT_INLINE_STRING);
671 checkAllocatorState(fail);
673 if (shouldNurseryAllocate(allocKind, initialHeap)) {
674 MOZ_ASSERT(initialHeap == gc::Heap::Default);
675 return nurseryAllocateString(result, temp, allocKind, fail);
678 freeListAllocate(result, temp, allocKind, fail);
681 void MacroAssembler::newGCString(Register result, Register temp,
682 gc::Heap initialHeap, Label* fail) {
683 allocateString(result, temp, js::gc::AllocKind::STRING, initialHeap, fail);
686 void MacroAssembler::newGCFatInlineString(Register result, Register temp,
687 gc::Heap initialHeap, Label* fail) {
688 allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
689 initialHeap, fail);
692 void MacroAssembler::newGCBigInt(Register result, Register temp,
693 gc::Heap initialHeap, Label* fail) {
694 checkAllocatorState(fail);
696 if (shouldNurseryAllocate(gc::AllocKind::BIGINT, initialHeap)) {
697 MOZ_ASSERT(initialHeap == gc::Heap::Default);
698 return nurseryAllocateBigInt(result, temp, fail);
701 freeListAllocate(result, temp, gc::AllocKind::BIGINT, fail);
704 void MacroAssembler::copySlotsFromTemplate(
705 Register obj, const TemplateNativeObject& templateObj, uint32_t start,
706 uint32_t end) {
707 uint32_t nfixed = std::min(templateObj.numFixedSlots(), end);
708 for (unsigned i = start; i < nfixed; i++) {
709 // Template objects are not exposed to script and therefore immutable.
710 // However, regexp template objects are sometimes used directly (when
711 // the cloning is not observable), and therefore we can end up with a
712 // non-zero lastIndex. Detect this case here and just substitute 0, to
713 // avoid racing with the main thread updating this slot.
714 Value v;
715 if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot()) {
716 v = Int32Value(0);
717 } else {
718 v = templateObj.getSlot(i);
720 storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
724 void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
725 uint32_t start, uint32_t end,
726 const Value& v) {
727 MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));
729 if (start >= end) {
730 return;
733 #ifdef JS_NUNBOX32
734 // We only have a single spare register, so do the initialization as two
735 // strided writes of the tag and body.
736 Address addr = base;
737 move32(Imm32(v.toNunboxPayload()), temp);
738 for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
739 store32(temp, ToPayload(addr));
742 addr = base;
743 move32(Imm32(v.toNunboxTag()), temp);
744 for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
745 store32(temp, ToType(addr));
747 #else
748 moveValue(v, ValueOperand(temp));
749 for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtr<Value>)) {
750 storePtr(temp, base);
752 #endif
755 void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
756 uint32_t start, uint32_t end) {
757 fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
760 void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
761 uint32_t start, uint32_t end) {
762 fillSlotsWithConstantValue(base, temp, start, end,
763 MagicValue(JS_UNINITIALIZED_LEXICAL));
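// For example (illustrative): a template object whose slot values are
// [Int32, uninitialized-lexical, undefined, undefined] yields
// {startOfUninitialized = 1, startOfUndefined = 2}.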
766 static std::pair<uint32_t, uint32_t> FindStartOfUninitializedAndUndefinedSlots(
767 const TemplateNativeObject& templateObj, uint32_t nslots) {
768 MOZ_ASSERT(nslots == templateObj.slotSpan());
769 MOZ_ASSERT(nslots > 0);
771 uint32_t first = nslots;
772 for (; first != 0; --first) {
773 if (templateObj.getSlot(first - 1) != UndefinedValue()) {
774 break;
777 uint32_t startOfUndefined = first;
779 if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
780 for (; first != 0; --first) {
781 if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
782 break;
786 uint32_t startOfUninitialized = first;
788 return {startOfUninitialized, startOfUndefined};
791 void MacroAssembler::initTypedArraySlots(Register obj, Register temp,
792 Register lengthReg,
793 LiveRegisterSet liveRegs, Label* fail,
794 TypedArrayObject* templateObj,
795 TypedArrayLength lengthKind) {
796 MOZ_ASSERT(!templateObj->hasBuffer());
798 constexpr size_t dataSlotOffset = ArrayBufferViewObject::dataOffset();
799 constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);
801 static_assert(
802 TypedArrayObject::FIXED_DATA_START == TypedArrayObject::DATA_SLOT + 1,
803 "fixed inline element data assumed to begin after the data slot");
805 static_assert(
806 TypedArrayObject::INLINE_BUFFER_LIMIT ==
807 JSObject::MAX_BYTE_SIZE - dataOffset,
808 "typed array inline buffer is limited by the maximum object byte size");
810 // Initialise data elements to zero.
811 size_t length = templateObj->length();
812 MOZ_ASSERT(length <= INT32_MAX,
813 "Template objects are only created for int32 lengths");
814 size_t nbytes = length * templateObj->bytesPerElement();
816 if (lengthKind == TypedArrayLength::Fixed &&
817 nbytes <= TypedArrayObject::INLINE_BUFFER_LIMIT) {
818 MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());
820 // Store data elements inside the remaining JSObject slots.
821 computeEffectiveAddress(Address(obj, dataOffset), temp);
822 storePrivateValue(temp, Address(obj, dataSlotOffset));
824 // Write enough zero pointers into fixed data to zero every
825 // element. (This zeroes past the end of a byte count that's
826 // not a multiple of pointer size. That's okay, because fixed
827 // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
828 // and we won't inline unless the desired memory fits in that
829 // space.)
830 static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");
832 size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
833 for (size_t i = 0; i < numZeroPointers; i++) {
834 storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
836 MOZ_ASSERT(nbytes > 0, "Zero-length TypedArrays need ZeroLengthArrayData");
837 } else {
838 if (lengthKind == TypedArrayLength::Fixed) {
839 move32(Imm32(length), lengthReg);
842 // Ensure volatile |obj| is saved across the call.
843 if (obj.volatile_()) {
844 liveRegs.addUnchecked(obj);
847 // Allocate a buffer on the heap to store the data elements.
848 PushRegsInMask(liveRegs);
849 using Fn = void (*)(JSContext* cx, TypedArrayObject* obj, int32_t count);
850 setupUnalignedABICall(temp);
851 loadJSContext(temp);
852 passABIArg(temp);
853 passABIArg(obj);
854 passABIArg(lengthReg);
855 callWithABI<Fn, AllocateAndInitTypedArrayBuffer>();
856 PopRegsInMask(liveRegs);
858 // Fail when data slot is UndefinedValue.
859 branchTestUndefined(Assembler::Equal, Address(obj, dataSlotOffset), fail);
863 void MacroAssembler::initGCSlots(Register obj, Register temp,
864 const TemplateNativeObject& templateObj) {
865 MOZ_ASSERT(!templateObj.isArrayObject());
867 // Slots of non-array objects are required to be initialized.
868 // Use the values currently in the template object.
869 uint32_t nslots = templateObj.slotSpan();
870 if (nslots == 0) {
871 return;
874 uint32_t nfixed = templateObj.numUsedFixedSlots();
875 uint32_t ndynamic = templateObj.numDynamicSlots();
877 // Attempt to group slot writes such that we minimize the amount of
878 // duplicated data we need to embed in code and load into registers. In
879 // general, most template object slots will be undefined except for any
880 // reserved slots. Since reserved slots come first, we split the object
881 // logically into independent non-UndefinedValue writes to the head and
882 // duplicated writes of UndefinedValue to the tail. For the majority of
883 // objects, the "tail" will be the entire slot range.
885 // The template object may be a CallObject, in which case we need to
886 // account for uninitialized lexical slots as well as undefined
887 // slots. Uninitialized lexical slots appear in CallObjects if the function
888 // has parameter expressions, in which case closed over parameters have
889 // TDZ. Uninitialized slots come before undefined slots in CallObjects.
890 auto [startOfUninitialized, startOfUndefined] =
891 FindStartOfUninitializedAndUndefinedSlots(templateObj, nslots);
892 MOZ_ASSERT(startOfUninitialized <= nfixed); // Reserved slots must be fixed.
893 MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
894 MOZ_ASSERT_IF(!templateObj.isCallObject() &&
895 !templateObj.isBlockLexicalEnvironmentObject(),
896 startOfUninitialized == startOfUndefined);
898 // Copy over any preserved reserved slots.
899 copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);
901 // Fill the rest of the fixed slots with undefined and uninitialized.
902 size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
903 fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
904 std::min(startOfUndefined, nfixed));
906 if (startOfUndefined < nfixed) {
907 offset = NativeObject::getFixedSlotOffset(startOfUndefined);
908 fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
909 nfixed);
912 if (ndynamic) {
913 // We are short one register to do this elegantly. Borrow the obj
914 // register briefly for our slots base address.
915 push(obj);
916 loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
918 // Fill uninitialized slots if necessary. Otherwise initialize all
919 // slots to undefined.
920 if (startOfUndefined > nfixed) {
921 MOZ_ASSERT(startOfUninitialized != startOfUndefined);
922 fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
923 startOfUndefined - nfixed);
924 size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
925 fillSlotsWithUndefined(Address(obj, offset), temp,
926 startOfUndefined - nfixed, ndynamic);
927 } else {
928 fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
931 pop(obj);
935 void MacroAssembler::initGCThing(Register obj, Register temp,
936 const TemplateObject& templateObj,
937 bool initContents) {
938 // Fast initialization of an empty object returned by allocateObject().
940 storePtr(ImmGCPtr(templateObj.shape()),
941 Address(obj, JSObject::offsetOfShape()));
943 if (templateObj.isNativeObject()) {
944 const TemplateNativeObject& ntemplate =
945 templateObj.asTemplateNativeObject();
946 MOZ_ASSERT(!ntemplate.hasDynamicElements());
948 // If the object has dynamic slots, the slots member has already been
949 // filled in.
950 if (ntemplate.numDynamicSlots() == 0) {
951 storePtr(ImmPtr(emptyObjectSlots),
952 Address(obj, NativeObject::offsetOfSlots()));
955 if (ntemplate.isArrayObject()) {
956 // Can't skip initializing reserved slots.
957 MOZ_ASSERT(initContents);
959 int elementsOffset = NativeObject::offsetOfFixedElements();
961 computeEffectiveAddress(Address(obj, elementsOffset), temp);
962 storePtr(temp, Address(obj, NativeObject::offsetOfElements()));
964 // Fill in the elements header.
965 store32(
966 Imm32(ntemplate.getDenseCapacity()),
967 Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
968 store32(Imm32(ntemplate.getDenseInitializedLength()),
969 Address(obj, elementsOffset +
970 ObjectElements::offsetOfInitializedLength()));
971 store32(Imm32(ntemplate.getArrayLength()),
972 Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
973 store32(Imm32(ObjectElements::FIXED),
974 Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
975 } else if (ntemplate.isArgumentsObject()) {
976 // The caller will initialize the reserved slots.
977 MOZ_ASSERT(!initContents);
978 storePtr(ImmPtr(emptyObjectElements),
979 Address(obj, NativeObject::offsetOfElements()));
980 } else {
981 // If the target type could be a TypedArray that maps shared memory
982 // then this would need to store emptyObjectElementsShared in that case.
983 MOZ_ASSERT(!ntemplate.isSharedMemory());
985 // Can't skip initializing reserved slots.
986 MOZ_ASSERT(initContents);
988 storePtr(ImmPtr(emptyObjectElements),
989 Address(obj, NativeObject::offsetOfElements()));
991 initGCSlots(obj, temp, ntemplate);
993 } else {
994 MOZ_CRASH("Unknown object");
997 #ifdef JS_GC_PROBES
998 AllocatableRegisterSet regs(RegisterSet::Volatile());
999 LiveRegisterSet save(regs.asLiveSet());
1000 PushRegsInMask(save);
1002 regs.takeUnchecked(obj);
1003 Register temp2 = regs.takeAnyGeneral();
1005 using Fn = void (*)(JSObject* obj);
1006 setupUnalignedABICall(temp2);
1007 passABIArg(obj);
1008 callWithABI<Fn, TraceCreateObject>();
1010 PopRegsInMask(save);
1011 #endif
1014 void MacroAssembler::compareStrings(JSOp op, Register left, Register right,
1015 Register result, Label* fail) {
1016 MOZ_ASSERT(left != result);
1017 MOZ_ASSERT(right != result);
1018 MOZ_ASSERT(IsEqualityOp(op) || IsRelationalOp(op));
1020 Label notPointerEqual;
1021 // If operands point to the same instance, the strings are trivially equal.
1022 branchPtr(Assembler::NotEqual, left, right,
1023 IsEqualityOp(op) ? &notPointerEqual : fail);
1024 move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
1025 op == JSOp::Ge),
1026 result);
1028 if (IsEqualityOp(op)) {
1029 Label done;
1030 jump(&done);
1032 bind(&notPointerEqual);
1034 Label leftIsNotAtom;
1035 Label setNotEqualResult;
1036 // Atoms cannot be equal to each other if they point to different strings.
1037 Imm32 atomBit(JSString::ATOM_BIT);
1038 branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()),
1039 atomBit, &leftIsNotAtom);
1040 branchTest32(Assembler::NonZero, Address(right, JSString::offsetOfFlags()),
1041 atomBit, &setNotEqualResult);
1043 bind(&leftIsNotAtom);
1044 // Strings of different length can never be equal.
1045 loadStringLength(left, result);
1046 branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()),
1047 result, fail);
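// Equal lengths mean the characters would have to be compared, which is left
// to the caller's fail path; different lengths fall through to the not-equal
// result below.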
1049 bind(&setNotEqualResult);
1050 move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), result);
1052 bind(&done);
1056 void MacroAssembler::loadStringChars(Register str, Register dest,
1057 CharEncoding encoding) {
1058 MOZ_ASSERT(str != dest);
1060 if (JitOptions.spectreStringMitigations) {
1061 if (encoding == CharEncoding::Latin1) {
1062 // If the string is a rope, zero the |str| register. The code below
1063 // depends on str->flags so this should block speculative execution.
1064 movePtr(ImmWord(0), dest);
1065 test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
1066 Imm32(JSString::LINEAR_BIT), dest, str);
1067 } else {
1068 // If we're loading TwoByte chars, there's an additional risk:
1069 // if the string has Latin1 chars, we could read out-of-bounds. To
1070 // prevent this, we check both the Linear and Latin1 bits. We don't
1071 // have a scratch register, so we use these flags also to block
1072 // speculative execution, similar to the use of 0 above.
1073 MOZ_ASSERT(encoding == CharEncoding::TwoByte);
1074 static constexpr uint32_t Mask =
1075 JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
1076 static_assert(Mask < 1024,
1077 "Mask should be a small, near-null value to ensure we "
1078 "block speculative execution when it's used as string "
1079 "pointer");
1080 move32(Imm32(Mask), dest);
1081 and32(Address(str, JSString::offsetOfFlags()), dest);
1082 cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT), dest,
1083 str);
1087 // Load the inline chars.
1088 computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
1089 dest);
1091 // If it's not an inline string, load the non-inline chars. Use a
1092 // conditional move to prevent speculative execution.
1093 test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
1094 Imm32(JSString::INLINE_CHARS_BIT),
1095 Address(str, JSString::offsetOfNonInlineChars()), dest);
1098 void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
1099 CharEncoding encoding) {
1100 MOZ_ASSERT(str != dest);
1102 if (JitOptions.spectreStringMitigations) {
1103 // If the string is a rope, has inline chars, or has a different
1104 // character encoding, set str to a near-null value to prevent
1105 // speculative execution below (when reading str->nonInlineChars).
1107 static constexpr uint32_t Mask = JSString::LINEAR_BIT |
1108 JSString::INLINE_CHARS_BIT |
1109 JSString::LATIN1_CHARS_BIT;
1110 static_assert(Mask < 1024,
1111 "Mask should be a small, near-null value to ensure we "
1112 "block speculative execution when it's used as string "
1113 "pointer");
1115 uint32_t expectedBits = JSString::LINEAR_BIT;
1116 if (encoding == CharEncoding::Latin1) {
1117 expectedBits |= JSString::LATIN1_CHARS_BIT;
1120 move32(Imm32(Mask), dest);
1121 and32(Address(str, JSString::offsetOfFlags()), dest);
1123 cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits), dest, str);
1126 loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
1129 void MacroAssembler::storeNonInlineStringChars(Register chars, Register str) {
1130 MOZ_ASSERT(chars != str);
1131 storePtr(chars, Address(str, JSString::offsetOfNonInlineChars()));
1134 void MacroAssembler::loadInlineStringCharsForStore(Register str,
1135 Register dest) {
1136 computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
1137 dest);
1140 void MacroAssembler::loadInlineStringChars(Register str, Register dest,
1141 CharEncoding encoding) {
1142 MOZ_ASSERT(str != dest);
1144 if (JitOptions.spectreStringMitigations) {
1145 // Making this Spectre-safe is a bit complicated: using
1146 // computeEffectiveAddress and then zeroing the output register if
1147 // non-inline is not sufficient: when the index is very large, it would
1148 // allow reading |nullptr + index|. Just fall back to loadStringChars
1149 // for now.
1150 loadStringChars(str, dest, encoding);
1151 } else {
1152 computeEffectiveAddress(
1153 Address(str, JSInlineString::offsetOfInlineStorage()), dest);
1157 void MacroAssembler::loadRopeLeftChild(Register str, Register dest) {
1158 MOZ_ASSERT(str != dest);
1160 if (JitOptions.spectreStringMitigations) {
1161 // Zero the output register if the input was not a rope.
1162 movePtr(ImmWord(0), dest);
1163 test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
1164 Imm32(JSString::LINEAR_BIT),
1165 Address(str, JSRope::offsetOfLeft()), dest);
1166 } else {
1167 loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
1171 void MacroAssembler::loadRopeRightChild(Register str, Register dest) {
1172 MOZ_ASSERT(str != dest);
1174 if (JitOptions.spectreStringMitigations) {
1175 // Zero the output register if the input was not a rope.
1176 movePtr(ImmWord(0), dest);
1177 test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
1178 Imm32(JSString::LINEAR_BIT),
1179 Address(str, JSRope::offsetOfRight()), dest);
1180 } else {
1181 loadPtr(Address(str, JSRope::offsetOfRight()), dest);
1185 void MacroAssembler::storeRopeChildren(Register left, Register right,
1186 Register str) {
1187 storePtr(left, Address(str, JSRope::offsetOfLeft()));
1188 storePtr(right, Address(str, JSRope::offsetOfRight()));
1191 void MacroAssembler::loadDependentStringBase(Register str, Register dest) {
1192 MOZ_ASSERT(str != dest);
1194 if (JitOptions.spectreStringMitigations) {
1195 // If the string is not a dependent string, zero the |str| register.
1196 // The code below loads str->base so this should block speculative
1197 // execution.
1198 movePtr(ImmWord(0), dest);
1199 test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
1200 Imm32(JSString::DEPENDENT_BIT), dest, str);
1203 loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
1206 void MacroAssembler::storeDependentStringBase(Register base, Register str) {
1207 storePtr(base, Address(str, JSDependentString::offsetOfBase()));
1210 void MacroAssembler::loadRopeChild(Register str, Register index,
1211 Register output, Label* isLinear) {
1212 // This follows JSString::getChar.
1213 branchIfNotRope(str, isLinear);
1215 loadRopeLeftChild(str, output);
1217 // Check if the index is contained in the leftChild.
1218 Label loadedChild;
1219 branch32(Assembler::Above, Address(output, JSString::offsetOfLength()), index,
1220 &loadedChild);
1222 // The index must be in the rightChild.
1223 loadRopeRightChild(str, output);
1225 bind(&loadedChild);
1228 void MacroAssembler::branchIfCanLoadStringChar(Register str, Register index,
1229 Register scratch, Label* label) {
1230 loadRopeChild(str, index, scratch, label);
1232 // Branch if the left or right side is linear.
1233 branchIfNotRope(scratch, label);
1236 void MacroAssembler::branchIfNotCanLoadStringChar(Register str, Register index,
1237 Register scratch,
1238 Label* label) {
1239 Label done;
1240 loadRopeChild(str, index, scratch, &done);
1242 // Branch if the left or right side is another rope.
1243 branchIfRope(scratch, label);
1245 bind(&done);
1248 void MacroAssembler::loadStringChar(Register str, Register index,
1249 Register output, Register scratch1,
1250 Register scratch2, Label* fail) {
1251 MOZ_ASSERT(str != output);
1252 MOZ_ASSERT(str != index);
1253 MOZ_ASSERT(index != output);
1254 MOZ_ASSERT(output != scratch1);
1255 MOZ_ASSERT(output != scratch2);
1257 // Use scratch1 for the index (adjusted below).
1258 move32(index, scratch1);
1259 movePtr(str, output);
1261 // This follows JSString::getChar.
1262 Label notRope;
1263 branchIfNotRope(str, &notRope);
1265 loadRopeLeftChild(str, output);
1267 // Check if the index is contained in the leftChild.
1268 Label loadedChild, notInLeft;
1269 spectreBoundsCheck32(scratch1, Address(output, JSString::offsetOfLength()),
1270 scratch2, &notInLeft);
1271 jump(&loadedChild);
1273 // The index must be in the rightChild.
1274 // index -= rope->leftChild()->length()
1275 bind(&notInLeft);
1276 sub32(Address(output, JSString::offsetOfLength()), scratch1);
1277 loadRopeRightChild(str, output);
1279 // If the left or right side is another rope, give up.
1280 bind(&loadedChild);
1281 branchIfRope(output, fail);
1283 bind(&notRope);
1285 Label isLatin1, done;
1286 // We have to check the left/right side for ropes,
1287 // because a TwoByte rope might have a Latin1 child.
1288 branchLatin1String(output, &isLatin1);
1289 loadStringChars(output, scratch2, CharEncoding::TwoByte);
1290 loadChar(scratch2, scratch1, output, CharEncoding::TwoByte);
1291 jump(&done);
1293 bind(&isLatin1);
1294 loadStringChars(output, scratch2, CharEncoding::Latin1);
1295 loadChar(scratch2, scratch1, output, CharEncoding::Latin1);
1297 bind(&done);
1300 void MacroAssembler::loadStringIndexValue(Register str, Register dest,
1301 Label* fail) {
1302 MOZ_ASSERT(str != dest);
1304 load32(Address(str, JSString::offsetOfFlags()), dest);
1306 // Does not have a cached index value.
1307 branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);
1309 // Extract the index.
1310 rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
1313 void MacroAssembler::loadChar(Register chars, Register index, Register dest,
1314 CharEncoding encoding, int32_t offset /* = 0 */) {
1315 if (encoding == CharEncoding::Latin1) {
1316 loadChar(BaseIndex(chars, index, TimesOne, offset), dest, encoding);
1317 } else {
1318 loadChar(BaseIndex(chars, index, TimesTwo, offset), dest, encoding);
1322 void MacroAssembler::addToCharPtr(Register chars, Register index,
1323 CharEncoding encoding) {
1324 if (encoding == CharEncoding::Latin1) {
1325 static_assert(sizeof(char) == 1,
1326 "Latin-1 string index shouldn't need scaling");
1327 addPtr(index, chars);
1328 } else {
1329 computeEffectiveAddress(BaseIndex(chars, index, TimesTwo), chars);
1333 void MacroAssembler::loadStringFromUnit(Register unit, Register dest,
1334 const StaticStrings& staticStrings) {
1335 movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
1336 loadPtr(BaseIndex(dest, unit, ScalePointer), dest);
1339 void MacroAssembler::loadLengthTwoString(Register c1, Register c2,
1340 Register dest,
1341 const StaticStrings& staticStrings) {
1342 // Compute (toSmallCharTable[c1] << SMALL_CHAR_BITS) + toSmallCharTable[c2]
1343 // to obtain the index into `StaticStrings::length2StaticTable`.
1344 static_assert(sizeof(StaticStrings::SmallChar) == 1);
1346 movePtr(ImmPtr(&StaticStrings::toSmallCharTable.storage), dest);
1347 load8ZeroExtend(BaseIndex(dest, c1, Scale::TimesOne), c1);
1348 load8ZeroExtend(BaseIndex(dest, c2, Scale::TimesOne), c2);
1350 lshift32(Imm32(StaticStrings::SMALL_CHAR_BITS), c1);
1351 add32(c2, c1);
1353 // Look up the string from the computed index.
1354 movePtr(ImmPtr(&staticStrings.length2StaticTable), dest);
1355 loadPtr(BaseIndex(dest, c1, ScalePointer), dest);
1358 void MacroAssembler::loadInt32ToStringWithBase(
1359 Register input, Register base, Register dest, Register scratch1,
1360 Register scratch2, const StaticStrings& staticStrings,
1361 const LiveRegisterSet& volatileRegs, Label* fail) {
1362 #ifdef DEBUG
1363 Label baseBad, baseOk;
1364 branch32(Assembler::LessThan, base, Imm32(2), &baseBad);
1365 branch32(Assembler::LessThanOrEqual, base, Imm32(36), &baseOk);
1366 bind(&baseBad);
1367 assumeUnreachable("base must be in range [2, 36]");
1368 bind(&baseOk);
1369 #endif
1371 // Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
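// Digit values 0-9 map to '0'..'9'; values 10-35 map to 'a'..'z'.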
1372 auto toChar = [this, base](Register r) {
1373 #ifdef DEBUG
1374 Label ok;
1375 branch32(Assembler::Below, r, base, &ok);
1376 assumeUnreachable("bad digit");
1377 bind(&ok);
1378 #else
1379 // Silence unused lambda capture warning.
1380 (void)base;
1381 #endif
1383 Label done;
1384 add32(Imm32('0'), r);
1385 branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
1386 add32(Imm32('a' - '0' - 10), r);
1387 bind(&done);
1390 // Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
1391 Label lengthTwo, done;
1392 branch32(Assembler::AboveOrEqual, input, base, &lengthTwo);
1394 move32(input, scratch1);
1395 toChar(scratch1);
1397 loadStringFromUnit(scratch1, dest, staticStrings);
1399 jump(&done);
1401 bind(&lengthTwo);
1403 // Compute |base * base|.
1404 move32(base, scratch1);
1405 mul32(scratch1, scratch1);
1407 // Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
1408 branch32(Assembler::AboveOrEqual, input, scratch1, fail);
1410 // Compute |scratch1 = input / base| and |scratch2 = input % base|.
1411 move32(input, scratch1);
1412 flexibleDivMod32(base, scratch1, scratch2, true, volatileRegs);
1414 // Compute the digits of the divisor and remainder.
1415 toChar(scratch1);
1416 toChar(scratch2);
1418 // Look up the 2-character digit string in the small-char table.
1419 loadLengthTwoString(scratch1, scratch2, dest, staticStrings);
1421 bind(&done);
1424 void MacroAssembler::loadInt32ToStringWithBase(
1425 Register input, int32_t base, Register dest, Register scratch1,
1426 Register scratch2, const StaticStrings& staticStrings, Label* fail) {
1427 MOZ_ASSERT(2 <= base && base <= 36, "base must be in range [2, 36]");
1429 // Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
1430 auto toChar = [this, base](Register r) {
1431 #ifdef DEBUG
1432 Label ok;
1433 branch32(Assembler::Below, r, Imm32(base), &ok);
1434 assumeUnreachable("bad digit");
1435 bind(&ok);
1436 #endif
1438 if (base <= 10) {
1439 add32(Imm32('0'), r);
1440 } else {
1441 Label done;
1442 add32(Imm32('0'), r);
1443 branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
1444 add32(Imm32('a' - '0' - 10), r);
1445 bind(&done);
1449 // Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
1450 Label lengthTwo, done;
1451 branch32(Assembler::AboveOrEqual, input, Imm32(base), &lengthTwo);
1453 move32(input, scratch1);
1454 toChar(scratch1);
1456 loadStringFromUnit(scratch1, dest, staticStrings);
1458 jump(&done);
1460 bind(&lengthTwo);
1462 // Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
1463 branch32(Assembler::AboveOrEqual, input, Imm32(base * base), fail);
1465 // Compute |scratch1 = input / base| and |scratch2 = input % base|.
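// A power-of-two base reduces the division to a right shift and the modulus to
// a bit mask; any other base uses reciprocal multiplication below.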
1466 if (mozilla::IsPowerOfTwo(uint32_t(base))) {
1467 uint32_t shift = mozilla::FloorLog2(base);
1469 move32(input, scratch1);
1470 rshift32(Imm32(shift), scratch1);
1472 move32(input, scratch2);
1473 and32(Imm32((uint32_t(1) << shift) - 1), scratch2);
1474 } else {
1475 // The following code matches CodeGenerator::visitUDivOrModConstant()
1476 // for x86-shared. Also see Hacker's Delight 2nd edition, chapter 10-8
1477 // "Unsigned Division by 7" for the case when |rmc.multiplier| exceeds
1478 // UINT32_MAX and we need to adjust the shift amount.
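// Illustrative example (not necessarily the exact constants computed here):
// an unsigned divide by 10 can be evaluated as q = (n * 0xCCCCCCCD) >> (32 + 3),
// i.e. a 32-bit multiply-high followed by a small shift, matching the shape of
// the code below.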
1480 auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(base);
1482 // We first compute |q = (M * n) >> 32|, where M = rmc.multiplier.
1483 mulHighUnsigned32(Imm32(rmc.multiplier), input, scratch1);
1485 if (rmc.multiplier > UINT32_MAX) {
1486 // M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
1487 // ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
1488 // contradicting the proof of correctness in computeDivisionConstants.
1489 MOZ_ASSERT(rmc.shiftAmount > 0);
1490 MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));
1492 // Compute |t = (n - q) / 2|.
1493 move32(input, scratch2);
1494 sub32(scratch1, scratch2);
1495 rshift32(Imm32(1), scratch2);
1497 // Compute |t = (n - q) / 2 + q = (n + q) / 2|.
1498 add32(scratch2, scratch1);
1500 // Finish the computation |q = floor(n / d)|.
1501 rshift32(Imm32(rmc.shiftAmount - 1), scratch1);
1502 } else {
1503 rshift32(Imm32(rmc.shiftAmount), scratch1);
1506 // Compute the remainder from |r = n - q * d|.
1507 move32(scratch1, dest);
1508 mul32(Imm32(base), dest);
1509 move32(input, scratch2);
1510 sub32(dest, scratch2);
1513 // Compute the digits of the divisor and remainder.
1514 toChar(scratch1);
1515 toChar(scratch2);
1517 // Look up the 2-character digit string in the small-char table.
1518 loadLengthTwoString(scratch1, scratch2, dest, staticStrings);
1520 bind(&done);
1523 void MacroAssembler::loadBigIntDigits(Register bigInt, Register digits) {
1524 MOZ_ASSERT(digits != bigInt);
1526 // Load the inline digits.
1527 computeEffectiveAddress(Address(bigInt, BigInt::offsetOfInlineDigits()),
1528 digits);
1530 // If inline digits aren't used, load the heap digits. Use a conditional move
1531 // to prevent speculative execution.
1532 cmp32LoadPtr(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
1533 Imm32(int32_t(BigInt::inlineDigitsLength())),
1534 Address(bigInt, BigInt::offsetOfHeapDigits()), digits);
1537 void MacroAssembler::loadBigInt64(Register bigInt, Register64 dest) {
1538 // This code follows the implementation of |BigInt::toUint64()|. We're also
1539 // using it for inline callers of |BigInt::toInt64()|, which works, because
1540 // all supported Jit architectures use a two's complement representation for
1541 // int64 values, which means the WrapToSigned call in toInt64() is a no-op.
1543 Label done, nonZero;
1545 branchIfBigIntIsNonZero(bigInt, &nonZero);
1547 move64(Imm64(0), dest);
1548 jump(&done);
1550 bind(&nonZero);
1552 #ifdef JS_PUNBOX64
1553 Register digits = dest.reg;
1554 #else
1555 Register digits = dest.high;
1556 #endif
1558 loadBigIntDigits(bigInt, digits);
1560 #ifdef JS_PUNBOX64
1561 // Load the first digit into the destination register.
1562 load64(Address(digits, 0), dest);
1563 #else
1564 // Load the first digit into the destination register's low value.
1565 load32(Address(digits, 0), dest.low);
1567 // And conditionally load the second digit into the high value register.
1568 Label twoDigits, digitsDone;
1569 branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
1570 Imm32(1), &twoDigits);
1572 move32(Imm32(0), dest.high);
1573 jump(&digitsDone);
1576 bind(&twoDigits);
1577 load32(Address(digits, sizeof(BigInt::Digit)), dest.high);
1579 bind(&digitsDone);
1580 #endif
1582 branchTest32(Assembler::Zero, Address(bigInt, BigInt::offsetOfFlags()),
1583 Imm32(BigInt::signBitMask()), &done);
1584 neg64(dest);
1586 bind(&done);
1589 void MacroAssembler::loadFirstBigIntDigitOrZero(Register bigInt,
1590 Register dest) {
1591 Label done, nonZero;
1592 branchIfBigIntIsNonZero(bigInt, &nonZero);
1594 movePtr(ImmWord(0), dest);
1595 jump(&done);
1597 bind(&nonZero);
1599 loadBigIntDigits(bigInt, dest);
1601 // Load the first digit into the destination register.
1602 loadPtr(Address(dest, 0), dest);
1604 bind(&done);
1607 void MacroAssembler::loadBigInt(Register bigInt, Register dest, Label* fail) {
1608 Label done, nonZero;
1609 branchIfBigIntIsNonZero(bigInt, &nonZero);
1611 movePtr(ImmWord(0), dest);
1612 jump(&done);
1614 bind(&nonZero);
1616 loadBigIntNonZero(bigInt, dest, fail);
1618 bind(&done);
1621 void MacroAssembler::loadBigIntNonZero(Register bigInt, Register dest,
1622 Label* fail) {
1623 MOZ_ASSERT(bigInt != dest);
1625 #ifdef DEBUG
1626 Label nonZero;
1627 branchIfBigIntIsNonZero(bigInt, &nonZero);
1628 assumeUnreachable("Unexpected zero BigInt");
1629 bind(&nonZero);
1630 #endif
1632 branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
1633 Imm32(1), fail);
1635 static_assert(BigInt::inlineDigitsLength() > 0,
1636 "Single digit BigInts use inline storage");
1638 // Load the first inline digit into the destination register.
1639 loadPtr(Address(bigInt, BigInt::offsetOfInlineDigits()), dest);
1641 // Return as a signed pointer.
1642 bigIntDigitToSignedPtr(bigInt, dest, fail);
1645 void MacroAssembler::bigIntDigitToSignedPtr(Register bigInt, Register digit,
1646 Label* fail) {
1647 // BigInt digits are stored as absolute numbers. Take the failure path when
1648 // the digit can't be stored in intptr_t.
1649 branchTestPtr(Assembler::Signed, digit, digit, fail);
1651   // Negate |digit| when the BigInt is negative.
1652 Label nonNegative;
1653 branchIfBigIntIsNonNegative(bigInt, &nonNegative);
1654 negPtr(digit);
1655 bind(&nonNegative);
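  // In effect (a sketch, not emitted C++):
  //   if (digit > INTPTR_MAX) goto *fail;
  //   digit = bigInt->isNegative() ? -intptr_t(digit) : intptr_t(digit);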
1658 void MacroAssembler::loadBigIntAbsolute(Register bigInt, Register dest,
1659 Label* fail) {
1660 MOZ_ASSERT(bigInt != dest);
1662 branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
1663 Imm32(1), fail);
1665 static_assert(BigInt::inlineDigitsLength() > 0,
1666 "Single digit BigInts use inline storage");
1668   // Load the first inline digit into |dest|, or zero when the BigInt has no digits.
1669 movePtr(ImmWord(0), dest);
1670 cmp32LoadPtr(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
1671 Imm32(0), Address(bigInt, BigInt::offsetOfInlineDigits()), dest);
1674 void MacroAssembler::initializeBigInt64(Scalar::Type type, Register bigInt,
1675 Register64 val) {
1676 MOZ_ASSERT(Scalar::isBigIntType(type));
1678 store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));
1680 Label done, nonZero;
1681 branch64(Assembler::NotEqual, val, Imm64(0), &nonZero);
1683 store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
1684 jump(&done);
1686 bind(&nonZero);
1688 if (type == Scalar::BigInt64) {
1689 // Set the sign-bit for negative values and then continue with the two's
1690 // complement.
1691 Label isPositive;
1692 branch64(Assembler::GreaterThan, val, Imm64(0), &isPositive);
1694 store32(Imm32(BigInt::signBitMask()),
1695 Address(bigInt, BigInt::offsetOfFlags()));
1696 neg64(val);
1698 bind(&isPositive);
1701 store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));
1703 static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
1704 "BigInt Digit size matches uintptr_t, so there's a single "
1705 "store on 64-bit and up to two stores on 32-bit");
1707 #ifndef JS_PUNBOX64
1708 Label singleDigit;
1709 branchTest32(Assembler::Zero, val.high, val.high, &singleDigit);
1710 store32(Imm32(2), Address(bigInt, BigInt::offsetOfLength()));
1711 bind(&singleDigit);
1713 // We can perform a single store64 on 32-bit platforms, because inline
1714 // storage can store at least two 32-bit integers.
1715 static_assert(BigInt::inlineDigitsLength() >= 2,
1716 "BigInt inline storage can store at least two digits");
1717 #endif
1719 store64(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));
1721 bind(&done);
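  // Sketch of the resulting BigInt for a non-zero |val| (layout details illustrative):
  //   flags  = (type == Scalar::BigInt64 && val < 0) ? signBitMask : 0
  //   digits = abs(val), written to inline storage (one 64-bit digit, or one/two
  //            32-bit digits depending on whether the high word is zero)
  //   length = the number of digits actually used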
1724 void MacroAssembler::initializeBigInt(Register bigInt, Register val) {
1725 store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));
1727 Label done, nonZero;
1728 branchTestPtr(Assembler::NonZero, val, val, &nonZero);
1730 store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
1731 jump(&done);
1733 bind(&nonZero);
1735 // Set the sign-bit for negative values and then continue with the two's
1736 // complement.
1737 Label isPositive;
1738 branchTestPtr(Assembler::NotSigned, val, val, &isPositive);
1740 store32(Imm32(BigInt::signBitMask()),
1741 Address(bigInt, BigInt::offsetOfFlags()));
1742 negPtr(val);
1744 bind(&isPositive);
1746 store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));
1748 static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
1749 "BigInt Digit size matches uintptr_t");
1751 storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));
1753 bind(&done);
1756 void MacroAssembler::initializeBigIntAbsolute(Register bigInt, Register val) {
1757 store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));
1759 Label done, nonZero;
1760 branchTestPtr(Assembler::NonZero, val, val, &nonZero);
1762 store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
1763 jump(&done);
1765 bind(&nonZero);
1767 store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));
1769 static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
1770 "BigInt Digit size matches uintptr_t");
1772 storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));
1774 bind(&done);
1777 void MacroAssembler::copyBigIntWithInlineDigits(Register src, Register dest,
1778 Register temp,
1779 gc::Heap initialHeap,
1780 Label* fail) {
1781 branch32(Assembler::Above, Address(src, BigInt::offsetOfLength()),
1782 Imm32(int32_t(BigInt::inlineDigitsLength())), fail);
1784 newGCBigInt(dest, temp, initialHeap, fail);
1786 // Copy the sign-bit, but not any of the other bits used by the GC.
1787 load32(Address(src, BigInt::offsetOfFlags()), temp);
1788 and32(Imm32(BigInt::signBitMask()), temp);
1789 store32(temp, Address(dest, BigInt::offsetOfFlags()));
1791 // Copy the length.
1792 load32(Address(src, BigInt::offsetOfLength()), temp);
1793 store32(temp, Address(dest, BigInt::offsetOfLength()));
1795 // Copy the digits.
1796 Address srcDigits(src, js::BigInt::offsetOfInlineDigits());
1797 Address destDigits(dest, js::BigInt::offsetOfInlineDigits());
1799 for (size_t i = 0; i < BigInt::inlineDigitsLength(); i++) {
1800 static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
1801 "BigInt Digit size matches uintptr_t");
1803 loadPtr(srcDigits, temp);
1804 storePtr(temp, destDigits);
1806 srcDigits = Address(src, srcDigits.offset + sizeof(BigInt::Digit));
1807 destDigits = Address(dest, destDigits.offset + sizeof(BigInt::Digit));
1811 void MacroAssembler::compareBigIntAndInt32(JSOp op, Register bigInt,
1812 Register int32, Register scratch1,
1813 Register scratch2, Label* ifTrue,
1814 Label* ifFalse) {
1815 MOZ_ASSERT(IsLooseEqualityOp(op) || IsRelationalOp(op));
1817 static_assert(std::is_same_v<BigInt::Digit, uintptr_t>,
1818 "BigInt digit can be loaded in a pointer-sized register");
1819 static_assert(sizeof(BigInt::Digit) >= sizeof(uint32_t),
1820 "BigInt digit stores at least an uint32");
1822 // Test for too large numbers.
1824   // If the absolute value of the BigInt can't be expressed in a uint32/uint64,
1825 // the result of the comparison is a constant.
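  // For example, a BigInt with more than one digit has an absolute value of at
  // least 2**32, so for any int32 x: (big == x) is false, (big != x) is true, and
  // the relational operators are decided by the BigInt's sign alone.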
1826 if (op == JSOp::Eq || op == JSOp::Ne) {
1827 Label* tooLarge = op == JSOp::Eq ? ifFalse : ifTrue;
1828 branch32(Assembler::GreaterThan,
1829 Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
1830 tooLarge);
1831 } else {
1832 Label doCompare;
1833 branch32(Assembler::LessThanOrEqual,
1834 Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
1835 &doCompare);
1837 // Still need to take the sign-bit into account for relational operations.
1838 if (op == JSOp::Lt || op == JSOp::Le) {
1839 branchIfBigIntIsNegative(bigInt, ifTrue);
1840 jump(ifFalse);
1841 } else {
1842 branchIfBigIntIsNegative(bigInt, ifFalse);
1843 jump(ifTrue);
1846 bind(&doCompare);
1849 // Test for mismatched signs and, if the signs are equal, load |abs(x)| in
1850 // |scratch1| and |abs(y)| in |scratch2| and then compare the absolute numbers
1851 // against each other.
1853   // |lessThan| is the target when the BigInt is strictly less than the int32
1854   // value and |greaterThan| when it is strictly greater; which of |ifTrue| and
1855   // |ifFalse| each one points at depends on the comparison operator.
1856 Label* greaterThan;
1857 Label* lessThan;
1858 if (op == JSOp::Eq) {
1859 greaterThan = ifFalse;
1860 lessThan = ifFalse;
1861 } else if (op == JSOp::Ne) {
1862 greaterThan = ifTrue;
1863 lessThan = ifTrue;
1864 } else if (op == JSOp::Lt || op == JSOp::Le) {
1865 greaterThan = ifFalse;
1866 lessThan = ifTrue;
1867 } else {
1868 MOZ_ASSERT(op == JSOp::Gt || op == JSOp::Ge);
1869 greaterThan = ifTrue;
1870 lessThan = ifFalse;
1873 // BigInt digits are always stored as an absolute number.
1874 loadFirstBigIntDigitOrZero(bigInt, scratch1);
1876 // Load the int32 into |scratch2| and negate it for negative numbers.
1877 move32(int32, scratch2);
1879 Label isNegative, doCompare;
1880 branchIfBigIntIsNegative(bigInt, &isNegative);
1881 branch32(Assembler::LessThan, int32, Imm32(0), greaterThan);
1882 jump(&doCompare);
1884 // We rely on |neg32(INT32_MIN)| staying INT32_MIN, because we're using an
1885 // unsigned comparison below.
1886 bind(&isNegative);
1887 branch32(Assembler::GreaterThanOrEqual, int32, Imm32(0), lessThan);
1888 neg32(scratch2);
1890 // Not all supported platforms (e.g. MIPS64) zero-extend 32-bit operations,
1891 // so we need to explicitly clear any high 32-bits.
1892 move32ZeroExtendToPtr(scratch2, scratch2);
1894 // Reverse the relational comparator for negative numbers.
1895 // |-x < -y| <=> |+x > +y|.
1896 // |-x ≤ -y| <=> |+x ≥ +y|.
1897 // |-x > -y| <=> |+x < +y|.
1898 // |-x ≥ -y| <=> |+x ≤ +y|.
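  // Worked example: evaluating |-3n < -2| loads the absolute values 3 and 2 and
  // then tests |3 > 2| with the reversed operator, which correctly yields true.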
1899 JSOp reversed = ReverseCompareOp(op);
1900 if (reversed != op) {
1901 branchPtr(JSOpToCondition(reversed, /* isSigned = */ false), scratch1,
1902 scratch2, ifTrue);
1903 jump(ifFalse);
1906 bind(&doCompare);
1907 branchPtr(JSOpToCondition(op, /* isSigned = */ false), scratch1, scratch2,
1908 ifTrue);
1912 void MacroAssembler::equalBigInts(Register left, Register right, Register temp1,
1913 Register temp2, Register temp3,
1914 Register temp4, Label* notSameSign,
1915 Label* notSameLength, Label* notSameDigit) {
1916 MOZ_ASSERT(left != temp1);
1917 MOZ_ASSERT(right != temp1);
1918 MOZ_ASSERT(right != temp2);
1920   // Jump to |notSameSign| when the signs aren't the same.
1921 load32(Address(left, BigInt::offsetOfFlags()), temp1);
1922 xor32(Address(right, BigInt::offsetOfFlags()), temp1);
1923 branchTest32(Assembler::NonZero, temp1, Imm32(BigInt::signBitMask()),
1924 notSameSign);
1926 // Jump to |notSameLength| when the digits length is different.
1927 load32(Address(right, BigInt::offsetOfLength()), temp1);
1928 branch32(Assembler::NotEqual, Address(left, BigInt::offsetOfLength()), temp1,
1929 notSameLength);
1931 // Both BigInts have the same sign and the same number of digits. Loop
1932 // over each digit, starting with the left-most one, and break from the
1933   // loop when the first non-matching digit is found.
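  // Roughly:  for (size_t i = length; i-- > 0;)
  //             if (leftDigits[i] != rightDigits[i]) goto *notSameDigit;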
1935 loadBigIntDigits(left, temp2);
1936 loadBigIntDigits(right, temp3);
1938 static_assert(sizeof(BigInt::Digit) == sizeof(void*),
1939 "BigInt::Digit is pointer sized");
1941 computeEffectiveAddress(BaseIndex(temp2, temp1, ScalePointer), temp2);
1942 computeEffectiveAddress(BaseIndex(temp3, temp1, ScalePointer), temp3);
1944 Label start, loop;
1945 jump(&start);
1946 bind(&loop);
1948 subPtr(Imm32(sizeof(BigInt::Digit)), temp2);
1949 subPtr(Imm32(sizeof(BigInt::Digit)), temp3);
1951 loadPtr(Address(temp3, 0), temp4);
1952 branchPtr(Assembler::NotEqual, Address(temp2, 0), temp4, notSameDigit);
1954 bind(&start);
1955 branchSub32(Assembler::NotSigned, Imm32(1), temp1, &loop);
1957   // No differing digits were found, so the two BigInts are equal.
1960 void MacroAssembler::typeOfObject(Register obj, Register scratch, Label* slow,
1961 Label* isObject, Label* isCallable,
1962 Label* isUndefined) {
1963 loadObjClassUnsafe(obj, scratch);
1965 // Proxies can emulate undefined and have complex isCallable behavior.
1966 branchTestClassIsProxy(true, scratch, slow);
1968 // JSFunctions are always callable.
1969 branchTestClassIsFunction(Assembler::Equal, scratch, isCallable);
1971 // Objects that emulate undefined.
1972 Address flags(scratch, JSClass::offsetOfFlags());
1973 branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_EMULATES_UNDEFINED),
1974 isUndefined);
1976 // Handle classes with a call hook.
1977 branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClass, cOps)),
1978 ImmPtr(nullptr), isObject);
1980 loadPtr(Address(scratch, offsetof(JSClass, cOps)), scratch);
1981 branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClassOps, call)),
1982 ImmPtr(nullptr), isObject);
1984 jump(isCallable);
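  // Sketch of the decision tree implemented above:
  //   if (clasp is a proxy class)              -> *slow
  //   else if (clasp is the JSFunction class)  -> *isCallable   ("function")
  //   else if (clasp emulates undefined)       -> *isUndefined  ("undefined")
  //   else if (!clasp->cOps || !cOps->call)    -> *isObject     ("object")
  //   else                                     -> *isCallable   ("function")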
1987 void MacroAssembler::isCallableOrConstructor(bool isCallable, Register obj,
1988 Register output, Label* isProxy) {
1989 MOZ_ASSERT(obj != output);
1991 Label notFunction, hasCOps, done;
1992 loadObjClassUnsafe(obj, output);
1994 // An object is callable iff:
1995 // is<JSFunction>() || (getClass()->cOps && getClass()->cOps->call).
1996   // An object is a constructor iff:
1997 // ((is<JSFunction>() && as<JSFunction>().isConstructor) ||
1998 // (getClass()->cOps && getClass()->cOps->construct)).
1999 branchTestClassIsFunction(Assembler::NotEqual, output, &notFunction);
2000 if (isCallable) {
2001 move32(Imm32(1), output);
2002 } else {
2003 static_assert(mozilla::IsPowerOfTwo(uint32_t(FunctionFlags::CONSTRUCTOR)),
2004 "FunctionFlags::CONSTRUCTOR has only one bit set");
2006 load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), output);
2007 rshift32(Imm32(mozilla::FloorLog2(uint32_t(FunctionFlags::CONSTRUCTOR))),
2008 output);
2009 and32(Imm32(1), output);
2011 jump(&done);
2013 bind(&notFunction);
2015 if (!isCallable) {
2016 // For bound functions, we need to check the isConstructor flag.
2017 Label notBoundFunction;
2018 branchPtr(Assembler::NotEqual, output, ImmPtr(&BoundFunctionObject::class_),
2019 &notBoundFunction);
2021 static_assert(BoundFunctionObject::IsConstructorFlag == 0b1,
2022 "AND operation results in boolean value");
2023 unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()), output);
2024 and32(Imm32(BoundFunctionObject::IsConstructorFlag), output);
2025 jump(&done);
2027 bind(&notBoundFunction);
2030 // Just skim proxies off. Their notion of isCallable()/isConstructor() is
2031 // more complicated.
2032 branchTestClassIsProxy(true, output, isProxy);
2034 branchPtr(Assembler::NonZero, Address(output, offsetof(JSClass, cOps)),
2035 ImmPtr(nullptr), &hasCOps);
2036 move32(Imm32(0), output);
2037 jump(&done);
2039 bind(&hasCOps);
2040 loadPtr(Address(output, offsetof(JSClass, cOps)), output);
2041 size_t opsOffset =
2042 isCallable ? offsetof(JSClassOps, call) : offsetof(JSClassOps, construct);
2043 cmpPtrSet(Assembler::NonZero, Address(output, opsOffset), ImmPtr(nullptr),
2044 output);
2046 bind(&done);
2049 void MacroAssembler::loadJSContext(Register dest) {
2050 movePtr(ImmPtr(runtime()->mainContextPtr()), dest);
2053 static const uint8_t* ContextRealmPtr(CompileRuntime* rt) {
2054 return (static_cast<const uint8_t*>(rt->mainContextPtr()) +
2055 JSContext::offsetOfRealm());
2058 void MacroAssembler::switchToRealm(Register realm) {
2059 storePtr(realm, AbsoluteAddress(ContextRealmPtr(runtime())));
2062 void MacroAssembler::switchToRealm(const void* realm, Register scratch) {
2063 MOZ_ASSERT(realm);
2065 movePtr(ImmPtr(realm), scratch);
2066 switchToRealm(scratch);
2069 void MacroAssembler::switchToObjectRealm(Register obj, Register scratch) {
2070 loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
2071 loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
2072 loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
2073 switchToRealm(scratch);
2076 void MacroAssembler::switchToBaselineFrameRealm(Register scratch) {
2077 Address envChain(FramePointer,
2078 BaselineFrame::reverseOffsetOfEnvironmentChain());
2079 loadPtr(envChain, scratch);
2080 switchToObjectRealm(scratch, scratch);
2083 void MacroAssembler::switchToWasmInstanceRealm(Register scratch1,
2084 Register scratch2) {
2085 loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), scratch1);
2086 loadPtr(Address(InstanceReg, wasm::Instance::offsetOfRealm()), scratch2);
2087 storePtr(scratch2, Address(scratch1, JSContext::offsetOfRealm()));
2090 void MacroAssembler::debugAssertContextRealm(const void* realm,
2091 Register scratch) {
2092 #ifdef DEBUG
2093 Label ok;
2094 movePtr(ImmPtr(realm), scratch);
2095 branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr(runtime())),
2096 scratch, &ok);
2097 assumeUnreachable("Unexpected context realm");
2098 bind(&ok);
2099 #endif
2102 void MacroAssembler::setIsCrossRealmArrayConstructor(Register obj,
2103 Register output) {
2104 #ifdef DEBUG
2105 Label notProxy;
2106 branchTestObjectIsProxy(false, obj, output, &notProxy);
2107 assumeUnreachable("Unexpected proxy in setIsCrossRealmArrayConstructor");
2108 bind(&notProxy);
2109 #endif
2111 // The object's realm must not be cx->realm.
2112 Label isFalse, done;
2113 loadPtr(Address(obj, JSObject::offsetOfShape()), output);
2114 loadPtr(Address(output, Shape::offsetOfBaseShape()), output);
2115 loadPtr(Address(output, BaseShape::offsetOfRealm()), output);
2116 branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr(runtime())),
2117 output, &isFalse);
2119 // The object must be a function.
2120 branchTestObjIsFunction(Assembler::NotEqual, obj, output, obj, &isFalse);
2122 // The function must be the ArrayConstructor native.
2123 branchPtr(Assembler::NotEqual,
2124 Address(obj, JSFunction::offsetOfNativeOrEnv()),
2125 ImmPtr(js::ArrayConstructor), &isFalse);
2127 move32(Imm32(1), output);
2128 jump(&done);
2130 bind(&isFalse);
2131 move32(Imm32(0), output);
2133 bind(&done);
2136 void MacroAssembler::setIsDefinitelyTypedArrayConstructor(Register obj,
2137 Register output) {
2138 Label isFalse, isTrue, done;
2140 // The object must be a function. (Wrappers are not supported.)
2141 branchTestObjIsFunction(Assembler::NotEqual, obj, output, obj, &isFalse);
2143 // Load the native into |output|.
2144 loadPtr(Address(obj, JSFunction::offsetOfNativeOrEnv()), output);
2146 auto branchIsTypedArrayCtor = [&](Scalar::Type type) {
2147 // The function must be a TypedArrayConstructor native (from any realm).
2148 JSNative constructor = TypedArrayConstructorNative(type);
2149 branchPtr(Assembler::Equal, output, ImmPtr(constructor), &isTrue);
2152 #define TYPED_ARRAY_CONSTRUCTOR_NATIVE(_, T, N) \
2153 branchIsTypedArrayCtor(Scalar::N);
2154 JS_FOR_EACH_TYPED_ARRAY(TYPED_ARRAY_CONSTRUCTOR_NATIVE)
2155 #undef TYPED_ARRAY_CONSTRUCTOR_NATIVE
2157 // Falls through to the false case.
2159 bind(&isFalse);
2160 move32(Imm32(0), output);
2161 jump(&done);
2163 bind(&isTrue);
2164 move32(Imm32(1), output);
2166 bind(&done);
2169 void MacroAssembler::loadMegamorphicCache(Register dest) {
2170 movePtr(ImmPtr(runtime()->addressOfMegamorphicCache()), dest);
2172 void MacroAssembler::loadMegamorphicSetPropCache(Register dest) {
2173 movePtr(ImmPtr(runtime()->addressOfMegamorphicSetPropCache()), dest);
2176 void MacroAssembler::loadStringToAtomCacheLastLookups(Register dest) {
2177 uintptr_t cachePtr = uintptr_t(runtime()->addressOfStringToAtomCache());
2178 void* offset = (void*)(cachePtr + StringToAtomCache::offsetOfLastLookups());
2179 movePtr(ImmPtr(offset), dest);
2182 void MacroAssembler::loadAtomHash(Register id, Register outHash, Label* done) {
2183 Label doneInner, fatInline;
2184 if (!done) {
2185 done = &doneInner;
2187 move32(Imm32(JSString::FAT_INLINE_MASK), outHash);
2188 and32(Address(id, JSString::offsetOfFlags()), outHash);
2190 branch32(Assembler::Equal, outHash, Imm32(JSString::FAT_INLINE_MASK),
2191 &fatInline);
2192 load32(Address(id, NormalAtom::offsetOfHash()), outHash);
2193 jump(done);
2194 bind(&fatInline);
2195 load32(Address(id, FatInlineAtom::offsetOfHash()), outHash);
2196 jump(done);
2197 bind(&doneInner);
2200 void MacroAssembler::loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
2201 Register outHash,
2202 Label* cacheMiss) {
2203 Label isString, isSymbol, isNull, isUndefined, done, nonAtom, atom,
2204 lastLookupAtom;
2207 ScratchTagScope tag(*this, value);
2208 splitTagForTest(value, tag);
2209 branchTestString(Assembler::Equal, tag, &isString);
2210 branchTestSymbol(Assembler::Equal, tag, &isSymbol);
2211 branchTestNull(Assembler::Equal, tag, &isNull);
2212 branchTestUndefined(Assembler::NotEqual, tag, cacheMiss);
2215 const JSAtomState& names = runtime()->names();
2216 movePropertyKey(PropertyKey::NonIntAtom(names.undefined), outId);
2217 move32(Imm32(names.undefined->hash()), outHash);
2218 jump(&done);
2220 bind(&isNull);
2221 movePropertyKey(PropertyKey::NonIntAtom(names.null), outId);
2222 move32(Imm32(names.null->hash()), outHash);
2223 jump(&done);
2225 bind(&isSymbol);
2226 unboxSymbol(value, outId);
2227 load32(Address(outId, JS::Symbol::offsetOfHash()), outHash);
2228 orPtr(Imm32(PropertyKey::SymbolTypeTag), outId);
2229 jump(&done);
2231 bind(&isString);
2232 unboxString(value, outId);
2233 branchTest32(Assembler::Zero, Address(outId, JSString::offsetOfFlags()),
2234 Imm32(JSString::ATOM_BIT), &nonAtom);
2236 bind(&atom);
2237 loadAtomHash(outId, outHash, &done);
2239 bind(&nonAtom);
2240 loadStringToAtomCacheLastLookups(outHash);
2242 // Compare each entry in the StringToAtomCache's lastLookups_ array
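  // Roughly (member names illustrative; the offsets used below are the real interface):
  //   for (auto& lookup : cache->lastLookups_)
  //     if (lookup.string == str) { str = lookup.atom; goto atom; }
  //   goto *cacheMiss;   // no hit: fall back to the VM call below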
2243 size_t stringOffset = StringToAtomCache::LastLookup::offsetOfString();
2244 branchPtr(Assembler::Equal, Address(outHash, stringOffset), outId,
2245 &lastLookupAtom);
2246 for (size_t i = 0; i < StringToAtomCache::NumLastLookups - 1; ++i) {
2247 addPtr(Imm32(sizeof(StringToAtomCache::LastLookup)), outHash);
2248 branchPtr(Assembler::Equal, Address(outHash, stringOffset), outId,
2249 &lastLookupAtom);
2252   // Couldn't find the string in the cache, so fall back to the C++ call.
2253 jump(cacheMiss);
2255 // We found a hit in the lastLookups_ array! Load the associated atom
2256 // and jump back up to our usual atom handling code
2257 bind(&lastLookupAtom);
2258 size_t atomOffset = StringToAtomCache::LastLookup::offsetOfAtom();
2259 loadPtr(Address(outHash, atomOffset), outId);
2260 jump(&atom);
2262 bind(&done);
2265 void MacroAssembler::emitExtractValueFromMegamorphicCacheEntry(
2266 Register obj, Register entry, Register scratch1, Register scratch2,
2267 ValueOperand output, Label* cacheHit, Label* cacheMiss) {
2268 Label isMissing, dynamicSlot, protoLoopHead, protoLoopTail;
2270 // scratch2 = entry->numHops_
2271 load8ZeroExtend(Address(entry, MegamorphicCache::Entry::offsetOfNumHops()),
2272 scratch2);
2273 // if (scratch2 == NumHopsForMissingOwnProperty) goto cacheMiss
2274 branch32(Assembler::Equal, scratch2,
2275 Imm32(MegamorphicCache::Entry::NumHopsForMissingOwnProperty),
2276 cacheMiss);
2277 // if (scratch2 == NumHopsForMissingProperty) goto isMissing
2278 branch32(Assembler::Equal, scratch2,
2279 Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty),
2280 &isMissing);
2282 // NOTE: Where this is called, `output` can actually alias `obj`, and before
2283 // the last cacheMiss branch above we can't write to `obj`, so we can't
2284 // use `output`'s scratch register there. However a cache miss is impossible
2285 // now, so we're free to use `output` as we like.
2286 Register outputScratch = output.scratchReg();
2287 if (!outputScratch.aliases(obj)) {
2288 // We're okay with paying this very slight extra cost to avoid a potential
2289 // footgun of writing to what callers understand as only an input register.
2290 movePtr(obj, outputScratch);
2292 branchTest32(Assembler::Zero, scratch2, scratch2, &protoLoopTail);
2293 bind(&protoLoopHead);
2294 loadObjProto(outputScratch, outputScratch);
2295 branchSub32(Assembler::NonZero, Imm32(1), scratch2, &protoLoopHead);
2296 bind(&protoLoopTail);
2298 // scratch1 = entry->slotOffset()
2299 load32(Address(entry, MegamorphicCacheEntry::offsetOfSlotOffset()), scratch1);
2301 // scratch2 = slotOffset.offset()
2302 move32(scratch1, scratch2);
2303 rshift32(Imm32(TaggedSlotOffset::OffsetShift), scratch2);
2305 // if (!slotOffset.isFixedSlot()) goto dynamicSlot
2306 branchTest32(Assembler::Zero, scratch1,
2307 Imm32(TaggedSlotOffset::IsFixedSlotFlag), &dynamicSlot);
2308 // output = outputScratch[scratch2]
2309 loadValue(BaseIndex(outputScratch, scratch2, TimesOne), output);
2310 jump(cacheHit);
2312 bind(&dynamicSlot);
2313 // output = outputScratch->slots_[scratch2]
2314 loadPtr(Address(outputScratch, NativeObject::offsetOfSlots()), outputScratch);
2315 loadValue(BaseIndex(outputScratch, scratch2, TimesOne), output);
2316 jump(cacheHit);
2318 bind(&isMissing);
2319 // output = undefined
2320 moveValue(UndefinedValue(), output);
2321 jump(cacheHit);
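  // Sketch of what a cache hit computes (offsets are byte offsets; names illustrative):
  //   JSObject* holder = obj;
  //   for (unsigned n = entry->numHops_; n != 0; n--) holder = proto(holder);
  //   output = slotOffset.isFixedSlot() ? *(Value*)((char*)holder + offset)
  //                                     : *(Value*)((char*)holder->slots_ + offset);
  // A numHops_ of NumHopsForMissingProperty instead produces |undefined|.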
2324 template <typename IdOperandType>
2325 void MacroAssembler::emitMegamorphicCacheLookupByValueCommon(
2326 IdOperandType id, Register obj, Register scratch1, Register scratch2,
2327 Register outEntryPtr, Label* cacheMiss, Label* cacheMissWithEntry) {
2328 // A lot of this code is shared with emitMegamorphicCacheLookup. It would
2329 // be nice to be able to avoid the duplication here, but due to a few
2330 // differences like taking the id in a ValueOperand instead of being able
2331 // to bake it in as an immediate, and only needing a Register for the output
2332 // value, it seemed more awkward to read once it was deduplicated.
2334 // outEntryPtr = obj->shape()
2335 loadPtr(Address(obj, JSObject::offsetOfShape()), outEntryPtr);
2337 movePtr(outEntryPtr, scratch2);
2339   // outEntryPtr = ((outEntryPtr >> 3) ^ (outEntryPtr >> 13)) + idHash
2340 rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1), outEntryPtr);
2341 rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2), scratch2);
2342 xorPtr(scratch2, outEntryPtr);
2344 if constexpr (std::is_same<IdOperandType, ValueOperand>::value) {
2345 loadAtomOrSymbolAndHash(id, scratch1, scratch2, cacheMiss);
2346 } else {
2347 static_assert(std::is_same<IdOperandType, Register>::value);
2348 movePtr(id, scratch1);
2349 loadAtomHash(scratch1, scratch2, nullptr);
2351 addPtr(scratch2, outEntryPtr);
2353 // outEntryPtr %= MegamorphicCache::NumEntries
2354 constexpr size_t cacheSize = MegamorphicCache::NumEntries;
2355 static_assert(mozilla::IsPowerOfTwo(cacheSize));
2356 size_t cacheMask = cacheSize - 1;
2357 and32(Imm32(cacheMask), outEntryPtr);
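  // Putting the above together, the entry index is roughly:
  //   uintptr_t h = (uintptr_t(shape) >> ShapeHashShift1) ^
  //                 (uintptr_t(shape) >> ShapeHashShift2);
  //   size_t idx = (h + idHash) & (MegamorphicCache::NumEntries - 1);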
2359 loadMegamorphicCache(scratch2);
2360 // outEntryPtr = &scratch2->entries_[outEntryPtr]
2361 constexpr size_t entrySize = sizeof(MegamorphicCache::Entry);
2362 static_assert(sizeof(void*) == 4 || entrySize == 24);
2363 if constexpr (sizeof(void*) == 4) {
2364 mul32(Imm32(entrySize), outEntryPtr);
2365 computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesOne,
2366 MegamorphicCache::offsetOfEntries()),
2367 outEntryPtr);
2368 } else {
2369 computeEffectiveAddress(BaseIndex(outEntryPtr, outEntryPtr, TimesTwo),
2370 outEntryPtr);
2371 computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesEight,
2372 MegamorphicCache::offsetOfEntries()),
2373 outEntryPtr);
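  // On 64-bit targets entrySize is 24, so &entries_[idx] is formed without a
  // multiply: idx*3 via BaseIndex(idx, idx, TimesTwo), then scaled by TimesEight,
  // i.e. base + idx*24.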
2376 // if (outEntryPtr->key_ != scratch1) goto cacheMissWithEntry
2377 branchPtr(Assembler::NotEqual,
2378 Address(outEntryPtr, MegamorphicCache::Entry::offsetOfKey()),
2379 scratch1, cacheMissWithEntry);
2380 loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);
2382 // if (outEntryPtr->shape_ != scratch1) goto cacheMissWithEntry
2383 branchPtr(Assembler::NotEqual,
2384 Address(outEntryPtr, MegamorphicCache::Entry::offsetOfShape()),
2385 scratch1, cacheMissWithEntry);
2387 // scratch2 = scratch2->generation_
2388 load16ZeroExtend(Address(scratch2, MegamorphicCache::offsetOfGeneration()),
2389 scratch2);
2390 load16ZeroExtend(
2391 Address(outEntryPtr, MegamorphicCache::Entry::offsetOfGeneration()),
2392 scratch1);
2393 // if (outEntryPtr->generation_ != scratch2) goto cacheMissWithEntry
2394 branch32(Assembler::NotEqual, scratch1, scratch2, cacheMissWithEntry);
2397 void MacroAssembler::emitMegamorphicCacheLookup(
2398 PropertyKey id, Register obj, Register scratch1, Register scratch2,
2399 Register outEntryPtr, ValueOperand output, Label* cacheHit) {
2400 Label cacheMiss, isMissing, dynamicSlot, protoLoopHead, protoLoopTail;
2402 // scratch1 = obj->shape()
2403 loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);
2405 movePtr(scratch1, outEntryPtr);
2406 movePtr(scratch1, scratch2);
2408   // outEntryPtr = ((scratch1 >> 3) ^ (scratch1 >> 13)) + hash(id)
2409 rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1), outEntryPtr);
2410 rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2), scratch2);
2411 xorPtr(scratch2, outEntryPtr);
2412 addPtr(Imm32(HashAtomOrSymbolPropertyKey(id)), outEntryPtr);
2414 // outEntryPtr %= MegamorphicCache::NumEntries
2415 constexpr size_t cacheSize = MegamorphicCache::NumEntries;
2416 static_assert(mozilla::IsPowerOfTwo(cacheSize));
2417 size_t cacheMask = cacheSize - 1;
2418 and32(Imm32(cacheMask), outEntryPtr);
2420 loadMegamorphicCache(scratch2);
2421 // outEntryPtr = &scratch2->entries_[outEntryPtr]
2422 constexpr size_t entrySize = sizeof(MegamorphicCache::Entry);
2423 static_assert(sizeof(void*) == 4 || entrySize == 24);
2424 if constexpr (sizeof(void*) == 4) {
2425 mul32(Imm32(entrySize), outEntryPtr);
2426 computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesOne,
2427 MegamorphicCache::offsetOfEntries()),
2428 outEntryPtr);
2429 } else {
2430 computeEffectiveAddress(BaseIndex(outEntryPtr, outEntryPtr, TimesTwo),
2431 outEntryPtr);
2432 computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesEight,
2433 MegamorphicCache::offsetOfEntries()),
2434 outEntryPtr);
2437 // if (outEntryPtr->shape_ != scratch1) goto cacheMiss
2438 branchPtr(Assembler::NotEqual,
2439 Address(outEntryPtr, MegamorphicCache::Entry::offsetOfShape()),
2440 scratch1, &cacheMiss);
2442 // if (outEntryPtr->key_ != id) goto cacheMiss
2443 movePropertyKey(id, scratch1);
2444 branchPtr(Assembler::NotEqual,
2445 Address(outEntryPtr, MegamorphicCache::Entry::offsetOfKey()),
2446 scratch1, &cacheMiss);
2448 // scratch2 = scratch2->generation_
2449 load16ZeroExtend(Address(scratch2, MegamorphicCache::offsetOfGeneration()),
2450 scratch2);
2451 load16ZeroExtend(
2452 Address(outEntryPtr, MegamorphicCache::Entry::offsetOfGeneration()),
2453 scratch1);
2454 // if (outEntryPtr->generation_ != scratch2) goto cacheMiss
2455 branch32(Assembler::NotEqual, scratch1, scratch2, &cacheMiss);
2457 emitExtractValueFromMegamorphicCacheEntry(
2458 obj, outEntryPtr, scratch1, scratch2, output, cacheHit, &cacheMiss);
2460 bind(&cacheMiss);
2463 template <typename IdOperandType>
2464 void MacroAssembler::emitMegamorphicCacheLookupByValue(
2465 IdOperandType id, Register obj, Register scratch1, Register scratch2,
2466 Register outEntryPtr, ValueOperand output, Label* cacheHit) {
2467 Label cacheMiss, cacheMissWithEntry;
2468 emitMegamorphicCacheLookupByValueCommon(id, obj, scratch1, scratch2,
2469 outEntryPtr, &cacheMiss,
2470 &cacheMissWithEntry);
2471 emitExtractValueFromMegamorphicCacheEntry(obj, outEntryPtr, scratch1,
2472 scratch2, output, cacheHit,
2473 &cacheMissWithEntry);
2474 bind(&cacheMiss);
2475 xorPtr(outEntryPtr, outEntryPtr);
2476 bind(&cacheMissWithEntry);
2479 template void MacroAssembler::emitMegamorphicCacheLookupByValue<ValueOperand>(
2480 ValueOperand id, Register obj, Register scratch1, Register scratch2,
2481 Register outEntryPtr, ValueOperand output, Label* cacheHit);
2483 template void MacroAssembler::emitMegamorphicCacheLookupByValue<Register>(
2484 Register id, Register obj, Register scratch1, Register scratch2,
2485 Register outEntryPtr, ValueOperand output, Label* cacheHit);
2487 void MacroAssembler::emitMegamorphicCacheLookupExists(
2488 ValueOperand id, Register obj, Register scratch1, Register scratch2,
2489 Register outEntryPtr, Register output, Label* cacheHit, bool hasOwn) {
2490 Label cacheMiss, cacheMissWithEntry, cacheHitFalse;
2491 emitMegamorphicCacheLookupByValueCommon(id, obj, scratch1, scratch2,
2492 outEntryPtr, &cacheMiss,
2493 &cacheMissWithEntry);
2495 // scratch1 = outEntryPtr->numHops_
2496 load8ZeroExtend(
2497 Address(outEntryPtr, MegamorphicCache::Entry::offsetOfNumHops()),
2498 scratch1);
2500 branch32(Assembler::Equal, scratch1,
2501 Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty),
2502 &cacheHitFalse);
2504 if (hasOwn) {
2505 branch32(Assembler::NotEqual, scratch1, Imm32(0), &cacheHitFalse);
2506 } else {
2507 branch32(Assembler::Equal, scratch1,
2508 Imm32(MegamorphicCache::Entry::NumHopsForMissingOwnProperty),
2509 &cacheMissWithEntry);
2512 move32(Imm32(1), output);
2513 jump(cacheHit);
2515 bind(&cacheHitFalse);
2516 xor32(output, output);
2517 jump(cacheHit);
2519 bind(&cacheMiss);
2520 xorPtr(outEntryPtr, outEntryPtr);
2521 bind(&cacheMissWithEntry);
2524 void MacroAssembler::extractCurrentIndexAndKindFromIterator(Register iterator,
2525 Register outIndex,
2526 Register outKind) {
2527   // Load the NativeIterator from the iterator object.
2528 Address nativeIterAddr(iterator,
2529 PropertyIteratorObject::offsetOfIteratorSlot());
2530 loadPrivate(nativeIterAddr, outIndex);
2532 // Compute offset of propertyCursor_ from propertiesBegin()
2533 loadPtr(Address(outIndex, NativeIterator::offsetOfPropertyCursor()), outKind);
2534 subPtr(Address(outIndex, NativeIterator::offsetOfShapesEnd()), outKind);
2536 // Compute offset of current index from indicesBegin(). Note that because
2537 // propertyCursor has already been incremented, this is actually the offset
2538 // of the next index. We adjust accordingly below.
2539 size_t indexAdjustment =
2540 sizeof(GCPtr<JSLinearString*>) / sizeof(PropertyIndex);
2541 if (indexAdjustment != 1) {
2542 MOZ_ASSERT(indexAdjustment == 2);
2543 rshift32(Imm32(1), outKind);
2546 // Load current index.
2547 loadPtr(Address(outIndex, NativeIterator::offsetOfPropertiesEnd()), outIndex);
2548 load32(BaseIndex(outIndex, outKind, Scale::TimesOne,
2549 -int32_t(sizeof(PropertyIndex))),
2550 outIndex);
2552 // Extract kind.
2553 move32(outIndex, outKind);
2554 rshift32(Imm32(PropertyIndex::KindShift), outKind);
2556 // Extract index.
2557 and32(Imm32(PropertyIndex::IndexMask), outIndex);
2560 template <typename IdType>
2561 void MacroAssembler::emitMegamorphicCachedSetSlot(
2562 IdType id, Register obj, Register scratch1,
2563 #ifndef JS_CODEGEN_X86 // See MegamorphicSetElement in LIROps.yaml
2564 Register scratch2, Register scratch3,
2565 #endif
2566 ValueOperand value, Label* cacheHit,
2567 void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType)) {
2568 Label cacheMiss, dynamicSlot, doAdd, doSet, doAddDynamic, doSetDynamic;
2570 #ifdef JS_CODEGEN_X86
2571 pushValue(value);
2572 Register scratch2 = value.typeReg();
2573 Register scratch3 = value.payloadReg();
2574 #endif
2576 // outEntryPtr = obj->shape()
2577 loadPtr(Address(obj, JSObject::offsetOfShape()), scratch3);
2579 movePtr(scratch3, scratch2);
2581   // scratch3 = ((scratch3 >> 3) ^ (scratch3 >> 13)) + idHash
2582 rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift1), scratch3);
2583 rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift2), scratch2);
2584 xorPtr(scratch2, scratch3);
2586 if constexpr (std::is_same<IdType, ValueOperand>::value) {
2587 loadAtomOrSymbolAndHash(id, scratch1, scratch2, &cacheMiss);
2588 addPtr(scratch2, scratch3);
2589 } else {
2590 static_assert(std::is_same<IdType, PropertyKey>::value);
2591 addPtr(Imm32(HashAtomOrSymbolPropertyKey(id)), scratch3);
2592 movePropertyKey(id, scratch1);
2595 // scratch3 %= MegamorphicSetPropCache::NumEntries
2596 constexpr size_t cacheSize = MegamorphicSetPropCache::NumEntries;
2597 static_assert(mozilla::IsPowerOfTwo(cacheSize));
2598 size_t cacheMask = cacheSize - 1;
2599 and32(Imm32(cacheMask), scratch3);
2601 loadMegamorphicSetPropCache(scratch2);
2602 // scratch3 = &scratch2->entries_[scratch3]
2603 constexpr size_t entrySize = sizeof(MegamorphicSetPropCache::Entry);
2604 mul32(Imm32(entrySize), scratch3);
2605 computeEffectiveAddress(BaseIndex(scratch2, scratch3, TimesOne,
2606 MegamorphicSetPropCache::offsetOfEntries()),
2607 scratch3);
2609 // if (scratch3->key_ != scratch1) goto cacheMiss
2610 branchPtr(Assembler::NotEqual,
2611 Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfKey()),
2612 scratch1, &cacheMiss);
2614 loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);
2615 // if (scratch3->shape_ != scratch1) goto cacheMiss
2616 branchPtr(Assembler::NotEqual,
2617 Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfShape()),
2618 scratch1, &cacheMiss);
2620 // scratch2 = scratch2->generation_
2621 load16ZeroExtend(
2622 Address(scratch2, MegamorphicSetPropCache::offsetOfGeneration()),
2623 scratch2);
2624 load16ZeroExtend(
2625 Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfGeneration()),
2626 scratch1);
2627 // if (scratch3->generation_ != scratch2) goto cacheMiss
2628 branch32(Assembler::NotEqual, scratch1, scratch2, &cacheMiss);
2630 // scratch2 = entry->slotOffset()
2631 load32(
2632 Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfSlotOffset()),
2633 scratch2);
2635 // scratch1 = slotOffset.offset()
2636 move32(scratch2, scratch1);
2637 rshift32(Imm32(TaggedSlotOffset::OffsetShift), scratch1);
2639 Address afterShapePtr(scratch3,
2640 MegamorphicSetPropCache::Entry::offsetOfAfterShape());
2642 // if (!slotOffset.isFixedSlot()) goto dynamicSlot
2643 branchTest32(Assembler::Zero, scratch2,
2644 Imm32(TaggedSlotOffset::IsFixedSlotFlag), &dynamicSlot);
2646   // Calculate slot address in scratch1. Jump to doSet if entry->afterShape() is
2647   // null, else jump (or fall through) to doAdd.
2648 addPtr(obj, scratch1);
2649 branchPtr(Assembler::Equal, afterShapePtr, ImmPtr(nullptr), &doSet);
2650 jump(&doAdd);
2652 bind(&dynamicSlot);
2653 branchPtr(Assembler::Equal, afterShapePtr, ImmPtr(nullptr), &doSetDynamic);
2655 Address slotAddr(scratch1, 0);
2657 // If entry->newCapacity_ is nonzero, we need to grow the slots on the
2658 // object. Otherwise just jump straight to a dynamic add.
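  // Roughly: if (entry->newCapacity_ != 0 &&
  //              !NativeObject::growSlotsPure(cx, obj, entry->newCapacity_))
  //            goto cacheMiss;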
2659 load16ZeroExtend(
2660 Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfNewCapacity()),
2661 scratch2);
2662 branchTest32(Assembler::Zero, scratch2, scratch2, &doAddDynamic);
2664 AllocatableRegisterSet regs(RegisterSet::Volatile());
2665 LiveRegisterSet save(regs.asLiveSet());
2667 PushRegsInMask(save);
2669 regs.takeUnchecked(scratch2);
2670 Register tmp;
2671 if (regs.has(obj)) {
2672 regs.takeUnchecked(obj);
2673 tmp = regs.takeAnyGeneral();
2674 regs.addUnchecked(obj);
2675 } else {
2676 tmp = regs.takeAnyGeneral();
2679 using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
2680 setupUnalignedABICall(tmp);
2681 loadJSContext(tmp);
2682 passABIArg(tmp);
2683 passABIArg(obj);
2684 passABIArg(scratch2);
2685 callWithABI<Fn, NativeObject::growSlotsPure>();
2686 storeCallPointerResult(scratch2);
2687 PopRegsInMask(save);
2689 branchIfFalseBool(scratch2, &cacheMiss);
2691 bind(&doAddDynamic);
2692 addPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
2694 bind(&doAdd);
2695 // scratch3 = entry->afterShape()
2696 loadPtr(
2697 Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfAfterShape()),
2698 scratch3);
2700 storeObjShape(scratch3, obj,
2701 [emitPreBarrier](MacroAssembler& masm, const Address& addr) {
2702 emitPreBarrier(masm, addr, MIRType::Shape);
2704 #ifdef JS_CODEGEN_X86
2705 popValue(value);
2706 #endif
2707 storeValue(value, slotAddr);
2708 jump(cacheHit);
2710 bind(&doSetDynamic);
2711 addPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
2712 bind(&doSet);
2713 guardedCallPreBarrier(slotAddr, MIRType::Value);
2715 #ifdef JS_CODEGEN_X86
2716 popValue(value);
2717 #endif
2718 storeValue(value, slotAddr);
2719 jump(cacheHit);
2721 bind(&cacheMiss);
2722 #ifdef JS_CODEGEN_X86
2723 popValue(value);
2724 #endif
2727 template void MacroAssembler::emitMegamorphicCachedSetSlot<PropertyKey>(
2728 PropertyKey id, Register obj, Register scratch1,
2729 #ifndef JS_CODEGEN_X86 // See MegamorphicSetElement in LIROps.yaml
2730 Register scratch2, Register scratch3,
2731 #endif
2732 ValueOperand value, Label* cacheHit,
2733 void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));
2735 template void MacroAssembler::emitMegamorphicCachedSetSlot<ValueOperand>(
2736 ValueOperand id, Register obj, Register scratch1,
2737 #ifndef JS_CODEGEN_X86 // See MegamorphicSetElement in LIROps.yaml
2738 Register scratch2, Register scratch3,
2739 #endif
2740 ValueOperand value, Label* cacheHit,
2741 void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));
2743 void MacroAssembler::guardNonNegativeIntPtrToInt32(Register reg, Label* fail) {
2744 #ifdef DEBUG
2745 Label ok;
2746 branchPtr(Assembler::NotSigned, reg, reg, &ok);
2747 assumeUnreachable("Unexpected negative value");
2748 bind(&ok);
2749 #endif
2751 #ifdef JS_64BIT
2752 branchPtr(Assembler::Above, reg, Imm32(INT32_MAX), fail);
2753 #endif
2756 void MacroAssembler::loadArrayBufferByteLengthIntPtr(Register obj,
2757 Register output) {
2758 Address slotAddr(obj, ArrayBufferObject::offsetOfByteLengthSlot());
2759 loadPrivate(slotAddr, output);
2762 void MacroAssembler::loadArrayBufferViewByteOffsetIntPtr(Register obj,
2763 Register output) {
2764 Address slotAddr(obj, ArrayBufferViewObject::byteOffsetOffset());
2765 loadPrivate(slotAddr, output);
2768 void MacroAssembler::loadArrayBufferViewLengthIntPtr(Register obj,
2769 Register output) {
2770 Address slotAddr(obj, ArrayBufferViewObject::lengthOffset());
2771 loadPrivate(slotAddr, output);
2774 void MacroAssembler::loadDOMExpandoValueGuardGeneration(
2775 Register obj, ValueOperand output,
2776 JS::ExpandoAndGeneration* expandoAndGeneration, uint64_t generation,
2777 Label* fail) {
2778 loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
2779 output.scratchReg());
2780 loadValue(Address(output.scratchReg(),
2781 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
2782 output);
2784   // Guard that the ExpandoAndGeneration* matches the proxy's ExpandoAndGeneration
2785 // privateSlot.
2786 branchTestValue(Assembler::NotEqual, output,
2787 PrivateValue(expandoAndGeneration), fail);
2789 // Guard expandoAndGeneration->generation matches the expected generation.
2790 Address generationAddr(output.payloadOrValueReg(),
2791 JS::ExpandoAndGeneration::offsetOfGeneration());
2792 branch64(Assembler::NotEqual, generationAddr, Imm64(generation), fail);
2794 // Load expandoAndGeneration->expando into the output Value register.
2795 loadValue(Address(output.payloadOrValueReg(),
2796 JS::ExpandoAndGeneration::offsetOfExpando()),
2797 output);
2800 void MacroAssembler::loadJitActivation(Register dest) {
2801 loadJSContext(dest);
2802 loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
2805 void MacroAssembler::guardSpecificAtom(Register str, JSAtom* atom,
2806 Register scratch,
2807 const LiveRegisterSet& volatileRegs,
2808 Label* fail) {
2809 Label done;
2810 branchPtr(Assembler::Equal, str, ImmGCPtr(atom), &done);
2812 // The pointers are not equal, so if the input string is also an atom it
2813 // must be a different string.
2814 branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
2815 Imm32(JSString::ATOM_BIT), fail);
2817 // Check the length.
2818 branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
2819 Imm32(atom->length()), fail);
2821 // We have a non-atomized string with the same length. Call a helper
2822 // function to do the comparison.
2823 PushRegsInMask(volatileRegs);
2825 using Fn = bool (*)(JSString* str1, JSString* str2);
2826 setupUnalignedABICall(scratch);
2827 movePtr(ImmGCPtr(atom), scratch);
2828 passABIArg(scratch);
2829 passABIArg(str);
2830 callWithABI<Fn, EqualStringsHelperPure>();
2831 storeCallPointerResult(scratch);
2833 MOZ_ASSERT(!volatileRegs.has(scratch));
2834 PopRegsInMask(volatileRegs);
2835 branchIfFalseBool(scratch, fail);
2837 bind(&done);
2840 void MacroAssembler::guardStringToInt32(Register str, Register output,
2841 Register scratch,
2842 LiveRegisterSet volatileRegs,
2843 Label* fail) {
2844 Label vmCall, done;
2845   // Use the indexed value as a fast path if possible.
2846 loadStringIndexValue(str, output, &vmCall);
2847 jump(&done);
2849 bind(&vmCall);
2851 // Reserve space for holding the result int32_t of the call. Use
2852 // pointer-size to avoid misaligning the stack on 64-bit platforms.
2853 reserveStack(sizeof(uintptr_t));
2854 moveStackPtrTo(output);
2856 volatileRegs.takeUnchecked(scratch);
2857 if (output.volatile_()) {
2858 volatileRegs.addUnchecked(output);
2860 PushRegsInMask(volatileRegs);
2862 using Fn = bool (*)(JSContext* cx, JSString* str, int32_t* result);
2863 setupUnalignedABICall(scratch);
2864 loadJSContext(scratch);
2865 passABIArg(scratch);
2866 passABIArg(str);
2867 passABIArg(output);
2868 callWithABI<Fn, GetInt32FromStringPure>();
2869 storeCallPointerResult(scratch);
2871 PopRegsInMask(volatileRegs);
2873 Label ok;
2874 branchIfTrueBool(scratch, &ok);
2876 // OOM path, recovered by GetInt32FromStringPure.
2878 // Use addToStackPtr instead of freeStack as freeStack tracks stack height
2879 // flow-insensitively, and using it twice would confuse the stack height
2880 // tracking.
2881 addToStackPtr(Imm32(sizeof(uintptr_t)));
2882 jump(fail);
2884 bind(&ok);
2885 load32(Address(output, 0), output);
2886 freeStack(sizeof(uintptr_t));
2888 bind(&done);
2891 void MacroAssembler::generateBailoutTail(Register scratch,
2892 Register bailoutInfo) {
2893 Label bailoutFailed;
2894 branchIfFalseBool(ReturnReg, &bailoutFailed);
2896 // Finish bailing out to Baseline.
2898 // Prepare a register set for use in this case.
2899 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2900 MOZ_ASSERT_IF(!IsHiddenSP(getStackPointer()),
2901 !regs.has(AsRegister(getStackPointer())));
2902 regs.take(bailoutInfo);
2904 Register temp = regs.takeAny();
2906 #ifdef DEBUG
2907 // Assert the stack pointer points to the JitFrameLayout header. Copying
2908 // starts here.
2909 Label ok;
2910 loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)),
2911 temp);
2912 branchStackPtr(Assembler::Equal, temp, &ok);
2913 assumeUnreachable("Unexpected stack pointer value");
2914 bind(&ok);
2915 #endif
2917 Register copyCur = regs.takeAny();
2918 Register copyEnd = regs.takeAny();
2920 // Copy data onto stack.
2921 loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)),
2922 copyCur);
2923 loadPtr(
2924 Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)),
2925 copyEnd);
2927 Label copyLoop;
2928 Label endOfCopy;
2929 bind(&copyLoop);
2930 branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
2931 subPtr(Imm32(sizeof(uintptr_t)), copyCur);
2932 subFromStackPtr(Imm32(sizeof(uintptr_t)));
2933 loadPtr(Address(copyCur, 0), temp);
2934 storePtr(temp, Address(getStackPointer(), 0));
2935 jump(&copyLoop);
2936 bind(&endOfCopy);
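  // In effect: while (copyCur > copyEnd) { copyCur -= sizeof(uintptr_t);
  //                                        sp -= sizeof(uintptr_t); *sp = *copyCur; }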
2939 loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)),
2940 FramePointer);
2942 // Enter exit frame for the FinishBailoutToBaseline call.
2943 pushFrameDescriptor(FrameType::BaselineJS);
2944 push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
2945 push(FramePointer);
2946 // No GC things to mark on the stack, push a bare token.
2947 loadJSContext(scratch);
2948 enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
2950 // Save needed values onto stack temporarily.
2951 push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
2953 // Call a stub to free allocated memory and create arguments objects.
2954 using Fn = bool (*)(BaselineBailoutInfo* bailoutInfoArg);
2955 setupUnalignedABICall(temp);
2956 passABIArg(bailoutInfo);
2957 callWithABI<Fn, FinishBailoutToBaseline>(
2958 MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
2959 branchIfFalseBool(ReturnReg, exceptionLabel());
2961 // Restore values where they need to be and resume execution.
2962 AllocatableGeneralRegisterSet enterRegs(GeneralRegisterSet::All());
2963 MOZ_ASSERT(!enterRegs.has(FramePointer));
2964 Register jitcodeReg = enterRegs.takeAny();
2966 pop(jitcodeReg);
2968 // Discard exit frame.
2969 addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
2971 jump(jitcodeReg);
2974 bind(&bailoutFailed);
2976 // jit::Bailout or jit::InvalidationBailout failed and returned false. The
2977 // Ion frame has already been discarded and the stack pointer points to the
2978 // JitFrameLayout header. Turn it into an ExitFrameLayout, similar to
2979 // EnsureUnwoundJitExitFrame, and call the exception handler.
2980 loadJSContext(scratch);
2981 enterFakeExitFrame(scratch, scratch, ExitFrameType::UnwoundJit);
2982 jump(exceptionLabel());
2986 void MacroAssembler::loadJitCodeRaw(Register func, Register dest) {
2987 static_assert(BaseScript::offsetOfJitCodeRaw() ==
2988 SelfHostedLazyScript::offsetOfJitCodeRaw(),
2989 "SelfHostedLazyScript and BaseScript must use same layout for "
2990 "jitCodeRaw_");
2991 static_assert(
2992 BaseScript::offsetOfJitCodeRaw() == wasm::JumpTableJitEntryOffset,
2993 "Wasm exported functions jit entries must use same layout for "
2994 "jitCodeRaw_");
2995 loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), dest);
2996 loadPtr(Address(dest, BaseScript::offsetOfJitCodeRaw()), dest);
2999 void MacroAssembler::loadBaselineJitCodeRaw(Register func, Register dest,
3000 Label* failure) {
3001 // Load JitScript
3002 loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), dest);
3003 if (failure) {
3004 branchIfScriptHasNoJitScript(dest, failure);
3006 loadJitScript(dest, dest);
3008 // Load BaselineScript
3009 loadPtr(Address(dest, JitScript::offsetOfBaselineScript()), dest);
3010 if (failure) {
3011 static_assert(BaselineDisabledScript == 0x1);
3012 branchPtr(Assembler::BelowOrEqual, dest, ImmWord(BaselineDisabledScript),
3013 failure);
3016 // Load Baseline jitcode
3017 loadPtr(Address(dest, BaselineScript::offsetOfMethod()), dest);
3018 loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
3021 void MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest) {
3022 if (framePtr != dest) {
3023 movePtr(framePtr, dest);
3025 subPtr(Imm32(BaselineFrame::Size()), dest);
3028 static const uint8_t* ContextInlinedICScriptPtr(CompileRuntime* rt) {
3029 return (static_cast<const uint8_t*>(rt->mainContextPtr()) +
3030 JSContext::offsetOfInlinedICScript());
3033 void MacroAssembler::storeICScriptInJSContext(Register icScript) {
3034 storePtr(icScript, AbsoluteAddress(ContextInlinedICScriptPtr(runtime())));
3037 void MacroAssembler::handleFailure() {
3038 // Re-entry code is irrelevant because the exception will leave the
3039 // running function and never come back
3040 TrampolinePtr excTail = runtime()->jitRuntime()->getExceptionTail();
3041 jump(excTail);
3044 void MacroAssembler::assumeUnreachable(const char* output) {
3045 #ifdef JS_MASM_VERBOSE
3046 if (!IsCompilingWasm()) {
3047 AllocatableRegisterSet regs(RegisterSet::Volatile());
3048 LiveRegisterSet save(regs.asLiveSet());
3049 PushRegsInMask(save);
3050 Register temp = regs.takeAnyGeneral();
3052 using Fn = void (*)(const char* output);
3053 setupUnalignedABICall(temp);
3054 movePtr(ImmPtr(output), temp);
3055 passABIArg(temp);
3056 callWithABI<Fn, AssumeUnreachable>(MoveOp::GENERAL,
3057 CheckUnsafeCallWithABI::DontCheckOther);
3059 PopRegsInMask(save);
3061 #endif
3063 breakpoint();
3066 void MacroAssembler::printf(const char* output) {
3067 #ifdef JS_MASM_VERBOSE
3068 AllocatableRegisterSet regs(RegisterSet::Volatile());
3069 LiveRegisterSet save(regs.asLiveSet());
3070 PushRegsInMask(save);
3072 Register temp = regs.takeAnyGeneral();
3074 using Fn = void (*)(const char* output);
3075 setupUnalignedABICall(temp);
3076 movePtr(ImmPtr(output), temp);
3077 passABIArg(temp);
3078 callWithABI<Fn, Printf0>();
3080 PopRegsInMask(save);
3081 #endif
3084 void MacroAssembler::printf(const char* output, Register value) {
3085 #ifdef JS_MASM_VERBOSE
3086 AllocatableRegisterSet regs(RegisterSet::Volatile());
3087 LiveRegisterSet save(regs.asLiveSet());
3088 PushRegsInMask(save);
3090 regs.takeUnchecked(value);
3092 Register temp = regs.takeAnyGeneral();
3094 using Fn = void (*)(const char* output, uintptr_t value);
3095 setupUnalignedABICall(temp);
3096 movePtr(ImmPtr(output), temp);
3097 passABIArg(temp);
3098 passABIArg(value);
3099 callWithABI<Fn, Printf1>();
3101 PopRegsInMask(save);
3102 #endif
3105 void MacroAssembler::convertInt32ValueToDouble(ValueOperand val) {
3106 Label done;
3107 branchTestInt32(Assembler::NotEqual, val, &done);
3108 unboxInt32(val, val.scratchReg());
3109 ScratchDoubleScope fpscratch(*this);
3110 convertInt32ToDouble(val.scratchReg(), fpscratch);
3111 boxDouble(fpscratch, val, fpscratch);
3112 bind(&done);
3115 void MacroAssembler::convertValueToFloatingPoint(ValueOperand value,
3116 FloatRegister output,
3117 Label* fail,
3118 MIRType outputType) {
3119 Label isDouble, isInt32, isBool, isNull, done;
3122 ScratchTagScope tag(*this, value);
3123 splitTagForTest(value, tag);
3125 branchTestDouble(Assembler::Equal, tag, &isDouble);
3126 branchTestInt32(Assembler::Equal, tag, &isInt32);
3127 branchTestBoolean(Assembler::Equal, tag, &isBool);
3128 branchTestNull(Assembler::Equal, tag, &isNull);
3129 branchTestUndefined(Assembler::NotEqual, tag, fail);
3132 // fall-through: undefined
3133 loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output,
3134 outputType);
3135 jump(&done);
3137 bind(&isNull);
3138 loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
3139 jump(&done);
3141 bind(&isBool);
3142 boolValueToFloatingPoint(value, output, outputType);
3143 jump(&done);
3145 bind(&isInt32);
3146 int32ValueToFloatingPoint(value, output, outputType);
3147 jump(&done);
3149 // On some non-multiAlias platforms, unboxDouble may use the scratch register,
3150 // so do not merge code paths here.
3151 bind(&isDouble);
3152 if (outputType == MIRType::Float32 && hasMultiAlias()) {
3153 ScratchDoubleScope tmp(*this);
3154 unboxDouble(value, tmp);
3155 convertDoubleToFloat32(tmp, output);
3156 } else {
3157 FloatRegister tmp = output.asDouble();
3158 unboxDouble(value, tmp);
3159 if (outputType == MIRType::Float32) {
3160 convertDoubleToFloat32(tmp, output);
3164 bind(&done);
3167 void MacroAssembler::outOfLineTruncateSlow(FloatRegister src, Register dest,
3168 bool widenFloatToDouble,
3169 bool compilingWasm,
3170 wasm::BytecodeOffset callOffset) {
3171 if (compilingWasm) {
3172 Push(InstanceReg);
3174 int32_t framePushedAfterInstance = framePushed();
3176 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
3177 defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
3178 defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
3179 ScratchDoubleScope fpscratch(*this);
3180 if (widenFloatToDouble) {
3181 convertFloat32ToDouble(src, fpscratch);
3182 src = fpscratch;
3184 #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
3185 FloatRegister srcSingle;
3186 if (widenFloatToDouble) {
3187 MOZ_ASSERT(src.isSingle());
3188 srcSingle = src;
3189 src = src.asDouble();
3190 Push(srcSingle);
3191 convertFloat32ToDouble(srcSingle, src);
3193 #else
3194 // Also see below
3195 MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
3196 #endif
3198 MOZ_ASSERT(src.isDouble());
3200 if (compilingWasm) {
3201 int32_t instanceOffset = framePushed() - framePushedAfterInstance;
3202 setupWasmABICall();
3203 passABIArg(src, MoveOp::DOUBLE);
3204 callWithABI(callOffset, wasm::SymbolicAddress::ToInt32,
3205 mozilla::Some(instanceOffset));
3206 } else {
3207 using Fn = int32_t (*)(double);
3208 setupUnalignedABICall(dest);
3209 passABIArg(src, MoveOp::DOUBLE);
3210 callWithABI<Fn, JS::ToInt32>(MoveOp::GENERAL,
3211 CheckUnsafeCallWithABI::DontCheckOther);
3213 storeCallInt32Result(dest);
3215 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
3216 defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
3217 defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
3218 // Nothing
3219 #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
3220 if (widenFloatToDouble) {
3221 Pop(srcSingle);
3223 #else
3224 MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
3225 #endif
3227 if (compilingWasm) {
3228 Pop(InstanceReg);
3232 void MacroAssembler::convertDoubleToInt(FloatRegister src, Register output,
3233 FloatRegister temp, Label* truncateFail,
3234 Label* fail,
3235 IntConversionBehavior behavior) {
3236 switch (behavior) {
3237 case IntConversionBehavior::Normal:
3238 case IntConversionBehavior::NegativeZeroCheck:
3239 convertDoubleToInt32(
3240 src, output, fail,
3241 behavior == IntConversionBehavior::NegativeZeroCheck);
3242 break;
3243 case IntConversionBehavior::Truncate:
3244 branchTruncateDoubleMaybeModUint32(src, output,
3245 truncateFail ? truncateFail : fail);
3246 break;
3247 case IntConversionBehavior::ClampToUint8:
3248 // Clamping clobbers the input register, so use a temp.
3249 if (src != temp) {
3250 moveDouble(src, temp);
3252 clampDoubleToUint8(temp, output);
3253 break;
3257 void MacroAssembler::convertValueToInt(
3258 ValueOperand value, Label* handleStringEntry, Label* handleStringRejoin,
3259 Label* truncateDoubleSlow, Register stringReg, FloatRegister temp,
3260 Register output, Label* fail, IntConversionBehavior behavior,
3261 IntConversionInputKind conversion) {
3262 Label done, isInt32, isBool, isDouble, isNull, isString;
3264 bool handleStrings = (behavior == IntConversionBehavior::Truncate ||
3265 behavior == IntConversionBehavior::ClampToUint8) &&
3266 handleStringEntry && handleStringRejoin;
3268 MOZ_ASSERT_IF(handleStrings, conversion == IntConversionInputKind::Any);
3271 ScratchTagScope tag(*this, value);
3272 splitTagForTest(value, tag);
3274 branchTestInt32(Equal, tag, &isInt32);
3275 if (conversion == IntConversionInputKind::Any ||
3276 conversion == IntConversionInputKind::NumbersOrBoolsOnly) {
3277 branchTestBoolean(Equal, tag, &isBool);
3279 branchTestDouble(Equal, tag, &isDouble);
3281 if (conversion == IntConversionInputKind::Any) {
3282 // If we are not truncating, we fail for anything that's not
3283 // null. Otherwise we might be able to handle strings and undefined.
3284 switch (behavior) {
3285 case IntConversionBehavior::Normal:
3286 case IntConversionBehavior::NegativeZeroCheck:
3287 branchTestNull(Assembler::NotEqual, tag, fail);
3288 break;
3290 case IntConversionBehavior::Truncate:
3291 case IntConversionBehavior::ClampToUint8:
3292 branchTestNull(Equal, tag, &isNull);
3293 if (handleStrings) {
3294 branchTestString(Equal, tag, &isString);
3296 branchTestUndefined(Assembler::NotEqual, tag, fail);
3297 break;
3299 } else {
3300 jump(fail);
3304 // The value is null or undefined in truncation contexts - just emit 0.
3305 if (conversion == IntConversionInputKind::Any) {
3306 if (isNull.used()) {
3307 bind(&isNull);
3309 mov(ImmWord(0), output);
3310 jump(&done);
3313 // |output| needs to be different from |stringReg| to load string indices.
3314 bool handleStringIndices = handleStrings && output != stringReg;
3316 // First try loading a string index. If that fails, try converting a string
3317 // into a double, then jump to the double case.
3318 Label handleStringIndex;
3319 if (handleStrings) {
3320 bind(&isString);
3321 unboxString(value, stringReg);
3322 if (handleStringIndices) {
3323 loadStringIndexValue(stringReg, output, handleStringEntry);
3324 jump(&handleStringIndex);
3325 } else {
3326 jump(handleStringEntry);
3330 // Try converting double into integer.
3331 if (isDouble.used() || handleStrings) {
3332 if (isDouble.used()) {
3333 bind(&isDouble);
3334 unboxDouble(value, temp);
3337 if (handleStrings) {
3338 bind(handleStringRejoin);
3341 convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
3342 jump(&done);
3345 // Just unbox a bool, the result is 0 or 1.
3346 if (isBool.used()) {
3347 bind(&isBool);
3348 unboxBoolean(value, output);
3349 jump(&done);
3352 // Integers can be unboxed.
3353 if (isInt32.used() || handleStringIndices) {
3354 if (isInt32.used()) {
3355 bind(&isInt32);
3356 unboxInt32(value, output);
3359 if (handleStringIndices) {
3360 bind(&handleStringIndex);
3363 if (behavior == IntConversionBehavior::ClampToUint8) {
3364 clampIntToUint8(output);
3368 bind(&done);
3371 void MacroAssembler::finish() {
3372 if (failureLabel_.used()) {
3373 bind(&failureLabel_);
3374 handleFailure();
3377 MacroAssemblerSpecific::finish();
3379 MOZ_RELEASE_ASSERT(
3380 size() <= MaxCodeBytesPerProcess,
3381 "AssemblerBuffer should ensure we don't exceed MaxCodeBytesPerProcess");
3383 if (bytesNeeded() > MaxCodeBytesPerProcess) {
3384 setOOM();
3388 void MacroAssembler::link(JitCode* code) {
3389 MOZ_ASSERT(!oom());
3390 linkProfilerCallSites(code);
3393 MacroAssembler::AutoProfilerCallInstrumentation::
3394 AutoProfilerCallInstrumentation(MacroAssembler& masm) {
3395 if (!masm.emitProfilingInstrumentation_) {
3396 return;
3399 Register reg = CallTempReg0;
3400 Register reg2 = CallTempReg1;
3401 masm.push(reg);
3402 masm.push(reg2);
3404 CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
3405 masm.loadJSContext(reg2);
3406 masm.loadPtr(Address(reg2, offsetof(JSContext, profilingActivation_)), reg2);
3407 masm.storePtr(reg,
3408 Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));
3410 masm.appendProfilerCallSite(label);
3412 masm.pop(reg2);
3413 masm.pop(reg);
3416 void MacroAssembler::linkProfilerCallSites(JitCode* code) {
3417 for (size_t i = 0; i < profilerCallSites_.length(); i++) {
3418 CodeOffset offset = profilerCallSites_[i];
3419 CodeLocationLabel location(code, offset);
3420 PatchDataWithValueCheck(location, ImmPtr(location.raw()),
3421 ImmPtr((void*)-1));
3425 void MacroAssembler::alignJitStackBasedOnNArgs(Register nargs,
3426 bool countIncludesThis) {
3427 // The stack should already be aligned to the size of a value.
3428 assertStackAlignment(sizeof(Value), 0);
3430 static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
3431 "JitStackValueAlignment is either 1 or 2.");
3432 if (JitStackValueAlignment == 1) {
3433 return;
3435 // A jit frame is composed of the following:
3437 // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
3438 // \________JitFrameLayout_________/
3439 // (The stack grows this way --->)
3441 // We want to ensure that |raddr|, the return address, is 16-byte aligned.
3442 // (Note: if 8-byte alignment was sufficient, we would have already
3443 // returned above.)
3445 // JitFrameLayout does not affect the alignment, so we can ignore it.
3446 static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
3447 "JitFrameLayout doesn't affect stack alignment");
3449 // Therefore, we need to ensure that |this| is aligned.
3450 // This implies that |argN| must be aligned if N is even,
3451 // and offset by |sizeof(Value)| if N is odd.
3453 // Depending on the context of the caller, it may be easier to pass in a
3454 // register that has already been modified to include |this|. If that is the
3455 // case, we want to flip the direction of the test.
3456 Assembler::Condition condition =
3457 countIncludesThis ? Assembler::NonZero : Assembler::Zero;
3459 Label alignmentIsOffset, end;
3460 branchTestPtr(condition, nargs, Imm32(1), &alignmentIsOffset);
3462 // |argN| should be aligned to 16 bytes.
3463 andToStackPtr(Imm32(~(JitStackAlignment - 1)));
3464 jump(&end);
3466 // |argN| should be offset by 8 bytes from 16-byte alignment.
3467 // We already know that it is 8-byte aligned, so the only possibilities are:
3468 // a) It is 16-byte aligned, and we must offset it by 8 bytes.
3469 // b) It is not 16-byte aligned, and therefore already has the right offset.
3470 // Therefore, we test to see if it is 16-byte aligned, and adjust it if it is.
3471 bind(&alignmentIsOffset);
3472 branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
3473 subFromStackPtr(Imm32(sizeof(Value)));
3475 bind(&end);
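// Illustrative sketch (not part of the MacroAssembler implementation): the
// alignment rule derived above, for JitStackValueAlignment == 2, expressed as
// the padding needed below a 16-byte-aligned stack pointer. The helper name is
// hypothetical; it assumes <cstdint>.
static inline uint32_t JitStackPaddingSketch(uint32_t argc,
                                             bool countIncludesThis) {
  uint32_t valuesPushed = argc + !countIncludesThis;  // arguments plus |this|
  // An even number of 8-byte Values leaves |this| (and hence the return
  // address, since sizeof(JitFrameLayout) is a multiple of the alignment)
  // 16-byte aligned; an odd number needs one extra 8-byte padding slot.
  return (valuesPushed % 2 == 0) ? 0 : 8;
}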
3478 void MacroAssembler::alignJitStackBasedOnNArgs(uint32_t argc,
3479 bool countIncludesThis) {
3480 // The stack should already be aligned to the size of a value.
3481 assertStackAlignment(sizeof(Value), 0);
3483 static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
3484 "JitStackValueAlignment is either 1 or 2.");
3485 if (JitStackValueAlignment == 1) {
3486 return;
3489 // See above for full explanation.
3490 uint32_t nArgs = argc + !countIncludesThis;
3491 if (nArgs % 2 == 0) {
3492 // |argN| should be 16-byte aligned
3493 andToStackPtr(Imm32(~(JitStackAlignment - 1)));
3494 } else {
3495     // |argN| should be offset by 8 bytes from 16-byte alignment, so only
3496     // subtract sizeof(Value) when the stack is currently 16-byte aligned.
3497 Label end;
3498 branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
3499 subFromStackPtr(Imm32(sizeof(Value)));
3500 bind(&end);
3501 assertStackAlignment(JitStackAlignment, sizeof(Value));
3505 // ===============================================================
3507 MacroAssembler::MacroAssembler(TempAllocator& alloc,
3508 CompileRuntime* maybeRuntime,
3509 CompileRealm* maybeRealm)
3510 : maybeRuntime_(maybeRuntime),
3511 maybeRealm_(maybeRealm),
3512 wasmMaxOffsetGuardLimit_(0),
3513 framePushed_(0),
3514 #ifdef DEBUG
3515 inCall_(false),
3516 #endif
3517 dynamicAlignment_(false),
3518 emitProfilingInstrumentation_(false) {
3519 moveResolver_.setAllocator(alloc);
3522 StackMacroAssembler::StackMacroAssembler(JSContext* cx, TempAllocator& alloc)
3523 : MacroAssembler(alloc, CompileRuntime::get(cx->runtime()),
3524 CompileRealm::get(cx->realm())) {}
3526 IonHeapMacroAssembler::IonHeapMacroAssembler(TempAllocator& alloc,
3527 CompileRealm* realm)
3528 : MacroAssembler(alloc, realm->runtime(), realm) {
3529 MOZ_ASSERT(CurrentThreadIsIonCompiling());
3532 WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc, bool limitedSize)
3533 : MacroAssembler(alloc) {
3534 #if defined(JS_CODEGEN_ARM64)
3535 // Stubs + builtins + the baseline compiler all require the native SP,
3536 // not the PSP.
3537 SetStackPointer64(sp);
3538 #endif
3539 if (!limitedSize) {
3540 setUnlimitedBuffer();
3544 WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc,
3545 const wasm::ModuleEnvironment& env,
3546 bool limitedSize)
3547 : MacroAssembler(alloc) {
3548 #if defined(JS_CODEGEN_ARM64)
3549 // Stubs + builtins + the baseline compiler all require the native SP,
3550 // not the PSP.
3551 SetStackPointer64(sp);
3552 #endif
3553 setWasmMaxOffsetGuardLimit(
3554 wasm::GetMaxOffsetGuardLimit(env.hugeMemoryEnabled()));
3555 if (!limitedSize) {
3556 setUnlimitedBuffer();
3560 bool MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr,
3561 AutoSaveLiveRegisters& save) {
3562 return buildOOLFakeExitFrame(fakeReturnAddr);
3565 #ifndef JS_CODEGEN_ARM64
3566 void MacroAssembler::subFromStackPtr(Register reg) {
3567 subPtr(reg, getStackPointer());
3569 #endif // JS_CODEGEN_ARM64
3571 //{{{ check_macroassembler_style
3572 // ===============================================================
3573 // Stack manipulation functions.
3575 void MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set) {
3576 PushRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
3579 void MacroAssembler::PopRegsInMask(LiveRegisterSet set) {
3580 PopRegsInMaskIgnore(set, LiveRegisterSet());
3583 void MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set) {
3584 PopRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
3587 void MacroAssembler::Push(PropertyKey key, Register scratchReg) {
3588 if (key.isGCThing()) {
3589 // If we're pushing a gcthing, then we can't just push the tagged key
3590 // value since the GC won't have any idea that the push instruction
3591 // carries a reference to a gcthing. Need to unpack the pointer,
3592 // push it using ImmGCPtr, and then rematerialize the PropertyKey at
3593 // runtime.
3595 if (key.isString()) {
3596 JSString* str = key.toString();
3597 MOZ_ASSERT((uintptr_t(str) & PropertyKey::TypeMask) == 0);
3598 static_assert(PropertyKey::StringTypeTag == 0,
3599 "need to orPtr StringTypeTag if it's not 0");
3600 Push(ImmGCPtr(str));
3601 } else {
3602 MOZ_ASSERT(key.isSymbol());
3603 movePropertyKey(key, scratchReg);
3604 Push(scratchReg);
3606 } else {
3607 MOZ_ASSERT(key.isInt());
3608 Push(ImmWord(key.asRawBits()));
3612 void MacroAssembler::movePropertyKey(PropertyKey key, Register dest) {
3613 if (key.isGCThing()) {
3614 // See comment in |Push(PropertyKey, ...)| above for an explanation.
3615 if (key.isString()) {
3616 JSString* str = key.toString();
3617 MOZ_ASSERT((uintptr_t(str) & PropertyKey::TypeMask) == 0);
3618 static_assert(PropertyKey::StringTypeTag == 0,
3619 "need to orPtr JSID_TYPE_STRING tag if it's not 0");
3620 movePtr(ImmGCPtr(str), dest);
3621 } else {
3622 MOZ_ASSERT(key.isSymbol());
3623 JS::Symbol* sym = key.toSymbol();
3624 movePtr(ImmGCPtr(sym), dest);
3625 orPtr(Imm32(PropertyKey::SymbolTypeTag), dest);
3627 } else {
3628 MOZ_ASSERT(key.isInt());
3629 movePtr(ImmWord(key.asRawBits()), dest);
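// Illustrative sketch (not part of the MacroAssembler implementation): how the
// tagged key bits are rebuilt from the untagged GC pointer, mirroring the
// movePtr/orPtr sequence above. The helper names are hypothetical; the tag
// constants are whatever PropertyKey defines.
static inline uintptr_t StringKeyBitsSketch(JSString* str) {
  // StringTypeTag is zero, so the bare pointer already encodes the key.
  return uintptr_t(str) | PropertyKey::StringTypeTag;
}
static inline uintptr_t SymbolKeyBitsSketch(JS::Symbol* sym) {
  // Symbol keys need their type tag or-ed into the low bits of the pointer.
  return uintptr_t(sym) | PropertyKey::SymbolTypeTag;
}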
3633 void MacroAssembler::Push(TypedOrValueRegister v) {
3634 if (v.hasValue()) {
3635 Push(v.valueReg());
3636 } else if (IsFloatingPointType(v.type())) {
3637 FloatRegister reg = v.typedReg().fpu();
3638 if (v.type() == MIRType::Float32) {
3639 ScratchDoubleScope fpscratch(*this);
3640 convertFloat32ToDouble(reg, fpscratch);
3641 PushBoxed(fpscratch);
3642 } else {
3643 PushBoxed(reg);
3645 } else {
3646 Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
3650 void MacroAssembler::Push(const ConstantOrRegister& v) {
3651 if (v.constant()) {
3652 Push(v.value());
3653 } else {
3654 Push(v.reg());
3658 void MacroAssembler::Push(const Address& addr) {
3659 push(addr);
3660 framePushed_ += sizeof(uintptr_t);
3663 void MacroAssembler::Push(const ValueOperand& val) {
3664 pushValue(val);
3665 framePushed_ += sizeof(Value);
3668 void MacroAssembler::Push(const Value& val) {
3669 pushValue(val);
3670 framePushed_ += sizeof(Value);
3673 void MacroAssembler::Push(JSValueType type, Register reg) {
3674 pushValue(type, reg);
3675 framePushed_ += sizeof(Value);
3678 void MacroAssembler::Push(const Register64 reg) {
3679 #if JS_BITS_PER_WORD == 64
3680 Push(reg.reg);
3681 #else
3682 MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Big-endian not supported.");
3683 Push(reg.high);
3684 Push(reg.low);
3685 #endif
3688 void MacroAssembler::PushEmptyRooted(VMFunctionData::RootType rootType) {
3689 switch (rootType) {
3690 case VMFunctionData::RootNone:
3691 MOZ_CRASH("Handle must have root type");
3692 case VMFunctionData::RootObject:
3693 case VMFunctionData::RootString:
3694 case VMFunctionData::RootCell:
3695 case VMFunctionData::RootBigInt:
3696 Push(ImmPtr(nullptr));
3697 break;
3698 case VMFunctionData::RootValue:
3699 Push(UndefinedValue());
3700 break;
3701 case VMFunctionData::RootId:
3702 Push(ImmWord(JS::PropertyKey::Void().asRawBits()));
3703 break;
3707 void MacroAssembler::popRooted(VMFunctionData::RootType rootType,
3708 Register cellReg, const ValueOperand& valueReg) {
3709 switch (rootType) {
3710 case VMFunctionData::RootNone:
3711 MOZ_CRASH("Handle must have root type");
3712 case VMFunctionData::RootObject:
3713 case VMFunctionData::RootString:
3714 case VMFunctionData::RootCell:
3715 case VMFunctionData::RootId:
3716 case VMFunctionData::RootBigInt:
3717 Pop(cellReg);
3718 break;
3719 case VMFunctionData::RootValue:
3720 Pop(valueReg);
3721 break;
3725 void MacroAssembler::adjustStack(int amount) {
3726 if (amount > 0) {
3727 freeStack(amount);
3728 } else if (amount < 0) {
3729 reserveStack(-amount);
3733 void MacroAssembler::freeStack(uint32_t amount) {
3734 MOZ_ASSERT(amount <= framePushed_);
3735 if (amount) {
3736 addToStackPtr(Imm32(amount));
3738 framePushed_ -= amount;
3741 void MacroAssembler::freeStack(Register amount) { addToStackPtr(amount); }
3743 // ===============================================================
3744 // ABI function calls.
3745 template <class ABIArgGeneratorT>
3746 void MacroAssembler::setupABICallHelper() {
3747 #ifdef DEBUG
3748 MOZ_ASSERT(!inCall_);
3749 inCall_ = true;
3750 #endif
3752 #ifdef JS_SIMULATOR
3753 signature_ = 0;
3754 #endif
3756 // Reinitialize the ABIArg generator.
3757 abiArgs_ = ABIArgGeneratorT();
3759 #if defined(JS_CODEGEN_ARM)
3760 // On ARM, we need to know what ABI we are using, either in the
3761 // simulator, or based on the configure flags.
3762 # if defined(JS_SIMULATOR_ARM)
3763 abiArgs_.setUseHardFp(UseHardFpABI());
3764 # elif defined(JS_CODEGEN_ARM_HARDFP)
3765 abiArgs_.setUseHardFp(true);
3766 # else
3767 abiArgs_.setUseHardFp(false);
3768 # endif
3769 #endif
3771 #if defined(JS_CODEGEN_MIPS32)
3772   // On MIPS, the system ABI uses general-purpose register pairs to encode
3773   // double arguments, after one or two integer-like arguments. Unfortunately,
3774   // the Lowering phase cannot express this at the moment, so we enforce the
3775   // system ABI here.
3776 abiArgs_.enforceO32ABI();
3777 #endif
3780 void MacroAssembler::setupNativeABICall() {
3781 setupABICallHelper<ABIArgGenerator>();
3784 void MacroAssembler::setupWasmABICall() {
3785 MOZ_ASSERT(IsCompilingWasm(), "non-wasm should use setupAlignedABICall");
3786 setupABICallHelper<WasmABIArgGenerator>();
3788 #if defined(JS_CODEGEN_ARM)
3789 // The builtin thunk does the FP -> GPR moving on soft-FP, so
3790 // use hard fp unconditionally.
3791 abiArgs_.setUseHardFp(true);
3792 #endif
3793 dynamicAlignment_ = false;
3796 void MacroAssembler::setupAlignedABICall() {
3797 MOZ_ASSERT(!IsCompilingWasm(), "wasm should use setupWasmABICall");
3798 setupNativeABICall();
3799 dynamicAlignment_ = false;
3802 void MacroAssembler::passABIArg(const MoveOperand& from, MoveOp::Type type) {
3803 MOZ_ASSERT(inCall_);
3804 appendSignatureType(type);
3806 ABIArg arg;
3807 switch (type) {
3808 case MoveOp::FLOAT32:
3809 arg = abiArgs_.next(MIRType::Float32);
3810 break;
3811 case MoveOp::DOUBLE:
3812 arg = abiArgs_.next(MIRType::Double);
3813 break;
3814 case MoveOp::GENERAL:
3815 arg = abiArgs_.next(MIRType::Pointer);
3816 break;
3817 default:
3818 MOZ_CRASH("Unexpected argument type");
3821 MoveOperand to(*this, arg);
3822 if (from == to) {
3823 return;
3826 if (oom()) {
3827 return;
3829 propagateOOM(moveResolver_.addMove(from, to, type));
3832 void MacroAssembler::callWithABINoProfiler(void* fun, MoveOp::Type result,
3833 CheckUnsafeCallWithABI check) {
3834 appendSignatureType(result);
3835 #ifdef JS_SIMULATOR
3836 fun = Simulator::RedirectNativeFunction(fun, signature());
3837 #endif
3839 uint32_t stackAdjust;
3840 callWithABIPre(&stackAdjust);
3842 #ifdef DEBUG
3843 if (check == CheckUnsafeCallWithABI::Check) {
3844 push(ReturnReg);
3845 loadJSContext(ReturnReg);
3846 Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
3847 store32(Imm32(1), flagAddr);
3848 pop(ReturnReg);
3849 // On arm64, SP may be < PSP now (that's OK).
3850     // e.g. testcase: tests/bug1375074.js
3852 #endif
3854 call(ImmPtr(fun));
3856 callWithABIPost(stackAdjust, result);
3858 #ifdef DEBUG
3859 if (check == CheckUnsafeCallWithABI::Check) {
3860 Label ok;
3861 push(ReturnReg);
3862 loadJSContext(ReturnReg);
3863 Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
3864 branch32(Assembler::Equal, flagAddr, Imm32(0), &ok);
3865 assumeUnreachable("callWithABI: callee did not use AutoUnsafeCallWithABI");
3866 bind(&ok);
3867 pop(ReturnReg);
3868 // On arm64, SP may be < PSP now (that's OK).
3869     // e.g. testcase: tests/bug1375074.js
3871 #endif
3874 CodeOffset MacroAssembler::callWithABI(wasm::BytecodeOffset bytecode,
3875 wasm::SymbolicAddress imm,
3876 mozilla::Maybe<int32_t> instanceOffset,
3877 MoveOp::Type result) {
3878 MOZ_ASSERT(wasm::NeedsBuiltinThunk(imm));
3880 uint32_t stackAdjust;
3881 callWithABIPre(&stackAdjust, /* callFromWasm = */ true);
3883 // The instance register is used in builtin thunks and must be set.
3884 if (instanceOffset) {
3885 loadPtr(Address(getStackPointer(), *instanceOffset + stackAdjust),
3886 InstanceReg);
3887 } else {
3888 MOZ_CRASH("instanceOffset is Nothing only for unsupported abi calls.");
3890 CodeOffset raOffset = call(
3891 wasm::CallSiteDesc(bytecode.offset(), wasm::CallSite::Symbolic), imm);
3893 callWithABIPost(stackAdjust, result, /* callFromWasm = */ true);
3895 return raOffset;
3898 void MacroAssembler::callDebugWithABI(wasm::SymbolicAddress imm,
3899 MoveOp::Type result) {
3900 MOZ_ASSERT(!wasm::NeedsBuiltinThunk(imm));
3901 uint32_t stackAdjust;
3902 callWithABIPre(&stackAdjust, /* callFromWasm = */ false);
3903 call(imm);
3904 callWithABIPost(stackAdjust, result, /* callFromWasm = */ false);
3907 // ===============================================================
3908 // Exit frame footer.
3910 void MacroAssembler::linkExitFrame(Register cxreg, Register scratch) {
3911 loadPtr(Address(cxreg, JSContext::offsetOfActivation()), scratch);
3912 storeStackPtr(Address(scratch, JitActivation::offsetOfPackedExitFP()));
3915 // ===============================================================
3916 // Simple value-shuffling helpers, to hide MoveResolver verbosity
3917 // in common cases.
3919 void MacroAssembler::moveRegPair(Register src0, Register src1, Register dst0,
3920 Register dst1, MoveOp::Type type) {
3921 MoveResolver& moves = moveResolver();
3922 if (src0 != dst0) {
3923 propagateOOM(moves.addMove(MoveOperand(src0), MoveOperand(dst0), type));
3925 if (src1 != dst1) {
3926 propagateOOM(moves.addMove(MoveOperand(src1), MoveOperand(dst1), type));
3928 propagateOOM(moves.resolve());
3929 if (oom()) {
3930 return;
3933 MoveEmitter emitter(*this);
3934 emitter.emit(moves);
3935 emitter.finish();
3938 // ===============================================================
3939 // Arithmetic functions
3941 void MacroAssembler::pow32(Register base, Register power, Register dest,
3942 Register temp1, Register temp2, Label* onOver) {
3943 // Inline int32-specialized implementation of js::powi with overflow
3944 // detection.
3946 move32(Imm32(1), dest); // result = 1
3948 // x^y where x == 1 returns 1 for any y.
3949 Label done;
3950 branch32(Assembler::Equal, base, Imm32(1), &done);
3952 move32(base, temp1); // runningSquare = x
3953 move32(power, temp2); // n = y
3955   // x^y where y < 0 returns a non-int32 value for any x != 1, except when y is
3956   // so large in magnitude that the result underflows and no longer has a
3957   // fractional part. We can't easily determine when y is too large, so we bail
3958   // here.
3959 // Note: it's important for this condition to match the code in CacheIR.cpp
3960 // (CanAttachInt32Pow) to prevent failure loops.
3961 Label start;
3962 branchTest32(Assembler::NotSigned, power, power, &start);
3963 jump(onOver);
3965 Label loop;
3966 bind(&loop);
3968 // runningSquare *= runningSquare
3969 branchMul32(Assembler::Overflow, temp1, temp1, onOver);
3971 bind(&start);
3973 // if ((n & 1) != 0) result *= runningSquare
3974 Label even;
3975 branchTest32(Assembler::Zero, temp2, Imm32(1), &even);
3976 branchMul32(Assembler::Overflow, temp1, dest, onOver);
3977 bind(&even);
3979 // n >>= 1
3980 // if (n == 0) return result
3981 branchRshift32(Assembler::NonZero, Imm32(1), temp2, &loop);
3983 bind(&done);
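// Illustrative sketch (not part of the MacroAssembler implementation): the
// square-and-multiply algorithm inlined above, with the same overflow and
// negative-exponent bailouts. The helper name is hypothetical; it assumes
// <cstdint> and <optional>, and uses the GCC/Clang __builtin_mul_overflow
// intrinsic for the checked multiplications.
static inline std::optional<int32_t> Pow32Sketch(int32_t base, int32_t power) {
  if (base == 1) {
    return 1;  // 1^y == 1 for any y
  }
  if (power < 0) {
    return std::nullopt;  // bail: the result is generally fractional
  }
  int32_t result = 1;
  int32_t runningSquare = base;
  uint32_t n = uint32_t(power);
  while (true) {
    if (n & 1) {
      if (__builtin_mul_overflow(result, runningSquare, &result)) {
        return std::nullopt;  // overflow: bail to |onOver|
      }
    }
    n >>= 1;
    if (n == 0) {
      return result;
    }
    if (__builtin_mul_overflow(runningSquare, runningSquare, &runningSquare)) {
      return std::nullopt;  // overflow: bail to |onOver|
    }
  }
}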
3986 void MacroAssembler::signInt32(Register input, Register output) {
3987 MOZ_ASSERT(input != output);
3989 Label done;
3990 move32(input, output);
3991 rshift32Arithmetic(Imm32(31), output);
3992 branch32(Assembler::LessThanOrEqual, input, Imm32(0), &done);
3993 move32(Imm32(1), output);
3994 bind(&done);
3997 void MacroAssembler::signDouble(FloatRegister input, FloatRegister output) {
3998 MOZ_ASSERT(input != output);
4000 Label done, zeroOrNaN, negative;
4001 loadConstantDouble(0.0, output);
4002 branchDouble(Assembler::DoubleEqualOrUnordered, input, output, &zeroOrNaN);
4003 branchDouble(Assembler::DoubleLessThan, input, output, &negative);
4005 loadConstantDouble(1.0, output);
4006 jump(&done);
4008 bind(&negative);
4009 loadConstantDouble(-1.0, output);
4010 jump(&done);
4012 bind(&zeroOrNaN);
4013 moveDouble(input, output);
4015 bind(&done);
4018 void MacroAssembler::signDoubleToInt32(FloatRegister input, Register output,
4019 FloatRegister temp, Label* fail) {
4020 MOZ_ASSERT(input != temp);
4022 Label done, zeroOrNaN, negative;
4023 loadConstantDouble(0.0, temp);
4024 branchDouble(Assembler::DoubleEqualOrUnordered, input, temp, &zeroOrNaN);
4025 branchDouble(Assembler::DoubleLessThan, input, temp, &negative);
4027 move32(Imm32(1), output);
4028 jump(&done);
4030 bind(&negative);
4031 move32(Imm32(-1), output);
4032 jump(&done);
4034 // Fail for NaN and negative zero.
4035 bind(&zeroOrNaN);
4036 branchDouble(Assembler::DoubleUnordered, input, input, fail);
4038 // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
4039 // is -Infinity instead of Infinity.
4040 loadConstantDouble(1.0, temp);
4041 divDouble(input, temp);
4042 branchDouble(Assembler::DoubleLessThan, temp, input, fail);
4043 move32(Imm32(0), output);
4045 bind(&done);
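// Illustrative sketch (not part of the MacroAssembler implementation): what
// signDoubleToInt32 computes, using std::signbit instead of the 1.0/x trick
// used above to detect -0.0. The helper name is hypothetical; it assumes
// <cmath> and <cstdint>, and returning false stands in for jumping to |fail|.
static inline bool SignDoubleToInt32Sketch(double d, int32_t* out) {
  if (std::isnan(d)) {
    return false;  // NaN bails
  }
  if (d == 0.0) {
    if (std::signbit(d)) {
      return false;  // -0.0 bails (it is not an int32)
    }
    *out = 0;
    return true;
  }
  *out = d < 0.0 ? -1 : 1;
  return true;
}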
4048 void MacroAssembler::randomDouble(Register rng, FloatRegister dest,
4049 Register64 temp0, Register64 temp1) {
4050 using mozilla::non_crypto::XorShift128PlusRNG;
4052 static_assert(
4053 sizeof(XorShift128PlusRNG) == 2 * sizeof(uint64_t),
4054 "Code below assumes XorShift128PlusRNG contains two uint64_t values");
4056 Address state0Addr(rng, XorShift128PlusRNG::offsetOfState0());
4057 Address state1Addr(rng, XorShift128PlusRNG::offsetOfState1());
4059 Register64 s0Reg = temp0;
4060 Register64 s1Reg = temp1;
4062 // uint64_t s1 = mState[0];
4063 load64(state0Addr, s1Reg);
4065 // s1 ^= s1 << 23;
4066 move64(s1Reg, s0Reg);
4067 lshift64(Imm32(23), s1Reg);
4068 xor64(s0Reg, s1Reg);
4070 // s1 ^= s1 >> 17
4071 move64(s1Reg, s0Reg);
4072 rshift64(Imm32(17), s1Reg);
4073 xor64(s0Reg, s1Reg);
4075 // const uint64_t s0 = mState[1];
4076 load64(state1Addr, s0Reg);
4078 // mState[0] = s0;
4079 store64(s0Reg, state0Addr);
4081 // s1 ^= s0
4082 xor64(s0Reg, s1Reg);
4084 // s1 ^= s0 >> 26
4085 rshift64(Imm32(26), s0Reg);
4086 xor64(s0Reg, s1Reg);
4088 // mState[1] = s1
4089 store64(s1Reg, state1Addr);
4091 // s1 += mState[0]
4092 load64(state0Addr, s0Reg);
4093 add64(s0Reg, s1Reg);
4095 // See comment in XorShift128PlusRNG::nextDouble().
4096 static constexpr int MantissaBits =
4097 mozilla::FloatingPoint<double>::kExponentShift + 1;
4098 static constexpr double ScaleInv = double(1) / (1ULL << MantissaBits);
4100 and64(Imm64((1ULL << MantissaBits) - 1), s1Reg);
4102 // Note: we know s1Reg isn't signed after the and64 so we can use the faster
4103 // convertInt64ToDouble instead of convertUInt64ToDouble.
4104 convertInt64ToDouble(s1Reg, dest);
4106 // dest *= ScaleInv
4107 mulDoublePtr(ImmPtr(&ScaleInv), s0Reg.scratchReg(), dest);
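// Illustrative sketch (not part of the MacroAssembler implementation): the
// scalar xorshift128+ step and double conversion emitted above. The helper
// name is hypothetical; it assumes <cstdint>, mutates the two-word RNG state
// in place, and returns a double in [0, 1).
static inline double RandomDoubleSketch(uint64_t state[2]) {
  uint64_t s1 = state[0];
  const uint64_t s0 = state[1];
  state[0] = s0;
  s1 ^= s1 << 23;
  s1 ^= s1 >> 17;
  s1 ^= s0;
  s1 ^= s0 >> 26;
  state[1] = s1;
  // Keep the low 53 bits (they fit a double's mantissa exactly) and scale
  // into [0, 1).
  constexpr int kMantissaBits = 53;
  uint64_t bits = (s0 + s1) & ((uint64_t(1) << kMantissaBits) - 1);
  return double(bits) * (1.0 / double(uint64_t(1) << kMantissaBits));
}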
4110 void MacroAssembler::sameValueDouble(FloatRegister left, FloatRegister right,
4111 FloatRegister temp, Register dest) {
4112 Label nonEqual, isSameValue, isNotSameValue;
4113 branchDouble(Assembler::DoubleNotEqualOrUnordered, left, right, &nonEqual);
4115 // First, test for being equal to 0.0, which also includes -0.0.
4116 loadConstantDouble(0.0, temp);
4117 branchDouble(Assembler::DoubleNotEqual, left, temp, &isSameValue);
4119 // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
4120 // is -Infinity instead of Infinity.
4121 Label isNegInf;
4122 loadConstantDouble(1.0, temp);
4123 divDouble(left, temp);
4124 branchDouble(Assembler::DoubleLessThan, temp, left, &isNegInf);
4126 loadConstantDouble(1.0, temp);
4127 divDouble(right, temp);
4128 branchDouble(Assembler::DoubleGreaterThan, temp, right, &isSameValue);
4129 jump(&isNotSameValue);
4131 bind(&isNegInf);
4133 loadConstantDouble(1.0, temp);
4134 divDouble(right, temp);
4135 branchDouble(Assembler::DoubleLessThan, temp, right, &isSameValue);
4136 jump(&isNotSameValue);
4139 bind(&nonEqual);
4141 // Test if both values are NaN.
4142 branchDouble(Assembler::DoubleOrdered, left, left, &isNotSameValue);
4143 branchDouble(Assembler::DoubleOrdered, right, right, &isNotSameValue);
4146 Label done;
4147 bind(&isSameValue);
4148 move32(Imm32(1), dest);
4149 jump(&done);
4151 bind(&isNotSameValue);
4152 move32(Imm32(0), dest);
4154 bind(&done);
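// Illustrative sketch (not part of the MacroAssembler implementation): the
// SameValue relation on doubles that the code above computes, written with
// std::signbit instead of the 1.0/x == -Infinity trick. The helper name is
// hypothetical; it assumes <cmath>.
static inline bool SameValueDoubleSketch(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) {
    return true;  // every NaN is the same value as every other NaN
  }
  if (a != b) {
    return false;
  }
  // a == b: only +0.0 vs -0.0 still needs to be distinguished.
  return std::signbit(a) == std::signbit(b);
}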
4157 void MacroAssembler::minMaxArrayInt32(Register array, Register result,
4158 Register temp1, Register temp2,
4159 Register temp3, bool isMax, Label* fail) {
4160 // array must be a packed array. Load its elements.
4161 Register elements = temp1;
4162 loadPtr(Address(array, NativeObject::offsetOfElements()), elements);
4164 // Load the length and guard that it is non-zero.
4165 Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
4166 load32(lengthAddr, temp3);
4167 branchTest32(Assembler::Zero, temp3, temp3, fail);
4169 // Compute the address of the last element.
4170 Register elementsEnd = temp2;
4171 BaseObjectElementIndex elementsEndAddr(elements, temp3,
4172 -int32_t(sizeof(Value)));
4173 computeEffectiveAddress(elementsEndAddr, elementsEnd);
4175 // Load the first element into result.
4176 fallibleUnboxInt32(Address(elements, 0), result, fail);
4178 Label loop, done;
4179 bind(&loop);
4181 // Check whether we're done.
4182 branchPtr(Assembler::Equal, elements, elementsEnd, &done);
4184 // If not, advance to the next element and load it.
4185 addPtr(Imm32(sizeof(Value)), elements);
4186 fallibleUnboxInt32(Address(elements, 0), temp3, fail);
4188 // Update result if necessary.
4189 Assembler::Condition cond =
4190 isMax ? Assembler::GreaterThan : Assembler::LessThan;
4191 cmp32Move32(cond, temp3, result, temp3, result);
4193 jump(&loop);
4194 bind(&done);
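// Illustrative sketch (not part of the MacroAssembler implementation): the
// scan performed above, assuming the elements have already been unboxed to
// int32 (the jitted code bails out to |fail| for an empty array or for any
// element that is not an int32 Value). The helper name is hypothetical; it
// assumes <algorithm>, <cstddef> and <cstdint>.
static inline bool MinMaxArrayInt32Sketch(const int32_t* elems, size_t length,
                                          bool isMax, int32_t* result) {
  if (length == 0) {
    return false;  // bail
  }
  int32_t acc = elems[0];
  for (size_t i = 1; i < length; i++) {
    acc = isMax ? std::max(acc, elems[i]) : std::min(acc, elems[i]);
  }
  *result = acc;
  return true;
}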
4197 void MacroAssembler::minMaxArrayNumber(Register array, FloatRegister result,
4198 FloatRegister floatTemp, Register temp1,
4199 Register temp2, bool isMax,
4200 Label* fail) {
4201 // array must be a packed array. Load its elements.
4202 Register elements = temp1;
4203 loadPtr(Address(array, NativeObject::offsetOfElements()), elements);
4205 // Load the length and check if the array is empty.
4206 Label isEmpty;
4207 Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
4208 load32(lengthAddr, temp2);
4209 branchTest32(Assembler::Zero, temp2, temp2, &isEmpty);
4211 // Compute the address of the last element.
4212 Register elementsEnd = temp2;
4213 BaseObjectElementIndex elementsEndAddr(elements, temp2,
4214 -int32_t(sizeof(Value)));
4215 computeEffectiveAddress(elementsEndAddr, elementsEnd);
4217 // Load the first element into result.
4218 ensureDouble(Address(elements, 0), result, fail);
4220 Label loop, done;
4221 bind(&loop);
4223 // Check whether we're done.
4224 branchPtr(Assembler::Equal, elements, elementsEnd, &done);
4226 // If not, advance to the next element and load it into floatTemp.
4227 addPtr(Imm32(sizeof(Value)), elements);
4228 ensureDouble(Address(elements, 0), floatTemp, fail);
4230 // Update result if necessary.
4231 if (isMax) {
4232 maxDouble(floatTemp, result, /* handleNaN = */ true);
4233 } else {
4234 minDouble(floatTemp, result, /* handleNaN = */ true);
4236 jump(&loop);
4238 // With no arguments, min/max return +Infinity/-Infinity respectively.
4239 bind(&isEmpty);
4240 if (isMax) {
4241 loadConstantDouble(mozilla::NegativeInfinity<double>(), result);
4242 } else {
4243 loadConstantDouble(mozilla::PositiveInfinity<double>(), result);
4246 bind(&done);
4249 void MacroAssembler::branchIfNotRegExpPrototypeOptimizable(Register proto,
4250 Register temp,
4251 Label* fail) {
4252 loadJSContext(temp);
4253 loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
4254 size_t offset = Realm::offsetOfRegExps() +
4255 RegExpRealm::offsetOfOptimizableRegExpPrototypeShape();
4256 loadPtr(Address(temp, offset), temp);
4257 branchTestObjShapeUnsafe(Assembler::NotEqual, proto, temp, fail);
4260 void MacroAssembler::branchIfNotRegExpInstanceOptimizable(Register regexp,
4261 Register temp,
4262 Label* label) {
4263 loadJSContext(temp);
4264 loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
4265 size_t offset = Realm::offsetOfRegExps() +
4266 RegExpRealm::offsetOfOptimizableRegExpInstanceShape();
4267 loadPtr(Address(temp, offset), temp);
4268 branchTestObjShapeUnsafe(Assembler::NotEqual, regexp, temp, label);
4271 void MacroAssembler::loadRegExpLastIndex(Register regexp, Register string,
4272 Register lastIndex,
4273 Label* notFoundZeroLastIndex) {
4274 Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
4275 Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
4276 Address stringLength(string, JSString::offsetOfLength());
4278 Label notGlobalOrSticky, loadedLastIndex;
4280 branchTest32(Assembler::Zero, flagsSlot,
4281 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
4282 &notGlobalOrSticky);
4284 // It's a global or sticky regular expression. Emit the following code:
4286 // lastIndex = regexp.lastIndex
4287 // if lastIndex > string.length:
4288 // jump to notFoundZeroLastIndex (skip the regexp match/test operation)
4290 // The `notFoundZeroLastIndex` code should set regexp.lastIndex to 0 and
4291 // treat this as a not-found result.
4293 // See steps 5-8 in js::RegExpBuiltinExec.
4295 // Earlier guards must have ensured regexp.lastIndex is a non-negative
4296 // integer.
4297 #ifdef DEBUG
4299 Label ok;
4300 branchTestInt32(Assembler::Equal, lastIndexSlot, &ok);
4301 assumeUnreachable("Expected int32 value for lastIndex");
4302 bind(&ok);
4304 #endif
4305 unboxInt32(lastIndexSlot, lastIndex);
4306 #ifdef DEBUG
4308 Label ok;
4309 branchTest32(Assembler::NotSigned, lastIndex, lastIndex, &ok);
4310 assumeUnreachable("Expected non-negative lastIndex");
4311 bind(&ok);
4313 #endif
4314 branch32(Assembler::Below, stringLength, lastIndex, notFoundZeroLastIndex);
4315 jump(&loadedLastIndex);
4318 bind(&notGlobalOrSticky);
4319 move32(Imm32(0), lastIndex);
4321 bind(&loadedLastIndex);
4324 // ===============================================================
4325 // Branch functions
4327 void MacroAssembler::loadFunctionLength(Register func,
4328 Register funFlagsAndArgCount,
4329 Register output, Label* slowPath) {
4330 #ifdef DEBUG
4332 // These flags should already have been checked by caller.
4333 Label ok;
4334 uint32_t FlagsToCheck =
4335 FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH;
4336 branchTest32(Assembler::Zero, funFlagsAndArgCount, Imm32(FlagsToCheck),
4337 &ok);
4338 assumeUnreachable("The function flags should already have been checked.");
4339 bind(&ok);
4341 #endif // DEBUG
4343 // NOTE: `funFlagsAndArgCount` and `output` must be allowed to alias.
4345 // Load the target function's length.
4346 Label isInterpreted, lengthLoaded;
4347 branchTest32(Assembler::NonZero, funFlagsAndArgCount,
4348 Imm32(FunctionFlags::BASESCRIPT), &isInterpreted);
4350   // The length property of a native function is stored with the flags.
4351 move32(funFlagsAndArgCount, output);
4352 rshift32(Imm32(JSFunction::ArgCountShift), output);
4353 jump(&lengthLoaded);
4355 bind(&isInterpreted);
4357 // Load the length property of an interpreted function.
4358 loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), output);
4359 loadPtr(Address(output, JSScript::offsetOfSharedData()), output);
4360 branchTestPtr(Assembler::Zero, output, output, slowPath);
4361 loadPtr(Address(output, SharedImmutableScriptData::offsetOfISD()), output);
4362 load16ZeroExtend(Address(output, ImmutableScriptData::offsetOfFunLength()),
4363 output);
4365 bind(&lengthLoaded);
4368 void MacroAssembler::loadFunctionName(Register func, Register output,
4369 ImmGCPtr emptyString, Label* slowPath) {
4370 MOZ_ASSERT(func != output);
4372 // Get the JSFunction flags.
4373 load32(Address(func, JSFunction::offsetOfFlagsAndArgCount()), output);
4375 // If the name was previously resolved, the name property may be shadowed.
4376 branchTest32(Assembler::NonZero, output, Imm32(FunctionFlags::RESOLVED_NAME),
4377 slowPath);
4379 Label noName, done;
4380 branchTest32(Assembler::NonZero, output,
4381 Imm32(FunctionFlags::HAS_GUESSED_ATOM), &noName);
4383 Address atomAddr(func, JSFunction::offsetOfAtom());
4384 branchTestUndefined(Assembler::Equal, atomAddr, &noName);
4385 unboxString(atomAddr, output);
4386 jump(&done);
4389 bind(&noName);
4391 // An absent name property defaults to the empty string.
4392 movePtr(emptyString, output);
4395 bind(&done);
4398 void MacroAssembler::assertFunctionIsExtended(Register func) {
4399 #ifdef DEBUG
4400 Label extended;
4401 branchTestFunctionFlags(func, FunctionFlags::EXTENDED, Assembler::NonZero,
4402 &extended);
4403 assumeUnreachable("Function is not extended");
4404 bind(&extended);
4405 #endif
4408 void MacroAssembler::branchTestType(Condition cond, Register tag,
4409 JSValueType type, Label* label) {
4410 switch (type) {
4411 case JSVAL_TYPE_DOUBLE:
4412 branchTestDouble(cond, tag, label);
4413 break;
4414 case JSVAL_TYPE_INT32:
4415 branchTestInt32(cond, tag, label);
4416 break;
4417 case JSVAL_TYPE_BOOLEAN:
4418 branchTestBoolean(cond, tag, label);
4419 break;
4420 case JSVAL_TYPE_UNDEFINED:
4421 branchTestUndefined(cond, tag, label);
4422 break;
4423 case JSVAL_TYPE_NULL:
4424 branchTestNull(cond, tag, label);
4425 break;
4426 case JSVAL_TYPE_MAGIC:
4427 branchTestMagic(cond, tag, label);
4428 break;
4429 case JSVAL_TYPE_STRING:
4430 branchTestString(cond, tag, label);
4431 break;
4432 case JSVAL_TYPE_SYMBOL:
4433 branchTestSymbol(cond, tag, label);
4434 break;
4435 case JSVAL_TYPE_BIGINT:
4436 branchTestBigInt(cond, tag, label);
4437 break;
4438 case JSVAL_TYPE_OBJECT:
4439 branchTestObject(cond, tag, label);
4440 break;
4441 default:
4442 MOZ_CRASH("Unexpected value type");
4446 void MacroAssembler::branchTestObjShapeList(
4447 Condition cond, Register obj, Register shapeElements, Register shapeScratch,
4448 Register endScratch, Register spectreScratch, Label* label) {
4449 MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
4451 bool needSpectreMitigations = spectreScratch != InvalidReg;
4453 Label done;
4454 Label* onMatch = cond == Assembler::Equal ? label : &done;
4456 // Load the object's shape pointer into shapeScratch, and prepare to compare
4457 // it with the shapes in the list. The shapes are stored as private values so
4458 // we can compare directly.
4459 loadPtr(Address(obj, JSObject::offsetOfShape()), shapeScratch);
4461 // Compute end pointer.
4462 Address lengthAddr(shapeElements,
4463 ObjectElements::offsetOfInitializedLength());
4464 load32(lengthAddr, endScratch);
4465 BaseObjectElementIndex endPtrAddr(shapeElements, endScratch);
4466 computeEffectiveAddress(endPtrAddr, endScratch);
4468 Label loop;
4469 bind(&loop);
4471 // Compare the object's shape with a shape from the list. Note that on 64-bit
4472 // this includes the tag bits, but on 32-bit we only compare the low word of
4473 // the value. This is fine because the list of shapes is never exposed and the
4474 // tag is guaranteed to be PrivateGCThing.
4475 if (needSpectreMitigations) {
4476 move32(Imm32(0), spectreScratch);
4478 branchPtr(Assembler::Equal, Address(shapeElements, 0), shapeScratch, onMatch);
4479 if (needSpectreMitigations) {
4480 spectreMovePtr(Assembler::Equal, spectreScratch, obj);
4483 // Advance to next shape and loop if not finished.
4484 addPtr(Imm32(sizeof(Value)), shapeElements);
4485 branchPtr(Assembler::Below, shapeElements, endScratch, &loop);
4487 if (cond == Assembler::NotEqual) {
4488 jump(label);
4489 bind(&done);
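// Illustrative sketch (not part of the MacroAssembler implementation): the
// linear scan over the shape list emitted above, minus the Spectre index
// poisoning. The helper name is hypothetical; it assumes <cstdint>, and the
// shapes are the raw pointer words stored in the elements.
static inline bool ShapeInListSketch(const uintptr_t* shapes, uint32_t length,
                                     uintptr_t objShape) {
  for (uint32_t i = 0; i < length; i++) {
    if (shapes[i] == objShape) {
      return true;
    }
  }
  return false;
}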
4493 void MacroAssembler::branchTestObjCompartment(Condition cond, Register obj,
4494 const Address& compartment,
4495 Register scratch, Label* label) {
4496 MOZ_ASSERT(obj != scratch);
4497 loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
4498 loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
4499 loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
4500 loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
4501 branchPtr(cond, compartment, scratch, label);
4504 void MacroAssembler::branchTestObjCompartment(
4505 Condition cond, Register obj, const JS::Compartment* compartment,
4506 Register scratch, Label* label) {
4507 MOZ_ASSERT(obj != scratch);
4508 loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
4509 loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
4510 loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
4511 loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
4512 branchPtr(cond, scratch, ImmPtr(compartment), label);
4515 void MacroAssembler::branchIfNonNativeObj(Register obj, Register scratch,
4516 Label* label) {
4517 loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
4518 branchTest32(Assembler::Zero,
4519 Address(scratch, Shape::offsetOfImmutableFlags()),
4520 Imm32(Shape::isNativeBit()), label);
4523 void MacroAssembler::branchIfObjectNotExtensible(Register obj, Register scratch,
4524 Label* label) {
4525 loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
4527 // Spectre-style checks are not needed here because we do not interpret data
4528 // based on this check.
4529 static_assert(sizeof(ObjectFlags) == sizeof(uint16_t));
4530 load16ZeroExtend(Address(scratch, Shape::offsetOfObjectFlags()), scratch);
4531 branchTest32(Assembler::NonZero, scratch,
4532 Imm32(uint32_t(ObjectFlag::NotExtensible)), label);
4535 void MacroAssembler::wasmTrap(wasm::Trap trap,
4536 wasm::BytecodeOffset bytecodeOffset) {
4537 uint32_t trapOffset = wasmTrapInstruction().offset();
4538 MOZ_ASSERT_IF(!oom(),
4539 currentOffset() - trapOffset == WasmTrapInstructionLength);
4541 append(trap, wasm::TrapSite(trapOffset, bytecodeOffset));
4544 std::pair<CodeOffset, uint32_t> MacroAssembler::wasmReserveStackChecked(
4545 uint32_t amount, wasm::BytecodeOffset trapOffset) {
4546 if (amount > MAX_UNCHECKED_LEAF_FRAME_SIZE) {
4547 // The frame is large. Don't bump sp until after the stack limit check so
4548 // that the trap handler isn't called with a wild sp.
4549 Label ok;
4550 Register scratch = ABINonArgReg0;
4551 moveStackPtrTo(scratch);
4553 Label trap;
4554 branchPtr(Assembler::Below, scratch, Imm32(amount), &trap);
4555 subPtr(Imm32(amount), scratch);
4556 branchPtr(Assembler::Below,
4557 Address(InstanceReg, wasm::Instance::offsetOfStackLimit()),
4558 scratch, &ok);
4560 bind(&trap);
4561 wasmTrap(wasm::Trap::StackOverflow, trapOffset);
4562 CodeOffset trapInsnOffset = CodeOffset(currentOffset());
4564 bind(&ok);
4565 reserveStack(amount);
4566 return std::pair<CodeOffset, uint32_t>(trapInsnOffset, 0);
4569 reserveStack(amount);
4570 Label ok;
4571 branchStackPtrRhs(Assembler::Below,
4572 Address(InstanceReg, wasm::Instance::offsetOfStackLimit()),
4573 &ok);
4574 wasmTrap(wasm::Trap::StackOverflow, trapOffset);
4575 CodeOffset trapInsnOffset = CodeOffset(currentOffset());
4576 bind(&ok);
4577 return std::pair<CodeOffset, uint32_t>(trapInsnOffset, amount);
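// Illustrative sketch (not part of the MacroAssembler implementation): the two
// stack-check shapes generated above, approximately. The helper name is
// hypothetical; it assumes <cstdint>, and returning false stands in for
// emitting the StackOverflow trap.
static inline bool WasmStackCheckSketch(uintptr_t sp, uintptr_t stackLimit,
                                        uint32_t amount, uint32_t maxUnchecked) {
  if (amount > maxUnchecked) {
    // Large frame: check against (sp - amount) before moving sp so the trap
    // handler never observes a wild stack pointer.
    return sp >= amount && sp - amount > stackLimit;
  }
  // Small frame: sp is bumped first and then checked; the guard region below
  // the limit absorbs the transient overshoot.
  return sp - amount > stackLimit;
}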
4580 CodeOffset MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc,
4581 const wasm::CalleeDesc& callee) {
4582 storePtr(InstanceReg,
4583 Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
4585 // Load the callee, before the caller's registers are clobbered.
4586 uint32_t instanceDataOffset = callee.importInstanceDataOffset();
4587 loadPtr(
4588 Address(InstanceReg, wasm::Instance::offsetInData(
4589 instanceDataOffset +
4590 offsetof(wasm::FuncImportInstanceData, code))),
4591 ABINonArgReg0);
4593 #if !defined(JS_CODEGEN_NONE) && !defined(JS_CODEGEN_WASM32)
4594 static_assert(ABINonArgReg0 != InstanceReg, "by constraint");
4595 #endif
4597 // Switch to the callee's realm.
4598 loadPtr(
4599 Address(InstanceReg, wasm::Instance::offsetInData(
4600 instanceDataOffset +
4601 offsetof(wasm::FuncImportInstanceData, realm))),
4602 ABINonArgReg1);
4603 loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), ABINonArgReg2);
4604 storePtr(ABINonArgReg1, Address(ABINonArgReg2, JSContext::offsetOfRealm()));
4606 // Switch to the callee's instance and pinned registers and make the call.
4607 loadPtr(Address(InstanceReg,
4608 wasm::Instance::offsetInData(
4609 instanceDataOffset +
4610 offsetof(wasm::FuncImportInstanceData, instance))),
4611 InstanceReg);
4613 storePtr(InstanceReg,
4614 Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
4615 loadWasmPinnedRegsFromInstance();
4617 return call(desc, ABINonArgReg0);
4620 CodeOffset MacroAssembler::wasmCallBuiltinInstanceMethod(
4621 const wasm::CallSiteDesc& desc, const ABIArg& instanceArg,
4622 wasm::SymbolicAddress builtin, wasm::FailureMode failureMode) {
4623 MOZ_ASSERT(instanceArg != ABIArg());
4625 storePtr(InstanceReg,
4626 Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
4627 storePtr(InstanceReg,
4628 Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
4630 if (instanceArg.kind() == ABIArg::GPR) {
4631 movePtr(InstanceReg, instanceArg.gpr());
4632 } else if (instanceArg.kind() == ABIArg::Stack) {
4633 storePtr(InstanceReg,
4634 Address(getStackPointer(), instanceArg.offsetFromArgBase()));
4635 } else {
4636 MOZ_CRASH("Unknown abi passing style for pointer");
4639 CodeOffset ret = call(desc, builtin);
4641 if (failureMode != wasm::FailureMode::Infallible) {
4642 Label noTrap;
4643 switch (failureMode) {
4644 case wasm::FailureMode::Infallible:
4645 MOZ_CRASH();
4646 case wasm::FailureMode::FailOnNegI32:
4647 branchTest32(Assembler::NotSigned, ReturnReg, ReturnReg, &noTrap);
4648 break;
4649 case wasm::FailureMode::FailOnNullPtr:
4650 branchTestPtr(Assembler::NonZero, ReturnReg, ReturnReg, &noTrap);
4651 break;
4652 case wasm::FailureMode::FailOnInvalidRef:
4653 branchPtr(Assembler::NotEqual, ReturnReg,
4654 ImmWord(uintptr_t(wasm::AnyRef::invalid().forCompiledCode())),
4655 &noTrap);
4656 break;
4658 wasmTrap(wasm::Trap::ThrowReported,
4659 wasm::BytecodeOffset(desc.lineOrBytecode()));
4660 bind(&noTrap);
4663 return ret;
4666 CodeOffset MacroAssembler::asmCallIndirect(const wasm::CallSiteDesc& desc,
4667 const wasm::CalleeDesc& callee) {
4668 MOZ_ASSERT(callee.which() == wasm::CalleeDesc::AsmJSTable);
4670 const Register scratch = WasmTableCallScratchReg0;
4671 const Register index = WasmTableCallIndexReg;
4673 // Optimization opportunity: when offsetof(FunctionTableElem, code) == 0, as
4674 // it is at present, we can probably generate better code here by folding
4675 // the address computation into the load.
4677 static_assert(sizeof(wasm::FunctionTableElem) == 8 ||
4678 sizeof(wasm::FunctionTableElem) == 16,
4679 "elements of function tables are two words");
4681 // asm.js tables require no signature check, and have had their index
4682 // masked into range and thus need no bounds check.
4683 loadPtr(
4684 Address(InstanceReg, wasm::Instance::offsetInData(
4685 callee.tableFunctionBaseInstanceDataOffset())),
4686 scratch);
4687 if (sizeof(wasm::FunctionTableElem) == 8) {
4688 computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
4689 } else {
4690 lshift32(Imm32(4), index);
4691 addPtr(index, scratch);
4693 loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, code)), scratch);
4694 storePtr(InstanceReg,
4695 Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
4696 storePtr(InstanceReg,
4697 Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
4698 return call(desc, scratch);
4701 // In principle, call_indirect requires an expensive context switch to the
4702 // callee's instance and realm before the call and an almost equally expensive
4703 // switch back to the caller's ditto after. However, if the caller's instance
4704 // is the same as the callee's instance then no context switch is required, and
4705 // it only takes a compare-and-branch at run-time to test this - all values are
4706 // in registers already. We therefore generate two call paths, one for the fast
4707 // call without the context switch (which additionally avoids a null check) and
4708 // one for the slow call with the context switch.
4710 void MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc,
4711 const wasm::CalleeDesc& callee,
4712 Label* boundsCheckFailedLabel,
4713 Label* nullCheckFailedLabel,
4714 mozilla::Maybe<uint32_t> tableSize,
4715 CodeOffset* fastCallOffset,
4716 CodeOffset* slowCallOffset) {
4717 static_assert(sizeof(wasm::FunctionTableElem) == 2 * sizeof(void*),
4718 "Exactly two pointers or index scaling won't work correctly");
4719 MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);
4721 const int shift = sizeof(wasm::FunctionTableElem) == 8 ? 3 : 4;
4722 wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
4723 const Register calleeScratch = WasmTableCallScratchReg0;
4724 const Register index = WasmTableCallIndexReg;
4726 // Check the table index and throw if out-of-bounds.
4728 // Frequently the table size is known, so optimize for that. Otherwise
4729 // compare with a memory operand when that's possible. (There's little sense
4730 // in hoisting the load of the bound into a register at a higher level and
4731 // reusing that register, because a hoisted value would either have to be
4732 // spilled and re-loaded before the next call_indirect, or would be abandoned
4733 // because we could not trust that a hoisted value would not have changed.)
4735 if (boundsCheckFailedLabel) {
4736 if (tableSize.isSome()) {
4737 branch32(Assembler::Condition::AboveOrEqual, index, Imm32(*tableSize),
4738 boundsCheckFailedLabel);
4739 } else {
4740 branch32(
4741 Assembler::Condition::BelowOrEqual,
4742 Address(InstanceReg, wasm::Instance::offsetInData(
4743 callee.tableLengthInstanceDataOffset())),
4744 index, boundsCheckFailedLabel);
4748 // Write the functype-id into the ABI functype-id register.
4750 const wasm::CallIndirectId callIndirectId = callee.wasmTableSigId();
4751 switch (callIndirectId.kind()) {
4752 case wasm::CallIndirectIdKind::Global:
4753 loadPtr(Address(InstanceReg, wasm::Instance::offsetInData(
4754 callIndirectId.instanceDataOffset())),
4755 WasmTableCallSigReg);
4756 break;
4757 case wasm::CallIndirectIdKind::Immediate:
4758 move32(Imm32(callIndirectId.immediate()), WasmTableCallSigReg);
4759 break;
4760 case wasm::CallIndirectIdKind::AsmJS:
4761 case wasm::CallIndirectIdKind::None:
4762 break;
4765 // Load the base pointer of the table and compute the address of the callee in
4766 // the table.
4768 loadPtr(
4769 Address(InstanceReg, wasm::Instance::offsetInData(
4770 callee.tableFunctionBaseInstanceDataOffset())),
4771 calleeScratch);
4772 shiftIndex32AndAdd(index, shift, calleeScratch);
4774 // Load the callee instance and decide whether to take the fast path or the
4775 // slow path.
4777 Label fastCall;
4778 Label done;
4779 const Register newInstanceTemp = WasmTableCallScratchReg1;
4780 loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, instance)),
4781 newInstanceTemp);
4782 branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);
4784 // Slow path: Save context, check for null, setup new context, call, restore
4785 // context.
4787 // TODO: The slow path could usefully be out-of-line and the test above would
4788 // just fall through to the fast path. This keeps the fast-path code dense,
4789 // and has correct static prediction for the branch (forward conditional
4790 // branches predicted not taken, normally).
4792 storePtr(InstanceReg,
4793 Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
4794 movePtr(newInstanceTemp, InstanceReg);
4795 storePtr(InstanceReg,
4796 Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
4798 #ifdef WASM_HAS_HEAPREG
4799 // Use the null pointer exception resulting from loading HeapReg from a null
4800 // instance to handle a call to a null slot.
4801 MOZ_ASSERT(nullCheckFailedLabel == nullptr);
4802 loadWasmPinnedRegsFromInstance(mozilla::Some(trapOffset));
4803 #else
4804 MOZ_ASSERT(nullCheckFailedLabel != nullptr);
4805 branchTestPtr(Assembler::Zero, InstanceReg, InstanceReg,
4806 nullCheckFailedLabel);
4808 loadWasmPinnedRegsFromInstance();
4809 #endif
4810 switchToWasmInstanceRealm(index, WasmTableCallScratchReg1);
4812 loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
4813 calleeScratch);
4815 *slowCallOffset = call(desc, calleeScratch);
4817 // Restore registers and realm and join up with the fast path.
4819 loadPtr(Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
4820 InstanceReg);
4821 loadWasmPinnedRegsFromInstance();
4822 switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
4823 jump(&done);
4825 // Fast path: just load the code pointer and go. The instance and heap
4826 // register are the same as in the caller, and nothing will be null.
4828 // (In particular, the code pointer will not be null: if it were, the instance
4829 // would have been null, and then it would not have been equivalent to our
4830 // current instance. So no null check is needed on the fast path.)
4832 bind(&fastCall);
4834 loadPtr(Address(calleeScratch, offsetof(wasm::FunctionTableElem, code)),
4835 calleeScratch);
4837 // We use a different type of call site for the fast call since the instance
4838 // slots in the frame do not have valid values.
4840 wasm::CallSiteDesc newDesc(desc.lineOrBytecode(),
4841 wasm::CallSiteDesc::IndirectFast);
4842 *fastCallOffset = call(newDesc, calleeScratch);
4844 bind(&done);
4847 void MacroAssembler::wasmCallRef(const wasm::CallSiteDesc& desc,
4848 const wasm::CalleeDesc& callee,
4849 CodeOffset* fastCallOffset,
4850 CodeOffset* slowCallOffset) {
4851 MOZ_ASSERT(callee.which() == wasm::CalleeDesc::FuncRef);
4852 const Register calleeScratch = WasmCallRefCallScratchReg0;
4853 const Register calleeFnObj = WasmCallRefReg;
4855 // Load from the function's WASM_INSTANCE_SLOT extended slot, and decide
4856 // whether to take the fast path or the slow path. Register this load
4857   // instruction as the source of a trap -- it doubles as the null check.
4859 Label fastCall;
4860 Label done;
4861 const Register newInstanceTemp = WasmCallRefCallScratchReg1;
4862 size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
4863 FunctionExtended::WASM_INSTANCE_SLOT);
4864 static_assert(FunctionExtended::WASM_INSTANCE_SLOT < wasm::NullPtrGuardSize);
4865 wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
4866 append(wasm::Trap::NullPointerDereference,
4867 wasm::TrapSite(currentOffset(), trapOffset));
4868 loadPtr(Address(calleeFnObj, instanceSlotOffset), newInstanceTemp);
4869 branchPtr(Assembler::Equal, InstanceReg, newInstanceTemp, &fastCall);
4871 storePtr(InstanceReg,
4872 Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall));
4873 movePtr(newInstanceTemp, InstanceReg);
4874 storePtr(InstanceReg,
4875 Address(getStackPointer(), WasmCalleeInstanceOffsetBeforeCall));
4877 loadWasmPinnedRegsFromInstance();
4878 switchToWasmInstanceRealm(WasmCallRefCallScratchReg0,
4879 WasmCallRefCallScratchReg1);
4881 // Get funcUncheckedCallEntry() from the function's
4882 // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
4883 size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
4884 FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
4885 loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);
4887 *slowCallOffset = call(desc, calleeScratch);
4889   // Restore registers and realm back to the caller's.
4890 loadPtr(Address(getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
4891 InstanceReg);
4892 loadWasmPinnedRegsFromInstance();
4893 switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
4894 jump(&done);
4896 // Fast path: just load WASM_FUNC_UNCHECKED_ENTRY_SLOT value and go.
4897 // The instance and pinned registers are the same as in the caller.
4899 bind(&fastCall);
4901 loadPtr(Address(calleeFnObj, uncheckedEntrySlotOffset), calleeScratch);
4903 // We use a different type of call site for the fast call since the instance
4904 // slots in the frame do not have valid values.
4906 wasm::CallSiteDesc newDesc(desc.lineOrBytecode(),
4907 wasm::CallSiteDesc::FuncRefFast);
4908 *fastCallOffset = call(newDesc, calleeScratch);
4910 bind(&done);
4913 bool MacroAssembler::needScratch1ForBranchWasmGcRefType(wasm::RefType type) {
4914 MOZ_ASSERT(type.isValid());
4915 MOZ_ASSERT(type.isAnyHierarchy());
4916 return !type.isNone() && !type.isAny();
4919 bool MacroAssembler::needScratch2ForBranchWasmGcRefType(wasm::RefType type) {
4920 MOZ_ASSERT(type.isValid());
4921 MOZ_ASSERT(type.isAnyHierarchy());
4922 return type.isTypeRef() &&
4923 type.typeDef()->subTypingDepth() >= wasm::MinSuperTypeVectorLength;
4926 bool MacroAssembler::needSuperSuperTypeVectorForBranchWasmGcRefType(
4927 wasm::RefType type) {
4928 return type.isTypeRef();
4931 void MacroAssembler::branchWasmGcObjectIsRefType(
4932 Register object, wasm::RefType sourceType, wasm::RefType destType,
4933 Label* label, bool onSuccess, Register superSuperTypeVector,
4934 Register scratch1, Register scratch2) {
4935 MOZ_ASSERT(sourceType.isValid());
4936 MOZ_ASSERT(destType.isValid());
4937 MOZ_ASSERT(sourceType.isAnyHierarchy());
4938 MOZ_ASSERT(destType.isAnyHierarchy());
4939 MOZ_ASSERT_IF(needScratch1ForBranchWasmGcRefType(destType),
4940 scratch1 != Register::Invalid());
4941 MOZ_ASSERT_IF(needScratch2ForBranchWasmGcRefType(destType),
4942 scratch2 != Register::Invalid());
4943 MOZ_ASSERT_IF(needSuperSuperTypeVectorForBranchWasmGcRefType(destType),
4944 superSuperTypeVector != Register::Invalid());
4946 Label fallthrough;
4947 Label* successLabel = onSuccess ? label : &fallthrough;
4948 Label* failLabel = onSuccess ? &fallthrough : label;
4949 Label* nullLabel = destType.isNullable() ? successLabel : failLabel;
4951 // Check for null.
4952 if (sourceType.isNullable()) {
4953 branchTestPtr(Assembler::Zero, object, object, nullLabel);
4956 // The only value that can inhabit 'none' is null. So, early out if we got
4957 // not-null.
4958 if (destType.isNone()) {
4959 jump(failLabel);
4960 bind(&fallthrough);
4961 return;
4964 if (destType.isAny()) {
4965 // No further checks for 'any'
4966 jump(successLabel);
4967 bind(&fallthrough);
4968 return;
4971 // 'type' is now 'eq' or lower, which currently will always be a gc object.
4972 // Test for non-gc objects.
4973 MOZ_ASSERT(scratch1 != Register::Invalid());
4974 if (!wasm::RefType::isSubTypeOf(sourceType, wasm::RefType::eq())) {
4975 branchTestObjectIsWasmGcObject(false, object, scratch1, failLabel);
4978 if (destType.isEq()) {
4979 // No further checks for 'eq'
4980 jump(successLabel);
4981 bind(&fallthrough);
4982 return;
4985 // 'type' is now 'struct', 'array', or a concrete type. (Bottom types were
4986 // handled above.)
4988 // Casting to a concrete type only requires a simple check on the
4989 // object's superTypeVector. Casting to an abstract type (struct, array)
4990 // requires loading the object's superTypeVector->typeDef->kind, and checking
4991 // that it is correct.
4993 loadPtr(Address(object, int32_t(WasmGcObject::offsetOfSuperTypeVector())),
4994 scratch1);
4995 if (destType.isTypeRef()) {
4996 // concrete type, do superTypeVector check
4997 branchWasmSuperTypeVectorIsSubtype(scratch1, superSuperTypeVector, scratch2,
4998 destType.typeDef()->subTypingDepth(),
4999 successLabel, true);
5000 } else {
5001 // abstract type, do kind check
5002 loadPtr(Address(scratch1,
5003 int32_t(wasm::SuperTypeVector::offsetOfSelfTypeDef())),
5004 scratch1);
5005 load8ZeroExtend(Address(scratch1, int32_t(wasm::TypeDef::offsetOfKind())),
5006 scratch1);
5007 branch32(Assembler::Equal, scratch1, Imm32(int32_t(destType.typeDefKind())),
5008 successLabel);
5011 // The cast failed.
5012 jump(failLabel);
5013 bind(&fallthrough);
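// Illustrative-only C++ model of the check emitted above. |isWasmGcObject()|,
// |superTypeVector()| and |SuperTypeVectorIsSubtype()| are placeholder helpers,
// and the null test is shown unconditionally rather than only for nullable
// source types:
//
//   bool RefTestAnyHierarchy(JSObject* obj, wasm::RefType destType) {
//     if (!obj) {
//       return destType.isNullable();
//     }
//     if (destType.isNone()) {
//       return false;  // only null inhabits 'none'
//     }
//     if (destType.isAny()) {
//       return true;   // no further checks for 'any'
//     }
//     if (!obj->isWasmGcObject()) {
//       return false;  // non-GC objects can't be 'eq' or lower
//     }
//     if (destType.isEq()) {
//       return true;   // no further checks for 'eq'
//     }
//     const wasm::SuperTypeVector* stv = obj->superTypeVector();
//     if (destType.isTypeRef()) {
//       // concrete type: check the super type vector
//       return SuperTypeVectorIsSubtype(stv, destType.typeDef(),
//                                       destType.typeDef()->subTypingDepth());
//     }
//     // abstract type ('struct'/'array'): check the kind
//     return stv->typeDef()->kind() == destType.typeDefKind();
//   }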
5016 void MacroAssembler::branchWasmSuperTypeVectorIsSubtype(
5017 Register subSuperTypeVector, Register superSuperTypeVector,
5018 Register scratch, uint32_t superTypeDepth, Label* label, bool onSuccess) {
5019 MOZ_ASSERT_IF(superTypeDepth >= wasm::MinSuperTypeVectorLength,
5020 scratch != Register::Invalid());
5022 // We generate just different enough code for 'is' subtype vs 'is not'
5023 // subtype that we handle them separately.
5024 if (onSuccess) {
5025 Label failed;
5027 // At this point, we could generate a fast success check which jumps to
5028 // `label` if `subSuperTypeVector == superSuperTypeVector`. However,
5029 // profiling of Barista-3 seems to show this is hardly worth anything,
5030 // whereas it is worth us generating smaller code and in particular one
5031 // fewer conditional branch. So it is omitted:
5033 // branchPtr(Assembler::Equal, subSuperTypeVector, superSuperTypeVector,
5034 // label);
5036 // Emit a bounds check if the super type depth may be out-of-bounds.
5037 if (superTypeDepth >= wasm::MinSuperTypeVectorLength) {
5038 // Slow path: bounds-check against the super type vector's length.
5039 load32(
5040 Address(subSuperTypeVector, wasm::SuperTypeVector::offsetOfLength()),
5041 scratch);
5042 branch32(Assembler::LessThanOrEqual, scratch, Imm32(superTypeDepth),
5043 &failed);
5046 // Load the `superTypeDepth` entry from subSuperTypeVector. This
5047 // will be `superSuperTypeVector` if `subSuperTypeVector` is indeed a
5048 // subtype.
5049 loadPtr(
5050 Address(subSuperTypeVector,
5051 wasm::SuperTypeVector::offsetOfTypeDefInVector(superTypeDepth)),
5052 subSuperTypeVector);
5053 branchPtr(Assembler::Equal, subSuperTypeVector, superSuperTypeVector,
5054 label);
5056 // Fallthrough to the failed case
5057 bind(&failed);
5058 return;
5061 // Emit a bounds check if the super type depth may be out-of-bounds.
5062 if (superTypeDepth >= wasm::MinSuperTypeVectorLength) {
5063 load32(Address(subSuperTypeVector, wasm::SuperTypeVector::offsetOfLength()),
5064 scratch);
5065 branch32(Assembler::LessThanOrEqual, scratch, Imm32(superTypeDepth), label);
5068 // Load the `superTypeDepth` entry from subSuperTypeVector. This will be
5069 // `superSuperTypeVector` if `subSuperTypeVector` is indeed a subtype.
5070 loadPtr(
5071 Address(subSuperTypeVector,
5072 wasm::SuperTypeVector::offsetOfTypeDefInVector(superTypeDepth)),
5073 subSuperTypeVector);
5074 branchPtr(Assembler::NotEqual, subSuperTypeVector, superSuperTypeVector,
5075 label);
5076 // Fallthrough to the success case
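// A minimal C++ sketch of the test this helper emits; |length()| and |entry()|
// are illustrative accessors for the fields read via offsetOfLength() and
// offsetOfTypeDefInVector():
//
//   bool SuperTypeVectorIsSubtype(const wasm::SuperTypeVector* sub,
//                                 const void* superEntry,
//                                 uint32_t superTypeDepth) {
//     // The bounds check is only emitted when superTypeDepth may reach past
//     // wasm::MinSuperTypeVectorLength, the guaranteed minimum vector length.
//     if (superTypeDepth >= sub->length()) {
//       return false;
//     }
//     return sub->entry(superTypeDepth) == superEntry;
//   }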
5079 void MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc) {
5080 CodeOffset offset = nopPatchableToCall();
5081 append(desc, offset);
5084 void MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type,
5085 Register temp1, Register temp2,
5086 Register temp3, Label* noBarrier) {
5087 MOZ_ASSERT(temp1 != PreBarrierReg);
5088 MOZ_ASSERT(temp2 != PreBarrierReg);
5089 MOZ_ASSERT(temp3 != PreBarrierReg);
5091 // Load the GC thing in temp1.
5092 if (type == MIRType::Value) {
5093 unboxGCThingForGCBarrier(Address(PreBarrierReg, 0), temp1);
5094 } else {
5095 MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
5096 type == MIRType::Shape);
5097 loadPtr(Address(PreBarrierReg, 0), temp1);
5100 #ifdef DEBUG
5101 // The caller should have checked for null pointers.
5102 Label nonZero;
5103 branchTestPtr(Assembler::NonZero, temp1, temp1, &nonZero);
5104 assumeUnreachable("JIT pre-barrier: unexpected nullptr");
5105 bind(&nonZero);
5106 #endif
5108 // Load the chunk address in temp2.
5109 movePtr(temp1, temp2);
5110 andPtr(Imm32(int32_t(~gc::ChunkMask)), temp2);
5112 // If the GC thing is in the nursery, we don't need to barrier it.
5113 if (type == MIRType::Value || type == MIRType::Object ||
5114 type == MIRType::String) {
5115 branchPtr(Assembler::NotEqual, Address(temp2, gc::ChunkStoreBufferOffset),
5116 ImmWord(0), noBarrier);
5117 } else {
5118 #ifdef DEBUG
5119 Label isTenured;
5120 branchPtr(Assembler::Equal, Address(temp2, gc::ChunkStoreBufferOffset),
5121 ImmWord(0), &isTenured);
5122 assumeUnreachable("JIT pre-barrier: unexpected nursery pointer");
5123 bind(&isTenured);
5124 #endif
5127 // Determine the bit index and store in temp1.
5129 // bit = (addr & js::gc::ChunkMask) / js::gc::CellBytesPerMarkBit +
5130 // static_cast<uint32_t>(colorBit);
5131 static_assert(gc::CellBytesPerMarkBit == 8,
5132 "Calculation below relies on this");
5133 static_assert(size_t(gc::ColorBit::BlackBit) == 0,
5134 "Calculation below relies on this");
5135 andPtr(Imm32(gc::ChunkMask), temp1);
5136 rshiftPtr(Imm32(3), temp1);
5138 static_assert(gc::MarkBitmapWordBits == JS_BITS_PER_WORD,
5139 "Calculation below relies on this");
5141 // Load the bitmap word in temp2.
5143 // word = chunk.bitmap[bit / MarkBitmapWordBits];
5145 // Fold the adjustment for the fact that arenas don't start at the beginning
5146 // of the chunk into the offset to the chunk bitmap.
5147 const size_t firstArenaAdjustment = gc::FirstArenaAdjustmentBits / CHAR_BIT;
5148 const intptr_t offset =
5149 intptr_t(gc::ChunkMarkBitmapOffset) - intptr_t(firstArenaAdjustment);
5151 movePtr(temp1, temp3);
5152 #if JS_BITS_PER_WORD == 64
5153 rshiftPtr(Imm32(6), temp1);
5154 loadPtr(BaseIndex(temp2, temp1, TimesEight, offset), temp2);
5155 #else
5156 rshiftPtr(Imm32(5), temp1);
5157 loadPtr(BaseIndex(temp2, temp1, TimesFour, offset), temp2);
5158 #endif
5160 // Load the mask in temp1.
5162 // mask = uintptr_t(1) << (bit % MarkBitmapWordBits);
5163 andPtr(Imm32(gc::MarkBitmapWordBits - 1), temp3);
5164 move32(Imm32(1), temp1);
5165 #ifdef JS_CODEGEN_X64
5166 MOZ_ASSERT(temp3 == rcx);
5167 shlq_cl(temp1);
5168 #elif JS_CODEGEN_X86
5169 MOZ_ASSERT(temp3 == ecx);
5170 shll_cl(temp1);
5171 #elif JS_CODEGEN_ARM
5172 ma_lsl(temp3, temp1, temp1);
5173 #elif JS_CODEGEN_ARM64
5174 Lsl(ARMRegister(temp1, 64), ARMRegister(temp1, 64), ARMRegister(temp3, 64));
5175 #elif JS_CODEGEN_MIPS32
5176 ma_sll(temp1, temp1, temp3);
5177 #elif JS_CODEGEN_MIPS64
5178 ma_dsll(temp1, temp1, temp3);
5179 #elif JS_CODEGEN_LOONG64
5180 as_sll_d(temp1, temp1, temp3);
5181 #elif JS_CODEGEN_RISCV64
5182 sll(temp1, temp1, temp3);
5183 #elif JS_CODEGEN_WASM32
5184 MOZ_CRASH();
5185 #elif JS_CODEGEN_NONE
5186 MOZ_CRASH();
5187 #else
5188 # error "Unknown architecture"
5189 #endif
5191 // No barrier is needed if the bit is set, |word & mask != 0|.
5192 branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier);
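// Roughly the following C++, where |LoadWord()| is a placeholder for a raw
// word-sized read at the given address (a sketch, not the real GC API):
//
//   bool NeedsPreBarrier(uintptr_t thing) {
//     uintptr_t chunk = thing & ~gc::ChunkMask;
//     if (LoadWord(chunk + gc::ChunkStoreBufferOffset) != 0) {
//       return false;  // nursery thing: no barrier needed
//     }
//     size_t bit = (thing & gc::ChunkMask) / gc::CellBytesPerMarkBit +
//                  size_t(gc::ColorBit::BlackBit);
//     uintptr_t bitmap = chunk + gc::ChunkMarkBitmapOffset -
//                        gc::FirstArenaAdjustmentBits / CHAR_BIT;
//     uintptr_t word =
//         LoadWord(bitmap + (bit / gc::MarkBitmapWordBits) * sizeof(uintptr_t));
//     uintptr_t mask = uintptr_t(1) << (bit % gc::MarkBitmapWordBits);
//     return (word & mask) == 0;  // already marked black => no barrier
//   }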
5195 // ========================================================================
5196 // JS atomic operations.
5198 void MacroAssembler::atomicIsLockFreeJS(Register value, Register output) {
5199 // Keep this in sync with isLockfreeJS() in jit/AtomicOperations.h.
5200 static_assert(AtomicOperations::isLockfreeJS(1)); // Implementation artifact
5201 static_assert(AtomicOperations::isLockfreeJS(2)); // Implementation artifact
5202 static_assert(AtomicOperations::isLockfreeJS(4)); // Spec requirement
5203 static_assert(AtomicOperations::isLockfreeJS(8)); // Implementation artifact
5205 Label done;
5206 move32(Imm32(1), output);
5207 branch32(Assembler::Equal, value, Imm32(8), &done);
5208 branch32(Assembler::Equal, value, Imm32(4), &done);
5209 branch32(Assembler::Equal, value, Imm32(2), &done);
5210 branch32(Assembler::Equal, value, Imm32(1), &done);
5211 move32(Imm32(0), output);
5212 bind(&done);
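// The test above is just the following predicate, kept in sync with
// AtomicOperations::isLockfreeJS():
//
//   bool AtomicIsLockFreeJS(int32_t size) {
//     return size == 1 || size == 2 || size == 4 || size == 8;
//   }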
5215 // ========================================================================
5216 // Spectre Mitigations.
5218 void MacroAssembler::spectreMaskIndex32(Register index, Register length,
5219 Register output) {
5220 MOZ_ASSERT(JitOptions.spectreIndexMasking);
5221 MOZ_ASSERT(length != output);
5222 MOZ_ASSERT(index != output);
5224 move32(Imm32(0), output);
5225 cmp32Move32(Assembler::Below, index, length, index, output);
5228 void MacroAssembler::spectreMaskIndex32(Register index, const Address& length,
5229 Register output) {
5230 MOZ_ASSERT(JitOptions.spectreIndexMasking);
5231 MOZ_ASSERT(index != length.base);
5232 MOZ_ASSERT(length.base != output);
5233 MOZ_ASSERT(index != output);
5235 move32(Imm32(0), output);
5236 cmp32Move32(Assembler::Below, index, length, index, output);
5239 void MacroAssembler::spectreMaskIndexPtr(Register index, Register length,
5240 Register output) {
5241 MOZ_ASSERT(JitOptions.spectreIndexMasking);
5242 MOZ_ASSERT(length != output);
5243 MOZ_ASSERT(index != output);
5245 movePtr(ImmWord(0), output);
5246 cmpPtrMovePtr(Assembler::Below, index, length, index, output);
5249 void MacroAssembler::spectreMaskIndexPtr(Register index, const Address& length,
5250 Register output) {
5251 MOZ_ASSERT(JitOptions.spectreIndexMasking);
5252 MOZ_ASSERT(index != length.base);
5253 MOZ_ASSERT(length.base != output);
5254 MOZ_ASSERT(index != output);
5256 movePtr(ImmWord(0), output);
5257 cmpPtrMovePtr(Assembler::Below, index, length, index, output);
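// All four masking helpers compute the same thing; a sketch of the effect:
//
//   size_t SpectreMaskIndex(size_t index, size_t length) {
//     // The architectural path has already passed its bounds check, so
//     // index < length and the conditional move leaves it unchanged; on a
//     // misspeculated path the index collapses to 0.
//     return index < length ? index : 0;
//   }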
5260 void MacroAssembler::boundsCheck32PowerOfTwo(Register index, uint32_t length,
5261 Label* failure) {
5262 MOZ_ASSERT(mozilla::IsPowerOfTwo(length));
5263 branch32(Assembler::AboveOrEqual, index, Imm32(length), failure);
5265 // Note: it's fine to clobber the input register here; the masking is a
5266 // no-op on the architectural path and only affects speculative execution.
5267 if (JitOptions.spectreIndexMasking) {
5268 and32(Imm32(length - 1), index);
5272 void MacroAssembler::loadWasmPinnedRegsFromInstance(
5273 mozilla::Maybe<wasm::BytecodeOffset> trapOffset) {
5274 #ifdef WASM_HAS_HEAPREG
5275 static_assert(wasm::Instance::offsetOfMemoryBase() < 4096,
5276 "We count only on the low page being inaccessible");
5277 if (trapOffset) {
5278 append(wasm::Trap::IndirectCallToNull,
5279 wasm::TrapSite(currentOffset(), *trapOffset));
5281 loadPtr(Address(InstanceReg, wasm::Instance::offsetOfMemoryBase()), HeapReg);
5282 #else
5283 MOZ_ASSERT(!trapOffset);
5284 #endif
5287 //}}} check_macroassembler_style
5289 #ifdef JS_64BIT
5290 void MacroAssembler::debugAssertCanonicalInt32(Register r) {
5291 # ifdef DEBUG
5292 if (!js::jit::JitOptions.lessDebugCode) {
5293 # if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
5294 Label ok;
5295 branchPtr(Assembler::BelowOrEqual, r, ImmWord(UINT32_MAX), &ok);
5296 breakpoint();
5297 bind(&ok);
5298 # elif defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
5299 Label ok;
5300 ScratchRegisterScope scratch(asMasm());
5301 move32SignExtendToPtr(r, scratch);
5302 branchPtr(Assembler::Equal, r, scratch, &ok);
5303 breakpoint();
5304 bind(&ok);
5305 # else
5306 MOZ_CRASH("IMPLEMENT ME");
5307 # endif
5309 # endif
5311 #endif
5313 void MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
5314 memoryBarrier(sync.barrierBefore);
5317 void MacroAssembler::memoryBarrierAfter(const Synchronization& sync) {
5318 memoryBarrier(sync.barrierAfter);
5321 void MacroAssembler::debugAssertIsObject(const ValueOperand& val) {
5322 #ifdef DEBUG
5323 Label ok;
5324 branchTestObject(Assembler::Equal, val, &ok);
5325 assumeUnreachable("Expected an object!");
5326 bind(&ok);
5327 #endif
5330 void MacroAssembler::debugAssertObjHasFixedSlots(Register obj,
5331 Register scratch) {
5332 #ifdef DEBUG
5333 Label hasFixedSlots;
5334 loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
5335 branchTest32(Assembler::NonZero,
5336 Address(scratch, Shape::offsetOfImmutableFlags()),
5337 Imm32(NativeShape::fixedSlotsMask()), &hasFixedSlots);
5338 assumeUnreachable("Expected a fixed slot");
5339 bind(&hasFixedSlots);
5340 #endif
5343 void MacroAssembler::debugAssertObjectHasClass(Register obj, Register scratch,
5344 const JSClass* clasp) {
5345 #ifdef DEBUG
5346 Label done;
5347 branchTestObjClassNoSpectreMitigations(Assembler::Equal, obj, clasp, scratch,
5348 &done);
5349 assumeUnreachable("Class check failed");
5350 bind(&done);
5351 #endif
5354 void MacroAssembler::branchArrayIsNotPacked(Register array, Register temp1,
5355 Register temp2, Label* label) {
5356 loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);
5358 // Test length == initializedLength.
5359 Address initLength(temp1, ObjectElements::offsetOfInitializedLength());
5360 load32(Address(temp1, ObjectElements::offsetOfLength()), temp2);
5361 branch32(Assembler::NotEqual, initLength, temp2, label);
5363 // Test the NON_PACKED flag.
5364 Address flags(temp1, ObjectElements::offsetOfFlags());
5365 branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::NON_PACKED),
5366 label);
5369 void MacroAssembler::setIsPackedArray(Register obj, Register output,
5370 Register temp) {
5371 // Ensure it's an ArrayObject.
5372 Label notPackedArray;
5373 branchTestObjClass(Assembler::NotEqual, obj, &ArrayObject::class_, temp, obj,
5374 &notPackedArray);
5376 branchArrayIsNotPacked(obj, temp, output, &notPackedArray);
5378 Label done;
5379 move32(Imm32(1), output);
5380 jump(&done);
5382 bind(&notPackedArray);
5383 move32(Imm32(0), output);
5385 bind(&done);
5388 void MacroAssembler::packedArrayPop(Register array, ValueOperand output,
5389 Register temp1, Register temp2,
5390 Label* fail) {
5391 // Load obj->elements in temp1.
5392 loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);
5394 // Check flags.
5395 static constexpr uint32_t UnhandledFlags =
5396 ObjectElements::Flags::NON_PACKED |
5397 ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH |
5398 ObjectElements::Flags::NOT_EXTENSIBLE |
5399 ObjectElements::Flags::MAYBE_IN_ITERATION;
5400 Address flags(temp1, ObjectElements::offsetOfFlags());
5401 branchTest32(Assembler::NonZero, flags, Imm32(UnhandledFlags), fail);
5403 // Load length in temp2. Ensure length == initializedLength.
5404 Address lengthAddr(temp1, ObjectElements::offsetOfLength());
5405 Address initLengthAddr(temp1, ObjectElements::offsetOfInitializedLength());
5406 load32(lengthAddr, temp2);
5407 branch32(Assembler::NotEqual, initLengthAddr, temp2, fail);
5409 // Result is |undefined| if length == 0.
5410 Label notEmpty, done;
5411 branchTest32(Assembler::NonZero, temp2, temp2, &notEmpty);
5413 moveValue(UndefinedValue(), output);
5414 jump(&done);
5417 bind(&notEmpty);
5419 // Load the last element.
5420 sub32(Imm32(1), temp2);
5421 BaseObjectElementIndex elementAddr(temp1, temp2);
5422 loadValue(elementAddr, output);
5424 // Pre-barrier the element because we're removing it from the array.
5425 EmitPreBarrier(*this, elementAddr, MIRType::Value);
5427 // Update length and initializedLength.
5428 store32(temp2, lengthAddr);
5429 store32(temp2, initLengthAddr);
5431 bind(&done);
5434 void MacroAssembler::packedArrayShift(Register array, ValueOperand output,
5435 Register temp1, Register temp2,
5436 LiveRegisterSet volatileRegs,
5437 Label* fail) {
5438 // Load obj->elements in temp1.
5439 loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);
5441 // Check flags.
5442 static constexpr uint32_t UnhandledFlags =
5443 ObjectElements::Flags::NON_PACKED |
5444 ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH |
5445 ObjectElements::Flags::NOT_EXTENSIBLE |
5446 ObjectElements::Flags::MAYBE_IN_ITERATION;
5447 Address flags(temp1, ObjectElements::offsetOfFlags());
5448 branchTest32(Assembler::NonZero, flags, Imm32(UnhandledFlags), fail);
5450 // Load length in temp2. Ensure length == initializedLength.
5451 Address lengthAddr(temp1, ObjectElements::offsetOfLength());
5452 Address initLengthAddr(temp1, ObjectElements::offsetOfInitializedLength());
5453 load32(lengthAddr, temp2);
5454 branch32(Assembler::NotEqual, initLengthAddr, temp2, fail);
5456 // Result is |undefined| if length == 0.
5457 Label notEmpty, done;
5458 branchTest32(Assembler::NonZero, temp2, temp2, &notEmpty);
5460 moveValue(UndefinedValue(), output);
5461 jump(&done);
5464 bind(&notEmpty);
5466 // Load the first element.
5467 Address elementAddr(temp1, 0);
5468 loadValue(elementAddr, output);
5470 // Move the other elements and update the initializedLength/length. This will
5471 // also trigger pre-barriers.
5473 // Ensure output is in volatileRegs. Don't preserve temp1 and temp2.
5474 volatileRegs.takeUnchecked(temp1);
5475 volatileRegs.takeUnchecked(temp2);
5476 if (output.hasVolatileReg()) {
5477 volatileRegs.addUnchecked(output);
5480 PushRegsInMask(volatileRegs);
5482 using Fn = void (*)(ArrayObject* arr);
5483 setupUnalignedABICall(temp1);
5484 passABIArg(array);
5485 callWithABI<Fn, ArrayShiftMoveElements>();
5487 PopRegsInMask(volatileRegs);
5490 bind(&done);
5493 void MacroAssembler::loadArgumentsObjectElement(Register obj, Register index,
5494 ValueOperand output,
5495 Register temp, Label* fail) {
5496 Register temp2 = output.scratchReg();
5498 // Get initial length value.
5499 unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);
5501 // Ensure no overridden elements.
5502 branchTest32(Assembler::NonZero, temp,
5503 Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);
5505 // Bounds check.
5506 rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
5507 spectreBoundsCheck32(index, temp, temp2, fail);
5509 // Load ArgumentsData.
5510 loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), temp);
5512 // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
5513 BaseValueIndex argValue(temp, index, ArgumentsData::offsetOfArgs());
5514 branchTestMagic(Assembler::Equal, argValue, fail);
5515 loadValue(argValue, output);
5518 void MacroAssembler::loadArgumentsObjectElementHole(Register obj,
5519 Register index,
5520 ValueOperand output,
5521 Register temp,
5522 Label* fail) {
5523 Register temp2 = output.scratchReg();
5525 // Get initial length value.
5526 unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);
5528 // Ensure no overridden elements.
5529 branchTest32(Assembler::NonZero, temp,
5530 Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);
5532 // Bounds check.
5533 Label outOfBounds, done;
5534 rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
5535 spectreBoundsCheck32(index, temp, temp2, &outOfBounds);
5537 // Load ArgumentsData.
5538 loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), temp);
5540 // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
5541 BaseValueIndex argValue(temp, index, ArgumentsData::offsetOfArgs());
5542 branchTestMagic(Assembler::Equal, argValue, fail);
5543 loadValue(argValue, output);
5544 jump(&done);
5546 bind(&outOfBounds);
5547 branch32(Assembler::LessThan, index, Imm32(0), fail);
5548 moveValue(UndefinedValue(), output);
5550 bind(&done);
5553 void MacroAssembler::loadArgumentsObjectElementExists(
5554 Register obj, Register index, Register output, Register temp, Label* fail) {
5555 // Ensure the index is non-negative.
5556 branch32(Assembler::LessThan, index, Imm32(0), fail);
5558 // Get initial length value.
5559 unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);
5561 // Ensure no overridden or deleted elements.
5562 branchTest32(Assembler::NonZero, temp,
5563 Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);
5565 // Compare index against the length.
5566 rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
5567 cmp32Set(Assembler::LessThan, index, temp, output);
5570 void MacroAssembler::loadArgumentsObjectLength(Register obj, Register output,
5571 Label* fail) {
5572 // Get initial length value.
5573 unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()),
5574 output);
5576 // Test if length has been overridden.
5577 branchTest32(Assembler::NonZero, output,
5578 Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT), fail);
5580 // Shift out arguments length and return it.
5581 rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), output);
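// The initial-length slot decoded by the helpers above packs flag bits below
// the length; conceptually (a sketch using the ArgumentsObject constants):
//
//   uint32_t DecodeArgumentsLength(uint32_t slot, bool* lengthOverridden) {
//     *lengthOverridden = slot & ArgumentsObject::LENGTH_OVERRIDDEN_BIT;
//     return slot >> ArgumentsObject::PACKED_BITS_COUNT;
//   }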
5584 void MacroAssembler::branchTestArgumentsObjectFlags(Register obj, Register temp,
5585 uint32_t flags,
5586 Condition cond,
5587 Label* label) {
5588 MOZ_ASSERT((flags & ~ArgumentsObject::PACKED_BITS_MASK) == 0);
5590 // Get initial length value.
5591 unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);
5593 // Test flags.
5594 branchTest32(cond, temp, Imm32(flags), label);
5597 static constexpr bool ValidateSizeRange(Scalar::Type from, Scalar::Type to) {
5598 for (Scalar::Type type = from; type < to; type = Scalar::Type(type + 1)) {
5599 if (TypedArrayElemSize(type) != TypedArrayElemSize(from)) {
5600 return false;
5603 return true;
5606 void MacroAssembler::typedArrayElementSize(Register obj, Register output) {
5607 static_assert(Scalar::Int8 == 0, "Int8 is the first typed array class");
5608 static_assert(
5609 (Scalar::BigUint64 - Scalar::Int8) == Scalar::MaxTypedArrayViewType - 1,
5610 "BigUint64 is the last typed array class");
5612 Label one, two, four, eight, done;
5614 loadObjClassUnsafe(obj, output);
5616 static_assert(ValidateSizeRange(Scalar::Int8, Scalar::Int16),
5617 "element size is one in [Int8, Int16)");
5618 branchPtr(Assembler::Below, output,
5619 ImmPtr(TypedArrayObject::classForType(Scalar::Int16)), &one);
5621 static_assert(ValidateSizeRange(Scalar::Int16, Scalar::Int32),
5622 "element size is two in [Int16, Int32)");
5623 branchPtr(Assembler::Below, output,
5624 ImmPtr(TypedArrayObject::classForType(Scalar::Int32)), &two);
5626 static_assert(ValidateSizeRange(Scalar::Int32, Scalar::Float64),
5627 "element size is four in [Int32, Float64)");
5628 branchPtr(Assembler::Below, output,
5629 ImmPtr(TypedArrayObject::classForType(Scalar::Float64)), &four);
5631 static_assert(ValidateSizeRange(Scalar::Float64, Scalar::Uint8Clamped),
5632 "element size is eight in [Float64, Uint8Clamped)");
5633 branchPtr(Assembler::Below, output,
5634 ImmPtr(TypedArrayObject::classForType(Scalar::Uint8Clamped)),
5635 &eight);
5637 static_assert(ValidateSizeRange(Scalar::Uint8Clamped, Scalar::BigInt64),
5638 "element size is one in [Uint8Clamped, BigInt64)");
5639 branchPtr(Assembler::Below, output,
5640 ImmPtr(TypedArrayObject::classForType(Scalar::BigInt64)), &one);
5642 static_assert(
5643 ValidateSizeRange(Scalar::BigInt64, Scalar::MaxTypedArrayViewType),
5644 "element size is eight in [BigInt64, MaxTypedArrayViewType)");
5645 // Fall through for BigInt64 and BigUint64
5647 bind(&eight);
5648 move32(Imm32(8), output);
5649 jump(&done);
5651 bind(&four);
5652 move32(Imm32(4), output);
5653 jump(&done);
5655 bind(&two);
5656 move32(Imm32(2), output);
5657 jump(&done);
5659 bind(&one);
5660 move32(Imm32(1), output);
5662 bind(&done);
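// The chain of class-pointer comparisons above works because the typed array
// JSClasses are laid out contiguously in Scalar::Type order (as the
// static_asserts verify), so element size follows from a few range checks.
// A sketch of the equivalent C++:
//
//   size_t TypedArrayElementSizeSketch(const JSClass* clasp) {
//     using T = TypedArrayObject;
//     if (clasp < T::classForType(Scalar::Int16)) return 1;
//     if (clasp < T::classForType(Scalar::Int32)) return 2;
//     if (clasp < T::classForType(Scalar::Float64)) return 4;
//     if (clasp < T::classForType(Scalar::Uint8Clamped)) return 8;
//     if (clasp < T::classForType(Scalar::BigInt64)) return 1;
//     return 8;  // BigInt64 and BigUint64
//   }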
5665 void MacroAssembler::branchIfClassIsNotTypedArray(Register clasp,
5666 Label* notTypedArray) {
5667 static_assert(Scalar::Int8 == 0, "Int8 is the first typed array class");
5668 const JSClass* firstTypedArrayClass =
5669 TypedArrayObject::classForType(Scalar::Int8);
5671 static_assert(
5672 (Scalar::BigUint64 - Scalar::Int8) == Scalar::MaxTypedArrayViewType - 1,
5673 "BigUint64 is the last typed array class");
5674 const JSClass* lastTypedArrayClass =
5675 TypedArrayObject::classForType(Scalar::BigUint64);
5677 branchPtr(Assembler::Below, clasp, ImmPtr(firstTypedArrayClass),
5678 notTypedArray);
5679 branchPtr(Assembler::Above, clasp, ImmPtr(lastTypedArrayClass),
5680 notTypedArray);
5683 void MacroAssembler::branchIfHasDetachedArrayBuffer(Register obj, Register temp,
5684 Label* label) {
5685 // Inline implementation of ArrayBufferViewObject::hasDetachedBuffer().
5687 // Load obj->elements in temp.
5688 loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);
5690 // Shared buffers can't be detached.
5691 Label done;
5692 branchTest32(Assembler::NonZero,
5693 Address(temp, ObjectElements::offsetOfFlags()),
5694 Imm32(ObjectElements::SHARED_MEMORY), &done);
5696 // An ArrayBufferView with a null buffer has never had its buffer exposed to
5697 // become detached.
5698 fallibleUnboxObject(Address(obj, ArrayBufferViewObject::bufferOffset()), temp,
5699 &done);
5701 // Load the ArrayBuffer flags and branch if the detached flag is set.
5702 unboxInt32(Address(temp, ArrayBufferObject::offsetOfFlagsSlot()), temp);
5703 branchTest32(Assembler::NonZero, temp, Imm32(ArrayBufferObject::DETACHED),
5704 label);
5706 bind(&done);
5709 void MacroAssembler::branchIfNativeIteratorNotReusable(Register ni,
5710 Label* notReusable) {
5711 // See NativeIterator::isReusable.
5712 Address flagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());
5714 #ifdef DEBUG
5715 Label niIsInitialized;
5716 branchTest32(Assembler::NonZero, flagsAddr,
5717 Imm32(NativeIterator::Flags::Initialized), &niIsInitialized);
5718 assumeUnreachable(
5719 "Expected a NativeIterator that's been completely "
5720 "initialized");
5721 bind(&niIsInitialized);
5722 #endif
5724 branchTest32(Assembler::NonZero, flagsAddr,
5725 Imm32(NativeIterator::Flags::NotReusable), notReusable);
5728 void MacroAssembler::branchNativeIteratorIndices(Condition cond, Register ni,
5729 Register temp,
5730 NativeIteratorIndices kind,
5731 Label* label) {
5732 Address iterFlagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());
5733 load32(iterFlagsAddr, temp);
5734 and32(Imm32(NativeIterator::IndicesMask), temp);
5735 uint32_t shiftedKind = uint32_t(kind) << NativeIterator::IndicesShift;
5736 branch32(cond, temp, Imm32(shiftedKind), label);
5739 static void LoadNativeIterator(MacroAssembler& masm, Register obj,
5740 Register dest) {
5741 MOZ_ASSERT(obj != dest);
5743 #ifdef DEBUG
5744 // Assert we have a PropertyIteratorObject.
5745 Label ok;
5746 masm.branchTestObjClass(Assembler::Equal, obj,
5747 &PropertyIteratorObject::class_, dest, obj, &ok);
5748 masm.assumeUnreachable("Expected PropertyIteratorObject!");
5749 masm.bind(&ok);
5750 #endif
5752 // Load NativeIterator object.
5753 Address slotAddr(obj, PropertyIteratorObject::offsetOfIteratorSlot());
5754 masm.loadPrivate(slotAddr, dest);
5757 // The ShapeCachePtr may be used to cache an iterator for for-in. Return that
5758 // iterator in |dest| if:
5759 // - the shape cache pointer exists and stores a native iterator
5760 // - the iterator is reusable
5761 // - the iterated object has no dense elements
5762 // - the shapes of each object on the proto chain of |obj| match the cached
5763 // shapes
5764 // - the proto chain has no dense elements
5765 // Otherwise, jump to |failure|.
5766 void MacroAssembler::maybeLoadIteratorFromShape(Register obj, Register dest,
5767 Register temp, Register temp2,
5768 Register temp3,
5769 Label* failure) {
5770 // Register usage:
5771 // obj: always contains the input object
5772 // temp: walks the obj->shape->baseshape->proto->shape->... chain
5773 // temp2: points to the native iterator. Incremented to walk the shapes array.
5774 // temp3: scratch space
5775 // dest: stores the resulting PropertyIteratorObject on success
5777 Label success;
5778 Register shapeAndProto = temp;
5779 Register nativeIterator = temp2;
5781 // Load ShapeCache from shape.
5782 loadPtr(Address(obj, JSObject::offsetOfShape()), shapeAndProto);
5783 loadPtr(Address(shapeAndProto, Shape::offsetOfCachePtr()), dest);
5785 // Check if it's an iterator.
5786 movePtr(dest, temp3);
5787 andPtr(Imm32(ShapeCachePtr::MASK), temp3);
5788 branch32(Assembler::NotEqual, temp3, Imm32(ShapeCachePtr::ITERATOR), failure);
5790 // If we've cached an iterator, |obj| must be a native object.
5791 #ifdef DEBUG
5792 Label nonNative;
5793 branchIfNonNativeObj(obj, temp3, &nonNative);
5794 #endif
5796 // Verify that |obj| has no dense elements.
5797 loadPtr(Address(obj, NativeObject::offsetOfElements()), temp3);
5798 branch32(Assembler::NotEqual,
5799 Address(temp3, ObjectElements::offsetOfInitializedLength()),
5800 Imm32(0), failure);
5802 // Clear tag bits from iterator object. |dest| is now valid.
5803 // Load the native iterator and verify that it's reusable.
5804 andPtr(Imm32(~ShapeCachePtr::MASK), dest);
5805 LoadNativeIterator(*this, dest, nativeIterator);
5806 branchIfNativeIteratorNotReusable(nativeIterator, failure);
5808 // We have to compare the shapes in the native iterator with the shapes on the
5809 // proto chain to ensure the cached iterator is still valid. The shape array
5810 // always starts at a fixed offset from the base of the NativeIterator, so
5811 // instead of using an instruction outside the loop to initialize a pointer to
5812 // the shapes array, we can bake it into the offset and reuse the pointer to
5813 // the NativeIterator. We add |sizeof(Shape*)| to start at the second shape.
5814 // (The first shape corresponds to the object itself. We don't have to check
5815 // it, because we got the iterator via the shape.)
5816 size_t nativeIteratorProtoShapeOffset =
5817 NativeIterator::offsetOfFirstShape() + sizeof(Shape*);
5819 // Loop over the proto chain. At the head of the loop, |shapeAndProto| holds
5820 // the shape of the current object, and |nativeIterator| (offset by
5821 // |nativeIteratorProtoShapeOffset|) points to the expected shape of its proto.
5822 Label protoLoop;
5823 bind(&protoLoop);
5825 // Load the proto. If the proto is null, then we're done.
5826 loadPtr(Address(shapeAndProto, Shape::offsetOfBaseShape()), shapeAndProto);
5827 loadPtr(Address(shapeAndProto, BaseShape::offsetOfProto()), shapeAndProto);
5828 branchPtr(Assembler::Equal, shapeAndProto, ImmPtr(nullptr), &success);
5830 #ifdef DEBUG
5831 // We have guarded every shape up until this point, so we know that the proto
5832 // is a native object.
5833 branchIfNonNativeObj(shapeAndProto, temp3, &nonNative);
5834 #endif
5836 // Verify that the proto has no dense elements.
5837 loadPtr(Address(shapeAndProto, NativeObject::offsetOfElements()), temp3);
5838 branch32(Assembler::NotEqual,
5839 Address(temp3, ObjectElements::offsetOfInitializedLength()),
5840 Imm32(0), failure);
5842 // Compare the shape of the proto to the expected shape.
5843 loadPtr(Address(shapeAndProto, JSObject::offsetOfShape()), shapeAndProto);
5844 loadPtr(Address(nativeIterator, nativeIteratorProtoShapeOffset), temp3);
5845 branchPtr(Assembler::NotEqual, shapeAndProto, temp3, failure);
5847 // Advance |nativeIterator| to the next cached shape and jump back to the top.
5848 addPtr(Imm32(sizeof(Shape*)), nativeIterator);
5849 jump(&protoLoop);
5851 #ifdef DEBUG
5852 bind(&nonNative);
5853 assumeUnreachable("Expected NativeObject in maybeLoadIteratorFromShape");
5854 #endif
5856 bind(&success);
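// In C++ terms the fast path above approximates the following; the accessor
// names (cache(), toIterator(), shapesBegin(), and so on) are illustrative
// rather than the exact runtime API:
//
//   PropertyIteratorObject* MaybeGetCachedIterator(NativeObject* obj) {
//     ShapeCachePtr cache = obj->shape()->cache();
//     if (!cache.isIterator()) return nullptr;
//     PropertyIteratorObject* iterObj = cache.toIterator();
//     NativeIterator* ni = iterObj->getNativeIterator();
//     if (!ni->isReusable() || obj->getDenseInitializedLength() != 0) {
//       return nullptr;
//     }
//     Shape** expected = ni->shapesBegin() + 1;  // skip obj's own shape
//     for (JSObject* proto = obj->staticPrototype(); proto;
//          proto = proto->staticPrototype()) {
//       auto* nproto = &proto->as<NativeObject>();
//       if (nproto->getDenseInitializedLength() != 0 ||
//           nproto->shape() != *expected++) {
//         return nullptr;
//       }
//     }
//     return iterObj;
//   }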
5859 void MacroAssembler::iteratorMore(Register obj, ValueOperand output,
5860 Register temp) {
5861 Label done;
5862 Register outputScratch = output.scratchReg();
5863 LoadNativeIterator(*this, obj, outputScratch);
5865 // If propertyCursor_ < propertiesEnd_, load the next string and advance
5866 // the cursor. Otherwise return MagicValue(JS_NO_ITER_VALUE).
5867 Label iterDone;
5868 Address cursorAddr(outputScratch, NativeIterator::offsetOfPropertyCursor());
5869 Address cursorEndAddr(outputScratch, NativeIterator::offsetOfPropertiesEnd());
5870 loadPtr(cursorAddr, temp);
5871 branchPtr(Assembler::BelowOrEqual, cursorEndAddr, temp, &iterDone);
5873 // Get next string.
5874 loadPtr(Address(temp, 0), temp);
5876 // Increase the cursor.
5877 addPtr(Imm32(sizeof(GCPtr<JSLinearString*>)), cursorAddr);
5879 tagValue(JSVAL_TYPE_STRING, temp, output);
5880 jump(&done);
5882 bind(&iterDone);
5883 moveValue(MagicValue(JS_NO_ITER_VALUE), output);
5885 bind(&done);
5888 void MacroAssembler::iteratorClose(Register obj, Register temp1, Register temp2,
5889 Register temp3) {
5890 LoadNativeIterator(*this, obj, temp1);
5892 // The shared iterator used for for-in with null/undefined is immutable and
5893 // unlinked. See NativeIterator::isEmptyIteratorSingleton.
5894 Label done;
5895 branchTest32(Assembler::NonZero,
5896 Address(temp1, NativeIterator::offsetOfFlagsAndCount()),
5897 Imm32(NativeIterator::Flags::IsEmptyIteratorSingleton), &done);
5899 // Clear active bit.
5900 and32(Imm32(~NativeIterator::Flags::Active),
5901 Address(temp1, NativeIterator::offsetOfFlagsAndCount()));
5903 // Clear objectBeingIterated.
5904 Address iterObjAddr(temp1, NativeIterator::offsetOfObjectBeingIterated());
5905 guardedCallPreBarrierAnyZone(iterObjAddr, MIRType::Object, temp2);
5906 storePtr(ImmPtr(nullptr), iterObjAddr);
5908 // Reset property cursor.
5909 loadPtr(Address(temp1, NativeIterator::offsetOfShapesEnd()), temp2);
5910 storePtr(temp2, Address(temp1, NativeIterator::offsetOfPropertyCursor()));
5912 // Unlink from the iterator list.
5913 const Register next = temp2;
5914 const Register prev = temp3;
5915 loadPtr(Address(temp1, NativeIterator::offsetOfNext()), next);
5916 loadPtr(Address(temp1, NativeIterator::offsetOfPrev()), prev);
5917 storePtr(prev, Address(next, NativeIterator::offsetOfPrev()));
5918 storePtr(next, Address(prev, NativeIterator::offsetOfNext()));
5919 #ifdef DEBUG
5920 storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfNext()));
5921 storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfPrev()));
5922 #endif
5924 bind(&done);
5927 void MacroAssembler::registerIterator(Register enumeratorsList, Register iter,
5928 Register temp) {
5929 // iter->next = list
5930 storePtr(enumeratorsList, Address(iter, NativeIterator::offsetOfNext()));
5932 // iter->prev = list->prev
5933 loadPtr(Address(enumeratorsList, NativeIterator::offsetOfPrev()), temp);
5934 storePtr(temp, Address(iter, NativeIterator::offsetOfPrev()));
5936 // list->prev->next = iter
5937 storePtr(iter, Address(temp, NativeIterator::offsetOfNext()));
5939 // list->prev = iter
5940 storePtr(iter, Address(enumeratorsList, NativeIterator::offsetOfPrev()));
5943 void MacroAssembler::toHashableNonGCThing(ValueOperand value,
5944 ValueOperand result,
5945 FloatRegister tempFloat) {
5946 // Inline implementation of |HashableValue::setValue()|.
5948 #ifdef DEBUG
5949 Label ok;
5950 branchTestGCThing(Assembler::NotEqual, value, &ok);
5951 assumeUnreachable("Unexpected GC thing");
5952 bind(&ok);
5953 #endif
5955 Label useInput, done;
5956 branchTestDouble(Assembler::NotEqual, value, &useInput);
5958 Register int32 = result.scratchReg();
5959 unboxDouble(value, tempFloat);
5961 // Normalize int32-valued doubles to int32 and negative zero to +0.
5962 Label canonicalize;
5963 convertDoubleToInt32(tempFloat, int32, &canonicalize, false);
5965 tagValue(JSVAL_TYPE_INT32, int32, result);
5966 jump(&done);
5968 bind(&canonicalize);
5970 // Normalize the sign bit of a NaN.
5971 branchDouble(Assembler::DoubleOrdered, tempFloat, tempFloat, &useInput);
5972 moveValue(JS::NaNValue(), result);
5973 jump(&done);
5977 bind(&useInput);
5978 moveValue(value, result);
5980 bind(&done);
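// The canonicalization above matches this logic (a sketch;
// mozilla::NumberEqualsInt32 is from mozilla/FloatingPoint.h):
//
//   Value ToHashableNonGCThing(const Value& v) {
//     if (!v.isDouble()) {
//       return v;
//     }
//     double d = v.toDouble();
//     int32_t i;
//     if (mozilla::NumberEqualsInt32(d, &i)) {
//       return Int32Value(i);   // also folds -0.0 into +0
//     }
//     if (std::isnan(d)) {
//       return JS::NaNValue();  // canonical NaN
//     }
//     return v;
//   }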
5983 void MacroAssembler::toHashableValue(ValueOperand value, ValueOperand result,
5984 FloatRegister tempFloat,
5985 Label* atomizeString, Label* tagString) {
5986 // Inline implementation of |HashableValue::setValue()|.
5988 ScratchTagScope tag(*this, value);
5989 splitTagForTest(value, tag);
5991 Label notString, useInput, done;
5992 branchTestString(Assembler::NotEqual, tag, &notString);
5994 ScratchTagScopeRelease _(&tag);
5996 Register str = result.scratchReg();
5997 unboxString(value, str);
5999 branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
6000 Imm32(JSString::ATOM_BIT), &useInput);
6002 jump(atomizeString);
6003 bind(tagString);
6005 tagValue(JSVAL_TYPE_STRING, str, result);
6006 jump(&done);
6008 bind(&notString);
6009 branchTestDouble(Assembler::NotEqual, tag, &useInput);
6011 ScratchTagScopeRelease _(&tag);
6013 Register int32 = result.scratchReg();
6014 unboxDouble(value, tempFloat);
6016 Label canonicalize;
6017 convertDoubleToInt32(tempFloat, int32, &canonicalize, false);
6019 tagValue(JSVAL_TYPE_INT32, int32, result);
6020 jump(&done);
6022 bind(&canonicalize);
6024 branchDouble(Assembler::DoubleOrdered, tempFloat, tempFloat, &useInput);
6025 moveValue(JS::NaNValue(), result);
6026 jump(&done);
6030 bind(&useInput);
6031 moveValue(value, result);
6033 bind(&done);
6036 void MacroAssembler::scrambleHashCode(Register result) {
6037 // Inline implementation of |mozilla::ScrambleHashCode()|.
6039 mul32(Imm32(mozilla::kGoldenRatioU32), result);
6042 void MacroAssembler::prepareHashNonGCThing(ValueOperand value, Register result,
6043 Register temp) {
6044 // Inline implementation of |OrderedHashTable::prepareHash()| and
6045 // |mozilla::HashGeneric(v.asRawBits())|.
6047 #ifdef DEBUG
6048 Label ok;
6049 branchTestGCThing(Assembler::NotEqual, value, &ok);
6050 assumeUnreachable("Unexpected GC thing");
6051 bind(&ok);
6052 #endif
6054 // uint32_t v1 = static_cast<uint32_t>(aValue);
6055 #ifdef JS_PUNBOX64
6056 move64To32(value.toRegister64(), result);
6057 #else
6058 move32(value.payloadReg(), result);
6059 #endif
6061 // uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32);
6062 #ifdef JS_PUNBOX64
6063 auto r64 = Register64(temp);
6064 move64(value.toRegister64(), r64);
6065 rshift64Arithmetic(Imm32(32), r64);
6066 #else
6067 // TODO: This seems like a bug in mozilla::detail::AddUintptrToHash().
6068 // The uint64_t input is first converted to uintptr_t and then back to
6069 // uint64_t. But |uint64_t(uintptr_t(bits))| actually only clears the high
6070 // bits, so this computation:
6072 // aValue = uintptr_t(bits)
6073 // v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32)
6075 // really just sets |v2 = 0|. And that means the xor-operation in AddU32ToHash
6076 // can be optimized away, because |x ^ 0 = x|.
6078 // Filed as bug 1718516.
6079 #endif
6081 // mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash) ^ aValue);
6082 // with |aHash = 0| and |aValue = v1|.
6083 mul32(Imm32(mozilla::kGoldenRatioU32), result);
6085 // mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash) ^ aValue);
6086 // with |aHash = <above hash>| and |aValue = v2|.
6087 rotateLeft(Imm32(5), result, result);
6088 #ifdef JS_PUNBOX64
6089 xor32(temp, result);
6090 #endif
6092 // Combine |mul32| and |scrambleHashCode| by directly multiplying with
6093 // |kGoldenRatioU32 * kGoldenRatioU32|.
6095 // mul32(Imm32(mozilla::kGoldenRatioU32), result);
6097 // scrambleHashCode(result);
6098 mul32(Imm32(mozilla::kGoldenRatioU32 * mozilla::kGoldenRatioU32), result);
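// A C++ model of the hash computed above: mozilla::HashGeneric() over the
// value's raw bits, with the trailing multiply fused into ScrambleHashCode():
//
//   uint32_t AddU32ToHash(uint32_t hash, uint32_t value) {
//     return mozilla::kGoldenRatioU32 *
//            (mozilla::RotateLeft(hash, 5) ^ value);
//   }
//
//   uint32_t PrepareHashNonGCThing(uint64_t bits) {
//     uint32_t h = AddU32ToHash(0, uint32_t(bits));   // low word
//     h = AddU32ToHash(h, uint32_t(bits >> 32));      // high word (0 on 32-bit)
//     return h * mozilla::kGoldenRatioU32;            // ScrambleHashCode
//   }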
6101 void MacroAssembler::prepareHashString(Register str, Register result,
6102 Register temp) {
6103 // Inline implementation of |OrderedHashTable::prepareHash()| and
6104 // |JSAtom::hash()|.
6106 #ifdef DEBUG
6107 Label ok;
6108 branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
6109 Imm32(JSString::ATOM_BIT), &ok);
6110 assumeUnreachable("Unexpected non-atom string");
6111 bind(&ok);
6112 #endif
6114 move32(Imm32(JSString::FAT_INLINE_MASK), temp);
6115 and32(Address(str, JSString::offsetOfFlags()), temp);
6117 // Set |result| to 1 for FatInlineAtoms.
6118 move32(Imm32(0), result);
6119 cmp32Set(Assembler::Equal, temp, Imm32(JSString::FAT_INLINE_MASK), result);
6121 // Use a computed load for branch-free code.
6123 static_assert(FatInlineAtom::offsetOfHash() > NormalAtom::offsetOfHash());
6125 constexpr size_t offsetDiff =
6126 FatInlineAtom::offsetOfHash() - NormalAtom::offsetOfHash();
6127 static_assert(mozilla::IsPowerOfTwo(offsetDiff));
6129 uint8_t shift = mozilla::FloorLog2Size(offsetDiff);
6130 if (IsShiftInScaleRange(shift)) {
6131 load32(
6132 BaseIndex(str, result, ShiftToScale(shift), NormalAtom::offsetOfHash()),
6133 result);
6134 } else {
6135 lshift32(Imm32(shift), result);
6136 load32(BaseIndex(str, result, TimesOne, NormalAtom::offsetOfHash()),
6137 result);
6140 scrambleHashCode(result);
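// The branch-free load above selects between the two possible hash offsets;
// roughly the following (the offsets are the ones named in the static_asserts
// above, and |flags()| is used only for illustration):
//
//   uint32_t PrepareHashString(const JSAtom* atom) {
//     bool fat = (atom->flags() & JSString::FAT_INLINE_MASK) ==
//                JSString::FAT_INLINE_MASK;
//     size_t offset = fat ? FatInlineAtom::offsetOfHash()
//                         : NormalAtom::offsetOfHash();
//     uint32_t hash;
//     memcpy(&hash, reinterpret_cast<const uint8_t*>(atom) + offset,
//            sizeof(hash));
//     return mozilla::ScrambleHashCode(hash);
//   }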
6143 void MacroAssembler::prepareHashSymbol(Register sym, Register result) {
6144 // Inline implementation of |OrderedHashTable::prepareHash()| and
6145 // |Symbol::hash()|.
6147 load32(Address(sym, JS::Symbol::offsetOfHash()), result);
6149 scrambleHashCode(result);
6152 void MacroAssembler::prepareHashBigInt(Register bigInt, Register result,
6153 Register temp1, Register temp2,
6154 Register temp3) {
6155 // Inline implementation of |OrderedHashTable::prepareHash()| and
6156 // |BigInt::hash()|.
6158 // Inline implementation of |mozilla::AddU32ToHash()|.
6159 auto addU32ToHash = [&](auto toAdd) {
6160 rotateLeft(Imm32(5), result, result);
6161 xor32(toAdd, result);
6162 mul32(Imm32(mozilla::kGoldenRatioU32), result);
6165 move32(Imm32(0), result);
6167 // Inline |mozilla::HashBytes()|.
6169 load32(Address(bigInt, BigInt::offsetOfLength()), temp1);
6170 loadBigIntDigits(bigInt, temp2);
6172 Label start, loop;
6173 jump(&start);
6174 bind(&loop);
6177 // Compute |AddToHash(AddToHash(hash, data), sizeof(Digit))|.
6178 #if defined(JS_CODEGEN_MIPS64)
6179 // Hash the lower 32-bits.
6180 addU32ToHash(Address(temp2, 0));
6182 // Hash the upper 32-bits.
6183 addU32ToHash(Address(temp2, sizeof(int32_t)));
6184 #elif JS_PUNBOX64
6185 // Use a single 64-bit load on non-MIPS64 platforms.
6186 loadPtr(Address(temp2, 0), temp3);
6188 // Hash the lower 32-bits.
6189 addU32ToHash(temp3);
6191 // Hash the upper 32-bits.
6192 rshiftPtr(Imm32(32), temp3);
6193 addU32ToHash(temp3);
6194 #else
6195 addU32ToHash(Address(temp2, 0));
6196 #endif
6198 addPtr(Imm32(sizeof(BigInt::Digit)), temp2);
6200 bind(&start);
6201 branchSub32(Assembler::NotSigned, Imm32(1), temp1, &loop);
6203 // Compute |mozilla::AddToHash(h, isNegative())|.
6205 static_assert(mozilla::IsPowerOfTwo(BigInt::signBitMask()));
6207 load32(Address(bigInt, BigInt::offsetOfFlags()), temp1);
6208 and32(Imm32(BigInt::signBitMask()), temp1);
6209 rshift32(Imm32(mozilla::FloorLog2(BigInt::signBitMask())), temp1);
6211 addU32ToHash(temp1);
6214 scrambleHashCode(result);
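// Sketch of the equivalent C++, mirroring BigInt::hash(); |digitLength()| and
// |digit()| are illustrative accessors for the length and digits loaded above,
// and AddU32ToHash is the helper sketched for prepareHashNonGCThing:
//
//   uint32_t PrepareHashBigInt(const JS::BigInt* bi) {
//     uint32_t h = 0;
//     for (size_t i = 0; i < bi->digitLength(); i++) {
//       uint64_t d = bi->digit(i);
//       h = AddU32ToHash(h, uint32_t(d));
//       if (sizeof(BigInt::Digit) == 8) {
//         h = AddU32ToHash(h, uint32_t(d >> 32));  // upper half of the digit
//       }
//     }
//     h = AddU32ToHash(h, bi->isNegative() ? 1 : 0);
//     return mozilla::ScrambleHashCode(h);
//   }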
6217 void MacroAssembler::prepareHashObject(Register setObj, ValueOperand value,
6218 Register result, Register temp1,
6219 Register temp2, Register temp3,
6220 Register temp4) {
6221 #ifdef JS_PUNBOX64
6222 // Inline implementation of |OrderedHashTable::prepareHash()| and
6223 // |HashCodeScrambler::scramble(v.asRawBits())|.
6225 // Load the |ValueSet| or |ValueMap|.
6226 static_assert(SetObject::getDataSlotOffset() ==
6227 MapObject::getDataSlotOffset());
6228 loadPrivate(Address(setObj, SetObject::getDataSlotOffset()), temp1);
6230 // Load |HashCodeScrambler::mK0| and |HashCodeScrambler::mK1|.
6231 static_assert(ValueSet::offsetOfImplHcsK0() == ValueMap::offsetOfImplHcsK0());
6232 static_assert(ValueSet::offsetOfImplHcsK1() == ValueMap::offsetOfImplHcsK1());
6233 auto k0 = Register64(temp1);
6234 auto k1 = Register64(temp2);
6235 load64(Address(temp1, ValueSet::offsetOfImplHcsK1()), k1);
6236 load64(Address(temp1, ValueSet::offsetOfImplHcsK0()), k0);
6238 // Hash numbers are 32-bit values, so only hash the lower double-word.
6239 static_assert(sizeof(mozilla::HashNumber) == 4);
6240 move32To64ZeroExtend(value.valueReg(), Register64(result));
6242 // Inline implementation of |SipHasher::sipHash()|.
6243 auto m = Register64(result);
6244 auto v0 = Register64(temp3);
6245 auto v1 = Register64(temp4);
6246 auto v2 = k0;
6247 auto v3 = k1;
6249 auto sipRound = [&]() {
6250 // mV0 = WrappingAdd(mV0, mV1);
6251 add64(v1, v0);
6253 // mV1 = RotateLeft(mV1, 13);
6254 rotateLeft64(Imm32(13), v1, v1, InvalidReg);
6256 // mV1 ^= mV0;
6257 xor64(v0, v1);
6259 // mV0 = RotateLeft(mV0, 32);
6260 rotateLeft64(Imm32(32), v0, v0, InvalidReg);
6262 // mV2 = WrappingAdd(mV2, mV3);
6263 add64(v3, v2);
6265 // mV3 = RotateLeft(mV3, 16);
6266 rotateLeft64(Imm32(16), v3, v3, InvalidReg);
6268 // mV3 ^= mV2;
6269 xor64(v2, v3);
6271 // mV0 = WrappingAdd(mV0, mV3);
6272 add64(v3, v0);
6274 // mV3 = RotateLeft(mV3, 21);
6275 rotateLeft64(Imm32(21), v3, v3, InvalidReg);
6277 // mV3 ^= mV0;
6278 xor64(v0, v3);
6280 // mV2 = WrappingAdd(mV2, mV1);
6281 add64(v1, v2);
6283 // mV1 = RotateLeft(mV1, 17);
6284 rotateLeft64(Imm32(17), v1, v1, InvalidReg);
6286 // mV1 ^= mV2;
6287 xor64(v2, v1);
6289 // mV2 = RotateLeft(mV2, 32);
6290 rotateLeft64(Imm32(32), v2, v2, InvalidReg);
6293 // 1. Initialization.
6294 // mV0 = aK0 ^ UINT64_C(0x736f6d6570736575);
6295 move64(Imm64(0x736f6d6570736575), v0);
6296 xor64(k0, v0);
6298 // mV1 = aK1 ^ UINT64_C(0x646f72616e646f6d);
6299 move64(Imm64(0x646f72616e646f6d), v1);
6300 xor64(k1, v1);
6302 // mV2 = aK0 ^ UINT64_C(0x6c7967656e657261);
6303 MOZ_ASSERT(v2 == k0);
6304 xor64(Imm64(0x6c7967656e657261), v2);
6306 // mV3 = aK1 ^ UINT64_C(0x7465646279746573);
6307 MOZ_ASSERT(v3 == k1);
6308 xor64(Imm64(0x7465646279746573), v3);
6310 // 2. Compression.
6311 // mV3 ^= aM;
6312 xor64(m, v3);
6314 // sipRound();
6315 sipRound();
6317 // mV0 ^= aM;
6318 xor64(m, v0);
6320 // 3. Finalization.
6321 // mV2 ^= 0xff;
6322 xor64(Imm64(0xff), v2);
6324 // for (int i = 0; i < 3; i++) sipRound();
6325 for (int i = 0; i < 3; i++) {
6326 sipRound();
6329 // return mV0 ^ mV1 ^ mV2 ^ mV3;
6330 xor64(v1, v0);
6331 xor64(v2, v3);
6332 xor64(v3, v0);
6334 move64To32(v0, result);
6336 scrambleHashCode(result);
6337 #else
6338 MOZ_CRASH("Not implemented");
6339 #endif
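// The JS_PUNBOX64 block above inlines SipHash-1-3 over a single 64-bit word
// (the value's low 32 bits, zero-extended), keyed with the table's k0/k1.
// Conceptually (|hcsK0()| and |hcsK1()| are illustrative accessors for the
// fields loaded via offsetOfImplHcsK0/K1):
//
//   uint32_t PrepareHashObject(const ValueSet* set, const Value& v) {
//     mozilla::HashCodeScrambler hcs(set->hcsK0(), set->hcsK1());
//     return mozilla::ScrambleHashCode(hcs.scramble(v.asRawBits()));
//   }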
6342 void MacroAssembler::prepareHashValue(Register setObj, ValueOperand value,
6343 Register result, Register temp1,
6344 Register temp2, Register temp3,
6345 Register temp4) {
6346 Label isString, isObject, isSymbol, isBigInt;
6348 ScratchTagScope tag(*this, value);
6349 splitTagForTest(value, tag);
6351 branchTestString(Assembler::Equal, tag, &isString);
6352 branchTestObject(Assembler::Equal, tag, &isObject);
6353 branchTestSymbol(Assembler::Equal, tag, &isSymbol);
6354 branchTestBigInt(Assembler::Equal, tag, &isBigInt);
6357 Label done;
6359 prepareHashNonGCThing(value, result, temp1);
6360 jump(&done);
6362 bind(&isString);
6364 unboxString(value, temp1);
6365 prepareHashString(temp1, result, temp2);
6366 jump(&done);
6368 bind(&isObject);
6370 prepareHashObject(setObj, value, result, temp1, temp2, temp3, temp4);
6371 jump(&done);
6373 bind(&isSymbol);
6375 unboxSymbol(value, temp1);
6376 prepareHashSymbol(temp1, result);
6377 jump(&done);
6379 bind(&isBigInt);
6381 unboxBigInt(value, temp1);
6382 prepareHashBigInt(temp1, result, temp2, temp3, temp4);
6384 // Fallthrough to |done|.
6387 bind(&done);
6390 template <typename OrderedHashTable>
6391 void MacroAssembler::orderedHashTableLookup(Register setOrMapObj,
6392 ValueOperand value, Register hash,
6393 Register entryTemp, Register temp1,
6394 Register temp2, Register temp3,
6395 Register temp4, Label* found,
6396 IsBigInt isBigInt) {
6397 // Inline implementation of |OrderedHashTable::lookup()|.
6399 MOZ_ASSERT_IF(isBigInt == IsBigInt::No, temp3 == InvalidReg);
6400 MOZ_ASSERT_IF(isBigInt == IsBigInt::No, temp4 == InvalidReg);
6402 #ifdef DEBUG
6403 Label ok;
6404 if (isBigInt == IsBigInt::No) {
6405 branchTestBigInt(Assembler::NotEqual, value, &ok);
6406 assumeUnreachable("Unexpected BigInt");
6407 } else if (isBigInt == IsBigInt::Yes) {
6408 branchTestBigInt(Assembler::Equal, value, &ok);
6409 assumeUnreachable("Unexpected non-BigInt");
6411 bind(&ok);
6412 #endif
6414 #ifdef DEBUG
6415 PushRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
6417 pushValue(value);
6418 moveStackPtrTo(temp2);
6420 setupUnalignedABICall(temp1);
6421 loadJSContext(temp1);
6422 passABIArg(temp1);
6423 passABIArg(setOrMapObj);
6424 passABIArg(temp2);
6425 passABIArg(hash);
6427 if constexpr (std::is_same_v<OrderedHashTable, ValueSet>) {
6428 using Fn =
6429 void (*)(JSContext*, SetObject*, const Value*, mozilla::HashNumber);
6430 callWithABI<Fn, jit::AssertSetObjectHash>();
6431 } else {
6432 using Fn =
6433 void (*)(JSContext*, MapObject*, const Value*, mozilla::HashNumber);
6434 callWithABI<Fn, jit::AssertMapObjectHash>();
6437 popValue(value);
6438 PopRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
6439 #endif
6441 // Load the |ValueSet| or |ValueMap|.
6442 static_assert(SetObject::getDataSlotOffset() ==
6443 MapObject::getDataSlotOffset());
6444 loadPrivate(Address(setOrMapObj, SetObject::getDataSlotOffset()), temp1);
6446 // Load the bucket.
6447 move32(hash, entryTemp);
6448 load32(Address(temp1, OrderedHashTable::offsetOfImplHashShift()), temp2);
6449 flexibleRshift32(temp2, entryTemp);
6451 loadPtr(Address(temp1, OrderedHashTable::offsetOfImplHashTable()), temp2);
6452 loadPtr(BaseIndex(temp2, entryTemp, ScalePointer), entryTemp);
6454 // Search for a match in this bucket.
6455 Label start, loop;
6456 jump(&start);
6457 bind(&loop);
6459 // Inline implementation of |HashableValue::operator==|.
6461 static_assert(OrderedHashTable::offsetOfImplDataElement() == 0,
6462 "offsetof(Data, element) is 0");
6463 auto keyAddr = Address(entryTemp, OrderedHashTable::offsetOfEntryKey());
6465 if (isBigInt == IsBigInt::No) {
6466 // Two HashableValues are equal if they have equal bits.
6467 branch64(Assembler::Equal, keyAddr, value.toRegister64(), found);
6468 } else {
6469 #ifdef JS_PUNBOX64
6470 auto key = ValueOperand(temp1);
6471 #else
6472 auto key = ValueOperand(temp1, temp2);
6473 #endif
6475 loadValue(keyAddr, key);
6477 // Two HashableValues are equal if they have equal bits.
6478 branch64(Assembler::Equal, key.toRegister64(), value.toRegister64(),
6479 found);
6481 // BigInt values are considered equal if they represent the same
6482 // mathematical value.
6483 Label next;
6484 fallibleUnboxBigInt(key, temp2, &next);
6485 if (isBigInt == IsBigInt::Yes) {
6486 unboxBigInt(value, temp1);
6487 } else {
6488 fallibleUnboxBigInt(value, temp1, &next);
6490 equalBigInts(temp1, temp2, temp3, temp4, temp1, temp2, &next, &next,
6491 &next);
6492 jump(found);
6493 bind(&next);
6496 loadPtr(Address(entryTemp, OrderedHashTable::offsetOfImplDataChain()),
6497 entryTemp);
6498 bind(&start);
6499 branchTestPtr(Assembler::NonZero, entryTemp, entryTemp, &loop);
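// The loop above mirrors OrderedHashTable::lookup(); in rough C++ (field names
// follow the offsetOfImpl* accessors used above, the real types differ):
//
//   template <typename Table>
//   typename Table::Data* Lookup(Table* table, const Value& key,
//                                mozilla::HashNumber hash) {
//     uint32_t bucket = hash >> table->hashShift;
//     for (auto* entry = table->hashTable[bucket]; entry;
//          entry = entry->chain) {
//       // HashableValues compare equal when their raw bits match; BigInt keys
//       // additionally compare by mathematical value.
//       if (entry->element.key.asRawBits() == key.asRawBits()) {
//         return entry;
//       }
//     }
//     return nullptr;
//   }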
6502 void MacroAssembler::setObjectHas(Register setObj, ValueOperand value,
6503 Register hash, Register result,
6504 Register temp1, Register temp2,
6505 Register temp3, Register temp4,
6506 IsBigInt isBigInt) {
6507 Label found;
6508 orderedHashTableLookup<ValueSet>(setObj, value, hash, result, temp1, temp2,
6509 temp3, temp4, &found, isBigInt);
6511 Label done;
6512 move32(Imm32(0), result);
6513 jump(&done);
6515 bind(&found);
6516 move32(Imm32(1), result);
6517 bind(&done);
6520 void MacroAssembler::mapObjectHas(Register mapObj, ValueOperand value,
6521 Register hash, Register result,
6522 Register temp1, Register temp2,
6523 Register temp3, Register temp4,
6524 IsBigInt isBigInt) {
6525 Label found;
6526 orderedHashTableLookup<ValueMap>(mapObj, value, hash, result, temp1, temp2,
6527 temp3, temp4, &found, isBigInt);
6529 Label done;
6530 move32(Imm32(0), result);
6531 jump(&done);
6533 bind(&found);
6534 move32(Imm32(1), result);
6535 bind(&done);
6538 void MacroAssembler::mapObjectGet(Register mapObj, ValueOperand value,
6539 Register hash, ValueOperand result,
6540 Register temp1, Register temp2,
6541 Register temp3, Register temp4,
6542 Register temp5, IsBigInt isBigInt) {
6543 Label found;
6544 orderedHashTableLookup<ValueMap>(mapObj, value, hash, temp1, temp2, temp3,
6545 temp4, temp5, &found, isBigInt);
6547 Label done;
6548 moveValue(UndefinedValue(), result);
6549 jump(&done);
6551 // |temp1| holds the found entry.
6552 bind(&found);
6553 loadValue(Address(temp1, ValueMap::Entry::offsetOfValue()), result);
6555 bind(&done);
6558 template <typename OrderedHashTable>
6559 void MacroAssembler::loadOrderedHashTableCount(Register setOrMapObj,
6560 Register result) {
6561 // Inline implementation of |OrderedHashTable::count()|.
6563 // Load the |ValueSet| or |ValueMap|.
6564 static_assert(SetObject::getDataSlotOffset() ==
6565 MapObject::getDataSlotOffset());
6566 loadPrivate(Address(setOrMapObj, SetObject::getDataSlotOffset()), result);
6568 // Load the live count.
6569 load32(Address(result, OrderedHashTable::offsetOfImplLiveCount()), result);
6572 void MacroAssembler::loadSetObjectSize(Register setObj, Register result) {
6573 loadOrderedHashTableCount<ValueSet>(setObj, result);
6576 void MacroAssembler::loadMapObjectSize(Register mapObj, Register result) {
6577 loadOrderedHashTableCount<ValueMap>(mapObj, result);
6580 // Can't push large frames blindly on Windows, so we must touch frame memory
6581 // incrementally, with no more than 4096 - 1 bytes between touches.
6583 // This is used across all platforms for simplicity.
6584 void MacroAssembler::touchFrameValues(Register numStackValues,
6585 Register scratch1, Register scratch2) {
6586 const size_t FRAME_TOUCH_INCREMENT = 2048;
6587 static_assert(FRAME_TOUCH_INCREMENT < 4096 - 1,
6588 "Frame increment is too large");
6590 moveStackPtrTo(scratch2);
6592 mov(numStackValues, scratch1);
6593 lshiftPtr(Imm32(3), scratch1);
6595 // Note: this loop needs to update the stack pointer register because older
6596 // Linux kernels check the distance between the touched address and RSP.
6597 // See bug 1839669 comment 47.
6598 Label touchFrameLoop;
6599 Label touchFrameLoopEnd;
6600 bind(&touchFrameLoop);
6601 branchSub32(Assembler::Signed, Imm32(FRAME_TOUCH_INCREMENT), scratch1,
6602 &touchFrameLoopEnd);
6603 subFromStackPtr(Imm32(FRAME_TOUCH_INCREMENT));
6604 store32(Imm32(0), Address(getStackPointer(), 0));
6605 jump(&touchFrameLoop);
6606 bind(&touchFrameLoopEnd);
6609 moveToStackPtr(scratch2);
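// Equivalent C++ of the probing loop above (a sketch): walk down the frame in
// 2 KiB steps, storing at each step so any guard page is touched in order.
//
//   void TouchFrameValues(uint8_t* sp, size_t numStackValues) {
//     constexpr size_t kIncrement = 2048;  // FRAME_TOUCH_INCREMENT < 4096 - 1
//     size_t remaining = numStackValues * sizeof(Value);
//     while (remaining >= kIncrement) {
//       remaining -= kIncrement;
//       sp -= kIncrement;
//       *reinterpret_cast<volatile uint32_t*>(sp) = 0;
//     }
//   }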
6612 namespace js {
6613 namespace jit {
6615 #ifdef DEBUG
6616 template <class RegisterType>
6617 AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(
6618 MacroAssembler& masm, RegisterType reg)
6619 : RegisterType(reg), masm_(masm), released_(false) {
6620 masm.debugTrackedRegisters_.add(reg);
6623 template AutoGenericRegisterScope<Register>::AutoGenericRegisterScope(
6624 MacroAssembler& masm, Register reg);
6625 template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(
6626 MacroAssembler& masm, FloatRegister reg);
6627 #endif // DEBUG
6629 #ifdef DEBUG
6630 template <class RegisterType>
6631 AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope() {
6632 if (!released_) {
6633 release();
6637 template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
6638 template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();
6640 template <class RegisterType>
6641 void AutoGenericRegisterScope<RegisterType>::release() {
6642 MOZ_ASSERT(!released_);
6643 released_ = true;
6644 const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
6645 masm_.debugTrackedRegisters_.take(reg);
6648 template void AutoGenericRegisterScope<Register>::release();
6649 template void AutoGenericRegisterScope<FloatRegister>::release();
6651 template <class RegisterType>
6652 void AutoGenericRegisterScope<RegisterType>::reacquire() {
6653 MOZ_ASSERT(released_);
6654 released_ = false;
6655 const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
6656 masm_.debugTrackedRegisters_.add(reg);
6659 template void AutoGenericRegisterScope<Register>::reacquire();
6660 template void AutoGenericRegisterScope<FloatRegister>::reacquire();
6662 #endif // DEBUG
6664 } // namespace jit
6666 } // namespace js