1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/CacheIRCompiler.h"
9 #include "mozilla/ArrayUtils.h"
10 #include "mozilla/FunctionTypeTraits.h"
11 #include "mozilla/MaybeOneOf.h"
12 #include "mozilla/ScopeExit.h"
14 #include <type_traits>
17 #include "jslibmath.h"
20 #include "builtin/DataViewObject.h"
21 #include "builtin/MapObject.h"
22 #include "builtin/Object.h"
23 #include "gc/GCEnum.h"
24 #include "gc/SweepingAPI.h" // js::gc::AutoLockStoreBuffer
25 #include "jit/BaselineCacheIRCompiler.h"
26 #include "jit/CacheIRGenerator.h"
27 #include "jit/IonCacheIRCompiler.h"
28 #include "jit/JitFrames.h"
29 #include "jit/JitRuntime.h"
30 #include "jit/JitZone.h"
31 #include "jit/SharedICHelpers.h"
32 #include "jit/SharedICRegisters.h"
33 #include "jit/VMFunctions.h"
34 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
35 #include "js/friend/XrayJitInfo.h" // js::jit::GetXrayJitInfo
36 #include "js/ScalarType.h" // js::Scalar::Type
37 #include "proxy/DOMProxy.h"
38 #include "proxy/Proxy.h"
39 #include "proxy/ScriptedProxyHandler.h"
40 #include "vm/ArgumentsObject.h"
41 #include "vm/ArrayBufferObject.h"
42 #include "vm/ArrayBufferViewObject.h"
43 #include "vm/BigIntType.h"
44 #include "vm/FunctionFlags.h" // js::FunctionFlags
45 #include "vm/GeneratorObject.h"
46 #include "vm/GetterSetter.h"
47 #include "vm/Interpreter.h"
48 #include "vm/Uint8Clamped.h"
50 #include "builtin/Boolean-inl.h"
51 #include "jit/MacroAssembler-inl.h"
52 #include "jit/SharedICHelpers-inl.h"
53 #include "jit/VMFunctionList-inl.h"
56 using namespace js::jit
;
58 using mozilla::BitwiseCast
;
61 using JS::ExpandoAndGeneration
;
// Returns a ValueOperand holding operand `op` as a boxed Value, converting
// from whatever OperandLocation kind it currently occupies (register, stack
// slot, baseline-frame slot, constant, payload, or double register) and
// updating the recorded location to ValueReg.
// NOTE(review): this extraction is missing interleaved source lines (e.g.
// the switch header and some returns/braces); code is left byte-identical.
63 ValueOperand
CacheRegisterAllocator::useValueRegister(MacroAssembler
& masm
,
65 OperandLocation
& loc
= operandLocations_
[op
.id()];
68 case OperandLocation::ValueReg
:
69 currentOpRegs_
.add(loc
.valueReg());
70 return loc
.valueReg();
72 case OperandLocation::ValueStack
: {
73 ValueOperand reg
= allocateValueRegister(masm
);
74 popValue(masm
, &loc
, reg
);
78 case OperandLocation::BaselineFrame
: {
79 ValueOperand reg
= allocateValueRegister(masm
);
80 Address addr
= addressOf(masm
, loc
.baselineFrameSlot());
81 masm
.loadValue(addr
, reg
);
86 case OperandLocation::Constant
: {
87 ValueOperand reg
= allocateValueRegister(masm
);
88 masm
.moveValue(loc
.constant(), reg
);
93 case OperandLocation::PayloadReg
: {
94 // Temporarily add the payload register to currentOpRegs_ so
95 // allocateValueRegister will stay away from it.
96 currentOpRegs_
.add(loc
.payloadReg());
97 ValueOperand reg
= allocateValueRegister(masm
);
98 masm
.tagValue(loc
.payloadType(), loc
.payloadReg(), reg
);
99 currentOpRegs_
.take(loc
.payloadReg());
100 availableRegs_
.add(loc
.payloadReg());
101 loc
.setValueReg(reg
);
105 case OperandLocation::PayloadStack
: {
106 ValueOperand reg
= allocateValueRegister(masm
);
107 popPayload(masm
, &loc
, reg
.scratchReg());
108 masm
.tagValue(loc
.payloadType(), reg
.scratchReg(), reg
);
109 loc
.setValueReg(reg
);
113 case OperandLocation::DoubleReg
: {
114 ValueOperand reg
= allocateValueRegister(masm
);
116 ScratchDoubleScope
fpscratch(masm
);
117 masm
.boxDouble(loc
.doubleReg(), reg
, fpscratch
);
119 loc
.setValueReg(reg
);
123 case OperandLocation::Uninitialized
:
130 // Load a value operand directly into a float register. Caller must have
131 // guarded isNumber on the provided val.
// (review) Emits a conversion appropriate to the operand's current location;
// non-int32/non-double inputs branch to `failure` (declared on an elided
// line — TODO confirm against full source). Does not change the recorded
// operand location.
132 void CacheRegisterAllocator::ensureDoubleRegister(MacroAssembler
& masm
,
134 FloatRegister dest
) const {
135 // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
136 // any stack slot offsets below.
137 int32_t stackOffset
= hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;
139 const OperandLocation
& loc
= operandLocations_
[op
.id()];
142 switch (loc
.kind()) {
143 case OperandLocation::ValueReg
: {
144 masm
.ensureDouble(loc
.valueReg(), dest
, &failure
);
148 case OperandLocation::ValueStack
: {
149 Address addr
= valueAddress(masm
, &loc
);
150 addr
.offset
+= stackOffset
;
151 masm
.ensureDouble(addr
, dest
, &failure
);
155 case OperandLocation::BaselineFrame
: {
156 Address addr
= addressOf(masm
, loc
.baselineFrameSlot());
157 addr
.offset
+= stackOffset
;
158 masm
.ensureDouble(addr
, dest
, &failure
);
162 case OperandLocation::DoubleReg
: {
163 masm
.moveDouble(loc
.doubleReg(), dest
);
167 case OperandLocation::Constant
: {
168 MOZ_ASSERT(loc
.constant().isNumber(),
169 "Caller must ensure the operand is a number value");
170 masm
.loadConstantDouble(loc
.constant().toNumber(), dest
);
174 case OperandLocation::PayloadReg
: {
175 // Doubles can't be stored in payload registers, so this must be an int32.
176 MOZ_ASSERT(loc
.payloadType() == JSVAL_TYPE_INT32
,
177 "Caller must ensure the operand is a number value");
178 masm
.convertInt32ToDouble(loc
.payloadReg(), dest
);
182 case OperandLocation::PayloadStack
: {
183 // Doubles can't be stored in payload registers, so this must be an int32.
184 MOZ_ASSERT(loc
.payloadType() == JSVAL_TYPE_INT32
,
185 "Caller must ensure the operand is a number value");
186 MOZ_ASSERT(loc
.payloadStack() <= stackPushed_
);
187 Address addr
= payloadAddress(masm
, &loc
);
188 addr
.offset
+= stackOffset
;
189 masm
.convertInt32ToDouble(addr
, dest
);
193 case OperandLocation::Uninitialized
:
194 MOZ_CRASH("Unhandled operand type in ensureDoubleRegister");
// (review) Debug-only sanity path: flags callers that failed to guard.
199 masm
.assumeUnreachable(
200 "Missing guard allowed non-number to hit ensureDoubleRegister");
// (review) Copies the unboxed payload of `typedId` into scratch GPR `dest`.
// Unlike useRegister, this is const: it emits loads/unboxes but leaves the
// operand's recorded location untouched. Double/constant/uninitialized
// locations are unsupported (MOZ_CRASH).
204 void CacheRegisterAllocator::copyToScratchRegister(MacroAssembler
& masm
,
205 TypedOperandId typedId
,
206 Register dest
) const {
207 // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
208 // any stack slot offsets below.
209 int32_t stackOffset
= hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;
211 const OperandLocation
& loc
= operandLocations_
[typedId
.id()];
213 switch (loc
.kind()) {
214 case OperandLocation::ValueReg
: {
215 masm
.unboxNonDouble(loc
.valueReg(), dest
, typedId
.type());
218 case OperandLocation::ValueStack
: {
219 Address addr
= valueAddress(masm
, &loc
);
220 addr
.offset
+= stackOffset
;
221 masm
.unboxNonDouble(addr
, dest
, typedId
.type());
224 case OperandLocation::BaselineFrame
: {
225 Address addr
= addressOf(masm
, loc
.baselineFrameSlot());
226 addr
.offset
+= stackOffset
;
227 masm
.unboxNonDouble(addr
, dest
, typedId
.type());
230 case OperandLocation::PayloadReg
: {
231 MOZ_ASSERT(loc
.payloadType() == typedId
.type());
232 masm
.mov(loc
.payloadReg(), dest
);
235 case OperandLocation::PayloadStack
: {
236 MOZ_ASSERT(loc
.payloadType() == typedId
.type());
237 MOZ_ASSERT(loc
.payloadStack() <= stackPushed_
);
238 Address addr
= payloadAddress(masm
, &loc
);
239 addr
.offset
+= stackOffset
;
240 masm
.loadPtr(addr
, dest
);
243 case OperandLocation::DoubleReg
:
244 case OperandLocation::Constant
:
245 case OperandLocation::Uninitialized
:
246 MOZ_CRASH("Unhandled operand location");
// (review) Copies operand `valId` as a boxed Value into scratch register
// `dest`, boxing payloads/doubles as needed. Const: the operand's recorded
// location is not modified. Must not be called after a failure path was
// added or while a float scratch spill is live (asserts below).
250 void CacheRegisterAllocator::copyToScratchValueRegister(
251 MacroAssembler
& masm
, ValOperandId valId
, ValueOperand dest
) const {
252 MOZ_ASSERT(!addedFailurePath_
);
253 MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
255 const OperandLocation
& loc
= operandLocations_
[valId
.id()];
256 switch (loc
.kind()) {
257 case OperandLocation::ValueReg
:
258 masm
.moveValue(loc
.valueReg(), dest
);
260 case OperandLocation::ValueStack
: {
261 Address addr
= valueAddress(masm
, &loc
);
262 masm
.loadValue(addr
, dest
);
265 case OperandLocation::BaselineFrame
: {
266 Address addr
= addressOf(masm
, loc
.baselineFrameSlot());
267 masm
.loadValue(addr
, dest
);
270 case OperandLocation::Constant
:
271 masm
.moveValue(loc
.constant(), dest
);
273 case OperandLocation::PayloadReg
:
274 masm
.tagValue(loc
.payloadType(), loc
.payloadReg(), dest
);
276 case OperandLocation::PayloadStack
: {
277 Address addr
= payloadAddress(masm
, &loc
);
278 masm
.loadPtr(addr
, dest
.scratchReg());
279 masm
.tagValue(loc
.payloadType(), dest
.scratchReg(), dest
);
282 case OperandLocation::DoubleReg
: {
283 ScratchDoubleScope
fpscratch(masm
);
284 masm
.boxDouble(loc
.doubleReg(), dest
, fpscratch
);
287 case OperandLocation::Uninitialized
:
// (review) Returns a GPR holding the unboxed payload of `typedId`,
// converting from the operand's current location and re-recording it as
// PayloadReg. A boxed ValueReg is unboxed in place the first time it is
// used as a typed payload (see comment in the ValueReg case).
292 Register
CacheRegisterAllocator::useRegister(MacroAssembler
& masm
,
293 TypedOperandId typedId
) {
294 MOZ_ASSERT(!addedFailurePath_
);
295 MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
297 OperandLocation
& loc
= operandLocations_
[typedId
.id()];
298 switch (loc
.kind()) {
299 case OperandLocation::PayloadReg
:
300 currentOpRegs_
.add(loc
.payloadReg());
301 return loc
.payloadReg();
303 case OperandLocation::ValueReg
: {
304 // It's possible the value is still boxed: as an optimization, we unbox
305 // the first time we use a value as object.
306 ValueOperand val
= loc
.valueReg();
307 availableRegs_
.add(val
);
308 Register reg
= val
.scratchReg();
309 availableRegs_
.take(reg
);
310 masm
.unboxNonDouble(val
, reg
, typedId
.type());
311 loc
.setPayloadReg(reg
, typedId
.type());
312 currentOpRegs_
.add(reg
);
316 case OperandLocation::PayloadStack
: {
317 Register reg
= allocateRegister(masm
);
318 popPayload(masm
, &loc
, reg
);
322 case OperandLocation::ValueStack
: {
323 // The value is on the stack, but boxed. If it's on top of the stack we
324 // unbox it and then remove it from the stack, else we just unbox.
325 Register reg
= allocateRegister(masm
);
326 if (loc
.valueStack() == stackPushed_
) {
327 masm
.unboxNonDouble(Address(masm
.getStackPointer(), 0), reg
,
329 masm
.addToStackPtr(Imm32(sizeof(js::Value
)));
330 MOZ_ASSERT(stackPushed_
>= sizeof(js::Value
));
331 stackPushed_
-= sizeof(js::Value
);
333 MOZ_ASSERT(loc
.valueStack() < stackPushed_
);
335 Address(masm
.getStackPointer(), stackPushed_
- loc
.valueStack()),
336 reg
, typedId
.type());
338 loc
.setPayloadReg(reg
, typedId
.type());
342 case OperandLocation::BaselineFrame
: {
343 Register reg
= allocateRegister(masm
);
344 Address addr
= addressOf(masm
, loc
.baselineFrameSlot());
345 masm
.unboxNonDouble(addr
, reg
, typedId
.type());
346 loc
.setPayloadReg(reg
, typedId
.type());
350 case OperandLocation::Constant
: {
// (review) GC-thing constants are materialized with ImmGCPtr; other
// constant kinds are unexpected here.
351 Value v
= loc
.constant();
352 Register reg
= allocateRegister(masm
);
354 masm
.movePtr(ImmGCPtr(v
.toString()), reg
);
355 } else if (v
.isSymbol()) {
356 masm
.movePtr(ImmGCPtr(v
.toSymbol()), reg
);
357 } else if (v
.isBigInt()) {
358 masm
.movePtr(ImmGCPtr(v
.toBigInt()), reg
);
360 MOZ_CRASH("Unexpected Value");
362 loc
.setPayloadReg(reg
, v
.extractNonDoubleType());
366 case OperandLocation::DoubleReg
:
367 case OperandLocation::Uninitialized
:
// (review) Returns the operand either as its constant Value (no codegen)
// or as a typed/boxed register, delegating to useRegister/useValueRegister.
374 ConstantOrRegister
CacheRegisterAllocator::useConstantOrRegister(
375 MacroAssembler
& masm
, ValOperandId val
) {
376 MOZ_ASSERT(!addedFailurePath_
);
377 MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
379 OperandLocation
& loc
= operandLocations_
[val
.id()];
380 switch (loc
.kind()) {
381 case OperandLocation::Constant
:
382 return loc
.constant();
384 case OperandLocation::PayloadReg
:
385 case OperandLocation::PayloadStack
: {
386 JSValueType payloadType
= loc
.payloadType();
387 Register reg
= useRegister(masm
, TypedOperandId(val
, payloadType
));
388 return TypedOrValueRegister(MIRTypeFromValueType(payloadType
),
392 case OperandLocation::ValueReg
:
393 case OperandLocation::ValueStack
:
394 case OperandLocation::BaselineFrame
:
395 return TypedOrValueRegister(useValueRegister(masm
, val
));
397 case OperandLocation::DoubleReg
:
398 return TypedOrValueRegister(MIRType::Double
,
399 AnyRegister(loc
.doubleReg()));
401 case OperandLocation::Uninitialized
:
// (review) Allocates a fresh GPR for a previously-uninitialized output
// operand and records it as that operand's PayloadReg location.
408 Register
CacheRegisterAllocator::defineRegister(MacroAssembler
& masm
,
409 TypedOperandId typedId
) {
410 MOZ_ASSERT(!addedFailurePath_
);
411 MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
413 OperandLocation
& loc
= operandLocations_
[typedId
.id()];
414 MOZ_ASSERT(loc
.kind() == OperandLocation::Uninitialized
);
416 Register reg
= allocateRegister(masm
);
417 loc
.setPayloadReg(reg
, typedId
.type());
// (review) Value-register analogue of defineRegister: allocates a fresh
// ValueOperand for an uninitialized operand and records it as ValueReg.
421 ValueOperand
CacheRegisterAllocator::defineValueRegister(MacroAssembler
& masm
,
423 MOZ_ASSERT(!addedFailurePath_
);
424 MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
426 OperandLocation
& loc
= operandLocations_
[val
.id()];
427 MOZ_ASSERT(loc
.kind() == OperandLocation::Uninitialized
);
429 ValueOperand reg
= allocateValueRegister(masm
);
430 loc
.setValueReg(reg
);
// (review) Reclaims registers and stack slots of operands that are dead at
// the current instruction, marking each reclaimed location Uninitialized.
// Input operands are deliberately skipped (see comment below).
434 void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler
& masm
) {
435 // See if any operands are dead so we can reuse their registers. Note that
436 // we skip the input operands, as those are also used by failure paths, and
437 // we currently don't track those uses.
438 for (size_t i
= writer_
.numInputOperands(); i
< operandLocations_
.length();
440 if (!writer_
.operandIsDead(i
, currentInstruction_
)) {
444 OperandLocation
& loc
= operandLocations_
[i
];
445 switch (loc
.kind()) {
446 case OperandLocation::PayloadReg
:
447 availableRegs_
.add(loc
.payloadReg());
449 case OperandLocation::ValueReg
:
450 availableRegs_
.add(loc
.valueReg());
452 case OperandLocation::PayloadStack
:
453 masm
.propagateOOM(freePayloadSlots_
.append(loc
.payloadStack()));
455 case OperandLocation::ValueStack
:
456 masm
.propagateOOM(freeValueSlots_
.append(loc
.valueStack()));
458 case OperandLocation::Uninitialized
:
459 case OperandLocation::BaselineFrame
:
460 case OperandLocation::Constant
:
461 case OperandLocation::DoubleReg
:
464 loc
.setUninitialized();
// (review) Pops everything the allocator pushed on the native stack and
// invalidates all operand locations; free-slot lists are reset too.
468 void CacheRegisterAllocator::discardStack(MacroAssembler
& masm
) {
469 // This should only be called when we are no longer using the operands,
470 // as we're discarding everything from the native stack. Set all operand
471 // locations to Uninitialized to catch bugs.
472 for (size_t i
= 0; i
< operandLocations_
.length(); i
++) {
473 operandLocations_
[i
].setUninitialized();
476 if (stackPushed_
> 0) {
477 masm
.addToStackPtr(Imm32(stackPushed_
));
480 freePayloadSlots_
.clear();
481 freeValueSlots_
.clear();
// (review) Allocates one GPR for the current instruction. Strategy, in
// order: reuse a free register; free dead operand locations; spill an
// operand not used by the current instruction; finally spill one of the
// reserved availableRegsAfterSpill_ registers to the stack.
484 Register
CacheRegisterAllocator::allocateRegister(MacroAssembler
& masm
) {
485 MOZ_ASSERT(!addedFailurePath_
);
486 MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
488 if (availableRegs_
.empty()) {
489 freeDeadOperandLocations(masm
);
492 if (availableRegs_
.empty()) {
493 // Still no registers available, try to spill unused operands to
495 for (size_t i
= 0; i
< operandLocations_
.length(); i
++) {
496 OperandLocation
& loc
= operandLocations_
[i
];
497 if (loc
.kind() == OperandLocation::PayloadReg
) {
498 Register reg
= loc
.payloadReg();
499 if (currentOpRegs_
.has(reg
)) {
503 spillOperandToStack(masm
, &loc
);
504 availableRegs_
.add(reg
);
505 break; // We got a register, so break out of the loop.
507 if (loc
.kind() == OperandLocation::ValueReg
) {
508 ValueOperand reg
= loc
.valueReg();
509 if (currentOpRegs_
.aliases(reg
)) {
513 spillOperandToStack(masm
, &loc
);
514 availableRegs_
.add(reg
);
515 break; // Break out of the loop.
520 if (availableRegs_
.empty() && !availableRegsAfterSpill_
.empty()) {
521 Register reg
= availableRegsAfterSpill_
.takeAny();
523 stackPushed_
+= sizeof(uintptr_t);
525 masm
.propagateOOM(spilledRegs_
.append(SpilledRegister(reg
, stackPushed_
)));
527 availableRegs_
.add(reg
);
530 // At this point, there must be a free register.
531 MOZ_RELEASE_ASSERT(!availableRegs_
.empty());
533 Register reg
= availableRegs_
.takeAny();
534 currentOpRegs_
.add(reg
);
// (review) Claims a specific register for the current instruction: takes
// it from the free set, spills it from the after-spill set, or evicts the
// operand currently occupying it (spilling that operand to stack or
// another register).
538 void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler
& masm
,
540 MOZ_ASSERT(!addedFailurePath_
);
541 MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
543 // Fixed registers should be allocated first, to ensure they're
545 MOZ_ASSERT(!currentOpRegs_
.has(reg
), "Register is in use");
547 freeDeadOperandLocations(masm
);
549 if (availableRegs_
.has(reg
)) {
550 availableRegs_
.take(reg
);
551 currentOpRegs_
.add(reg
);
555 // Register may be available only after spilling contents.
556 if (availableRegsAfterSpill_
.has(reg
)) {
557 availableRegsAfterSpill_
.take(reg
);
559 stackPushed_
+= sizeof(uintptr_t);
561 masm
.propagateOOM(spilledRegs_
.append(SpilledRegister(reg
, stackPushed_
)));
562 currentOpRegs_
.add(reg
);
566 // The register must be used by some operand. Spill it to the stack.
567 for (size_t i
= 0; i
< operandLocations_
.length(); i
++) {
568 OperandLocation
& loc
= operandLocations_
[i
];
569 if (loc
.kind() == OperandLocation::PayloadReg
) {
570 if (loc
.payloadReg() != reg
) {
574 spillOperandToStackOrRegister(masm
, &loc
);
575 currentOpRegs_
.add(reg
);
578 if (loc
.kind() == OperandLocation::ValueReg
) {
579 if (!loc
.valueReg().aliases(reg
)) {
583 ValueOperand valueReg
= loc
.valueReg();
584 spillOperandToStackOrRegister(masm
, &loc
);
// (review) The whole value register pair is freed, then the requested
// half is re-taken for the current instruction.
586 availableRegs_
.add(valueReg
);
587 availableRegs_
.take(reg
);
588 currentOpRegs_
.add(reg
);
593 MOZ_CRASH("Invalid register");
// (review) Claims a fixed ValueOperand: on 32-bit builds its payload and
// type registers are claimed separately; on 64-bit the single value
// register (the two branches are selected by elided preprocessor lines —
// TODO confirm against full source).
596 void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler
& masm
,
599 allocateFixedRegister(masm
, reg
.payloadReg());
600 allocateFixedRegister(masm
, reg
.typeReg());
602 allocateFixedRegister(masm
, reg
.valueReg());
607 // Possible miscompilation in clang-12 (bug 1689641)
// (review) Allocates a ValueOperand: two GPRs on 32-bit nunbox builds,
// one GPR otherwise (branch selection lines elided by the extraction).
610 ValueOperand
CacheRegisterAllocator::allocateValueRegister(
611 MacroAssembler
& masm
) {
613 Register reg1
= allocateRegister(masm
);
614 Register reg2
= allocateRegister(masm
);
615 return ValueOperand(reg1
, reg2
);
617 Register reg
= allocateRegister(masm
);
618 return ValueOperand(reg
);
// (review) Fallible initialization: sizes the input-location and
// operand-location vectors from the CacheIR writer's counts.
622 bool CacheRegisterAllocator::init() {
623 if (!origInputLocations_
.resize(writer_
.numInputOperands())) {
626 if (!operandLocations_
.resize(writer_
.numOperandIds())) {
632 void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
633 // Registers not in availableRegs_ and not used by input operands are
634 // available after being spilled.
635 availableRegsAfterSpill_
.set() = GeneralRegisterSet::Intersect(
636 GeneralRegisterSet::Not(availableRegs_
.set()),
637 GeneralRegisterSet::Not(inputRegisterSet()));
// (review) Ensures no two IC input operands share a register by spilling
// one of any aliasing pair; prefers spilling the PayloadReg side (see the
// rationale comment below).
640 void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler
& masm
) {
641 // If IC inputs alias each other, make sure they are stored in different
642 // locations so we don't have to deal with this complexity in the rest of
645 // Note that this can happen in IonMonkey with something like |o.foo = o|
648 size_t numInputs
= writer_
.numInputOperands();
649 MOZ_ASSERT(origInputLocations_
.length() == numInputs
);
651 for (size_t i
= 1; i
< numInputs
; i
++) {
652 OperandLocation
& loc1
= operandLocations_
[i
];
653 if (!loc1
.isInRegister()) {
657 for (size_t j
= 0; j
< i
; j
++) {
658 OperandLocation
& loc2
= operandLocations_
[j
];
659 if (!loc1
.aliasesReg(loc2
)) {
663 // loc1 and loc2 alias so we spill one of them. If one is a
664 // ValueReg and the other is a PayloadReg, we have to spill the
665 // PayloadReg: spilling the ValueReg instead would leave its type
666 // register unallocated on 32-bit platforms.
667 if (loc1
.kind() == OperandLocation::ValueReg
) {
668 spillOperandToStack(masm
, &loc2
);
670 MOZ_ASSERT(loc1
.kind() == OperandLocation::PayloadReg
);
671 spillOperandToStack(masm
, &loc1
);
672 break; // Spilled loc1, so nothing else will alias it.
// (review) Computes the set of GPRs occupied by the IC's input operands;
// asserts locations still match the originals recorded at entry.
682 GeneralRegisterSet
CacheRegisterAllocator::inputRegisterSet() const {
683 MOZ_ASSERT(origInputLocations_
.length() == writer_
.numInputOperands());
685 AllocatableGeneralRegisterSet result
;
686 for (size_t i
= 0; i
< writer_
.numInputOperands(); i
++) {
687 const OperandLocation
& loc
= operandLocations_
[i
];
688 MOZ_ASSERT(loc
== origInputLocations_
[i
]);
690 switch (loc
.kind()) {
691 case OperandLocation::PayloadReg
:
692 result
.addUnchecked(loc
.payloadReg());
694 case OperandLocation::ValueReg
:
695 result
.addUnchecked(loc
.valueReg());
697 case OperandLocation::PayloadStack
:
698 case OperandLocation::ValueStack
:
699 case OperandLocation::BaselineFrame
:
700 case OperandLocation::Constant
:
701 case OperandLocation::DoubleReg
:
703 case OperandLocation::Uninitialized
:
706 MOZ_CRASH("Invalid kind");
// (review) Returns the statically-known JSValueType of operand `val`, or
// JSVAL_TYPE_UNKNOWN when it is still a boxed Value.
712 JSValueType
CacheRegisterAllocator::knownType(ValOperandId val
) const {
713 const OperandLocation
& loc
= operandLocations_
[val
.id()];
715 switch (loc
.kind()) {
716 case OperandLocation::ValueReg
:
717 case OperandLocation::ValueStack
:
718 case OperandLocation::BaselineFrame
:
719 return JSVAL_TYPE_UNKNOWN
;
721 case OperandLocation::PayloadStack
:
722 case OperandLocation::PayloadReg
:
723 return loc
.payloadType();
725 case OperandLocation::Constant
:
726 return loc
.constant().isDouble() ? JSVAL_TYPE_DOUBLE
727 : loc
.constant().extractNonDoubleType();
729 case OperandLocation::DoubleReg
:
730 return JSVAL_TYPE_DOUBLE
;
732 case OperandLocation::Uninitialized
:
736 MOZ_CRASH("Invalid kind");
// (review) Records input operand i's starting location from a
// TypedOrValueRegister, dispatching to the value/fpu/gpr overloads.
739 void CacheRegisterAllocator::initInputLocation(
740 size_t i
, const TypedOrValueRegister
& reg
) {
741 if (reg
.hasValue()) {
742 initInputLocation(i
, reg
.valueReg());
743 } else if (reg
.typedReg().isFloat()) {
744 MOZ_ASSERT(reg
.type() == MIRType::Double
);
745 initInputLocation(i
, reg
.typedReg().fpu());
747 initInputLocation(i
, reg
.typedReg().gpr(),
748 ValueTypeFromMIRType(reg
.type()));
// (review) Records input operand i's starting location from a
// ConstantOrRegister: either the constant Value or the register form.
752 void CacheRegisterAllocator::initInputLocation(
753 size_t i
, const ConstantOrRegister
& value
) {
754 if (value
.constant()) {
755 initInputLocation(i
, value
.value());
757 initInputLocation(i
, value
.reg());
// (review) Spills a register-resident operand (ValueReg or PayloadReg) to
// the native stack, reusing a previously-freed slot when one is available
// and otherwise pushing a new slot; updates the recorded location.
761 void CacheRegisterAllocator::spillOperandToStack(MacroAssembler
& masm
,
762 OperandLocation
* loc
) {
763 MOZ_ASSERT(loc
>= operandLocations_
.begin() && loc
< operandLocations_
.end());
765 if (loc
->kind() == OperandLocation::ValueReg
) {
766 if (!freeValueSlots_
.empty()) {
767 uint32_t stackPos
= freeValueSlots_
.popCopy();
768 MOZ_ASSERT(stackPos
<= stackPushed_
);
769 masm
.storeValue(loc
->valueReg(),
770 Address(masm
.getStackPointer(), stackPushed_
- stackPos
));
771 loc
->setValueStack(stackPos
);
774 stackPushed_
+= sizeof(js::Value
);
775 masm
.pushValue(loc
->valueReg());
776 loc
->setValueStack(stackPushed_
);
780 MOZ_ASSERT(loc
->kind() == OperandLocation::PayloadReg
);
782 if (!freePayloadSlots_
.empty()) {
783 uint32_t stackPos
= freePayloadSlots_
.popCopy();
784 MOZ_ASSERT(stackPos
<= stackPushed_
);
785 masm
.storePtr(loc
->payloadReg(),
786 Address(masm
.getStackPointer(), stackPushed_
- stackPos
));
787 loc
->setPayloadStack(stackPos
, loc
->payloadType());
790 stackPushed_
+= sizeof(uintptr_t);
791 masm
.push(loc
->payloadReg());
792 loc
->setPayloadStack(stackPushed_
, loc
->payloadType());
// (review) Like spillOperandToStack, but first tries to move the operand
// into spare register(s); only falls back to the stack when the free set
// is too small (a Value needs BoxPieces registers on 32-bit nunboxing).
795 void CacheRegisterAllocator::spillOperandToStackOrRegister(
796 MacroAssembler
& masm
, OperandLocation
* loc
) {
797 MOZ_ASSERT(loc
>= operandLocations_
.begin() && loc
< operandLocations_
.end());
799 // If enough registers are available, use them.
800 if (loc
->kind() == OperandLocation::ValueReg
) {
801 static const size_t BoxPieces
= sizeof(Value
) / sizeof(uintptr_t);
802 if (availableRegs_
.set().size() >= BoxPieces
) {
803 ValueOperand reg
= availableRegs_
.takeAnyValue();
804 masm
.moveValue(loc
->valueReg(), reg
);
805 loc
->setValueReg(reg
);
809 MOZ_ASSERT(loc
->kind() == OperandLocation::PayloadReg
);
810 if (!availableRegs_
.empty()) {
811 Register reg
= availableRegs_
.takeAny();
812 masm
.movePtr(loc
->payloadReg(), reg
);
813 loc
->setPayloadReg(reg
, loc
->payloadType());
818 // Not enough registers available, spill to the stack.
819 spillOperandToStack(masm
, loc
);
// (review) Moves a stack-resident payload into `dest`: pops it when it is
// on top of the stack, otherwise loads it and records the slot as free.
// The operand's location becomes PayloadReg(dest).
822 void CacheRegisterAllocator::popPayload(MacroAssembler
& masm
,
823 OperandLocation
* loc
, Register dest
) {
824 MOZ_ASSERT(loc
>= operandLocations_
.begin() && loc
< operandLocations_
.end());
825 MOZ_ASSERT(stackPushed_
>= sizeof(uintptr_t));
827 // The payload is on the stack. If it's on top of the stack we can just
828 // pop it, else we emit a load.
829 if (loc
->payloadStack() == stackPushed_
) {
831 stackPushed_
-= sizeof(uintptr_t);
833 MOZ_ASSERT(loc
->payloadStack() < stackPushed_
);
834 masm
.loadPtr(payloadAddress(masm
, loc
), dest
);
835 masm
.propagateOOM(freePayloadSlots_
.append(loc
->payloadStack()));
838 loc
->setPayloadReg(dest
, loc
->payloadType());
// (review) Computes the stack-pointer-relative address of a spilled boxed
// Value; valueStack() is recorded relative to stackPushed_.
841 Address
CacheRegisterAllocator::valueAddress(MacroAssembler
& masm
,
842 const OperandLocation
* loc
) const {
843 MOZ_ASSERT(loc
>= operandLocations_
.begin() && loc
< operandLocations_
.end());
844 return Address(masm
.getStackPointer(), stackPushed_
- loc
->valueStack());
// (review) Payload analogue of valueAddress: address of a spilled unboxed
// payload word relative to the current stack pointer.
847 Address
CacheRegisterAllocator::payloadAddress(
848 MacroAssembler
& masm
, const OperandLocation
* loc
) const {
849 MOZ_ASSERT(loc
>= operandLocations_
.begin() && loc
< operandLocations_
.end());
850 return Address(masm
.getStackPointer(), stackPushed_
- loc
->payloadStack());
// (review) Moves a stack-resident boxed Value into `dest`: pops when on
// top of the stack, otherwise loads and frees the slot. The operand's
// location becomes ValueReg(dest).
853 void CacheRegisterAllocator::popValue(MacroAssembler
& masm
,
854 OperandLocation
* loc
, ValueOperand dest
) {
855 MOZ_ASSERT(loc
>= operandLocations_
.begin() && loc
< operandLocations_
.end());
856 MOZ_ASSERT(stackPushed_
>= sizeof(js::Value
));
858 // The Value is on the stack. If it's on top of the stack we can just
859 // pop it, else we emit a load.
860 if (loc
->valueStack() == stackPushed_
) {
862 stackPushed_
-= sizeof(js::Value
);
864 MOZ_ASSERT(loc
->valueStack() < stackPushed_
);
866 Address(masm
.getStackPointer(), stackPushed_
- loc
->valueStack()),
868 masm
.propagateOOM(freeValueSlots_
.append(loc
->valueStack()));
871 loc
->setValueReg(dest
);
// (review) Debug-only invariant check (guarded by JitOptions.fullDebugChecks):
// no two live operand locations may alias the same register storage.
875 void CacheRegisterAllocator::assertValidState() const {
876 // Assert different operands don't have aliasing storage. We depend on this
877 // when spilling registers, for instance.
879 if (!JitOptions
.fullDebugChecks
) {
883 for (size_t i
= 0; i
< operandLocations_
.length(); i
++) {
884 const auto& loc1
= operandLocations_
[i
];
885 if (loc1
.isUninitialized()) {
889 for (size_t j
= 0; j
< i
; j
++) {
890 const auto& loc2
= operandLocations_
[j
];
891 if (loc2
.isUninitialized()) {
894 MOZ_ASSERT(!loc1
.aliasesReg(loc2
));
// (review) True when this location shares register storage with `other`;
// only the register-kind cases of the switch are visible here (the case
// labels themselves are on elided lines).
900 bool OperandLocation::aliasesReg(const OperandLocation
& other
) const {
901 MOZ_ASSERT(&other
!= this);
903 switch (other
.kind_
) {
905 return aliasesReg(other
.payloadReg());
907 return aliasesReg(other
.valueReg());
918 MOZ_CRASH("Invalid kind");
// (review) Restores every IC input operand to its original (entry)
// location, resolving move cycles by spilling later sources to the stack,
// then unspills any registers saved in spilledRegs_ and optionally pops
// the remaining allocator stack.
921 void CacheRegisterAllocator::restoreInputState(MacroAssembler
& masm
,
922 bool shouldDiscardStack
) {
923 size_t numInputOperands
= origInputLocations_
.length();
924 MOZ_ASSERT(writer_
.numInputOperands() == numInputOperands
);
926 for (size_t j
= 0; j
< numInputOperands
; j
++) {
927 const OperandLocation
& dest
= origInputLocations_
[j
];
928 OperandLocation
& cur
= operandLocations_
[j
];
// (review) ScopeExit keeps the bookkeeping (cur = dest) even on the
// early exits inside this loop body.
933 auto autoAssign
= mozilla::MakeScopeExit([&] { cur
= dest
; });
935 // We have a cycle if a destination register will be used later
936 // as source register. If that happens, just push the current value
937 // on the stack and later get it from there.
938 for (size_t k
= j
+ 1; k
< numInputOperands
; k
++) {
939 OperandLocation
& laterSource
= operandLocations_
[k
];
940 if (dest
.aliasesReg(laterSource
)) {
941 spillOperandToStack(masm
, &laterSource
);
945 if (dest
.kind() == OperandLocation::ValueReg
) {
946 // We have to restore a Value register.
947 switch (cur
.kind()) {
948 case OperandLocation::ValueReg
:
949 masm
.moveValue(cur
.valueReg(), dest
.valueReg());
951 case OperandLocation::PayloadReg
:
952 masm
.tagValue(cur
.payloadType(), cur
.payloadReg(), dest
.valueReg());
954 case OperandLocation::PayloadStack
: {
955 Register scratch
= dest
.valueReg().scratchReg();
956 popPayload(masm
, &cur
, scratch
);
957 masm
.tagValue(cur
.payloadType(), scratch
, dest
.valueReg());
960 case OperandLocation::ValueStack
:
961 popValue(masm
, &cur
, dest
.valueReg());
963 case OperandLocation::DoubleReg
:
964 masm
.boxDouble(cur
.doubleReg(), dest
.valueReg(), cur
.doubleReg());
966 case OperandLocation::Constant
:
967 case OperandLocation::BaselineFrame
:
968 case OperandLocation::Uninitialized
:
971 } else if (dest
.kind() == OperandLocation::PayloadReg
) {
972 // We have to restore a payload register.
973 switch (cur
.kind()) {
974 case OperandLocation::ValueReg
:
975 MOZ_ASSERT(dest
.payloadType() != JSVAL_TYPE_DOUBLE
);
976 masm
.unboxNonDouble(cur
.valueReg(), dest
.payloadReg(),
979 case OperandLocation::PayloadReg
:
980 MOZ_ASSERT(cur
.payloadType() == dest
.payloadType());
981 masm
.mov(cur
.payloadReg(), dest
.payloadReg());
983 case OperandLocation::PayloadStack
: {
984 MOZ_ASSERT(cur
.payloadType() == dest
.payloadType());
985 popPayload(masm
, &cur
, dest
.payloadReg());
988 case OperandLocation::ValueStack
:
989 MOZ_ASSERT(stackPushed_
>= sizeof(js::Value
));
990 MOZ_ASSERT(cur
.valueStack() <= stackPushed_
);
991 MOZ_ASSERT(dest
.payloadType() != JSVAL_TYPE_DOUBLE
);
993 Address(masm
.getStackPointer(), stackPushed_
- cur
.valueStack()),
994 dest
.payloadReg(), dest
.payloadType());
996 case OperandLocation::Constant
:
997 case OperandLocation::BaselineFrame
:
998 case OperandLocation::DoubleReg
:
999 case OperandLocation::Uninitialized
:
1002 } else if (dest
.kind() == OperandLocation::Constant
||
1003 dest
.kind() == OperandLocation::BaselineFrame
||
1004 dest
.kind() == OperandLocation::DoubleReg
) {
1009 MOZ_CRASH("Invalid kind");
// (review) Unspill registers pushed by allocateRegister /
// allocateFixedRegister, popping when on top of the stack.
1012 for (const SpilledRegister
& spill
: spilledRegs_
) {
1013 MOZ_ASSERT(stackPushed_
>= sizeof(uintptr_t));
1015 if (spill
.stackPushed
== stackPushed_
) {
1016 masm
.pop(spill
.reg
);
1017 stackPushed_
-= sizeof(uintptr_t);
1019 MOZ_ASSERT(spill
.stackPushed
< stackPushed_
);
1021 Address(masm
.getStackPointer(), stackPushed_
- spill
.stackPushed
),
1026 if (shouldDiscardStack
) {
// (review) Sums StubField::sizeInBytes for each field until the Limit
// sentinel type is reached.
1031 size_t CacheIRStubInfo::stubDataSize() const {
1035 StubField::Type type
= fieldType(field
++);
1036 if (type
== StubField::Type::Limit
) {
1039 size
+= StubField::sizeInBytes(type
);
// (review) Reinterprets a raw stub-data slot pointer as a GCPtr<T>*.
1043 template <typename T
>
1044 static GCPtr
<T
>* AsGCPtr(void* ptr
) {
1045 return static_cast<GCPtr
<T
>*>(ptr
);
// (review) Overwrites an aligned raw word in stub data, asserting the old
// value matches (the store itself is on an elided line — TODO confirm).
1048 void CacheIRStubInfo::replaceStubRawWord(uint8_t* stubData
, uint32_t offset
,
1050 uintptr_t newWord
) const {
1051 MOZ_ASSERT(uintptr_t(stubData
+ offset
) % sizeof(uintptr_t) == 0);
1052 uintptr_t* addr
= reinterpret_cast<uintptr_t*>(stubData
+ offset
);
1053 MOZ_ASSERT(*addr
== oldWord
);
// (review) Typed accessor for a stub-data field: maps the StubField::Type
// to its wrapped (GC-barriered) representation via MapStubFieldToType.
// Explicit instantiations for ICCacheIRStub follow the definition.
1057 template <class Stub
, StubField::Type type
>
1058 typename MapStubFieldToType
<type
>::WrappedType
& CacheIRStubInfo::getStubField(
1059 Stub
* stub
, uint32_t offset
) const {
1060 uint8_t* stubData
= (uint8_t*)stub
+ stubDataOffset_
;
1061 MOZ_ASSERT(uintptr_t(stubData
+ offset
) % sizeof(uintptr_t) == 0);
1063 using WrappedType
= typename MapStubFieldToType
<type
>::WrappedType
;
1064 return *reinterpret_cast<WrappedType
*>(stubData
+ offset
);
1067 #define INSTANTIATE_GET_STUB_FIELD(Type) \
1068 template typename MapStubFieldToType<Type>::WrappedType& \
1069 CacheIRStubInfo::getStubField<ICCacheIRStub, Type>(ICCacheIRStub * stub, \
1070 uint32_t offset) const;
1071 INSTANTIATE_GET_STUB_FIELD(StubField::Type::Shape
)
1072 INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakShape
)
1073 INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakGetterSetter
)
1074 INSTANTIATE_GET_STUB_FIELD(StubField::Type::JSObject
)
1075 INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakObject
)
1076 INSTANTIATE_GET_STUB_FIELD(StubField::Type::Symbol
)
1077 INSTANTIATE_GET_STUB_FIELD(StubField::Type::String
)
1078 INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakBaseScript
)
1079 INSTANTIATE_GET_STUB_FIELD(StubField::Type::Value
)
1080 INSTANTIATE_GET_STUB_FIELD(StubField::Type::Id
)
1081 #undef INSTANTIATE_GET_STUB_FIELD
// (review) Reads a raw (non-GC-barriered) pointer out of aligned stub
// data; instantiated for gc::AllocSite on ICCacheIRStub below.
1083 template <class Stub
, class T
>
1084 T
* CacheIRStubInfo::getPtrStubField(Stub
* stub
, uint32_t offset
) const {
1085 uint8_t* stubData
= (uint8_t*)stub
+ stubDataOffset_
;
1086 MOZ_ASSERT(uintptr_t(stubData
+ offset
) % sizeof(uintptr_t) == 0);
1088 return *reinterpret_cast<T
**>(stubData
+ offset
);
1091 template gc::AllocSite
* CacheIRStubInfo::getPtrStubField(ICCacheIRStub
* stub
,
1092 uint32_t offset
) const;
1094 template <StubField::Type type
, typename V
>
1095 static void InitWrappedPtr(void* ptr
, V val
) {
1096 using RawType
= typename MapStubFieldToType
<type
>::RawType
;
1097 using WrappedType
= typename MapStubFieldToType
<type
>::WrappedType
;
1098 auto* wrapped
= static_cast<WrappedType
*>(ptr
);
1099 new (wrapped
) WrappedType(mozilla::BitwiseCast
<RawType
>(val
));
1102 static void InitWordStubField(StubField::Type type
, void* dest
,
1104 MOZ_ASSERT(StubField::sizeIsWord(type
));
1105 MOZ_ASSERT((uintptr_t(dest
) % sizeof(uintptr_t)) == 0,
1106 "Unaligned stub field");
1109 case StubField::Type::RawInt32
:
1110 case StubField::Type::RawPointer
:
1111 case StubField::Type::AllocSite
:
1112 *static_cast<uintptr_t*>(dest
) = value
;
1114 case StubField::Type::Shape
:
1115 InitWrappedPtr
<StubField::Type::Shape
>(dest
, value
);
1117 case StubField::Type::WeakShape
:
1118 // No read barrier required to copy weak pointer.
1119 InitWrappedPtr
<StubField::Type::WeakShape
>(dest
, value
);
1121 case StubField::Type::WeakGetterSetter
:
1122 // No read barrier required to copy weak pointer.
1123 InitWrappedPtr
<StubField::Type::WeakGetterSetter
>(dest
, value
);
1125 case StubField::Type::JSObject
:
1126 InitWrappedPtr
<StubField::Type::JSObject
>(dest
, value
);
1128 case StubField::Type::WeakObject
:
1129 // No read barrier required to copy weak pointer.
1130 InitWrappedPtr
<StubField::Type::WeakObject
>(dest
, value
);
1132 case StubField::Type::Symbol
:
1133 InitWrappedPtr
<StubField::Type::Symbol
>(dest
, value
);
1135 case StubField::Type::String
:
1136 InitWrappedPtr
<StubField::Type::String
>(dest
, value
);
1138 case StubField::Type::WeakBaseScript
:
1139 // No read barrier required to copy weak pointer.
1140 InitWrappedPtr
<StubField::Type::WeakBaseScript
>(dest
, value
);
1142 case StubField::Type::JitCode
:
1143 InitWrappedPtr
<StubField::Type::JitCode
>(dest
, value
);
1145 case StubField::Type::Id
:
1146 AsGCPtr
<jsid
>(dest
)->init(jsid::fromRawBits(value
));
1148 case StubField::Type::RawInt64
:
1149 case StubField::Type::Double
:
1150 case StubField::Type::Value
:
1151 case StubField::Type::Limit
:
1152 MOZ_CRASH("Invalid type");
1156 static void InitInt64StubField(StubField::Type type
, void* dest
,
1158 MOZ_ASSERT(StubField::sizeIsInt64(type
));
1159 MOZ_ASSERT((uintptr_t(dest
) % sizeof(uint64_t)) == 0, "Unaligned stub field");
1162 case StubField::Type::RawInt64
:
1163 case StubField::Type::Double
:
1164 *static_cast<uint64_t*>(dest
) = value
;
1166 case StubField::Type::Value
:
1167 AsGCPtr
<Value
>(dest
)->init(Value::fromRawBits(value
));
1169 case StubField::Type::RawInt32
:
1170 case StubField::Type::RawPointer
:
1171 case StubField::Type::AllocSite
:
1172 case StubField::Type::Shape
:
1173 case StubField::Type::WeakShape
:
1174 case StubField::Type::WeakGetterSetter
:
1175 case StubField::Type::JSObject
:
1176 case StubField::Type::WeakObject
:
1177 case StubField::Type::Symbol
:
1178 case StubField::Type::String
:
1179 case StubField::Type::WeakBaseScript
:
1180 case StubField::Type::JitCode
:
1181 case StubField::Type::Id
:
1182 case StubField::Type::Limit
:
1183 MOZ_CRASH("Invalid type");
1187 void CacheIRWriter::copyStubData(uint8_t* dest
) const {
1188 MOZ_ASSERT(!failed());
1190 for (const StubField
& field
: stubFields_
) {
1191 if (field
.sizeIsWord()) {
1192 InitWordStubField(field
.type(), dest
, field
.asWord());
1193 dest
+= sizeof(uintptr_t);
1195 InitInt64StubField(field
.type(), dest
, field
.asInt64());
1196 dest
+= sizeof(uint64_t);
1201 ICCacheIRStub
* ICCacheIRStub::clone(JSRuntime
* rt
, ICStubSpace
& newSpace
) {
1202 const CacheIRStubInfo
* info
= stubInfo();
1203 MOZ_ASSERT(info
->makesGCCalls());
1205 size_t bytesNeeded
= info
->stubDataOffset() + info
->stubDataSize();
1207 AutoEnterOOMUnsafeRegion oomUnsafe
;
1208 void* newStubMem
= newSpace
.alloc(bytesNeeded
);
1210 oomUnsafe
.crash("ICCacheIRStub::clone");
1213 ICCacheIRStub
* newStub
= new (newStubMem
) ICCacheIRStub(*this);
1215 const uint8_t* src
= this->stubDataStart();
1216 uint8_t* dest
= newStub
->stubDataStart();
1218 // Because this can be called during sweeping when discarding JIT code, we
1219 // have to lock the store buffer
1220 gc::AutoLockStoreBuffer
lock(rt
);
1224 StubField::Type type
= info
->fieldType(field
);
1225 if (type
== StubField::Type::Limit
) {
1229 if (StubField::sizeIsWord(type
)) {
1230 const uintptr_t* srcField
= reinterpret_cast<const uintptr_t*>(src
);
1231 InitWordStubField(type
, dest
, *srcField
);
1232 src
+= sizeof(uintptr_t);
1233 dest
+= sizeof(uintptr_t);
1235 const uint64_t* srcField
= reinterpret_cast<const uint64_t*>(src
);
1236 InitInt64StubField(type
, dest
, *srcField
);
1237 src
+= sizeof(uint64_t);
1238 dest
+= sizeof(uint64_t);
1247 template <typename T
>
1248 static inline bool ShouldTraceWeakEdgeInStub(JSTracer
* trc
) {
1249 if constexpr (std::is_same_v
<T
, IonICStub
>) {
1250 // 'Weak' edges are traced strongly in IonICs.
1253 static_assert(std::is_same_v
<T
, ICCacheIRStub
>);
1254 return trc
->traceWeakEdges();
1258 template <typename T
>
1259 void jit::TraceCacheIRStub(JSTracer
* trc
, T
* stub
,
1260 const CacheIRStubInfo
* stubInfo
) {
1261 using Type
= StubField::Type
;
1266 Type fieldType
= stubInfo
->fieldType(field
);
1267 switch (fieldType
) {
1268 case Type::RawInt32
:
1269 case Type::RawPointer
:
1270 case Type::RawInt64
:
1274 // For CCW IC stubs, we can store same-zone but cross-compartment
1275 // shapes. Use TraceSameZoneCrossCompartmentEdge to not assert in the
1276 // GC. Note: CacheIRWriter::writeShapeField asserts we never store
1277 // cross-zone shapes.
1278 GCPtr
<Shape
*>& shapeField
=
1279 stubInfo
->getStubField
<T
, Type::Shape
>(stub
, offset
);
1280 TraceSameZoneCrossCompartmentEdge(trc
, &shapeField
, "cacheir-shape");
1283 case Type::WeakShape
:
1284 if (ShouldTraceWeakEdgeInStub
<T
>(trc
)) {
1285 WeakHeapPtr
<Shape
*>& shapeField
=
1286 stubInfo
->getStubField
<T
, Type::WeakShape
>(stub
, offset
);
1288 TraceSameZoneCrossCompartmentEdge(trc
, &shapeField
,
1289 "cacheir-weak-shape");
1293 case Type::WeakGetterSetter
:
1294 if (ShouldTraceWeakEdgeInStub
<T
>(trc
)) {
1297 &stubInfo
->getStubField
<T
, Type::WeakGetterSetter
>(stub
, offset
),
1298 "cacheir-weak-getter-setter");
1301 case Type::JSObject
: {
1302 TraceEdge(trc
, &stubInfo
->getStubField
<T
, Type::JSObject
>(stub
, offset
),
1306 case Type::WeakObject
:
1307 if (ShouldTraceWeakEdgeInStub
<T
>(trc
)) {
1309 trc
, &stubInfo
->getStubField
<T
, Type::WeakObject
>(stub
, offset
),
1310 "cacheir-weak-object");
1314 TraceEdge(trc
, &stubInfo
->getStubField
<T
, Type::Symbol
>(stub
, offset
),
1318 TraceEdge(trc
, &stubInfo
->getStubField
<T
, Type::String
>(stub
, offset
),
1321 case Type::WeakBaseScript
:
1322 if (ShouldTraceWeakEdgeInStub
<T
>(trc
)) {
1325 &stubInfo
->getStubField
<T
, Type::WeakBaseScript
>(stub
, offset
),
1326 "cacheir-weak-script");
1330 TraceEdge(trc
, &stubInfo
->getStubField
<T
, Type::JitCode
>(stub
, offset
),
1334 TraceEdge(trc
, &stubInfo
->getStubField
<T
, Type::Id
>(stub
, offset
),
1338 TraceEdge(trc
, &stubInfo
->getStubField
<T
, Type::Value
>(stub
, offset
),
1341 case Type::AllocSite
: {
1342 gc::AllocSite
* site
=
1343 stubInfo
->getPtrStubField
<T
, gc::AllocSite
>(stub
, offset
);
1351 offset
+= StubField::sizeInBytes(fieldType
);
1355 template void jit::TraceCacheIRStub(JSTracer
* trc
, ICCacheIRStub
* stub
,
1356 const CacheIRStubInfo
* stubInfo
);
1358 template void jit::TraceCacheIRStub(JSTracer
* trc
, IonICStub
* stub
,
1359 const CacheIRStubInfo
* stubInfo
);
1361 template <typename T
>
1362 bool jit::TraceWeakCacheIRStub(JSTracer
* trc
, T
* stub
,
1363 const CacheIRStubInfo
* stubInfo
) {
1364 using Type
= StubField::Type
;
1369 Type fieldType
= stubInfo
->fieldType(field
);
1370 switch (fieldType
) {
1371 case Type::WeakShape
: {
1372 WeakHeapPtr
<Shape
*>& shapeField
=
1373 stubInfo
->getStubField
<T
, Type::WeakShape
>(stub
, offset
);
1374 auto r
= TraceWeakEdge(trc
, &shapeField
, "cacheir-weak-shape");
1380 case Type::WeakObject
: {
1381 WeakHeapPtr
<JSObject
*>& objectField
=
1382 stubInfo
->getStubField
<T
, Type::WeakObject
>(stub
, offset
);
1383 auto r
= TraceWeakEdge(trc
, &objectField
, "cacheir-weak-object");
1389 case Type::WeakBaseScript
: {
1390 WeakHeapPtr
<BaseScript
*>& scriptField
=
1391 stubInfo
->getStubField
<T
, Type::WeakBaseScript
>(stub
, offset
);
1392 auto r
= TraceWeakEdge(trc
, &scriptField
, "cacheir-weak-script");
1398 case Type::WeakGetterSetter
: {
1399 WeakHeapPtr
<GetterSetter
*>& getterSetterField
=
1400 stubInfo
->getStubField
<T
, Type::WeakGetterSetter
>(stub
, offset
);
1401 auto r
= TraceWeakEdge(trc
, &getterSetterField
,
1402 "cacheir-weak-getter-setter");
1409 return true; // Done.
1410 case Type::RawInt32
:
1411 case Type::RawPointer
:
1413 case Type::JSObject
:
1418 case Type::AllocSite
:
1419 case Type::RawInt64
:
1422 break; // Skip non-weak fields.
1425 offset
+= StubField::sizeInBytes(fieldType
);
1429 template bool jit::TraceWeakCacheIRStub(JSTracer
* trc
, ICCacheIRStub
* stub
,
1430 const CacheIRStubInfo
* stubInfo
);
1432 template bool jit::TraceWeakCacheIRStub(JSTracer
* trc
, IonICStub
* stub
,
1433 const CacheIRStubInfo
* stubInfo
);
1435 bool CacheIRWriter::stubDataEquals(const uint8_t* stubData
) const {
1436 MOZ_ASSERT(!failed());
1438 const uintptr_t* stubDataWords
= reinterpret_cast<const uintptr_t*>(stubData
);
1440 for (const StubField
& field
: stubFields_
) {
1441 if (field
.sizeIsWord()) {
1442 if (field
.asWord() != *stubDataWords
) {
1449 if (field
.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords
)) {
1452 stubDataWords
+= sizeof(uint64_t) / sizeof(uintptr_t);
1458 bool CacheIRWriter::stubDataEqualsIgnoring(const uint8_t* stubData
,
1459 uint32_t ignoreOffset
) const {
1460 MOZ_ASSERT(!failed());
1462 uint32_t offset
= 0;
1463 for (const StubField
& field
: stubFields_
) {
1464 if (offset
!= ignoreOffset
) {
1465 if (field
.sizeIsWord()) {
1466 uintptr_t raw
= *reinterpret_cast<const uintptr_t*>(stubData
+ offset
);
1467 if (field
.asWord() != raw
) {
1471 uint64_t raw
= *reinterpret_cast<const uint64_t*>(stubData
+ offset
);
1472 if (field
.asInt64() != raw
) {
1477 offset
+= StubField::sizeInBytes(field
.type());
1483 HashNumber
CacheIRStubKey::hash(const CacheIRStubKey::Lookup
& l
) {
1484 HashNumber hash
= mozilla::HashBytes(l
.code
, l
.length
);
1485 hash
= mozilla::AddToHash(hash
, uint32_t(l
.kind
));
1486 hash
= mozilla::AddToHash(hash
, uint32_t(l
.engine
));
1490 bool CacheIRStubKey::match(const CacheIRStubKey
& entry
,
1491 const CacheIRStubKey::Lookup
& l
) {
1492 if (entry
.stubInfo
->kind() != l
.kind
) {
1496 if (entry
.stubInfo
->engine() != l
.engine
) {
1500 if (entry
.stubInfo
->codeLength() != l
.length
) {
1504 if (!mozilla::ArrayEqual(entry
.stubInfo
->code(), l
.code
, l
.length
)) {
1511 CacheIRReader::CacheIRReader(const CacheIRStubInfo
* stubInfo
)
1512 : CacheIRReader(stubInfo
->code(),
1513 stubInfo
->code() + stubInfo
->codeLength()) {}
1515 CacheIRStubInfo
* CacheIRStubInfo::New(CacheKind kind
, ICStubEngine engine
,
1517 uint32_t stubDataOffset
,
1518 const CacheIRWriter
& writer
) {
1519 size_t numStubFields
= writer
.numStubFields();
1520 size_t bytesNeeded
=
1521 sizeof(CacheIRStubInfo
) + writer
.codeLength() +
1522 (numStubFields
+ 1); // +1 for the GCType::Limit terminator.
1523 uint8_t* p
= js_pod_malloc
<uint8_t>(bytesNeeded
);
1528 // Copy the CacheIR code.
1529 uint8_t* codeStart
= p
+ sizeof(CacheIRStubInfo
);
1530 mozilla::PodCopy(codeStart
, writer
.codeStart(), writer
.codeLength());
1532 static_assert(sizeof(StubField::Type
) == sizeof(uint8_t),
1533 "StubField::Type must fit in uint8_t");
1535 // Copy the stub field types.
1536 uint8_t* fieldTypes
= codeStart
+ writer
.codeLength();
1537 for (size_t i
= 0; i
< numStubFields
; i
++) {
1538 fieldTypes
[i
] = uint8_t(writer
.stubFieldType(i
));
1540 fieldTypes
[numStubFields
] = uint8_t(StubField::Type::Limit
);
1542 return new (p
) CacheIRStubInfo(kind
, engine
, makesGCCalls
, stubDataOffset
,
1543 writer
.codeLength());
1546 bool OperandLocation::operator==(const OperandLocation
& other
) const {
1547 if (kind_
!= other
.kind_
) {
1555 return payloadReg() == other
.payloadReg() &&
1556 payloadType() == other
.payloadType();
1558 return valueReg() == other
.valueReg();
1560 return payloadStack() == other
.payloadStack() &&
1561 payloadType() == other
.payloadType();
1563 return valueStack() == other
.valueStack();
1565 return baselineFrameSlot() == other
.baselineFrameSlot();
1567 return constant() == other
.constant();
1569 return doubleReg() == other
.doubleReg();
1572 MOZ_CRASH("Invalid OperandLocation kind");
1575 AutoOutputRegister::AutoOutputRegister(CacheIRCompiler
& compiler
)
1576 : output_(compiler
.outputUnchecked_
.ref()), alloc_(compiler
.allocator
) {
1577 if (output_
.hasValue()) {
1578 alloc_
.allocateFixedValueRegister(compiler
.masm
, output_
.valueReg());
1579 } else if (!output_
.typedReg().isFloat()) {
1580 alloc_
.allocateFixedRegister(compiler
.masm
, output_
.typedReg().gpr());
1584 AutoOutputRegister::~AutoOutputRegister() {
1585 if (output_
.hasValue()) {
1586 alloc_
.releaseValueRegister(output_
.valueReg());
1587 } else if (!output_
.typedReg().isFloat()) {
1588 alloc_
.releaseRegister(output_
.typedReg().gpr());
1592 bool FailurePath::canShareFailurePath(const FailurePath
& other
) const {
1593 if (stackPushed_
!= other
.stackPushed_
) {
1597 if (spilledRegs_
.length() != other
.spilledRegs_
.length()) {
1601 for (size_t i
= 0; i
< spilledRegs_
.length(); i
++) {
1602 if (spilledRegs_
[i
] != other
.spilledRegs_
[i
]) {
1607 MOZ_ASSERT(inputs_
.length() == other
.inputs_
.length());
1609 for (size_t i
= 0; i
< inputs_
.length(); i
++) {
1610 if (inputs_
[i
] != other
.inputs_
[i
]) {
1617 bool CacheIRCompiler::addFailurePath(FailurePath
** failure
) {
1619 allocator
.setAddedFailurePath();
1621 MOZ_ASSERT(!allocator
.hasAutoScratchFloatRegisterSpill());
1623 FailurePath newFailure
;
1624 for (size_t i
= 0; i
< writer_
.numInputOperands(); i
++) {
1625 if (!newFailure
.appendInput(allocator
.operandLocation(i
))) {
1629 if (!newFailure
.setSpilledRegs(allocator
.spilledRegs())) {
1632 newFailure
.setStackPushed(allocator
.stackPushed());
1634 // Reuse the previous failure path if the current one is the same, to
1635 // avoid emitting duplicate code.
1636 if (failurePaths
.length() > 0 &&
1637 failurePaths
.back().canShareFailurePath(newFailure
)) {
1638 *failure
= &failurePaths
.back();
1642 if (!failurePaths
.append(std::move(newFailure
))) {
1646 *failure
= &failurePaths
.back();
1650 bool CacheIRCompiler::emitFailurePath(size_t index
) {
1651 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1652 FailurePath
& failure
= failurePaths
[index
];
1654 allocator
.setStackPushed(failure
.stackPushed());
1656 for (size_t i
= 0; i
< writer_
.numInputOperands(); i
++) {
1657 allocator
.setOperandLocation(i
, failure
.input(i
));
1660 if (!allocator
.setSpilledRegs(failure
.spilledRegs())) {
1664 masm
.bind(failure
.label());
1665 allocator
.restoreInputState(masm
);
1669 bool CacheIRCompiler::emitGuardIsNumber(ValOperandId inputId
) {
1670 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1671 JSValueType knownType
= allocator
.knownType(inputId
);
1673 // Doubles and ints are numbers!
1674 if (knownType
== JSVAL_TYPE_DOUBLE
|| knownType
== JSVAL_TYPE_INT32
) {
1678 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1679 FailurePath
* failure
;
1680 if (!addFailurePath(&failure
)) {
1684 masm
.branchTestNumber(Assembler::NotEqual
, input
, failure
->label());
1688 bool CacheIRCompiler::emitGuardToObject(ValOperandId inputId
) {
1689 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1690 if (allocator
.knownType(inputId
) == JSVAL_TYPE_OBJECT
) {
1694 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1695 FailurePath
* failure
;
1696 if (!addFailurePath(&failure
)) {
1699 masm
.branchTestObject(Assembler::NotEqual
, input
, failure
->label());
1703 bool CacheIRCompiler::emitGuardIsNullOrUndefined(ValOperandId inputId
) {
1704 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1705 JSValueType knownType
= allocator
.knownType(inputId
);
1706 if (knownType
== JSVAL_TYPE_UNDEFINED
|| knownType
== JSVAL_TYPE_NULL
) {
1710 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1711 FailurePath
* failure
;
1712 if (!addFailurePath(&failure
)) {
1717 masm
.branchTestNull(Assembler::Equal
, input
, &success
);
1718 masm
.branchTestUndefined(Assembler::NotEqual
, input
, failure
->label());
1720 masm
.bind(&success
);
1724 bool CacheIRCompiler::emitGuardIsNull(ValOperandId inputId
) {
1725 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1726 JSValueType knownType
= allocator
.knownType(inputId
);
1727 if (knownType
== JSVAL_TYPE_NULL
) {
1731 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1732 FailurePath
* failure
;
1733 if (!addFailurePath(&failure
)) {
1737 masm
.branchTestNull(Assembler::NotEqual
, input
, failure
->label());
1741 bool CacheIRCompiler::emitGuardIsUndefined(ValOperandId inputId
) {
1742 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1743 JSValueType knownType
= allocator
.knownType(inputId
);
1744 if (knownType
== JSVAL_TYPE_UNDEFINED
) {
1748 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1749 FailurePath
* failure
;
1750 if (!addFailurePath(&failure
)) {
1754 masm
.branchTestUndefined(Assembler::NotEqual
, input
, failure
->label());
1758 bool CacheIRCompiler::emitGuardIsNotUninitializedLexical(ValOperandId valId
) {
1759 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1761 ValueOperand val
= allocator
.useValueRegister(masm
, valId
);
1763 FailurePath
* failure
;
1764 if (!addFailurePath(&failure
)) {
1768 masm
.branchTestMagicValue(Assembler::Equal
, val
, JS_UNINITIALIZED_LEXICAL
,
1773 bool CacheIRCompiler::emitGuardBooleanToInt32(ValOperandId inputId
,
1774 Int32OperandId resultId
) {
1775 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1776 Register output
= allocator
.defineRegister(masm
, resultId
);
1778 if (allocator
.knownType(inputId
) == JSVAL_TYPE_BOOLEAN
) {
1780 allocator
.useRegister(masm
, BooleanOperandId(inputId
.id()));
1781 masm
.move32(input
, output
);
1784 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1786 FailurePath
* failure
;
1787 if (!addFailurePath(&failure
)) {
1791 masm
.fallibleUnboxBoolean(input
, output
, failure
->label());
1795 bool CacheIRCompiler::emitGuardToString(ValOperandId inputId
) {
1796 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1797 if (allocator
.knownType(inputId
) == JSVAL_TYPE_STRING
) {
1801 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1802 FailurePath
* failure
;
1803 if (!addFailurePath(&failure
)) {
1806 masm
.branchTestString(Assembler::NotEqual
, input
, failure
->label());
1810 bool CacheIRCompiler::emitGuardToSymbol(ValOperandId inputId
) {
1811 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1812 if (allocator
.knownType(inputId
) == JSVAL_TYPE_SYMBOL
) {
1816 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1817 FailurePath
* failure
;
1818 if (!addFailurePath(&failure
)) {
1821 masm
.branchTestSymbol(Assembler::NotEqual
, input
, failure
->label());
1825 bool CacheIRCompiler::emitGuardToBigInt(ValOperandId inputId
) {
1826 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1827 if (allocator
.knownType(inputId
) == JSVAL_TYPE_BIGINT
) {
1831 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1832 FailurePath
* failure
;
1833 if (!addFailurePath(&failure
)) {
1836 masm
.branchTestBigInt(Assembler::NotEqual
, input
, failure
->label());
1840 bool CacheIRCompiler::emitGuardToBoolean(ValOperandId inputId
) {
1841 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1843 if (allocator
.knownType(inputId
) == JSVAL_TYPE_BOOLEAN
) {
1847 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1848 FailurePath
* failure
;
1849 if (!addFailurePath(&failure
)) {
1852 masm
.branchTestBoolean(Assembler::NotEqual
, input
, failure
->label());
1856 bool CacheIRCompiler::emitGuardToInt32(ValOperandId inputId
) {
1857 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1859 if (allocator
.knownType(inputId
) == JSVAL_TYPE_INT32
) {
1863 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1865 FailurePath
* failure
;
1866 if (!addFailurePath(&failure
)) {
1870 masm
.branchTestInt32(Assembler::NotEqual
, input
, failure
->label());
1874 bool CacheIRCompiler::emitGuardToNonGCThing(ValOperandId inputId
) {
1875 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1877 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1879 FailurePath
* failure
;
1880 if (!addFailurePath(&failure
)) {
1884 masm
.branchTestGCThing(Assembler::Equal
, input
, failure
->label());
1888 // Infallible |emitDouble| emitters can use this implementation to avoid
1889 // generating extra clean-up instructions to restore the scratch float register.
1890 // To select this function simply omit the |Label* fail| parameter for the
1891 // emitter lambda function.
1892 template <typename EmitDouble
>
1893 static std::enable_if_t
<mozilla::FunctionTypeTraits
<EmitDouble
>::arity
== 1,
1895 EmitGuardDouble(CacheIRCompiler
* compiler
, MacroAssembler
& masm
,
1896 ValueOperand input
, FailurePath
* failure
,
1897 EmitDouble emitDouble
) {
1898 AutoScratchFloatRegister
floatReg(compiler
);
1900 masm
.unboxDouble(input
, floatReg
);
1901 emitDouble(floatReg
.get());
1904 template <typename EmitDouble
>
1905 static std::enable_if_t
<mozilla::FunctionTypeTraits
<EmitDouble
>::arity
== 2,
1907 EmitGuardDouble(CacheIRCompiler
* compiler
, MacroAssembler
& masm
,
1908 ValueOperand input
, FailurePath
* failure
,
1909 EmitDouble emitDouble
) {
1910 AutoScratchFloatRegister
floatReg(compiler
, failure
);
1912 masm
.unboxDouble(input
, floatReg
);
1913 emitDouble(floatReg
.get(), floatReg
.failure());
1916 template <typename EmitInt32
, typename EmitDouble
>
1917 static void EmitGuardInt32OrDouble(CacheIRCompiler
* compiler
,
1918 MacroAssembler
& masm
, ValueOperand input
,
1919 Register output
, FailurePath
* failure
,
1920 EmitInt32 emitInt32
, EmitDouble emitDouble
) {
1924 ScratchTagScope
tag(masm
, input
);
1925 masm
.splitTagForTest(input
, tag
);
1928 masm
.branchTestInt32(Assembler::NotEqual
, tag
, ¬Int32
);
1930 ScratchTagScopeRelease
_(&tag
);
1932 masm
.unboxInt32(input
, output
);
1937 masm
.bind(¬Int32
);
1939 masm
.branchTestDouble(Assembler::NotEqual
, tag
, failure
->label());
1941 ScratchTagScopeRelease
_(&tag
);
1943 EmitGuardDouble(compiler
, masm
, input
, failure
, emitDouble
);
1950 bool CacheIRCompiler::emitGuardToInt32Index(ValOperandId inputId
,
1951 Int32OperandId resultId
) {
1952 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1953 Register output
= allocator
.defineRegister(masm
, resultId
);
1955 if (allocator
.knownType(inputId
) == JSVAL_TYPE_INT32
) {
1956 Register input
= allocator
.useRegister(masm
, Int32OperandId(inputId
.id()));
1957 masm
.move32(input
, output
);
1961 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1963 FailurePath
* failure
;
1964 if (!addFailurePath(&failure
)) {
1968 EmitGuardInt32OrDouble(
1969 this, masm
, input
, output
, failure
,
1971 // No-op if the value is already an int32.
1973 [&](FloatRegister floatReg
, Label
* fail
) {
1974 // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
1975 masm
.convertDoubleToInt32(floatReg
, output
, fail
, false);
1981 bool CacheIRCompiler::emitInt32ToIntPtr(Int32OperandId inputId
,
1982 IntPtrOperandId resultId
) {
1983 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1985 Register input
= allocator
.useRegister(masm
, inputId
);
1986 Register output
= allocator
.defineRegister(masm
, resultId
);
1988 masm
.move32SignExtendToPtr(input
, output
);
1992 bool CacheIRCompiler::emitGuardNumberToIntPtrIndex(NumberOperandId inputId
,
1994 IntPtrOperandId resultId
) {
1995 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1997 Register output
= allocator
.defineRegister(masm
, resultId
);
1999 FailurePath
* failure
= nullptr;
2001 if (!addFailurePath(&failure
)) {
2006 AutoScratchFloatRegister
floatReg(this, failure
);
2007 allocator
.ensureDoubleRegister(masm
, inputId
, floatReg
);
2009 // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
2012 masm
.convertDoubleToPtr(floatReg
, output
, &fail
, false);
2015 // Substitute the invalid index with an arbitrary out-of-bounds index.
2017 masm
.movePtr(ImmWord(-1), output
);
2021 masm
.convertDoubleToPtr(floatReg
, output
, floatReg
.failure(), false);
2027 bool CacheIRCompiler::emitGuardToInt32ModUint32(ValOperandId inputId
,
2028 Int32OperandId resultId
) {
2029 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2030 Register output
= allocator
.defineRegister(masm
, resultId
);
2032 if (allocator
.knownType(inputId
) == JSVAL_TYPE_INT32
) {
2033 ConstantOrRegister input
= allocator
.useConstantOrRegister(masm
, inputId
);
2034 if (input
.constant()) {
2035 masm
.move32(Imm32(input
.value().toInt32()), output
);
2037 MOZ_ASSERT(input
.reg().type() == MIRType::Int32
);
2038 masm
.move32(input
.reg().typedReg().gpr(), output
);
2043 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
2045 FailurePath
* failure
;
2046 if (!addFailurePath(&failure
)) {
2050 EmitGuardInt32OrDouble(
2051 this, masm
, input
, output
, failure
,
2053 // No-op if the value is already an int32.
2055 [&](FloatRegister floatReg
, Label
* fail
) {
2056 masm
.branchTruncateDoubleMaybeModUint32(floatReg
, output
, fail
);
2062 bool CacheIRCompiler::emitGuardToUint8Clamped(ValOperandId inputId
,
2063 Int32OperandId resultId
) {
2064 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2065 Register output
= allocator
.defineRegister(masm
, resultId
);
2067 if (allocator
.knownType(inputId
) == JSVAL_TYPE_INT32
) {
2068 ConstantOrRegister input
= allocator
.useConstantOrRegister(masm
, inputId
);
2069 if (input
.constant()) {
2070 masm
.move32(Imm32(ClampDoubleToUint8(input
.value().toInt32())), output
);
2072 MOZ_ASSERT(input
.reg().type() == MIRType::Int32
);
2073 masm
.move32(input
.reg().typedReg().gpr(), output
);
2074 masm
.clampIntToUint8(output
);
2079 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
2081 FailurePath
* failure
;
2082 if (!addFailurePath(&failure
)) {
2086 EmitGuardInt32OrDouble(
2087 this, masm
, input
, output
, failure
,
2089 // |output| holds the unboxed int32 value.
2090 masm
.clampIntToUint8(output
);
2092 [&](FloatRegister floatReg
) {
2093 masm
.clampDoubleToUint8(floatReg
, output
);
2099 bool CacheIRCompiler::emitGuardNonDoubleType(ValOperandId inputId
,
2101 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2103 if (allocator
.knownType(inputId
) == JSValueType(type
)) {
2107 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
2109 FailurePath
* failure
;
2110 if (!addFailurePath(&failure
)) {
2115 case ValueType::String
:
2116 masm
.branchTestString(Assembler::NotEqual
, input
, failure
->label());
2118 case ValueType::Symbol
:
2119 masm
.branchTestSymbol(Assembler::NotEqual
, input
, failure
->label());
2121 case ValueType::BigInt
:
2122 masm
.branchTestBigInt(Assembler::NotEqual
, input
, failure
->label());
2124 case ValueType::Int32
:
2125 masm
.branchTestInt32(Assembler::NotEqual
, input
, failure
->label());
2127 case ValueType::Boolean
:
2128 masm
.branchTestBoolean(Assembler::NotEqual
, input
, failure
->label());
2130 case ValueType::Undefined
:
2131 masm
.branchTestUndefined(Assembler::NotEqual
, input
, failure
->label());
2133 case ValueType::Null
:
2134 masm
.branchTestNull(Assembler::NotEqual
, input
, failure
->label());
2136 case ValueType::Double
:
2137 case ValueType::Magic
:
2138 case ValueType::PrivateGCThing
:
2139 case ValueType::Object
:
2140 #ifdef ENABLE_RECORD_TUPLE
2141 case ValueType::ExtendedPrimitive
:
2143 MOZ_CRASH("unexpected type");
2149 bool CacheIRCompiler::emitGuardClass(ObjOperandId objId
, GuardClassKind kind
) {
2150 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2151 Register obj
= allocator
.useRegister(masm
, objId
);
2152 AutoScratchRegister
scratch(allocator
, masm
);
2154 FailurePath
* failure
;
2155 if (!addFailurePath(&failure
)) {
2159 if (kind
== GuardClassKind::JSFunction
) {
2160 if (objectGuardNeedsSpectreMitigations(objId
)) {
2161 masm
.branchTestObjIsFunction(Assembler::NotEqual
, obj
, scratch
, obj
,
2164 masm
.branchTestObjIsFunctionNoSpectreMitigations(
2165 Assembler::NotEqual
, obj
, scratch
, failure
->label());
2170 const JSClass
* clasp
= nullptr;
2172 case GuardClassKind::Array
:
2173 clasp
= &ArrayObject::class_
;
2175 case GuardClassKind::PlainObject
:
2176 clasp
= &PlainObject::class_
;
2178 case GuardClassKind::FixedLengthArrayBuffer
:
2179 clasp
= &FixedLengthArrayBufferObject::class_
;
2181 case GuardClassKind::FixedLengthSharedArrayBuffer
:
2182 clasp
= &FixedLengthSharedArrayBufferObject::class_
;
2184 case GuardClassKind::FixedLengthDataView
:
2185 clasp
= &FixedLengthDataViewObject::class_
;
2187 case GuardClassKind::MappedArguments
:
2188 clasp
= &MappedArgumentsObject::class_
;
2190 case GuardClassKind::UnmappedArguments
:
2191 clasp
= &UnmappedArgumentsObject::class_
;
2193 case GuardClassKind::WindowProxy
:
2194 clasp
= cx_
->runtime()->maybeWindowProxyClass();
2196 case GuardClassKind::Set
:
2197 clasp
= &SetObject::class_
;
2199 case GuardClassKind::Map
:
2200 clasp
= &MapObject::class_
;
2202 case GuardClassKind::BoundFunction
:
2203 clasp
= &BoundFunctionObject::class_
;
2205 case GuardClassKind::JSFunction
:
2206 MOZ_CRASH("JSFunction handled before switch");
2210 if (objectGuardNeedsSpectreMitigations(objId
)) {
2211 masm
.branchTestObjClass(Assembler::NotEqual
, obj
, clasp
, scratch
, obj
,
2214 masm
.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual
, obj
, clasp
,
2215 scratch
, failure
->label());
2221 bool CacheIRCompiler::emitGuardNullProto(ObjOperandId objId
) {
2222 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2223 Register obj
= allocator
.useRegister(masm
, objId
);
2224 AutoScratchRegister
scratch(allocator
, masm
);
2226 FailurePath
* failure
;
2227 if (!addFailurePath(&failure
)) {
2231 masm
.loadObjProto(obj
, scratch
);
2232 masm
.branchTestPtr(Assembler::NonZero
, scratch
, scratch
, failure
->label());
2236 bool CacheIRCompiler::emitGuardIsExtensible(ObjOperandId objId
) {
2237 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2238 Register obj
= allocator
.useRegister(masm
, objId
);
2239 AutoScratchRegister
scratch(allocator
, masm
);
2241 FailurePath
* failure
;
2242 if (!addFailurePath(&failure
)) {
2246 masm
.branchIfObjectNotExtensible(obj
, scratch
, failure
->label());
2250 bool CacheIRCompiler::emitGuardDynamicSlotIsSpecificObject(
2251 ObjOperandId objId
, ObjOperandId expectedId
, uint32_t slotOffset
) {
2252 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2253 Register obj
= allocator
.useRegister(masm
, objId
);
2254 Register expectedObject
= allocator
.useRegister(masm
, expectedId
);
2256 // Allocate registers before the failure path to make sure they're registered
2257 // by addFailurePath.
2258 AutoScratchRegister
scratch1(allocator
, masm
);
2259 AutoScratchRegister
scratch2(allocator
, masm
);
2261 FailurePath
* failure
;
2262 if (!addFailurePath(&failure
)) {
2266 // Guard on the expected object.
2267 StubFieldOffset
slot(slotOffset
, StubField::Type::RawInt32
);
2268 masm
.loadPtr(Address(obj
, NativeObject::offsetOfSlots()), scratch1
);
2269 emitLoadStubField(slot
, scratch2
);
2270 BaseObjectSlotIndex
expectedSlot(scratch1
, scratch2
);
2271 masm
.fallibleUnboxObject(expectedSlot
, scratch1
, failure
->label());
2272 masm
.branchPtr(Assembler::NotEqual
, expectedObject
, scratch1
,
2278 bool CacheIRCompiler::emitGuardDynamicSlotIsNotObject(ObjOperandId objId
,
2279 uint32_t slotOffset
) {
2280 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2281 Register obj
= allocator
.useRegister(masm
, objId
);
2283 AutoScratchRegister
scratch1(allocator
, masm
);
2284 AutoScratchRegister
scratch2(allocator
, masm
);
2286 FailurePath
* failure
;
2287 if (!addFailurePath(&failure
)) {
2291 // Guard that the slot isn't an object.
2292 StubFieldOffset
slot(slotOffset
, StubField::Type::RawInt32
);
2293 masm
.loadPtr(Address(obj
, NativeObject::offsetOfSlots()), scratch1
);
2294 emitLoadStubField(slot
, scratch2
);
2295 BaseObjectSlotIndex
expectedSlot(scratch1
, scratch2
);
2296 masm
.branchTestObject(Assembler::Equal
, expectedSlot
, failure
->label());
2301 bool CacheIRCompiler::emitGuardFixedSlotValue(ObjOperandId objId
,
2302 uint32_t offsetOffset
,
2303 uint32_t valOffset
) {
2304 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2306 Register obj
= allocator
.useRegister(masm
, objId
);
2308 AutoScratchRegister
scratch(allocator
, masm
);
2309 AutoScratchValueRegister
scratchVal(allocator
, masm
);
2311 FailurePath
* failure
;
2312 if (!addFailurePath(&failure
)) {
2316 StubFieldOffset
offset(offsetOffset
, StubField::Type::RawInt32
);
2317 emitLoadStubField(offset
, scratch
);
2319 StubFieldOffset
val(valOffset
, StubField::Type::Value
);
2320 emitLoadValueStubField(val
, scratchVal
);
2322 BaseIndex
slotVal(obj
, scratch
, TimesOne
);
2323 masm
.branchTestValue(Assembler::NotEqual
, slotVal
, scratchVal
,
2328 bool CacheIRCompiler::emitGuardDynamicSlotValue(ObjOperandId objId
,
2329 uint32_t offsetOffset
,
2330 uint32_t valOffset
) {
2331 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2333 Register obj
= allocator
.useRegister(masm
, objId
);
2335 AutoScratchRegister
scratch1(allocator
, masm
);
2336 AutoScratchRegister
scratch2(allocator
, masm
);
2337 AutoScratchValueRegister
scratchVal(allocator
, masm
);
2339 FailurePath
* failure
;
2340 if (!addFailurePath(&failure
)) {
2344 masm
.loadPtr(Address(obj
, NativeObject::offsetOfSlots()), scratch1
);
2346 StubFieldOffset
offset(offsetOffset
, StubField::Type::RawInt32
);
2347 emitLoadStubField(offset
, scratch2
);
2349 StubFieldOffset
val(valOffset
, StubField::Type::Value
);
2350 emitLoadValueStubField(val
, scratchVal
);
2352 BaseIndex
slotVal(scratch1
, scratch2
, TimesOne
);
2353 masm
.branchTestValue(Assembler::NotEqual
, slotVal
, scratchVal
,
2358 bool CacheIRCompiler::emitLoadScriptedProxyHandler(ValOperandId resultId
,
2359 ObjOperandId objId
) {
2360 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2362 Register obj
= allocator
.useRegister(masm
, objId
);
2363 ValueOperand output
= allocator
.defineValueRegister(masm
, resultId
);
2365 masm
.loadPtr(Address(obj
, ProxyObject::offsetOfReservedSlots()),
2366 output
.scratchReg());
2368 Address(output
.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
2369 ScriptedProxyHandler::HANDLER_EXTRA
)),
2374 bool CacheIRCompiler::emitIdToStringOrSymbol(ValOperandId resultId
,
2375 ValOperandId idId
) {
2376 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2378 ValueOperand id
= allocator
.useValueRegister(masm
, idId
);
2379 ValueOperand output
= allocator
.defineValueRegister(masm
, resultId
);
2380 AutoScratchRegister
scratch(allocator
, masm
);
2382 FailurePath
* failure
;
2383 if (!addFailurePath(&failure
)) {
2387 masm
.moveValue(id
, output
);
2389 Label done
, intDone
, callVM
;
2391 ScratchTagScope
tag(masm
, output
);
2392 masm
.splitTagForTest(output
, tag
);
2393 masm
.branchTestString(Assembler::Equal
, tag
, &done
);
2394 masm
.branchTestSymbol(Assembler::Equal
, tag
, &done
);
2395 masm
.branchTestInt32(Assembler::NotEqual
, tag
, failure
->label());
2398 Register intReg
= output
.scratchReg();
2399 masm
.unboxInt32(output
, intReg
);
2401 // Fast path for small integers.
2402 masm
.lookupStaticIntString(intReg
, intReg
, scratch
, cx_
->staticStrings(),
2404 masm
.jump(&intDone
);
2407 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
2408 liveVolatileFloatRegs());
2409 masm
.PushRegsInMask(volatileRegs
);
2411 using Fn
= JSLinearString
* (*)(JSContext
* cx
, int32_t i
);
2412 masm
.setupUnalignedABICall(scratch
);
2413 masm
.loadJSContext(scratch
);
2414 masm
.passABIArg(scratch
);
2415 masm
.passABIArg(intReg
);
2416 masm
.callWithABI
<Fn
, js::Int32ToStringPure
>();
2418 masm
.storeCallPointerResult(intReg
);
2420 LiveRegisterSet ignore
;
2422 masm
.PopRegsInMaskIgnore(volatileRegs
, ignore
);
2424 masm
.branchPtr(Assembler::Equal
, intReg
, ImmPtr(nullptr), failure
->label());
2426 masm
.bind(&intDone
);
2427 masm
.tagValue(JSVAL_TYPE_STRING
, intReg
, output
);
2433 bool CacheIRCompiler::emitLoadFixedSlot(ValOperandId resultId
,
2435 uint32_t offsetOffset
) {
2436 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2438 ValueOperand output
= allocator
.defineValueRegister(masm
, resultId
);
2439 Register obj
= allocator
.useRegister(masm
, objId
);
2440 AutoScratchRegister
scratch(allocator
, masm
);
2442 StubFieldOffset
slotIndex(offsetOffset
, StubField::Type::RawInt32
);
2443 emitLoadStubField(slotIndex
, scratch
);
2445 masm
.loadValue(BaseIndex(obj
, scratch
, TimesOne
), output
);
2449 bool CacheIRCompiler::emitLoadDynamicSlot(ValOperandId resultId
,
2451 uint32_t slotOffset
) {
2452 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2454 ValueOperand output
= allocator
.defineValueRegister(masm
, resultId
);
2455 Register obj
= allocator
.useRegister(masm
, objId
);
2456 AutoScratchRegister
scratch1(allocator
, masm
);
2457 Register scratch2
= output
.scratchReg();
2459 StubFieldOffset
slotIndex(slotOffset
, StubField::Type::RawInt32
);
2460 emitLoadStubField(slotIndex
, scratch2
);
2462 masm
.loadPtr(Address(obj
, NativeObject::offsetOfSlots()), scratch1
);
2463 masm
.loadValue(BaseObjectSlotIndex(scratch1
, scratch2
), output
);
2467 bool CacheIRCompiler::emitGuardIsNativeObject(ObjOperandId objId
) {
2468 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2470 Register obj
= allocator
.useRegister(masm
, objId
);
2471 AutoScratchRegister
scratch(allocator
, masm
);
2473 FailurePath
* failure
;
2474 if (!addFailurePath(&failure
)) {
2478 masm
.branchIfNonNativeObj(obj
, scratch
, failure
->label());
2482 bool CacheIRCompiler::emitGuardIsProxy(ObjOperandId objId
) {
2483 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2485 Register obj
= allocator
.useRegister(masm
, objId
);
2486 AutoScratchRegister
scratch(allocator
, masm
);
2488 FailurePath
* failure
;
2489 if (!addFailurePath(&failure
)) {
2493 masm
.branchTestObjectIsProxy(false, obj
, scratch
, failure
->label());
2497 bool CacheIRCompiler::emitGuardIsNotProxy(ObjOperandId objId
) {
2498 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2500 Register obj
= allocator
.useRegister(masm
, objId
);
2501 AutoScratchRegister
scratch(allocator
, masm
);
2503 FailurePath
* failure
;
2504 if (!addFailurePath(&failure
)) {
2508 masm
.branchTestObjectIsProxy(true, obj
, scratch
, failure
->label());
2512 bool CacheIRCompiler::emitGuardIsNotArrayBufferMaybeShared(ObjOperandId objId
) {
2513 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2515 Register obj
= allocator
.useRegister(masm
, objId
);
2516 AutoScratchRegister
scratch(allocator
, masm
);
2518 FailurePath
* failure
;
2519 if (!addFailurePath(&failure
)) {
2523 masm
.loadObjClassUnsafe(obj
, scratch
);
2524 masm
.branchPtr(Assembler::Equal
, scratch
,
2525 ImmPtr(&FixedLengthArrayBufferObject::class_
),
2527 masm
.branchPtr(Assembler::Equal
, scratch
,
2528 ImmPtr(&FixedLengthSharedArrayBufferObject::class_
),
2530 masm
.branchPtr(Assembler::Equal
, scratch
,
2531 ImmPtr(&ResizableArrayBufferObject::class_
), failure
->label());
2532 masm
.branchPtr(Assembler::Equal
, scratch
,
2533 ImmPtr(&GrowableSharedArrayBufferObject::class_
),
2538 bool CacheIRCompiler::emitGuardIsTypedArray(ObjOperandId objId
) {
2539 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2541 Register obj
= allocator
.useRegister(masm
, objId
);
2542 AutoScratchRegister
scratch(allocator
, masm
);
2544 FailurePath
* failure
;
2545 if (!addFailurePath(&failure
)) {
2549 masm
.loadObjClassUnsafe(obj
, scratch
);
2550 masm
.branchIfClassIsNotTypedArray(scratch
, failure
->label());
2554 bool CacheIRCompiler::emitGuardIsFixedLengthTypedArray(ObjOperandId objId
) {
2555 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2557 Register obj
= allocator
.useRegister(masm
, objId
);
2558 AutoScratchRegister
scratch(allocator
, masm
);
2560 FailurePath
* failure
;
2561 if (!addFailurePath(&failure
)) {
2565 masm
.loadObjClassUnsafe(obj
, scratch
);
2566 masm
.branchIfClassIsNotFixedLengthTypedArray(scratch
, failure
->label());
2570 bool CacheIRCompiler::emitGuardIsNotDOMProxy(ObjOperandId objId
) {
2571 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2572 Register obj
= allocator
.useRegister(masm
, objId
);
2573 AutoScratchRegister
scratch(allocator
, masm
);
2575 FailurePath
* failure
;
2576 if (!addFailurePath(&failure
)) {
2580 masm
.branchTestProxyHandlerFamily(Assembler::Equal
, obj
, scratch
,
2581 GetDOMProxyHandlerFamily(),
2586 bool CacheIRCompiler::emitGuardNoDenseElements(ObjOperandId objId
) {
2587 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2588 Register obj
= allocator
.useRegister(masm
, objId
);
2589 AutoScratchRegister
scratch(allocator
, masm
);
2591 FailurePath
* failure
;
2592 if (!addFailurePath(&failure
)) {
2596 // Load obj->elements.
2597 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch
);
2599 // Make sure there are no dense elements.
2600 Address
initLength(scratch
, ObjectElements::offsetOfInitializedLength());
2601 masm
.branch32(Assembler::NotEqual
, initLength
, Imm32(0), failure
->label());
2605 bool CacheIRCompiler::emitGuardSpecificInt32(Int32OperandId numId
,
2607 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2608 Register num
= allocator
.useRegister(masm
, numId
);
2610 FailurePath
* failure
;
2611 if (!addFailurePath(&failure
)) {
2615 masm
.branch32(Assembler::NotEqual
, num
, Imm32(expected
), failure
->label());
2619 bool CacheIRCompiler::emitGuardStringToInt32(StringOperandId strId
,
2620 Int32OperandId resultId
) {
2621 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2622 Register str
= allocator
.useRegister(masm
, strId
);
2623 Register output
= allocator
.defineRegister(masm
, resultId
);
2624 AutoScratchRegister
scratch(allocator
, masm
);
2626 FailurePath
* failure
;
2627 if (!addFailurePath(&failure
)) {
2631 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
2632 liveVolatileFloatRegs());
2633 masm
.guardStringToInt32(str
, output
, scratch
, volatileRegs
, failure
->label());
2637 bool CacheIRCompiler::emitGuardStringToNumber(StringOperandId strId
,
2638 NumberOperandId resultId
) {
2639 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2640 Register str
= allocator
.useRegister(masm
, strId
);
2641 ValueOperand output
= allocator
.defineValueRegister(masm
, resultId
);
2642 AutoScratchRegister
scratch(allocator
, masm
);
2644 FailurePath
* failure
;
2645 if (!addFailurePath(&failure
)) {
2650 // Use indexed value as fast path if possible.
2651 masm
.loadStringIndexValue(str
, scratch
, &vmCall
);
2652 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
);
2657 // Reserve stack for holding the result value of the call.
2658 masm
.reserveStack(sizeof(double));
2659 masm
.moveStackPtrTo(output
.payloadOrValueReg());
2661 // We cannot use callVM, as callVM expects to be able to clobber all
2662 // operands, however, since this op is not the last in the generated IC, we
2663 // want to be able to reference other live values.
2664 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
2665 liveVolatileFloatRegs());
2666 masm
.PushRegsInMask(volatileRegs
);
2668 using Fn
= bool (*)(JSContext
* cx
, JSString
* str
, double* result
);
2669 masm
.setupUnalignedABICall(scratch
);
2670 masm
.loadJSContext(scratch
);
2671 masm
.passABIArg(scratch
);
2672 masm
.passABIArg(str
);
2673 masm
.passABIArg(output
.payloadOrValueReg());
2674 masm
.callWithABI
<Fn
, js::StringToNumberPure
>();
2675 masm
.storeCallPointerResult(scratch
);
2677 LiveRegisterSet ignore
;
2678 ignore
.add(scratch
);
2679 masm
.PopRegsInMaskIgnore(volatileRegs
, ignore
);
2682 masm
.branchIfTrueBool(scratch
, &ok
);
2684 // OOM path, recovered by StringToNumberPure.
2686 // Use addToStackPtr instead of freeStack as freeStack tracks stack height
2687 // flow-insensitively, and using it twice would confuse the stack height
2689 masm
.addToStackPtr(Imm32(sizeof(double)));
2690 masm
.jump(failure
->label());
2695 ScratchDoubleScope
fpscratch(masm
);
2696 masm
.loadDouble(Address(output
.payloadOrValueReg(), 0), fpscratch
);
2697 masm
.boxDouble(fpscratch
, output
, fpscratch
);
2699 masm
.freeStack(sizeof(double));
2705 bool CacheIRCompiler::emitNumberParseIntResult(StringOperandId strId
,
2706 Int32OperandId radixId
) {
2707 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2709 AutoCallVM
callvm(masm
, this, allocator
);
2711 Register str
= allocator
.useRegister(masm
, strId
);
2712 Register radix
= allocator
.useRegister(masm
, radixId
);
2713 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, callvm
.output());
2717 masm
.branch32(Assembler::Equal
, radix
, Imm32(0), &ok
);
2718 masm
.branch32(Assembler::Equal
, radix
, Imm32(10), &ok
);
2719 masm
.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
2723 // Discard the stack to ensure it's balanced when we skip the vm-call.
2724 allocator
.discardStack(masm
);
2726 // Use indexed value as fast path if possible.
2728 masm
.loadStringIndexValue(str
, scratch
, &vmCall
);
2729 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, callvm
.outputValueReg());
2738 using Fn
= bool (*)(JSContext
*, HandleString
, int32_t, MutableHandleValue
);
2739 callvm
.call
<Fn
, js::NumberParseInt
>();
2745 bool CacheIRCompiler::emitDoubleParseIntResult(NumberOperandId numId
) {
2746 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2748 AutoOutputRegister
output(*this);
2749 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
2750 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg0
);
2751 AutoAvailableFloatRegister
floatScratch2(*this, FloatReg1
);
2753 FailurePath
* failure
;
2754 if (!addFailurePath(&failure
)) {
2758 allocator
.ensureDoubleRegister(masm
, numId
, floatScratch1
);
2760 masm
.branchDouble(Assembler::DoubleUnordered
, floatScratch1
, floatScratch1
,
2762 masm
.branchTruncateDoubleToInt32(floatScratch1
, scratch
, failure
->label());
2765 masm
.branch32(Assembler::NotEqual
, scratch
, Imm32(0), &ok
);
2767 // Accept both +0 and -0 and return 0.
2768 masm
.loadConstantDouble(0.0, floatScratch2
);
2769 masm
.branchDouble(Assembler::DoubleEqual
, floatScratch1
, floatScratch2
,
2772 // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
2773 masm
.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW
, floatScratch2
);
2774 masm
.branchDouble(Assembler::DoubleLessThan
, floatScratch1
, floatScratch2
,
2779 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
2783 bool CacheIRCompiler::emitBooleanToNumber(BooleanOperandId booleanId
,
2784 NumberOperandId resultId
) {
2785 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2786 Register boolean
= allocator
.useRegister(masm
, booleanId
);
2787 ValueOperand output
= allocator
.defineValueRegister(masm
, resultId
);
2788 masm
.tagValue(JSVAL_TYPE_INT32
, boolean
, output
);
2792 bool CacheIRCompiler::emitGuardStringToIndex(StringOperandId strId
,
2793 Int32OperandId resultId
) {
2794 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2795 Register str
= allocator
.useRegister(masm
, strId
);
2796 Register output
= allocator
.defineRegister(masm
, resultId
);
2798 FailurePath
* failure
;
2799 if (!addFailurePath(&failure
)) {
2804 masm
.loadStringIndexValue(str
, output
, &vmCall
);
2809 LiveRegisterSet
save(GeneralRegisterSet::Volatile(),
2810 liveVolatileFloatRegs());
2811 masm
.PushRegsInMask(save
);
2813 using Fn
= int32_t (*)(JSString
* str
);
2814 masm
.setupUnalignedABICall(output
);
2815 masm
.passABIArg(str
);
2816 masm
.callWithABI
<Fn
, GetIndexFromString
>();
2817 masm
.storeCallInt32Result(output
);
2819 LiveRegisterSet ignore
;
2821 masm
.PopRegsInMaskIgnore(save
, ignore
);
2823 // GetIndexFromString returns a negative value on failure.
2824 masm
.branchTest32(Assembler::Signed
, output
, output
, failure
->label());
2831 bool CacheIRCompiler::emitLoadProto(ObjOperandId objId
, ObjOperandId resultId
) {
2832 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2833 Register obj
= allocator
.useRegister(masm
, objId
);
2834 Register reg
= allocator
.defineRegister(masm
, resultId
);
2835 masm
.loadObjProto(obj
, reg
);
2838 // We shouldn't encounter a null or lazy proto.
2839 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto
) == 1);
2842 masm
.branchPtr(Assembler::Above
, reg
, ImmWord(1), &done
);
2843 masm
.assumeUnreachable("Unexpected null or lazy proto in CacheIR LoadProto");
2849 bool CacheIRCompiler::emitLoadEnclosingEnvironment(ObjOperandId objId
,
2850 ObjOperandId resultId
) {
2851 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2852 Register obj
= allocator
.useRegister(masm
, objId
);
2853 Register reg
= allocator
.defineRegister(masm
, resultId
);
2855 Address(obj
, EnvironmentObject::offsetOfEnclosingEnvironment()), reg
);
2859 bool CacheIRCompiler::emitLoadWrapperTarget(ObjOperandId objId
,
2860 ObjOperandId resultId
) {
2861 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2862 Register obj
= allocator
.useRegister(masm
, objId
);
2863 Register reg
= allocator
.defineRegister(masm
, resultId
);
2865 masm
.loadPtr(Address(obj
, ProxyObject::offsetOfReservedSlots()), reg
);
2867 Address(reg
, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()), reg
);
2871 bool CacheIRCompiler::emitLoadValueTag(ValOperandId valId
,
2872 ValueTagOperandId resultId
) {
2873 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2874 ValueOperand val
= allocator
.useValueRegister(masm
, valId
);
2875 Register res
= allocator
.defineRegister(masm
, resultId
);
2877 Register tag
= masm
.extractTag(val
, res
);
2884 bool CacheIRCompiler::emitLoadDOMExpandoValue(ObjOperandId objId
,
2885 ValOperandId resultId
) {
2886 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2887 Register obj
= allocator
.useRegister(masm
, objId
);
2888 ValueOperand val
= allocator
.defineValueRegister(masm
, resultId
);
2890 masm
.loadPtr(Address(obj
, ProxyObject::offsetOfReservedSlots()),
2892 masm
.loadValue(Address(val
.scratchReg(),
2893 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
2898 bool CacheIRCompiler::emitLoadDOMExpandoValueIgnoreGeneration(
2899 ObjOperandId objId
, ValOperandId resultId
) {
2900 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2901 Register obj
= allocator
.useRegister(masm
, objId
);
2902 ValueOperand output
= allocator
.defineValueRegister(masm
, resultId
);
2904 // Determine the expando's Address.
2905 Register scratch
= output
.scratchReg();
2906 masm
.loadPtr(Address(obj
, ProxyObject::offsetOfReservedSlots()), scratch
);
2907 Address
expandoAddr(scratch
,
2908 js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
2911 // Private values are stored as doubles, so assert we have a double.
2913 masm
.branchTestDouble(Assembler::Equal
, expandoAddr
, &ok
);
2914 masm
.assumeUnreachable("DOM expando is not a PrivateValue!");
2918 // Load the ExpandoAndGeneration* from the PrivateValue.
2919 masm
.loadPrivate(expandoAddr
, scratch
);
2921 // Load expandoAndGeneration->expando into the output Value register.
2922 masm
.loadValue(Address(scratch
, ExpandoAndGeneration::offsetOfExpando()),
2927 bool CacheIRCompiler::emitLoadUndefinedResult() {
2928 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2929 AutoOutputRegister
output(*this);
2930 masm
.moveValue(UndefinedValue(), output
.valueReg());
2934 static void EmitStoreBoolean(MacroAssembler
& masm
, bool b
,
2935 const AutoOutputRegister
& output
) {
2936 if (output
.hasValue()) {
2937 Value val
= BooleanValue(b
);
2938 masm
.moveValue(val
, output
.valueReg());
2940 MOZ_ASSERT(output
.type() == JSVAL_TYPE_BOOLEAN
);
2941 masm
.movePtr(ImmWord(b
), output
.typedReg().gpr());
2945 bool CacheIRCompiler::emitLoadBooleanResult(bool val
) {
2946 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2947 AutoOutputRegister
output(*this);
2948 EmitStoreBoolean(masm
, val
, output
);
2952 bool CacheIRCompiler::emitLoadOperandResult(ValOperandId inputId
) {
2953 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2954 AutoOutputRegister
output(*this);
2955 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
2956 masm
.moveValue(input
, output
.valueReg());
2960 static void EmitStoreResult(MacroAssembler
& masm
, Register reg
,
2962 const AutoOutputRegister
& output
) {
2963 if (output
.hasValue()) {
2964 masm
.tagValue(type
, reg
, output
.valueReg());
2967 if (type
== JSVAL_TYPE_INT32
&& output
.typedReg().isFloat()) {
2968 masm
.convertInt32ToDouble(reg
, output
.typedReg().fpu());
2971 if (type
== output
.type()) {
2972 masm
.mov(reg
, output
.typedReg().gpr());
2975 masm
.assumeUnreachable("Should have monitored result");
2978 bool CacheIRCompiler::emitLoadInt32ArrayLengthResult(ObjOperandId objId
) {
2979 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
2980 AutoOutputRegister
output(*this);
2981 Register obj
= allocator
.useRegister(masm
, objId
);
2982 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
2984 FailurePath
* failure
;
2985 if (!addFailurePath(&failure
)) {
2989 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch
);
2990 masm
.load32(Address(scratch
, ObjectElements::offsetOfLength()), scratch
);
2992 // Guard length fits in an int32.
2993 masm
.branchTest32(Assembler::Signed
, scratch
, scratch
, failure
->label());
2994 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
2998 bool CacheIRCompiler::emitLoadInt32ArrayLength(ObjOperandId objId
,
2999 Int32OperandId resultId
) {
3000 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3001 Register obj
= allocator
.useRegister(masm
, objId
);
3002 Register res
= allocator
.defineRegister(masm
, resultId
);
3004 FailurePath
* failure
;
3005 if (!addFailurePath(&failure
)) {
3009 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), res
);
3010 masm
.load32(Address(res
, ObjectElements::offsetOfLength()), res
);
3012 // Guard length fits in an int32.
3013 masm
.branchTest32(Assembler::Signed
, res
, res
, failure
->label());
3017 bool CacheIRCompiler::emitDoubleAddResult(NumberOperandId lhsId
,
3018 NumberOperandId rhsId
) {
3019 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3020 AutoOutputRegister
output(*this);
3022 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
3023 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg1
);
3025 allocator
.ensureDoubleRegister(masm
, lhsId
, floatScratch0
);
3026 allocator
.ensureDoubleRegister(masm
, rhsId
, floatScratch1
);
3028 masm
.addDouble(floatScratch1
, floatScratch0
);
3029 masm
.boxDouble(floatScratch0
, output
.valueReg(), floatScratch0
);
3033 bool CacheIRCompiler::emitDoubleSubResult(NumberOperandId lhsId
,
3034 NumberOperandId rhsId
) {
3035 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3036 AutoOutputRegister
output(*this);
3038 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
3039 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg1
);
3041 allocator
.ensureDoubleRegister(masm
, lhsId
, floatScratch0
);
3042 allocator
.ensureDoubleRegister(masm
, rhsId
, floatScratch1
);
3044 masm
.subDouble(floatScratch1
, floatScratch0
);
3045 masm
.boxDouble(floatScratch0
, output
.valueReg(), floatScratch0
);
3049 bool CacheIRCompiler::emitDoubleMulResult(NumberOperandId lhsId
,
3050 NumberOperandId rhsId
) {
3051 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3052 AutoOutputRegister
output(*this);
3054 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
3055 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg1
);
3057 allocator
.ensureDoubleRegister(masm
, lhsId
, floatScratch0
);
3058 allocator
.ensureDoubleRegister(masm
, rhsId
, floatScratch1
);
3060 masm
.mulDouble(floatScratch1
, floatScratch0
);
3061 masm
.boxDouble(floatScratch0
, output
.valueReg(), floatScratch0
);
3065 bool CacheIRCompiler::emitDoubleDivResult(NumberOperandId lhsId
,
3066 NumberOperandId rhsId
) {
3067 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3068 AutoOutputRegister
output(*this);
3070 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
3071 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg1
);
3073 allocator
.ensureDoubleRegister(masm
, lhsId
, floatScratch0
);
3074 allocator
.ensureDoubleRegister(masm
, rhsId
, floatScratch1
);
3076 masm
.divDouble(floatScratch1
, floatScratch0
);
3077 masm
.boxDouble(floatScratch0
, output
.valueReg(), floatScratch0
);
3081 bool CacheIRCompiler::emitDoubleModResult(NumberOperandId lhsId
,
3082 NumberOperandId rhsId
) {
3083 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3084 AutoOutputRegister
output(*this);
3085 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3087 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
3088 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg1
);
3090 allocator
.ensureDoubleRegister(masm
, lhsId
, floatScratch0
);
3091 allocator
.ensureDoubleRegister(masm
, rhsId
, floatScratch1
);
3093 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
3094 masm
.PushRegsInMask(save
);
3096 using Fn
= double (*)(double a
, double b
);
3097 masm
.setupUnalignedABICall(scratch
);
3098 masm
.passABIArg(floatScratch0
, ABIType::Float64
);
3099 masm
.passABIArg(floatScratch1
, ABIType::Float64
);
3100 masm
.callWithABI
<Fn
, js::NumberMod
>(ABIType::Float64
);
3101 masm
.storeCallFloatResult(floatScratch0
);
3103 LiveRegisterSet ignore
;
3104 ignore
.add(floatScratch0
);
3105 masm
.PopRegsInMaskIgnore(save
, ignore
);
3107 masm
.boxDouble(floatScratch0
, output
.valueReg(), floatScratch0
);
3111 bool CacheIRCompiler::emitDoublePowResult(NumberOperandId lhsId
,
3112 NumberOperandId rhsId
) {
3113 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3114 AutoOutputRegister
output(*this);
3115 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3117 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
3118 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg1
);
3120 allocator
.ensureDoubleRegister(masm
, lhsId
, floatScratch0
);
3121 allocator
.ensureDoubleRegister(masm
, rhsId
, floatScratch1
);
3123 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
3124 masm
.PushRegsInMask(save
);
3126 using Fn
= double (*)(double x
, double y
);
3127 masm
.setupUnalignedABICall(scratch
);
3128 masm
.passABIArg(floatScratch0
, ABIType::Float64
);
3129 masm
.passABIArg(floatScratch1
, ABIType::Float64
);
3130 masm
.callWithABI
<Fn
, js::ecmaPow
>(ABIType::Float64
);
3131 masm
.storeCallFloatResult(floatScratch0
);
3133 LiveRegisterSet ignore
;
3134 ignore
.add(floatScratch0
);
3135 masm
.PopRegsInMaskIgnore(save
, ignore
);
3137 masm
.boxDouble(floatScratch0
, output
.valueReg(), floatScratch0
);
3142 bool CacheIRCompiler::emitInt32AddResult(Int32OperandId lhsId
,
3143 Int32OperandId rhsId
) {
3144 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3145 AutoOutputRegister
output(*this);
3146 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3148 Register lhs
= allocator
.useRegister(masm
, lhsId
);
3149 Register rhs
= allocator
.useRegister(masm
, rhsId
);
3151 FailurePath
* failure
;
3152 if (!addFailurePath(&failure
)) {
3156 masm
.mov(rhs
, scratch
);
3157 masm
.branchAdd32(Assembler::Overflow
, lhs
, scratch
, failure
->label());
3158 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3162 bool CacheIRCompiler::emitInt32SubResult(Int32OperandId lhsId
,
3163 Int32OperandId rhsId
) {
3164 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3165 AutoOutputRegister
output(*this);
3166 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3167 Register lhs
= allocator
.useRegister(masm
, lhsId
);
3168 Register rhs
= allocator
.useRegister(masm
, rhsId
);
3170 FailurePath
* failure
;
3171 if (!addFailurePath(&failure
)) {
3175 masm
.mov(lhs
, scratch
);
3176 masm
.branchSub32(Assembler::Overflow
, rhs
, scratch
, failure
->label());
3177 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3182 bool CacheIRCompiler::emitInt32MulResult(Int32OperandId lhsId
,
3183 Int32OperandId rhsId
) {
3184 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3185 AutoOutputRegister
output(*this);
3186 Register lhs
= allocator
.useRegister(masm
, lhsId
);
3187 Register rhs
= allocator
.useRegister(masm
, rhsId
);
3188 AutoScratchRegister
scratch(allocator
, masm
);
3189 AutoScratchRegisterMaybeOutput
scratch2(allocator
, masm
, output
);
3191 FailurePath
* failure
;
3192 if (!addFailurePath(&failure
)) {
3196 Label maybeNegZero
, done
;
3197 masm
.mov(lhs
, scratch
);
3198 masm
.branchMul32(Assembler::Overflow
, rhs
, scratch
, failure
->label());
3199 masm
.branchTest32(Assembler::Zero
, scratch
, scratch
, &maybeNegZero
);
3202 masm
.bind(&maybeNegZero
);
3203 masm
.mov(lhs
, scratch2
);
3204 // Result is -0 if exactly one of lhs or rhs is negative.
3205 masm
.or32(rhs
, scratch2
);
3206 masm
.branchTest32(Assembler::Signed
, scratch2
, scratch2
, failure
->label());
3209 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3213 bool CacheIRCompiler::emitInt32DivResult(Int32OperandId lhsId
,
3214 Int32OperandId rhsId
) {
3215 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3216 AutoOutputRegister
output(*this);
3217 Register lhs
= allocator
.useRegister(masm
, lhsId
);
3218 Register rhs
= allocator
.useRegister(masm
, rhsId
);
3219 AutoScratchRegister
rem(allocator
, masm
);
3220 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3222 FailurePath
* failure
;
3223 if (!addFailurePath(&failure
)) {
3227 // Prevent division by 0.
3228 masm
.branchTest32(Assembler::Zero
, rhs
, rhs
, failure
->label());
3230 // Prevent -2147483648 / -1.
3232 masm
.branch32(Assembler::NotEqual
, lhs
, Imm32(INT32_MIN
), ¬Overflow
);
3233 masm
.branch32(Assembler::Equal
, rhs
, Imm32(-1), failure
->label());
3234 masm
.bind(¬Overflow
);
3236 // Prevent negative 0.
3238 masm
.branchTest32(Assembler::NonZero
, lhs
, lhs
, ¬Zero
);
3239 masm
.branchTest32(Assembler::Signed
, rhs
, rhs
, failure
->label());
3240 masm
.bind(¬Zero
);
3242 masm
.mov(lhs
, scratch
);
3243 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
3244 liveVolatileFloatRegs());
3245 masm
.flexibleDivMod32(rhs
, scratch
, rem
, false, volatileRegs
);
3247 // A remainder implies a double result.
3248 masm
.branchTest32(Assembler::NonZero
, rem
, rem
, failure
->label());
3249 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3253 bool CacheIRCompiler::emitInt32ModResult(Int32OperandId lhsId
,
3254 Int32OperandId rhsId
) {
3255 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3256 AutoOutputRegister
output(*this);
3257 Register lhs
= allocator
.useRegister(masm
, lhsId
);
3258 Register rhs
= allocator
.useRegister(masm
, rhsId
);
3259 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3261 FailurePath
* failure
;
3262 if (!addFailurePath(&failure
)) {
3266 // x % 0 results in NaN
3267 masm
.branchTest32(Assembler::Zero
, rhs
, rhs
, failure
->label());
3269 // Prevent -2147483648 % -1.
3271 // Traps on x86 and has undefined behavior on ARM32 (when __aeabi_idivmod is
3274 masm
.branch32(Assembler::NotEqual
, lhs
, Imm32(INT32_MIN
), ¬Overflow
);
3275 masm
.branch32(Assembler::Equal
, rhs
, Imm32(-1), failure
->label());
3276 masm
.bind(¬Overflow
);
3278 masm
.mov(lhs
, scratch
);
3279 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
3280 liveVolatileFloatRegs());
3281 masm
.flexibleRemainder32(rhs
, scratch
, false, volatileRegs
);
3283 // Modulo takes the sign of the dividend; we can't return negative zero here.
3285 masm
.branchTest32(Assembler::NonZero
, scratch
, scratch
, ¬Zero
);
3286 masm
.branchTest32(Assembler::Signed
, lhs
, lhs
, failure
->label());
3287 masm
.bind(¬Zero
);
3289 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3294 bool CacheIRCompiler::emitInt32PowResult(Int32OperandId lhsId
,
3295 Int32OperandId rhsId
) {
3296 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3297 AutoOutputRegister
output(*this);
3298 Register base
= allocator
.useRegister(masm
, lhsId
);
3299 Register power
= allocator
.useRegister(masm
, rhsId
);
3300 AutoScratchRegisterMaybeOutput
scratch1(allocator
, masm
, output
);
3301 AutoScratchRegisterMaybeOutputType
scratch2(allocator
, masm
, output
);
3302 AutoScratchRegister
scratch3(allocator
, masm
);
3304 FailurePath
* failure
;
3305 if (!addFailurePath(&failure
)) {
3309 masm
.pow32(base
, power
, scratch1
, scratch2
, scratch3
, failure
->label());
3311 masm
.tagValue(JSVAL_TYPE_INT32
, scratch1
, output
.valueReg());
3315 bool CacheIRCompiler::emitInt32BitOrResult(Int32OperandId lhsId
,
3316 Int32OperandId rhsId
) {
3317 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3318 AutoOutputRegister
output(*this);
3319 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3321 Register lhs
= allocator
.useRegister(masm
, lhsId
);
3322 Register rhs
= allocator
.useRegister(masm
, rhsId
);
3324 masm
.mov(rhs
, scratch
);
3325 masm
.or32(lhs
, scratch
);
3326 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3330 bool CacheIRCompiler::emitInt32BitXorResult(Int32OperandId lhsId
,
3331 Int32OperandId rhsId
) {
3332 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3333 AutoOutputRegister
output(*this);
3334 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3336 Register lhs
= allocator
.useRegister(masm
, lhsId
);
3337 Register rhs
= allocator
.useRegister(masm
, rhsId
);
3339 masm
.mov(rhs
, scratch
);
3340 masm
.xor32(lhs
, scratch
);
3341 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3345 bool CacheIRCompiler::emitInt32BitAndResult(Int32OperandId lhsId
,
3346 Int32OperandId rhsId
) {
3347 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3348 AutoOutputRegister
output(*this);
3349 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3351 Register lhs
= allocator
.useRegister(masm
, lhsId
);
3352 Register rhs
= allocator
.useRegister(masm
, rhsId
);
3354 masm
.mov(rhs
, scratch
);
3355 masm
.and32(lhs
, scratch
);
3356 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3360 bool CacheIRCompiler::emitInt32LeftShiftResult(Int32OperandId lhsId
,
3361 Int32OperandId rhsId
) {
3362 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3363 AutoOutputRegister
output(*this);
3364 Register lhs
= allocator
.useRegister(masm
, lhsId
);
3365 Register rhs
= allocator
.useRegister(masm
, rhsId
);
3366 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3368 masm
.mov(lhs
, scratch
);
3369 masm
.flexibleLshift32(rhs
, scratch
);
3370 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3375 bool CacheIRCompiler::emitInt32RightShiftResult(Int32OperandId lhsId
,
3376 Int32OperandId rhsId
) {
3377 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3378 AutoOutputRegister
output(*this);
3379 Register lhs
= allocator
.useRegister(masm
, lhsId
);
3380 Register rhs
= allocator
.useRegister(masm
, rhsId
);
3381 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3383 masm
.mov(lhs
, scratch
);
3384 masm
.flexibleRshift32Arithmetic(rhs
, scratch
);
3385 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3390 bool CacheIRCompiler::emitInt32URightShiftResult(Int32OperandId lhsId
,
3391 Int32OperandId rhsId
,
3393 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3394 AutoOutputRegister
output(*this);
3396 Register lhs
= allocator
.useRegister(masm
, lhsId
);
3397 Register rhs
= allocator
.useRegister(masm
, rhsId
);
3398 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3400 FailurePath
* failure
;
3401 if (!addFailurePath(&failure
)) {
3405 masm
.mov(lhs
, scratch
);
3406 masm
.flexibleRshift32(rhs
, scratch
);
3408 ScratchDoubleScope
fpscratch(masm
);
3409 masm
.convertUInt32ToDouble(scratch
, fpscratch
);
3410 masm
.boxDouble(fpscratch
, output
.valueReg(), fpscratch
);
3412 masm
.branchTest32(Assembler::Signed
, scratch
, scratch
, failure
->label());
3413 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3418 bool CacheIRCompiler::emitInt32NegationResult(Int32OperandId inputId
) {
3419 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3420 AutoOutputRegister
output(*this);
3421 Register val
= allocator
.useRegister(masm
, inputId
);
3422 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3424 FailurePath
* failure
;
3425 if (!addFailurePath(&failure
)) {
3429 // Guard against 0 and MIN_INT by checking if low 31-bits are all zero.
3430 // Both of these result in a double.
3431 masm
.branchTest32(Assembler::Zero
, val
, Imm32(0x7fffffff), failure
->label());
3432 masm
.mov(val
, scratch
);
3433 masm
.neg32(scratch
);
3434 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3438 bool CacheIRCompiler::emitInt32IncResult(Int32OperandId inputId
) {
3439 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3440 AutoOutputRegister
output(*this);
3441 Register input
= allocator
.useRegister(masm
, inputId
);
3442 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3444 FailurePath
* failure
;
3445 if (!addFailurePath(&failure
)) {
3449 masm
.mov(input
, scratch
);
3450 masm
.branchAdd32(Assembler::Overflow
, Imm32(1), scratch
, failure
->label());
3451 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3456 bool CacheIRCompiler::emitInt32DecResult(Int32OperandId inputId
) {
3457 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3458 AutoOutputRegister
output(*this);
3459 Register input
= allocator
.useRegister(masm
, inputId
);
3460 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3462 FailurePath
* failure
;
3463 if (!addFailurePath(&failure
)) {
3467 masm
.mov(input
, scratch
);
3468 masm
.branchSub32(Assembler::Overflow
, Imm32(1), scratch
, failure
->label());
3469 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3474 bool CacheIRCompiler::emitInt32NotResult(Int32OperandId inputId
) {
3475 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3476 AutoOutputRegister
output(*this);
3477 Register val
= allocator
.useRegister(masm
, inputId
);
3478 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3480 masm
.mov(val
, scratch
);
3481 masm
.not32(scratch
);
3482 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3486 bool CacheIRCompiler::emitDoubleNegationResult(NumberOperandId inputId
) {
3487 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3488 AutoOutputRegister
output(*this);
3490 AutoScratchFloatRegister
floatReg(this);
3492 allocator
.ensureDoubleRegister(masm
, inputId
, floatReg
);
3494 masm
.negateDouble(floatReg
);
3495 masm
.boxDouble(floatReg
, output
.valueReg(), floatReg
);
3500 bool CacheIRCompiler::emitDoubleIncDecResult(bool isInc
,
3501 NumberOperandId inputId
) {
3502 AutoOutputRegister
output(*this);
3504 AutoScratchFloatRegister
floatReg(this);
3506 allocator
.ensureDoubleRegister(masm
, inputId
, floatReg
);
3509 ScratchDoubleScope
fpscratch(masm
);
3510 masm
.loadConstantDouble(1.0, fpscratch
);
3512 masm
.addDouble(fpscratch
, floatReg
);
3514 masm
.subDouble(fpscratch
, floatReg
);
3517 masm
.boxDouble(floatReg
, output
.valueReg(), floatReg
);
3522 bool CacheIRCompiler::emitDoubleIncResult(NumberOperandId inputId
) {
3523 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3524 return emitDoubleIncDecResult(true, inputId
);
3527 bool CacheIRCompiler::emitDoubleDecResult(NumberOperandId inputId
) {
3528 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3529 return emitDoubleIncDecResult(false, inputId
);
3532 template <typename Fn
, Fn fn
>
3533 bool CacheIRCompiler::emitBigIntBinaryOperationShared(BigIntOperandId lhsId
,
3534 BigIntOperandId rhsId
) {
3535 AutoCallVM
callvm(masm
, this, allocator
);
3536 Register lhs
= allocator
.useRegister(masm
, lhsId
);
3537 Register rhs
= allocator
.useRegister(masm
, rhsId
);
3544 callvm
.call
<Fn
, fn
>();
3548 bool CacheIRCompiler::emitBigIntAddResult(BigIntOperandId lhsId
,
3549 BigIntOperandId rhsId
) {
3550 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3551 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, HandleBigInt
);
3552 return emitBigIntBinaryOperationShared
<Fn
, BigInt::add
>(lhsId
, rhsId
);
3555 bool CacheIRCompiler::emitBigIntSubResult(BigIntOperandId lhsId
,
3556 BigIntOperandId rhsId
) {
3557 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3558 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, HandleBigInt
);
3559 return emitBigIntBinaryOperationShared
<Fn
, BigInt::sub
>(lhsId
, rhsId
);
3562 bool CacheIRCompiler::emitBigIntMulResult(BigIntOperandId lhsId
,
3563 BigIntOperandId rhsId
) {
3564 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3565 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, HandleBigInt
);
3566 return emitBigIntBinaryOperationShared
<Fn
, BigInt::mul
>(lhsId
, rhsId
);
3569 bool CacheIRCompiler::emitBigIntDivResult(BigIntOperandId lhsId
,
3570 BigIntOperandId rhsId
) {
3571 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3572 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, HandleBigInt
);
3573 return emitBigIntBinaryOperationShared
<Fn
, BigInt::div
>(lhsId
, rhsId
);
3576 bool CacheIRCompiler::emitBigIntModResult(BigIntOperandId lhsId
,
3577 BigIntOperandId rhsId
) {
3578 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3579 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, HandleBigInt
);
3580 return emitBigIntBinaryOperationShared
<Fn
, BigInt::mod
>(lhsId
, rhsId
);
3583 bool CacheIRCompiler::emitBigIntPowResult(BigIntOperandId lhsId
,
3584 BigIntOperandId rhsId
) {
3585 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3586 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, HandleBigInt
);
3587 return emitBigIntBinaryOperationShared
<Fn
, BigInt::pow
>(lhsId
, rhsId
);
3590 bool CacheIRCompiler::emitBigIntBitAndResult(BigIntOperandId lhsId
,
3591 BigIntOperandId rhsId
) {
3592 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3593 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, HandleBigInt
);
3594 return emitBigIntBinaryOperationShared
<Fn
, BigInt::bitAnd
>(lhsId
, rhsId
);
3597 bool CacheIRCompiler::emitBigIntBitOrResult(BigIntOperandId lhsId
,
3598 BigIntOperandId rhsId
) {
3599 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3600 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, HandleBigInt
);
3601 return emitBigIntBinaryOperationShared
<Fn
, BigInt::bitOr
>(lhsId
, rhsId
);
3604 bool CacheIRCompiler::emitBigIntBitXorResult(BigIntOperandId lhsId
,
3605 BigIntOperandId rhsId
) {
3606 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3607 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, HandleBigInt
);
3608 return emitBigIntBinaryOperationShared
<Fn
, BigInt::bitXor
>(lhsId
, rhsId
);
3611 bool CacheIRCompiler::emitBigIntLeftShiftResult(BigIntOperandId lhsId
,
3612 BigIntOperandId rhsId
) {
3613 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3614 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, HandleBigInt
);
3615 return emitBigIntBinaryOperationShared
<Fn
, BigInt::lsh
>(lhsId
, rhsId
);
3618 bool CacheIRCompiler::emitBigIntRightShiftResult(BigIntOperandId lhsId
,
3619 BigIntOperandId rhsId
) {
3620 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3621 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, HandleBigInt
);
3622 return emitBigIntBinaryOperationShared
<Fn
, BigInt::rsh
>(lhsId
, rhsId
);
3625 template <typename Fn
, Fn fn
>
3626 bool CacheIRCompiler::emitBigIntUnaryOperationShared(BigIntOperandId inputId
) {
3627 AutoCallVM
callvm(masm
, this, allocator
);
3628 Register val
= allocator
.useRegister(masm
, inputId
);
3634 callvm
.call
<Fn
, fn
>();
3638 bool CacheIRCompiler::emitBigIntNotResult(BigIntOperandId inputId
) {
3639 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3640 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
);
3641 return emitBigIntUnaryOperationShared
<Fn
, BigInt::bitNot
>(inputId
);
3644 bool CacheIRCompiler::emitBigIntNegationResult(BigIntOperandId inputId
) {
3645 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3646 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
);
3647 return emitBigIntUnaryOperationShared
<Fn
, BigInt::neg
>(inputId
);
3650 bool CacheIRCompiler::emitBigIntIncResult(BigIntOperandId inputId
) {
3651 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3652 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
);
3653 return emitBigIntUnaryOperationShared
<Fn
, BigInt::inc
>(inputId
);
3656 bool CacheIRCompiler::emitBigIntDecResult(BigIntOperandId inputId
) {
3657 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3658 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
);
3659 return emitBigIntUnaryOperationShared
<Fn
, BigInt::dec
>(inputId
);
3662 bool CacheIRCompiler::emitTruncateDoubleToUInt32(NumberOperandId inputId
,
3663 Int32OperandId resultId
) {
3664 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3665 Register res
= allocator
.defineRegister(masm
, resultId
);
3667 AutoScratchFloatRegister
floatReg(this);
3669 allocator
.ensureDoubleRegister(masm
, inputId
, floatReg
);
3671 Label done
, truncateABICall
;
3673 masm
.branchTruncateDoubleMaybeModUint32(floatReg
, res
, &truncateABICall
);
3676 masm
.bind(&truncateABICall
);
3677 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
3678 save
.takeUnchecked(floatReg
);
3680 save
.takeUnchecked(floatReg
.get().asSingle());
3681 masm
.PushRegsInMask(save
);
3683 using Fn
= int32_t (*)(double);
3684 masm
.setupUnalignedABICall(res
);
3685 masm
.passABIArg(floatReg
, ABIType::Float64
);
3686 masm
.callWithABI
<Fn
, JS::ToInt32
>(ABIType::General
,
3687 CheckUnsafeCallWithABI::DontCheckOther
);
3688 masm
.storeCallInt32Result(res
);
3690 LiveRegisterSet ignore
;
3692 masm
.PopRegsInMaskIgnore(save
, ignore
);
3698 bool CacheIRCompiler::emitDoubleToUint8Clamped(NumberOperandId inputId
,
3699 Int32OperandId resultId
) {
3700 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3701 Register res
= allocator
.defineRegister(masm
, resultId
);
3703 AutoScratchFloatRegister
floatReg(this);
3705 allocator
.ensureDoubleRegister(masm
, inputId
, floatReg
);
3707 masm
.clampDoubleToUint8(floatReg
, res
);
3711 bool CacheIRCompiler::emitLoadArgumentsObjectLengthResult(ObjOperandId objId
) {
3712 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3713 AutoOutputRegister
output(*this);
3714 Register obj
= allocator
.useRegister(masm
, objId
);
3715 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3717 FailurePath
* failure
;
3718 if (!addFailurePath(&failure
)) {
3722 masm
.loadArgumentsObjectLength(obj
, scratch
, failure
->label());
3724 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3728 bool CacheIRCompiler::emitLoadArgumentsObjectLength(ObjOperandId objId
,
3729 Int32OperandId resultId
) {
3730 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3731 Register obj
= allocator
.useRegister(masm
, objId
);
3732 Register res
= allocator
.defineRegister(masm
, resultId
);
3734 FailurePath
* failure
;
3735 if (!addFailurePath(&failure
)) {
3739 masm
.loadArgumentsObjectLength(obj
, res
, failure
->label());
3743 bool CacheIRCompiler::emitLoadArrayBufferByteLengthInt32Result(
3744 ObjOperandId objId
) {
3745 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3746 AutoOutputRegister
output(*this);
3747 Register obj
= allocator
.useRegister(masm
, objId
);
3748 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3750 FailurePath
* failure
;
3751 if (!addFailurePath(&failure
)) {
3755 masm
.loadArrayBufferByteLengthIntPtr(obj
, scratch
);
3756 masm
.guardNonNegativeIntPtrToInt32(scratch
, failure
->label());
3757 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3761 bool CacheIRCompiler::emitLoadArrayBufferByteLengthDoubleResult(
3762 ObjOperandId objId
) {
3763 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3764 AutoOutputRegister
output(*this);
3765 Register obj
= allocator
.useRegister(masm
, objId
);
3766 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3768 ScratchDoubleScope
fpscratch(masm
);
3769 masm
.loadArrayBufferByteLengthIntPtr(obj
, scratch
);
3770 masm
.convertIntPtrToDouble(scratch
, fpscratch
);
3771 masm
.boxDouble(fpscratch
, output
.valueReg(), fpscratch
);
3775 bool CacheIRCompiler::emitLoadArrayBufferViewLengthInt32Result(
3776 ObjOperandId objId
) {
3777 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3778 AutoOutputRegister
output(*this);
3779 Register obj
= allocator
.useRegister(masm
, objId
);
3780 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3782 FailurePath
* failure
;
3783 if (!addFailurePath(&failure
)) {
3787 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch
);
3788 masm
.guardNonNegativeIntPtrToInt32(scratch
, failure
->label());
3789 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3793 bool CacheIRCompiler::emitLoadArrayBufferViewLengthDoubleResult(
3794 ObjOperandId objId
) {
3795 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3796 AutoOutputRegister
output(*this);
3797 Register obj
= allocator
.useRegister(masm
, objId
);
3798 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3800 ScratchDoubleScope
fpscratch(masm
);
3801 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch
);
3802 masm
.convertIntPtrToDouble(scratch
, fpscratch
);
3803 masm
.boxDouble(fpscratch
, output
.valueReg(), fpscratch
);
3807 bool CacheIRCompiler::emitLoadBoundFunctionNumArgs(ObjOperandId objId
,
3808 Int32OperandId resultId
) {
3809 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3811 Register obj
= allocator
.useRegister(masm
, objId
);
3812 Register output
= allocator
.defineRegister(masm
, resultId
);
3814 masm
.unboxInt32(Address(obj
, BoundFunctionObject::offsetOfFlagsSlot()),
3816 masm
.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift
), output
);
3820 bool CacheIRCompiler::emitLoadBoundFunctionTarget(ObjOperandId objId
,
3821 ObjOperandId resultId
) {
3822 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3824 Register obj
= allocator
.useRegister(masm
, objId
);
3825 Register output
= allocator
.defineRegister(masm
, resultId
);
3827 masm
.unboxObject(Address(obj
, BoundFunctionObject::offsetOfTargetSlot()),
3832 bool CacheIRCompiler::emitGuardBoundFunctionIsConstructor(ObjOperandId objId
) {
3833 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3835 Register obj
= allocator
.useRegister(masm
, objId
);
3837 FailurePath
* failure
;
3838 if (!addFailurePath(&failure
)) {
3842 Address
flagsSlot(obj
, BoundFunctionObject::offsetOfFlagsSlot());
3843 masm
.branchTest32(Assembler::Zero
, flagsSlot
,
3844 Imm32(BoundFunctionObject::IsConstructorFlag
),
3849 bool CacheIRCompiler::emitGuardObjectIdentity(ObjOperandId obj1Id
,
3850 ObjOperandId obj2Id
) {
3851 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3853 Register obj1
= allocator
.useRegister(masm
, obj1Id
);
3854 Register obj2
= allocator
.useRegister(masm
, obj2Id
);
3856 FailurePath
* failure
;
3857 if (!addFailurePath(&failure
)) {
3861 masm
.branchPtr(Assembler::NotEqual
, obj1
, obj2
, failure
->label());
3865 bool CacheIRCompiler::emitLoadFunctionLengthResult(ObjOperandId objId
) {
3866 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3867 AutoOutputRegister
output(*this);
3868 Register obj
= allocator
.useRegister(masm
, objId
);
3869 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3871 FailurePath
* failure
;
3872 if (!addFailurePath(&failure
)) {
3876 // Get the JSFunction flags and arg count.
3877 masm
.load32(Address(obj
, JSFunction::offsetOfFlagsAndArgCount()), scratch
);
3879 // Functions with a SelfHostedLazyScript must be compiled with the slow-path
3880 // before the function length is known. If the length was previously resolved,
3881 // the length property may be shadowed.
3883 Assembler::NonZero
, scratch
,
3884 Imm32(FunctionFlags::SELFHOSTLAZY
| FunctionFlags::RESOLVED_LENGTH
),
3887 masm
.loadFunctionLength(obj
, scratch
, scratch
, failure
->label());
3888 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3892 bool CacheIRCompiler::emitLoadFunctionNameResult(ObjOperandId objId
) {
3893 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3894 AutoOutputRegister
output(*this);
3895 Register obj
= allocator
.useRegister(masm
, objId
);
3896 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3898 FailurePath
* failure
;
3899 if (!addFailurePath(&failure
)) {
3903 masm
.loadFunctionName(obj
, scratch
, ImmGCPtr(cx_
->names().empty_
),
3906 masm
.tagValue(JSVAL_TYPE_STRING
, scratch
, output
.valueReg());
3910 bool CacheIRCompiler::emitLinearizeForCharAccess(StringOperandId strId
,
3911 Int32OperandId indexId
,
3912 StringOperandId resultId
) {
3913 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3914 Register str
= allocator
.useRegister(masm
, strId
);
3915 Register index
= allocator
.useRegister(masm
, indexId
);
3916 Register result
= allocator
.defineRegister(masm
, resultId
);
3917 AutoScratchRegister
scratch(allocator
, masm
);
3919 FailurePath
* failure
;
3920 if (!addFailurePath(&failure
)) {
3925 masm
.movePtr(str
, result
);
3927 // We can omit the bounds check, because we only compare the index against the
3928 // string length. In the worst case we unnecessarily linearize the string
3929 // when the index is out-of-bounds.
3931 masm
.branchIfCanLoadStringChar(str
, index
, scratch
, &done
);
3933 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
3934 liveVolatileFloatRegs());
3935 masm
.PushRegsInMask(volatileRegs
);
3937 using Fn
= JSLinearString
* (*)(JSString
*);
3938 masm
.setupUnalignedABICall(scratch
);
3939 masm
.passABIArg(str
);
3940 masm
.callWithABI
<Fn
, js::jit::LinearizeForCharAccessPure
>();
3941 masm
.storeCallPointerResult(result
);
3943 LiveRegisterSet ignore
;
3945 masm
.PopRegsInMaskIgnore(volatileRegs
, ignore
);
3947 masm
.branchTestPtr(Assembler::Zero
, result
, result
, failure
->label());
3954 bool CacheIRCompiler::emitLinearizeForCodePointAccess(
3955 StringOperandId strId
, Int32OperandId indexId
, StringOperandId resultId
) {
3956 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3957 Register str
= allocator
.useRegister(masm
, strId
);
3958 Register index
= allocator
.useRegister(masm
, indexId
);
3959 Register result
= allocator
.defineRegister(masm
, resultId
);
3960 AutoScratchRegister
scratch1(allocator
, masm
);
3961 AutoScratchRegister
scratch2(allocator
, masm
);
3963 FailurePath
* failure
;
3964 if (!addFailurePath(&failure
)) {
3969 masm
.movePtr(str
, result
);
3971 // We can omit the bounds check, because we only compare the index against the
3972 // string length. In the worst case we unnecessarily linearize the string
3973 // when the index is out-of-bounds.
3975 masm
.branchIfCanLoadStringCodePoint(str
, index
, scratch1
, scratch2
, &done
);
3977 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
3978 liveVolatileFloatRegs());
3979 masm
.PushRegsInMask(volatileRegs
);
3981 using Fn
= JSLinearString
* (*)(JSString
*);
3982 masm
.setupUnalignedABICall(scratch1
);
3983 masm
.passABIArg(str
);
3984 masm
.callWithABI
<Fn
, js::jit::LinearizeForCharAccessPure
>();
3985 masm
.storeCallPointerResult(result
);
3987 LiveRegisterSet ignore
;
3989 masm
.PopRegsInMaskIgnore(volatileRegs
, ignore
);
3991 masm
.branchTestPtr(Assembler::Zero
, result
, result
, failure
->label());
3998 bool CacheIRCompiler::emitToRelativeStringIndex(Int32OperandId indexId
,
3999 StringOperandId strId
,
4000 Int32OperandId resultId
) {
4001 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4002 Register index
= allocator
.useRegister(masm
, indexId
);
4003 Register str
= allocator
.useRegister(masm
, strId
);
4004 Register result
= allocator
.defineRegister(masm
, resultId
);
4006 // If |index| is non-negative, it's an index relative to the start of the
4007 // string. Otherwise it's an index relative to the end of the string.
4008 masm
.move32(Imm32(0), result
);
4009 masm
.cmp32Load32(Assembler::LessThan
, index
, Imm32(0),
4010 Address(str
, JSString::offsetOfLength()), result
);
4011 masm
.add32(index
, result
);
4015 bool CacheIRCompiler::emitLoadStringLengthResult(StringOperandId strId
) {
4016 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4017 AutoOutputRegister
output(*this);
4018 Register str
= allocator
.useRegister(masm
, strId
);
4019 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
4021 masm
.loadStringLength(str
, scratch
);
4022 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
4026 bool CacheIRCompiler::emitLoadStringCharCodeResult(StringOperandId strId
,
4027 Int32OperandId indexId
,
4029 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4030 AutoOutputRegister
output(*this);
4031 Register str
= allocator
.useRegister(masm
, strId
);
4032 Register index
= allocator
.useRegister(masm
, indexId
);
4033 AutoScratchRegisterMaybeOutput
scratch1(allocator
, masm
, output
);
4034 AutoScratchRegisterMaybeOutputType
scratch2(allocator
, masm
, output
);
4035 AutoScratchRegister
scratch3(allocator
, masm
);
4037 // Bounds check, load string char.
4040 FailurePath
* failure
;
4041 if (!addFailurePath(&failure
)) {
4045 masm
.spectreBoundsCheck32(index
, Address(str
, JSString::offsetOfLength()),
4046 scratch1
, failure
->label());
4047 masm
.loadStringChar(str
, index
, scratch1
, scratch2
, scratch3
,
4050 // Return NaN for out-of-bounds access.
4051 masm
.moveValue(JS::NaNValue(), output
.valueReg());
4053 // The bounds check mustn't use a scratch register which aliases the output.
4054 MOZ_ASSERT(!output
.valueReg().aliases(scratch3
));
4056 // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
4057 // guaranteed to see no nested ropes.
4059 masm
.spectreBoundsCheck32(index
, Address(str
, JSString::offsetOfLength()),
4061 masm
.loadStringChar(str
, index
, scratch1
, scratch2
, scratch3
, &loadFailed
);
4064 masm
.jump(&loadedChar
);
4065 masm
.bind(&loadFailed
);
4066 masm
.assumeUnreachable("loadStringChar can't fail for linear strings");
4067 masm
.bind(&loadedChar
);
4070 masm
.tagValue(JSVAL_TYPE_INT32
, scratch1
, output
.valueReg());
4075 bool CacheIRCompiler::emitLoadStringCodePointResult(StringOperandId strId
,
4076 Int32OperandId indexId
,
4078 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4079 AutoOutputRegister
output(*this);
4080 Register str
= allocator
.useRegister(masm
, strId
);
4081 Register index
= allocator
.useRegister(masm
, indexId
);
4082 AutoScratchRegisterMaybeOutput
scratch1(allocator
, masm
, output
);
4083 AutoScratchRegisterMaybeOutputType
scratch2(allocator
, masm
, output
);
4084 AutoScratchRegister
scratch3(allocator
, masm
);
4086 // Bounds check, load string char.
4089 FailurePath
* failure
;
4090 if (!addFailurePath(&failure
)) {
4094 masm
.spectreBoundsCheck32(index
, Address(str
, JSString::offsetOfLength()),
4095 scratch1
, failure
->label());
4096 masm
.loadStringCodePoint(str
, index
, scratch1
, scratch2
, scratch3
,
4099 // Return undefined for out-of-bounds access.
4100 masm
.moveValue(JS::UndefinedValue(), output
.valueReg());
4102 // The bounds check mustn't use a scratch register which aliases the output.
4103 MOZ_ASSERT(!output
.valueReg().aliases(scratch3
));
4105 // This CacheIR op is always preceded by |LinearizeForCodePointAccess|, so
4106 // we're guaranteed to see no nested ropes or split surrogates.
4108 masm
.spectreBoundsCheck32(index
, Address(str
, JSString::offsetOfLength()),
4110 masm
.loadStringCodePoint(str
, index
, scratch1
, scratch2
, scratch3
,
4114 masm
.jump(&loadedChar
);
4115 masm
.bind(&loadFailed
);
4116 masm
.assumeUnreachable("loadStringCodePoint can't fail for linear strings");
4117 masm
.bind(&loadedChar
);
4120 masm
.tagValue(JSVAL_TYPE_INT32
, scratch1
, output
.valueReg());
4125 bool CacheIRCompiler::emitNewStringObjectResult(uint32_t templateObjectOffset
,
4126 StringOperandId strId
) {
4127 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4129 AutoCallVM
callvm(masm
, this, allocator
);
4131 Register str
= allocator
.useRegister(masm
, strId
);
4136 using Fn
= JSObject
* (*)(JSContext
*, HandleString
);
4137 callvm
.call
<Fn
, NewStringObject
>();
4141 bool CacheIRCompiler::emitStringIncludesResult(StringOperandId strId
,
4142 StringOperandId searchStrId
) {
4143 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4145 AutoCallVM
callvm(masm
, this, allocator
);
4147 Register str
= allocator
.useRegister(masm
, strId
);
4148 Register searchStr
= allocator
.useRegister(masm
, searchStrId
);
4151 masm
.Push(searchStr
);
4154 using Fn
= bool (*)(JSContext
*, HandleString
, HandleString
, bool*);
4155 callvm
.call
<Fn
, js::StringIncludes
>();
4159 bool CacheIRCompiler::emitStringIndexOfResult(StringOperandId strId
,
4160 StringOperandId searchStrId
) {
4161 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4163 AutoCallVM
callvm(masm
, this, allocator
);
4165 Register str
= allocator
.useRegister(masm
, strId
);
4166 Register searchStr
= allocator
.useRegister(masm
, searchStrId
);
4169 masm
.Push(searchStr
);
4172 using Fn
= bool (*)(JSContext
*, HandleString
, HandleString
, int32_t*);
4173 callvm
.call
<Fn
, js::StringIndexOf
>();
4177 bool CacheIRCompiler::emitStringLastIndexOfResult(StringOperandId strId
,
4178 StringOperandId searchStrId
) {
4179 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4181 AutoCallVM
callvm(masm
, this, allocator
);
4183 Register str
= allocator
.useRegister(masm
, strId
);
4184 Register searchStr
= allocator
.useRegister(masm
, searchStrId
);
4187 masm
.Push(searchStr
);
4190 using Fn
= bool (*)(JSContext
*, HandleString
, HandleString
, int32_t*);
4191 callvm
.call
<Fn
, js::StringLastIndexOf
>();
4195 bool CacheIRCompiler::emitStringStartsWithResult(StringOperandId strId
,
4196 StringOperandId searchStrId
) {
4197 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4199 AutoCallVM
callvm(masm
, this, allocator
);
4201 Register str
= allocator
.useRegister(masm
, strId
);
4202 Register searchStr
= allocator
.useRegister(masm
, searchStrId
);
4205 masm
.Push(searchStr
);
4208 using Fn
= bool (*)(JSContext
*, HandleString
, HandleString
, bool*);
4209 callvm
.call
<Fn
, js::StringStartsWith
>();
4213 bool CacheIRCompiler::emitStringEndsWithResult(StringOperandId strId
,
4214 StringOperandId searchStrId
) {
4215 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4217 AutoCallVM
callvm(masm
, this, allocator
);
4219 Register str
= allocator
.useRegister(masm
, strId
);
4220 Register searchStr
= allocator
.useRegister(masm
, searchStrId
);
4223 masm
.Push(searchStr
);
4226 using Fn
= bool (*)(JSContext
*, HandleString
, HandleString
, bool*);
4227 callvm
.call
<Fn
, js::StringEndsWith
>();
4231 bool CacheIRCompiler::emitStringToLowerCaseResult(StringOperandId strId
) {
4232 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4234 AutoCallVM
callvm(masm
, this, allocator
);
4236 Register str
= allocator
.useRegister(masm
, strId
);
4241 using Fn
= JSString
* (*)(JSContext
*, HandleString
);
4242 callvm
.call
<Fn
, js::StringToLowerCase
>();
4246 bool CacheIRCompiler::emitStringToUpperCaseResult(StringOperandId strId
) {
4247 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4249 AutoCallVM
callvm(masm
, this, allocator
);
4251 Register str
= allocator
.useRegister(masm
, strId
);
4256 using Fn
= JSString
* (*)(JSContext
*, HandleString
);
4257 callvm
.call
<Fn
, js::StringToUpperCase
>();
4261 bool CacheIRCompiler::emitStringTrimResult(StringOperandId strId
) {
4262 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4264 AutoCallVM
callvm(masm
, this, allocator
);
4266 Register str
= allocator
.useRegister(masm
, strId
);
4271 using Fn
= JSString
* (*)(JSContext
*, HandleString
);
4272 callvm
.call
<Fn
, js::StringTrim
>();
4276 bool CacheIRCompiler::emitStringTrimStartResult(StringOperandId strId
) {
4277 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4279 AutoCallVM
callvm(masm
, this, allocator
);
4281 Register str
= allocator
.useRegister(masm
, strId
);
4286 using Fn
= JSString
* (*)(JSContext
*, HandleString
);
4287 callvm
.call
<Fn
, js::StringTrimStart
>();
4291 bool CacheIRCompiler::emitStringTrimEndResult(StringOperandId strId
) {
4292 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4294 AutoCallVM
callvm(masm
, this, allocator
);
4296 Register str
= allocator
.useRegister(masm
, strId
);
4301 using Fn
= JSString
* (*)(JSContext
*, HandleString
);
4302 callvm
.call
<Fn
, js::StringTrimEnd
>();
4306 bool CacheIRCompiler::emitLoadArgumentsObjectArgResult(ObjOperandId objId
,
4307 Int32OperandId indexId
) {
4308 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4309 AutoOutputRegister
output(*this);
4310 Register obj
= allocator
.useRegister(masm
, objId
);
4311 Register index
= allocator
.useRegister(masm
, indexId
);
4312 AutoScratchRegister
scratch(allocator
, masm
);
4314 FailurePath
* failure
;
4315 if (!addFailurePath(&failure
)) {
4319 masm
.loadArgumentsObjectElement(obj
, index
, output
.valueReg(), scratch
,
4324 bool CacheIRCompiler::emitLoadArgumentsObjectArgHoleResult(
4325 ObjOperandId objId
, Int32OperandId indexId
) {
4326 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4327 AutoOutputRegister
output(*this);
4328 Register obj
= allocator
.useRegister(masm
, objId
);
4329 Register index
= allocator
.useRegister(masm
, indexId
);
4330 AutoScratchRegister
scratch(allocator
, masm
);
4332 FailurePath
* failure
;
4333 if (!addFailurePath(&failure
)) {
4337 masm
.loadArgumentsObjectElementHole(obj
, index
, output
.valueReg(), scratch
,
4342 bool CacheIRCompiler::emitLoadArgumentsObjectArgExistsResult(
4343 ObjOperandId objId
, Int32OperandId indexId
) {
4344 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4345 AutoOutputRegister
output(*this);
4346 Register obj
= allocator
.useRegister(masm
, objId
);
4347 Register index
= allocator
.useRegister(masm
, indexId
);
4348 AutoScratchRegister
scratch1(allocator
, masm
);
4349 AutoScratchRegisterMaybeOutput
scratch2(allocator
, masm
, output
);
4351 FailurePath
* failure
;
4352 if (!addFailurePath(&failure
)) {
4356 masm
.loadArgumentsObjectElementExists(obj
, index
, scratch2
, scratch1
,
4358 EmitStoreResult(masm
, scratch2
, JSVAL_TYPE_BOOLEAN
, output
);
4362 bool CacheIRCompiler::emitLoadDenseElementResult(ObjOperandId objId
,
4363 Int32OperandId indexId
) {
4364 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4365 AutoOutputRegister
output(*this);
4366 Register obj
= allocator
.useRegister(masm
, objId
);
4367 Register index
= allocator
.useRegister(masm
, indexId
);
4368 AutoScratchRegister
scratch1(allocator
, masm
);
4369 AutoScratchRegisterMaybeOutput
scratch2(allocator
, masm
, output
);
4371 FailurePath
* failure
;
4372 if (!addFailurePath(&failure
)) {
4376 // Load obj->elements.
4377 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch1
);
4380 Address
initLength(scratch1
, ObjectElements::offsetOfInitializedLength());
4381 masm
.spectreBoundsCheck32(index
, initLength
, scratch2
, failure
->label());
4384 BaseObjectElementIndex
element(scratch1
, index
);
4385 masm
.branchTestMagic(Assembler::Equal
, element
, failure
->label());
4386 masm
.loadTypedOrValue(element
, output
);
4390 bool CacheIRCompiler::emitGuardInt32IsNonNegative(Int32OperandId indexId
) {
4391 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4392 Register index
= allocator
.useRegister(masm
, indexId
);
4394 FailurePath
* failure
;
4395 if (!addFailurePath(&failure
)) {
4399 masm
.branch32(Assembler::LessThan
, index
, Imm32(0), failure
->label());
4403 bool CacheIRCompiler::emitGuardIndexIsNotDenseElement(ObjOperandId objId
,
4404 Int32OperandId indexId
) {
4405 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4406 Register obj
= allocator
.useRegister(masm
, objId
);
4407 Register index
= allocator
.useRegister(masm
, indexId
);
4408 AutoScratchRegister
scratch(allocator
, masm
);
4409 AutoSpectreBoundsScratchRegister
spectreScratch(allocator
, masm
);
4411 FailurePath
* failure
;
4412 if (!addFailurePath(&failure
)) {
4416 // Load obj->elements.
4417 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch
);
4419 // Ensure index >= initLength or the element is a hole.
4421 Address
capacity(scratch
, ObjectElements::offsetOfInitializedLength());
4422 masm
.spectreBoundsCheck32(index
, capacity
, spectreScratch
, ¬Dense
);
4424 BaseValueIndex
element(scratch
, index
);
4425 masm
.branchTestMagic(Assembler::Equal
, element
, ¬Dense
);
4427 masm
.jump(failure
->label());
4429 masm
.bind(¬Dense
);
4433 bool CacheIRCompiler::emitGuardIndexIsValidUpdateOrAdd(ObjOperandId objId
,
4434 Int32OperandId indexId
) {
4435 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4436 Register obj
= allocator
.useRegister(masm
, objId
);
4437 Register index
= allocator
.useRegister(masm
, indexId
);
4438 AutoScratchRegister
scratch(allocator
, masm
);
4439 AutoSpectreBoundsScratchRegister
spectreScratch(allocator
, masm
);
4441 FailurePath
* failure
;
4442 if (!addFailurePath(&failure
)) {
4446 // Load obj->elements.
4447 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch
);
4451 // If length is writable, branch to &success. All indices are writable.
4452 Address
flags(scratch
, ObjectElements::offsetOfFlags());
4453 masm
.branchTest32(Assembler::Zero
, flags
,
4454 Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH
),
4457 // Otherwise, ensure index is in bounds.
4458 Address
length(scratch
, ObjectElements::offsetOfLength());
4459 masm
.spectreBoundsCheck32(index
, length
, spectreScratch
,
4460 /* failure = */ failure
->label());
4461 masm
.bind(&success
);
4465 bool CacheIRCompiler::emitGuardTagNotEqual(ValueTagOperandId lhsId
,
4466 ValueTagOperandId rhsId
) {
4467 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4468 Register lhs
= allocator
.useRegister(masm
, lhsId
);
4469 Register rhs
= allocator
.useRegister(masm
, rhsId
);
4471 FailurePath
* failure
;
4472 if (!addFailurePath(&failure
)) {
4477 masm
.branch32(Assembler::Equal
, lhs
, rhs
, failure
->label());
4479 // If both lhs and rhs are numbers, can't use tag comparison to do inequality
4481 masm
.branchTestNumber(Assembler::NotEqual
, lhs
, &done
);
4482 masm
.branchTestNumber(Assembler::NotEqual
, rhs
, &done
);
4483 masm
.jump(failure
->label());
4489 bool CacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto(
4490 ObjOperandId objId
, uint32_t shapeWrapperOffset
) {
4491 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4493 Register obj
= allocator
.useRegister(masm
, objId
);
4494 StubFieldOffset
shapeWrapper(shapeWrapperOffset
, StubField::Type::JSObject
);
4496 AutoScratchRegister
scratch(allocator
, masm
);
4497 AutoScratchRegister
scratch2(allocator
, masm
);
4498 AutoScratchRegister
scratch3(allocator
, masm
);
4500 FailurePath
* failure
;
4501 if (!addFailurePath(&failure
)) {
4505 masm
.loadPtr(Address(obj
, ProxyObject::offsetOfReservedSlots()), scratch
);
4506 Address
holderAddress(scratch
,
4507 sizeof(Value
) * GetXrayJitInfo()->xrayHolderSlot
);
4508 Address
expandoAddress(scratch
, NativeObject::getFixedSlotOffset(
4509 GetXrayJitInfo()->holderExpandoSlot
));
4511 masm
.fallibleUnboxObject(holderAddress
, scratch
, failure
->label());
4512 masm
.fallibleUnboxObject(expandoAddress
, scratch
, failure
->label());
4514 // Unwrap the expando before checking its shape.
4515 masm
.loadPtr(Address(scratch
, ProxyObject::offsetOfReservedSlots()), scratch
);
4517 Address(scratch
, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
4520 emitLoadStubField(shapeWrapper
, scratch2
);
4521 LoadShapeWrapperContents(masm
, scratch2
, scratch2
, failure
->label());
4522 masm
.branchTestObjShape(Assembler::NotEqual
, scratch
, scratch2
, scratch3
,
4523 scratch
, failure
->label());
4525 // The reserved slots on the expando should all be in fixed slots.
4526 Address
protoAddress(scratch
, NativeObject::getFixedSlotOffset(
4527 GetXrayJitInfo()->expandoProtoSlot
));
4528 masm
.branchTestUndefined(Assembler::NotEqual
, protoAddress
, failure
->label());
4533 bool CacheIRCompiler::emitGuardXrayNoExpando(ObjOperandId objId
) {
4534 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4536 Register obj
= allocator
.useRegister(masm
, objId
);
4537 AutoScratchRegister
scratch(allocator
, masm
);
4539 FailurePath
* failure
;
4540 if (!addFailurePath(&failure
)) {
4544 masm
.loadPtr(Address(obj
, ProxyObject::offsetOfReservedSlots()), scratch
);
4545 Address
holderAddress(scratch
,
4546 sizeof(Value
) * GetXrayJitInfo()->xrayHolderSlot
);
4547 Address
expandoAddress(scratch
, NativeObject::getFixedSlotOffset(
4548 GetXrayJitInfo()->holderExpandoSlot
));
4551 masm
.fallibleUnboxObject(holderAddress
, scratch
, &done
);
4552 masm
.branchTestObject(Assembler::Equal
, expandoAddress
, failure
->label());
4558 bool CacheIRCompiler::emitGuardNoAllocationMetadataBuilder(
4559 uint32_t builderAddrOffset
) {
4560 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4561 AutoScratchRegister
scratch(allocator
, masm
);
4563 FailurePath
* failure
;
4564 if (!addFailurePath(&failure
)) {
4568 StubFieldOffset
builderField(builderAddrOffset
, StubField::Type::RawPointer
);
4569 emitLoadStubField(builderField
, scratch
);
4570 masm
.branchPtr(Assembler::NotEqual
, Address(scratch
, 0), ImmWord(0),
4576 bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId
,
4577 bool constructing
) {
4578 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4579 Register fun
= allocator
.useRegister(masm
, funId
);
4581 FailurePath
* failure
;
4582 if (!addFailurePath(&failure
)) {
4586 masm
.branchIfFunctionHasNoJitEntry(fun
, constructing
, failure
->label());
4590 bool CacheIRCompiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId
) {
4591 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4592 Register obj
= allocator
.useRegister(masm
, funId
);
4593 AutoScratchRegister
scratch(allocator
, masm
);
4595 FailurePath
* failure
;
4596 if (!addFailurePath(&failure
)) {
4600 masm
.branchIfFunctionHasJitEntry(obj
, /*isConstructing =*/false,
4605 bool CacheIRCompiler::emitGuardFunctionIsNonBuiltinCtor(ObjOperandId funId
) {
4606 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4608 Register fun
= allocator
.useRegister(masm
, funId
);
4609 AutoScratchRegister
scratch(allocator
, masm
);
4611 FailurePath
* failure
;
4612 if (!addFailurePath(&failure
)) {
4616 masm
.branchIfNotFunctionIsNonBuiltinCtor(fun
, scratch
, failure
->label());
4620 bool CacheIRCompiler::emitGuardFunctionIsConstructor(ObjOperandId funId
) {
4621 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4622 Register funcReg
= allocator
.useRegister(masm
, funId
);
4623 AutoScratchRegister
scratch(allocator
, masm
);
4625 FailurePath
* failure
;
4626 if (!addFailurePath(&failure
)) {
4630 // Ensure obj is a constructor
4631 masm
.branchTestFunctionFlags(funcReg
, FunctionFlags::CONSTRUCTOR
,
4632 Assembler::Zero
, failure
->label());
4636 bool CacheIRCompiler::emitGuardNotClassConstructor(ObjOperandId funId
) {
4637 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4638 Register fun
= allocator
.useRegister(masm
, funId
);
4639 AutoScratchRegister
scratch(allocator
, masm
);
4641 FailurePath
* failure
;
4642 if (!addFailurePath(&failure
)) {
4646 masm
.branchFunctionKind(Assembler::Equal
, FunctionFlags::ClassConstructor
,
4647 fun
, scratch
, failure
->label());
4651 bool CacheIRCompiler::emitGuardArrayIsPacked(ObjOperandId arrayId
) {
4652 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4653 Register array
= allocator
.useRegister(masm
, arrayId
);
4654 AutoScratchRegister
scratch(allocator
, masm
);
4655 AutoScratchRegister
scratch2(allocator
, masm
);
4657 FailurePath
* failure
;
4658 if (!addFailurePath(&failure
)) {
4662 masm
.branchArrayIsNotPacked(array
, scratch
, scratch2
, failure
->label());
4666 bool CacheIRCompiler::emitGuardArgumentsObjectFlags(ObjOperandId objId
,
4668 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4669 Register obj
= allocator
.useRegister(masm
, objId
);
4670 AutoScratchRegister
scratch(allocator
, masm
);
4672 FailurePath
* failure
;
4673 if (!addFailurePath(&failure
)) {
4677 masm
.branchTestArgumentsObjectFlags(obj
, scratch
, flags
, Assembler::NonZero
,
4682 bool CacheIRCompiler::emitLoadDenseElementHoleResult(ObjOperandId objId
,
4683 Int32OperandId indexId
) {
4684 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4685 AutoOutputRegister
output(*this);
4686 Register obj
= allocator
.useRegister(masm
, objId
);
4687 Register index
= allocator
.useRegister(masm
, indexId
);
4688 AutoScratchRegister
scratch1(allocator
, masm
);
4689 AutoScratchRegisterMaybeOutput
scratch2(allocator
, masm
, output
);
4691 FailurePath
* failure
;
4692 if (!addFailurePath(&failure
)) {
4696 // Make sure the index is nonnegative.
4697 masm
.branch32(Assembler::LessThan
, index
, Imm32(0), failure
->label());
4699 // Load obj->elements.
4700 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch1
);
4702 // Guard on the initialized length.
4704 Address
initLength(scratch1
, ObjectElements::offsetOfInitializedLength());
4705 masm
.spectreBoundsCheck32(index
, initLength
, scratch2
, &hole
);
4709 masm
.loadValue(BaseObjectElementIndex(scratch1
, index
), output
.valueReg());
4710 masm
.branchTestMagic(Assembler::NotEqual
, output
.valueReg(), &done
);
4712 // Load undefined for the hole.
4714 masm
.moveValue(UndefinedValue(), output
.valueReg());
4720 bool CacheIRCompiler::emitLoadTypedArrayElementExistsResult(
4721 ObjOperandId objId
, IntPtrOperandId indexId
) {
4722 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4723 AutoOutputRegister
output(*this);
4724 Register obj
= allocator
.useRegister(masm
, objId
);
4725 Register index
= allocator
.useRegister(masm
, indexId
);
4726 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
4728 Label outOfBounds
, done
;
4731 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch
);
4732 masm
.branchPtr(Assembler::BelowOrEqual
, scratch
, index
, &outOfBounds
);
4733 EmitStoreBoolean(masm
, true, output
);
4736 masm
.bind(&outOfBounds
);
4737 EmitStoreBoolean(masm
, false, output
);
4743 bool CacheIRCompiler::emitLoadDenseElementExistsResult(ObjOperandId objId
,
4744 Int32OperandId indexId
) {
4745 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4746 AutoOutputRegister
output(*this);
4747 Register obj
= allocator
.useRegister(masm
, objId
);
4748 Register index
= allocator
.useRegister(masm
, indexId
);
4749 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
4751 FailurePath
* failure
;
4752 if (!addFailurePath(&failure
)) {
4756 // Load obj->elements.
4757 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch
);
4759 // Bounds check. Unsigned compare sends negative indices to next IC.
4760 Address
initLength(scratch
, ObjectElements::offsetOfInitializedLength());
4761 masm
.branch32(Assembler::BelowOrEqual
, initLength
, index
, failure
->label());
4764 BaseObjectElementIndex
element(scratch
, index
);
4765 masm
.branchTestMagic(Assembler::Equal
, element
, failure
->label());
4767 EmitStoreBoolean(masm
, true, output
);
4771 bool CacheIRCompiler::emitLoadDenseElementHoleExistsResult(
4772 ObjOperandId objId
, Int32OperandId indexId
) {
4773 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4774 AutoOutputRegister
output(*this);
4775 Register obj
= allocator
.useRegister(masm
, objId
);
4776 Register index
= allocator
.useRegister(masm
, indexId
);
4777 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
4779 FailurePath
* failure
;
4780 if (!addFailurePath(&failure
)) {
4784 // Make sure the index is nonnegative.
4785 masm
.branch32(Assembler::LessThan
, index
, Imm32(0), failure
->label());
4787 // Load obj->elements.
4788 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch
);
4790 // Guard on the initialized length.
4792 Address
initLength(scratch
, ObjectElements::offsetOfInitializedLength());
4793 masm
.branch32(Assembler::BelowOrEqual
, initLength
, index
, &hole
);
4795 // Load value and replace with true.
4797 BaseObjectElementIndex
element(scratch
, index
);
4798 masm
.branchTestMagic(Assembler::Equal
, element
, &hole
);
4799 EmitStoreBoolean(masm
, true, output
);
4802 // Load false for the hole.
4804 EmitStoreBoolean(masm
, false, output
);
4810 bool CacheIRCompiler::emitPackedArrayPopResult(ObjOperandId arrayId
) {
4811 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4813 AutoOutputRegister
output(*this);
4814 Register array
= allocator
.useRegister(masm
, arrayId
);
4815 AutoScratchRegister
scratch1(allocator
, masm
);
4816 AutoScratchRegister
scratch2(allocator
, masm
);
4818 FailurePath
* failure
;
4819 if (!addFailurePath(&failure
)) {
4823 masm
.packedArrayPop(array
, output
.valueReg(), scratch1
, scratch2
,
4828 bool CacheIRCompiler::emitPackedArrayShiftResult(ObjOperandId arrayId
) {
4829 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4831 AutoOutputRegister
output(*this);
4832 Register array
= allocator
.useRegister(masm
, arrayId
);
4833 AutoScratchRegister
scratch1(allocator
, masm
);
4834 AutoScratchRegister
scratch2(allocator
, masm
);
4836 FailurePath
* failure
;
4837 if (!addFailurePath(&failure
)) {
4841 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
4842 liveVolatileFloatRegs());
4843 masm
.packedArrayShift(array
, output
.valueReg(), scratch1
, scratch2
,
4844 volatileRegs
, failure
->label());
4848 bool CacheIRCompiler::emitIsObjectResult(ValOperandId inputId
) {
4849 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4851 AutoOutputRegister
output(*this);
4852 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
4854 ValueOperand val
= allocator
.useValueRegister(masm
, inputId
);
4856 masm
.testObjectSet(Assembler::Equal
, val
, scratch
);
4858 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch
, output
.valueReg());
4862 bool CacheIRCompiler::emitIsPackedArrayResult(ObjOperandId objId
) {
4863 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4865 AutoOutputRegister
output(*this);
4866 Register obj
= allocator
.useRegister(masm
, objId
);
4867 AutoScratchRegister
scratch(allocator
, masm
);
4869 Register outputScratch
= output
.valueReg().scratchReg();
4870 masm
.setIsPackedArray(obj
, outputScratch
, scratch
);
4871 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, outputScratch
, output
.valueReg());
4875 bool CacheIRCompiler::emitIsCallableResult(ValOperandId inputId
) {
4876 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4878 AutoOutputRegister
output(*this);
4879 AutoScratchRegister
scratch1(allocator
, masm
);
4880 AutoScratchRegisterMaybeOutput
scratch2(allocator
, masm
, output
);
4882 ValueOperand val
= allocator
.useValueRegister(masm
, inputId
);
4884 Label isObject
, done
;
4885 masm
.branchTestObject(Assembler::Equal
, val
, &isObject
);
4886 // Primitives are never callable.
4887 masm
.move32(Imm32(0), scratch2
);
4890 masm
.bind(&isObject
);
4891 masm
.unboxObject(val
, scratch1
);
4894 masm
.isCallable(scratch1
, scratch2
, &isProxy
);
4897 masm
.bind(&isProxy
);
4899 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
4900 liveVolatileFloatRegs());
4901 masm
.PushRegsInMask(volatileRegs
);
4903 using Fn
= bool (*)(JSObject
* obj
);
4904 masm
.setupUnalignedABICall(scratch2
);
4905 masm
.passABIArg(scratch1
);
4906 masm
.callWithABI
<Fn
, ObjectIsCallable
>();
4907 masm
.storeCallBoolResult(scratch2
);
4909 LiveRegisterSet ignore
;
4910 ignore
.add(scratch2
);
4911 masm
.PopRegsInMaskIgnore(volatileRegs
, ignore
);
4915 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch2
, output
.valueReg());
4919 bool CacheIRCompiler::emitIsConstructorResult(ObjOperandId objId
) {
4920 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4922 AutoOutputRegister
output(*this);
4923 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
4925 Register obj
= allocator
.useRegister(masm
, objId
);
4927 Label isProxy
, done
;
4928 masm
.isConstructor(obj
, scratch
, &isProxy
);
4931 masm
.bind(&isProxy
);
4933 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
4934 liveVolatileFloatRegs());
4935 masm
.PushRegsInMask(volatileRegs
);
4937 using Fn
= bool (*)(JSObject
* obj
);
4938 masm
.setupUnalignedABICall(scratch
);
4939 masm
.passABIArg(obj
);
4940 masm
.callWithABI
<Fn
, ObjectIsConstructor
>();
4941 masm
.storeCallBoolResult(scratch
);
4943 LiveRegisterSet ignore
;
4944 ignore
.add(scratch
);
4945 masm
.PopRegsInMaskIgnore(volatileRegs
, ignore
);
4949 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch
, output
.valueReg());
4953 bool CacheIRCompiler::emitIsCrossRealmArrayConstructorResult(
4954 ObjOperandId objId
) {
4955 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4957 AutoOutputRegister
output(*this);
4958 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
4959 Register obj
= allocator
.useRegister(masm
, objId
);
4961 masm
.setIsCrossRealmArrayConstructor(obj
, scratch
);
4962 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch
, output
.valueReg());
4966 bool CacheIRCompiler::emitArrayBufferViewByteOffsetInt32Result(
4967 ObjOperandId objId
) {
4968 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4970 AutoOutputRegister
output(*this);
4971 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
4972 Register obj
= allocator
.useRegister(masm
, objId
);
4974 FailurePath
* failure
;
4975 if (!addFailurePath(&failure
)) {
4979 masm
.loadArrayBufferViewByteOffsetIntPtr(obj
, scratch
);
4980 masm
.guardNonNegativeIntPtrToInt32(scratch
, failure
->label());
4981 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
4985 bool CacheIRCompiler::emitArrayBufferViewByteOffsetDoubleResult(
4986 ObjOperandId objId
) {
4987 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
4989 AutoOutputRegister
output(*this);
4990 Register obj
= allocator
.useRegister(masm
, objId
);
4991 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
4993 ScratchDoubleScope
fpscratch(masm
);
4994 masm
.loadArrayBufferViewByteOffsetIntPtr(obj
, scratch
);
4995 masm
.convertIntPtrToDouble(scratch
, fpscratch
);
4996 masm
.boxDouble(fpscratch
, output
.valueReg(), fpscratch
);
5000 bool CacheIRCompiler::emitTypedArrayByteLengthInt32Result(ObjOperandId objId
) {
5001 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5003 AutoOutputRegister
output(*this);
5004 AutoScratchRegisterMaybeOutput
scratch1(allocator
, masm
, output
);
5005 AutoScratchRegister
scratch2(allocator
, masm
);
5006 Register obj
= allocator
.useRegister(masm
, objId
);
5008 FailurePath
* failure
;
5009 if (!addFailurePath(&failure
)) {
5013 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch1
);
5014 masm
.guardNonNegativeIntPtrToInt32(scratch1
, failure
->label());
5015 masm
.typedArrayElementSize(obj
, scratch2
);
5017 masm
.branchMul32(Assembler::Overflow
, scratch2
.get(), scratch1
,
5020 masm
.tagValue(JSVAL_TYPE_INT32
, scratch1
, output
.valueReg());
5024 bool CacheIRCompiler::emitTypedArrayByteLengthDoubleResult(ObjOperandId objId
) {
5025 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5027 AutoOutputRegister
output(*this);
5028 AutoScratchRegisterMaybeOutput
scratch1(allocator
, masm
, output
);
5029 AutoScratchRegister
scratch2(allocator
, masm
);
5030 Register obj
= allocator
.useRegister(masm
, objId
);
5032 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch1
);
5033 masm
.typedArrayElementSize(obj
, scratch2
);
5034 masm
.mulPtr(scratch2
, scratch1
);
5036 ScratchDoubleScope
fpscratch(masm
);
5037 masm
.convertIntPtrToDouble(scratch1
, fpscratch
);
5038 masm
.boxDouble(fpscratch
, output
.valueReg(), fpscratch
);
5042 bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId
) {
5043 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5045 AutoOutputRegister
output(*this);
5046 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5047 Register obj
= allocator
.useRegister(masm
, objId
);
5049 masm
.typedArrayElementSize(obj
, scratch
);
5050 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
5054 bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId
) {
5055 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5057 AutoScratchRegister
scratch(allocator
, masm
);
5058 Register obj
= allocator
.useRegister(masm
, objId
);
5060 FailurePath
* failure
;
5061 if (!addFailurePath(&failure
)) {
5065 masm
.branchIfHasDetachedArrayBuffer(obj
, scratch
, failure
->label());
5069 bool CacheIRCompiler::emitIsTypedArrayConstructorResult(ObjOperandId objId
) {
5070 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5072 AutoOutputRegister
output(*this);
5073 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5074 Register obj
= allocator
.useRegister(masm
, objId
);
5076 masm
.setIsDefinitelyTypedArrayConstructor(obj
, scratch
);
5077 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch
, output
.valueReg());
5081 bool CacheIRCompiler::emitGetNextMapSetEntryForIteratorResult(
5082 ObjOperandId iterId
, ObjOperandId resultArrId
, bool isMap
) {
5083 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5085 AutoOutputRegister
output(*this);
5086 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5087 Register iter
= allocator
.useRegister(masm
, iterId
);
5088 Register resultArr
= allocator
.useRegister(masm
, resultArrId
);
5090 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5091 save
.takeUnchecked(output
.valueReg());
5092 save
.takeUnchecked(scratch
);
5093 masm
.PushRegsInMask(save
);
5095 masm
.setupUnalignedABICall(scratch
);
5096 masm
.passABIArg(iter
);
5097 masm
.passABIArg(resultArr
);
5099 using Fn
= bool (*)(MapIteratorObject
* iter
, ArrayObject
* resultPairObj
);
5100 masm
.callWithABI
<Fn
, MapIteratorObject::next
>();
5102 using Fn
= bool (*)(SetIteratorObject
* iter
, ArrayObject
* resultObj
);
5103 masm
.callWithABI
<Fn
, SetIteratorObject::next
>();
5105 masm
.storeCallBoolResult(scratch
);
5107 masm
.PopRegsInMask(save
);
5109 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch
, output
.valueReg());
5113 void CacheIRCompiler::emitActivateIterator(Register objBeingIterated
,
5114 Register iterObject
,
5115 Register nativeIter
,
5116 Register scratch
, Register scratch2
,
5117 uint32_t enumeratorsAddrOffset
) {
5118 // 'objectBeingIterated_' must be nullptr, so we don't need a pre-barrier.
5119 Address
iterObjAddr(nativeIter
,
5120 NativeIterator::offsetOfObjectBeingIterated());
5123 masm
.branchPtr(Assembler::Equal
, iterObjAddr
, ImmPtr(nullptr), &ok
);
5124 masm
.assumeUnreachable("iterator with non-null object");
5128 // Mark iterator as active.
5129 Address
iterFlagsAddr(nativeIter
, NativeIterator::offsetOfFlagsAndCount());
5130 masm
.storePtr(objBeingIterated
, iterObjAddr
);
5131 masm
.or32(Imm32(NativeIterator::Flags::Active
), iterFlagsAddr
);
5133 // Post-write barrier for stores to 'objectBeingIterated_'.
5134 emitPostBarrierSlot(
5136 TypedOrValueRegister(MIRType::Object
, AnyRegister(objBeingIterated
)),
5139 // Chain onto the active iterator stack.
5140 StubFieldOffset
enumeratorsAddr(enumeratorsAddrOffset
,
5141 StubField::Type::RawPointer
);
5142 emitLoadStubField(enumeratorsAddr
, scratch
);
5143 masm
.registerIterator(scratch
, nativeIter
, scratch2
);
5146 bool CacheIRCompiler::emitObjectToIteratorResult(
5147 ObjOperandId objId
, uint32_t enumeratorsAddrOffset
) {
5148 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5150 AutoCallVM
callvm(masm
, this, allocator
);
5151 Register obj
= allocator
.useRegister(masm
, objId
);
5153 AutoScratchRegister
iterObj(allocator
, masm
);
5154 AutoScratchRegister
scratch(allocator
, masm
);
5155 AutoScratchRegisterMaybeOutput
scratch2(allocator
, masm
, callvm
.output());
5156 AutoScratchRegisterMaybeOutputType
scratch3(allocator
, masm
, callvm
.output());
5159 masm
.maybeLoadIteratorFromShape(obj
, iterObj
, scratch
, scratch2
, scratch3
,
5163 Address(iterObj
, PropertyIteratorObject::offsetOfIteratorSlot()),
5166 emitActivateIterator(obj
, iterObj
, scratch
, scratch2
, scratch3
,
5167 enumeratorsAddrOffset
);
5173 using Fn
= PropertyIteratorObject
* (*)(JSContext
*, HandleObject
);
5174 callvm
.call
<Fn
, GetIterator
>();
5175 masm
.storeCallPointerResult(iterObj
);
5178 EmitStoreResult(masm
, iterObj
, JSVAL_TYPE_OBJECT
, callvm
.output());
5182 bool CacheIRCompiler::emitValueToIteratorResult(ValOperandId valId
) {
5183 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5185 AutoCallVM
callvm(masm
, this, allocator
);
5187 ValueOperand val
= allocator
.useValueRegister(masm
, valId
);
5193 using Fn
= PropertyIteratorObject
* (*)(JSContext
*, HandleValue
);
5194 callvm
.call
<Fn
, ValueToIterator
>();
5198 bool CacheIRCompiler::emitNewArrayIteratorResult(
5199 uint32_t templateObjectOffset
) {
5200 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5202 AutoCallVM
callvm(masm
, this, allocator
);
5206 using Fn
= ArrayIteratorObject
* (*)(JSContext
*);
5207 callvm
.call
<Fn
, NewArrayIterator
>();
5211 bool CacheIRCompiler::emitNewStringIteratorResult(
5212 uint32_t templateObjectOffset
) {
5213 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5215 AutoCallVM
callvm(masm
, this, allocator
);
5219 using Fn
= StringIteratorObject
* (*)(JSContext
*);
5220 callvm
.call
<Fn
, NewStringIterator
>();
5224 bool CacheIRCompiler::emitNewRegExpStringIteratorResult(
5225 uint32_t templateObjectOffset
) {
5226 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5228 AutoCallVM
callvm(masm
, this, allocator
);
5232 using Fn
= RegExpStringIteratorObject
* (*)(JSContext
*);
5233 callvm
.call
<Fn
, NewRegExpStringIterator
>();
5237 bool CacheIRCompiler::emitObjectCreateResult(uint32_t templateObjectOffset
) {
5238 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5240 AutoCallVM
callvm(masm
, this, allocator
);
5241 AutoScratchRegister
scratch(allocator
, masm
);
5243 StubFieldOffset
objectField(templateObjectOffset
, StubField::Type::JSObject
);
5244 emitLoadStubField(objectField
, scratch
);
5249 using Fn
= PlainObject
* (*)(JSContext
*, Handle
<PlainObject
*>);
5250 callvm
.call
<Fn
, ObjectCreateWithTemplate
>();
5254 bool CacheIRCompiler::emitObjectKeysResult(ObjOperandId objId
) {
5255 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5257 AutoCallVM
callvm(masm
, this, allocator
);
5258 Register obj
= allocator
.useRegister(masm
, objId
);
5260 // Our goal is only to record calls to Object.keys, to elide it when
5261 // partially used, not to provide an alternative implementation.
5266 using Fn
= JSObject
* (*)(JSContext
*, HandleObject
);
5267 callvm
.call
<Fn
, jit::ObjectKeys
>();
5273 bool CacheIRCompiler::emitNewArrayFromLengthResult(
5274 uint32_t templateObjectOffset
, Int32OperandId lengthId
) {
5275 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5277 AutoCallVM
callvm(masm
, this, allocator
);
5278 AutoScratchRegister
scratch(allocator
, masm
);
5279 Register length
= allocator
.useRegister(masm
, lengthId
);
5281 StubFieldOffset
objectField(templateObjectOffset
, StubField::Type::JSObject
);
5282 emitLoadStubField(objectField
, scratch
);
5288 using Fn
= ArrayObject
* (*)(JSContext
*, Handle
<ArrayObject
*>, int32_t length
);
5289 callvm
.call
<Fn
, ArrayConstructorOneArg
>();
5293 bool CacheIRCompiler::emitNewTypedArrayFromLengthResult(
5294 uint32_t templateObjectOffset
, Int32OperandId lengthId
) {
5295 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5297 AutoCallVM
callvm(masm
, this, allocator
);
5298 AutoScratchRegister
scratch(allocator
, masm
);
5299 Register length
= allocator
.useRegister(masm
, lengthId
);
5301 StubFieldOffset
objectField(templateObjectOffset
, StubField::Type::JSObject
);
5302 emitLoadStubField(objectField
, scratch
);
5308 using Fn
= TypedArrayObject
* (*)(JSContext
*, HandleObject
, int32_t length
);
5309 callvm
.call
<Fn
, NewTypedArrayWithTemplateAndLength
>();
5313 bool CacheIRCompiler::emitNewTypedArrayFromArrayBufferResult(
5314 uint32_t templateObjectOffset
, ObjOperandId bufferId
,
5315 ValOperandId byteOffsetId
, ValOperandId lengthId
) {
5316 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5318 #ifdef JS_CODEGEN_X86
5319 MOZ_CRASH("Instruction not supported on 32-bit x86, not enough registers");
5322 AutoCallVM
callvm(masm
, this, allocator
);
5323 AutoScratchRegister
scratch(allocator
, masm
);
5324 Register buffer
= allocator
.useRegister(masm
, bufferId
);
5325 ValueOperand byteOffset
= allocator
.useValueRegister(masm
, byteOffsetId
);
5326 ValueOperand length
= allocator
.useValueRegister(masm
, lengthId
);
5328 StubFieldOffset
objectField(templateObjectOffset
, StubField::Type::JSObject
);
5329 emitLoadStubField(objectField
, scratch
);
5333 masm
.Push(byteOffset
);
5337 using Fn
= TypedArrayObject
* (*)(JSContext
*, HandleObject
, HandleObject
,
5338 HandleValue
, HandleValue
);
5339 callvm
.call
<Fn
, NewTypedArrayWithTemplateAndBuffer
>();
5343 bool CacheIRCompiler::emitNewTypedArrayFromArrayResult(
5344 uint32_t templateObjectOffset
, ObjOperandId arrayId
) {
5345 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5347 AutoCallVM
callvm(masm
, this, allocator
);
5348 AutoScratchRegister
scratch(allocator
, masm
);
5349 Register array
= allocator
.useRegister(masm
, arrayId
);
5351 StubFieldOffset
objectField(templateObjectOffset
, StubField::Type::JSObject
);
5352 emitLoadStubField(objectField
, scratch
);
5358 using Fn
= TypedArrayObject
* (*)(JSContext
*, HandleObject
, HandleObject
);
5359 callvm
.call
<Fn
, NewTypedArrayWithTemplateAndArray
>();
5363 bool CacheIRCompiler::emitAddSlotAndCallAddPropHook(ObjOperandId objId
,
5365 uint32_t newShapeOffset
) {
5366 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5368 AutoCallVM
callvm(masm
, this, allocator
);
5370 AutoScratchRegister
scratch(allocator
, masm
);
5371 Register obj
= allocator
.useRegister(masm
, objId
);
5372 ValueOperand rhs
= allocator
.useValueRegister(masm
, rhsId
);
5374 StubFieldOffset
shapeField(newShapeOffset
, StubField::Type::Shape
);
5375 emitLoadStubField(shapeField
, scratch
);
5384 bool (*)(JSContext
*, Handle
<NativeObject
*>, HandleValue
, Handle
<Shape
*>);
5385 callvm
.callNoResult
<Fn
, AddSlotAndCallAddPropHook
>();
5389 bool CacheIRCompiler::emitMathAbsInt32Result(Int32OperandId inputId
) {
5390 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5392 AutoOutputRegister
output(*this);
5393 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5395 Register input
= allocator
.useRegister(masm
, inputId
);
5397 FailurePath
* failure
;
5398 if (!addFailurePath(&failure
)) {
5402 masm
.mov(input
, scratch
);
5403 // Don't negate already positive values.
5405 masm
.branchTest32(Assembler::NotSigned
, scratch
, scratch
, &positive
);
5406 // neg32 might overflow for INT_MIN.
5407 masm
.branchNeg32(Assembler::Overflow
, scratch
, failure
->label());
5408 masm
.bind(&positive
);
5410 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
5414 bool CacheIRCompiler::emitMathAbsNumberResult(NumberOperandId inputId
) {
5415 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5417 AutoOutputRegister
output(*this);
5418 AutoAvailableFloatRegister
scratch(*this, FloatReg0
);
5420 allocator
.ensureDoubleRegister(masm
, inputId
, scratch
);
5422 masm
.absDouble(scratch
, scratch
);
5423 masm
.boxDouble(scratch
, output
.valueReg(), scratch
);
5427 bool CacheIRCompiler::emitMathClz32Result(Int32OperandId inputId
) {
5428 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5430 AutoOutputRegister
output(*this);
5431 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5432 Register input
= allocator
.useRegister(masm
, inputId
);
5434 masm
.clz32(input
, scratch
, /* knownNotZero = */ false);
5435 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
5439 bool CacheIRCompiler::emitMathSignInt32Result(Int32OperandId inputId
) {
5440 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5442 AutoOutputRegister
output(*this);
5443 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5444 Register input
= allocator
.useRegister(masm
, inputId
);
5446 masm
.signInt32(input
, scratch
);
5447 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
5451 bool CacheIRCompiler::emitMathSignNumberResult(NumberOperandId inputId
) {
5452 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5454 AutoOutputRegister
output(*this);
5455 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg0
);
5456 AutoAvailableFloatRegister
floatScratch2(*this, FloatReg1
);
5458 allocator
.ensureDoubleRegister(masm
, inputId
, floatScratch1
);
5460 masm
.signDouble(floatScratch1
, floatScratch2
);
5461 masm
.boxDouble(floatScratch2
, output
.valueReg(), floatScratch2
);
5465 bool CacheIRCompiler::emitMathSignNumberToInt32Result(NumberOperandId inputId
) {
5466 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5468 AutoOutputRegister
output(*this);
5469 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5470 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg0
);
5471 AutoAvailableFloatRegister
floatScratch2(*this, FloatReg1
);
5473 FailurePath
* failure
;
5474 if (!addFailurePath(&failure
)) {
5478 allocator
.ensureDoubleRegister(masm
, inputId
, floatScratch1
);
5480 masm
.signDoubleToInt32(floatScratch1
, scratch
, floatScratch2
,
5482 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
5486 bool CacheIRCompiler::emitMathImulResult(Int32OperandId lhsId
,
5487 Int32OperandId rhsId
) {
5488 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5490 AutoOutputRegister
output(*this);
5491 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5492 Register lhs
= allocator
.useRegister(masm
, lhsId
);
5493 Register rhs
= allocator
.useRegister(masm
, rhsId
);
5495 masm
.mov(lhs
, scratch
);
5496 masm
.mul32(rhs
, scratch
);
5497 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
5501 bool CacheIRCompiler::emitMathSqrtNumberResult(NumberOperandId inputId
) {
5502 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5504 AutoOutputRegister
output(*this);
5505 AutoAvailableFloatRegister
scratch(*this, FloatReg0
);
5507 allocator
.ensureDoubleRegister(masm
, inputId
, scratch
);
5509 masm
.sqrtDouble(scratch
, scratch
);
5510 masm
.boxDouble(scratch
, output
.valueReg(), scratch
);
5514 bool CacheIRCompiler::emitMathFloorNumberResult(NumberOperandId inputId
) {
5515 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5517 AutoOutputRegister
output(*this);
5518 AutoAvailableFloatRegister
scratch(*this, FloatReg0
);
5520 allocator
.ensureDoubleRegister(masm
, inputId
, scratch
);
5522 if (Assembler::HasRoundInstruction(RoundingMode::Down
)) {
5523 masm
.nearbyIntDouble(RoundingMode::Down
, scratch
, scratch
);
5524 masm
.boxDouble(scratch
, output
.valueReg(), scratch
);
5528 return emitMathFunctionNumberResultShared(UnaryMathFunction::Floor
, scratch
,
5532 bool CacheIRCompiler::emitMathCeilNumberResult(NumberOperandId inputId
) {
5533 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5535 AutoOutputRegister
output(*this);
5536 AutoAvailableFloatRegister
scratch(*this, FloatReg0
);
5538 allocator
.ensureDoubleRegister(masm
, inputId
, scratch
);
5540 if (Assembler::HasRoundInstruction(RoundingMode::Up
)) {
5541 masm
.nearbyIntDouble(RoundingMode::Up
, scratch
, scratch
);
5542 masm
.boxDouble(scratch
, output
.valueReg(), scratch
);
5546 return emitMathFunctionNumberResultShared(UnaryMathFunction::Ceil
, scratch
,
5550 bool CacheIRCompiler::emitMathTruncNumberResult(NumberOperandId inputId
) {
5551 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5553 AutoOutputRegister
output(*this);
5554 AutoAvailableFloatRegister
scratch(*this, FloatReg0
);
5556 allocator
.ensureDoubleRegister(masm
, inputId
, scratch
);
5558 if (Assembler::HasRoundInstruction(RoundingMode::TowardsZero
)) {
5559 masm
.nearbyIntDouble(RoundingMode::TowardsZero
, scratch
, scratch
);
5560 masm
.boxDouble(scratch
, output
.valueReg(), scratch
);
5564 return emitMathFunctionNumberResultShared(UnaryMathFunction::Trunc
, scratch
,
5568 bool CacheIRCompiler::emitMathFRoundNumberResult(NumberOperandId inputId
) {
5569 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5571 AutoOutputRegister
output(*this);
5572 AutoAvailableFloatRegister
scratch(*this, FloatReg0
);
5573 FloatRegister scratchFloat32
= scratch
.get().asSingle();
5575 allocator
.ensureDoubleRegister(masm
, inputId
, scratch
);
5577 masm
.convertDoubleToFloat32(scratch
, scratchFloat32
);
5578 masm
.convertFloat32ToDouble(scratchFloat32
, scratch
);
5580 masm
.boxDouble(scratch
, output
.valueReg(), scratch
);
5584 bool CacheIRCompiler::emitMathHypot2NumberResult(NumberOperandId first
,
5585 NumberOperandId second
) {
5586 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5587 AutoOutputRegister
output(*this);
5588 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5590 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
5591 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg1
);
5593 allocator
.ensureDoubleRegister(masm
, first
, floatScratch0
);
5594 allocator
.ensureDoubleRegister(masm
, second
, floatScratch1
);
5596 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5597 masm
.PushRegsInMask(save
);
5599 using Fn
= double (*)(double x
, double y
);
5600 masm
.setupUnalignedABICall(scratch
);
5601 masm
.passABIArg(floatScratch0
, ABIType::Float64
);
5602 masm
.passABIArg(floatScratch1
, ABIType::Float64
);
5604 masm
.callWithABI
<Fn
, ecmaHypot
>(ABIType::Float64
);
5605 masm
.storeCallFloatResult(floatScratch0
);
5607 LiveRegisterSet ignore
;
5608 ignore
.add(floatScratch0
);
5609 masm
.PopRegsInMaskIgnore(save
, ignore
);
5611 masm
.boxDouble(floatScratch0
, output
.valueReg(), floatScratch0
);
5615 bool CacheIRCompiler::emitMathHypot3NumberResult(NumberOperandId first
,
5616 NumberOperandId second
,
5617 NumberOperandId third
) {
5618 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5619 AutoOutputRegister
output(*this);
5620 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5622 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
5623 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg1
);
5624 AutoAvailableFloatRegister
floatScratch2(*this, FloatReg2
);
5626 allocator
.ensureDoubleRegister(masm
, first
, floatScratch0
);
5627 allocator
.ensureDoubleRegister(masm
, second
, floatScratch1
);
5628 allocator
.ensureDoubleRegister(masm
, third
, floatScratch2
);
5630 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5631 masm
.PushRegsInMask(save
);
5633 using Fn
= double (*)(double x
, double y
, double z
);
5634 masm
.setupUnalignedABICall(scratch
);
5635 masm
.passABIArg(floatScratch0
, ABIType::Float64
);
5636 masm
.passABIArg(floatScratch1
, ABIType::Float64
);
5637 masm
.passABIArg(floatScratch2
, ABIType::Float64
);
5639 masm
.callWithABI
<Fn
, hypot3
>(ABIType::Float64
);
5640 masm
.storeCallFloatResult(floatScratch0
);
5642 LiveRegisterSet ignore
;
5643 ignore
.add(floatScratch0
);
5644 masm
.PopRegsInMaskIgnore(save
, ignore
);
5646 masm
.boxDouble(floatScratch0
, output
.valueReg(), floatScratch0
);
5650 bool CacheIRCompiler::emitMathHypot4NumberResult(NumberOperandId first
,
5651 NumberOperandId second
,
5652 NumberOperandId third
,
5653 NumberOperandId fourth
) {
5654 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5655 AutoOutputRegister
output(*this);
5656 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5658 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
5659 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg1
);
5660 AutoAvailableFloatRegister
floatScratch2(*this, FloatReg2
);
5661 AutoAvailableFloatRegister
floatScratch3(*this, FloatReg3
);
5663 allocator
.ensureDoubleRegister(masm
, first
, floatScratch0
);
5664 allocator
.ensureDoubleRegister(masm
, second
, floatScratch1
);
5665 allocator
.ensureDoubleRegister(masm
, third
, floatScratch2
);
5666 allocator
.ensureDoubleRegister(masm
, fourth
, floatScratch3
);
5668 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5669 masm
.PushRegsInMask(save
);
5671 using Fn
= double (*)(double x
, double y
, double z
, double w
);
5672 masm
.setupUnalignedABICall(scratch
);
5673 masm
.passABIArg(floatScratch0
, ABIType::Float64
);
5674 masm
.passABIArg(floatScratch1
, ABIType::Float64
);
5675 masm
.passABIArg(floatScratch2
, ABIType::Float64
);
5676 masm
.passABIArg(floatScratch3
, ABIType::Float64
);
5678 masm
.callWithABI
<Fn
, hypot4
>(ABIType::Float64
);
5679 masm
.storeCallFloatResult(floatScratch0
);
5681 LiveRegisterSet ignore
;
5682 ignore
.add(floatScratch0
);
5683 masm
.PopRegsInMaskIgnore(save
, ignore
);
5685 masm
.boxDouble(floatScratch0
, output
.valueReg(), floatScratch0
);
5689 bool CacheIRCompiler::emitMathAtan2NumberResult(NumberOperandId yId
,
5690 NumberOperandId xId
) {
5691 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5692 AutoOutputRegister
output(*this);
5693 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5695 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
5696 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg1
);
5698 allocator
.ensureDoubleRegister(masm
, yId
, floatScratch0
);
5699 allocator
.ensureDoubleRegister(masm
, xId
, floatScratch1
);
5701 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5702 masm
.PushRegsInMask(save
);
5704 using Fn
= double (*)(double x
, double y
);
5705 masm
.setupUnalignedABICall(scratch
);
5706 masm
.passABIArg(floatScratch0
, ABIType::Float64
);
5707 masm
.passABIArg(floatScratch1
, ABIType::Float64
);
5708 masm
.callWithABI
<Fn
, js::ecmaAtan2
>(ABIType::Float64
);
5709 masm
.storeCallFloatResult(floatScratch0
);
5711 LiveRegisterSet ignore
;
5712 ignore
.add(floatScratch0
);
5713 masm
.PopRegsInMaskIgnore(save
, ignore
);
5715 masm
.boxDouble(floatScratch0
, output
.valueReg(), floatScratch0
);
5720 bool CacheIRCompiler::emitMathFloorToInt32Result(NumberOperandId inputId
) {
5721 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5723 AutoOutputRegister
output(*this);
5724 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5726 AutoAvailableFloatRegister
scratchFloat(*this, FloatReg0
);
5728 FailurePath
* failure
;
5729 if (!addFailurePath(&failure
)) {
5733 allocator
.ensureDoubleRegister(masm
, inputId
, scratchFloat
);
5735 masm
.floorDoubleToInt32(scratchFloat
, scratch
, failure
->label());
5737 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
5741 bool CacheIRCompiler::emitMathCeilToInt32Result(NumberOperandId inputId
) {
5742 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5744 AutoOutputRegister
output(*this);
5745 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5747 AutoAvailableFloatRegister
scratchFloat(*this, FloatReg0
);
5749 FailurePath
* failure
;
5750 if (!addFailurePath(&failure
)) {
5754 allocator
.ensureDoubleRegister(masm
, inputId
, scratchFloat
);
5756 masm
.ceilDoubleToInt32(scratchFloat
, scratch
, failure
->label());
5758 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
5762 bool CacheIRCompiler::emitMathTruncToInt32Result(NumberOperandId inputId
) {
5763 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5765 AutoOutputRegister
output(*this);
5766 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5768 AutoAvailableFloatRegister
scratchFloat(*this, FloatReg0
);
5770 FailurePath
* failure
;
5771 if (!addFailurePath(&failure
)) {
5775 allocator
.ensureDoubleRegister(masm
, inputId
, scratchFloat
);
5777 masm
.truncDoubleToInt32(scratchFloat
, scratch
, failure
->label());
5779 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
5783 bool CacheIRCompiler::emitMathRoundToInt32Result(NumberOperandId inputId
) {
5784 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5786 AutoOutputRegister
output(*this);
5787 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
5789 AutoAvailableFloatRegister
scratchFloat0(*this, FloatReg0
);
5790 AutoAvailableFloatRegister
scratchFloat1(*this, FloatReg1
);
5792 FailurePath
* failure
;
5793 if (!addFailurePath(&failure
)) {
5797 allocator
.ensureDoubleRegister(masm
, inputId
, scratchFloat0
);
5799 masm
.roundDoubleToInt32(scratchFloat0
, scratch
, scratchFloat1
,
5802 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
5806 bool CacheIRCompiler::emitInt32MinMax(bool isMax
, Int32OperandId firstId
,
5807 Int32OperandId secondId
,
5808 Int32OperandId resultId
) {
5809 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5811 Register first
= allocator
.useRegister(masm
, firstId
);
5812 Register second
= allocator
.useRegister(masm
, secondId
);
5813 Register result
= allocator
.defineRegister(masm
, resultId
);
5815 Assembler::Condition cond
=
5816 isMax
? Assembler::GreaterThan
: Assembler::LessThan
;
5817 masm
.move32(first
, result
);
5818 masm
.cmp32Move32(cond
, second
, first
, second
, result
);
5822 bool CacheIRCompiler::emitNumberMinMax(bool isMax
, NumberOperandId firstId
,
5823 NumberOperandId secondId
,
5824 NumberOperandId resultId
) {
5825 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5827 ValueOperand output
= allocator
.defineValueRegister(masm
, resultId
);
5829 AutoAvailableFloatRegister
scratch1(*this, FloatReg0
);
5830 AutoAvailableFloatRegister
scratch2(*this, FloatReg1
);
5832 allocator
.ensureDoubleRegister(masm
, firstId
, scratch1
);
5833 allocator
.ensureDoubleRegister(masm
, secondId
, scratch2
);
5836 masm
.maxDouble(scratch2
, scratch1
, /* handleNaN = */ true);
5838 masm
.minDouble(scratch2
, scratch1
, /* handleNaN = */ true);
5841 masm
.boxDouble(scratch1
, output
, scratch1
);
5845 bool CacheIRCompiler::emitInt32MinMaxArrayResult(ObjOperandId arrayId
,
5847 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5849 AutoOutputRegister
output(*this);
5850 Register array
= allocator
.useRegister(masm
, arrayId
);
5852 AutoScratchRegister
scratch(allocator
, masm
);
5853 AutoScratchRegister
scratch2(allocator
, masm
);
5854 AutoScratchRegisterMaybeOutputType
scratch3(allocator
, masm
, output
);
5855 AutoScratchRegisterMaybeOutput
result(allocator
, masm
, output
);
5857 FailurePath
* failure
;
5858 if (!addFailurePath(&failure
)) {
5862 masm
.minMaxArrayInt32(array
, result
, scratch
, scratch2
, scratch3
, isMax
,
5864 masm
.tagValue(JSVAL_TYPE_INT32
, result
, output
.valueReg());
5868 bool CacheIRCompiler::emitNumberMinMaxArrayResult(ObjOperandId arrayId
,
5870 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5872 AutoOutputRegister
output(*this);
5873 Register array
= allocator
.useRegister(masm
, arrayId
);
5875 AutoAvailableFloatRegister
result(*this, FloatReg0
);
5876 AutoAvailableFloatRegister
floatScratch(*this, FloatReg1
);
5878 AutoScratchRegister
scratch1(allocator
, masm
);
5879 AutoScratchRegister
scratch2(allocator
, masm
);
5881 FailurePath
* failure
;
5882 if (!addFailurePath(&failure
)) {
5886 masm
.minMaxArrayNumber(array
, result
, floatScratch
, scratch1
, scratch2
, isMax
,
5888 masm
.boxDouble(result
, output
.valueReg(), result
);
5892 bool CacheIRCompiler::emitMathFunctionNumberResultShared(
5893 UnaryMathFunction fun
, FloatRegister inputScratch
, ValueOperand output
) {
5894 UnaryMathFunctionType funPtr
= GetUnaryMathFunctionPtr(fun
);
5896 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5897 save
.takeUnchecked(inputScratch
);
5898 masm
.PushRegsInMask(save
);
5900 masm
.setupUnalignedABICall(output
.scratchReg());
5901 masm
.passABIArg(inputScratch
, ABIType::Float64
);
5902 masm
.callWithABI(DynamicFunction
<UnaryMathFunctionType
>(funPtr
),
5904 masm
.storeCallFloatResult(inputScratch
);
5906 masm
.PopRegsInMask(save
);
5908 masm
.boxDouble(inputScratch
, output
, inputScratch
);
5912 bool CacheIRCompiler::emitMathFunctionNumberResult(NumberOperandId inputId
,
5913 UnaryMathFunction fun
) {
5914 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5916 AutoOutputRegister
output(*this);
5917 AutoAvailableFloatRegister
scratch(*this, FloatReg0
);
5919 allocator
.ensureDoubleRegister(masm
, inputId
, scratch
);
5921 return emitMathFunctionNumberResultShared(fun
, scratch
, output
.valueReg());
5924 static void EmitStoreDenseElement(MacroAssembler
& masm
,
5925 const ConstantOrRegister
& value
,
5926 BaseObjectElementIndex target
) {
5927 if (value
.constant()) {
5928 Value v
= value
.value();
5929 masm
.storeValue(v
, target
);
5933 TypedOrValueRegister reg
= value
.reg();
5934 masm
.storeTypedOrValue(reg
, target
);
5937 bool CacheIRCompiler::emitStoreDenseElement(ObjOperandId objId
,
5938 Int32OperandId indexId
,
5939 ValOperandId rhsId
) {
5940 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
5942 Register obj
= allocator
.useRegister(masm
, objId
);
5943 Register index
= allocator
.useRegister(masm
, indexId
);
5944 ConstantOrRegister val
= allocator
.useConstantOrRegister(masm
, rhsId
);
5946 AutoScratchRegister
scratch(allocator
, masm
);
5948 FailurePath
* failure
;
5949 if (!addFailurePath(&failure
)) {
5953 // Load obj->elements in scratch.
5954 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch
);
5956 // Bounds check. Unfortunately we don't have more registers available on
5957 // x86, so use InvalidReg and emit slightly slower code on x86.
5958 Register spectreTemp
= InvalidReg
;
5959 Address
initLength(scratch
, ObjectElements::offsetOfInitializedLength());
5960 masm
.spectreBoundsCheck32(index
, initLength
, spectreTemp
, failure
->label());
5963 BaseObjectElementIndex
element(scratch
, index
);
5964 masm
.branchTestMagic(Assembler::Equal
, element
, failure
->label());
5966 // Perform the store.
5967 EmitPreBarrier(masm
, element
, MIRType::Value
);
5968 EmitStoreDenseElement(masm
, val
, element
);
5970 emitPostBarrierElement(obj
, val
, scratch
, index
);
5974 static void EmitAssertExtensibleElements(MacroAssembler
& masm
,
5975 Register elementsReg
) {
5977 // Preceding shape guards ensure the object elements are extensible.
5978 Address
elementsFlags(elementsReg
, ObjectElements::offsetOfFlags());
5980 masm
.branchTest32(Assembler::Zero
, elementsFlags
,
5981 Imm32(ObjectElements::Flags::NOT_EXTENSIBLE
), &ok
);
5982 masm
.assumeUnreachable("Unexpected non-extensible elements");
5987 static void EmitAssertWritableArrayLengthElements(MacroAssembler
& masm
,
5988 Register elementsReg
) {
5990 // Preceding shape guards ensure the array length is writable.
5991 Address
elementsFlags(elementsReg
, ObjectElements::offsetOfFlags());
5993 masm
.branchTest32(Assembler::Zero
, elementsFlags
,
5994 Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH
),
5996 masm
.assumeUnreachable("Unexpected non-writable array length elements");
6001 bool CacheIRCompiler::emitStoreDenseElementHole(ObjOperandId objId
,
6002 Int32OperandId indexId
,
6005 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6007 Register obj
= allocator
.useRegister(masm
, objId
);
6008 Register index
= allocator
.useRegister(masm
, indexId
);
6009 ConstantOrRegister val
= allocator
.useConstantOrRegister(masm
, rhsId
);
6011 AutoScratchRegister
scratch(allocator
, masm
);
6013 FailurePath
* failure
;
6014 if (!addFailurePath(&failure
)) {
6018 // Load obj->elements in scratch.
6019 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch
);
6021 EmitAssertExtensibleElements(masm
, scratch
);
6023 EmitAssertWritableArrayLengthElements(masm
, scratch
);
6026 BaseObjectElementIndex
element(scratch
, index
);
6027 Address
initLength(scratch
, ObjectElements::offsetOfInitializedLength());
6028 Address
elementsFlags(scratch
, ObjectElements::offsetOfFlags());
6030 // We don't have enough registers on x86 so use InvalidReg. This will emit
6031 // slightly less efficient code on x86.
6032 Register spectreTemp
= InvalidReg
;
6034 Label storeSkipPreBarrier
;
6037 Label inBounds
, outOfBounds
;
6038 masm
.spectreBoundsCheck32(index
, initLength
, spectreTemp
, &outOfBounds
);
6039 masm
.jump(&inBounds
);
6041 // If we're out-of-bounds, only handle the index == initLength case.
6042 masm
.bind(&outOfBounds
);
6043 masm
.branch32(Assembler::NotEqual
, initLength
, index
, failure
->label());
6045 // If index < capacity, we can add a dense element inline. If not we
6046 // need to allocate more elements.
6047 Label allocElement
, addNewElement
;
6048 Address
capacity(scratch
, ObjectElements::offsetOfCapacity());
6049 masm
.spectreBoundsCheck32(index
, capacity
, spectreTemp
, &allocElement
);
6050 masm
.jump(&addNewElement
);
6052 masm
.bind(&allocElement
);
6054 LiveRegisterSet
save(GeneralRegisterSet::Volatile(),
6055 liveVolatileFloatRegs());
6056 save
.takeUnchecked(scratch
);
6057 masm
.PushRegsInMask(save
);
6059 using Fn
= bool (*)(JSContext
* cx
, NativeObject
* obj
);
6060 masm
.setupUnalignedABICall(scratch
);
6061 masm
.loadJSContext(scratch
);
6062 masm
.passABIArg(scratch
);
6063 masm
.passABIArg(obj
);
6064 masm
.callWithABI
<Fn
, NativeObject::addDenseElementPure
>();
6065 masm
.storeCallPointerResult(scratch
);
6067 masm
.PopRegsInMask(save
);
6068 masm
.branchIfFalseBool(scratch
, failure
->label());
6070 // Load the reallocated elements pointer.
6071 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch
);
6073 masm
.bind(&addNewElement
);
6075 // Increment initLength.
6076 masm
.add32(Imm32(1), initLength
);
6078 // If length is now <= index, increment length too.
6079 Label skipIncrementLength
;
6080 Address
length(scratch
, ObjectElements::offsetOfLength());
6081 masm
.branch32(Assembler::Above
, length
, index
, &skipIncrementLength
);
6082 masm
.add32(Imm32(1), length
);
6083 masm
.bind(&skipIncrementLength
);
6085 // Skip EmitPreBarrier as the memory is uninitialized.
6086 masm
.jump(&storeSkipPreBarrier
);
6088 masm
.bind(&inBounds
);
6090 // Fail if index >= initLength.
6091 masm
.spectreBoundsCheck32(index
, initLength
, spectreTemp
, failure
->label());
6094 EmitPreBarrier(masm
, element
, MIRType::Value
);
6096 masm
.bind(&storeSkipPreBarrier
);
6097 EmitStoreDenseElement(masm
, val
, element
);
6099 emitPostBarrierElement(obj
, val
, scratch
, index
);
6103 bool CacheIRCompiler::emitArrayPush(ObjOperandId objId
, ValOperandId rhsId
) {
6104 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6106 AutoOutputRegister
output(*this);
6107 Register obj
= allocator
.useRegister(masm
, objId
);
6108 ValueOperand val
= allocator
.useValueRegister(masm
, rhsId
);
6110 AutoScratchRegisterMaybeOutput
scratchLength(allocator
, masm
, output
);
6111 AutoScratchRegisterMaybeOutputType
scratch(allocator
, masm
, output
);
6113 FailurePath
* failure
;
6114 if (!addFailurePath(&failure
)) {
6118 // Load obj->elements in scratch.
6119 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch
);
6121 EmitAssertExtensibleElements(masm
, scratch
);
6122 EmitAssertWritableArrayLengthElements(masm
, scratch
);
6124 Address
elementsInitLength(scratch
,
6125 ObjectElements::offsetOfInitializedLength());
6126 Address
elementsLength(scratch
, ObjectElements::offsetOfLength());
6127 Address
capacity(scratch
, ObjectElements::offsetOfCapacity());
6129 // Fail if length != initLength.
6130 masm
.load32(elementsInitLength
, scratchLength
);
6131 masm
.branch32(Assembler::NotEqual
, elementsLength
, scratchLength
,
6134 // If scratchLength < capacity, we can add a dense element inline. If not we
6135 // need to allocate more elements.
6136 Label allocElement
, addNewElement
;
6137 masm
.spectreBoundsCheck32(scratchLength
, capacity
, InvalidReg
, &allocElement
);
6138 masm
.jump(&addNewElement
);
6140 masm
.bind(&allocElement
);
6142 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
6143 save
.takeUnchecked(scratch
);
6144 masm
.PushRegsInMask(save
);
6146 using Fn
= bool (*)(JSContext
* cx
, NativeObject
* obj
);
6147 masm
.setupUnalignedABICall(scratch
);
6148 masm
.loadJSContext(scratch
);
6149 masm
.passABIArg(scratch
);
6150 masm
.passABIArg(obj
);
6151 masm
.callWithABI
<Fn
, NativeObject::addDenseElementPure
>();
6152 masm
.storeCallPointerResult(scratch
);
6154 masm
.PopRegsInMask(save
);
6155 masm
.branchIfFalseBool(scratch
, failure
->label());
6157 // Load the reallocated elements pointer.
6158 masm
.loadPtr(Address(obj
, NativeObject::offsetOfElements()), scratch
);
6160 masm
.bind(&addNewElement
);
6162 // Increment initLength and length.
6163 masm
.add32(Imm32(1), elementsInitLength
);
6164 masm
.add32(Imm32(1), elementsLength
);
6167 BaseObjectElementIndex
element(scratch
, scratchLength
);
6168 masm
.storeValue(val
, element
);
6169 emitPostBarrierElement(obj
, val
, scratch
, scratchLength
);
6171 // Return value is new length.
6172 masm
.add32(Imm32(1), scratchLength
);
6173 masm
.tagValue(JSVAL_TYPE_INT32
, scratchLength
, output
.valueReg());
6178 bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId
,
6179 Scalar::Type elementType
,
6180 IntPtrOperandId indexId
,
6183 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6184 Register obj
= allocator
.useRegister(masm
, objId
);
6185 Register index
= allocator
.useRegister(masm
, indexId
);
6187 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
6189 Maybe
<Register
> valInt32
;
6190 Maybe
<Register
> valBigInt
;
6191 switch (elementType
) {
6195 case Scalar::Uint16
:
6197 case Scalar::Uint32
:
6198 case Scalar::Uint8Clamped
:
6199 valInt32
.emplace(allocator
.useRegister(masm
, Int32OperandId(rhsId
)));
6202 case Scalar::Float32
:
6203 case Scalar::Float64
:
6204 allocator
.ensureDoubleRegister(masm
, NumberOperandId(rhsId
),
6208 case Scalar::BigInt64
:
6209 case Scalar::BigUint64
:
6210 valBigInt
.emplace(allocator
.useRegister(masm
, BigIntOperandId(rhsId
)));
6213 case Scalar::MaxTypedArrayViewType
:
6215 case Scalar::Simd128
:
6216 MOZ_CRASH("Unsupported TypedArray type");
6219 AutoScratchRegister
scratch1(allocator
, masm
);
6220 Maybe
<AutoScratchRegister
> scratch2
;
6221 Maybe
<AutoSpectreBoundsScratchRegister
> spectreScratch
;
6222 if (Scalar::isBigIntType(elementType
)) {
6223 scratch2
.emplace(allocator
, masm
);
6225 spectreScratch
.emplace(allocator
, masm
);
6228 FailurePath
* failure
= nullptr;
6230 if (!addFailurePath(&failure
)) {
6237 Register spectreTemp
= scratch2
? scratch2
->get() : spectreScratch
->get();
6238 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch1
);
6239 masm
.spectreBoundsCheckPtr(index
, scratch1
, spectreTemp
,
6240 handleOOB
? &done
: failure
->label());
6242 // Load the elements vector.
6243 masm
.loadPtr(Address(obj
, ArrayBufferViewObject::dataOffset()), scratch1
);
6245 BaseIndex
dest(scratch1
, index
, ScaleFromScalarType(elementType
));
6247 if (Scalar::isBigIntType(elementType
)) {
6249 Register64
temp(scratch2
->get());
6251 // We don't have more registers available on x86, so spill |obj|.
6253 Register64
temp(scratch2
->get(), obj
);
6256 masm
.loadBigInt64(*valBigInt
, temp
);
6257 masm
.storeToTypedBigIntArray(elementType
, temp
, dest
);
6262 } else if (elementType
== Scalar::Float32
) {
6263 ScratchFloat32Scope
fpscratch(masm
);
6264 masm
.convertDoubleToFloat32(floatScratch0
, fpscratch
);
6265 masm
.storeToTypedFloatArray(elementType
, fpscratch
, dest
);
6266 } else if (elementType
== Scalar::Float64
) {
6267 masm
.storeToTypedFloatArray(elementType
, floatScratch0
, dest
);
6269 masm
.storeToTypedIntArray(elementType
, *valInt32
, dest
);
6276 static gc::Heap
InitialBigIntHeap(JSContext
* cx
) {
6277 JS::Zone
* zone
= cx
->zone();
6278 return zone
->allocNurseryBigInts() ? gc::Heap::Default
: gc::Heap::Tenured
;
6281 static void EmitAllocateBigInt(MacroAssembler
& masm
, Register result
,
6282 Register temp
, const LiveRegisterSet
& liveSet
,
6283 gc::Heap initialHeap
, Label
* fail
) {
6284 Label fallback
, done
;
6285 masm
.newGCBigInt(result
, temp
, initialHeap
, &fallback
);
6288 masm
.bind(&fallback
);
6290 // Request a minor collection at a later time if nursery allocation failed.
6291 bool requestMinorGC
= initialHeap
== gc::Heap::Default
;
6293 masm
.PushRegsInMask(liveSet
);
6294 using Fn
= void* (*)(JSContext
* cx
, bool requestMinorGC
);
6295 masm
.setupUnalignedABICall(temp
);
6296 masm
.loadJSContext(temp
);
6297 masm
.passABIArg(temp
);
6298 masm
.move32(Imm32(requestMinorGC
), result
);
6299 masm
.passABIArg(result
);
6300 masm
.callWithABI
<Fn
, jit::AllocateBigIntNoGC
>();
6301 masm
.storeCallPointerResult(result
);
6303 masm
.PopRegsInMask(liveSet
);
6304 masm
.branchPtr(Assembler::Equal
, result
, ImmWord(0), fail
);
6309 bool CacheIRCompiler::emitLoadTypedArrayElementResult(
6310 ObjOperandId objId
, IntPtrOperandId indexId
, Scalar::Type elementType
,
6311 bool handleOOB
, bool forceDoubleForUint32
) {
6312 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6313 AutoOutputRegister
output(*this);
6314 Register obj
= allocator
.useRegister(masm
, objId
);
6315 Register index
= allocator
.useRegister(masm
, indexId
);
6317 AutoScratchRegister
scratch1(allocator
, masm
);
6319 AutoScratchRegister
scratch2(allocator
, masm
);
6321 // There are too few registers available on x86, so we may need to reuse the
6322 // output's scratch register.
6323 AutoScratchRegisterMaybeOutput
scratch2(allocator
, masm
, output
);
6326 FailurePath
* failure
;
6327 if (!addFailurePath(&failure
)) {
6333 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch1
);
6334 masm
.spectreBoundsCheckPtr(index
, scratch1
, scratch2
,
6335 handleOOB
? &outOfBounds
: failure
->label());
6337 // Allocate BigInt if needed. The code after this should be infallible.
6338 Maybe
<Register
> bigInt
;
6339 if (Scalar::isBigIntType(elementType
)) {
6340 bigInt
.emplace(output
.valueReg().scratchReg());
6342 LiveRegisterSet
save(GeneralRegisterSet::Volatile(),
6343 liveVolatileFloatRegs());
6344 save
.takeUnchecked(scratch1
);
6345 save
.takeUnchecked(scratch2
);
6346 save
.takeUnchecked(output
);
6348 gc::Heap initialHeap
= InitialBigIntHeap(cx_
);
6349 EmitAllocateBigInt(masm
, *bigInt
, scratch1
, save
, initialHeap
,
6353 // Load the elements vector.
6354 masm
.loadPtr(Address(obj
, ArrayBufferViewObject::dataOffset()), scratch1
);
6357 BaseIndex
source(scratch1
, index
, ScaleFromScalarType(elementType
));
6359 if (Scalar::isBigIntType(elementType
)) {
6361 Register64
temp(scratch2
);
6363 // We don't have more registers available on x86, so spill |obj| and
6364 // additionally use the output's type register.
6365 MOZ_ASSERT(output
.valueReg().scratchReg() != output
.valueReg().typeReg());
6367 Register64
temp(output
.valueReg().typeReg(), obj
);
6370 masm
.loadFromTypedBigIntArray(elementType
, source
, *bigInt
, temp
);
6376 masm
.tagValue(JSVAL_TYPE_BIGINT
, *bigInt
, output
.valueReg());
6378 MacroAssembler::Uint32Mode uint32Mode
=
6379 forceDoubleForUint32
? MacroAssembler::Uint32Mode::ForceDouble
6380 : MacroAssembler::Uint32Mode::FailOnDouble
;
6381 masm
.loadFromTypedArray(elementType
, source
, output
.valueReg(), uint32Mode
,
6382 scratch1
, failure
->label());
6389 masm
.bind(&outOfBounds
);
6390 masm
.moveValue(UndefinedValue(), output
.valueReg());
6398 static void EmitDataViewBoundsCheck(MacroAssembler
& masm
, size_t byteSize
,
6399 Register obj
, Register offset
,
6400 Register scratch
, Label
* fail
) {
6401 // Ensure both offset < length and offset + (byteSize - 1) < length.
6402 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch
);
6403 if (byteSize
== 1) {
6404 masm
.spectreBoundsCheckPtr(offset
, scratch
, InvalidReg
, fail
);
6406 // temp := length - (byteSize - 1)
6407 // if temp < 0: fail
6408 // if offset >= temp: fail
6409 masm
.branchSubPtr(Assembler::Signed
, Imm32(byteSize
- 1), scratch
, fail
);
6410 masm
.spectreBoundsCheckPtr(offset
, scratch
, InvalidReg
, fail
);
6414 bool CacheIRCompiler::emitLoadDataViewValueResult(
6415 ObjOperandId objId
, IntPtrOperandId offsetId
,
6416 BooleanOperandId littleEndianId
, Scalar::Type elementType
,
6417 bool forceDoubleForUint32
) {
6418 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6420 AutoOutputRegister
output(*this);
6421 Register obj
= allocator
.useRegister(masm
, objId
);
6422 Register offset
= allocator
.useRegister(masm
, offsetId
);
6423 Register littleEndian
= allocator
.useRegister(masm
, littleEndianId
);
6425 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
6427 Register64 outputReg64
= output
.valueReg().toRegister64();
6428 Register outputScratch
= outputReg64
.scratchReg();
6430 FailurePath
* failure
;
6431 if (!addFailurePath(&failure
)) {
6435 const size_t byteSize
= Scalar::byteSize(elementType
);
6437 EmitDataViewBoundsCheck(masm
, byteSize
, obj
, offset
, outputScratch
,
6440 masm
.loadPtr(Address(obj
, DataViewObject::dataOffset()), outputScratch
);
6443 BaseIndex
source(outputScratch
, offset
, TimesOne
);
6444 switch (elementType
) {
6446 masm
.load8SignExtend(source
, outputScratch
);
6449 masm
.load8ZeroExtend(source
, outputScratch
);
6452 masm
.load16UnalignedSignExtend(source
, outputScratch
);
6454 case Scalar::Uint16
:
6455 masm
.load16UnalignedZeroExtend(source
, outputScratch
);
6458 case Scalar::Uint32
:
6459 case Scalar::Float32
:
6460 masm
.load32Unaligned(source
, outputScratch
);
6462 case Scalar::Float64
:
6463 case Scalar::BigInt64
:
6464 case Scalar::BigUint64
:
6465 masm
.load64Unaligned(source
, outputReg64
);
6467 case Scalar::Uint8Clamped
:
6469 MOZ_CRASH("Invalid typed array type");
6472 // Swap the bytes in the loaded value.
6475 masm
.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual
: Assembler::Equal
,
6476 littleEndian
, Imm32(0), &skip
);
6478 switch (elementType
) {
6480 masm
.byteSwap16SignExtend(outputScratch
);
6482 case Scalar::Uint16
:
6483 masm
.byteSwap16ZeroExtend(outputScratch
);
6486 case Scalar::Uint32
:
6487 case Scalar::Float32
:
6488 masm
.byteSwap32(outputScratch
);
6490 case Scalar::Float64
:
6491 case Scalar::BigInt64
:
6492 case Scalar::BigUint64
:
6493 masm
.byteSwap64(outputReg64
);
6497 case Scalar::Uint8Clamped
:
6499 MOZ_CRASH("Invalid type");
6505 // Move the value into the output register.
6506 switch (elementType
) {
6510 case Scalar::Uint16
:
6512 masm
.tagValue(JSVAL_TYPE_INT32
, outputScratch
, output
.valueReg());
6514 case Scalar::Uint32
: {
6515 MacroAssembler::Uint32Mode uint32Mode
=
6516 forceDoubleForUint32
? MacroAssembler::Uint32Mode::ForceDouble
6517 : MacroAssembler::Uint32Mode::FailOnDouble
;
6518 masm
.boxUint32(outputScratch
, output
.valueReg(), uint32Mode
,
6522 case Scalar::Float32
: {
6523 FloatRegister scratchFloat32
= floatScratch0
.get().asSingle();
6524 masm
.moveGPRToFloat32(outputScratch
, scratchFloat32
);
6525 masm
.canonicalizeFloat(scratchFloat32
);
6526 masm
.convertFloat32ToDouble(scratchFloat32
, floatScratch0
);
6527 masm
.boxDouble(floatScratch0
, output
.valueReg(), floatScratch0
);
6530 case Scalar::Float64
:
6531 masm
.moveGPR64ToDouble(outputReg64
, floatScratch0
);
6532 masm
.canonicalizeDouble(floatScratch0
);
6533 masm
.boxDouble(floatScratch0
, output
.valueReg(), floatScratch0
);
6535 case Scalar::BigInt64
:
6536 case Scalar::BigUint64
: {
6537 // We need two extra registers. Reuse the obj/littleEndian registers.
6538 Register bigInt
= obj
;
6539 Register bigIntScratch
= littleEndian
;
6541 masm
.push(bigIntScratch
);
6543 LiveRegisterSet
save(GeneralRegisterSet::Volatile(),
6544 liveVolatileFloatRegs());
6545 save
.takeUnchecked(bigInt
);
6546 save
.takeUnchecked(bigIntScratch
);
6547 gc::Heap initialHeap
= InitialBigIntHeap(cx_
);
6548 EmitAllocateBigInt(masm
, bigInt
, bigIntScratch
, save
, initialHeap
, &fail
);
6552 masm
.pop(bigIntScratch
);
6554 masm
.jump(failure
->label());
6557 masm
.initializeBigInt64(elementType
, bigInt
, outputReg64
);
6558 masm
.tagValue(JSVAL_TYPE_BIGINT
, bigInt
, output
.valueReg());
6559 masm
.pop(bigIntScratch
);
6563 case Scalar::Uint8Clamped
:
6565 MOZ_CRASH("Invalid typed array type");
6571 bool CacheIRCompiler::emitStoreDataViewValueResult(
6572 ObjOperandId objId
, IntPtrOperandId offsetId
, uint32_t valueId
,
6573 BooleanOperandId littleEndianId
, Scalar::Type elementType
) {
6574 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6576 AutoOutputRegister
output(*this);
6577 #ifdef JS_CODEGEN_X86
6578 // Use a scratch register to avoid running out of the registers.
6579 Register obj
= output
.valueReg().typeReg();
6580 allocator
.copyToScratchRegister(masm
, objId
, obj
);
6582 Register obj
= allocator
.useRegister(masm
, objId
);
6584 Register offset
= allocator
.useRegister(masm
, offsetId
);
6585 Register littleEndian
= allocator
.useRegister(masm
, littleEndianId
);
6587 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
6588 Maybe
<Register
> valInt32
;
6589 Maybe
<Register
> valBigInt
;
6590 switch (elementType
) {
6594 case Scalar::Uint16
:
6596 case Scalar::Uint32
:
6597 case Scalar::Uint8Clamped
:
6598 valInt32
.emplace(allocator
.useRegister(masm
, Int32OperandId(valueId
)));
6601 case Scalar::Float32
:
6602 case Scalar::Float64
:
6603 allocator
.ensureDoubleRegister(masm
, NumberOperandId(valueId
),
6607 case Scalar::BigInt64
:
6608 case Scalar::BigUint64
:
6609 valBigInt
.emplace(allocator
.useRegister(masm
, BigIntOperandId(valueId
)));
6612 case Scalar::MaxTypedArrayViewType
:
6614 case Scalar::Simd128
:
6615 MOZ_CRASH("Unsupported type");
6618 Register scratch1
= output
.valueReg().scratchReg();
6619 MOZ_ASSERT(scratch1
!= obj
, "scratchReg must not be typeReg");
6621 // On platforms with enough registers, |scratch2| is an extra scratch register
6622 // (pair) used for byte-swapping the value.
6623 #ifndef JS_CODEGEN_X86
6624 mozilla::MaybeOneOf
<AutoScratchRegister
, AutoScratchRegister64
> scratch2
;
6625 switch (elementType
) {
6630 case Scalar::Uint16
:
6632 case Scalar::Uint32
:
6633 case Scalar::Float32
:
6634 scratch2
.construct
<AutoScratchRegister
>(allocator
, masm
);
6636 case Scalar::Float64
:
6637 case Scalar::BigInt64
:
6638 case Scalar::BigUint64
:
6639 scratch2
.construct
<AutoScratchRegister64
>(allocator
, masm
);
6641 case Scalar::Uint8Clamped
:
6643 MOZ_CRASH("Invalid type");
6647 FailurePath
* failure
;
6648 if (!addFailurePath(&failure
)) {
6652 const size_t byteSize
= Scalar::byteSize(elementType
);
6654 EmitDataViewBoundsCheck(masm
, byteSize
, obj
, offset
, scratch1
,
6657 masm
.loadPtr(Address(obj
, DataViewObject::dataOffset()), scratch1
);
6658 BaseIndex
dest(scratch1
, offset
, TimesOne
);
6660 if (byteSize
== 1) {
6661 // Byte swapping has no effect, so just do the byte store.
6662 masm
.store8(*valInt32
, dest
);
6663 masm
.moveValue(UndefinedValue(), output
.valueReg());
6667 // On 32-bit x86, |obj| is already a scratch register so use that. If we need
6668 // a Register64 we also use the littleEndian register and use the stack
6669 // location for the check below.
6670 bool pushedLittleEndian
= false;
6671 #ifdef JS_CODEGEN_X86
6672 if (byteSize
== 8) {
6673 masm
.push(littleEndian
);
6674 pushedLittleEndian
= true;
6676 auto valScratch32
= [&]() -> Register
{ return obj
; };
6677 auto valScratch64
= [&]() -> Register64
{
6678 return Register64(obj
, littleEndian
);
6681 auto valScratch32
= [&]() -> Register
{
6682 return scratch2
.ref
<AutoScratchRegister
>();
6684 auto valScratch64
= [&]() -> Register64
{
6685 return scratch2
.ref
<AutoScratchRegister64
>();
6689 // Load the value into a gpr register.
6690 switch (elementType
) {
6692 case Scalar::Uint16
:
6694 case Scalar::Uint32
:
6695 masm
.move32(*valInt32
, valScratch32());
6697 case Scalar::Float32
: {
6698 FloatRegister scratchFloat32
= floatScratch0
.get().asSingle();
6699 masm
.convertDoubleToFloat32(floatScratch0
, scratchFloat32
);
6700 masm
.canonicalizeFloatIfDeterministic(scratchFloat32
);
6701 masm
.moveFloat32ToGPR(scratchFloat32
, valScratch32());
6704 case Scalar::Float64
: {
6705 masm
.canonicalizeDoubleIfDeterministic(floatScratch0
);
6706 masm
.moveDoubleToGPR64(floatScratch0
, valScratch64());
6709 case Scalar::BigInt64
:
6710 case Scalar::BigUint64
:
6711 masm
.loadBigInt64(*valBigInt
, valScratch64());
6715 case Scalar::Uint8Clamped
:
6717 MOZ_CRASH("Invalid type");
6720 // Swap the bytes in the loaded value.
6722 if (pushedLittleEndian
) {
6723 masm
.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual
: Assembler::Equal
,
6724 Address(masm
.getStackPointer(), 0), Imm32(0), &skip
);
6726 masm
.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual
: Assembler::Equal
,
6727 littleEndian
, Imm32(0), &skip
);
6729 switch (elementType
) {
6731 masm
.byteSwap16SignExtend(valScratch32());
6733 case Scalar::Uint16
:
6734 masm
.byteSwap16ZeroExtend(valScratch32());
6737 case Scalar::Uint32
:
6738 case Scalar::Float32
:
6739 masm
.byteSwap32(valScratch32());
6741 case Scalar::Float64
:
6742 case Scalar::BigInt64
:
6743 case Scalar::BigUint64
:
6744 masm
.byteSwap64(valScratch64());
6748 case Scalar::Uint8Clamped
:
6750 MOZ_CRASH("Invalid type");
6755 switch (elementType
) {
6757 case Scalar::Uint16
:
6758 masm
.store16Unaligned(valScratch32(), dest
);
6761 case Scalar::Uint32
:
6762 case Scalar::Float32
:
6763 masm
.store32Unaligned(valScratch32(), dest
);
6765 case Scalar::Float64
:
6766 case Scalar::BigInt64
:
6767 case Scalar::BigUint64
:
6768 masm
.store64Unaligned(valScratch64(), dest
);
6772 case Scalar::Uint8Clamped
:
6774 MOZ_CRASH("Invalid typed array type");
6777 #ifdef JS_CODEGEN_X86
6778 // Restore registers.
6779 if (pushedLittleEndian
) {
6780 masm
.pop(littleEndian
);
6784 masm
.moveValue(UndefinedValue(), output
.valueReg());
6788 bool CacheIRCompiler::emitStoreFixedSlotUndefinedResult(ObjOperandId objId
,
6789 uint32_t offsetOffset
,
6790 ValOperandId rhsId
) {
6791 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6793 AutoOutputRegister
output(*this);
6794 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
6795 Register obj
= allocator
.useRegister(masm
, objId
);
6796 ValueOperand val
= allocator
.useValueRegister(masm
, rhsId
);
6798 StubFieldOffset
offset(offsetOffset
, StubField::Type::RawInt32
);
6799 emitLoadStubField(offset
, scratch
);
6801 BaseIndex
slot(obj
, scratch
, TimesOne
);
6802 EmitPreBarrier(masm
, slot
, MIRType::Value
);
6803 masm
.storeValue(val
, slot
);
6804 emitPostBarrierSlot(obj
, val
, scratch
);
6806 masm
.moveValue(UndefinedValue(), output
.valueReg());
6810 bool CacheIRCompiler::emitLoadObjectResult(ObjOperandId objId
) {
6811 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6812 AutoOutputRegister
output(*this);
6813 Register obj
= allocator
.useRegister(masm
, objId
);
6815 EmitStoreResult(masm
, obj
, JSVAL_TYPE_OBJECT
, output
);
6820 bool CacheIRCompiler::emitLoadStringResult(StringOperandId strId
) {
6821 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6822 AutoOutputRegister
output(*this);
6823 Register str
= allocator
.useRegister(masm
, strId
);
6825 masm
.tagValue(JSVAL_TYPE_STRING
, str
, output
.valueReg());
6830 bool CacheIRCompiler::emitLoadSymbolResult(SymbolOperandId symId
) {
6831 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6832 AutoOutputRegister
output(*this);
6833 Register sym
= allocator
.useRegister(masm
, symId
);
6835 masm
.tagValue(JSVAL_TYPE_SYMBOL
, sym
, output
.valueReg());
6840 bool CacheIRCompiler::emitLoadInt32Result(Int32OperandId valId
) {
6841 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6842 AutoOutputRegister
output(*this);
6843 Register val
= allocator
.useRegister(masm
, valId
);
6845 masm
.tagValue(JSVAL_TYPE_INT32
, val
, output
.valueReg());
6850 bool CacheIRCompiler::emitLoadBigIntResult(BigIntOperandId valId
) {
6851 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6852 AutoOutputRegister
output(*this);
6853 Register val
= allocator
.useRegister(masm
, valId
);
6855 masm
.tagValue(JSVAL_TYPE_BIGINT
, val
, output
.valueReg());
6860 bool CacheIRCompiler::emitLoadDoubleResult(NumberOperandId valId
) {
6861 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6862 AutoOutputRegister
output(*this);
6863 ValueOperand val
= allocator
.useValueRegister(masm
, valId
);
6867 masm
.branchTestDouble(Assembler::Equal
, val
, &ok
);
6868 masm
.branchTestInt32(Assembler::Equal
, val
, &ok
);
6869 masm
.assumeUnreachable("input must be double or int32");
6873 masm
.moveValue(val
, output
.valueReg());
6874 masm
.convertInt32ValueToDouble(output
.valueReg());
6879 bool CacheIRCompiler::emitLoadTypeOfObjectResult(ObjOperandId objId
) {
6880 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6881 AutoOutputRegister
output(*this);
6882 Register obj
= allocator
.useRegister(masm
, objId
);
6883 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
6885 Label slowCheck
, isObject
, isCallable
, isUndefined
, done
;
6886 masm
.typeOfObject(obj
, scratch
, &slowCheck
, &isObject
, &isCallable
,
6889 masm
.bind(&isCallable
);
6890 masm
.moveValue(StringValue(cx_
->names().function
), output
.valueReg());
6893 masm
.bind(&isUndefined
);
6894 masm
.moveValue(StringValue(cx_
->names().undefined
), output
.valueReg());
6897 masm
.bind(&isObject
);
6898 masm
.moveValue(StringValue(cx_
->names().object
), output
.valueReg());
6902 masm
.bind(&slowCheck
);
6903 LiveRegisterSet
save(GeneralRegisterSet::Volatile(),
6904 liveVolatileFloatRegs());
6905 masm
.PushRegsInMask(save
);
6907 using Fn
= JSString
* (*)(JSObject
* obj
, JSRuntime
* rt
);
6908 masm
.setupUnalignedABICall(scratch
);
6909 masm
.passABIArg(obj
);
6910 masm
.movePtr(ImmPtr(cx_
->runtime()), scratch
);
6911 masm
.passABIArg(scratch
);
6912 masm
.callWithABI
<Fn
, TypeOfNameObject
>();
6913 masm
.storeCallPointerResult(scratch
);
6915 LiveRegisterSet ignore
;
6916 ignore
.add(scratch
);
6917 masm
.PopRegsInMaskIgnore(save
, ignore
);
6919 masm
.tagValue(JSVAL_TYPE_STRING
, scratch
, output
.valueReg());
6926 bool CacheIRCompiler::emitLoadInt32TruthyResult(ValOperandId inputId
) {
6927 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6928 AutoOutputRegister
output(*this);
6929 ValueOperand val
= allocator
.useValueRegister(masm
, inputId
);
6931 Label ifFalse
, done
;
6932 masm
.branchTestInt32Truthy(false, val
, &ifFalse
);
6933 masm
.moveValue(BooleanValue(true), output
.valueReg());
6936 masm
.bind(&ifFalse
);
6937 masm
.moveValue(BooleanValue(false), output
.valueReg());
6943 bool CacheIRCompiler::emitLoadStringTruthyResult(StringOperandId strId
) {
6944 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6945 AutoOutputRegister
output(*this);
6946 Register str
= allocator
.useRegister(masm
, strId
);
6948 Label ifFalse
, done
;
6949 masm
.branch32(Assembler::Equal
, Address(str
, JSString::offsetOfLength()),
6950 Imm32(0), &ifFalse
);
6951 masm
.moveValue(BooleanValue(true), output
.valueReg());
6954 masm
.bind(&ifFalse
);
6955 masm
.moveValue(BooleanValue(false), output
.valueReg());
6961 bool CacheIRCompiler::emitLoadDoubleTruthyResult(NumberOperandId inputId
) {
6962 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6963 AutoOutputRegister
output(*this);
6965 AutoScratchFloatRegister
floatReg(this);
6967 allocator
.ensureDoubleRegister(masm
, inputId
, floatReg
);
6969 Label ifFalse
, done
;
6971 masm
.branchTestDoubleTruthy(false, floatReg
, &ifFalse
);
6972 masm
.moveValue(BooleanValue(true), output
.valueReg());
6975 masm
.bind(&ifFalse
);
6976 masm
.moveValue(BooleanValue(false), output
.valueReg());
6982 bool CacheIRCompiler::emitLoadObjectTruthyResult(ObjOperandId objId
) {
6983 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
6984 AutoOutputRegister
output(*this);
6985 Register obj
= allocator
.useRegister(masm
, objId
);
6986 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
6988 Label emulatesUndefined
, slowPath
, done
;
6989 masm
.branchIfObjectEmulatesUndefined(obj
, scratch
, &slowPath
,
6990 &emulatesUndefined
);
6991 masm
.moveValue(BooleanValue(true), output
.valueReg());
6994 masm
.bind(&emulatesUndefined
);
6995 masm
.moveValue(BooleanValue(false), output
.valueReg());
6998 masm
.bind(&slowPath
);
7000 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
7001 liveVolatileFloatRegs());
7002 volatileRegs
.takeUnchecked(scratch
);
7003 volatileRegs
.takeUnchecked(output
);
7004 masm
.PushRegsInMask(volatileRegs
);
7006 using Fn
= bool (*)(JSObject
* obj
);
7007 masm
.setupUnalignedABICall(scratch
);
7008 masm
.passABIArg(obj
);
7009 masm
.callWithABI
<Fn
, js::EmulatesUndefined
>();
7010 masm
.storeCallBoolResult(scratch
);
7011 masm
.xor32(Imm32(1), scratch
);
7013 masm
.PopRegsInMask(volatileRegs
);
7015 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch
, output
.valueReg());
7022 bool CacheIRCompiler::emitLoadBigIntTruthyResult(BigIntOperandId bigIntId
) {
7023 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7024 AutoOutputRegister
output(*this);
7025 Register bigInt
= allocator
.useRegister(masm
, bigIntId
);
7027 Label ifFalse
, done
;
7028 masm
.branch32(Assembler::Equal
,
7029 Address(bigInt
, BigInt::offsetOfDigitLength()), Imm32(0),
7031 masm
.moveValue(BooleanValue(true), output
.valueReg());
7034 masm
.bind(&ifFalse
);
7035 masm
.moveValue(BooleanValue(false), output
.valueReg());
7041 bool CacheIRCompiler::emitLoadValueTruthyResult(ValOperandId inputId
) {
7042 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7044 AutoOutputRegister
output(*this);
7045 ValueOperand value
= allocator
.useValueRegister(masm
, inputId
);
7046 AutoScratchRegisterMaybeOutput
scratch1(allocator
, masm
, output
);
7047 AutoScratchRegister
scratch2(allocator
, masm
);
7048 AutoScratchFloatRegister
floatReg(this);
7050 Label ifFalse
, ifTrue
, done
;
7053 ScratchTagScope
tag(masm
, value
);
7054 masm
.splitTagForTest(value
, tag
);
7056 masm
.branchTestUndefined(Assembler::Equal
, tag
, &ifFalse
);
7057 masm
.branchTestNull(Assembler::Equal
, tag
, &ifFalse
);
7060 masm
.branchTestBoolean(Assembler::NotEqual
, tag
, ¬Boolean
);
7062 ScratchTagScopeRelease
_(&tag
);
7063 masm
.branchTestBooleanTruthy(false, value
, &ifFalse
);
7066 masm
.bind(¬Boolean
);
7069 masm
.branchTestInt32(Assembler::NotEqual
, tag
, ¬Int32
);
7071 ScratchTagScopeRelease
_(&tag
);
7072 masm
.branchTestInt32Truthy(false, value
, &ifFalse
);
7075 masm
.bind(¬Int32
);
7078 masm
.branchTestObject(Assembler::NotEqual
, tag
, ¬Object
);
7080 ScratchTagScopeRelease
_(&tag
);
7082 Register obj
= masm
.extractObject(value
, scratch1
);
7085 masm
.branchIfObjectEmulatesUndefined(obj
, scratch2
, &slowPath
, &ifFalse
);
7088 masm
.bind(&slowPath
);
7090 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
7091 liveVolatileFloatRegs());
7092 volatileRegs
.takeUnchecked(scratch1
);
7093 volatileRegs
.takeUnchecked(scratch2
);
7094 volatileRegs
.takeUnchecked(output
);
7095 masm
.PushRegsInMask(volatileRegs
);
7097 using Fn
= bool (*)(JSObject
* obj
);
7098 masm
.setupUnalignedABICall(scratch2
);
7099 masm
.passABIArg(obj
);
7100 masm
.callWithABI
<Fn
, js::EmulatesUndefined
>();
7101 masm
.storeCallPointerResult(scratch2
);
7103 masm
.PopRegsInMask(volatileRegs
);
7105 masm
.branchIfTrueBool(scratch2
, &ifFalse
);
7109 masm
.bind(¬Object
);
7112 masm
.branchTestString(Assembler::NotEqual
, tag
, ¬String
);
7114 ScratchTagScopeRelease
_(&tag
);
7115 masm
.branchTestStringTruthy(false, value
, &ifFalse
);
7118 masm
.bind(¬String
);
7121 masm
.branchTestBigInt(Assembler::NotEqual
, tag
, ¬BigInt
);
7123 ScratchTagScopeRelease
_(&tag
);
7124 masm
.branchTestBigIntTruthy(false, value
, &ifFalse
);
7127 masm
.bind(¬BigInt
);
7129 masm
.branchTestSymbol(Assembler::Equal
, tag
, &ifTrue
);
7133 masm
.branchTestDouble(Assembler::Equal
, tag
, &isDouble
);
7134 masm
.assumeUnreachable("Unexpected value type");
7135 masm
.bind(&isDouble
);
7139 ScratchTagScopeRelease
_(&tag
);
7140 masm
.unboxDouble(value
, floatReg
);
7141 masm
.branchTestDoubleTruthy(false, floatReg
, &ifFalse
);
7144 // Fall through to true case.
7148 masm
.moveValue(BooleanValue(true), output
.valueReg());
7151 masm
.bind(&ifFalse
);
7152 masm
.moveValue(BooleanValue(false), output
.valueReg());
7158 bool CacheIRCompiler::emitComparePointerResultShared(JSOp op
,
7159 TypedOperandId lhsId
,
7160 TypedOperandId rhsId
) {
7161 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7162 AutoOutputRegister
output(*this);
7164 Register left
= allocator
.useRegister(masm
, lhsId
);
7165 Register right
= allocator
.useRegister(masm
, rhsId
);
7167 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
7170 masm
.branchPtr(JSOpToCondition(op
, /* signed = */ true), left
, right
,
7173 EmitStoreBoolean(masm
, false, output
);
7177 EmitStoreBoolean(masm
, true, output
);
7182 bool CacheIRCompiler::emitCompareObjectResult(JSOp op
, ObjOperandId lhsId
,
7183 ObjOperandId rhsId
) {
7184 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7185 return emitComparePointerResultShared(op
, lhsId
, rhsId
);
7188 bool CacheIRCompiler::emitCompareSymbolResult(JSOp op
, SymbolOperandId lhsId
,
7189 SymbolOperandId rhsId
) {
7190 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7191 return emitComparePointerResultShared(op
, lhsId
, rhsId
);
7194 bool CacheIRCompiler::emitCompareInt32Result(JSOp op
, Int32OperandId lhsId
,
7195 Int32OperandId rhsId
) {
7196 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7197 AutoOutputRegister
output(*this);
7198 Register left
= allocator
.useRegister(masm
, lhsId
);
7199 Register right
= allocator
.useRegister(masm
, rhsId
);
7202 masm
.branch32(JSOpToCondition(op
, /* signed = */ true), left
, right
, &ifTrue
);
7204 EmitStoreBoolean(masm
, false, output
);
7208 EmitStoreBoolean(masm
, true, output
);
7213 bool CacheIRCompiler::emitCompareDoubleResult(JSOp op
, NumberOperandId lhsId
,
7214 NumberOperandId rhsId
) {
7215 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7216 AutoOutputRegister
output(*this);
7218 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
7219 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg1
);
7221 FailurePath
* failure
;
7222 if (!addFailurePath(&failure
)) {
7226 allocator
.ensureDoubleRegister(masm
, lhsId
, floatScratch0
);
7227 allocator
.ensureDoubleRegister(masm
, rhsId
, floatScratch1
);
7230 masm
.branchDouble(JSOpToDoubleCondition(op
), floatScratch0
, floatScratch1
,
7232 EmitStoreBoolean(masm
, false, output
);
7236 EmitStoreBoolean(masm
, true, output
);
7241 bool CacheIRCompiler::emitCompareBigIntResult(JSOp op
, BigIntOperandId lhsId
,
7242 BigIntOperandId rhsId
) {
7243 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7244 AutoOutputRegister
output(*this);
7246 Register lhs
= allocator
.useRegister(masm
, lhsId
);
7247 Register rhs
= allocator
.useRegister(masm
, rhsId
);
7249 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
7251 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
7252 masm
.PushRegsInMask(save
);
7254 masm
.setupUnalignedABICall(scratch
);
7256 // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
7257 // - |left <= right| is implemented as |right >= left|.
7258 // - |left > right| is implemented as |right < left|.
7259 if (op
== JSOp::Le
|| op
== JSOp::Gt
) {
7260 masm
.passABIArg(rhs
);
7261 masm
.passABIArg(lhs
);
7263 masm
.passABIArg(lhs
);
7264 masm
.passABIArg(rhs
);
7267 using Fn
= bool (*)(BigInt
*, BigInt
*);
7269 if (op
== JSOp::Eq
|| op
== JSOp::StrictEq
) {
7270 fn
= jit::BigIntEqual
<EqualityKind::Equal
>;
7271 } else if (op
== JSOp::Ne
|| op
== JSOp::StrictNe
) {
7272 fn
= jit::BigIntEqual
<EqualityKind::NotEqual
>;
7273 } else if (op
== JSOp::Lt
|| op
== JSOp::Gt
) {
7274 fn
= jit::BigIntCompare
<ComparisonKind::LessThan
>;
7276 MOZ_ASSERT(op
== JSOp::Le
|| op
== JSOp::Ge
);
7277 fn
= jit::BigIntCompare
<ComparisonKind::GreaterThanOrEqual
>;
7280 masm
.callWithABI(DynamicFunction
<Fn
>(fn
));
7281 masm
.storeCallBoolResult(scratch
);
7283 LiveRegisterSet ignore
;
7284 ignore
.add(scratch
);
7285 masm
.PopRegsInMaskIgnore(save
, ignore
);
7287 EmitStoreResult(masm
, scratch
, JSVAL_TYPE_BOOLEAN
, output
);
7291 bool CacheIRCompiler::emitCompareBigIntInt32Result(JSOp op
,
7292 BigIntOperandId lhsId
,
7293 Int32OperandId rhsId
) {
7294 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7295 AutoOutputRegister
output(*this);
7296 Register bigInt
= allocator
.useRegister(masm
, lhsId
);
7297 Register int32
= allocator
.useRegister(masm
, rhsId
);
7299 AutoScratchRegisterMaybeOutput
scratch1(allocator
, masm
, output
);
7300 AutoScratchRegister
scratch2(allocator
, masm
);
7302 Label ifTrue
, ifFalse
;
7303 masm
.compareBigIntAndInt32(op
, bigInt
, int32
, scratch1
, scratch2
, &ifTrue
,
7307 masm
.bind(&ifFalse
);
7308 EmitStoreBoolean(masm
, false, output
);
7312 EmitStoreBoolean(masm
, true, output
);
7318 bool CacheIRCompiler::emitCompareBigIntNumberResult(JSOp op
,
7319 BigIntOperandId lhsId
,
7320 NumberOperandId rhsId
) {
7321 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7322 AutoOutputRegister
output(*this);
7324 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
7326 Register lhs
= allocator
.useRegister(masm
, lhsId
);
7327 allocator
.ensureDoubleRegister(masm
, rhsId
, floatScratch0
);
7329 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
7331 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
7332 masm
.PushRegsInMask(save
);
7334 masm
.setupUnalignedABICall(scratch
);
7336 // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
7337 // - |left <= right| is implemented as |right >= left|.
7338 // - |left > right| is implemented as |right < left|.
7339 if (op
== JSOp::Le
|| op
== JSOp::Gt
) {
7340 masm
.passABIArg(floatScratch0
, ABIType::Float64
);
7341 masm
.passABIArg(lhs
);
7343 masm
.passABIArg(lhs
);
7344 masm
.passABIArg(floatScratch0
, ABIType::Float64
);
7347 using FnBigIntNumber
= bool (*)(BigInt
*, double);
7348 using FnNumberBigInt
= bool (*)(double, BigInt
*);
7351 masm
.callWithABI
<FnBigIntNumber
,
7352 jit::BigIntNumberEqual
<EqualityKind::Equal
>>();
7356 masm
.callWithABI
<FnBigIntNumber
,
7357 jit::BigIntNumberEqual
<EqualityKind::NotEqual
>>();
7361 masm
.callWithABI
<FnBigIntNumber
,
7362 jit::BigIntNumberCompare
<ComparisonKind::LessThan
>>();
7366 masm
.callWithABI
<FnNumberBigInt
,
7367 jit::NumberBigIntCompare
<ComparisonKind::LessThan
>>();
7373 jit::NumberBigIntCompare
<ComparisonKind::GreaterThanOrEqual
>>();
7379 jit::BigIntNumberCompare
<ComparisonKind::GreaterThanOrEqual
>>();
7383 MOZ_CRASH("unhandled op");
7386 masm
.storeCallBoolResult(scratch
);
7388 LiveRegisterSet ignore
;
7389 ignore
.add(scratch
);
7390 masm
.PopRegsInMaskIgnore(save
, ignore
);
7392 EmitStoreResult(masm
, scratch
, JSVAL_TYPE_BOOLEAN
, output
);
7396 bool CacheIRCompiler::emitCompareBigIntStringResult(JSOp op
,
7397 BigIntOperandId lhsId
,
7398 StringOperandId rhsId
) {
7399 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7400 AutoCallVM
callvm(masm
, this, allocator
);
7402 Register lhs
= allocator
.useRegister(masm
, lhsId
);
7403 Register rhs
= allocator
.useRegister(masm
, rhsId
);
7407 // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
7408 // - |left <= right| is implemented as |right >= left|.
7409 // - |left > right| is implemented as |right < left|.
7410 if (op
== JSOp::Le
|| op
== JSOp::Gt
) {
7418 using FnBigIntString
=
7419 bool (*)(JSContext
*, HandleBigInt
, HandleString
, bool*);
7420 using FnStringBigInt
=
7421 bool (*)(JSContext
*, HandleString
, HandleBigInt
, bool*);
7425 constexpr auto Equal
= EqualityKind::Equal
;
7426 callvm
.call
<FnBigIntString
, BigIntStringEqual
<Equal
>>();
7430 constexpr auto NotEqual
= EqualityKind::NotEqual
;
7431 callvm
.call
<FnBigIntString
, BigIntStringEqual
<NotEqual
>>();
7435 constexpr auto LessThan
= ComparisonKind::LessThan
;
7436 callvm
.call
<FnBigIntString
, BigIntStringCompare
<LessThan
>>();
7440 constexpr auto LessThan
= ComparisonKind::LessThan
;
7441 callvm
.call
<FnStringBigInt
, StringBigIntCompare
<LessThan
>>();
7445 constexpr auto GreaterThanOrEqual
= ComparisonKind::GreaterThanOrEqual
;
7446 callvm
.call
<FnStringBigInt
, StringBigIntCompare
<GreaterThanOrEqual
>>();
7450 constexpr auto GreaterThanOrEqual
= ComparisonKind::GreaterThanOrEqual
;
7451 callvm
.call
<FnBigIntString
, BigIntStringCompare
<GreaterThanOrEqual
>>();
7455 MOZ_CRASH("unhandled op");
7460 bool CacheIRCompiler::emitCompareNullUndefinedResult(JSOp op
, bool isUndefined
,
7461 ValOperandId inputId
) {
7462 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7464 AutoOutputRegister
output(*this);
7465 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
7466 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
7467 AutoScratchRegister
scratch2(allocator
, masm
);
7469 if (IsStrictEqualityOp(op
)) {
7471 masm
.testUndefinedSet(JSOpToCondition(op
, false), input
, scratch
);
7473 masm
.testNullSet(JSOpToCondition(op
, false), input
, scratch
);
7475 EmitStoreResult(masm
, scratch
, JSVAL_TYPE_BOOLEAN
, output
);
7479 FailurePath
* failure
;
7480 if (!addFailurePath(&failure
)) {
7484 MOZ_ASSERT(IsLooseEqualityOp(op
));
7486 Label nullOrLikeUndefined
, notNullOrLikeUndefined
, done
;
7488 ScratchTagScope
tag(masm
, input
);
7489 masm
.splitTagForTest(input
, tag
);
7492 masm
.branchTestUndefined(Assembler::Equal
, tag
, &nullOrLikeUndefined
);
7493 masm
.branchTestNull(Assembler::Equal
, tag
, &nullOrLikeUndefined
);
7495 masm
.branchTestNull(Assembler::Equal
, tag
, &nullOrLikeUndefined
);
7496 masm
.branchTestUndefined(Assembler::Equal
, tag
, &nullOrLikeUndefined
);
7498 masm
.branchTestObject(Assembler::NotEqual
, tag
, ¬NullOrLikeUndefined
);
7501 ScratchTagScopeRelease
_(&tag
);
7503 masm
.unboxObject(input
, scratch
);
7504 masm
.branchIfObjectEmulatesUndefined(scratch
, scratch2
, failure
->label(),
7505 &nullOrLikeUndefined
);
7506 masm
.jump(¬NullOrLikeUndefined
);
7510 masm
.bind(&nullOrLikeUndefined
);
7511 EmitStoreBoolean(masm
, op
== JSOp::Eq
, output
);
7514 masm
.bind(¬NullOrLikeUndefined
);
7515 EmitStoreBoolean(masm
, op
== JSOp::Ne
, output
);
7521 bool CacheIRCompiler::emitCompareDoubleSameValueResult(NumberOperandId lhsId
,
7522 NumberOperandId rhsId
) {
7523 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7525 AutoOutputRegister
output(*this);
7526 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
7527 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
7528 AutoAvailableFloatRegister
floatScratch1(*this, FloatReg1
);
7529 AutoAvailableFloatRegister
floatScratch2(*this, FloatReg2
);
7531 allocator
.ensureDoubleRegister(masm
, lhsId
, floatScratch0
);
7532 allocator
.ensureDoubleRegister(masm
, rhsId
, floatScratch1
);
7534 masm
.sameValueDouble(floatScratch0
, floatScratch1
, floatScratch2
, scratch
);
7535 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch
, output
.valueReg());
7539 bool CacheIRCompiler::emitIndirectTruncateInt32Result(Int32OperandId valId
) {
7540 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7541 AutoOutputRegister
output(*this);
7542 Register val
= allocator
.useRegister(masm
, valId
);
7544 if (output
.hasValue()) {
7545 masm
.tagValue(JSVAL_TYPE_INT32
, val
, output
.valueReg());
7547 masm
.mov(val
, output
.typedReg().gpr());
7552 bool CacheIRCompiler::emitCallPrintString(const char* str
) {
7553 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7558 bool CacheIRCompiler::emitBreakpoint() {
7559 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7564 void CacheIRCompiler::emitPostBarrierShared(Register obj
,
7565 const ConstantOrRegister
& val
,
7567 Register maybeIndex
) {
7568 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7570 if (val
.constant()) {
7571 MOZ_ASSERT_IF(val
.value().isGCThing(),
7572 !IsInsideNursery(val
.value().toGCThing()));
7576 TypedOrValueRegister reg
= val
.reg();
7577 if (reg
.hasTyped() && !NeedsPostBarrier(reg
.type())) {
7582 if (reg
.hasValue()) {
7583 masm
.branchValueIsNurseryCell(Assembler::NotEqual
, reg
.valueReg(), scratch
,
7586 masm
.branchPtrInNurseryChunk(Assembler::NotEqual
, reg
.typedReg().gpr(),
7587 scratch
, &skipBarrier
);
7589 masm
.branchPtrInNurseryChunk(Assembler::Equal
, obj
, scratch
, &skipBarrier
);
7591 // Check one element cache to avoid VM call.
7592 auto* lastCellAddr
= cx_
->runtime()->gc
.addressOfLastBufferedWholeCell();
7593 masm
.branchPtr(Assembler::Equal
, AbsoluteAddress(lastCellAddr
), obj
,
7596 // Call one of these, depending on maybeIndex:
7598 // void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
7599 // void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj,
7601 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
7602 masm
.PushRegsInMask(save
);
7603 masm
.setupUnalignedABICall(scratch
);
7604 masm
.movePtr(ImmPtr(cx_
->runtime()), scratch
);
7605 masm
.passABIArg(scratch
);
7606 masm
.passABIArg(obj
);
7607 if (maybeIndex
!= InvalidReg
) {
7608 masm
.passABIArg(maybeIndex
);
7609 using Fn
= void (*)(JSRuntime
* rt
, JSObject
* obj
, int32_t index
);
7610 masm
.callWithABI
<Fn
, PostWriteElementBarrier
>();
7612 using Fn
= void (*)(JSRuntime
* rt
, js::gc::Cell
* cell
);
7613 masm
.callWithABI
<Fn
, PostWriteBarrier
>();
7615 masm
.PopRegsInMask(save
);
7617 masm
.bind(&skipBarrier
);
7620 bool CacheIRCompiler::emitWrapResult() {
7621 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7622 AutoOutputRegister
output(*this);
7623 AutoScratchRegister
scratch(allocator
, masm
);
7625 FailurePath
* failure
;
7626 if (!addFailurePath(&failure
)) {
7631 // We only have to wrap objects, because we are in the same zone.
7632 masm
.branchTestObject(Assembler::NotEqual
, output
.valueReg(), &done
);
7634 Register obj
= output
.valueReg().scratchReg();
7635 masm
.unboxObject(output
.valueReg(), obj
);
7637 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
7638 masm
.PushRegsInMask(save
);
7640 using Fn
= JSObject
* (*)(JSContext
* cx
, JSObject
* obj
);
7641 masm
.setupUnalignedABICall(scratch
);
7642 masm
.loadJSContext(scratch
);
7643 masm
.passABIArg(scratch
);
7644 masm
.passABIArg(obj
);
7645 masm
.callWithABI
<Fn
, WrapObjectPure
>();
7646 masm
.storeCallPointerResult(obj
);
7648 LiveRegisterSet ignore
;
7650 masm
.PopRegsInMaskIgnore(save
, ignore
);
7652 // We could not get a wrapper for this object.
7653 masm
.branchTestPtr(Assembler::Zero
, obj
, obj
, failure
->label());
7655 // We clobbered the output register, so we have to retag.
7656 masm
.tagValue(JSVAL_TYPE_OBJECT
, obj
, output
.valueReg());
7662 bool CacheIRCompiler::emitMegamorphicLoadSlotByValueResult(ObjOperandId objId
,
7663 ValOperandId idId
) {
7664 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7665 AutoOutputRegister
output(*this);
7667 Register obj
= allocator
.useRegister(masm
, objId
);
7668 ValueOperand idVal
= allocator
.useValueRegister(masm
, idId
);
7670 #ifdef JS_CODEGEN_X86
7671 AutoScratchRegisterMaybeOutput
scratch1(allocator
, masm
, output
);
7672 AutoScratchRegisterMaybeOutputType
scratch2(allocator
, masm
, output
);
7674 AutoScratchRegister
scratch1(allocator
, masm
);
7675 AutoScratchRegister
scratch2(allocator
, masm
);
7676 AutoScratchRegister
scratch3(allocator
, masm
);
7679 FailurePath
* failure
;
7680 if (!addFailurePath(&failure
)) {
7684 #ifdef JS_CODEGEN_X86
7685 masm
.xorPtr(scratch2
, scratch2
);
7688 masm
.emitMegamorphicCacheLookupByValue(
7689 idVal
, obj
, scratch1
, scratch3
, scratch2
, output
.valueReg(), &cacheHit
);
7692 masm
.branchIfNonNativeObj(obj
, scratch1
, failure
->label());
7694 // idVal will be in vp[0], result will be stored in vp[1].
7695 masm
.reserveStack(sizeof(Value
));
7697 masm
.moveStackPtrTo(idVal
.scratchReg());
7699 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
7700 liveVolatileFloatRegs());
7701 volatileRegs
.takeUnchecked(scratch1
);
7702 volatileRegs
.takeUnchecked(idVal
);
7703 masm
.PushRegsInMask(volatileRegs
);
7705 using Fn
= bool (*)(JSContext
* cx
, JSObject
* obj
,
7706 MegamorphicCache::Entry
* cacheEntry
, Value
* vp
);
7707 masm
.setupUnalignedABICall(scratch1
);
7708 masm
.loadJSContext(scratch1
);
7709 masm
.passABIArg(scratch1
);
7710 masm
.passABIArg(obj
);
7711 masm
.passABIArg(scratch2
);
7712 masm
.passABIArg(idVal
.scratchReg());
7713 masm
.callWithABI
<Fn
, GetNativeDataPropertyByValuePure
>();
7715 masm
.storeCallPointerResult(scratch1
);
7716 masm
.PopRegsInMask(volatileRegs
);
7721 uint32_t framePushed
= masm
.framePushed();
7722 masm
.branchIfTrueBool(scratch1
, &ok
);
7723 masm
.adjustStack(sizeof(Value
));
7724 masm
.jump(failure
->label());
7727 masm
.setFramePushed(framePushed
);
7728 masm
.loadTypedOrValue(Address(masm
.getStackPointer(), 0), output
);
7729 masm
.adjustStack(sizeof(Value
));
7731 #ifndef JS_CODEGEN_X86
7732 masm
.bind(&cacheHit
);
7737 bool CacheIRCompiler::emitMegamorphicHasPropResult(ObjOperandId objId
,
7740 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7741 AutoOutputRegister
output(*this);
7743 Register obj
= allocator
.useRegister(masm
, objId
);
7744 ValueOperand idVal
= allocator
.useValueRegister(masm
, idId
);
7746 #ifdef JS_CODEGEN_X86
7747 AutoScratchRegisterMaybeOutput
scratch1(allocator
, masm
, output
);
7748 AutoScratchRegisterMaybeOutputType
scratch2(allocator
, masm
, output
);
7750 AutoScratchRegister
scratch1(allocator
, masm
);
7751 AutoScratchRegister
scratch2(allocator
, masm
);
7752 AutoScratchRegister
scratch3(allocator
, masm
);
7755 FailurePath
* failure
;
7756 if (!addFailurePath(&failure
)) {
7760 #ifndef JS_CODEGEN_X86
7761 Label cacheHit
, done
;
7762 masm
.emitMegamorphicCacheLookupExists(idVal
, obj
, scratch1
, scratch3
,
7763 scratch2
, output
.maybeReg(), &cacheHit
,
7766 masm
.xorPtr(scratch2
, scratch2
);
7769 masm
.branchIfNonNativeObj(obj
, scratch1
, failure
->label());
7771 // idVal will be in vp[0], result will be stored in vp[1].
7772 masm
.reserveStack(sizeof(Value
));
7774 masm
.moveStackPtrTo(idVal
.scratchReg());
7776 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
7777 liveVolatileFloatRegs());
7778 volatileRegs
.takeUnchecked(scratch1
);
7779 volatileRegs
.takeUnchecked(idVal
);
7780 masm
.PushRegsInMask(volatileRegs
);
7782 using Fn
= bool (*)(JSContext
* cx
, JSObject
* obj
,
7783 MegamorphicCache::Entry
* cacheEntry
, Value
* vp
);
7784 masm
.setupUnalignedABICall(scratch1
);
7785 masm
.loadJSContext(scratch1
);
7786 masm
.passABIArg(scratch1
);
7787 masm
.passABIArg(obj
);
7788 masm
.passABIArg(scratch2
);
7789 masm
.passABIArg(idVal
.scratchReg());
7791 masm
.callWithABI
<Fn
, HasNativeDataPropertyPure
<true>>();
7793 masm
.callWithABI
<Fn
, HasNativeDataPropertyPure
<false>>();
7795 masm
.storeCallPointerResult(scratch1
);
7796 masm
.PopRegsInMask(volatileRegs
);
7801 uint32_t framePushed
= masm
.framePushed();
7802 masm
.branchIfTrueBool(scratch1
, &ok
);
7803 masm
.adjustStack(sizeof(Value
));
7804 masm
.jump(failure
->label());
7807 masm
.setFramePushed(framePushed
);
7808 masm
.loadTypedOrValue(Address(masm
.getStackPointer(), 0), output
);
7809 masm
.adjustStack(sizeof(Value
));
7811 #ifndef JS_CODEGEN_X86
7813 masm
.bind(&cacheHit
);
7814 if (output
.hasValue()) {
7815 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, output
.valueReg().scratchReg(),
7823 bool CacheIRCompiler::emitCallObjectHasSparseElementResult(
7824 ObjOperandId objId
, Int32OperandId indexId
) {
7825 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7826 AutoOutputRegister
output(*this);
7828 Register obj
= allocator
.useRegister(masm
, objId
);
7829 Register index
= allocator
.useRegister(masm
, indexId
);
7831 AutoScratchRegisterMaybeOutput
scratch1(allocator
, masm
, output
);
7832 AutoScratchRegister
scratch2(allocator
, masm
);
7834 FailurePath
* failure
;
7835 if (!addFailurePath(&failure
)) {
7839 masm
.reserveStack(sizeof(Value
));
7840 masm
.moveStackPtrTo(scratch2
.get());
7842 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
7843 liveVolatileFloatRegs());
7844 volatileRegs
.takeUnchecked(scratch1
);
7845 volatileRegs
.takeUnchecked(index
);
7846 masm
.PushRegsInMask(volatileRegs
);
7849 bool (*)(JSContext
* cx
, NativeObject
* obj
, int32_t index
, Value
* vp
);
7850 masm
.setupUnalignedABICall(scratch1
);
7851 masm
.loadJSContext(scratch1
);
7852 masm
.passABIArg(scratch1
);
7853 masm
.passABIArg(obj
);
7854 masm
.passABIArg(index
);
7855 masm
.passABIArg(scratch2
);
7856 masm
.callWithABI
<Fn
, HasNativeElementPure
>();
7857 masm
.storeCallPointerResult(scratch1
);
7858 masm
.PopRegsInMask(volatileRegs
);
7861 uint32_t framePushed
= masm
.framePushed();
7862 masm
.branchIfTrueBool(scratch1
, &ok
);
7863 masm
.adjustStack(sizeof(Value
));
7864 masm
.jump(failure
->label());
7867 masm
.setFramePushed(framePushed
);
7868 masm
.loadTypedOrValue(Address(masm
.getStackPointer(), 0), output
);
7869 masm
.adjustStack(sizeof(Value
));
7874 * Move a constant value into register dest.
7876 void CacheIRCompiler::emitLoadStubFieldConstant(StubFieldOffset val
,
7878 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7879 MOZ_ASSERT(mode_
== Mode::Ion
);
7880 switch (val
.getStubFieldType()) {
7881 case StubField::Type::Shape
:
7882 masm
.movePtr(ImmGCPtr(shapeStubField(val
.getOffset())), dest
);
7884 case StubField::Type::WeakGetterSetter
:
7885 masm
.movePtr(ImmGCPtr(weakGetterSetterStubField(val
.getOffset())), dest
);
7887 case StubField::Type::String
:
7888 masm
.movePtr(ImmGCPtr(stringStubField(val
.getOffset())), dest
);
7890 case StubField::Type::JSObject
:
7891 masm
.movePtr(ImmGCPtr(objectStubField(val
.getOffset())), dest
);
7893 case StubField::Type::RawPointer
:
7894 masm
.movePtr(ImmPtr(pointerStubField(val
.getOffset())), dest
);
7896 case StubField::Type::RawInt32
:
7897 masm
.move32(Imm32(int32StubField(val
.getOffset())), dest
);
7899 case StubField::Type::Id
:
7900 masm
.movePropertyKey(idStubField(val
.getOffset()), dest
);
7903 MOZ_CRASH("Unhandled stub field constant type");
7908 * After this is done executing, dest contains the value; either through a
7909 * constant load or through the load from the stub data.
7911 * The current policy is that Baseline will use loads from the stub data (to
7912 * allow IC sharing), where as Ion doesn't share ICs, and so we can safely use
7913 * constants in the IC.
7915 void CacheIRCompiler::emitLoadStubField(StubFieldOffset val
, Register dest
) {
7916 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7917 if (stubFieldPolicy_
== StubFieldPolicy::Constant
) {
7918 emitLoadStubFieldConstant(val
, dest
);
7920 Address
load(ICStubReg
, stubDataOffset_
+ val
.getOffset());
7922 switch (val
.getStubFieldType()) {
7923 case StubField::Type::RawPointer
:
7924 case StubField::Type::Shape
:
7925 case StubField::Type::WeakGetterSetter
:
7926 case StubField::Type::JSObject
:
7927 case StubField::Type::Symbol
:
7928 case StubField::Type::String
:
7929 case StubField::Type::Id
:
7930 masm
.loadPtr(load
, dest
);
7932 case StubField::Type::RawInt32
:
7933 masm
.load32(load
, dest
);
7936 MOZ_CRASH("Unhandled stub field constant type");
7941 void CacheIRCompiler::emitLoadValueStubField(StubFieldOffset val
,
7942 ValueOperand dest
) {
7943 MOZ_ASSERT(val
.getStubFieldType() == StubField::Type::Value
);
7945 if (stubFieldPolicy_
== StubFieldPolicy::Constant
) {
7946 MOZ_ASSERT(mode_
== Mode::Ion
);
7947 masm
.moveValue(valueStubField(val
.getOffset()), dest
);
7949 Address
addr(ICStubReg
, stubDataOffset_
+ val
.getOffset());
7950 masm
.loadValue(addr
, dest
);
7954 void CacheIRCompiler::emitLoadDoubleValueStubField(StubFieldOffset val
,
7956 FloatRegister scratch
) {
7957 MOZ_ASSERT(val
.getStubFieldType() == StubField::Type::Double
);
7959 if (stubFieldPolicy_
== StubFieldPolicy::Constant
) {
7960 MOZ_ASSERT(mode_
== Mode::Ion
);
7961 double d
= doubleStubField(val
.getOffset());
7962 masm
.moveValue(DoubleValue(d
), dest
);
7964 Address
addr(ICStubReg
, stubDataOffset_
+ val
.getOffset());
7965 masm
.loadDouble(addr
, scratch
);
7966 masm
.boxDouble(scratch
, dest
, scratch
);
7970 bool CacheIRCompiler::emitLoadInstanceOfObjectResult(ValOperandId lhsId
,
7971 ObjOperandId protoId
) {
7972 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
7973 AutoOutputRegister
output(*this);
7974 ValueOperand lhs
= allocator
.useValueRegister(masm
, lhsId
);
7975 Register proto
= allocator
.useRegister(masm
, protoId
);
7977 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
7979 FailurePath
* failure
;
7980 if (!addFailurePath(&failure
)) {
7984 Label returnFalse
, returnTrue
, done
;
7985 masm
.fallibleUnboxObject(lhs
, scratch
, &returnFalse
);
7987 // LHS is an object. Load its proto.
7988 masm
.loadObjProto(scratch
, scratch
);
7990 // Walk the proto chain until we either reach the target object,
7991 // nullptr or LazyProto.
7995 masm
.branchPtr(Assembler::Equal
, scratch
, proto
, &returnTrue
);
7996 masm
.branchTestPtr(Assembler::Zero
, scratch
, scratch
, &returnFalse
);
7998 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto
) == 1);
7999 masm
.branchPtr(Assembler::Equal
, scratch
, ImmWord(1), failure
->label());
8001 masm
.loadObjProto(scratch
, scratch
);
8005 masm
.bind(&returnFalse
);
8006 EmitStoreBoolean(masm
, false, output
);
8009 masm
.bind(&returnTrue
);
8010 EmitStoreBoolean(masm
, true, output
);
8016 bool CacheIRCompiler::emitMegamorphicLoadSlotResult(ObjOperandId objId
,
8017 uint32_t idOffset
) {
8018 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8019 AutoOutputRegister
output(*this);
8021 Register obj
= allocator
.useRegister(masm
, objId
);
8022 StubFieldOffset
id(idOffset
, StubField::Type::Id
);
8024 AutoScratchRegisterMaybeOutput
idReg(allocator
, masm
, output
);
8025 AutoScratchRegister
scratch1(allocator
, masm
);
8026 AutoScratchRegister
scratch2(allocator
, masm
);
8027 AutoScratchRegisterMaybeOutputType
scratch3(allocator
, masm
, output
);
8029 FailurePath
* failure
;
8030 if (!addFailurePath(&failure
)) {
8034 #ifdef JS_CODEGEN_X86
8035 masm
.xorPtr(scratch3
, scratch3
);
8038 emitLoadStubField(id
, idReg
);
8039 masm
.emitMegamorphicCacheLookupByValue(idReg
.get(), obj
, scratch1
, scratch2
,
8040 scratch3
, output
.valueReg(),
8044 masm
.branchIfNonNativeObj(obj
, scratch1
, failure
->label());
8046 masm
.Push(UndefinedValue());
8047 masm
.moveStackPtrTo(idReg
.get());
8049 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
8050 liveVolatileFloatRegs());
8051 volatileRegs
.takeUnchecked(scratch1
);
8052 volatileRegs
.takeUnchecked(scratch2
);
8053 volatileRegs
.takeUnchecked(scratch3
);
8054 volatileRegs
.takeUnchecked(idReg
);
8055 masm
.PushRegsInMask(volatileRegs
);
8057 using Fn
= bool (*)(JSContext
* cx
, JSObject
* obj
, PropertyKey id
,
8058 MegamorphicCache::Entry
* cacheEntry
, Value
* vp
);
8059 masm
.setupUnalignedABICall(scratch1
);
8060 masm
.loadJSContext(scratch1
);
8061 masm
.passABIArg(scratch1
);
8062 masm
.passABIArg(obj
);
8063 emitLoadStubField(id
, scratch2
);
8064 masm
.passABIArg(scratch2
);
8065 masm
.passABIArg(scratch3
);
8066 masm
.passABIArg(idReg
);
8068 #ifdef JS_CODEGEN_X86
8069 masm
.callWithABI
<Fn
, GetNativeDataPropertyPureWithCacheLookup
>();
8071 masm
.callWithABI
<Fn
, GetNativeDataPropertyPure
>();
8074 masm
.storeCallPointerResult(scratch2
);
8075 masm
.PopRegsInMask(volatileRegs
);
8077 masm
.loadTypedOrValue(Address(masm
.getStackPointer(), 0), output
);
8078 masm
.adjustStack(sizeof(Value
));
8080 masm
.branchIfFalseBool(scratch2
, failure
->label());
8081 #ifndef JS_CODEGEN_X86
8082 masm
.bind(&cacheHit
);
8088 bool CacheIRCompiler::emitMegamorphicStoreSlot(ObjOperandId objId
,
8092 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8094 AutoCallVM
callvm(masm
, this, allocator
);
8096 Register obj
= allocator
.useRegister(masm
, objId
);
8097 ConstantOrRegister val
= allocator
.useConstantOrRegister(masm
, rhsId
);
8098 StubFieldOffset
id(idOffset
, StubField::Type::Id
);
8099 AutoScratchRegister
scratch(allocator
, masm
);
8103 masm
.Push(Imm32(strict
));
8105 emitLoadStubField(id
, scratch
);
8109 using Fn
= bool (*)(JSContext
*, HandleObject
, HandleId
, HandleValue
, bool);
8110 callvm
.callNoResult
<Fn
, SetPropertyMegamorphic
<false>>();
8114 bool CacheIRCompiler::emitGuardHasGetterSetter(ObjOperandId objId
,
8116 uint32_t getterSetterOffset
) {
8117 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8119 Register obj
= allocator
.useRegister(masm
, objId
);
8121 StubFieldOffset
id(idOffset
, StubField::Type::Id
);
8122 StubFieldOffset
getterSetter(getterSetterOffset
,
8123 StubField::Type::WeakGetterSetter
);
8125 AutoScratchRegister
scratch1(allocator
, masm
);
8126 AutoScratchRegister
scratch2(allocator
, masm
);
8127 AutoScratchRegister
scratch3(allocator
, masm
);
8129 FailurePath
* failure
;
8130 if (!addFailurePath(&failure
)) {
8134 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
8135 liveVolatileFloatRegs());
8136 volatileRegs
.takeUnchecked(scratch1
);
8137 volatileRegs
.takeUnchecked(scratch2
);
8138 masm
.PushRegsInMask(volatileRegs
);
8140 using Fn
= bool (*)(JSContext
* cx
, JSObject
* obj
, jsid id
,
8141 GetterSetter
* getterSetter
);
8142 masm
.setupUnalignedABICall(scratch1
);
8143 masm
.loadJSContext(scratch1
);
8144 masm
.passABIArg(scratch1
);
8145 masm
.passABIArg(obj
);
8146 emitLoadStubField(id
, scratch2
);
8147 masm
.passABIArg(scratch2
);
8148 emitLoadStubField(getterSetter
, scratch3
);
8149 masm
.passABIArg(scratch3
);
8150 masm
.callWithABI
<Fn
, ObjectHasGetterSetterPure
>();
8151 masm
.storeCallPointerResult(scratch1
);
8152 masm
.PopRegsInMask(volatileRegs
);
8154 masm
.branchIfFalseBool(scratch1
, failure
->label());
8158 bool CacheIRCompiler::emitGuardWasmArg(ValOperandId argId
,
8159 wasm::ValType::Kind kind
) {
8160 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8162 // All values can be boxed as AnyRef.
8163 if (kind
== wasm::ValType::Ref
) {
8166 MOZ_ASSERT(kind
!= wasm::ValType::V128
);
8168 ValueOperand arg
= allocator
.useValueRegister(masm
, argId
);
8170 FailurePath
* failure
;
8171 if (!addFailurePath(&failure
)) {
8175 // Check that the argument can be converted to the Wasm type in Warp code
8176 // without bailing out.
8179 case wasm::ValType::I32
:
8180 case wasm::ValType::F32
:
8181 case wasm::ValType::F64
: {
8182 // Argument must be number, bool, or undefined.
8183 masm
.branchTestNumber(Assembler::Equal
, arg
, &done
);
8184 masm
.branchTestBoolean(Assembler::Equal
, arg
, &done
);
8185 masm
.branchTestUndefined(Assembler::NotEqual
, arg
, failure
->label());
8188 case wasm::ValType::I64
: {
8189 // Argument must be bigint, bool, or string.
8190 masm
.branchTestBigInt(Assembler::Equal
, arg
, &done
);
8191 masm
.branchTestBoolean(Assembler::Equal
, arg
, &done
);
8192 masm
.branchTestString(Assembler::NotEqual
, arg
, failure
->label());
8196 MOZ_CRASH("Unexpected kind");
8203 bool CacheIRCompiler::emitGuardMultipleShapes(ObjOperandId objId
,
8204 uint32_t shapesOffset
) {
8205 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8206 Register obj
= allocator
.useRegister(masm
, objId
);
8207 AutoScratchRegister
shapes(allocator
, masm
);
8208 AutoScratchRegister
scratch(allocator
, masm
);
8209 AutoScratchRegister
scratch2(allocator
, masm
);
8211 bool needSpectreMitigations
= objectGuardNeedsSpectreMitigations(objId
);
8213 Register spectreScratch
= InvalidReg
;
8214 Maybe
<AutoScratchRegister
> maybeSpectreScratch
;
8215 if (needSpectreMitigations
) {
8216 maybeSpectreScratch
.emplace(allocator
, masm
);
8217 spectreScratch
= *maybeSpectreScratch
;
8220 FailurePath
* failure
;
8221 if (!addFailurePath(&failure
)) {
8225 // The stub field contains a ListObject. Load its elements.
8226 StubFieldOffset
shapeArray(shapesOffset
, StubField::Type::JSObject
);
8227 emitLoadStubField(shapeArray
, shapes
);
8228 masm
.loadPtr(Address(shapes
, NativeObject::offsetOfElements()), shapes
);
8230 masm
.branchTestObjShapeList(Assembler::NotEqual
, obj
, shapes
, scratch
,
8231 scratch2
, spectreScratch
, failure
->label());
8235 bool CacheIRCompiler::emitLoadObject(ObjOperandId resultId
,
8236 uint32_t objOffset
) {
8237 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8238 Register reg
= allocator
.defineRegister(masm
, resultId
);
8239 StubFieldOffset
obj(objOffset
, StubField::Type::JSObject
);
8240 emitLoadStubField(obj
, reg
);
8244 bool CacheIRCompiler::emitLoadProtoObject(ObjOperandId resultId
,
8246 ObjOperandId receiverObjId
) {
8247 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8248 Register reg
= allocator
.defineRegister(masm
, resultId
);
8249 StubFieldOffset
obj(objOffset
, StubField::Type::JSObject
);
8250 emitLoadStubField(obj
, reg
);
8254 bool CacheIRCompiler::emitLoadInt32Constant(uint32_t valOffset
,
8255 Int32OperandId resultId
) {
8256 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8257 Register reg
= allocator
.defineRegister(masm
, resultId
);
8258 StubFieldOffset
val(valOffset
, StubField::Type::RawInt32
);
8259 emitLoadStubField(val
, reg
);
8263 bool CacheIRCompiler::emitLoadBooleanConstant(bool val
,
8264 BooleanOperandId resultId
) {
8265 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8266 Register reg
= allocator
.defineRegister(masm
, resultId
);
8267 masm
.move32(Imm32(val
), reg
);
8271 bool CacheIRCompiler::emitLoadDoubleConstant(uint32_t valOffset
,
8272 NumberOperandId resultId
) {
8273 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8275 ValueOperand output
= allocator
.defineValueRegister(masm
, resultId
);
8276 StubFieldOffset
val(valOffset
, StubField::Type::Double
);
8278 AutoScratchFloatRegister
floatReg(this);
8280 emitLoadDoubleValueStubField(val
, output
, floatReg
);
8284 bool CacheIRCompiler::emitLoadUndefined(ValOperandId resultId
) {
8285 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8287 ValueOperand reg
= allocator
.defineValueRegister(masm
, resultId
);
8288 masm
.moveValue(UndefinedValue(), reg
);
8292 bool CacheIRCompiler::emitLoadConstantString(uint32_t strOffset
,
8293 StringOperandId resultId
) {
8294 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8295 Register reg
= allocator
.defineRegister(masm
, resultId
);
8296 StubFieldOffset
str(strOffset
, StubField::Type::String
);
8297 emitLoadStubField(str
, reg
);
8301 bool CacheIRCompiler::emitCallInt32ToString(Int32OperandId inputId
,
8302 StringOperandId resultId
) {
8303 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8304 Register input
= allocator
.useRegister(masm
, inputId
);
8305 Register result
= allocator
.defineRegister(masm
, resultId
);
8307 FailurePath
* failure
;
8308 if (!addFailurePath(&failure
)) {
8312 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
8313 liveVolatileFloatRegs());
8314 volatileRegs
.takeUnchecked(result
);
8315 masm
.PushRegsInMask(volatileRegs
);
8317 using Fn
= JSLinearString
* (*)(JSContext
* cx
, int32_t i
);
8318 masm
.setupUnalignedABICall(result
);
8319 masm
.loadJSContext(result
);
8320 masm
.passABIArg(result
);
8321 masm
.passABIArg(input
);
8322 masm
.callWithABI
<Fn
, js::Int32ToStringPure
>();
8324 masm
.storeCallPointerResult(result
);
8325 masm
.PopRegsInMask(volatileRegs
);
8327 masm
.branchPtr(Assembler::Equal
, result
, ImmPtr(nullptr), failure
->label());
8331 bool CacheIRCompiler::emitCallNumberToString(NumberOperandId inputId
,
8332 StringOperandId resultId
) {
8333 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8335 AutoAvailableFloatRegister
floatScratch0(*this, FloatReg0
);
8337 allocator
.ensureDoubleRegister(masm
, inputId
, floatScratch0
);
8338 Register result
= allocator
.defineRegister(masm
, resultId
);
8340 FailurePath
* failure
;
8341 if (!addFailurePath(&failure
)) {
8345 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
8346 liveVolatileFloatRegs());
8347 volatileRegs
.takeUnchecked(result
);
8348 masm
.PushRegsInMask(volatileRegs
);
8350 using Fn
= JSString
* (*)(JSContext
* cx
, double d
);
8351 masm
.setupUnalignedABICall(result
);
8352 masm
.loadJSContext(result
);
8353 masm
.passABIArg(result
);
8354 masm
.passABIArg(floatScratch0
, ABIType::Float64
);
8355 masm
.callWithABI
<Fn
, js::NumberToStringPure
>();
8357 masm
.storeCallPointerResult(result
);
8358 masm
.PopRegsInMask(volatileRegs
);
8360 masm
.branchPtr(Assembler::Equal
, result
, ImmPtr(nullptr), failure
->label());
8364 bool CacheIRCompiler::emitInt32ToStringWithBaseResult(Int32OperandId inputId
,
8365 Int32OperandId baseId
) {
8366 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8368 AutoCallVM
callvm(masm
, this, allocator
);
8369 Register input
= allocator
.useRegister(masm
, inputId
);
8370 Register base
= allocator
.useRegister(masm
, baseId
);
8372 FailurePath
* failure
;
8373 if (!addFailurePath(&failure
)) {
8377 // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
8378 // we can't use both at the same time. This isn't an issue here, because Ion
8379 // doesn't support CallICs. If that ever changes, this code must be updated.
8380 MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
8382 masm
.branch32(Assembler::LessThan
, base
, Imm32(2), failure
->label());
8383 masm
.branch32(Assembler::GreaterThan
, base
, Imm32(36), failure
->label());
8385 // Use lower-case characters by default.
8386 constexpr bool lowerCase
= true;
8390 masm
.Push(Imm32(lowerCase
));
8394 using Fn
= JSString
* (*)(JSContext
*, int32_t, int32_t, bool);
8395 callvm
.call
<Fn
, js::Int32ToStringWithBase
>();
8399 bool CacheIRCompiler::emitBooleanToString(BooleanOperandId inputId
,
8400 StringOperandId resultId
) {
8401 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8402 Register boolean
= allocator
.useRegister(masm
, inputId
);
8403 Register result
= allocator
.defineRegister(masm
, resultId
);
8404 const JSAtomState
& names
= cx_
->names();
8407 masm
.branchTest32(Assembler::NonZero
, boolean
, boolean
, &true_
);
8410 masm
.movePtr(ImmGCPtr(names
.false_
), result
);
8415 masm
.movePtr(ImmGCPtr(names
.true_
), result
);
8421 bool CacheIRCompiler::emitObjectToStringResult(ObjOperandId objId
) {
8422 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8424 AutoOutputRegister
output(*this);
8425 Register obj
= allocator
.useRegister(masm
, objId
);
8426 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
8428 FailurePath
* failure
;
8429 if (!addFailurePath(&failure
)) {
8433 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
8434 liveVolatileFloatRegs());
8435 volatileRegs
.takeUnchecked(output
.valueReg());
8436 volatileRegs
.takeUnchecked(scratch
);
8437 masm
.PushRegsInMask(volatileRegs
);
8439 using Fn
= JSString
* (*)(JSContext
*, JSObject
*);
8440 masm
.setupUnalignedABICall(scratch
);
8441 masm
.loadJSContext(scratch
);
8442 masm
.passABIArg(scratch
);
8443 masm
.passABIArg(obj
);
8444 masm
.callWithABI
<Fn
, js::ObjectClassToString
>();
8445 masm
.storeCallPointerResult(scratch
);
8447 masm
.PopRegsInMask(volatileRegs
);
8449 masm
.branchPtr(Assembler::Equal
, scratch
, ImmPtr(nullptr), failure
->label());
8450 masm
.tagValue(JSVAL_TYPE_STRING
, scratch
, output
.valueReg());
8455 bool CacheIRCompiler::emitCallStringConcatResult(StringOperandId lhsId
,
8456 StringOperandId rhsId
) {
8457 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8458 AutoCallVM
callvm(masm
, this, allocator
);
8460 Register lhs
= allocator
.useRegister(masm
, lhsId
);
8461 Register rhs
= allocator
.useRegister(masm
, rhsId
);
8465 masm
.Push(static_cast<js::jit::Imm32
>(int32_t(js::gc::Heap::Default
)));
8470 JSString
* (*)(JSContext
*, HandleString
, HandleString
, js::gc::Heap
);
8471 callvm
.call
<Fn
, ConcatStrings
<CanGC
>>();
8476 bool CacheIRCompiler::emitCallIsSuspendedGeneratorResult(ValOperandId valId
) {
8477 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8478 AutoOutputRegister
output(*this);
8479 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
8480 AutoScratchRegister
scratch2(allocator
, masm
);
8481 ValueOperand input
= allocator
.useValueRegister(masm
, valId
);
8483 // Test if it's an object.
8484 Label returnFalse
, done
;
8485 masm
.fallibleUnboxObject(input
, scratch
, &returnFalse
);
8487 // Test if it's a GeneratorObject.
8488 masm
.branchTestObjClass(Assembler::NotEqual
, scratch
,
8489 &GeneratorObject::class_
, scratch2
, scratch
,
8492 // If the resumeIndex slot holds an int32 value < RESUME_INDEX_RUNNING,
8493 // the generator is suspended.
8494 Address
addr(scratch
, AbstractGeneratorObject::offsetOfResumeIndexSlot());
8495 masm
.fallibleUnboxInt32(addr
, scratch
, &returnFalse
);
8496 masm
.branch32(Assembler::AboveOrEqual
, scratch
,
8497 Imm32(AbstractGeneratorObject::RESUME_INDEX_RUNNING
),
8500 masm
.moveValue(BooleanValue(true), output
.valueReg());
8503 masm
.bind(&returnFalse
);
8504 masm
.moveValue(BooleanValue(false), output
.valueReg());
8510 // This op generates no code. It is consumed by the transpiler.
8511 bool CacheIRCompiler::emitMetaScriptedThisShape(uint32_t) { return true; }
8513 bool CacheIRCompiler::emitCallNativeGetElementResult(ObjOperandId objId
,
8514 Int32OperandId indexId
) {
8515 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8516 AutoCallVM
callvm(masm
, this, allocator
);
8518 Register obj
= allocator
.useRegister(masm
, objId
);
8519 Register index
= allocator
.useRegister(masm
, indexId
);
8524 masm
.Push(TypedOrValueRegister(MIRType::Object
, AnyRegister(obj
)));
8527 using Fn
= bool (*)(JSContext
*, Handle
<NativeObject
*>, HandleValue
, int32_t,
8528 MutableHandleValue
);
8529 callvm
.call
<Fn
, NativeGetElement
>();
8534 bool CacheIRCompiler::emitCallNativeGetElementSuperResult(
8535 ObjOperandId objId
, Int32OperandId indexId
, ValOperandId receiverId
) {
8536 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8537 AutoCallVM
callvm(masm
, this, allocator
);
8539 Register obj
= allocator
.useRegister(masm
, objId
);
8540 Register index
= allocator
.useRegister(masm
, indexId
);
8541 ValueOperand receiver
= allocator
.useValueRegister(masm
, receiverId
);
8546 masm
.Push(receiver
);
8549 using Fn
= bool (*)(JSContext
*, Handle
<NativeObject
*>, HandleValue
, int32_t,
8550 MutableHandleValue
);
8551 callvm
.call
<Fn
, NativeGetElement
>();
8556 bool CacheIRCompiler::emitProxyHasPropResult(ObjOperandId objId
,
8557 ValOperandId idId
, bool hasOwn
) {
8558 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8559 AutoCallVM
callvm(masm
, this, allocator
);
8561 Register obj
= allocator
.useRegister(masm
, objId
);
8562 ValueOperand idVal
= allocator
.useValueRegister(masm
, idId
);
8569 using Fn
= bool (*)(JSContext
*, HandleObject
, HandleValue
, bool*);
8571 callvm
.call
<Fn
, ProxyHasOwn
>();
8573 callvm
.call
<Fn
, ProxyHas
>();
8578 bool CacheIRCompiler::emitProxyGetByValueResult(ObjOperandId objId
,
8579 ValOperandId idId
) {
8580 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8581 AutoCallVM
callvm(masm
, this, allocator
);
8583 Register obj
= allocator
.useRegister(masm
, objId
);
8584 ValueOperand idVal
= allocator
.useValueRegister(masm
, idId
);
8591 bool (*)(JSContext
*, HandleObject
, HandleValue
, MutableHandleValue
);
8592 callvm
.call
<Fn
, ProxyGetPropertyByValue
>();
8596 bool CacheIRCompiler::emitCallGetSparseElementResult(ObjOperandId objId
,
8597 Int32OperandId indexId
) {
8598 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8600 AutoCallVM
callvm(masm
, this, allocator
);
8602 Register obj
= allocator
.useRegister(masm
, objId
);
8603 Register id
= allocator
.useRegister(masm
, indexId
);
8609 using Fn
= bool (*)(JSContext
* cx
, Handle
<NativeObject
*> obj
, int32_t int_id
,
8610 MutableHandleValue result
);
8611 callvm
.call
<Fn
, GetSparseElementHelper
>();
8615 bool CacheIRCompiler::emitRegExpSearcherLastLimitResult() {
8616 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8618 AutoOutputRegister
output(*this);
8619 AutoScratchRegisterMaybeOutput
scratch1(allocator
, masm
, output
);
8620 AutoScratchRegister
scratch2(allocator
, masm
);
8622 masm
.loadAndClearRegExpSearcherLastLimit(scratch1
, scratch2
);
8624 masm
.tagValue(JSVAL_TYPE_INT32
, scratch1
, output
.valueReg());
8628 bool CacheIRCompiler::emitRegExpFlagResult(ObjOperandId regexpId
,
8629 int32_t flagsMask
) {
8630 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8632 AutoOutputRegister
output(*this);
8633 Register regexp
= allocator
.useRegister(masm
, regexpId
);
8634 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
8637 regexp
, NativeObject::getFixedSlotOffset(RegExpObject::flagsSlot()));
8638 masm
.unboxInt32(flagsAddr
, scratch
);
8640 Label ifFalse
, done
;
8641 masm
.branchTest32(Assembler::Zero
, scratch
, Imm32(flagsMask
), &ifFalse
);
8642 masm
.moveValue(BooleanValue(true), output
.valueReg());
8645 masm
.bind(&ifFalse
);
8646 masm
.moveValue(BooleanValue(false), output
.valueReg());
8652 bool CacheIRCompiler::emitCallSubstringKernelResult(StringOperandId strId
,
8653 Int32OperandId beginId
,
8654 Int32OperandId lengthId
) {
8655 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8657 AutoCallVM
callvm(masm
, this, allocator
);
8659 Register str
= allocator
.useRegister(masm
, strId
);
8660 Register begin
= allocator
.useRegister(masm
, beginId
);
8661 Register length
= allocator
.useRegister(masm
, lengthId
);
8668 using Fn
= JSString
* (*)(JSContext
* cx
, HandleString str
, int32_t begin
,
8670 callvm
.call
<Fn
, SubstringKernel
>();
8674 bool CacheIRCompiler::emitStringReplaceStringResult(
8675 StringOperandId strId
, StringOperandId patternId
,
8676 StringOperandId replacementId
) {
8677 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8679 AutoCallVM
callvm(masm
, this, allocator
);
8681 Register str
= allocator
.useRegister(masm
, strId
);
8682 Register pattern
= allocator
.useRegister(masm
, patternId
);
8683 Register replacement
= allocator
.useRegister(masm
, replacementId
);
8686 masm
.Push(replacement
);
8691 JSString
* (*)(JSContext
*, HandleString
, HandleString
, HandleString
);
8692 callvm
.call
<Fn
, jit::StringReplace
>();
8696 bool CacheIRCompiler::emitStringSplitStringResult(StringOperandId strId
,
8697 StringOperandId separatorId
) {
8698 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8700 AutoCallVM
callvm(masm
, this, allocator
);
8702 Register str
= allocator
.useRegister(masm
, strId
);
8703 Register separator
= allocator
.useRegister(masm
, separatorId
);
8706 masm
.Push(Imm32(INT32_MAX
));
8707 masm
.Push(separator
);
8710 using Fn
= ArrayObject
* (*)(JSContext
*, HandleString
, HandleString
, uint32_t);
8711 callvm
.call
<Fn
, js::StringSplitString
>();
8715 bool CacheIRCompiler::emitRegExpPrototypeOptimizableResult(
8716 ObjOperandId protoId
) {
8717 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8719 AutoOutputRegister
output(*this);
8720 Register proto
= allocator
.useRegister(masm
, protoId
);
8721 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
8724 masm
.branchIfNotRegExpPrototypeOptimizable(
8725 proto
, scratch
, /* maybeGlobal = */ nullptr, &slow
);
8726 masm
.moveValue(BooleanValue(true), output
.valueReg());
8732 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
8733 liveVolatileFloatRegs());
8734 volatileRegs
.takeUnchecked(scratch
);
8735 masm
.PushRegsInMask(volatileRegs
);
8737 using Fn
= bool (*)(JSContext
* cx
, JSObject
* proto
);
8738 masm
.setupUnalignedABICall(scratch
);
8739 masm
.loadJSContext(scratch
);
8740 masm
.passABIArg(scratch
);
8741 masm
.passABIArg(proto
);
8742 masm
.callWithABI
<Fn
, RegExpPrototypeOptimizableRaw
>();
8743 masm
.storeCallBoolResult(scratch
);
8745 masm
.PopRegsInMask(volatileRegs
);
8746 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch
, output
.valueReg());
8753 bool CacheIRCompiler::emitRegExpInstanceOptimizableResult(
8754 ObjOperandId regexpId
, ObjOperandId protoId
) {
8755 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8757 AutoOutputRegister
output(*this);
8758 Register regexp
= allocator
.useRegister(masm
, regexpId
);
8759 Register proto
= allocator
.useRegister(masm
, protoId
);
8760 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
8763 masm
.branchIfNotRegExpInstanceOptimizable(regexp
, scratch
,
8764 /* maybeGlobal = */ nullptr, &slow
);
8765 masm
.moveValue(BooleanValue(true), output
.valueReg());
8771 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
8772 liveVolatileFloatRegs());
8773 volatileRegs
.takeUnchecked(scratch
);
8774 masm
.PushRegsInMask(volatileRegs
);
8776 using Fn
= bool (*)(JSContext
* cx
, JSObject
* obj
, JSObject
* proto
);
8777 masm
.setupUnalignedABICall(scratch
);
8778 masm
.loadJSContext(scratch
);
8779 masm
.passABIArg(scratch
);
8780 masm
.passABIArg(regexp
);
8781 masm
.passABIArg(proto
);
8782 masm
.callWithABI
<Fn
, RegExpInstanceOptimizableRaw
>();
8783 masm
.storeCallBoolResult(scratch
);
8785 masm
.PopRegsInMask(volatileRegs
);
8786 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch
, output
.valueReg());
8793 bool CacheIRCompiler::emitGetFirstDollarIndexResult(StringOperandId strId
) {
8794 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8796 AutoCallVM
callvm(masm
, this, allocator
);
8798 Register str
= allocator
.useRegister(masm
, strId
);
8803 using Fn
= bool (*)(JSContext
*, JSString
*, int32_t*);
8804 callvm
.call
<Fn
, GetFirstDollarIndexRaw
>();
8808 bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
8809 ObjOperandId objId
, IntPtrOperandId indexId
, uint32_t expectedId
,
8810 uint32_t replacementId
, Scalar::Type elementType
) {
8811 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
8813 Maybe
<AutoOutputRegister
> output
;
8814 Maybe
<AutoCallVM
> callvm
;
8815 if (!Scalar::isBigIntType(elementType
)) {
8816 output
.emplace(*this);
8818 callvm
.emplace(masm
, this, allocator
);
8820 #ifdef JS_CODEGEN_X86
8821 // Use a scratch register to avoid running out of registers.
8822 Register obj
= output
? output
->valueReg().typeReg()
8823 : callvm
->outputValueReg().typeReg();
8824 allocator
.copyToScratchRegister(masm
, objId
, obj
);
8826 Register obj
= allocator
.useRegister(masm
, objId
);
8828 Register index
= allocator
.useRegister(masm
, indexId
);
8830 Register replacement
;
8831 if (!Scalar::isBigIntType(elementType
)) {
8832 expected
= allocator
.useRegister(masm
, Int32OperandId(expectedId
));
8833 replacement
= allocator
.useRegister(masm
, Int32OperandId(replacementId
));
8835 expected
= allocator
.useRegister(masm
, BigIntOperandId(expectedId
));
8836 replacement
= allocator
.useRegister(masm
, BigIntOperandId(replacementId
));
8839 Register scratch
= output
? output
->valueReg().scratchReg()
8840 : callvm
->outputValueReg().scratchReg();
8841 MOZ_ASSERT(scratch
!= obj
, "scratchReg must not be typeReg");
8843 // Not enough registers on X86.
8844 Register spectreTemp
= Register::Invalid();
8846 FailurePath
* failure
;
8847 if (!addFailurePath(&failure
)) {
8851 // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
8852 // we can't use both at the same time. This isn't an issue here, because Ion
8853 // doesn't support CallICs. If that ever changes, this code must be updated.
8854 MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
8857 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch
);
8858 masm
.spectreBoundsCheckPtr(index
, scratch
, spectreTemp
, failure
->label());
8860 // Atomic operations are highly platform-dependent, for example x86/x64 has
8861 // specific requirements on which registers are used; MIPS needs multiple
8862 // additional temporaries. Therefore we're using either an ABI or VM call here
8863 // instead of handling each platform separately.
8865 if (Scalar::isBigIntType(elementType
)) {
8868 masm
.Push(replacement
);
8869 masm
.Push(expected
);
8873 using Fn
= BigInt
* (*)(JSContext
*, FixedLengthTypedArrayObject
*, size_t,
8874 const BigInt
*, const BigInt
*);
8875 callvm
->call
<Fn
, jit::AtomicsCompareExchange64
>();
8880 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
8881 liveVolatileFloatRegs());
8882 volatileRegs
.takeUnchecked(output
->valueReg());
8883 volatileRegs
.takeUnchecked(scratch
);
8884 masm
.PushRegsInMask(volatileRegs
);
8886 masm
.setupUnalignedABICall(scratch
);
8887 masm
.passABIArg(obj
);
8888 masm
.passABIArg(index
);
8889 masm
.passABIArg(expected
);
8890 masm
.passABIArg(replacement
);
8891 masm
.callWithABI(DynamicFunction
<AtomicsCompareExchangeFn
>(
8892 AtomicsCompareExchange(elementType
)));
8893 masm
.storeCallInt32Result(scratch
);
8895 masm
.PopRegsInMask(volatileRegs
);
8898 if (elementType
!= Scalar::Uint32
) {
8899 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
->valueReg());
8901 ScratchDoubleScope
fpscratch(masm
);
8902 masm
.convertUInt32ToDouble(scratch
, fpscratch
);
8903 masm
.boxDouble(fpscratch
, output
->valueReg(), fpscratch
);
8909 bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
8910 ObjOperandId objId
, IntPtrOperandId indexId
, uint32_t valueId
,
8911 Scalar::Type elementType
, AtomicsReadWriteModifyFn fn
) {
8912 AutoOutputRegister
output(*this);
8913 Register obj
= allocator
.useRegister(masm
, objId
);
8914 Register index
= allocator
.useRegister(masm
, indexId
);
8915 Register value
= allocator
.useRegister(masm
, Int32OperandId(valueId
));
8916 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
8918 // Not enough registers on X86.
8919 Register spectreTemp
= Register::Invalid();
8921 FailurePath
* failure
;
8922 if (!addFailurePath(&failure
)) {
8927 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch
);
8928 masm
.spectreBoundsCheckPtr(index
, scratch
, spectreTemp
, failure
->label());
8930 // See comment in emitAtomicsCompareExchange for why we use an ABI call.
8932 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
8933 liveVolatileFloatRegs());
8934 volatileRegs
.takeUnchecked(output
.valueReg());
8935 volatileRegs
.takeUnchecked(scratch
);
8936 masm
.PushRegsInMask(volatileRegs
);
8938 masm
.setupUnalignedABICall(scratch
);
8939 masm
.passABIArg(obj
);
8940 masm
.passABIArg(index
);
8941 masm
.passABIArg(value
);
8942 masm
.callWithABI(DynamicFunction
<AtomicsReadWriteModifyFn
>(fn
));
8943 masm
.storeCallInt32Result(scratch
);
8945 masm
.PopRegsInMask(volatileRegs
);
8948 if (elementType
!= Scalar::Uint32
) {
8949 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
8951 ScratchDoubleScope
fpscratch(masm
);
8952 masm
.convertUInt32ToDouble(scratch
, fpscratch
);
8953 masm
.boxDouble(fpscratch
, output
.valueReg(), fpscratch
);
8959 template <CacheIRCompiler::AtomicsReadWriteModify64Fn fn
>
8960 bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
8961 ObjOperandId objId
, IntPtrOperandId indexId
, uint32_t valueId
) {
8962 AutoCallVM
callvm(masm
, this, allocator
);
8963 Register obj
= allocator
.useRegister(masm
, objId
);
8964 Register index
= allocator
.useRegister(masm
, indexId
);
8965 Register value
= allocator
.useRegister(masm
, BigIntOperandId(valueId
));
8966 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, callvm
.output());
8968 // Not enough registers on X86.
8969 Register spectreTemp
= Register::Invalid();
8971 FailurePath
* failure
;
8972 if (!addFailurePath(&failure
)) {
8976 // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
8977 // we can't use both at the same time. This isn't an issue here, because Ion
8978 // doesn't support CallICs. If that ever changes, this code must be updated.
8979 MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
8982 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch
);
8983 masm
.spectreBoundsCheckPtr(index
, scratch
, spectreTemp
, failure
->label());
8985 // See comment in emitAtomicsCompareExchange for why we use a VM call.
8993 callvm
.call
<AtomicsReadWriteModify64Fn
, fn
>();
8997 bool CacheIRCompiler::emitAtomicsExchangeResult(ObjOperandId objId
,
8998 IntPtrOperandId indexId
,
9000 Scalar::Type elementType
) {
9001 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9003 if (Scalar::isBigIntType(elementType
)) {
9004 return emitAtomicsReadModifyWriteResult64
<jit::AtomicsExchange64
>(
9005 objId
, indexId
, valueId
);
9007 return emitAtomicsReadModifyWriteResult(objId
, indexId
, valueId
, elementType
,
9008 AtomicsExchange(elementType
));
9011 bool CacheIRCompiler::emitAtomicsAddResult(ObjOperandId objId
,
9012 IntPtrOperandId indexId
,
9014 Scalar::Type elementType
,
9016 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9018 if (Scalar::isBigIntType(elementType
)) {
9019 return emitAtomicsReadModifyWriteResult64
<jit::AtomicsAdd64
>(objId
, indexId
,
9022 return emitAtomicsReadModifyWriteResult(objId
, indexId
, valueId
, elementType
,
9023 AtomicsAdd(elementType
));
9026 bool CacheIRCompiler::emitAtomicsSubResult(ObjOperandId objId
,
9027 IntPtrOperandId indexId
,
9029 Scalar::Type elementType
,
9031 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9033 if (Scalar::isBigIntType(elementType
)) {
9034 return emitAtomicsReadModifyWriteResult64
<jit::AtomicsSub64
>(objId
, indexId
,
9037 return emitAtomicsReadModifyWriteResult(objId
, indexId
, valueId
, elementType
,
9038 AtomicsSub(elementType
));
9041 bool CacheIRCompiler::emitAtomicsAndResult(ObjOperandId objId
,
9042 IntPtrOperandId indexId
,
9044 Scalar::Type elementType
,
9046 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9048 if (Scalar::isBigIntType(elementType
)) {
9049 return emitAtomicsReadModifyWriteResult64
<jit::AtomicsAnd64
>(objId
, indexId
,
9052 return emitAtomicsReadModifyWriteResult(objId
, indexId
, valueId
, elementType
,
9053 AtomicsAnd(elementType
));
9056 bool CacheIRCompiler::emitAtomicsOrResult(ObjOperandId objId
,
9057 IntPtrOperandId indexId
,
9059 Scalar::Type elementType
,
9061 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9063 if (Scalar::isBigIntType(elementType
)) {
9064 return emitAtomicsReadModifyWriteResult64
<jit::AtomicsOr64
>(objId
, indexId
,
9067 return emitAtomicsReadModifyWriteResult(objId
, indexId
, valueId
, elementType
,
9068 AtomicsOr(elementType
));
9071 bool CacheIRCompiler::emitAtomicsXorResult(ObjOperandId objId
,
9072 IntPtrOperandId indexId
,
9074 Scalar::Type elementType
,
9076 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9078 if (Scalar::isBigIntType(elementType
)) {
9079 return emitAtomicsReadModifyWriteResult64
<jit::AtomicsXor64
>(objId
, indexId
,
9082 return emitAtomicsReadModifyWriteResult(objId
, indexId
, valueId
, elementType
,
9083 AtomicsXor(elementType
));
9086 bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId
,
9087 IntPtrOperandId indexId
,
9088 Scalar::Type elementType
) {
9089 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9091 Maybe
<AutoOutputRegister
> output
;
9092 Maybe
<AutoCallVM
> callvm
;
9093 if (!Scalar::isBigIntType(elementType
)) {
9094 output
.emplace(*this);
9096 callvm
.emplace(masm
, this, allocator
);
9098 Register obj
= allocator
.useRegister(masm
, objId
);
9099 Register index
= allocator
.useRegister(masm
, indexId
);
9100 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
,
9101 output
? *output
: callvm
->output());
9102 AutoSpectreBoundsScratchRegister
spectreTemp(allocator
, masm
);
9103 AutoAvailableFloatRegister
floatReg(*this, FloatReg0
);
9105 FailurePath
* failure
;
9106 if (!addFailurePath(&failure
)) {
9110 // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
9111 // we can't use both at the same time. This isn't an issue here, because Ion
9112 // doesn't support CallICs. If that ever changes, this code must be updated.
9113 MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
9116 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch
);
9117 masm
.spectreBoundsCheckPtr(index
, scratch
, spectreTemp
, failure
->label());
9119 // Atomic operations are highly platform-dependent, for example x86/arm32 has
9120 // specific requirements on which registers are used. Therefore we're using a
9121 // VM call here instead of handling each platform separately.
9122 if (Scalar::isBigIntType(elementType
)) {
9128 using Fn
= BigInt
* (*)(JSContext
*, FixedLengthTypedArrayObject
*, size_t);
9129 callvm
->call
<Fn
, jit::AtomicsLoad64
>();
9133 // Load the elements vector.
9134 masm
.loadPtr(Address(obj
, ArrayBufferViewObject::dataOffset()), scratch
);
9137 BaseIndex
source(scratch
, index
, ScaleFromScalarType(elementType
));
9139 // NOTE: the generated code must match the assembly code in gen_load in
9140 // GenerateAtomicOperations.py
9141 auto sync
= Synchronization::Load();
9143 masm
.memoryBarrierBefore(sync
);
9145 Label
* failUint32
= nullptr;
9146 MacroAssembler::Uint32Mode mode
= MacroAssembler::Uint32Mode::ForceDouble
;
9147 masm
.loadFromTypedArray(elementType
, source
, output
->valueReg(), mode
,
9148 scratch
, failUint32
);
9149 masm
.memoryBarrierAfter(sync
);
9154 bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId
,
9155 IntPtrOperandId indexId
,
9157 Scalar::Type elementType
) {
9158 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9160 AutoOutputRegister
output(*this);
9161 Register obj
= allocator
.useRegister(masm
, objId
);
9162 Register index
= allocator
.useRegister(masm
, indexId
);
9163 Maybe
<Register
> valueInt32
;
9164 Maybe
<Register
> valueBigInt
;
9165 if (!Scalar::isBigIntType(elementType
)) {
9166 valueInt32
.emplace(allocator
.useRegister(masm
, Int32OperandId(valueId
)));
9168 valueBigInt
.emplace(allocator
.useRegister(masm
, BigIntOperandId(valueId
)));
9170 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
9172 // Not enough registers on X86.
9173 Register spectreTemp
= Register::Invalid();
9175 FailurePath
* failure
;
9176 if (!addFailurePath(&failure
)) {
9181 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch
);
9182 masm
.spectreBoundsCheckPtr(index
, scratch
, spectreTemp
, failure
->label());
9184 if (!Scalar::isBigIntType(elementType
)) {
9185 // Load the elements vector.
9186 masm
.loadPtr(Address(obj
, ArrayBufferViewObject::dataOffset()), scratch
);
9189 BaseIndex
dest(scratch
, index
, ScaleFromScalarType(elementType
));
9191 // NOTE: the generated code must match the assembly code in gen_store in
9192 // GenerateAtomicOperations.py
9193 auto sync
= Synchronization::Store();
9195 masm
.memoryBarrierBefore(sync
);
9196 masm
.storeToTypedIntArray(elementType
, *valueInt32
, dest
);
9197 masm
.memoryBarrierAfter(sync
);
9199 masm
.tagValue(JSVAL_TYPE_INT32
, *valueInt32
, output
.valueReg());
9201 // See comment in emitAtomicsCompareExchange for why we use an ABI call.
9203 LiveRegisterSet
volatileRegs(GeneralRegisterSet::Volatile(),
9204 liveVolatileFloatRegs());
9205 volatileRegs
.takeUnchecked(output
.valueReg());
9206 volatileRegs
.takeUnchecked(scratch
);
9207 masm
.PushRegsInMask(volatileRegs
);
9209 using Fn
= void (*)(FixedLengthTypedArrayObject
*, size_t, const BigInt
*);
9210 masm
.setupUnalignedABICall(scratch
);
9211 masm
.passABIArg(obj
);
9212 masm
.passABIArg(index
);
9213 masm
.passABIArg(*valueBigInt
);
9214 masm
.callWithABI
<Fn
, jit::AtomicsStore64
>();
9216 masm
.PopRegsInMask(volatileRegs
);
9218 masm
.tagValue(JSVAL_TYPE_BIGINT
, *valueBigInt
, output
.valueReg());
9224 bool CacheIRCompiler::emitAtomicsIsLockFreeResult(Int32OperandId valueId
) {
9225 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9227 AutoOutputRegister
output(*this);
9228 Register value
= allocator
.useRegister(masm
, valueId
);
9229 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
9231 masm
.atomicIsLockFreeJS(value
, scratch
);
9232 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch
, output
.valueReg());
9237 bool CacheIRCompiler::emitBigIntAsIntNResult(Int32OperandId bitsId
,
9238 BigIntOperandId bigIntId
) {
9239 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9241 AutoCallVM
callvm(masm
, this, allocator
);
9243 Register bits
= allocator
.useRegister(masm
, bitsId
);
9244 Register bigInt
= allocator
.useRegister(masm
, bigIntId
);
9250 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, int32_t);
9251 callvm
.call
<Fn
, jit::BigIntAsIntN
>();
9255 bool CacheIRCompiler::emitBigIntAsUintNResult(Int32OperandId bitsId
,
9256 BigIntOperandId bigIntId
) {
9257 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9259 AutoCallVM
callvm(masm
, this, allocator
);
9261 Register bits
= allocator
.useRegister(masm
, bitsId
);
9262 Register bigInt
= allocator
.useRegister(masm
, bigIntId
);
9268 using Fn
= BigInt
* (*)(JSContext
*, HandleBigInt
, int32_t);
9269 callvm
.call
<Fn
, jit::BigIntAsUintN
>();
9273 bool CacheIRCompiler::emitSetHasResult(ObjOperandId setId
, ValOperandId valId
) {
9274 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9276 AutoCallVM
callvm(masm
, this, allocator
);
9278 Register set
= allocator
.useRegister(masm
, setId
);
9279 ValueOperand val
= allocator
.useValueRegister(masm
, valId
);
9285 using Fn
= bool (*)(JSContext
*, HandleObject
, HandleValue
, bool*);
9286 callvm
.call
<Fn
, jit::SetObjectHas
>();
9290 bool CacheIRCompiler::emitSetHasNonGCThingResult(ObjOperandId setId
,
9291 ValOperandId valId
) {
9292 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9294 AutoOutputRegister
output(*this);
9295 Register set
= allocator
.useRegister(masm
, setId
);
9296 ValueOperand val
= allocator
.useValueRegister(masm
, valId
);
9298 AutoScratchRegister
scratch1(allocator
, masm
);
9299 AutoScratchRegister
scratch2(allocator
, masm
);
9300 AutoScratchRegister
scratch3(allocator
, masm
);
9301 AutoScratchRegister
scratch4(allocator
, masm
);
9302 AutoAvailableFloatRegister
scratchFloat(*this, FloatReg0
);
9304 masm
.toHashableNonGCThing(val
, output
.valueReg(), scratchFloat
);
9305 masm
.prepareHashNonGCThing(output
.valueReg(), scratch1
, scratch2
);
9307 masm
.setObjectHasNonBigInt(set
, output
.valueReg(), scratch1
, scratch2
,
9308 scratch3
, scratch4
);
9309 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch2
, output
.valueReg());
9313 bool CacheIRCompiler::emitSetHasSymbolResult(ObjOperandId setId
,
9314 SymbolOperandId symId
) {
9315 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9317 AutoOutputRegister
output(*this);
9318 Register set
= allocator
.useRegister(masm
, setId
);
9319 Register sym
= allocator
.useRegister(masm
, symId
);
9321 AutoScratchRegister
scratch1(allocator
, masm
);
9322 AutoScratchRegister
scratch2(allocator
, masm
);
9323 AutoScratchRegister
scratch3(allocator
, masm
);
9324 AutoScratchRegister
scratch4(allocator
, masm
);
9326 masm
.prepareHashSymbol(sym
, scratch1
);
9328 masm
.tagValue(JSVAL_TYPE_SYMBOL
, sym
, output
.valueReg());
9329 masm
.setObjectHasNonBigInt(set
, output
.valueReg(), scratch1
, scratch2
,
9330 scratch3
, scratch4
);
9331 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch2
, output
.valueReg());
9335 bool CacheIRCompiler::emitSetHasBigIntResult(ObjOperandId setId
,
9336 BigIntOperandId bigIntId
) {
9337 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9339 AutoOutputRegister
output(*this);
9340 Register set
= allocator
.useRegister(masm
, setId
);
9341 Register bigInt
= allocator
.useRegister(masm
, bigIntId
);
9343 AutoScratchRegister
scratch1(allocator
, masm
);
9344 AutoScratchRegister
scratch2(allocator
, masm
);
9345 AutoScratchRegister
scratch3(allocator
, masm
);
9346 AutoScratchRegister
scratch4(allocator
, masm
);
9347 AutoScratchRegister
scratch5(allocator
, masm
);
9348 #ifndef JS_CODEGEN_ARM
9349 AutoScratchRegister
scratch6(allocator
, masm
);
9351 // We don't have more registers available on ARM32.
9352 Register scratch6
= set
;
9357 masm
.prepareHashBigInt(bigInt
, scratch1
, scratch2
, scratch3
, scratch4
);
9359 masm
.tagValue(JSVAL_TYPE_BIGINT
, bigInt
, output
.valueReg());
9360 masm
.setObjectHasBigInt(set
, output
.valueReg(), scratch1
, scratch2
, scratch3
,
9361 scratch4
, scratch5
, scratch6
);
9362 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch2
, output
.valueReg());
9364 #ifdef JS_CODEGEN_ARM
9370 bool CacheIRCompiler::emitSetHasObjectResult(ObjOperandId setId
,
9371 ObjOperandId objId
) {
9372 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9374 AutoOutputRegister
output(*this);
9375 Register set
= allocator
.useRegister(masm
, setId
);
9376 Register obj
= allocator
.useRegister(masm
, objId
);
9378 AutoScratchRegister
scratch1(allocator
, masm
);
9379 AutoScratchRegister
scratch2(allocator
, masm
);
9380 AutoScratchRegister
scratch3(allocator
, masm
);
9381 AutoScratchRegister
scratch4(allocator
, masm
);
9382 AutoScratchRegister
scratch5(allocator
, masm
);
9384 masm
.tagValue(JSVAL_TYPE_OBJECT
, obj
, output
.valueReg());
9385 masm
.prepareHashObject(set
, output
.valueReg(), scratch1
, scratch2
, scratch3
,
9386 scratch4
, scratch5
);
9388 masm
.setObjectHasNonBigInt(set
, output
.valueReg(), scratch1
, scratch2
,
9389 scratch3
, scratch4
);
9390 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch2
, output
.valueReg());
9394 bool CacheIRCompiler::emitSetSizeResult(ObjOperandId setId
) {
9395 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9397 AutoOutputRegister
output(*this);
9398 Register set
= allocator
.useRegister(masm
, setId
);
9399 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
9401 masm
.loadSetObjectSize(set
, scratch
);
9402 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
9406 bool CacheIRCompiler::emitMapHasResult(ObjOperandId mapId
, ValOperandId valId
) {
9407 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9409 AutoCallVM
callvm(masm
, this, allocator
);
9411 Register map
= allocator
.useRegister(masm
, mapId
);
9412 ValueOperand val
= allocator
.useValueRegister(masm
, valId
);
9418 using Fn
= bool (*)(JSContext
*, HandleObject
, HandleValue
, bool*);
9419 callvm
.call
<Fn
, jit::MapObjectHas
>();
9423 bool CacheIRCompiler::emitMapHasNonGCThingResult(ObjOperandId mapId
,
9424 ValOperandId valId
) {
9425 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9427 AutoOutputRegister
output(*this);
9428 Register map
= allocator
.useRegister(masm
, mapId
);
9429 ValueOperand val
= allocator
.useValueRegister(masm
, valId
);
9431 AutoScratchRegister
scratch1(allocator
, masm
);
9432 AutoScratchRegister
scratch2(allocator
, masm
);
9433 AutoScratchRegister
scratch3(allocator
, masm
);
9434 AutoScratchRegister
scratch4(allocator
, masm
);
9435 AutoAvailableFloatRegister
scratchFloat(*this, FloatReg0
);
9437 masm
.toHashableNonGCThing(val
, output
.valueReg(), scratchFloat
);
9438 masm
.prepareHashNonGCThing(output
.valueReg(), scratch1
, scratch2
);
9440 masm
.mapObjectHasNonBigInt(map
, output
.valueReg(), scratch1
, scratch2
,
9441 scratch3
, scratch4
);
9442 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch2
, output
.valueReg());
9446 bool CacheIRCompiler::emitMapHasSymbolResult(ObjOperandId mapId
,
9447 SymbolOperandId symId
) {
9448 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9450 AutoOutputRegister
output(*this);
9451 Register map
= allocator
.useRegister(masm
, mapId
);
9452 Register sym
= allocator
.useRegister(masm
, symId
);
9454 AutoScratchRegister
scratch1(allocator
, masm
);
9455 AutoScratchRegister
scratch2(allocator
, masm
);
9456 AutoScratchRegister
scratch3(allocator
, masm
);
9457 AutoScratchRegister
scratch4(allocator
, masm
);
9459 masm
.prepareHashSymbol(sym
, scratch1
);
9461 masm
.tagValue(JSVAL_TYPE_SYMBOL
, sym
, output
.valueReg());
9462 masm
.mapObjectHasNonBigInt(map
, output
.valueReg(), scratch1
, scratch2
,
9463 scratch3
, scratch4
);
9464 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch2
, output
.valueReg());
9468 bool CacheIRCompiler::emitMapHasBigIntResult(ObjOperandId mapId
,
9469 BigIntOperandId bigIntId
) {
9470 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9472 AutoOutputRegister
output(*this);
9473 Register map
= allocator
.useRegister(masm
, mapId
);
9474 Register bigInt
= allocator
.useRegister(masm
, bigIntId
);
9476 AutoScratchRegister
scratch1(allocator
, masm
);
9477 AutoScratchRegister
scratch2(allocator
, masm
);
9478 AutoScratchRegister
scratch3(allocator
, masm
);
9479 AutoScratchRegister
scratch4(allocator
, masm
);
9480 AutoScratchRegister
scratch5(allocator
, masm
);
9481 #ifndef JS_CODEGEN_ARM
9482 AutoScratchRegister
scratch6(allocator
, masm
);
9484 // We don't have more registers available on ARM32.
9485 Register scratch6
= map
;
9490 masm
.prepareHashBigInt(bigInt
, scratch1
, scratch2
, scratch3
, scratch4
);
9492 masm
.tagValue(JSVAL_TYPE_BIGINT
, bigInt
, output
.valueReg());
9493 masm
.mapObjectHasBigInt(map
, output
.valueReg(), scratch1
, scratch2
, scratch3
,
9494 scratch4
, scratch5
, scratch6
);
9495 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch2
, output
.valueReg());
9497 #ifdef JS_CODEGEN_ARM
9503 bool CacheIRCompiler::emitMapHasObjectResult(ObjOperandId mapId
,
9504 ObjOperandId objId
) {
9505 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9507 AutoOutputRegister
output(*this);
9508 Register map
= allocator
.useRegister(masm
, mapId
);
9509 Register obj
= allocator
.useRegister(masm
, objId
);
9511 AutoScratchRegister
scratch1(allocator
, masm
);
9512 AutoScratchRegister
scratch2(allocator
, masm
);
9513 AutoScratchRegister
scratch3(allocator
, masm
);
9514 AutoScratchRegister
scratch4(allocator
, masm
);
9515 AutoScratchRegister
scratch5(allocator
, masm
);
9517 masm
.tagValue(JSVAL_TYPE_OBJECT
, obj
, output
.valueReg());
9518 masm
.prepareHashObject(map
, output
.valueReg(), scratch1
, scratch2
, scratch3
,
9519 scratch4
, scratch5
);
9521 masm
.mapObjectHasNonBigInt(map
, output
.valueReg(), scratch1
, scratch2
,
9522 scratch3
, scratch4
);
9523 masm
.tagValue(JSVAL_TYPE_BOOLEAN
, scratch2
, output
.valueReg());
9527 bool CacheIRCompiler::emitMapGetResult(ObjOperandId mapId
, ValOperandId valId
) {
9528 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9530 AutoCallVM
callvm(masm
, this, allocator
);
9532 Register map
= allocator
.useRegister(masm
, mapId
);
9533 ValueOperand val
= allocator
.useValueRegister(masm
, valId
);
9540 bool (*)(JSContext
*, HandleObject
, HandleValue
, MutableHandleValue
);
9541 callvm
.call
<Fn
, jit::MapObjectGet
>();
9545 bool CacheIRCompiler::emitMapGetNonGCThingResult(ObjOperandId mapId
,
9546 ValOperandId valId
) {
9547 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9549 AutoOutputRegister
output(*this);
9550 Register map
= allocator
.useRegister(masm
, mapId
);
9551 ValueOperand val
= allocator
.useValueRegister(masm
, valId
);
9553 AutoScratchRegister
scratch1(allocator
, masm
);
9554 AutoScratchRegister
scratch2(allocator
, masm
);
9555 AutoScratchRegister
scratch3(allocator
, masm
);
9556 AutoScratchRegister
scratch4(allocator
, masm
);
9557 AutoAvailableFloatRegister
scratchFloat(*this, FloatReg0
);
9559 masm
.toHashableNonGCThing(val
, output
.valueReg(), scratchFloat
);
9560 masm
.prepareHashNonGCThing(output
.valueReg(), scratch1
, scratch2
);
9562 masm
.mapObjectGetNonBigInt(map
, output
.valueReg(), scratch1
,
9563 output
.valueReg(), scratch2
, scratch3
, scratch4
);
9567 bool CacheIRCompiler::emitMapGetSymbolResult(ObjOperandId mapId
,
9568 SymbolOperandId symId
) {
9569 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9571 AutoOutputRegister
output(*this);
9572 Register map
= allocator
.useRegister(masm
, mapId
);
9573 Register sym
= allocator
.useRegister(masm
, symId
);
9575 AutoScratchRegister
scratch1(allocator
, masm
);
9576 AutoScratchRegister
scratch2(allocator
, masm
);
9577 AutoScratchRegister
scratch3(allocator
, masm
);
9578 AutoScratchRegister
scratch4(allocator
, masm
);
9580 masm
.prepareHashSymbol(sym
, scratch1
);
9582 masm
.tagValue(JSVAL_TYPE_SYMBOL
, sym
, output
.valueReg());
9583 masm
.mapObjectGetNonBigInt(map
, output
.valueReg(), scratch1
,
9584 output
.valueReg(), scratch2
, scratch3
, scratch4
);
9588 bool CacheIRCompiler::emitMapGetBigIntResult(ObjOperandId mapId
,
9589 BigIntOperandId bigIntId
) {
9590 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9592 AutoOutputRegister
output(*this);
9593 Register map
= allocator
.useRegister(masm
, mapId
);
9594 Register bigInt
= allocator
.useRegister(masm
, bigIntId
);
9596 AutoScratchRegister
scratch1(allocator
, masm
);
9597 AutoScratchRegister
scratch2(allocator
, masm
);
9598 AutoScratchRegister
scratch3(allocator
, masm
);
9599 AutoScratchRegister
scratch4(allocator
, masm
);
9600 AutoScratchRegister
scratch5(allocator
, masm
);
9601 #ifndef JS_CODEGEN_ARM
9602 AutoScratchRegister
scratch6(allocator
, masm
);
9604 // We don't have more registers available on ARM32.
9605 Register scratch6
= map
;
9610 masm
.prepareHashBigInt(bigInt
, scratch1
, scratch2
, scratch3
, scratch4
);
9612 masm
.tagValue(JSVAL_TYPE_BIGINT
, bigInt
, output
.valueReg());
9613 masm
.mapObjectGetBigInt(map
, output
.valueReg(), scratch1
, output
.valueReg(),
9614 scratch2
, scratch3
, scratch4
, scratch5
, scratch6
);
9616 #ifdef JS_CODEGEN_ARM
9622 bool CacheIRCompiler::emitMapGetObjectResult(ObjOperandId mapId
,
9623 ObjOperandId objId
) {
9624 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9626 AutoOutputRegister
output(*this);
9627 Register map
= allocator
.useRegister(masm
, mapId
);
9628 Register obj
= allocator
.useRegister(masm
, objId
);
9630 AutoScratchRegister
scratch1(allocator
, masm
);
9631 AutoScratchRegister
scratch2(allocator
, masm
);
9632 AutoScratchRegister
scratch3(allocator
, masm
);
9633 AutoScratchRegister
scratch4(allocator
, masm
);
9634 AutoScratchRegister
scratch5(allocator
, masm
);
9636 masm
.tagValue(JSVAL_TYPE_OBJECT
, obj
, output
.valueReg());
9637 masm
.prepareHashObject(map
, output
.valueReg(), scratch1
, scratch2
, scratch3
,
9638 scratch4
, scratch5
);
9640 masm
.mapObjectGetNonBigInt(map
, output
.valueReg(), scratch1
,
9641 output
.valueReg(), scratch2
, scratch3
, scratch4
);
9645 bool CacheIRCompiler::emitMapSizeResult(ObjOperandId mapId
) {
9646 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9648 AutoOutputRegister
output(*this);
9649 Register map
= allocator
.useRegister(masm
, mapId
);
9650 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
9652 masm
.loadMapObjectSize(map
, scratch
);
9653 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
9657 bool CacheIRCompiler::emitArrayFromArgumentsObjectResult(ObjOperandId objId
,
9658 uint32_t shapeOffset
) {
9659 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9661 AutoCallVM
callvm(masm
, this, allocator
);
9663 Register obj
= allocator
.useRegister(masm
, objId
);
9668 using Fn
= ArrayObject
* (*)(JSContext
*, Handle
<ArgumentsObject
*>);
9669 callvm
.call
<Fn
, js::ArrayFromArgumentsObject
>();
9673 bool CacheIRCompiler::emitGuardGlobalGeneration(uint32_t expectedOffset
,
9674 uint32_t generationAddrOffset
) {
9675 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9677 AutoScratchRegister
scratch(allocator
, masm
);
9678 AutoScratchRegister
scratch2(allocator
, masm
);
9680 FailurePath
* failure
;
9681 if (!addFailurePath(&failure
)) {
9685 StubFieldOffset
expected(expectedOffset
, StubField::Type::RawInt32
);
9686 emitLoadStubField(expected
, scratch
);
9688 StubFieldOffset
generationAddr(generationAddrOffset
,
9689 StubField::Type::RawPointer
);
9690 emitLoadStubField(generationAddr
, scratch2
);
9692 masm
.branch32(Assembler::NotEqual
, Address(scratch2
, 0), scratch
,
9698 bool CacheIRCompiler::emitGuardFuse(RealmFuses::FuseIndex fuseIndex
) {
9699 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9700 AutoScratchRegister
scratch(allocator
, masm
);
9702 FailurePath
* failure
;
9703 if (!addFailurePath(&failure
)) {
9707 masm
.loadRealmFuse(fuseIndex
, scratch
);
9708 masm
.branchPtr(Assembler::NotEqual
, scratch
, ImmPtr(nullptr),
9713 bool CacheIRCompiler::emitBailout() {
9714 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9716 // Generates no code.
9721 bool CacheIRCompiler::emitAssertRecoveredOnBailoutResult(ValOperandId valId
,
9722 bool mustBeRecovered
) {
9723 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9725 AutoOutputRegister
output(*this);
9727 // NOP when not in IonMonkey
9728 masm
.moveValue(UndefinedValue(), output
.valueReg());
9733 bool CacheIRCompiler::emitAssertPropertyLookup(ObjOperandId objId
,
9735 uint32_t slotOffset
) {
9736 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
9738 Register obj
= allocator
.useRegister(masm
, objId
);
9740 AutoScratchRegister
id(allocator
, masm
);
9741 AutoScratchRegister
slot(allocator
, masm
);
9743 LiveRegisterSet
save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
9744 masm
.PushRegsInMask(save
);
9746 masm
.setupUnalignedABICall(id
);
9748 StubFieldOffset
idField(idOffset
, StubField::Type::Id
);
9749 emitLoadStubField(idField
, id
);
9751 StubFieldOffset
slotField(slotOffset
, StubField::Type::RawInt32
);
9752 emitLoadStubField(slotField
, slot
);
9754 masm
.passABIArg(obj
);
9755 masm
.passABIArg(id
);
9756 masm
.passABIArg(slot
);
9757 using Fn
= void (*)(NativeObject
*, PropertyKey
, uint32_t);
9758 masm
.callWithABI
<Fn
, js::jit::AssertPropertyLookup
>();
9759 masm
.PopRegsInMask(save
);
#ifdef FUZZING_JS_FUZZILLI
bool CacheIRCompiler::emitFuzzilliHashResult(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand input = allocator.useValueRegister(masm, valId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratchJSContext(allocator, masm);
  AutoScratchFloatRegister floatReg(this);
#  ifdef JS_PUNBOX64
  AutoScratchRegister64 scratch64(allocator, masm);
#  else
  AutoScratchRegister scratch2(allocator, masm);
#  endif

  Label addFloat, updateHash, done;

  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    // Int32: hash its double representation.
    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxInt32(input, scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notInt32);

    // Double: canonicalize (collapse NaNs) before hashing.
    Label notDouble;
    masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxDouble(input, floatReg);
      masm.canonicalizeDouble(floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notDouble);

    // Null hashes as the constant 1.
    Label notNull;
    masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
    {
      ScratchTagScopeRelease _(&tag);

      masm.move32(Imm32(1), scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notNull);

    // Undefined hashes as the constant 2.
    Label notUndefined;
    masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
    {
      ScratchTagScopeRelease _(&tag);

      masm.move32(Imm32(2), scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notUndefined);

    // Booleans hash as 3 (false) / 4 (true).
    Label notBoolean;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxBoolean(input, scratch);
      masm.add32(Imm32(3), scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notBoolean);

    // BigInt: call out to C++ to hash the digits.
    Label notBigInt;
    masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxBigInt(input, scratch);

      LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                   liveVolatileFloatRegs());
      masm.PushRegsInMask(volatileRegs);
      // TODO: remove floatReg, scratch, scratchJS?

      using Fn = uint32_t (*)(BigInt* bigInt);
      masm.setupUnalignedABICall(scratchJSContext);
      masm.loadJSContext(scratchJSContext);
      masm.passABIArg(scratch);
      masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
      masm.storeCallInt32Result(scratch);

      LiveRegisterSet ignore;
      ignore.add(scratch);
      ignore.add(scratchJSContext);
      masm.PopRegsInMaskIgnore(volatileRegs, ignore);
      masm.jump(&updateHash);
    }
    masm.bind(&notBigInt);

    // Objects: hash via a VM call (may walk the object graph).
    Label notObject;
    masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
    {
      ScratchTagScopeRelease _(&tag);

      AutoCallVM callvm(masm, this, allocator);
      Register obj = allocator.allocateRegister(masm);
      masm.unboxObject(input, obj);

      callvm.prepare();
      masm.Push(obj);
      using Fn = void (*)(JSContext* cx, JSObject* o);
      callvm.callNoResult<Fn, js::FuzzilliHashObject>();
      allocator.releaseRegister(obj);

      masm.jump(&done);
    }
    masm.bind(&notObject);
    {
      // Everything else contributes 0.
      masm.move32(Imm32(0), scratch);
      masm.jump(&updateHash);
    }
  }

  {
    masm.bind(&addFloat);

    masm.loadJSContext(scratchJSContext);
    Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));

    // Fold the double's two 32-bit halves into `scratch`.
#  ifdef JS_PUNBOX64
    masm.moveDoubleToGPR64(floatReg, scratch64);
    masm.move32(scratch64.get().reg, scratch);
    masm.rshift64(Imm32(32), scratch64);
    masm.add32(scratch64.get().reg, scratch);
#  else
    Register64 scratch64(scratch, scratch2);
    masm.moveDoubleToGPR64(floatReg, scratch64);
    masm.add32(scratch2, scratch);
#  endif
  }

  {
    masm.bind(&updateHash);

    // executionHash = rotl(executionHash + scratch, 1)
    masm.loadJSContext(scratchJSContext);
    Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
    masm.load32(addrExecHash, scratchJSContext);
    masm.add32(scratchJSContext, scratch);
    masm.rotateLeft(Imm32(1), scratch, scratch);
    masm.loadJSContext(scratchJSContext);
    masm.store32(scratch, addrExecHash);

    // Bump the count of hash inputs observed.
    Address addrExecHashInputs(scratchJSContext,
                               offsetof(JSContext, executionHashInputs));
    masm.load32(addrExecHashInputs, scratch);
    masm.add32(Imm32(1), scratch);
    masm.store32(scratch, addrExecHashInputs);
  }

  masm.bind(&done);

  // The instruction itself produces undefined.
  AutoOutputRegister output(*this);
  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}
#endif
9937 template <typename Fn
, Fn fn
>
9938 void CacheIRCompiler::callVM(MacroAssembler
& masm
) {
9939 VMFunctionId id
= VMFunctionToId
<Fn
, fn
>::id
;
9940 callVMInternal(masm
, id
);
9943 void CacheIRCompiler::callVMInternal(MacroAssembler
& masm
, VMFunctionId id
) {
9944 MOZ_ASSERT(enteredStubFrame_
);
9945 if (mode_
== Mode::Ion
) {
9946 TrampolinePtr code
= cx_
->runtime()->jitRuntime()->getVMWrapper(id
);
9947 const VMFunctionData
& fun
= GetVMFunction(id
);
9948 uint32_t frameSize
= fun
.explicitStackSlots() * sizeof(void*);
9949 masm
.PushFrameDescriptor(FrameType::IonICCall
);
9952 // Pop rest of the exit frame and the arguments left on the stack.
9954 sizeof(ExitFrameLayout
) - ExitFrameLayout::bytesPoppedAfterCall();
9955 masm
.implicitPop(frameSize
+ framePop
);
9957 masm
.freeStack(asIon()->localTracingSlots() * sizeof(Value
));
9959 // Pop IonICCallFrameLayout.
9960 masm
.Pop(FramePointer
);
9961 masm
.freeStack(IonICCallFrameLayout::Size() - sizeof(void*));
9965 MOZ_ASSERT(mode_
== Mode::Baseline
);
9967 TrampolinePtr code
= cx_
->runtime()->jitRuntime()->getVMWrapper(id
);
9969 EmitBaselineCallVM(code
, masm
);
9972 bool CacheIRCompiler::isBaseline() { return mode_
== Mode::Baseline
; }
9974 bool CacheIRCompiler::isIon() { return mode_
== Mode::Ion
; }
9976 BaselineCacheIRCompiler
* CacheIRCompiler::asBaseline() {
9977 MOZ_ASSERT(this->isBaseline());
9978 return static_cast<BaselineCacheIRCompiler
*>(this);
9981 IonCacheIRCompiler
* CacheIRCompiler::asIon() {
9982 MOZ_ASSERT(this->isIon());
9983 return static_cast<IonCacheIRCompiler
*>(this);
9987 void CacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg
) {
9989 // Baseline does not have any FloatRegisters live when calling an IC stub.
9993 asIon()->assertFloatRegisterAvailable(reg
);
9997 AutoCallVM::AutoCallVM(MacroAssembler
& masm
, CacheIRCompiler
* compiler
,
9998 CacheRegisterAllocator
& allocator
)
9999 : masm_(masm
), compiler_(compiler
), allocator_(allocator
) {
10000 // Ion needs to `enterStubFrame` before it can callVM and it also needs to
10001 // initialize AutoSaveLiveRegisters.
10002 if (compiler_
->mode_
== CacheIRCompiler::Mode::Ion
) {
10003 // Will need to use a downcast here as well, in order to pass the
10004 // stub to AutoSaveLiveRegisters
10005 save_
.emplace(*compiler_
->asIon());
10008 if (compiler
->outputUnchecked_
.isSome()) {
10009 output_
.emplace(*compiler
);
10012 if (compiler_
->mode_
== CacheIRCompiler::Mode::Baseline
) {
10013 stubFrame_
.emplace(*compiler_
->asBaseline());
10014 if (output_
.isSome()) {
10015 scratch_
.emplace(allocator_
, masm_
, output_
.ref());
10017 scratch_
.emplace(allocator_
, masm_
);
10022 void AutoCallVM::prepare() {
10023 allocator_
.discardStack(masm_
);
10024 MOZ_ASSERT(compiler_
!= nullptr);
10025 if (compiler_
->mode_
== CacheIRCompiler::Mode::Ion
) {
10026 compiler_
->asIon()->enterStubFrame(masm_
, *save_
.ptr());
10029 MOZ_ASSERT(compiler_
->mode_
== CacheIRCompiler::Mode::Baseline
);
10030 stubFrame_
->enter(masm_
, scratch_
.ref());
10033 void AutoCallVM::storeResult(JSValueType returnType
) {
10034 MOZ_ASSERT(returnType
!= JSVAL_TYPE_DOUBLE
);
10036 if (returnType
== JSVAL_TYPE_UNKNOWN
) {
10037 masm_
.storeCallResultValue(output_
.ref());
10039 if (output_
->hasValue()) {
10040 masm_
.tagValue(returnType
, ReturnReg
, output_
->valueReg());
10042 masm_
.storeCallPointerResult(output_
->typedReg().gpr());
10047 void AutoCallVM::leaveBaselineStubFrame() {
10048 if (compiler_
->mode_
== CacheIRCompiler::Mode::Baseline
) {
10049 stubFrame_
->leave(masm_
);
10053 template <typename
...>
10054 struct VMFunctionReturnType
;
10056 template <class R
, typename
... Args
>
10057 struct VMFunctionReturnType
<R (*)(JSContext
*, Args
...)> {
10058 using LastArgument
= typename LastArg
<Args
...>::Type
;
10060 // By convention VMFunctions returning `bool` use an output parameter.
10062 std::conditional_t
<std::is_same_v
<R
, bool>, LastArgument
, R
>;
10066 struct ReturnTypeToJSValueType
;
10068 // Definitions for the currently used return types.
10070 struct ReturnTypeToJSValueType
<MutableHandleValue
> {
10071 static constexpr JSValueType result
= JSVAL_TYPE_UNKNOWN
;
10074 struct ReturnTypeToJSValueType
<bool*> {
10075 static constexpr JSValueType result
= JSVAL_TYPE_BOOLEAN
;
10078 struct ReturnTypeToJSValueType
<int32_t*> {
10079 static constexpr JSValueType result
= JSVAL_TYPE_INT32
;
10082 struct ReturnTypeToJSValueType
<JSString
*> {
10083 static constexpr JSValueType result
= JSVAL_TYPE_STRING
;
10086 struct ReturnTypeToJSValueType
<BigInt
*> {
10087 static constexpr JSValueType result
= JSVAL_TYPE_BIGINT
;
10090 struct ReturnTypeToJSValueType
<JSObject
*> {
10091 static constexpr JSValueType result
= JSVAL_TYPE_OBJECT
;
10094 struct ReturnTypeToJSValueType
<PropertyIteratorObject
*> {
10095 static constexpr JSValueType result
= JSVAL_TYPE_OBJECT
;
10098 struct ReturnTypeToJSValueType
<ArrayIteratorObject
*> {
10099 static constexpr JSValueType result
= JSVAL_TYPE_OBJECT
;
10102 struct ReturnTypeToJSValueType
<StringIteratorObject
*> {
10103 static constexpr JSValueType result
= JSVAL_TYPE_OBJECT
;
10106 struct ReturnTypeToJSValueType
<RegExpStringIteratorObject
*> {
10107 static constexpr JSValueType result
= JSVAL_TYPE_OBJECT
;
10110 struct ReturnTypeToJSValueType
<PlainObject
*> {
10111 static constexpr JSValueType result
= JSVAL_TYPE_OBJECT
;
10114 struct ReturnTypeToJSValueType
<ArrayObject
*> {
10115 static constexpr JSValueType result
= JSVAL_TYPE_OBJECT
;
10118 struct ReturnTypeToJSValueType
<TypedArrayObject
*> {
10119 static constexpr JSValueType result
= JSVAL_TYPE_OBJECT
;
10122 template <typename Fn
>
10123 void AutoCallVM::storeResult() {
10124 using ReturnType
= typename VMFunctionReturnType
<Fn
>::ReturnType
;
10125 storeResult(ReturnTypeToJSValueType
<ReturnType
>::result
);
10128 AutoScratchFloatRegister::AutoScratchFloatRegister(CacheIRCompiler
* compiler
,
10129 FailurePath
* failure
)
10130 : compiler_(compiler
), failure_(failure
) {
10131 // If we're compiling a Baseline IC, FloatReg0 is always available.
10132 if (!compiler_
->isBaseline()) {
10133 MacroAssembler
& masm
= compiler_
->masm
;
10134 masm
.push(FloatReg0
);
10135 compiler
->allocator
.setHasAutoScratchFloatRegisterSpill(true);
10139 failure_
->setHasAutoScratchFloatRegister();
10143 AutoScratchFloatRegister::~AutoScratchFloatRegister() {
10145 failure_
->clearHasAutoScratchFloatRegister();
10148 if (!compiler_
->isBaseline()) {
10149 MacroAssembler
& masm
= compiler_
->masm
;
10150 masm
.pop(FloatReg0
);
10151 compiler_
->allocator
.setHasAutoScratchFloatRegisterSpill(false);
10156 masm
.bind(&failurePopReg_
);
10157 masm
.pop(FloatReg0
);
10158 masm
.jump(failure_
->label());
10164 Label
* AutoScratchFloatRegister::failure() {
10165 MOZ_ASSERT(failure_
);
10167 if (!compiler_
->isBaseline()) {
10168 return &failurePopReg_
;
10170 return failure_
->labelUnchecked();