/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/CacheIRCompiler.h"

#include "mozilla/ArrayUtils.h"
#include "mozilla/FunctionTypeTraits.h"
#include "mozilla/MaybeOneOf.h"
#include "mozilla/ScopeExit.h"

#include <type_traits>

#include "jslibmath.h"

#include "builtin/DataViewObject.h"
#include "builtin/MapObject.h"
#include "builtin/Object.h"
#include "gc/GCEnum.h"
#include "jit/BaselineCacheIRCompiler.h"
#include "jit/CacheIRGenerator.h"
#include "jit/IonCacheIRCompiler.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/VMFunctions.h"
#include "js/friend/DOMProxy.h"     // JS::ExpandoAndGeneration
#include "js/friend/XrayJitInfo.h"  // js::jit::GetXrayJitInfo
#include "js/ScalarType.h"          // js::Scalar::Type
#include "js/SweepingAPI.h"
#include "proxy/DOMProxy.h"
#include "proxy/Proxy.h"
#include "proxy/ScriptedProxyHandler.h"
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BigIntType.h"
#include "vm/FunctionFlags.h"  // js::FunctionFlags
#include "vm/GeneratorObject.h"
#include "vm/GetterSetter.h"
#include "vm/Interpreter.h"
#include "vm/Uint8Clamped.h"

#include "builtin/Boolean-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "jit/VMFunctionList-inl.h"
using namespace js::jit;

using mozilla::BitwiseCast;

using JS::ExpandoAndGeneration;
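
// The use* helpers below convert an operand to a (boxed or unboxed) register,
// updating its OperandLocation as a side effect so that later instructions
// reuse the same register instead of reloading the operand.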
ValueOperand CacheRegisterAllocator::useValueRegister(MacroAssembler& masm,
                                                      ValOperandId op) {
  OperandLocation& loc = operandLocations_[op.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      currentOpRegs_.add(loc.valueReg());
      return loc.valueReg();

    case OperandLocation::ValueStack: {
      ValueOperand reg = allocateValueRegister(masm);
      popValue(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      ValueOperand reg = allocateValueRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Constant: {
      ValueOperand reg = allocateValueRegister(masm);
      masm.moveValue(loc.constant(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadReg: {
      // Temporarily add the payload register to currentOpRegs_ so
      // allocateValueRegister will stay away from it.
      currentOpRegs_.add(loc.payloadReg());
      ValueOperand reg = allocateValueRegister(masm);
      masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
      currentOpRegs_.take(loc.payloadReg());
      availableRegs_.add(loc.payloadReg());
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      ValueOperand reg = allocateValueRegister(masm);
      popPayload(masm, &loc, reg.scratchReg());
      masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::DoubleReg: {
      ValueOperand reg = allocateValueRegister(masm);
      {
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(loc.doubleReg(), reg, fpscratch);
      }
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}
// Load a value operand directly into a float register. Caller must have
// guarded isNumber on the provided val.
void CacheRegisterAllocator::ensureDoubleRegister(MacroAssembler& masm,
                                                  NumberOperandId op,
                                                  FloatRegister dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[op.id()];

  Label failure, done;
  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      masm.ensureDouble(loc.valueReg(), dest, &failure);
      break;
    }

    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::DoubleReg: {
      masm.moveDouble(loc.doubleReg(), dest);
      return;
    }

    case OperandLocation::Constant: {
      MOZ_ASSERT(loc.constant().isNumber(),
                 "Caller must ensure the operand is a number value");
      masm.loadConstantDouble(loc.constant().toNumber(), dest);
      return;
    }

    case OperandLocation::PayloadReg: {
      // Doubles can't be stored in payload registers, so this must be an
      // int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      masm.convertInt32ToDouble(loc.payloadReg(), dest);
      return;
    }

    case OperandLocation::PayloadStack: {
      // Doubles can't be stored in payload registers, so this must be an
      // int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.convertInt32ToDouble(addr, dest);
      return;
    }

    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand type in ensureDoubleRegister");
  }
  masm.jump(&done);

  masm.bind(&failure);
  masm.assumeUnreachable(
      "Missing guard allowed non-number to hit ensureDoubleRegister");
  masm.bind(&done);
}
void CacheRegisterAllocator::copyToScratchRegister(MacroAssembler& masm,
                                                   TypedOperandId typedId,
                                                   Register dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[typedId.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      masm.unboxNonDouble(loc.valueReg(), dest, typedId.type());
      break;
    }
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::PayloadReg: {
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      masm.mov(loc.payloadReg(), dest);
      break;
    }
    case OperandLocation::PayloadStack: {
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.loadPtr(addr, dest);
      break;
    }
    case OperandLocation::DoubleReg:
    case OperandLocation::Constant:
    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand location");
  }
}
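
// Unlike the use* helpers, copyToScratchValueRegister materializes the
// operand into |dest| without updating the operand's recorded location, so
// it can be used where the allocator state must remain unchanged.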
void CacheRegisterAllocator::copyToScratchValueRegister(
    MacroAssembler& masm, ValOperandId valId, ValueOperand dest) const {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  const OperandLocation& loc = operandLocations_[valId.id()];
  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      masm.moveValue(loc.valueReg(), dest);
      break;
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::Constant:
      masm.moveValue(loc.constant(), dest);
      break;
    case OperandLocation::PayloadReg:
      masm.tagValue(loc.payloadType(), loc.payloadReg(), dest);
      break;
    case OperandLocation::PayloadStack: {
      Address addr = payloadAddress(masm, &loc);
      masm.loadPtr(addr, dest.scratchReg());
      masm.tagValue(loc.payloadType(), dest.scratchReg(), dest);
      break;
    }
    case OperandLocation::DoubleReg: {
      ScratchDoubleScope fpscratch(masm);
      masm.boxDouble(loc.doubleReg(), dest, fpscratch);
      break;
    }
    case OperandLocation::Uninitialized:
      MOZ_CRASH();
  }
}
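
// useRegister returns the unboxed payload in a GPR. For a ValueReg operand
// it reuses one of the value's own registers to hold the payload, so on
// 32-bit (nunbox) platforms the type register becomes available again.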
Register CacheRegisterAllocator::useRegister(MacroAssembler& masm,
                                             TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  switch (loc.kind()) {
    case OperandLocation::PayloadReg:
      currentOpRegs_.add(loc.payloadReg());
      return loc.payloadReg();

    case OperandLocation::ValueReg: {
      // It's possible the value is still boxed: as an optimization, we unbox
      // the first time we use a value as object.
      ValueOperand val = loc.valueReg();
      availableRegs_.add(val);
      Register reg = val.scratchReg();
      availableRegs_.take(reg);
      masm.unboxNonDouble(val, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      currentOpRegs_.add(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      Register reg = allocateRegister(masm);
      popPayload(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::ValueStack: {
      // The value is on the stack, but boxed. If it's on top of the stack we
      // unbox it and then remove it from the stack, else we just unbox.
      Register reg = allocateRegister(masm);
      if (loc.valueStack() == stackPushed_) {
        masm.unboxNonDouble(Address(masm.getStackPointer(), 0), reg,
                            typedId.type());
        masm.addToStackPtr(Imm32(sizeof(js::Value)));
        MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
        stackPushed_ -= sizeof(js::Value);
      } else {
        MOZ_ASSERT(loc.valueStack() < stackPushed_);
        masm.unboxNonDouble(
            Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
            reg, typedId.type());
      }
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      Register reg = allocateRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.unboxNonDouble(addr, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::Constant: {
      Value v = loc.constant();
      Register reg = allocateRegister(masm);
      if (v.isString()) {
        masm.movePtr(ImmGCPtr(v.toString()), reg);
      } else if (v.isSymbol()) {
        masm.movePtr(ImmGCPtr(v.toSymbol()), reg);
      } else if (v.isBigInt()) {
        masm.movePtr(ImmGCPtr(v.toBigInt()), reg);
      } else {
        MOZ_CRASH("Unexpected Value");
      }
      loc.setPayloadReg(reg, v.extractNonDoubleType());
      return reg;
    }

    case OperandLocation::DoubleReg:
    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}
ConstantOrRegister CacheRegisterAllocator::useConstantOrRegister(
    MacroAssembler& masm, ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[val.id()];
  switch (loc.kind()) {
    case OperandLocation::Constant:
      return loc.constant();

    case OperandLocation::PayloadReg:
    case OperandLocation::PayloadStack: {
      JSValueType payloadType = loc.payloadType();
      Register reg = useRegister(masm, TypedOperandId(val, payloadType));
      return TypedOrValueRegister(MIRTypeFromValueType(payloadType),
                                  AnyRegister(reg));
    }

    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      return TypedOrValueRegister(useValueRegister(masm, val));

    case OperandLocation::DoubleReg:
      return TypedOrValueRegister(MIRType::Double,
                                  AnyRegister(loc.doubleReg()));

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}
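
// defineRegister/defineValueRegister allocate a location for an operand that
// is being *produced* by the current instruction, in contrast to the use*
// helpers above, which load operands that already exist.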
Register CacheRegisterAllocator::defineRegister(MacroAssembler& masm,
                                                TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

  Register reg = allocateRegister(masm);
  loc.setPayloadReg(reg, typedId.type());
  return reg;
}
ValueOperand CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm,
                                                         ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[val.id()];
  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

  ValueOperand reg = allocateValueRegister(masm);
  loc.setValueReg(reg);
  return reg;
}
void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler& masm) {
  // See if any operands are dead so we can reuse their registers. Note that
  // we skip the input operands, as those are also used by failure paths, and
  // we currently don't track those uses.
  for (size_t i = writer_.numInputOperands(); i < operandLocations_.length();
       i++) {
    if (!writer_.operandIsDead(i, currentInstruction_)) {
      continue;
    }

    OperandLocation& loc = operandLocations_[i];
    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        availableRegs_.add(loc.payloadReg());
        break;
      case OperandLocation::ValueReg:
        availableRegs_.add(loc.valueReg());
        break;
      case OperandLocation::PayloadStack:
        masm.propagateOOM(freePayloadSlots_.append(loc.payloadStack()));
        break;
      case OperandLocation::ValueStack:
        masm.propagateOOM(freeValueSlots_.append(loc.valueStack()));
        break;
      case OperandLocation::Uninitialized:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        break;
    }
    loc.setUninitialized();
  }
}
void CacheRegisterAllocator::discardStack(MacroAssembler& masm) {
  // This should only be called when we are no longer using the operands,
  // as we're discarding everything from the native stack. Set all operand
  // locations to Uninitialized to catch bugs.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    operandLocations_[i].setUninitialized();
  }

  if (stackPushed_ > 0) {
    masm.addToStackPtr(Imm32(stackPushed_));
    stackPushed_ = 0;
  }

  freePayloadSlots_.clear();
  freeValueSlots_.clear();
}
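
// Allocation strategy: prefer a free register; otherwise reclaim registers
// held by dead operands; otherwise spill an operand not used by the current
// instruction; as a last resort, push a register from
// availableRegsAfterSpill_ and record it in spilledRegs_.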
Register CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  if (availableRegs_.empty()) {
    freeDeadOperandLocations(masm);
  }

  if (availableRegs_.empty()) {
    // Still no registers available, try to spill unused operands to
    // the stack.
    for (size_t i = 0; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.kind() == OperandLocation::PayloadReg) {
        Register reg = loc.payloadReg();
        if (currentOpRegs_.has(reg)) {
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // We got a register, so break out of the loop.
      }
      if (loc.kind() == OperandLocation::ValueReg) {
        ValueOperand reg = loc.valueReg();
        if (currentOpRegs_.aliases(reg)) {
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // Break out of the loop.
      }
    }
  }

  if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
    Register reg = availableRegsAfterSpill_.takeAny();
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));

    availableRegs_.add(reg);
  }

  // At this point, there must be a free register.
  MOZ_RELEASE_ASSERT(!availableRegs_.empty());

  Register reg = availableRegs_.takeAny();
  currentOpRegs_.add(reg);
  return reg;
}
void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm,
                                                   Register reg) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  // Fixed registers should be allocated first, to ensure they're
  // still available.
  MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");

  freeDeadOperandLocations(masm);

  if (availableRegs_.has(reg)) {
    availableRegs_.take(reg);
    currentOpRegs_.add(reg);
    return;
  }

  // Register may be available only after spilling contents.
  if (availableRegsAfterSpill_.has(reg)) {
    availableRegsAfterSpill_.take(reg);
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
    currentOpRegs_.add(reg);
    return;
  }

  // The register must be used by some operand. Spill it to the stack.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.kind() == OperandLocation::PayloadReg) {
      if (loc.payloadReg() != reg) {
        continue;
      }

      spillOperandToStackOrRegister(masm, &loc);
      currentOpRegs_.add(reg);
      return;
    }
    if (loc.kind() == OperandLocation::ValueReg) {
      if (!loc.valueReg().aliases(reg)) {
        continue;
      }

      ValueOperand valueReg = loc.valueReg();
      spillOperandToStackOrRegister(masm, &loc);

      availableRegs_.add(valueReg);
      availableRegs_.take(reg);
      currentOpRegs_.add(reg);
      return;
    }
  }

  MOZ_CRASH("Invalid register");
}
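
// On nunbox (32-bit) platforms a Value occupies two GPRs (type + payload),
// so a fixed ValueOperand needs two fixed-register allocations; on punbox
// (64-bit) platforms a single GPR holds the whole Value.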
void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler& masm,
                                                        ValueOperand reg) {
#ifdef JS_NUNBOX32
  allocateFixedRegister(masm, reg.payloadReg());
  allocateFixedRegister(masm, reg.typeReg());
#else
  allocateFixedRegister(masm, reg.valueReg());
#endif
}

#ifdef JS_NUNBOX32
// Possible miscompilation in clang-12 (bug 1689641)
MOZ_NEVER_INLINE
#endif
ValueOperand CacheRegisterAllocator::allocateValueRegister(
    MacroAssembler& masm) {
#ifdef JS_NUNBOX32
  Register reg1 = allocateRegister(masm);
  Register reg2 = allocateRegister(masm);
  return ValueOperand(reg1, reg2);
#else
  Register reg = allocateRegister(masm);
  return ValueOperand(reg);
#endif
}
bool CacheRegisterAllocator::init() {
  if (!origInputLocations_.resize(writer_.numInputOperands())) {
    return false;
  }
  if (!operandLocations_.resize(writer_.numOperandIds())) {
    return false;
  }
  return true;
}
void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
  // Registers not in availableRegs_ and not used by input operands are
  // available after being spilled.
  availableRegsAfterSpill_.set() = GeneralRegisterSet::Intersect(
      GeneralRegisterSet::Not(availableRegs_.set()),
      GeneralRegisterSet::Not(inputRegisterSet()));
}
void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm) {
  // If IC inputs alias each other, make sure they are stored in different
  // locations so we don't have to deal with this complexity in the rest of
  // the code.
  //
  // Note that this can happen in IonMonkey with something like |o.foo = o|
  // or |o[i] = i|.

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(origInputLocations_.length() == numInputs);

  for (size_t i = 1; i < numInputs; i++) {
    OperandLocation& loc1 = operandLocations_[i];
    if (!loc1.isInRegister()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      OperandLocation& loc2 = operandLocations_[j];
      if (!loc1.aliasesReg(loc2)) {
        continue;
      }

      // loc1 and loc2 alias so we spill one of them. If one is a
      // ValueReg and the other is a PayloadReg, we have to spill the
      // PayloadReg: spilling the ValueReg instead would leave its type
      // register unallocated on 32-bit platforms.
      if (loc1.kind() == OperandLocation::ValueReg) {
        spillOperandToStack(masm, &loc2);
      } else {
        MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
        spillOperandToStack(masm, &loc1);
        break;  // Spilled loc1, so nothing else will alias it.
      }
    }
  }

#ifdef DEBUG
  assertValidState();
#endif
}
GeneralRegisterSet CacheRegisterAllocator::inputRegisterSet() const {
  MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());

  AllocatableGeneralRegisterSet result;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(loc == origInputLocations_[i]);

    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        result.addUnchecked(loc.payloadReg());
        continue;
      case OperandLocation::ValueReg:
        result.addUnchecked(loc.valueReg());
        continue;
      case OperandLocation::PayloadStack:
      case OperandLocation::ValueStack:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        continue;

      case OperandLocation::Uninitialized:
        break;
    }
    MOZ_CRASH("Invalid kind");
  }

  return result.set();
}
JSValueType CacheRegisterAllocator::knownType(ValOperandId val) const {
  const OperandLocation& loc = operandLocations_[val.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      return JSVAL_TYPE_UNKNOWN;

    case OperandLocation::PayloadStack:
    case OperandLocation::PayloadReg:
      return loc.payloadType();

    case OperandLocation::Constant:
      return loc.constant().isDouble() ? JSVAL_TYPE_DOUBLE
                                       : loc.constant().extractNonDoubleType();

    case OperandLocation::DoubleReg:
      return JSVAL_TYPE_DOUBLE;

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}
void CacheRegisterAllocator::initInputLocation(
    size_t i, const TypedOrValueRegister& reg) {
  if (reg.hasValue()) {
    initInputLocation(i, reg.valueReg());
  } else if (reg.typedReg().isFloat()) {
    MOZ_ASSERT(reg.type() == MIRType::Double);
    initInputLocation(i, reg.typedReg().fpu());
  } else {
    initInputLocation(i, reg.typedReg().gpr(),
                      ValueTypeFromMIRType(reg.type()));
  }
}

void CacheRegisterAllocator::initInputLocation(
    size_t i, const ConstantOrRegister& value) {
  if (value.constant()) {
    initInputLocation(i, value.value());
  } else {
    initInputLocation(i, value.reg());
  }
}
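
// Stack slots are identified by the value of stackPushed_ at the time of the
// push, so a slot's current address is SP + (stackPushed_ - slotPos). Freed
// slots are recycled through freeValueSlots_/freePayloadSlots_.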
void CacheRegisterAllocator::spillOperandToStack(MacroAssembler& masm,
                                                 OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  if (loc->kind() == OperandLocation::ValueReg) {
    if (!freeValueSlots_.empty()) {
      uint32_t stackPos = freeValueSlots_.popCopy();
      MOZ_ASSERT(stackPos <= stackPushed_);
      masm.storeValue(loc->valueReg(),
                      Address(masm.getStackPointer(), stackPushed_ - stackPos));
      loc->setValueStack(stackPos);
      return;
    }
    stackPushed_ += sizeof(js::Value);
    masm.pushValue(loc->valueReg());
    loc->setValueStack(stackPushed_);
    return;
  }

  MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);

  if (!freePayloadSlots_.empty()) {
    uint32_t stackPos = freePayloadSlots_.popCopy();
    MOZ_ASSERT(stackPos <= stackPushed_);
    masm.storePtr(loc->payloadReg(),
                  Address(masm.getStackPointer(), stackPushed_ - stackPos));
    loc->setPayloadStack(stackPos, loc->payloadType());
    return;
  }
  stackPushed_ += sizeof(uintptr_t);
  masm.push(loc->payloadReg());
  loc->setPayloadStack(stackPushed_, loc->payloadType());
}
void CacheRegisterAllocator::spillOperandToStackOrRegister(
    MacroAssembler& masm, OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  // If enough registers are available, use them.
  if (loc->kind() == OperandLocation::ValueReg) {
    static const size_t BoxPieces = sizeof(Value) / sizeof(uintptr_t);
    if (availableRegs_.set().size() >= BoxPieces) {
      ValueOperand reg = availableRegs_.takeAnyValue();
      masm.moveValue(loc->valueReg(), reg);
      loc->setValueReg(reg);
      return;
    }
  } else {
    MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
    if (!availableRegs_.empty()) {
      Register reg = availableRegs_.takeAny();
      masm.movePtr(loc->payloadReg(), reg);
      loc->setPayloadReg(reg, loc->payloadType());
      return;
    }
  }

  // Not enough registers available, spill to the stack.
  spillOperandToStack(masm, loc);
}
void CacheRegisterAllocator::popPayload(MacroAssembler& masm,
                                        OperandLocation* loc, Register dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

  // The payload is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->payloadStack() == stackPushed_) {
    masm.pop(dest);
    stackPushed_ -= sizeof(uintptr_t);
  } else {
    MOZ_ASSERT(loc->payloadStack() < stackPushed_);
    masm.loadPtr(payloadAddress(masm, loc), dest);
    masm.propagateOOM(freePayloadSlots_.append(loc->payloadStack()));
  }

  loc->setPayloadReg(dest, loc->payloadType());
}
Address CacheRegisterAllocator::valueAddress(MacroAssembler& masm,
                                             const OperandLocation* loc) const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  return Address(masm.getStackPointer(), stackPushed_ - loc->valueStack());
}

Address CacheRegisterAllocator::payloadAddress(
    MacroAssembler& masm, const OperandLocation* loc) const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  return Address(masm.getStackPointer(), stackPushed_ - loc->payloadStack());
}
void CacheRegisterAllocator::popValue(MacroAssembler& masm,
                                      OperandLocation* loc, ValueOperand dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));

  // The Value is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->valueStack() == stackPushed_) {
    masm.popValue(dest);
    stackPushed_ -= sizeof(js::Value);
  } else {
    MOZ_ASSERT(loc->valueStack() < stackPushed_);
    masm.loadValue(
        Address(masm.getStackPointer(), stackPushed_ - loc->valueStack()),
        dest);
    masm.propagateOOM(freeValueSlots_.append(loc->valueStack()));
  }

  loc->setValueReg(dest);
}
#ifdef DEBUG
void CacheRegisterAllocator::assertValidState() const {
  // Assert different operands don't have aliasing storage. We depend on this
  // when spilling registers, for instance.

  if (!JitOptions.fullDebugChecks) {
    return;
  }

  for (size_t i = 0; i < operandLocations_.length(); i++) {
    const auto& loc1 = operandLocations_[i];
    if (loc1.isUninitialized()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      const auto& loc2 = operandLocations_[j];
      if (loc2.isUninitialized()) {
        continue;
      }
      MOZ_ASSERT(!loc1.aliasesReg(loc2));
    }
  }
}
#endif
bool OperandLocation::aliasesReg(const OperandLocation& other) const {
  MOZ_ASSERT(&other != this);

  switch (other.kind_) {
    case PayloadReg:
      return aliasesReg(other.payloadReg());
    case ValueReg:
      return aliasesReg(other.valueReg());
    case PayloadStack:
    case ValueStack:
    case BaselineFrame:
    case Constant:
    case DoubleReg:
      return false;
    case Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}
void CacheRegisterAllocator::restoreInputState(MacroAssembler& masm,
                                               bool shouldDiscardStack) {
  size_t numInputOperands = origInputLocations_.length();
  MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);

  for (size_t j = 0; j < numInputOperands; j++) {
    const OperandLocation& dest = origInputLocations_[j];
    OperandLocation& cur = operandLocations_[j];
    if (dest == cur) {
      continue;
    }

    auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });

    // We have a cycle if a destination register will be used later
    // as source register. If that happens, just push the current value
    // on the stack and later get it from there.
    for (size_t k = j + 1; k < numInputOperands; k++) {
      OperandLocation& laterSource = operandLocations_[k];
      if (dest.aliasesReg(laterSource)) {
        spillOperandToStack(masm, &laterSource);
      }
    }

    if (dest.kind() == OperandLocation::ValueReg) {
      // We have to restore a Value register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          masm.moveValue(cur.valueReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadReg:
          masm.tagValue(cur.payloadType(), cur.payloadReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadStack: {
          Register scratch = dest.valueReg().scratchReg();
          popPayload(masm, &cur, scratch);
          masm.tagValue(cur.payloadType(), scratch, dest.valueReg());
          continue;
        }
        case OperandLocation::ValueStack:
          popValue(masm, &cur, dest.valueReg());
          continue;
        case OperandLocation::DoubleReg:
          masm.boxDouble(cur.doubleReg(), dest.valueReg(), cur.doubleReg());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::PayloadReg) {
      // We have to restore a payload register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(cur.valueReg(), dest.payloadReg(),
                              dest.payloadType());
          continue;
        case OperandLocation::PayloadReg:
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          masm.mov(cur.payloadReg(), dest.payloadReg());
          continue;
        case OperandLocation::PayloadStack: {
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          popPayload(masm, &cur, dest.payloadReg());
          continue;
        }
        case OperandLocation::ValueStack:
          MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
          MOZ_ASSERT(cur.valueStack() <= stackPushed_);
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(
              Address(masm.getStackPointer(), stackPushed_ - cur.valueStack()),
              dest.payloadReg(), dest.payloadType());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::DoubleReg:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::Constant ||
               dest.kind() == OperandLocation::BaselineFrame ||
               dest.kind() == OperandLocation::DoubleReg) {
      // Nothing to do.
      continue;
    }

    MOZ_CRASH("Invalid kind");
  }

  for (const SpilledRegister& spill : spilledRegs_) {
    MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

    if (spill.stackPushed == stackPushed_) {
      masm.pop(spill.reg);
      stackPushed_ -= sizeof(uintptr_t);
    } else {
      MOZ_ASSERT(spill.stackPushed < stackPushed_);
      masm.loadPtr(
          Address(masm.getStackPointer(), stackPushed_ - spill.stackPushed),
          spill.reg);
    }
  }

  if (shouldDiscardStack) {
    discardStack(masm);
  }
}
size_t CacheIRStubInfo::stubDataSize() const {
  size_t field = 0;
  size_t size = 0;
  while (true) {
    StubField::Type type = fieldType(field++);
    if (type == StubField::Type::Limit) {
      return size;
    }
    size += StubField::sizeInBytes(type);
  }
}
template <typename T>
static GCPtr<T>* AsGCPtr(void* ptr) {
  return static_cast<GCPtr<T>*>(ptr);
}
void CacheIRStubInfo::replaceStubRawWord(uint8_t* stubData, uint32_t offset,
                                         uintptr_t oldWord,
                                         uintptr_t newWord) const {
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
  uintptr_t* addr = reinterpret_cast<uintptr_t*>(stubData + offset);
  MOZ_ASSERT(*addr == oldWord);
  *addr = newWord;
}
template <class Stub, StubField::Type type>
typename MapStubFieldToType<type>::WrappedType& CacheIRStubInfo::getStubField(
    Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  return *reinterpret_cast<WrappedType*>(stubData + offset);
}
#define INSTANTIATE_GET_STUB_FIELD(Type)                                   \
  template typename MapStubFieldToType<Type>::WrappedType&                 \
  CacheIRStubInfo::getStubField<ICCacheIRStub, Type>(ICCacheIRStub * stub, \
                                                     uint32_t offset) const;
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Shape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakShape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakGetterSetter)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::JSObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Symbol)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::String)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakBaseScript)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Value)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Id)
#undef INSTANTIATE_GET_STUB_FIELD
template <class Stub, class T>
T* CacheIRStubInfo::getPtrStubField(Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  return *reinterpret_cast<T**>(stubData + offset);
}

template gc::AllocSite* CacheIRStubInfo::getPtrStubField(ICCacheIRStub* stub,
                                                         uint32_t offset) const;
template <StubField::Type type, typename V>
static void InitWrappedPtr(void* ptr, V val) {
  using RawType = typename MapStubFieldToType<type>::RawType;
  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  auto* wrapped = static_cast<WrappedType*>(ptr);
  new (wrapped) WrappedType(mozilla::BitwiseCast<RawType>(val));
}
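
// Stub field initialization writes into freshly allocated stub memory, so
// the GCPtr/WeakHeapPtr wrappers are constructed with placement new rather
// than assigned; assignment would run write-barrier code against the
// uninitialized contents.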
static void InitWordStubField(StubField::Type type, void* dest,
                              uintptr_t value) {
  MOZ_ASSERT(StubField::sizeIsWord(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uintptr_t)) == 0,
             "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
      *static_cast<uintptr_t*>(dest) = value;
      break;
    case StubField::Type::Shape:
      InitWrappedPtr<StubField::Type::Shape>(dest, value);
      break;
    case StubField::Type::WeakShape:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakShape>(dest, value);
      break;
    case StubField::Type::WeakGetterSetter:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakGetterSetter>(dest, value);
      break;
    case StubField::Type::JSObject:
      InitWrappedPtr<StubField::Type::JSObject>(dest, value);
      break;
    case StubField::Type::WeakObject:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakObject>(dest, value);
      break;
    case StubField::Type::Symbol:
      InitWrappedPtr<StubField::Type::Symbol>(dest, value);
      break;
    case StubField::Type::String:
      InitWrappedPtr<StubField::Type::String>(dest, value);
      break;
    case StubField::Type::WeakBaseScript:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakBaseScript>(dest, value);
      break;
    case StubField::Type::JitCode:
      InitWrappedPtr<StubField::Type::JitCode>(dest, value);
      break;
    case StubField::Type::Id:
      AsGCPtr<jsid>(dest)->init(jsid::fromRawBits(value));
      break;
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
    case StubField::Type::Value:
    case StubField::Type::Limit:
      MOZ_CRASH("Invalid type");
  }
}
static void InitInt64StubField(StubField::Type type, void* dest,
                               uint64_t value) {
  MOZ_ASSERT(StubField::sizeIsInt64(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uint64_t)) == 0, "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
      *static_cast<uint64_t*>(dest) = value;
      break;

    case StubField::Type::Value:
      AsGCPtr<Value>(dest)->init(Value::fromRawBits(value));
      break;

    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
    case StubField::Type::Shape:
    case StubField::Type::WeakShape:
    case StubField::Type::WeakGetterSetter:
    case StubField::Type::JSObject:
    case StubField::Type::WeakObject:
    case StubField::Type::Symbol:
    case StubField::Type::String:
    case StubField::Type::WeakBaseScript:
    case StubField::Type::JitCode:
    case StubField::Type::Id:
    case StubField::Type::Limit:
      MOZ_CRASH("Invalid type");
  }
}
void CacheIRWriter::copyStubData(uint8_t* dest) const {
  MOZ_ASSERT(!failed());

  for (const StubField& field : stubFields_) {
    if (field.sizeIsWord()) {
      InitWordStubField(field.type(), dest, field.asWord());
      dest += sizeof(uintptr_t);
    } else {
      InitInt64StubField(field.type(), dest, field.asInt64());
      dest += sizeof(uint64_t);
    }
  }
}
ICCacheIRStub* ICCacheIRStub::clone(JSRuntime* rt, ICStubSpace& newSpace) {
  const CacheIRStubInfo* info = stubInfo();
  MOZ_ASSERT(info->makesGCCalls());

  size_t bytesNeeded = info->stubDataOffset() + info->stubDataSize();

  AutoEnterOOMUnsafeRegion oomUnsafe;
  void* newStubMem = newSpace.alloc(bytesNeeded);
  if (!newStubMem) {
    oomUnsafe.crash("ICCacheIRStub::clone");
  }

  ICCacheIRStub* newStub = new (newStubMem) ICCacheIRStub(*this);

  const uint8_t* src = this->stubDataStart();
  uint8_t* dest = newStub->stubDataStart();

  // Because this can be called during sweeping when discarding JIT code, we
  // have to lock the store buffer.
  gc::AutoLockStoreBuffer lock(rt);

  uint32_t field = 0;
  while (true) {
    StubField::Type type = info->fieldType(field);
    if (type == StubField::Type::Limit) {
      break;
    }

    if (StubField::sizeIsWord(type)) {
      const uintptr_t* srcField = reinterpret_cast<const uintptr_t*>(src);
      InitWordStubField(type, dest, *srcField);
      src += sizeof(uintptr_t);
      dest += sizeof(uintptr_t);
    } else {
      const uint64_t* srcField = reinterpret_cast<const uint64_t*>(src);
      InitInt64StubField(type, dest, *srcField);
      src += sizeof(uint64_t);
      dest += sizeof(uint64_t);
    }

    field++;
  }

  return newStub;
}
template <typename T>
static inline bool ShouldTraceWeakEdgeInStub(JSTracer* trc) {
  if constexpr (std::is_same_v<T, IonICStub>) {
    // 'Weak' edges are traced strongly in IonICs.
    return false;
  } else {
    static_assert(std::is_same_v<T, ICCacheIRStub>);
    return trc->traceWeakEdges();
  }
}
template <typename T>
void jit::TraceCacheIRStub(JSTracer* trc, T* stub,
                           const CacheIRStubInfo* stubInfo) {
  using Type = StubField::Type;

  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case Type::RawInt32:
      case Type::RawPointer:
      case Type::RawInt64:
      case Type::Double:
        break;
      case Type::Shape: {
        // For CCW IC stubs, we can store same-zone but cross-compartment
        // shapes. Use TraceSameZoneCrossCompartmentEdge to not assert in the
        // GC. Note: CacheIRWriter::writeShapeField asserts we never store
        // cross-zone shapes.
        GCPtr<Shape*>& shapeField =
            stubInfo->getStubField<T, Type::Shape>(stub, offset);
        TraceSameZoneCrossCompartmentEdge(trc, &shapeField, "cacheir-shape");
        break;
      }
      case Type::WeakShape:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          WeakHeapPtr<Shape*>& shapeField =
              stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
          if (shapeField) {
            TraceSameZoneCrossCompartmentEdge(trc, &shapeField,
                                              "cacheir-weak-shape");
          }
        }
        break;
      case Type::WeakGetterSetter:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc,
              &stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset),
              "cacheir-weak-getter-setter");
        }
        break;
      case Type::JSObject: {
        TraceEdge(trc, &stubInfo->getStubField<T, Type::JSObject>(stub, offset),
                  "cacheir-object");
        break;
      }
      case Type::WeakObject:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc, &stubInfo->getStubField<T, Type::WeakObject>(stub, offset),
              "cacheir-weak-object");
        }
        break;
      case Type::Symbol:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Symbol>(stub, offset),
                  "cacheir-symbol");
        break;
      case Type::String:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::String>(stub, offset),
                  "cacheir-string");
        break;
      case Type::WeakBaseScript:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc,
              &stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset),
              "cacheir-weak-script");
        }
        break;
      case Type::JitCode:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::JitCode>(stub, offset),
                  "cacheir-jitcode");
        break;
      case Type::Id:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Id>(stub, offset),
                  "cacheir-id");
        break;
      case Type::Value:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Value>(stub, offset),
                  "cacheir-value");
        break;
      case Type::AllocSite: {
        gc::AllocSite* site =
            stubInfo->getPtrStubField<T, gc::AllocSite>(stub, offset);
        site->trace(trc);
        break;
      }
      case Type::Limit:
        return;  // Done.
    }
    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template void jit::TraceCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                    const CacheIRStubInfo* stubInfo);

template void jit::TraceCacheIRStub(JSTracer* trc, IonICStub* stub,
                                    const CacheIRStubInfo* stubInfo);
template <typename T>
bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
                               const CacheIRStubInfo* stubInfo) {
  using Type = StubField::Type;

  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case Type::WeakShape: {
        WeakHeapPtr<Shape*>& shapeField =
            stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
        auto r = TraceWeakEdge(trc, &shapeField, "cacheir-weak-shape");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::WeakObject: {
        WeakHeapPtr<JSObject*>& objectField =
            stubInfo->getStubField<T, Type::WeakObject>(stub, offset);
        auto r = TraceWeakEdge(trc, &objectField, "cacheir-weak-object");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::WeakBaseScript: {
        WeakHeapPtr<BaseScript*>& scriptField =
            stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset);
        auto r = TraceWeakEdge(trc, &scriptField, "cacheir-weak-script");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::WeakGetterSetter: {
        WeakHeapPtr<GetterSetter*>& getterSetterField =
            stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset);
        auto r = TraceWeakEdge(trc, &getterSetterField,
                               "cacheir-weak-getter-setter");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::Limit:
        return true;  // Done.
      case Type::RawInt32:
      case Type::RawPointer:
      case Type::Shape:
      case Type::JSObject:
      case Type::Symbol:
      case Type::String:
      case Type::Value:
      case Type::Id:
      case Type::AllocSite:
      case Type::RawInt64:
      case Type::Double:
      case Type::JitCode:
        break;  // Skip non-weak fields.
    }
    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template bool jit::TraceWeakCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                        const CacheIRStubInfo* stubInfo);

template bool jit::TraceWeakCacheIRStub(JSTracer* trc, IonICStub* stub,
                                        const CacheIRStubInfo* stubInfo);
bool CacheIRWriter::stubDataEquals(const uint8_t* stubData) const {
  MOZ_ASSERT(!failed());

  const uintptr_t* stubDataWords = reinterpret_cast<const uintptr_t*>(stubData);

  for (const StubField& field : stubFields_) {
    if (field.sizeIsWord()) {
      if (field.asWord() != *stubDataWords) {
        return false;
      }
      stubDataWords++;
      continue;
    }

    if (field.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords)) {
      return false;
    }
    stubDataWords += sizeof(uint64_t) / sizeof(uintptr_t);
  }

  return true;
}
bool CacheIRWriter::stubDataEqualsIgnoring(const uint8_t* stubData,
                                           uint32_t ignoreOffset) const {
  MOZ_ASSERT(!failed());

  uint32_t offset = 0;
  for (const StubField& field : stubFields_) {
    if (offset != ignoreOffset) {
      if (field.sizeIsWord()) {
        uintptr_t raw = *reinterpret_cast<const uintptr_t*>(stubData + offset);
        if (field.asWord() != raw) {
          return false;
        }
      } else {
        uint64_t raw = *reinterpret_cast<const uint64_t*>(stubData + offset);
        if (field.asInt64() != raw) {
          return false;
        }
      }
    }
    offset += StubField::sizeInBytes(field.type());
  }

  return true;
}
HashNumber CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l) {
  HashNumber hash = mozilla::HashBytes(l.code, l.length);
  hash = mozilla::AddToHash(hash, uint32_t(l.kind));
  hash = mozilla::AddToHash(hash, uint32_t(l.engine));
  return hash;
}
bool CacheIRStubKey::match(const CacheIRStubKey& entry,
                           const CacheIRStubKey::Lookup& l) {
  if (entry.stubInfo->kind() != l.kind) {
    return false;
  }

  if (entry.stubInfo->engine() != l.engine) {
    return false;
  }

  if (entry.stubInfo->codeLength() != l.length) {
    return false;
  }

  if (!mozilla::ArrayEqual(entry.stubInfo->code(), l.code, l.length)) {
    return false;
  }

  return true;
}
CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
    : CacheIRReader(stubInfo->code(),
                    stubInfo->code() + stubInfo->codeLength()) {}
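
// CacheIRStubInfo is allocated as a single buffer with the layout:
//   [CacheIRStubInfo][CacheIR code bytes][field types, Type::Limit last]
// stubDataSize() and the field readers above rely on the Limit terminator.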
CacheIRStubInfo* CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine,
                                      bool makesGCCalls,
                                      uint32_t stubDataOffset,
                                      const CacheIRWriter& writer) {
  size_t numStubFields = writer.numStubFields();
  size_t bytesNeeded =
      sizeof(CacheIRStubInfo) + writer.codeLength() +
      (numStubFields + 1);  // +1 for the GCType::Limit terminator.
  uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
  if (!p) {
    return nullptr;
  }

  // Copy the CacheIR code.
  uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
  mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());

  static_assert(sizeof(StubField::Type) == sizeof(uint8_t),
                "StubField::Type must fit in uint8_t");

  // Copy the stub field types.
  uint8_t* fieldTypes = codeStart + writer.codeLength();
  for (size_t i = 0; i < numStubFields; i++) {
    fieldTypes[i] = uint8_t(writer.stubFieldType(i));
  }
  fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);

  return new (p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset,
                                 writer.codeLength());
}
bool OperandLocation::operator==(const OperandLocation& other) const {
  if (kind_ != other.kind_) {
    return false;
  }

  switch (kind_) {
    case Uninitialized:
      return true;
    case PayloadReg:
      return payloadReg() == other.payloadReg() &&
             payloadType() == other.payloadType();
    case ValueReg:
      return valueReg() == other.valueReg();
    case PayloadStack:
      return payloadStack() == other.payloadStack() &&
             payloadType() == other.payloadType();
    case ValueStack:
      return valueStack() == other.valueStack();
    case BaselineFrame:
      return baselineFrameSlot() == other.baselineFrameSlot();
    case Constant:
      return constant() == other.constant();
    case DoubleReg:
      return doubleReg() == other.doubleReg();
  }

  MOZ_CRASH("Invalid OperandLocation kind");
}
AutoOutputRegister::AutoOutputRegister(CacheIRCompiler& compiler)
    : output_(compiler.outputUnchecked_.ref()), alloc_(compiler.allocator) {
  if (output_.hasValue()) {
    alloc_.allocateFixedValueRegister(compiler.masm, output_.valueReg());
  } else if (!output_.typedReg().isFloat()) {
    alloc_.allocateFixedRegister(compiler.masm, output_.typedReg().gpr());
  }
}

AutoOutputRegister::~AutoOutputRegister() {
  if (output_.hasValue()) {
    alloc_.releaseValueRegister(output_.valueReg());
  } else if (!output_.typedReg().isFloat()) {
    alloc_.releaseRegister(output_.typedReg().gpr());
  }
}
bool FailurePath::canShareFailurePath(const FailurePath& other) const {
  if (stackPushed_ != other.stackPushed_) {
    return false;
  }

  if (spilledRegs_.length() != other.spilledRegs_.length()) {
    return false;
  }

  for (size_t i = 0; i < spilledRegs_.length(); i++) {
    if (spilledRegs_[i] != other.spilledRegs_[i]) {
      return false;
    }
  }

  MOZ_ASSERT(inputs_.length() == other.inputs_.length());

  for (size_t i = 0; i < inputs_.length(); i++) {
    if (inputs_[i] != other.inputs_[i]) {
      return false;
    }
  }
  return true;
}
bool CacheIRCompiler::addFailurePath(FailurePath** failure) {
#ifdef DEBUG
  allocator.setAddedFailurePath();
#endif
  MOZ_ASSERT(!allocator.hasAutoScratchFloatRegisterSpill());

  FailurePath newFailure;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    if (!newFailure.appendInput(allocator.operandLocation(i))) {
      return false;
    }
  }
  if (!newFailure.setSpilledRegs(allocator.spilledRegs())) {
    return false;
  }
  newFailure.setStackPushed(allocator.stackPushed());

  // Reuse the previous failure path if the current one is the same, to
  // avoid emitting duplicate code.
  if (failurePaths.length() > 0 &&
      failurePaths.back().canShareFailurePath(newFailure)) {
    *failure = &failurePaths.back();
    return true;
  }

  if (!failurePaths.append(std::move(newFailure))) {
    return false;
  }

  *failure = &failurePaths.back();
  return true;
}
bool CacheIRCompiler::emitFailurePath(size_t index) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  FailurePath& failure = failurePaths[index];

  allocator.setStackPushed(failure.stackPushed());

  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    allocator.setOperandLocation(i, failure.input(i));
  }

  if (!allocator.setSpilledRegs(failure.spilledRegs())) {
    return false;
  }

  masm.bind(failure.label());
  allocator.restoreInputState(masm);
  return true;
}
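
// The emitGuard* methods below share a pattern: skip the test when the
// operand's type is statically known, otherwise load the value, add a
// failure path, and branch to it if the type test fails.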
bool CacheIRCompiler::emitGuardIsNumber(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);

  // Doubles and ints are numbers!
  if (knownType == JSVAL_TYPE_DOUBLE || knownType == JSVAL_TYPE_INT32) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardToObject(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_OBJECT) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestObject(Assembler::NotEqual, input, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardIsNullOrUndefined(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_UNDEFINED || knownType == JSVAL_TYPE_NULL) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label success;
  masm.branchTestNull(Assembler::Equal, input, &success);
  masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());

  masm.bind(&success);
  return true;
}
bool CacheIRCompiler::emitGuardIsNull(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_NULL) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestNull(Assembler::NotEqual, input, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardIsUndefined(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_UNDEFINED) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardIsNotUninitializedLexical(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand val = allocator.useValueRegister(masm, valId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestMagicValue(Assembler::Equal, val, JS_UNINITIALIZED_LEXICAL,
                            failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardBooleanToInt32(ValOperandId inputId,
                                              Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
    Register input =
        allocator.useRegister(masm, BooleanOperandId(inputId.id()));
    masm.move32(input, output);
    return true;
  }
  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.fallibleUnboxBoolean(input, output, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardToString(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_STRING) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestString(Assembler::NotEqual, input, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardToSymbol(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_SYMBOL) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardToBigInt(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_BIGINT) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardToBoolean(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardToInt32(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardToNonGCThing(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestGCThing(Assembler::Equal, input, failure->label());
  return true;
}
// Infallible |emitDouble| emitters can use this implementation to avoid
// generating extra clean-up instructions to restore the scratch float
// register. To select this function simply omit the |Label* fail| parameter
// for the emitter lambda function.
template <typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 1,
                        void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
                ValueOperand input, FailurePath* failure,
                EmitDouble emitDouble) {
  AutoScratchFloatRegister floatReg(compiler);

  masm.unboxDouble(input, floatReg);
  emitDouble(floatReg.get());
}

template <typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 2,
                        void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
                ValueOperand input, FailurePath* failure,
                EmitDouble emitDouble) {
  AutoScratchFloatRegister floatReg(compiler, failure);

  masm.unboxDouble(input, floatReg);
  emitDouble(floatReg.get(), floatReg.failure());
}
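
// Splits on the value's tag: int32 values are unboxed into |output| and
// handled by |emitInt32|; doubles are handled by |emitDouble|; any other tag
// jumps to |failure|.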
template <typename EmitInt32, typename EmitDouble>
static void EmitGuardInt32OrDouble(CacheIRCompiler* compiler,
                                   MacroAssembler& masm, ValueOperand input,
                                   Register output, FailurePath* failure,
                                   EmitInt32 emitInt32, EmitDouble emitDouble) {
  Label done;

  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxInt32(input, output);
      emitInt32();

      masm.jump(&done);
    }
    masm.bind(&notInt32);

    masm.branchTestDouble(Assembler::NotEqual, tag, failure->label());
    {
      ScratchTagScopeRelease _(&tag);

      EmitGuardDouble(compiler, masm, input, failure, emitDouble);
    }
  }

  masm.bind(&done);
}
,
1951 Int32OperandId resultId
) {
1952 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
1953 Register output
= allocator
.defineRegister(masm
, resultId
);
1955 if (allocator
.knownType(inputId
) == JSVAL_TYPE_INT32
) {
1956 Register input
= allocator
.useRegister(masm
, Int32OperandId(inputId
.id()));
1957 masm
.move32(input
, output
);
1961 ValueOperand input
= allocator
.useValueRegister(masm
, inputId
);
1963 FailurePath
* failure
;
1964 if (!addFailurePath(&failure
)) {
1968 EmitGuardInt32OrDouble(
1969 this, masm
, input
, output
, failure
,
1971 // No-op if the value is already an int32.
1973 [&](FloatRegister floatReg
, Label
* fail
) {
1974 // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
1975 masm
.convertDoubleToInt32(floatReg
, output
, fail
, false);
bool CacheIRCompiler::emitInt32ToIntPtr(Int32OperandId inputId,
                                        IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register input = allocator.useRegister(masm, inputId);
  Register output = allocator.defineRegister(masm, resultId);

  masm.move32SignExtendToPtr(input, output);
  return true;
}
bool CacheIRCompiler::emitGuardNumberToIntPtrIndex(NumberOperandId inputId,
                                                   bool supportOOB,
                                                   IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure = nullptr;
  if (!supportOOB) {
    if (!addFailurePath(&failure)) {
      return false;
    }
  }

  AutoScratchFloatRegister floatReg(this, failure);
  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
  if (supportOOB) {
    Label done, fail;
    masm.convertDoubleToPtr(floatReg, output, &fail, false);
    masm.jump(&done);

    // Substitute the invalid index with an arbitrary out-of-bounds index.
    masm.bind(&fail);
    masm.movePtr(ImmWord(-1), output);

    masm.bind(&done);
  } else {
    masm.convertDoubleToPtr(floatReg, output, floatReg.failure(), false);
  }

  return true;
}
bool CacheIRCompiler::emitGuardToInt32ModUint32(ValOperandId inputId,
                                                Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
    if (input.constant()) {
      masm.move32(Imm32(input.value().toInt32()), output);
    } else {
      MOZ_ASSERT(input.reg().type() == MIRType::Int32);
      masm.move32(input.reg().typedReg().gpr(), output);
    }
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  EmitGuardInt32OrDouble(
      this, masm, input, output, failure,
      []() {
        // No-op if the value is already an int32.
      },
      [&](FloatRegister floatReg, Label* fail) {
        masm.branchTruncateDoubleMaybeModUint32(floatReg, output, fail);
      });

  return true;
}
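// Uint8Clamped conversion saturates instead of wrapping: negative inputs
// clamp to 0, values above 255 clamp to 255, and fractional doubles round
// half-to-even (2.5 clamps to 2, 3.5 to 4).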
bool CacheIRCompiler::emitGuardToUint8Clamped(ValOperandId inputId,
                                              Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
    if (input.constant()) {
      masm.move32(Imm32(ClampDoubleToUint8(input.value().toInt32())), output);
    } else {
      MOZ_ASSERT(input.reg().type() == MIRType::Int32);
      masm.move32(input.reg().typedReg().gpr(), output);
      masm.clampIntToUint8(output);
    }
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  EmitGuardInt32OrDouble(
      this, masm, input, output, failure,
      [&]() {
        // |output| holds the unboxed int32 value.
        masm.clampIntToUint8(output);
      },
      [&](FloatRegister floatReg) {
        masm.clampDoubleToUint8(floatReg, output);
      });

  return true;
}
bool CacheIRCompiler::emitGuardNonDoubleType(ValOperandId inputId,
                                             ValueType type) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (allocator.knownType(inputId) == JSValueType(type)) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  switch (type) {
    case ValueType::String:
      masm.branchTestString(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Symbol:
      masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::BigInt:
      masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Int32:
      masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Boolean:
      masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Undefined:
      masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Null:
      masm.branchTestNull(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Double:
    case ValueType::Magic:
    case ValueType::PrivateGCThing:
    case ValueType::Object:
#ifdef ENABLE_RECORD_TUPLE
    case ValueType::ExtendedPrimitive:
#endif
      MOZ_CRASH("unexpected type");
  }

  return true;
}
bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (kind == GuardClassKind::JSFunction) {
    if (objectGuardNeedsSpectreMitigations(objId)) {
      masm.branchTestObjIsFunction(Assembler::NotEqual, obj, scratch, obj,
                                   failure->label());
    } else {
      masm.branchTestObjIsFunctionNoSpectreMitigations(
          Assembler::NotEqual, obj, scratch, failure->label());
    }
    return true;
  }

  const JSClass* clasp = nullptr;
  switch (kind) {
    case GuardClassKind::Array:
      clasp = &ArrayObject::class_;
      break;
    case GuardClassKind::PlainObject:
      clasp = &PlainObject::class_;
      break;
    case GuardClassKind::ArrayBuffer:
      clasp = &ArrayBufferObject::class_;
      break;
    case GuardClassKind::SharedArrayBuffer:
      clasp = &SharedArrayBufferObject::class_;
      break;
    case GuardClassKind::DataView:
      clasp = &DataViewObject::class_;
      break;
    case GuardClassKind::MappedArguments:
      clasp = &MappedArgumentsObject::class_;
      break;
    case GuardClassKind::UnmappedArguments:
      clasp = &UnmappedArgumentsObject::class_;
      break;
    case GuardClassKind::WindowProxy:
      clasp = cx_->runtime()->maybeWindowProxyClass();
      break;
    case GuardClassKind::Set:
      clasp = &SetObject::class_;
      break;
    case GuardClassKind::Map:
      clasp = &MapObject::class_;
      break;
    case GuardClassKind::BoundFunction:
      clasp = &BoundFunctionObject::class_;
      break;
    case GuardClassKind::JSFunction:
      MOZ_CRASH("JSFunction handled before switch");
  }

  if (objectGuardNeedsSpectreMitigations(objId)) {
    masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                                scratch, failure->label());
  }

  return true;
}
bool CacheIRCompiler::emitGuardNullProto(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjProto(obj, scratch);
  masm.branchTestPtr(Assembler::NonZero, scratch, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsExtensible(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfObjectNotExtensible(obj, scratch, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardDynamicSlotIsSpecificObject(
    ObjOperandId objId, ObjOperandId expectedId, uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register expectedObject = allocator.useRegister(masm, expectedId);

  // Allocate registers before the failure path to make sure they're registered
  // by addFailurePath.
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Guard on the expected object.
  StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  emitLoadStubField(slot, scratch2);
  BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
  masm.fallibleUnboxObject(expectedSlot, scratch1, failure->label());
  masm.branchPtr(Assembler::NotEqual, expectedObject, scratch1,
                 failure->label());

  return true;
}

bool CacheIRCompiler::emitGuardDynamicSlotIsNotObject(ObjOperandId objId,
                                                      uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Guard that the slot isn't an object.
  StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  emitLoadStubField(slot, scratch2);
  BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
  masm.branchTestObject(Assembler::Equal, expectedSlot, failure->label());

  return true;
}
bool CacheIRCompiler::emitGuardFixedSlotValue(ObjOperandId objId,
                                              uint32_t offsetOffset,
                                              uint32_t valOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchValueRegister scratchVal(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(offset, scratch);

  StubFieldOffset val(valOffset, StubField::Type::Value);
  emitLoadValueStubField(val, scratchVal);

  BaseIndex slotVal(obj, scratch, TimesOne);
  masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
                       failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardDynamicSlotValue(ObjOperandId objId,
                                                uint32_t offsetOffset,
                                                uint32_t valOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchValueRegister scratchVal(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);

  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(offset, scratch2);

  StubFieldOffset val(valOffset, StubField::Type::Value);
  emitLoadValueStubField(val, scratchVal);

  BaseIndex slotVal(scratch1, scratch2, TimesOne);
  masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
                       failure->label());
  return true;
}
bool CacheIRCompiler::emitLoadScriptedProxyHandler(ValOperandId resultId,
                                                   ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
               output.scratchReg());
  masm.loadValue(
      Address(output.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
                                       ScriptedProxyHandler::HANDLER_EXTRA)),
      output);
  return true;
}
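// Convert a jsid-like Value to a string or symbol. Strings and symbols pass
// through unchanged; int32 ids are converted through the static strings
// cache when possible and otherwise via an ABI call to Int32ToStringPure,
// which returns null on OOM (handled by the failure path).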
bool CacheIRCompiler::emitIdToStringOrSymbol(ValOperandId resultId,
                                             ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand id = allocator.useValueRegister(masm, idId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.moveValue(id, output);

  Label done, intDone, callVM;
  {
    ScratchTagScope tag(masm, output);
    masm.splitTagForTest(output, tag);
    masm.branchTestString(Assembler::Equal, tag, &done);
    masm.branchTestSymbol(Assembler::Equal, tag, &done);
    masm.branchTestInt32(Assembler::NotEqual, tag, failure->label());
  }

  Register intReg = output.scratchReg();
  masm.unboxInt32(output, intReg);

  // Fast path for small integers.
  masm.lookupStaticIntString(intReg, intReg, scratch, cx_->staticStrings(),
                             &callVM);
  masm.jump(&intDone);

  masm.bind(&callVM);
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSLinearString* (*)(JSContext* cx, int32_t i);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(intReg);
  masm.callWithABI<Fn, js::Int32ToStringPure>();

  masm.storeCallPointerResult(intReg);

  LiveRegisterSet ignore;
  ignore.add(intReg);
  masm.PopRegsInMaskIgnore(volatileRegs, ignore);

  masm.branchPtr(Assembler::Equal, intReg, ImmPtr(nullptr), failure->label());

  masm.bind(&intDone);
  masm.tagValue(JSVAL_TYPE_STRING, intReg, output);
  masm.bind(&done);

  return true;
}
bool CacheIRCompiler::emitLoadFixedSlot(ValOperandId resultId,
                                        ObjOperandId objId,
                                        uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  StubFieldOffset slotIndex(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(slotIndex, scratch);

  masm.loadValue(BaseIndex(obj, scratch, TimesOne), output);
  return true;
}

bool CacheIRCompiler::emitLoadDynamicSlot(ValOperandId resultId,
                                          ObjOperandId objId,
                                          uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch1(allocator, masm);
  Register scratch2 = output.scratchReg();

  StubFieldOffset slotIndex(slotOffset, StubField::Type::RawInt32);
  emitLoadStubField(slotIndex, scratch2);

  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  masm.loadValue(BaseObjectSlotIndex(scratch1, scratch2), output);
  return true;
}
bool CacheIRCompiler::emitGuardIsNativeObject(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfNonNativeObj(obj, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestObjectIsProxy(false, obj, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsNotProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestObjectIsProxy(true, obj, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsNotArrayBufferMaybeShared(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchPtr(Assembler::Equal, scratch, ImmPtr(&ArrayBufferObject::class_),
                 failure->label());
  masm.branchPtr(Assembler::Equal, scratch,
                 ImmPtr(&SharedArrayBufferObject::class_), failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardIsTypedArray(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchIfClassIsNotTypedArray(scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsNotDOMProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, scratch,
                                    GetDOMProxyHandlerFamily(),
                                    failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardNoDenseElements(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Make sure there are no dense elements.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::NotEqual, initLength, Imm32(0), failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardSpecificInt32(Int32OperandId numId,
                                             int32_t expected) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register num = allocator.useRegister(masm, numId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branch32(Assembler::NotEqual, num, Imm32(expected), failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardStringToInt32(StringOperandId strId,
                                             Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  Register output = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.guardStringToInt32(str, output, scratch, volatileRegs, failure->label());
  return true;
}
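// The ABI call below can't return a boxed Value, so StringToNumberPure
// writes its result through a double* outparam: a double-sized stack slot is
// reserved, its address is passed as the last argument, and the loaded
// result is boxed afterwards.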
bool CacheIRCompiler::emitGuardStringToNumber(StringOperandId strId,
                                              NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label vmCall, done;
  // Use indexed value as fast path if possible.
  masm.loadStringIndexValue(str, scratch, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output);
  masm.jump(&done);
  {
    masm.bind(&vmCall);

    // Reserve stack for holding the result value of the call.
    masm.reserveStack(sizeof(double));
    masm.moveStackPtrTo(output.payloadOrValueReg());

    // We cannot use callVM, as callVM expects to be able to clobber all
    // operands, however, since this op is not the last in the generated IC, we
    // want to be able to reference other live values.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(str);
    masm.passABIArg(output.payloadOrValueReg());
    masm.callWithABI<Fn, js::StringToNumberPure>();
    masm.storeCallPointerResult(scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);

    Label ok;
    masm.branchIfTrueBool(scratch, &ok);
    {
      // OOM path, recovered by StringToNumberPure.
      //
      // Use addToStackPtr instead of freeStack as freeStack tracks stack height
      // flow-insensitively, and using it twice would confuse the stack height
      // tracking.
      masm.addToStackPtr(Imm32(sizeof(double)));
      masm.jump(failure->label());
    }
    masm.bind(&ok);

    {
      ScratchDoubleScope fpscratch(masm);
      masm.loadDouble(Address(output.payloadOrValueReg(), 0), fpscratch);
      masm.boxDouble(fpscratch, output, fpscratch);
    }
    masm.freeStack(sizeof(double));
  }
  masm.bind(&done);

  return true;
}
bool CacheIRCompiler::emitNumberParseIntResult(StringOperandId strId,
                                               Int32OperandId radixId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register radix = allocator.useRegister(masm, radixId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
  masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
  masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
  masm.bind(&ok);
#endif

  // Discard the stack to ensure it's balanced when we skip the vm-call.
  allocator.discardStack(masm);

  // Use indexed value as fast path if possible.
  Label vmCall, done;
  masm.loadStringIndexValue(str, scratch, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, callvm.outputValueReg());
  masm.jump(&done);
  {
    masm.bind(&vmCall);

    callvm.prepare();
    masm.Push(radix);
    masm.Push(str);

    using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
    callvm.call<Fn, js::NumberParseInt>();
  }
  masm.bind(&done);

  return true;
}
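// parseInt on a double reuses integer truncation when that matches parsing
// the decimal string: parseInt(1.5) is 1, so fractional parts are fine. The
// extra checks below handle inputs that truncate to 0: +0 and -0 return 0,
// but anything else below 1.0e-6 bails, since e.g. String(1e-7) is "1e-7"
// (so parseInt gives 1) and negative inputs like -0.5 must return -0.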
bool CacheIRCompiler::emitDoubleParseIntResult(NumberOperandId numId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, numId, floatScratch1);

  masm.branchDouble(Assembler::DoubleUnordered, floatScratch1, floatScratch1,
                    failure->label());
  masm.branchTruncateDoubleToInt32(floatScratch1, scratch, failure->label());

  Label ok;
  masm.branch32(Assembler::NotEqual, scratch, Imm32(0), &ok);
  {
    // Accept both +0 and -0 and return 0.
    masm.loadConstantDouble(0.0, floatScratch2);
    masm.branchDouble(Assembler::DoubleEqual, floatScratch1, floatScratch2,
                      &ok);

    // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
    masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, floatScratch2);
    masm.branchDouble(Assembler::DoubleLessThan, floatScratch1, floatScratch2,
                      failure->label());
  }
  masm.bind(&ok);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitBooleanToNumber(BooleanOperandId booleanId,
                                          NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register boolean = allocator.useRegister(masm, booleanId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  masm.tagValue(JSVAL_TYPE_INT32, boolean, output);
  return true;
}

bool CacheIRCompiler::emitGuardStringToIndex(StringOperandId strId,
                                             Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label vmCall, done;
  masm.loadStringIndexValue(str, output, &vmCall);
  masm.jump(&done);

  {
    masm.bind(&vmCall);
    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = int32_t (*)(JSString* str);
    masm.setupUnalignedABICall(output);
    masm.passABIArg(str);
    masm.callWithABI<Fn, GetIndexFromString>();
    masm.storeCallInt32Result(output);

    LiveRegisterSet ignore;
    ignore.add(output);
    masm.PopRegsInMaskIgnore(save, ignore);

    // GetIndexFromString returns a negative value on failure.
    masm.branchTest32(Assembler::Signed, output, output, failure->label());
  }

  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitLoadProto(ObjOperandId objId, ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.loadObjProto(obj, reg);

#ifdef DEBUG
  // We shouldn't encounter a null or lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label done;
  masm.branchPtr(Assembler::Above, reg, ImmWord(1), &done);
  masm.assumeUnreachable("Unexpected null or lazy proto in CacheIR LoadProto");
  masm.bind(&done);
#endif
  return true;
}

bool CacheIRCompiler::emitLoadEnclosingEnvironment(ObjOperandId objId,
                                                   ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.unboxObject(
      Address(obj, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
  return true;
}

bool CacheIRCompiler::emitLoadWrapperTarget(ObjOperandId objId,
                                            ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);
  masm.unboxObject(
      Address(reg, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()), reg);
  return true;
}

bool CacheIRCompiler::emitLoadValueTag(ValOperandId valId,
                                       ValueTagOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand val = allocator.useValueRegister(masm, valId);
  Register res = allocator.defineRegister(masm, resultId);

  Register tag = masm.extractTag(val, res);
  if (tag != res) {
    masm.mov(tag, res);
  }
  return true;
}
bool CacheIRCompiler::emitLoadDOMExpandoValue(ObjOperandId objId,
                                              ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.defineValueRegister(masm, resultId);

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
               val.scratchReg());
  masm.loadValue(Address(val.scratchReg(),
                         js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
                 val);
  return true;
}

bool CacheIRCompiler::emitLoadDOMExpandoValueIgnoreGeneration(
    ObjOperandId objId, ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  // Determine the expando's Address.
  Register scratch = output.scratchReg();
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address expandoAddr(scratch,
                      js::detail::ProxyReservedSlots::offsetOfPrivateSlot());

#ifdef DEBUG
  // Private values are stored as doubles, so assert we have a double.
  Label ok;
  masm.branchTestDouble(Assembler::Equal, expandoAddr, &ok);
  masm.assumeUnreachable("DOM expando is not a PrivateValue!");
  masm.bind(&ok);
#endif

  // Load the ExpandoAndGeneration* from the PrivateValue.
  masm.loadPrivate(expandoAddr, scratch);

  // Load expandoAndGeneration->expando into the output Value register.
  masm.loadValue(Address(scratch, ExpandoAndGeneration::offsetOfExpando()),
                 output);
  return true;
}
bool CacheIRCompiler::emitLoadUndefinedResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}

static void EmitStoreBoolean(MacroAssembler& masm, bool b,
                             const AutoOutputRegister& output) {
  if (output.hasValue()) {
    Value val = BooleanValue(b);
    masm.moveValue(val, output.valueReg());
  } else {
    MOZ_ASSERT(output.type() == JSVAL_TYPE_BOOLEAN);
    masm.movePtr(ImmWord(b), output.typedReg().gpr());
  }
}

bool CacheIRCompiler::emitLoadBooleanResult(bool val) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  EmitStoreBoolean(masm, val, output);
  return true;
}

bool CacheIRCompiler::emitLoadOperandResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand input = allocator.useValueRegister(masm, inputId);
  masm.moveValue(input, output.valueReg());
  return true;
}

static void EmitStoreResult(MacroAssembler& masm, Register reg,
                            JSValueType type,
                            const AutoOutputRegister& output) {
  if (output.hasValue()) {
    masm.tagValue(type, reg, output.valueReg());
    return;
  }
  if (type == JSVAL_TYPE_INT32 && output.typedReg().isFloat()) {
    masm.convertInt32ToDouble(reg, output.typedReg().fpu());
    return;
  }
  if (type == output.type()) {
    masm.mov(reg, output.typedReg().gpr());
    return;
  }
  masm.assumeUnreachable("Should have monitored result");
}
bool CacheIRCompiler::emitLoadInt32ArrayLengthResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
  masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);

  // Guard length fits in an int32.
  masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadInt32ArrayLength(ObjOperandId objId,
                                               Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register res = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), res);
  masm.load32(Address(res, ObjectElements::offsetOfLength()), res);

  // Guard length fits in an int32.
  masm.branchTest32(Assembler::Signed, res, res, failure->label());
  return true;
}
bool CacheIRCompiler::emitDoubleAddResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.addDouble(floatScratch1, floatScratch0);
  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}

bool CacheIRCompiler::emitDoubleSubResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.subDouble(floatScratch1, floatScratch0);
  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}

bool CacheIRCompiler::emitDoubleMulResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.mulDouble(floatScratch1, floatScratch0);
  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}

bool CacheIRCompiler::emitDoubleDivResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.divDouble(floatScratch1, floatScratch0);
  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
bool CacheIRCompiler::emitDoubleModResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double a, double b);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.callWithABI<Fn, js::NumberMod>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}

bool CacheIRCompiler::emitDoublePowResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.callWithABI<Fn, js::ecmaPow>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
bool CacheIRCompiler::emitInt32AddResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(rhs, scratch);
  masm.branchAdd32(Assembler::Overflow, lhs, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32SubResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(lhs, scratch);
  masm.branchSub32(Assembler::Overflow, rhs, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
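// Multiplication needs a negative-zero check on top of the overflow check:
// if the product is 0 and either operand is negative (e.g. -4 * 0), the JS
// result is -0.0, which an int32 can't represent.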
bool CacheIRCompiler::emitInt32MulResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label maybeNegZero, done;
  masm.mov(lhs, scratch);
  masm.branchMul32(Assembler::Overflow, rhs, scratch, failure->label());
  masm.branchTest32(Assembler::Zero, scratch, scratch, &maybeNegZero);
  masm.jump(&done);

  masm.bind(&maybeNegZero);
  masm.mov(lhs, scratch2);
  // Result is -0 if exactly one of lhs or rhs is negative.
  masm.or32(rhs, scratch2);
  masm.branchTest32(Assembler::Signed, scratch2, scratch2, failure->label());

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
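// Division bails whenever the quotient is not an int32: division by zero
// (NaN or an infinity), INT32_MIN / -1 (2147483648 overflows), 0 / -n
// (negative zero), and any division with a non-zero remainder, e.g. 1 / 2.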
bool CacheIRCompiler::emitInt32DivResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegister rem(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Prevent division by 0.
  masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());

  // Prevent -2147483648 / -1.
  Label notOverflow;
  masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
  masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
  masm.bind(&notOverflow);

  // Prevent negative 0.
  Label notZero;
  masm.branchTest32(Assembler::NonZero, lhs, lhs, &notZero);
  masm.branchTest32(Assembler::Signed, rhs, rhs, failure->label());
  masm.bind(&notZero);

  masm.mov(lhs, scratch);
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.flexibleDivMod32(rhs, scratch, rem, false, volatileRegs);

  // A remainder implies a double result.
  masm.branchTest32(Assembler::NonZero, rem, rem, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
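// Modulo takes the sign of the dividend, so a zero result with a negative
// dividend (e.g. -4 % 2) is -0 and must bail. INT32_MIN % -1 is filtered
// out up front because the hardware division traps on x86.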
bool CacheIRCompiler::emitInt32ModResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // x % 0 results in NaN.
  masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());

  // Prevent -2147483648 % -1.
  //
  // Traps on x86 and has undefined behavior on ARM32 (when __aeabi_idivmod is
  // called).
  Label notOverflow;
  masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
  masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
  masm.bind(&notOverflow);

  masm.mov(lhs, scratch);
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.flexibleRemainder32(rhs, scratch, false, volatileRegs);

  // Modulo takes the sign of the dividend; we can't return negative zero here.
  Label notZero;
  masm.branchTest32(Assembler::NonZero, scratch, scratch, &notZero);
  masm.branchTest32(Assembler::Signed, lhs, lhs, failure->label());
  masm.bind(&notZero);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
bool CacheIRCompiler::emitInt32PowResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register base = allocator.useRegister(masm, lhsId);
  Register power = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.pow32(base, power, scratch1, scratch2, scratch3, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitInt32BitOrResult(Int32OperandId lhsId,
                                           Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  masm.mov(rhs, scratch);
  masm.or32(lhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32BitXorResult(Int32OperandId lhsId,
                                            Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  masm.mov(rhs, scratch);
  masm.xor32(lhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32BitAndResult(Int32OperandId lhsId,
                                            Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  masm.mov(rhs, scratch);
  masm.and32(lhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32LeftShiftResult(Int32OperandId lhsId,
                                               Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.mov(lhs, scratch);
  masm.flexibleLshift32(rhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32RightShiftResult(Int32OperandId lhsId,
                                                Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.mov(lhs, scratch);
  masm.flexibleRshift32Arithmetic(rhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
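// x >>> y produces a uint32, which may not fit in an int32: -1 >>> 0 is
// 4294967295. The forceDouble variant always boxes a double; otherwise a
// result with the sign bit set bails out.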
bool CacheIRCompiler::emitInt32URightShiftResult(Int32OperandId lhsId,
                                                 Int32OperandId rhsId,
                                                 bool forceDouble) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(lhs, scratch);
  masm.flexibleRshift32(rhs, scratch);
  if (forceDouble) {
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  } else {
    masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  }
  return true;
}
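// 0 and INT32_MIN are the only int32 inputs whose negation (-0 and
// 2147483648) is not an int32, and they are also the only ones with all of
// their low 31 bits clear, so a single test against 0x7fffffff catches both.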
bool CacheIRCompiler::emitInt32NegationResult(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Guard against 0 and MIN_INT by checking if low 31-bits are all zero.
  // Both of these result in a double.
  masm.branchTest32(Assembler::Zero, val, Imm32(0x7fffffff), failure->label());
  masm.mov(val, scratch);
  masm.neg32(scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitInt32IncResult(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register input = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(input, scratch);
  masm.branchAdd32(Assembler::Overflow, Imm32(1), scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32DecResult(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register input = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(input, scratch);
  masm.branchSub32(Assembler::Overflow, Imm32(1), scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32NotResult(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.mov(val, scratch);
  masm.not32(scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitDoubleNegationResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  masm.negateDouble(floatReg);
  masm.boxDouble(floatReg, output.valueReg(), floatReg);

  return true;
}

bool CacheIRCompiler::emitDoubleIncDecResult(bool isInc,
                                             NumberOperandId inputId) {
  AutoOutputRegister output(*this);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  {
    ScratchDoubleScope fpscratch(masm);
    masm.loadConstantDouble(1.0, fpscratch);
    if (isInc) {
      masm.addDouble(fpscratch, floatReg);
    } else {
      masm.subDouble(fpscratch, floatReg);
    }
  }
  masm.boxDouble(floatReg, output.valueReg(), floatReg);

  return true;
}

bool CacheIRCompiler::emitDoubleIncResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitDoubleIncDecResult(true, inputId);
}

bool CacheIRCompiler::emitDoubleDecResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitDoubleIncDecResult(false, inputId);
}
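// The BigInt ops below all go through a VM call: BigInt arithmetic
// allocates and can throw (BigInt division by zero, for example, is a
// RangeError), so there is no inline fast path here.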
template <typename Fn, Fn fn>
bool CacheIRCompiler::emitBigIntBinaryOperationShared(BigIntOperandId lhsId,
                                                      BigIntOperandId rhsId) {
  AutoCallVM callvm(masm, this, allocator);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  masm.Push(rhs);
  masm.Push(lhs);

  callvm.call<Fn, fn>();
  return true;
}
bool CacheIRCompiler::emitBigIntAddResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::add>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntSubResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::sub>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntMulResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::mul>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntDivResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::div>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntModResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::mod>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntPowResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::pow>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntBitAndResult(BigIntOperandId lhsId,
                                             BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::bitAnd>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntBitOrResult(BigIntOperandId lhsId,
                                            BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::bitOr>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntBitXorResult(BigIntOperandId lhsId,
                                             BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::bitXor>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntLeftShiftResult(BigIntOperandId lhsId,
                                                BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::lsh>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntRightShiftResult(BigIntOperandId lhsId,
                                                 BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::rsh>(lhsId, rhsId);
}
template <typename Fn, Fn fn>
bool CacheIRCompiler::emitBigIntUnaryOperationShared(BigIntOperandId inputId) {
  AutoCallVM callvm(masm, this, allocator);
  Register val = allocator.useRegister(masm, inputId);

  callvm.prepare();

  masm.Push(val);

  callvm.call<Fn, fn>();
  return true;
}
bool CacheIRCompiler::emitBigIntNotResult(BigIntOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  return emitBigIntUnaryOperationShared<Fn, BigInt::bitNot>(inputId);
}

bool CacheIRCompiler::emitBigIntNegationResult(BigIntOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  return emitBigIntUnaryOperationShared<Fn, BigInt::neg>(inputId);
}

bool CacheIRCompiler::emitBigIntIncResult(BigIntOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  return emitBigIntUnaryOperationShared<Fn, BigInt::inc>(inputId);
}

bool CacheIRCompiler::emitBigIntDecResult(BigIntOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  return emitBigIntUnaryOperationShared<Fn, BigInt::dec>(inputId);
}
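// Truncate with ToInt32 wrap-around semantics, e.g. 4294967296.0 truncates
// to 0 and -1.5 to -1. The inline path handles doubles the hardware can
// truncate directly; everything else falls back to an ABI call to
// JS::ToInt32.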
bool CacheIRCompiler::emitTruncateDoubleToUInt32(NumberOperandId inputId,
                                                 Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register res = allocator.defineRegister(masm, resultId);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  Label done, truncateABICall;

  masm.branchTruncateDoubleMaybeModUint32(floatReg, res, &truncateABICall);
  masm.jump(&done);

  masm.bind(&truncateABICall);
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(floatReg);
  save.takeUnchecked(floatReg.get().asSingle());
  masm.PushRegsInMask(save);

  using Fn = int32_t (*)(double);
  masm.setupUnalignedABICall(res);
  masm.passABIArg(floatReg, MoveOp::DOUBLE);
  masm.callWithABI<Fn, JS::ToInt32>(MoveOp::GENERAL,
                                    CheckUnsafeCallWithABI::DontCheckOther);
  masm.storeCallInt32Result(res);

  LiveRegisterSet ignore;
  ignore.add(res);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.bind(&done);
  return true;
}
3675 bool CacheIRCompiler::emitLoadArgumentsObjectLengthResult(ObjOperandId objId
) {
3676 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3677 AutoOutputRegister
output(*this);
3678 Register obj
= allocator
.useRegister(masm
, objId
);
3679 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3681 FailurePath
* failure
;
3682 if (!addFailurePath(&failure
)) {
3686 masm
.loadArgumentsObjectLength(obj
, scratch
, failure
->label());
3688 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3692 bool CacheIRCompiler::emitLoadArgumentsObjectLength(ObjOperandId objId
,
3693 Int32OperandId resultId
) {
3694 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3695 Register obj
= allocator
.useRegister(masm
, objId
);
3696 Register res
= allocator
.defineRegister(masm
, resultId
);
3698 FailurePath
* failure
;
3699 if (!addFailurePath(&failure
)) {
3703 masm
.loadArgumentsObjectLength(obj
, res
, failure
->label());
3707 bool CacheIRCompiler::emitLoadArrayBufferByteLengthInt32Result(
3708 ObjOperandId objId
) {
3709 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3710 AutoOutputRegister
output(*this);
3711 Register obj
= allocator
.useRegister(masm
, objId
);
3712 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3714 FailurePath
* failure
;
3715 if (!addFailurePath(&failure
)) {
3719 masm
.loadArrayBufferByteLengthIntPtr(obj
, scratch
);
3720 masm
.guardNonNegativeIntPtrToInt32(scratch
, failure
->label());
3721 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3725 bool CacheIRCompiler::emitLoadArrayBufferByteLengthDoubleResult(
3726 ObjOperandId objId
) {
3727 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3728 AutoOutputRegister
output(*this);
3729 Register obj
= allocator
.useRegister(masm
, objId
);
3730 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3732 ScratchDoubleScope
fpscratch(masm
);
3733 masm
.loadArrayBufferByteLengthIntPtr(obj
, scratch
);
3734 masm
.convertIntPtrToDouble(scratch
, fpscratch
);
3735 masm
.boxDouble(fpscratch
, output
.valueReg(), fpscratch
);
3739 bool CacheIRCompiler::emitLoadArrayBufferViewLengthInt32Result(
3740 ObjOperandId objId
) {
3741 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3742 AutoOutputRegister
output(*this);
3743 Register obj
= allocator
.useRegister(masm
, objId
);
3744 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3746 FailurePath
* failure
;
3747 if (!addFailurePath(&failure
)) {
3751 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch
);
3752 masm
.guardNonNegativeIntPtrToInt32(scratch
, failure
->label());
3753 masm
.tagValue(JSVAL_TYPE_INT32
, scratch
, output
.valueReg());
3757 bool CacheIRCompiler::emitLoadArrayBufferViewLengthDoubleResult(
3758 ObjOperandId objId
) {
3759 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3760 AutoOutputRegister
output(*this);
3761 Register obj
= allocator
.useRegister(masm
, objId
);
3762 AutoScratchRegisterMaybeOutput
scratch(allocator
, masm
, output
);
3764 ScratchDoubleScope
fpscratch(masm
);
3765 masm
.loadArrayBufferViewLengthIntPtr(obj
, scratch
);
3766 masm
.convertIntPtrToDouble(scratch
, fpscratch
);
3767 masm
.boxDouble(fpscratch
, output
.valueReg(), fpscratch
);
3771 bool CacheIRCompiler::emitLoadBoundFunctionNumArgs(ObjOperandId objId
,
3772 Int32OperandId resultId
) {
3773 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3775 Register obj
= allocator
.useRegister(masm
, objId
);
3776 Register output
= allocator
.defineRegister(masm
, resultId
);
3778 masm
.unboxInt32(Address(obj
, BoundFunctionObject::offsetOfFlagsSlot()),
3780 masm
.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift
), output
);
3784 bool CacheIRCompiler::emitLoadBoundFunctionTarget(ObjOperandId objId
,
3785 ObjOperandId resultId
) {
3786 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3788 Register obj
= allocator
.useRegister(masm
, objId
);
3789 Register output
= allocator
.defineRegister(masm
, resultId
);
3791 masm
.unboxObject(Address(obj
, BoundFunctionObject::offsetOfTargetSlot()),
3796 bool CacheIRCompiler::emitGuardBoundFunctionIsConstructor(ObjOperandId objId
) {
3797 JitSpew(JitSpew_Codegen
, "%s", __FUNCTION__
);
3799 Register obj
= allocator
.useRegister(masm
, objId
);
3801 FailurePath
* failure
;
3802 if (!addFailurePath(&failure
)) {
3806 Address
flagsSlot(obj
, BoundFunctionObject::offsetOfFlagsSlot());
3807 masm
.branchTest32(Assembler::Zero
, flagsSlot
,
3808 Imm32(BoundFunctionObject::IsConstructorFlag
),
bool CacheIRCompiler::emitGuardObjectIdentity(ObjOperandId obj1Id,
                                              ObjOperandId obj2Id) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj1 = allocator.useRegister(masm, obj1Id);
  Register obj2 = allocator.useRegister(masm, obj2Id);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchPtr(Assembler::NotEqual, obj1, obj2, failure->label());
  return true;
}
bool CacheIRCompiler::emitLoadFunctionLengthResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Get the JSFunction flags and arg count.
  masm.load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), scratch);

  // Functions with a SelfHostedLazyScript must be compiled with the slow path
  // before the function length is known. If the length was previously
  // resolved, the length property may be shadowed.
  masm.branchTest32(
      Assembler::NonZero, scratch,
      Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
      failure->label());

  masm.loadFunctionLength(obj, scratch, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadFunctionNameResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadFunctionName(obj, scratch, ImmGCPtr(cx_->names().empty_),
                        failure->label());

  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitLinearizeForCharAccess(StringOperandId strId,
                                                 Int32OperandId indexId,
                                                 StringOperandId resultId) {
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  Register result = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.movePtr(str, result);

  // We can omit the bounds check, because we only compare the index against
  // the string length. In the worst case we unnecessarily linearize the
  // string when the index is out-of-bounds.

  masm.branchIfCanLoadStringChar(str, index, scratch, &done);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSString*);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(str);
    masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
    masm.storeCallPointerResult(result);

    LiveRegisterSet ignore;
    ignore.add(result);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);

    masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
  }

  masm.bind(&done);
  return true;
}
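// Note: LinearizeForCharAccessPure is reached through a raw ABI call rather
// than a VM call, so all volatile registers are saved and restored manually
// around it. "Pure" helpers signal failure by returning nullptr, which the
// branchTestPtr above turns into a jump to the failure path.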
bool CacheIRCompiler::emitLoadStringLengthResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadStringLength(str, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitLoadStringCharCodeResult(StringOperandId strId,
                                                   Int32OperandId indexId,
                                                   bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  // Bounds check, load string char.
  Label done;
  if (!handleOOB) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, failure->label());
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
                        failure->label());
  } else {
    // Return NaN for out-of-bounds access.
    masm.moveValue(JS::NaNValue(), output.valueReg());

    // The bounds check mustn't use a scratch register which aliases the
    // output.
    MOZ_ASSERT(!output.valueReg().aliases(scratch3));

    // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
    // guaranteed to see no nested ropes.
    Label loadFailed;
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch3, &done);
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);

    Label loadedChar;
    masm.jump(&loadedChar);
    masm.bind(&loadFailed);
    masm.assumeUnreachable("loadStringChar can't fail for linear strings");
    masm.bind(&loadedChar);
  }

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitNewStringObjectResult(uint32_t templateObjectOffset,
                                                StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSObject* (*)(JSContext*, HandleString);
  callvm.call<Fn, NewStringObject>();
  return true;
}
bool CacheIRCompiler::emitStringIncludesResult(StringOperandId strId,
                                               StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callvm.call<Fn, js::StringIncludes>();
  return true;
}

bool CacheIRCompiler::emitStringIndexOfResult(StringOperandId strId,
                                              StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
  callvm.call<Fn, js::StringIndexOf>();
  return true;
}

bool CacheIRCompiler::emitStringLastIndexOfResult(StringOperandId strId,
                                                  StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
  callvm.call<Fn, js::StringLastIndexOf>();
  return true;
}

bool CacheIRCompiler::emitStringStartsWithResult(StringOperandId strId,
                                                 StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callvm.call<Fn, js::StringStartsWith>();
  return true;
}

bool CacheIRCompiler::emitStringEndsWithResult(StringOperandId strId,
                                               StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callvm.call<Fn, js::StringEndsWith>();
  return true;
}
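// Note: the search emitters above and the case/trim emitters below all
// follow the same AutoCallVM protocol: callvm.prepare() sets up the VM call,
// the arguments are pushed last-first so the callee sees them in declaration
// order, and callvm.call<Fn, fn>() performs the call and leaves the boxed
// result in the IC's output register.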
bool CacheIRCompiler::emitStringToLowerCaseResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringToLowerCase>();
  return true;
}

bool CacheIRCompiler::emitStringToUpperCaseResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringToUpperCase>();
  return true;
}

bool CacheIRCompiler::emitStringTrimResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringTrim>();
  return true;
}

bool CacheIRCompiler::emitStringTrimStartResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringTrimStart>();
  return true;
}

bool CacheIRCompiler::emitStringTrimEndResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringTrimEnd>();
  return true;
}
bool CacheIRCompiler::emitLoadArgumentsObjectArgResult(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElement(obj, index, output.valueReg(), scratch,
                                  failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectArgHoleResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElementHole(obj, index, output.valueReg(), scratch,
                                      failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectArgExistsResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElementExists(obj, index, scratch2, scratch1,
                                        failure->label());
  EmitStoreResult(masm, scratch2, JSVAL_TYPE_BOOLEAN, output);
  return true;
}
bool CacheIRCompiler::emitLoadDenseElementResult(ObjOperandId objId,
                                                 Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);

  // Bounds check.
  Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, scratch2, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch1, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());
  masm.loadTypedOrValue(element, output);
  return true;
}
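// Note: spectreBoundsCheck32 is a bounds check that additionally, under the
// Spectre index-masking mitigation, masks the index register so that even a
// mispredicted branch cannot speculatively read past the initialized length;
// an out-of-bounds index is forced to read element 0 rather than
// attacker-chosen memory. The magic-value test afterwards rejects holes in
// the dense element storage.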
bool CacheIRCompiler::emitGuardInt32IsNonNegative(Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register index = allocator.useRegister(masm, indexId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardIndexIsNotDenseElement(ObjOperandId objId,
                                                      Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);
  AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Ensure index >= initLength or the element is a hole.
  Label notDense;
  Address capacity(scratch, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, capacity, spectreScratch, &notDense);

  BaseValueIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, &notDense);

  masm.jump(failure->label());

  masm.bind(&notDense);
  return true;
}

bool CacheIRCompiler::emitGuardIndexIsValidUpdateOrAdd(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);
  AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  Label success;

  // If length is writable, branch to &success. All indices are writable.
  Address flags(scratch, ObjectElements::offsetOfFlags());
  masm.branchTest32(Assembler::Zero, flags,
                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
                    &success);

  // Otherwise, ensure index is in bounds.
  Address length(scratch, ObjectElements::offsetOfLength());
  masm.spectreBoundsCheck32(index, length, spectreScratch,
                            /* failure = */ failure->label());
  masm.bind(&success);
  return true;
}
bool CacheIRCompiler::emitGuardTagNotEqual(ValueTagOperandId lhsId,
                                           ValueTagOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.branch32(Assembler::Equal, lhs, rhs, failure->label());

  // If both lhs and rhs are numbers, we can't use tag comparison to do an
  // inequality comparison.
  masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
  masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
  masm.jump(failure->label());

  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto(
    ObjOperandId objId, uint32_t shapeWrapperOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset shapeWrapper(shapeWrapperOffset, StubField::Type::JSObject);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address holderAddress(scratch,
                        sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
  Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
                                      GetXrayJitInfo()->holderExpandoSlot));

  masm.fallibleUnboxObject(holderAddress, scratch, failure->label());
  masm.fallibleUnboxObject(expandoAddress, scratch, failure->label());

  // Unwrap the expando before checking its shape.
  masm.loadPtr(Address(scratch, ProxyObject::offsetOfReservedSlots()), scratch);
  masm.unboxObject(
      Address(scratch, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
      scratch);

  emitLoadStubField(shapeWrapper, scratch2);
  LoadShapeWrapperContents(masm, scratch2, scratch2, failure->label());
  masm.branchTestObjShape(Assembler::NotEqual, scratch, scratch2, scratch3,
                          scratch, failure->label());

  // The reserved slots on the expando should all be in fixed slots.
  Address protoAddress(scratch, NativeObject::getFixedSlotOffset(
                                    GetXrayJitInfo()->expandoProtoSlot));
  masm.branchTestUndefined(Assembler::NotEqual, protoAddress,
                           failure->label());

  return true;
}
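// Note: both Xray guards walk the same structure: the proxy's reserved-slot
// array holds the Xray holder object at |xrayHolderSlot|, and the holder's
// fixed slot |holderExpandoSlot| holds the expando. The fallible unboxes bail
// when either object is absent; the expando itself is a wrapper, so it is
// unwrapped through its private slot before its shape and the absence of a
// custom proto are checked.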
bool CacheIRCompiler::emitGuardXrayNoExpando(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address holderAddress(scratch,
                        sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
  Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
                                      GetXrayJitInfo()->holderExpandoSlot));

  Label done;
  masm.fallibleUnboxObject(holderAddress, scratch, &done);
  masm.branchTestObject(Assembler::Equal, expandoAddress, failure->label());
  masm.bind(&done);

  return true;
}
bool CacheIRCompiler::emitGuardNoAllocationMetadataBuilder(
    uint32_t builderAddrOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  StubFieldOffset builderField(builderAddrOffset, StubField::Type::RawPointer);
  emitLoadStubField(builderField, scratch);
  masm.branchPtr(Assembler::NotEqual, Address(scratch, 0), ImmWord(0),
                 failure->label());

  return true;
}
bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId,
                                                   bool constructing) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register fun = allocator.useRegister(masm, funId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfFunctionHasNoJitEntry(fun, constructing, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfFunctionHasJitEntry(obj, /*isConstructing =*/false,
                                   failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardFunctionIsNonBuiltinCtor(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfNotFunctionIsNonBuiltinCtor(fun, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardFunctionIsConstructor(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register funcReg = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Ensure the function is a constructor.
  masm.branchTestFunctionFlags(funcReg, FunctionFlags::CONSTRUCTOR,
                               Assembler::Zero, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardNotClassConstructor(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
                          fun, scratch, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardArrayIsPacked(ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchArrayIsNotPacked(array, scratch, scratch2, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardArgumentsObjectFlags(ObjOperandId objId,
                                                    uint8_t flags) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestArgumentsObjectFlags(obj, scratch, flags, Assembler::NonZero,
                                      failure->label());
  return true;
}
bool CacheIRCompiler::emitLoadDenseElementHoleResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Make sure the index is nonnegative.
  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);

  // Guard on the initialized length.
  Label hole;
  Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, scratch2, &hole);

  // Load the value.
  Label done;
  masm.loadValue(BaseObjectElementIndex(scratch1, index), output.valueReg());
  masm.branchTestMagic(Assembler::NotEqual, output.valueReg(), &done);

  // Load undefined for the hole.
  masm.bind(&hole);
  masm.moveValue(UndefinedValue(), output.valueReg());

  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitLoadTypedArrayElementExistsResult(
    ObjOperandId objId, IntPtrOperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label outOfBounds, done;

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.branchPtr(Assembler::BelowOrEqual, scratch, index, &outOfBounds);
  EmitStoreBoolean(masm, true, output);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  EmitStoreBoolean(masm, false, output);

  masm.bind(&done);
  return true;
}
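// Note: the view length is an intptr and the comparison above is unsigned
// (BelowOrEqual), so a negative index reinterprets as a huge unsigned value
// and takes the out-of-bounds path along with indices >= length.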
bool CacheIRCompiler::emitLoadDenseElementExistsResult(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Bounds check. Unsigned compare sends negative indices to the next IC.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());

  EmitStoreBoolean(masm, true, output);
  return true;
}

bool CacheIRCompiler::emitLoadDenseElementHoleExistsResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Make sure the index is nonnegative.
  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Guard on the initialized length.
  Label hole;
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::BelowOrEqual, initLength, index, &hole);

  // Load value and replace with true.
  Label done;
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, &hole);
  EmitStoreBoolean(masm, true, output);
  masm.jump(&done);

  // Load false for the hole.
  masm.bind(&hole);
  EmitStoreBoolean(masm, false, output);

  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitPackedArrayPopResult(ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.packedArrayPop(array, output.valueReg(), scratch1, scratch2,
                      failure->label());
  return true;
}

bool CacheIRCompiler::emitPackedArrayShiftResult(ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.packedArrayShift(array, output.valueReg(), scratch1, scratch2,
                        volatileRegs, failure->label());
  return true;
}
bool CacheIRCompiler::emitIsObjectResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ValueOperand val = allocator.useValueRegister(masm, inputId);

  masm.testObjectSet(Assembler::Equal, val, scratch);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitIsPackedArrayResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  Register outputScratch = output.valueReg().scratchReg();
  masm.setIsPackedArray(obj, outputScratch, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitIsCallableResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  ValueOperand val = allocator.useValueRegister(masm, inputId);

  Label isObject, done;
  masm.branchTestObject(Assembler::Equal, val, &isObject);
  // Primitives are never callable.
  masm.move32(Imm32(0), scratch2);
  masm.jump(&done);

  masm.bind(&isObject);
  masm.unboxObject(val, scratch1);

  Label isProxy;
  masm.isCallable(scratch1, scratch2, &isProxy);
  masm.jump(&done);

  masm.bind(&isProxy);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject* obj);
    masm.setupUnalignedABICall(scratch2);
    masm.passABIArg(scratch1);
    masm.callWithABI<Fn, ObjectIsCallable>();
    masm.storeCallBoolResult(scratch2);

    LiveRegisterSet ignore;
    ignore.add(scratch2);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
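// Note: isCallable (and isConstructor below) answer directly from the
// object's class for ordinary objects; proxies have to consult their
// handler, so that case falls through to a C++ helper called through a raw
// ABI call, saving every volatile register except the one receiving the
// boolean result.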
bool CacheIRCompiler::emitIsConstructorResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register obj = allocator.useRegister(masm, objId);

  Label isProxy, done;
  masm.isConstructor(obj, scratch, &isProxy);
  masm.jump(&done);

  masm.bind(&isProxy);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject* obj);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, ObjectIsConstructor>();
    masm.storeCallBoolResult(scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitIsCrossRealmArrayConstructorResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  masm.setIsCrossRealmArrayConstructor(obj, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitArrayBufferViewByteOffsetInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitArrayBufferViewByteOffsetDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ScratchDoubleScope fpscratch(masm);
  masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
  masm.convertIntPtrToDouble(scratch, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}
bool CacheIRCompiler::emitTypedArrayByteLengthInt32Result(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
  masm.typedArrayElementSize(obj, scratch2);

  masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
                   failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitTypedArrayByteLengthDoubleResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  masm.typedArrayElementSize(obj, scratch2);
  masm.mulPtr(scratch2, scratch1);

  ScratchDoubleScope fpscratch(masm);
  masm.convertIntPtrToDouble(scratch1, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}

bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  masm.typedArrayElementSize(obj, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfHasDetachedArrayBuffer(obj, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitIsTypedArrayConstructorResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  masm.setIsDefinitelyTypedArrayConstructor(obj, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitGetNextMapSetEntryForIteratorResult(
    ObjOperandId iterId, ObjOperandId resultArrId, bool isMap) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register iter = allocator.useRegister(masm, iterId);
  Register resultArr = allocator.useRegister(masm, resultArrId);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                       liveVolatileFloatRegs());
  save.takeUnchecked(output.valueReg());
  save.takeUnchecked(scratch);
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(iter);
  masm.passABIArg(resultArr);
  if (isMap) {
    using Fn = bool (*)(MapIteratorObject* iter, ArrayObject* resultPairObj);
    masm.callWithABI<Fn, MapIteratorObject::next>();
  } else {
    using Fn = bool (*)(SetIteratorObject* iter, ArrayObject* resultObj);
    masm.callWithABI<Fn, SetIteratorObject::next>();
  }
  masm.storeCallBoolResult(scratch);

  masm.PopRegsInMask(save);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
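// Note: |isMap| is baked into the stub at compile time, so the branch above
// selects between MapIteratorObject::next and SetIteratorObject::next while
// the stub is being compiled; no run-time type check is emitted.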
void CacheIRCompiler::emitActivateIterator(Register objBeingIterated,
                                           Register iterObject,
                                           Register nativeIter,
                                           Register scratch, Register scratch2,
                                           uint32_t enumeratorsAddrOffset) {
  // 'objectBeingIterated_' must be nullptr, so we don't need a pre-barrier.
  Address iterObjAddr(nativeIter,
                      NativeIterator::offsetOfObjectBeingIterated());
#ifdef DEBUG
  Label ok;
  masm.branchPtr(Assembler::Equal, iterObjAddr, ImmPtr(nullptr), &ok);
  masm.assumeUnreachable("iterator with non-null object");
  masm.bind(&ok);
#endif

  // Mark iterator as active.
  Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
  masm.storePtr(objBeingIterated, iterObjAddr);
  masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);

  // Post-write barrier for stores to 'objectBeingIterated_'.
  emitPostBarrierSlot(
      iterObject,
      TypedOrValueRegister(MIRType::Object, AnyRegister(objBeingIterated)),
      scratch);

  // Chain onto the active iterator stack.
  StubFieldOffset enumeratorsAddr(enumeratorsAddrOffset,
                                  StubField::Type::RawPointer);
  emitLoadStubField(enumeratorsAddr, scratch);
  masm.registerIterator(scratch, nativeIter, scratch2);
}
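// Note: activating an iterator publishes an edge from iterator data to the
// possibly-nursery object being iterated, which is why a post-write barrier
// is required, while the pre-barrier can be skipped because the previous
// value is asserted to be null above.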
bool CacheIRCompiler::emitObjectToIteratorResult(
    ObjOperandId objId, uint32_t enumeratorsAddrOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister iterObj(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, callvm.output());
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, callvm.output());

  Label callVM, done;
  masm.maybeLoadIteratorFromShape(obj, iterObj, scratch, scratch2, scratch3,
                                  &callVM);

  masm.loadPrivate(
      Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
      scratch);

  emitActivateIterator(obj, iterObj, scratch, scratch2, scratch3,
                       enumeratorsAddrOffset);
  masm.jump(&done);

  masm.bind(&callVM);
  callvm.prepare();
  masm.Push(obj);

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
  callvm.call<Fn, GetIterator>();
  masm.storeCallPointerResult(iterObj);

  masm.bind(&done);
  EmitStoreResult(masm, iterObj, JSVAL_TYPE_OBJECT, callvm.output());
  return true;
}
bool CacheIRCompiler::emitValueToIteratorResult(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  ValueOperand val = allocator.useValueRegister(masm, valId);

  callvm.prepare();
  masm.Push(val);

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
  callvm.call<Fn, ValueToIterator>();
  return true;
}

bool CacheIRCompiler::emitNewArrayIteratorResult(
    uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  callvm.prepare();

  using Fn = ArrayIteratorObject* (*)(JSContext*);
  callvm.call<Fn, NewArrayIterator>();
  return true;
}

bool CacheIRCompiler::emitNewStringIteratorResult(
    uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  callvm.prepare();

  using Fn = StringIteratorObject* (*)(JSContext*);
  callvm.call<Fn, NewStringIterator>();
  return true;
}

bool CacheIRCompiler::emitNewRegExpStringIteratorResult(
    uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  callvm.prepare();

  using Fn = RegExpStringIteratorObject* (*)(JSContext*);
  callvm.call<Fn, NewRegExpStringIterator>();
  return true;
}
bool CacheIRCompiler::emitObjectCreateResult(uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);

  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(scratch.get());

  using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
  callvm.call<Fn, ObjectCreateWithTemplate>();
  return true;
}

bool CacheIRCompiler::emitObjectKeysResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  Register obj = allocator.useRegister(masm, objId);

  // Our goal is only to record calls to Object.keys, to elide it when
  // partially used, not to provide an alternative implementation.
  {
    callvm.prepare();
    masm.Push(obj);

    using Fn = JSObject* (*)(JSContext*, HandleObject);
    callvm.call<Fn, jit::ObjectKeys>();
  }

  return true;
}
bool CacheIRCompiler::emitNewArrayFromLengthResult(
    uint32_t templateObjectOffset, Int32OperandId lengthId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);
  Register length = allocator.useRegister(masm, lengthId);

  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(length);
  masm.Push(scratch.get());

  using Fn =
      ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
  callvm.call<Fn, ArrayConstructorOneArg>();
  return true;
}

bool CacheIRCompiler::emitNewTypedArrayFromLengthResult(
    uint32_t templateObjectOffset, Int32OperandId lengthId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);
  Register length = allocator.useRegister(masm, lengthId);

  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(length);
  masm.Push(scratch.get());

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
  callvm.call<Fn, NewTypedArrayWithTemplateAndLength>();
  return true;
}
bool CacheIRCompiler::emitNewTypedArrayFromArrayBufferResult(
    uint32_t templateObjectOffset, ObjOperandId bufferId,
    ValOperandId byteOffsetId, ValOperandId lengthId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

#ifdef JS_CODEGEN_X86
  MOZ_CRASH("Instruction not supported on 32-bit x86, not enough registers");
#else
  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);
  Register buffer = allocator.useRegister(masm, bufferId);
  ValueOperand byteOffset = allocator.useValueRegister(masm, byteOffsetId);
  ValueOperand length = allocator.useValueRegister(masm, lengthId);

  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(length);
  masm.Push(byteOffset);
  masm.Push(buffer);
  masm.Push(scratch.get());

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
                                   HandleValue, HandleValue);
  callvm.call<Fn, NewTypedArrayWithTemplateAndBuffer>();
#endif
  return true;
}

bool CacheIRCompiler::emitNewTypedArrayFromArrayResult(
    uint32_t templateObjectOffset, ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);
  Register array = allocator.useRegister(masm, arrayId);

  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(array);
  masm.Push(scratch.get());

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
  callvm.call<Fn, NewTypedArrayWithTemplateAndArray>();
  return true;
}
bool CacheIRCompiler::emitAddSlotAndCallAddPropHook(ObjOperandId objId,
                                                    ValOperandId rhsId,
                                                    uint32_t newShapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand rhs = allocator.useValueRegister(masm, rhsId);

  StubFieldOffset shapeField(newShapeOffset, StubField::Type::Shape);
  emitLoadStubField(shapeField, scratch);

  callvm.prepare();

  masm.Push(scratch.get());
  masm.Push(rhs);
  masm.Push(obj);

  using Fn =
      bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
  callvm.callNoResult<Fn, AddSlotAndCallAddPropHook>();
  return true;
}
bool CacheIRCompiler::emitMathAbsInt32Result(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register input = allocator.useRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(input, scratch);
  // Don't negate already positive values.
  Label positive;
  masm.branchTest32(Assembler::NotSigned, scratch, scratch, &positive);
  // neg32 might overflow for INT_MIN.
  masm.branchNeg32(Assembler::Overflow, scratch, failure->label());
  masm.bind(&positive);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
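// Note: the overflow check matters for exactly one input: INT32_MIN, whose
// absolute value (2147483648) is not representable as an int32. That case
// bails to the failure path instead of returning a wrong (negative) result.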
bool CacheIRCompiler::emitMathAbsNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  masm.absDouble(scratch, scratch);
  masm.boxDouble(scratch, output.valueReg(), scratch);
  return true;
}

bool CacheIRCompiler::emitMathClz32Result(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register input = allocator.useRegister(masm, inputId);

  masm.clz32(input, scratch, /* knownNotZero = */ false);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitMathSignInt32Result(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register input = allocator.useRegister(masm, inputId);

  masm.signInt32(input, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathSignNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, inputId, floatScratch1);

  masm.signDouble(floatScratch1, floatScratch2);
  masm.boxDouble(floatScratch2, output.valueReg(), floatScratch2);
  return true;
}

bool CacheIRCompiler::emitMathSignNumberToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, floatScratch1);

  masm.signDoubleToInt32(floatScratch1, scratch, floatScratch2,
                         failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitMathImulResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  masm.mov(lhs, scratch);
  masm.mul32(rhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathSqrtNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  masm.sqrtDouble(scratch, scratch);
  masm.boxDouble(scratch, output.valueReg(), scratch);
  return true;
}
bool CacheIRCompiler::emitMathFloorNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::Down)) {
    masm.nearbyIntDouble(RoundingMode::Down, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Floor, scratch,
                                            output.valueReg());
}

bool CacheIRCompiler::emitMathCeilNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::Up)) {
    masm.nearbyIntDouble(RoundingMode::Up, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Ceil, scratch,
                                            output.valueReg());
}

bool CacheIRCompiler::emitMathTruncNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::TowardsZero)) {
    masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Trunc, scratch,
                                            output.valueReg());
}

bool CacheIRCompiler::emitMathFRoundNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);
  FloatRegister scratchFloat32 = scratch.get().asSingle();

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  masm.convertDoubleToFloat32(scratch, scratchFloat32);
  masm.convertFloat32ToDouble(scratchFloat32, scratch);

  masm.boxDouble(scratch, output.valueReg(), scratch);
  return true;
}
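// Note: Math.fround is implemented as a double -> float32 -> double round
// trip: the narrowing conversion performs the required rounding to float32
// precision, and widening back lets the result stay boxed as an ordinary
// double.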
bool CacheIRCompiler::emitMathHypot2NumberResult(NumberOperandId first,
                                                 NumberOperandId second) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                       liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);

  masm.callWithABI<Fn, ecmaHypot>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}

bool CacheIRCompiler::emitMathHypot3NumberResult(NumberOperandId first,
                                                 NumberOperandId second,
                                                 NumberOperandId third) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);
  allocator.ensureDoubleRegister(masm, third, floatScratch2);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                       liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y, double z);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch2, MoveOp::DOUBLE);

  masm.callWithABI<Fn, hypot3>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}

bool CacheIRCompiler::emitMathHypot4NumberResult(NumberOperandId first,
                                                 NumberOperandId second,
                                                 NumberOperandId third,
                                                 NumberOperandId fourth) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
  AutoAvailableFloatRegister floatScratch3(*this, FloatReg3);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);
  allocator.ensureDoubleRegister(masm, third, floatScratch2);
  allocator.ensureDoubleRegister(masm, fourth, floatScratch3);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                       liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y, double z, double w);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch2, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch3, MoveOp::DOUBLE);

  masm.callWithABI<Fn, hypot4>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}
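// Note: the hypot emitters share one shape: the operands are materialized
// into float scratch registers, all volatile registers except the result are
// saved around the C++ call, and MoveOp::DOUBLE tells the ABI-call machinery
// to pass each argument (and fetch the result) per the platform's
// floating-point calling convention.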
bool CacheIRCompiler::emitMathAtan2NumberResult(NumberOperandId yId,
                                                NumberOperandId xId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, yId, floatScratch0);
  allocator.ensureDoubleRegister(masm, xId, floatScratch1);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                       liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.callWithABI<Fn, js::ecmaAtan2>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}
bool CacheIRCompiler::emitMathFloorToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.floorDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathCeilToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.ceilDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathTruncToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.truncDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathRoundToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat0(*this, FloatReg0);
  AutoAvailableFloatRegister scratchFloat1(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat0);

  masm.roundDoubleToInt32(scratchFloat0, scratch, scratchFloat1,
                          failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitInt32MinMax(bool isMax, Int32OperandId firstId,
                                      Int32OperandId secondId,
                                      Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register first = allocator.useRegister(masm, firstId);
  Register second = allocator.useRegister(masm, secondId);
  Register result = allocator.defineRegister(masm, resultId);

  Assembler::Condition cond =
      isMax ? Assembler::GreaterThan : Assembler::LessThan;
  masm.move32(first, result);
  masm.cmp32Move32(cond, second, first, second, result);

  return true;
}

bool CacheIRCompiler::emitNumberMinMax(bool isMax, NumberOperandId firstId,
                                       NumberOperandId secondId,
                                       NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  AutoAvailableFloatRegister scratch1(*this, FloatReg0);
  AutoAvailableFloatRegister scratch2(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, firstId, scratch1);
  allocator.ensureDoubleRegister(masm, secondId, scratch2);

  if (isMax) {
    masm.maxDouble(scratch2, scratch1, /* handleNaN = */ true);
  } else {
    masm.minDouble(scratch2, scratch1, /* handleNaN = */ true);
  }

  masm.boxDouble(scratch1, output, scratch1);
  return true;
}

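// Note on the |handleNaN = true| flag above: it makes the emitted min/max
// sequence return NaN whenever either operand is NaN, matching JS semantics
// where e.g. Math.max(NaN, 1) is NaN, rather than the raw behavior of the
// hardware min/max instructions.
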
bool CacheIRCompiler::emitInt32MinMaxArrayResult(ObjOperandId arrayId,
                                                 bool isMax) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);
  AutoScratchRegisterMaybeOutput result(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.minMaxArrayInt32(array, result, scratch, scratch2, scratch3, isMax,
                        failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, result, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitNumberMinMaxArrayResult(ObjOperandId arrayId,
                                                  bool isMax) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);

  AutoAvailableFloatRegister result(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch(*this, FloatReg1);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.minMaxArrayNumber(array, result, floatScratch, scratch1, scratch2,
                         isMax, failure->label());
  masm.boxDouble(result, output.valueReg(), result);
  return true;
}

bool CacheIRCompiler::emitMathFunctionNumberResultShared(
    UnaryMathFunction fun, FloatRegister inputScratch, ValueOperand output) {
  UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(inputScratch);
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(output.scratchReg());
  masm.passABIArg(inputScratch, MoveOp::DOUBLE);
  masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
                   MoveOp::DOUBLE);
  masm.storeCallFloatResult(inputScratch);

  masm.PopRegsInMask(save);

  masm.boxDouble(inputScratch, output, inputScratch);
  return true;
}

bool CacheIRCompiler::emitMathFunctionNumberResult(NumberOperandId inputId,
                                                   UnaryMathFunction fun) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  return emitMathFunctionNumberResultShared(fun, scratch, output.valueReg());
}

static void EmitStoreDenseElement(MacroAssembler& masm,
                                  const ConstantOrRegister& value,
                                  BaseObjectElementIndex target) {
  if (value.constant()) {
    Value v = value.value();
    masm.storeValue(v, target);
    return;
  }

  TypedOrValueRegister reg = value.reg();
  masm.storeTypedOrValue(reg, target);
}

bool CacheIRCompiler::emitStoreDenseElement(ObjOperandId objId,
                                            Int32OperandId indexId,
                                            ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Bounds check. Unfortunately we don't have more registers available on
  // x86, so use InvalidReg and emit slightly slower code on x86.
  Register spectreTemp = InvalidReg;
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());

  // Perform the store.
  EmitPreBarrier(masm, element, MIRType::Value);
  EmitStoreDenseElement(masm, val, element);

  emitPostBarrierElement(obj, val, scratch, index);
  return true;
}

static void EmitAssertExtensibleElements(MacroAssembler& masm,
                                         Register elementsReg) {
#ifdef DEBUG
  // Preceding shape guards ensure the object elements are extensible.
  Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
  Label ok;
  masm.branchTest32(Assembler::Zero, elementsFlags,
                    Imm32(ObjectElements::Flags::NOT_EXTENSIBLE), &ok);
  masm.assumeUnreachable("Unexpected non-extensible elements");
  masm.bind(&ok);
#endif
}

static void EmitAssertWritableArrayLengthElements(MacroAssembler& masm,
                                                  Register elementsReg) {
#ifdef DEBUG
  // Preceding shape guards ensure the array length is writable.
  Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
  Label ok;
  masm.branchTest32(Assembler::Zero, elementsFlags,
                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
                    &ok);
  masm.assumeUnreachable("Unexpected non-writable array length elements");
  masm.bind(&ok);
#endif
}

bool CacheIRCompiler::emitStoreDenseElementHole(ObjOperandId objId,
                                                Int32OperandId indexId,
                                                ValOperandId rhsId,
                                                bool handleAdd) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  EmitAssertExtensibleElements(masm, scratch);
  if (handleAdd) {
    EmitAssertWritableArrayLengthElements(masm, scratch);
  }

  BaseObjectElementIndex element(scratch, index);
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  Address elementsFlags(scratch, ObjectElements::offsetOfFlags());

  // We don't have enough registers on x86 so use InvalidReg. This will emit
  // slightly less efficient code on x86.
  Register spectreTemp = InvalidReg;

  Label storeSkipPreBarrier;
  if (handleAdd) {
    // Bounds check.
    Label inBounds, outOfBounds;
    masm.spectreBoundsCheck32(index, initLength, spectreTemp, &outOfBounds);
    masm.jump(&inBounds);

    // If we're out-of-bounds, only handle the index == initLength case.
    masm.bind(&outOfBounds);
    masm.branch32(Assembler::NotEqual, initLength, index, failure->label());

    // If index < capacity, we can add a dense element inline. If not we
    // need to allocate more elements.
    Label allocElement, addNewElement;
    Address capacity(scratch, ObjectElements::offsetOfCapacity());
    masm.spectreBoundsCheck32(index, capacity, spectreTemp, &allocElement);
    masm.jump(&addNewElement);

    masm.bind(&allocElement);

    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    save.takeUnchecked(scratch);
    masm.PushRegsInMask(save);

    using Fn = bool (*)(JSContext* cx, NativeObject* obj);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
    masm.storeCallPointerResult(scratch);

    masm.PopRegsInMask(save);
    masm.branchIfFalseBool(scratch, failure->label());

    // Load the reallocated elements pointer.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    masm.bind(&addNewElement);

    // Increment initLength.
    masm.add32(Imm32(1), initLength);

    // If length is now <= index, increment length too.
    Label skipIncrementLength;
    Address length(scratch, ObjectElements::offsetOfLength());
    masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
    masm.add32(Imm32(1), length);
    masm.bind(&skipIncrementLength);

    // Skip EmitPreBarrier as the memory is uninitialized.
    masm.jump(&storeSkipPreBarrier);

    masm.bind(&inBounds);
  } else {
    // Fail if index >= initLength.
    masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());
  }

  EmitPreBarrier(masm, element, MIRType::Value);

  masm.bind(&storeSkipPreBarrier);
  EmitStoreDenseElement(masm, val, element);

  emitPostBarrierElement(obj, val, scratch, index);
  return true;
}

bool CacheIRCompiler::emitArrayPush(ObjOperandId objId, ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratchLength(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  EmitAssertExtensibleElements(masm, scratch);
  EmitAssertWritableArrayLengthElements(masm, scratch);

  Address elementsInitLength(scratch,
                             ObjectElements::offsetOfInitializedLength());
  Address elementsLength(scratch, ObjectElements::offsetOfLength());
  Address capacity(scratch, ObjectElements::offsetOfCapacity());

  // Fail if length != initLength.
  masm.load32(elementsInitLength, scratchLength);
  masm.branch32(Assembler::NotEqual, elementsLength, scratchLength,
                failure->label());

  // If scratchLength < capacity, we can add a dense element inline. If not we
  // need to allocate more elements.
  Label allocElement, addNewElement;
  masm.spectreBoundsCheck32(scratchLength, capacity, InvalidReg, &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(scratch);
  masm.PushRegsInMask(save);

  using Fn = bool (*)(JSContext* cx, NativeObject* obj);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(scratch);

  masm.PopRegsInMask(save);
  masm.branchIfFalseBool(scratch, failure->label());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  masm.bind(&addNewElement);

  // Increment initLength and length.
  masm.add32(Imm32(1), elementsInitLength);
  masm.add32(Imm32(1), elementsLength);

  // Store the value.
  BaseObjectElementIndex element(scratch, scratchLength);
  masm.storeValue(val, element);
  emitPostBarrierElement(obj, val, scratch, scratchLength);

  // Return value is new length.
  masm.add32(Imm32(1), scratchLength);
  masm.tagValue(JSVAL_TYPE_INT32, scratchLength, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
                                                 Scalar::Type elementType,
                                                 IntPtrOperandId indexId,
                                                 uint32_t rhsId,
                                                 bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  Maybe<Register> valInt32;
  Maybe<Register> valBigInt;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Uint8Clamped:
      valInt32.emplace(allocator.useRegister(masm, Int32OperandId(rhsId)));
      break;

    case Scalar::Float32:
    case Scalar::Float64:
      allocator.ensureDoubleRegister(masm, NumberOperandId(rhsId),
                                     floatScratch0);
      break;

    case Scalar::BigInt64:
    case Scalar::BigUint64:
      valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(rhsId)));
      break;

    case Scalar::MaxTypedArrayViewType:
    case Scalar::Int64:
    case Scalar::Simd128:
      MOZ_CRASH("Unsupported TypedArray type");
  }

  AutoScratchRegister scratch1(allocator, masm);
  Maybe<AutoScratchRegister> scratch2;
  Maybe<AutoSpectreBoundsScratchRegister> spectreScratch;
  if (Scalar::isBigIntType(elementType)) {
    scratch2.emplace(allocator, masm);
  } else {
    spectreScratch.emplace(allocator, masm);
  }

  FailurePath* failure = nullptr;
  if (!handleOOB) {
    if (!addFailurePath(&failure)) {
      return false;
    }
  }

  // Bounds check.
  Label done;
  Register spectreTemp = scratch2 ? scratch2->get() : spectreScratch->get();
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  masm.spectreBoundsCheckPtr(index, scratch1, spectreTemp,
                             handleOOB ? &done : failure->label());

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);

  BaseIndex dest(scratch1, index, ScaleFromScalarType(elementType));

  if (Scalar::isBigIntType(elementType)) {
#ifdef JS_PUNBOX64
    Register64 temp(scratch2->get());
#else
    // We don't have more registers available on x86, so spill |obj|.
    masm.push(obj);
    Register64 temp(scratch2->get(), obj);
#endif

    masm.loadBigInt64(*valBigInt, temp);
    masm.storeToTypedBigIntArray(elementType, temp, dest);

#ifndef JS_PUNBOX64
    masm.pop(obj);
#endif
  } else if (elementType == Scalar::Float32) {
    ScratchFloat32Scope fpscratch(masm);
    masm.convertDoubleToFloat32(floatScratch0, fpscratch);
    masm.storeToTypedFloatArray(elementType, fpscratch, dest);
  } else if (elementType == Scalar::Float64) {
    masm.storeToTypedFloatArray(elementType, floatScratch0, dest);
  } else {
    masm.storeToTypedIntArray(elementType, *valInt32, dest);
  }

  masm.bind(&done);
  return true;
}

static gc::Heap InitialBigIntHeap(JSContext* cx) {
  JS::Zone* zone = cx->zone();
  return zone->allocNurseryBigInts() ? gc::Heap::Default : gc::Heap::Tenured;
}

static void EmitAllocateBigInt(MacroAssembler& masm, Register result,
                               Register temp, const LiveRegisterSet& liveSet,
                               gc::Heap initialHeap, Label* fail) {
  Label fallback, done;
  masm.newGCBigInt(result, temp, initialHeap, &fallback);
  masm.jump(&done);

  masm.bind(&fallback);

  // Request a minor collection at a later time if nursery allocation failed.
  bool requestMinorGC = initialHeap == gc::Heap::Default;

  masm.PushRegsInMask(liveSet);
  using Fn = void* (*)(JSContext* cx, bool requestMinorGC);
  masm.setupUnalignedABICall(temp);
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.move32(Imm32(requestMinorGC), result);
  masm.passABIArg(result);
  masm.callWithABI<Fn, jit::AllocateBigIntNoGC>();
  masm.storeCallPointerResult(result);

  masm.PopRegsInMask(liveSet);
  masm.branchPtr(Assembler::Equal, result, ImmWord(0), fail);

  masm.bind(&done);
}

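// The allocation above is a two-tier pattern: newGCBigInt is the inline
// bump allocation, and the fallback calls jit::AllocateBigIntNoGC through
// the ABI with |liveSet| saved and restored around the call. Passing
// |requestMinorGC| (true only for nursery-initial allocations) schedules a
// minor collection for later instead of running one here, and a null result
// from the fallback branches to |fail|.
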
bool CacheIRCompiler::emitLoadTypedArrayElementResult(
    ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
    bool handleOOB, bool forceDoubleForUint32) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoScratchRegister scratch1(allocator, masm);
#ifdef JS_PUNBOX64
  AutoScratchRegister scratch2(allocator, masm);
#else
  // There are too few registers available on x86, so we may need to reuse the
  // output's scratch register.
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  Label outOfBounds;
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  masm.spectreBoundsCheckPtr(index, scratch1, scratch2,
                             handleOOB ? &outOfBounds : failure->label());

  // Allocate BigInt if needed. The code after this should be infallible.
  Maybe<Register> bigInt;
  if (Scalar::isBigIntType(elementType)) {
    bigInt.emplace(output.valueReg().scratchReg());

    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    save.takeUnchecked(scratch1);
    save.takeUnchecked(scratch2);
    save.takeUnchecked(output);

    gc::Heap initialHeap = InitialBigIntHeap(cx_);
    EmitAllocateBigInt(masm, *bigInt, scratch1, save, initialHeap,
                       failure->label());
  }

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);

  // Load the value.
  BaseIndex source(scratch1, index, ScaleFromScalarType(elementType));

  if (Scalar::isBigIntType(elementType)) {
#ifdef JS_PUNBOX64
    Register64 temp(scratch2);
#else
    // We don't have more registers available on x86, so spill |obj| and
    // additionally use the output's type register.
    MOZ_ASSERT(output.valueReg().scratchReg() != output.valueReg().typeReg());
    masm.push(obj);
    Register64 temp(output.valueReg().typeReg(), obj);
#endif

    masm.loadFromTypedBigIntArray(elementType, source, *bigInt, temp);

#ifndef JS_PUNBOX64
    masm.pop(obj);
#endif

    masm.tagValue(JSVAL_TYPE_BIGINT, *bigInt, output.valueReg());
  } else {
    MacroAssembler::Uint32Mode uint32Mode =
        forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
                             : MacroAssembler::Uint32Mode::FailOnDouble;
    masm.loadFromTypedArray(elementType, source, output.valueReg(), uint32Mode,
                            scratch1, failure->label());
  }

  if (handleOOB) {
    Label done;
    masm.jump(&done);

    masm.bind(&outOfBounds);
    masm.moveValue(UndefinedValue(), output.valueReg());

    masm.bind(&done);
  }

  return true;
}

static void EmitDataViewBoundsCheck(MacroAssembler& masm, size_t byteSize,
                                    Register obj, Register offset,
                                    Register scratch, Label* fail) {
  // Ensure both offset < length and offset + (byteSize - 1) < length.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  if (byteSize == 1) {
    masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
  } else {
    // temp := length - (byteSize - 1)
    // if temp < 0: fail
    // if offset >= temp: fail
    masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), scratch, fail);
    masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
  }
}

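// Worked example for the multi-byte case above: for a 4-byte access with
// length == 10, scratch becomes 10 - 3 = 7, so offsets 0..6 pass and 7..9
// fail; offset 6 is the largest for which offset + 3 <= 9 still stays in
// bounds. The branchSubPtr catches length < byteSize - 1, where the
// subtraction goes negative and every offset must fail.
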
bool CacheIRCompiler::emitLoadDataViewValueResult(
    ObjOperandId objId, IntPtrOperandId offsetId,
    BooleanOperandId littleEndianId, Scalar::Type elementType,
    bool forceDoubleForUint32) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register offset = allocator.useRegister(masm, offsetId);
  Register littleEndian = allocator.useRegister(masm, littleEndianId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  Register64 outputReg64 = output.valueReg().toRegister64();
  Register outputScratch = outputReg64.scratchReg();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  const size_t byteSize = Scalar::byteSize(elementType);

  EmitDataViewBoundsCheck(masm, byteSize, obj, offset, outputScratch,
                          failure->label());

  masm.loadPtr(Address(obj, DataViewObject::dataOffset()), outputScratch);

  // Load the value.
  BaseIndex source(outputScratch, offset, TimesOne);
  switch (elementType) {
    case Scalar::Int8:
      masm.load8SignExtend(source, outputScratch);
      break;
    case Scalar::Uint8:
      masm.load8ZeroExtend(source, outputScratch);
      break;
    case Scalar::Int16:
      masm.load16UnalignedSignExtend(source, outputScratch);
      break;
    case Scalar::Uint16:
      masm.load16UnalignedZeroExtend(source, outputScratch);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.load32Unaligned(source, outputScratch);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.load64Unaligned(source, outputReg64);
      break;
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  // Swap the bytes in the loaded value.
  if (byteSize > 1) {
    Label skip;
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  littleEndian, Imm32(0), &skip);

    switch (elementType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(outputScratch);
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(outputScratch);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
        masm.byteSwap32(outputScratch);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(outputReg64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid type");
    }

    masm.bind(&skip);
  }

  // Move the value into the output register.
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      masm.tagValue(JSVAL_TYPE_INT32, outputScratch, output.valueReg());
      break;
    case Scalar::Uint32: {
      MacroAssembler::Uint32Mode uint32Mode =
          forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
                               : MacroAssembler::Uint32Mode::FailOnDouble;
      masm.boxUint32(outputScratch, output.valueReg(), uint32Mode,
                     failure->label());
      break;
    }
    case Scalar::Float32: {
      FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
      masm.moveGPRToFloat32(outputScratch, scratchFloat32);
      masm.canonicalizeFloat(scratchFloat32);
      masm.convertFloat32ToDouble(scratchFloat32, floatScratch0);
      masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
      break;
    }
    case Scalar::Float64:
      masm.moveGPR64ToDouble(outputReg64, floatScratch0);
      masm.canonicalizeDouble(floatScratch0);
      masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64: {
      // We need two extra registers. Reuse the obj/littleEndian registers.
      Register bigInt = obj;
      Register bigIntScratch = littleEndian;
      masm.push(bigIntScratch);

      Label fail, done;
      LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                           liveVolatileFloatRegs());
      save.takeUnchecked(bigInt);
      save.takeUnchecked(bigIntScratch);
      gc::Heap initialHeap = InitialBigIntHeap(cx_);
      EmitAllocateBigInt(masm, bigInt, bigIntScratch, save, initialHeap, &fail);
      masm.jump(&done);

      masm.bind(&fail);
      masm.pop(bigIntScratch);
      masm.jump(failure->label());

      masm.bind(&done);
      masm.initializeBigInt64(elementType, bigInt, outputReg64);
      masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
      masm.pop(bigIntScratch);
      break;
    }
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  return true;
}

bool CacheIRCompiler::emitStoreDataViewValueResult(
    ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
    BooleanOperandId littleEndianId, Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
#ifdef JS_CODEGEN_X86
  // Use a scratch register to avoid running out of the registers.
  Register obj = output.valueReg().typeReg();
  allocator.copyToScratchRegister(masm, objId, obj);
#else
  Register obj = allocator.useRegister(masm, objId);
#endif
  Register offset = allocator.useRegister(masm, offsetId);
  Register littleEndian = allocator.useRegister(masm, littleEndianId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  Maybe<Register> valInt32;
  Maybe<Register> valBigInt;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Uint8Clamped:
      valInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
      break;

    case Scalar::Float32:
    case Scalar::Float64:
      allocator.ensureDoubleRegister(masm, NumberOperandId(valueId),
                                     floatScratch0);
      break;

    case Scalar::BigInt64:
    case Scalar::BigUint64:
      valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
      break;

    case Scalar::MaxTypedArrayViewType:
    case Scalar::Int64:
    case Scalar::Simd128:
      MOZ_CRASH("Unsupported type");
  }

  Register scratch1 = output.valueReg().scratchReg();
  MOZ_ASSERT(scratch1 != obj, "scratchReg must not be typeReg");

  // On platforms with enough registers, |scratch2| is an extra scratch
  // register (pair) used for byte-swapping the value.
#ifndef JS_CODEGEN_X86
  mozilla::MaybeOneOf<AutoScratchRegister, AutoScratchRegister64> scratch2;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
      break;
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      scratch2.construct<AutoScratchRegister>(allocator, masm);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      scratch2.construct<AutoScratchRegister64>(allocator, masm);
      break;
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  const size_t byteSize = Scalar::byteSize(elementType);

  EmitDataViewBoundsCheck(masm, byteSize, obj, offset, scratch1,
                          failure->label());

  masm.loadPtr(Address(obj, DataViewObject::dataOffset()), scratch1);
  BaseIndex dest(scratch1, offset, TimesOne);

  if (byteSize == 1) {
    // Byte swapping has no effect, so just do the byte store.
    masm.store8(*valInt32, dest);
    masm.moveValue(UndefinedValue(), output.valueReg());
    return true;
  }

  // On 32-bit x86, |obj| is already a scratch register so use that. If we need
  // a Register64 we also use the littleEndian register and use the stack
  // location for the check below.
  bool pushedLittleEndian = false;
#ifdef JS_CODEGEN_X86
  if (byteSize == 8) {
    masm.push(littleEndian);
    pushedLittleEndian = true;
  }
  auto valScratch32 = [&]() -> Register { return obj; };
  auto valScratch64 = [&]() -> Register64 {
    return Register64(obj, littleEndian);
  };
#else
  auto valScratch32 = [&]() -> Register {
    return scratch2.ref<AutoScratchRegister>();
  };
  auto valScratch64 = [&]() -> Register64 {
    return scratch2.ref<AutoScratchRegister64>();
  };
#endif

  // Load the value into a gpr register.
  switch (elementType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      masm.move32(*valInt32, valScratch32());
      break;
    case Scalar::Float32: {
      FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
      masm.convertDoubleToFloat32(floatScratch0, scratchFloat32);
      masm.canonicalizeFloatIfDeterministic(scratchFloat32);
      masm.moveFloat32ToGPR(scratchFloat32, valScratch32());
      break;
    }
    case Scalar::Float64: {
      masm.canonicalizeDoubleIfDeterministic(floatScratch0);
      masm.moveDoubleToGPR64(floatScratch0, valScratch64());
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.loadBigInt64(*valBigInt, valScratch64());
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }

  // Swap the bytes in the loaded value.
  Label skip;
  if (pushedLittleEndian) {
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  Address(masm.getStackPointer(), 0), Imm32(0), &skip);
  } else {
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  littleEndian, Imm32(0), &skip);
  }
  switch (elementType) {
    case Scalar::Int16:
      masm.byteSwap16SignExtend(valScratch32());
      break;
    case Scalar::Uint16:
      masm.byteSwap16ZeroExtend(valScratch32());
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.byteSwap32(valScratch32());
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.byteSwap64(valScratch64());
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }
  masm.bind(&skip);

  // Store the value.
  switch (elementType) {
    case Scalar::Int16:
    case Scalar::Uint16:
      masm.store16Unaligned(valScratch32(), dest);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.store32Unaligned(valScratch32(), dest);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.store64Unaligned(valScratch64(), dest);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

#ifdef JS_CODEGEN_X86
  // Restore registers.
  if (pushedLittleEndian) {
    masm.pop(littleEndian);
  }
#endif

  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}

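// Byte-swap example for the store above: on a little-endian host, storing
// the Int32 0x11223344 with littleEndian == false (a big-endian DataView
// write) swaps the value to 0x44332211 before the unaligned store, so the
// bytes land in memory as 11 22 33 44 in increasing address order, which is
// the big-endian encoding the caller requested.
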
bool CacheIRCompiler::emitStoreFixedSlotUndefinedResult(ObjOperandId objId,
                                                        uint32_t offsetOffset,
                                                        ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(offset, scratch);

  BaseIndex slot(obj, scratch, TimesOne);
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeValue(val, slot);
  emitPostBarrierSlot(obj, val, scratch);

  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadObjectResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);

  EmitStoreResult(masm, obj, JSVAL_TYPE_OBJECT, output);

  return true;
}

bool CacheIRCompiler::emitLoadStringResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);

  masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadSymbolResult(SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register sym = allocator.useRegister(masm, symId);

  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadInt32Result(Int32OperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, valId);

  masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadBigIntResult(BigIntOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, valId);

  masm.tagValue(JSVAL_TYPE_BIGINT, val, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadDoubleResult(NumberOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand val = allocator.useValueRegister(masm, valId);

#ifdef DEBUG
  Label ok;
  masm.branchTestDouble(Assembler::Equal, val, &ok);
  masm.branchTestInt32(Assembler::Equal, val, &ok);
  masm.assumeUnreachable("input must be double or int32");
  masm.bind(&ok);
#endif

  masm.moveValue(val, output.valueReg());
  masm.convertInt32ValueToDouble(output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadTypeOfObjectResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label slowCheck, isObject, isCallable, isUndefined, done;
  masm.typeOfObject(obj, scratch, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.moveValue(StringValue(cx_->names().function), output.valueReg());
  masm.jump(&done);

  masm.bind(&isUndefined);
  masm.moveValue(StringValue(cx_->names().undefined), output.valueReg());
  masm.jump(&done);

  masm.bind(&isObject);
  masm.moveValue(StringValue(cx_->names().object), output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&slowCheck);
    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = JSString* (*)(JSObject* obj, JSRuntime* rt);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.movePtr(ImmPtr(cx_->runtime()), scratch);
    masm.passABIArg(scratch);
    masm.callWithABI<Fn, TypeOfNameObject>();
    masm.storeCallPointerResult(scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadInt32TruthyResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand val = allocator.useValueRegister(masm, inputId);

  Label ifFalse, done;
  masm.branchTestInt32Truthy(false, val, &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadStringTruthyResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);

  Label ifFalse, done;
  masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
                Imm32(0), &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadDoubleTruthyResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  Label ifFalse, done;

  masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadObjectTruthyResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label emulatesUndefined, slowPath, done;
  masm.branchIfObjectEmulatesUndefined(obj, scratch, &slowPath,
                                       &emulatesUndefined);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&emulatesUndefined);
  masm.moveValue(BooleanValue(false), output.valueReg());
  masm.jump(&done);

  masm.bind(&slowPath);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject* obj);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, js::EmulatesUndefined>();
    masm.storeCallBoolResult(scratch);
    masm.xor32(Imm32(1), scratch);

    masm.PopRegsInMask(volatileRegs);

    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadBigIntTruthyResult(BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  Label ifFalse, done;
  masm.branch32(Assembler::Equal,
                Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(0),
                &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadValueTruthyResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  ValueOperand value = allocator.useValueRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchFloatRegister floatReg(this);

  Label ifFalse, ifTrue, done;

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestUndefined(Assembler::Equal, tag, &ifFalse);
    masm.branchTestNull(Assembler::Equal, tag, &ifFalse);

    Label notBoolean;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestBooleanTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notBoolean);

    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestInt32Truthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notInt32);

    Label notObject;
    masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
    {
      ScratchTagScopeRelease _(&tag);

      Register obj = masm.extractObject(value, scratch1);

      Label slowPath;
      masm.branchIfObjectEmulatesUndefined(obj, scratch2, &slowPath, &ifFalse);
      masm.jump(&ifTrue);

      masm.bind(&slowPath);
      {
        LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                     liveVolatileFloatRegs());
        volatileRegs.takeUnchecked(scratch1);
        volatileRegs.takeUnchecked(scratch2);
        volatileRegs.takeUnchecked(output);
        masm.PushRegsInMask(volatileRegs);

        using Fn = bool (*)(JSObject* obj);
        masm.setupUnalignedABICall(scratch2);
        masm.passABIArg(obj);
        masm.callWithABI<Fn, js::EmulatesUndefined>();
        masm.storeCallPointerResult(scratch2);

        masm.PopRegsInMask(volatileRegs);

        masm.branchIfTrueBool(scratch2, &ifFalse);
        masm.jump(&ifTrue);
      }
    }
    masm.bind(&notObject);

    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestStringTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notString);

    Label notBigInt;
    masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestBigIntTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notBigInt);

    masm.branchTestSymbol(Assembler::Equal, tag, &ifTrue);

#ifdef DEBUG
    Label isDouble;
    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.assumeUnreachable("Unexpected value type");
    masm.bind(&isDouble);
#endif

    {
      ScratchTagScopeRelease _(&tag);
      masm.unboxDouble(value, floatReg);
      masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
    }

    // Fall through to true case.
  }

  masm.bind(&ifTrue);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

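// The tag dispatch above implements ToBoolean for every value type without a
// VM call except the rare emulates-undefined object case: undefined and null
// are always falsy; booleans, int32, strings, and BigInts test their payload
// (0, "", and 0n are falsy); symbols are always truthy; objects are truthy
// unless they emulate undefined (e.g. document.all); and doubles are falsy
// exactly for +0, -0, and NaN via branchTestDoubleTruthy.
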
bool CacheIRCompiler::emitComparePointerResultShared(JSOp op,
                                                     TypedOperandId lhsId,
                                                     TypedOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register left = allocator.useRegister(masm, lhsId);
  Register right = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label ifTrue, done;
  masm.branchPtr(JSOpToCondition(op, /* signed = */ true), left, right,
                 &ifTrue);

  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareObjectResult(JSOp op, ObjOperandId lhsId,
                                              ObjOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitComparePointerResultShared(op, lhsId, rhsId);
}

bool CacheIRCompiler::emitCompareSymbolResult(JSOp op, SymbolOperandId lhsId,
                                              SymbolOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitComparePointerResultShared(op, lhsId, rhsId);
}

bool CacheIRCompiler::emitCompareInt32Result(JSOp op, Int32OperandId lhsId,
                                             Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register left = allocator.useRegister(masm, lhsId);
  Register right = allocator.useRegister(masm, rhsId);

  Label ifTrue, done;
  masm.branch32(JSOpToCondition(op, /* signed = */ true), left, right, &ifTrue);

  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareDoubleResult(JSOp op, NumberOperandId lhsId,
                                              NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  Label done, ifTrue;
  masm.branchDouble(JSOpToDoubleCondition(op), floatScratch0, floatScratch1,
                    &ifTrue);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntResult(JSOp op, BigIntOperandId lhsId,
                                              BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(rhs);
    masm.passABIArg(lhs);
  } else {
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
  }

  using Fn = bool (*)(BigInt*, BigInt*);
  Fn fn;
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    fn = jit::BigIntEqual<EqualityKind::Equal>;
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    fn = jit::BigIntEqual<EqualityKind::NotEqual>;
  } else if (op == JSOp::Lt || op == JSOp::Gt) {
    fn = jit::BigIntCompare<ComparisonKind::LessThan>;
  } else {
    MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
    fn = jit::BigIntCompare<ComparisonKind::GreaterThanOrEqual>;
  }

  masm.callWithABI(DynamicFunction<Fn>(fn));
  masm.storeCallBoolResult(scratch);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
  return true;
}

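// Worked example for the operand reversal above: for JSOp::Gt the arguments
// are passed as (rhs, lhs) and the callee is BigIntCompare<LessThan>, so the
// call computes rhs < lhs, which is exactly lhs > rhs. This trick means only
// the LessThan and GreaterThanOrEqual comparison kernels (plus the two
// equality kernels) are needed to cover all six comparison ops.
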
bool CacheIRCompiler::emitCompareBigIntInt32Result(JSOp op,
                                                   BigIntOperandId lhsId,
                                                   Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register bigInt = allocator.useRegister(masm, lhsId);
  Register int32 = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  Label ifTrue, ifFalse;
  masm.compareBigIntAndInt32(op, bigInt, int32, scratch1, scratch2, &ifTrue,
                             &ifFalse);

  Label done;
  masm.bind(&ifFalse);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntNumberResult(JSOp op,
                                                    BigIntOperandId lhsId,
                                                    NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  Register lhs = allocator.useRegister(masm, lhsId);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch0);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
    masm.passABIArg(lhs);
  } else {
    masm.passABIArg(lhs);
    masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  }

  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(scratch);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntStringResult(JSOp op,
                                                    BigIntOperandId lhsId,
                                                    StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.Push(lhs);
    masm.Push(rhs);
  } else {
    masm.Push(rhs);
    masm.Push(lhs);
  }

  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);

  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callvm.call<FnBigIntString, BigIntStringEqual<Equal>>();
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callvm.call<FnBigIntString, BigIntStringEqual<NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callvm.call<FnBigIntString, BigIntStringCompare<LessThan>>();
      break;
    }
    case JSOp::Gt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callvm.call<FnStringBigInt, StringBigIntCompare<LessThan>>();
      break;
    }
    case JSOp::Le: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callvm.call<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callvm.call<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }
  return true;
}

bool CacheIRCompiler::emitCompareNullUndefinedResult(JSOp op, bool isUndefined,
                                                     ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  ValueOperand input = allocator.useValueRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  if (IsStrictEqualityOp(op)) {
    if (isUndefined) {
      masm.testUndefinedSet(JSOpToCondition(op, false), input, scratch);
    } else {
      masm.testNullSet(JSOpToCondition(op, false), input, scratch);
    }
    EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
    return true;
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  MOZ_ASSERT(IsLooseEqualityOp(op));

  Label nullOrLikeUndefined, notNullOrLikeUndefined, done;
  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    if (isUndefined) {
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
    } else {
      masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
    }
    masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);

    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxObject(input, scratch);
      masm.branchIfObjectEmulatesUndefined(scratch, scratch, failure->label(),
                                           &nullOrLikeUndefined);
      masm.jump(&notNullOrLikeUndefined);
    }
  }

  masm.bind(&nullOrLikeUndefined);
  EmitStoreBoolean(masm, op == JSOp::Eq, output);
  masm.jump(&done);

  masm.bind(&notNullOrLikeUndefined);
  EmitStoreBoolean(masm, op == JSOp::Ne, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareDoubleSameValueResult(NumberOperandId lhsId,
                                                       NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.sameValueDouble(floatScratch0, floatScratch1, floatScratch2, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitIndirectTruncateInt32Result(Int32OperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, valId);

  if (output.hasValue()) {
    masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
  } else {
    masm.mov(val, output.typedReg().gpr());
  }

  return true;
}

bool CacheIRCompiler::emitCallPrintString(const char* str) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  masm.printf(str);
  return true;
}

bool CacheIRCompiler::emitBreakpoint() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  masm.breakpoint();
  return true;
}

void CacheIRCompiler::emitPostBarrierShared(Register obj,
                                            const ConstantOrRegister& val,
                                            Register scratch,
                                            Register maybeIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (val.constant()) {
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    return;
  }

  Label skipBarrier;
  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::NotEqual, reg.valueReg(), scratch,
                                  &skipBarrier);
  } else {
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, reg.typedReg().gpr(),
                                 scratch, &skipBarrier);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);

  // Check one element cache to avoid VM call.
  auto* lastCellAddr = cx_->runtime()->gc.addressOfLastBufferedWholeCell();
  masm.branchPtr(Assembler::Equal, AbsoluteAddress(lastCellAddr), obj,
                 &skipBarrier);

  // Call one of these, depending on maybeIndex:
  //
  //   void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
  //   void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj,
  //                                int32_t index);
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);
  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(cx_->runtime()), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  if (maybeIndex != InvalidReg) {
    masm.passABIArg(maybeIndex);
    using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
    masm.callWithABI<Fn, PostWriteElementBarrier>();
  } else {
    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }
  masm.PopRegsInMask(save);

  masm.bind(&skipBarrier);
}

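// The barrier above maintains the generational-GC invariant: an edge from a
// tenured object to a nursery cell must be recorded so a minor GC can find
// it. The fast paths skip the runtime call when the stored value is not a
// nursery cell, when |obj| itself is in the nursery (a minor GC traces it
// anyway), or when |obj| matches the last-buffered-whole-cell cache, so the
// VM call only runs for genuinely new tenured-to-nursery edges.
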
bool CacheIRCompiler::emitWrapResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  // We only have to wrap objects, because we are in the same zone.
  masm.branchTestObject(Assembler::NotEqual, output.valueReg(), &done);

  Register obj = output.valueReg().scratchReg();
  masm.unboxObject(output.valueReg(), obj);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = JSObject* (*)(JSContext* cx, JSObject* obj);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, WrapObjectPure>();
  masm.storeCallPointerResult(obj);

  LiveRegisterSet ignore;
  ignore.add(obj);
  masm.PopRegsInMaskIgnore(save, ignore);

  // We could not get a wrapper for this object.
  masm.branchTestPtr(Assembler::Zero, obj, obj, failure->label());

  // We clobbered the output register, so we have to retag.
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitMegamorphicLoadSlotByValueResult(ObjOperandId objId,
                                                           ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

#ifdef JS_CODEGEN_X86
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
#else
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifdef JS_CODEGEN_X86
  masm.xorPtr(scratch2, scratch2);
#else
  Label cacheHit;
  masm.emitMegamorphicCacheLookupByValue(
      idVal, obj, scratch1, scratch3, scratch2, output.valueReg(), &cacheHit);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(scratch2);
  masm.passABIArg(idVal.scratchReg());
  masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();

  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

#ifndef JS_CODEGEN_X86
  masm.bind(&cacheHit);
#endif
  return true;
}

bool CacheIRCompiler::emitMegamorphicHasPropResult(ObjOperandId objId,
                                                   ValOperandId idId,
                                                   bool hasOwn) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

#ifdef JS_CODEGEN_X86
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
#else
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifndef JS_CODEGEN_X86
  Label cacheHit, done;
  masm.emitMegamorphicCacheLookupExists(idVal, obj, scratch1, scratch3,
                                        scratch2, output.maybeReg(), &cacheHit,
                                        hasOwn);
#else
  masm.xorPtr(scratch2, scratch2);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(scratch2);
  masm.passABIArg(idVal.scratchReg());
  if (hasOwn) {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
  } else {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
  }
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

#ifndef JS_CODEGEN_X86
  masm.jump(&done);
  masm.bind(&cacheHit);
  if (output.hasValue()) {
    masm.tagValue(JSVAL_TYPE_BOOLEAN, output.valueReg().scratchReg(),
                  output.valueReg());
  }
  masm.bind(&done);
#endif
  return true;
}

bool CacheIRCompiler::emitCallObjectHasSparseElementResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(scratch2.get());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(index);
  masm.PushRegsInMask(volatileRegs);

  using Fn =
      bool (*)(JSContext* cx, NativeObject* obj, int32_t index, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(index);
  masm.passABIArg(scratch2);
  masm.callWithABI<Fn, HasNativeElementPure>();
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));
  return true;
}
/*
 * Move a constant value into register dest.
 */
void CacheIRCompiler::emitLoadStubFieldConstant(StubFieldOffset val,
                                                Register dest) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  MOZ_ASSERT(mode_ == Mode::Ion);
  switch (val.getStubFieldType()) {
    case StubField::Type::Shape:
      masm.movePtr(ImmGCPtr(shapeStubField(val.getOffset())), dest);
      break;
    case StubField::Type::WeakGetterSetter:
      masm.movePtr(ImmGCPtr(weakGetterSetterStubField(val.getOffset())), dest);
      break;
    case StubField::Type::String:
      masm.movePtr(ImmGCPtr(stringStubField(val.getOffset())), dest);
      break;
    case StubField::Type::JSObject:
      masm.movePtr(ImmGCPtr(objectStubField(val.getOffset())), dest);
      break;
    case StubField::Type::RawPointer:
      masm.movePtr(ImmPtr(pointerStubField(val.getOffset())), dest);
      break;
    case StubField::Type::RawInt32:
      masm.move32(Imm32(int32StubField(val.getOffset())), dest);
      break;
    case StubField::Type::Id:
      masm.movePropertyKey(idStubField(val.getOffset()), dest);
      break;
    default:
      MOZ_CRASH("Unhandled stub field constant type");
  }
}
/*
 * After this is done executing, dest contains the value; either through a
 * constant load or through the load from the stub data.
 *
 * The current policy is that Baseline will use loads from the stub data (to
 * allow IC sharing), whereas Ion doesn't share ICs, so it can safely bake
 * constants into the IC.
 */
void CacheIRCompiler::emitLoadStubField(StubFieldOffset val, Register dest) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    emitLoadStubFieldConstant(val, dest);
  } else {
    Address load(ICStubReg, stubDataOffset_ + val.getOffset());

    switch (val.getStubFieldType()) {
      case StubField::Type::RawPointer:
      case StubField::Type::Shape:
      case StubField::Type::WeakGetterSetter:
      case StubField::Type::JSObject:
      case StubField::Type::Symbol:
      case StubField::Type::String:
      case StubField::Type::Id:
        masm.loadPtr(load, dest);
        break;
      case StubField::Type::RawInt32:
        masm.load32(load, dest);
        break;
      default:
        MOZ_CRASH("Unhandled stub field constant type");
    }
  }
}
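
// Illustrative note on the policy above: with loads from the stub data
// (Baseline) the field is re-read on every execution, conceptually
//
//   dest = *(uintptr_t*)(ICStubReg + stubDataOffset_ + val.getOffset());
//
// which lets many ICs share one piece of jitcode, while under
// StubFieldPolicy::Constant (Ion) the value is baked into the instruction
// stream as an immediate, trading sharing for one fewer memory load.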
void CacheIRCompiler::emitLoadValueStubField(StubFieldOffset val,
                                             ValueOperand dest) {
  MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Value);

  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    MOZ_ASSERT(mode_ == Mode::Ion);
    masm.moveValue(valueStubField(val.getOffset()), dest);
  } else {
    Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
    masm.loadValue(addr, dest);
  }
}
void CacheIRCompiler::emitLoadDoubleValueStubField(StubFieldOffset val,
                                                   ValueOperand dest,
                                                   FloatRegister scratch) {
  MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Double);

  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    MOZ_ASSERT(mode_ == Mode::Ion);
    double d = doubleStubField(val.getOffset());
    masm.moveValue(DoubleValue(d), dest);
  } else {
    Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
    masm.loadDouble(addr, scratch);
    masm.boxDouble(scratch, dest, scratch);
  }
}
bool CacheIRCompiler::emitLoadInstanceOfObjectResult(ValOperandId lhsId,
                                                     ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
  Register proto = allocator.useRegister(masm, protoId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label returnFalse, returnTrue, done;
  masm.fallibleUnboxObject(lhs, scratch, &returnFalse);

  // LHS is an object. Load its proto.
  masm.loadObjProto(scratch, scratch);
  {
    // Walk the proto chain until we either reach the target object,
    // nullptr or LazyProto.
    Label loop;
    masm.bind(&loop);

    masm.branchPtr(Assembler::Equal, scratch, proto, &returnTrue);
    masm.branchTestPtr(Assembler::Zero, scratch, scratch, &returnFalse);

    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
    masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), failure->label());

    masm.loadObjProto(scratch, scratch);
    masm.jump(&loop);
  }

  masm.bind(&returnFalse);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&returnTrue);
  EmitStoreBoolean(masm, true, output);

  masm.bind(&done);
  return true;
}
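
// The loop above is the jitcode analogue of this C++-style walk (sketch
// only; `staticPrototype` stands in for the real accessors):
//
//   JSObject* cur = UnboxObject(lhs)->staticPrototype();
//   while (cur) {
//     if (cur == proto) return true;            // returnTrue
//     if (IsLazyProto(cur)) goto bailToFailure;  // proto not materialized
//     cur = cur->staticPrototype();
//   }
//   return false;                               // returnFalse
//
// LazyProto (the tagged pointer value 1) must take the failure path because
// the real proto can only be materialized by a VM call.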
bool CacheIRCompiler::emitMegamorphicLoadSlotResult(ObjOperandId objId,
                                                    uint32_t idOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset id(idOffset, StubField::Type::Id);

  AutoScratchRegisterMaybeOutput idReg(allocator, masm, output);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label cacheHit;
#ifdef JS_CODEGEN_X86
  masm.xorPtr(scratch3, scratch3);
#else
  emitLoadStubField(id, idReg);
  masm.emitMegamorphicCacheLookupByValue(idReg.get(), obj, scratch1, scratch2,
                                         scratch3, output.valueReg(),
                                         &cacheHit);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(idReg.get());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  volatileRegs.takeUnchecked(scratch3);
  volatileRegs.takeUnchecked(idReg);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(id, scratch2);
  masm.passABIArg(scratch2);
  masm.passABIArg(scratch3);
  masm.passABIArg(idReg);

#ifdef JS_CODEGEN_X86
  masm.callWithABI<Fn, GetNativeDataPropertyPureWithCacheLookup>();
#else
  masm.callWithABI<Fn, GetNativeDataPropertyPure>();
#endif

  masm.storeCallPointerResult(scratch2);
  masm.PopRegsInMask(volatileRegs);

  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

  masm.branchIfFalseBool(scratch2, failure->label());
#ifndef JS_CODEGEN_X86
  masm.bind(&cacheHit);
#endif

  return true;
}
bool CacheIRCompiler::emitMegamorphicStoreSlot(ObjOperandId objId,
                                               uint32_t idOffset,
                                               ValOperandId rhsId,
                                               bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  StubFieldOffset id(idOffset, StubField::Type::Id);
  AutoScratchRegister scratch(allocator, masm);

  callvm.prepare();

  masm.Push(Imm32(strict));
  masm.Push(val);
  emitLoadStubField(id, scratch);
  masm.Push(scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callvm.callNoResult<Fn, SetPropertyMegamorphic<false>>();
  return true;
}
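
// Note on argument order: AutoCallVM expects VM-function arguments to be
// pushed in reverse, so the pushes above correspond left-to-right to a call
// of SetPropertyMegamorphic(cx, obj, id, value, strict).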
bool CacheIRCompiler::emitGuardHasGetterSetter(ObjOperandId objId,
                                               uint32_t idOffset,
                                               uint32_t getterSetterOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  StubFieldOffset id(idOffset, StubField::Type::Id);
  StubFieldOffset getterSetter(getterSetterOffset,
                               StubField::Type::WeakGetterSetter);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
                      GetterSetter* getterSetter);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(id, scratch2);
  masm.passABIArg(scratch2);
  emitLoadStubField(getterSetter, scratch3);
  masm.passABIArg(scratch3);
  masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.branchIfFalseBool(scratch1, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardWasmArg(ValOperandId argId,
                                       wasm::ValType::Kind kind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // All values can be boxed as AnyRef.
  if (kind == wasm::ValType::Ref) {
    return true;
  }
  MOZ_ASSERT(kind != wasm::ValType::V128);

  ValueOperand arg = allocator.useValueRegister(masm, argId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Check that the argument can be converted to the Wasm type in Warp code
  // without bailing out.
  Label done;
  switch (kind) {
    case wasm::ValType::I32:
    case wasm::ValType::F32:
    case wasm::ValType::F64: {
      // Argument must be number, bool, or undefined.
      masm.branchTestNumber(Assembler::Equal, arg, &done);
      masm.branchTestBoolean(Assembler::Equal, arg, &done);
      masm.branchTestUndefined(Assembler::NotEqual, arg, failure->label());
      break;
    }
    case wasm::ValType::I64: {
      // Argument must be bigint, bool, or string.
      masm.branchTestBigInt(Assembler::Equal, arg, &done);
      masm.branchTestBoolean(Assembler::Equal, arg, &done);
      masm.branchTestString(Assembler::NotEqual, arg, failure->label());
      break;
    }
    default:
      MOZ_CRASH("Unexpected kind");
  }
  masm.bind(&done);

  return true;
}
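
// For reference, these guards mirror the JS-to-wasm argument conversions
// that Warp can perform without bailing out: e.g. passing `true` to an i32
// import coerces to 1 and `undefined` to an f64 import coerces to NaN, so
// number/boolean/undefined are accepted for I32/F32/F64, while I64 goes
// through BigInt conversion, which is also defined for booleans and strings.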
bool CacheIRCompiler::emitGuardMultipleShapes(ObjOperandId objId,
                                              uint32_t shapesOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister shapes(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  Register spectreScratch = InvalidReg;
  Maybe<AutoScratchRegister> maybeSpectreScratch;
  if (needSpectreMitigations) {
    maybeSpectreScratch.emplace(allocator, masm);
    spectreScratch = *maybeSpectreScratch;
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The stub field contains a ListObject. Load its elements.
  StubFieldOffset shapeArray(shapesOffset, StubField::Type::JSObject);
  emitLoadStubField(shapeArray, shapes);
  masm.loadPtr(Address(shapes, NativeObject::offsetOfElements()), shapes);

  masm.branchTestObjShapeList(Assembler::NotEqual, obj, shapes, scratch,
                              scratch2, spectreScratch, failure->label());
  return true;
}
bool CacheIRCompiler::emitLoadObject(ObjOperandId resultId,
                                     uint32_t objOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset obj(objOffset, StubField::Type::JSObject);
  emitLoadStubField(obj, reg);
  return true;
}
bool CacheIRCompiler::emitLoadProtoObject(ObjOperandId resultId,
                                          uint32_t objOffset,
                                          ObjOperandId receiverObjId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset obj(objOffset, StubField::Type::JSObject);
  emitLoadStubField(obj, reg);
  return true;
}
bool CacheIRCompiler::emitLoadInt32Constant(uint32_t valOffset,
                                            Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset val(valOffset, StubField::Type::RawInt32);
  emitLoadStubField(val, reg);
  return true;
}
bool CacheIRCompiler::emitLoadBooleanConstant(bool val,
                                              BooleanOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.move32(Imm32(val), reg);
  return true;
}
bool CacheIRCompiler::emitLoadDoubleConstant(uint32_t valOffset,
                                             NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  StubFieldOffset val(valOffset, StubField::Type::Double);

  AutoScratchFloatRegister floatReg(this);

  emitLoadDoubleValueStubField(val, output, floatReg);
  return true;
}
bool CacheIRCompiler::emitLoadUndefined(ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand reg = allocator.defineValueRegister(masm, resultId);
  masm.moveValue(UndefinedValue(), reg);
  return true;
}
bool CacheIRCompiler::emitLoadConstantString(uint32_t strOffset,
                                             StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset str(strOffset, StubField::Type::String);
  emitLoadStubField(str, reg);
  return true;
}
bool CacheIRCompiler::emitCallInt32ToString(Int32OperandId inputId,
                                            StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register input = allocator.useRegister(masm, inputId);
  Register result = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(result);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSLinearString* (*)(JSContext* cx, int32_t i);
  masm.setupUnalignedABICall(result);
  masm.loadJSContext(result);
  masm.passABIArg(result);
  masm.passABIArg(input);
  masm.callWithABI<Fn, js::Int32ToStringPure>();

  masm.storeCallPointerResult(result);
  masm.PopRegsInMask(volatileRegs);

  masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
  return true;
}
bool CacheIRCompiler::emitCallNumberToString(NumberOperandId inputId,
                                             StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, floatScratch0);
  Register result = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(result);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSString* (*)(JSContext* cx, double d);
  masm.setupUnalignedABICall(result);
  masm.loadJSContext(result);
  masm.passABIArg(result);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.callWithABI<Fn, js::NumberToStringPure>();

  masm.storeCallPointerResult(result);
  masm.PopRegsInMask(volatileRegs);

  masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
  return true;
}
bool CacheIRCompiler::emitInt32ToStringWithBaseResult(Int32OperandId inputId,
                                                      Int32OperandId baseId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  Register input = allocator.useRegister(masm, inputId);
  Register base = allocator.useRegister(masm, baseId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  masm.branch32(Assembler::LessThan, base, Imm32(2), failure->label());
  masm.branch32(Assembler::GreaterThan, base, Imm32(36), failure->label());

  // Use lower-case characters by default.
  constexpr bool lowerCase = true;

  callvm.prepare();

  masm.Push(Imm32(lowerCase));
  masm.Push(base);
  masm.Push(input);

  using Fn = JSString* (*)(JSContext*, int32_t, int32_t, bool);
  callvm.call<Fn, js::Int32ToStringWithBase>();
  return true;
}
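
// The [2, 36] range check above matches Number.prototype.toString: for
// example (255).toString(16) yields "ff" (lower-case, as pushed above),
// while toString(1) or toString(37) must throw a RangeError and therefore
// falls back to a path that can throw.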
bool CacheIRCompiler::emitBooleanToString(BooleanOperandId inputId,
                                          StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register boolean = allocator.useRegister(masm, inputId);
  Register result = allocator.defineRegister(masm, resultId);
  const JSAtomState& names = cx_->names();
  Label true_, done;

  masm.branchTest32(Assembler::NonZero, boolean, boolean, &true_);

  // False case
  masm.movePtr(ImmGCPtr(names.false_), result);
  masm.jump(&done);

  // True case
  masm.bind(&true_);
  masm.movePtr(ImmGCPtr(names.true_), result);
  masm.bind(&done);

  return true;
}
bool CacheIRCompiler::emitObjectToStringResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(output.valueReg());
  volatileRegs.takeUnchecked(scratch);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSString* (*)(JSContext*, JSObject*);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::ObjectClassToString>();
  masm.storeCallPointerResult(scratch);

  masm.PopRegsInMask(volatileRegs);

  masm.branchPtr(Assembler::Equal, scratch, ImmPtr(nullptr), failure->label());
  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());

  return true;
}
bool CacheIRCompiler::emitCallStringConcatResult(StringOperandId lhsId,
                                                 StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  masm.Push(static_cast<js::jit::Imm32>(int32_t(js::gc::Heap::Default)));
  masm.Push(rhs);
  masm.Push(lhs);

  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
  callvm.call<Fn, ConcatStrings<CanGC>>();

  return true;
}
bool CacheIRCompiler::emitCallIsSuspendedGeneratorResult(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  ValueOperand input = allocator.useValueRegister(masm, valId);

  // Test if it's an object.
  Label returnFalse, done;
  masm.fallibleUnboxObject(input, scratch, &returnFalse);

  // Test if it's a GeneratorObject.
  masm.branchTestObjClass(Assembler::NotEqual, scratch,
                          &GeneratorObject::class_, scratch2, scratch,
                          &returnFalse);

  // If the resumeIndex slot holds an int32 value < RESUME_INDEX_RUNNING,
  // the generator is suspended.
  Address addr(scratch, AbstractGeneratorObject::offsetOfResumeIndexSlot());
  masm.fallibleUnboxInt32(addr, scratch, &returnFalse);
  masm.branch32(Assembler::AboveOrEqual, scratch,
                Imm32(AbstractGeneratorObject::RESUME_INDEX_RUNNING),
                &returnFalse);

  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&returnFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}
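
// Sketch of the predicate inlined above (assuming the resumeIndex encoding
// described in the comment; GetResumeIndexSlot is a hypothetical accessor):
//
//   bool IsSuspendedGeneratorSketch(const Value& v) {
//     if (!v.isObject() || !v.toObject().is<GeneratorObject>()) {
//       return false;
//     }
//     Value idx = GetResumeIndexSlot(&v.toObject());
//     return idx.isInt32() &&
//            uint32_t(idx.toInt32()) <
//                AbstractGeneratorObject::RESUME_INDEX_RUNNING;
//   }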
// This op generates no code. It is consumed by the transpiler.
bool CacheIRCompiler::emitMetaScriptedThisShape(uint32_t) { return true; }
bool CacheIRCompiler::emitCallNativeGetElementResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  callvm.prepare();

  masm.Push(index);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
                      MutableHandleValue);
  callvm.call<Fn, NativeGetElement>();

  return true;
}
bool CacheIRCompiler::emitCallNativeGetElementSuperResult(
    ObjOperandId objId, Int32OperandId indexId, ValOperandId receiverId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);

  callvm.prepare();

  masm.Push(index);
  masm.Push(receiver);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
                      MutableHandleValue);
  callvm.call<Fn, NativeGetElement>();

  return true;
}
bool CacheIRCompiler::emitProxyHasPropResult(ObjOperandId objId,
                                             ValOperandId idId, bool hasOwn) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

  callvm.prepare();

  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  if (hasOwn) {
    callvm.call<Fn, ProxyHasOwn>();
  } else {
    callvm.call<Fn, ProxyHas>();
  }
  return true;
}
bool CacheIRCompiler::emitProxyGetByValueResult(ObjOperandId objId,
                                                ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

  callvm.prepare();
  masm.Push(idVal);
  masm.Push(obj);

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  callvm.call<Fn, ProxyGetPropertyByValue>();
  return true;
}
bool CacheIRCompiler::emitCallGetSparseElementResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register id = allocator.useRegister(masm, indexId);

  callvm.prepare();
  masm.Push(id);
  masm.Push(obj);

  using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
                      MutableHandleValue result);
  callvm.call<Fn, GetSparseElementHelper>();
  return true;
}
bool CacheIRCompiler::emitRegExpSearcherLastLimitResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  masm.loadAndClearRegExpSearcherLastLimit(scratch1, scratch2);

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitRegExpFlagResult(ObjOperandId regexpId,
                                           int32_t flagsMask) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register regexp = allocator.useRegister(masm, regexpId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Address flagsAddr(
      regexp, NativeObject::getFixedSlotOffset(RegExpObject::flagsSlot()));
  masm.unboxInt32(flagsAddr, scratch);

  Label ifFalse, done;
  masm.branchTest32(Assembler::Zero, scratch, Imm32(flagsMask), &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}
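
// Example: when this op is generated for the RegExp `global` getter,
// flagsMask is JS::RegExpFlag::Global, so /x/g loads true here while /x/i
// takes the ifFalse path; no VM call is needed because the flags live in a
// fixed slot.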
bool CacheIRCompiler::emitCallSubstringKernelResult(StringOperandId strId,
                                                    Int32OperandId beginId,
                                                    Int32OperandId lengthId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register begin = allocator.useRegister(masm, beginId);
  Register length = allocator.useRegister(masm, lengthId);

  callvm.prepare();
  masm.Push(length);
  masm.Push(begin);
  masm.Push(str);

  using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
                           int32_t len);
  callvm.call<Fn, SubstringKernel>();
  return true;
}
bool CacheIRCompiler::emitStringReplaceStringResult(
    StringOperandId strId, StringOperandId patternId,
    StringOperandId replacementId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register pattern = allocator.useRegister(masm, patternId);
  Register replacement = allocator.useRegister(masm, replacementId);

  callvm.prepare();
  masm.Push(replacement);
  masm.Push(pattern);
  masm.Push(str);

  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
  callvm.call<Fn, jit::StringReplace>();
  return true;
}
bool CacheIRCompiler::emitStringSplitStringResult(StringOperandId strId,
                                                  StringOperandId separatorId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register separator = allocator.useRegister(masm, separatorId);

  callvm.prepare();
  masm.Push(Imm32(INT32_MAX));
  masm.Push(separator);
  masm.Push(str);

  using Fn =
      ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
  callvm.call<Fn, js::StringSplitString>();
  return true;
}
bool CacheIRCompiler::emitRegExpPrototypeOptimizableResult(
    ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register proto = allocator.useRegister(masm, protoId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label slow, done;
  masm.branchIfNotRegExpPrototypeOptimizable(
      proto, scratch, /* maybeGlobal = */ nullptr, &slow);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&slow);

    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSObject* proto);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(proto);
    masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
    masm.storeCallBoolResult(scratch);

    masm.PopRegsInMask(volatileRegs);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitRegExpInstanceOptimizableResult(
    ObjOperandId regexpId, ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register regexp = allocator.useRegister(masm, regexpId);
  Register proto = allocator.useRegister(masm, protoId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label slow, done;
  masm.branchIfNotRegExpInstanceOptimizable(regexp, scratch,
                                            /* maybeGlobal = */ nullptr,
                                            &slow);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&slow);

    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(regexp);
    masm.passABIArg(proto);
    masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
    masm.storeCallBoolResult(scratch);

    masm.PopRegsInMask(volatileRegs);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitGetFirstDollarIndexResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = bool (*)(JSContext*, JSString*, int32_t*);
  callvm.call<Fn, GetFirstDollarIndexRaw>();
  return true;
}
bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
    uint32_t replacementId, Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Maybe<AutoOutputRegister> output;
  Maybe<AutoCallVM> callvm;
  if (!Scalar::isBigIntType(elementType)) {
    output.emplace(*this);
  } else {
    callvm.emplace(masm, this, allocator);
  }
#ifdef JS_CODEGEN_X86
  // Use a scratch register to avoid running out of registers.
  Register obj = output ? output->valueReg().typeReg()
                        : callvm->outputValueReg().typeReg();
  allocator.copyToScratchRegister(masm, objId, obj);
#else
  Register obj = allocator.useRegister(masm, objId);
#endif
  Register index = allocator.useRegister(masm, indexId);
  Register expected;
  Register replacement;
  if (!Scalar::isBigIntType(elementType)) {
    expected = allocator.useRegister(masm, Int32OperandId(expectedId));
    replacement = allocator.useRegister(masm, Int32OperandId(replacementId));
  } else {
    expected = allocator.useRegister(masm, BigIntOperandId(expectedId));
    replacement = allocator.useRegister(masm, BigIntOperandId(replacementId));
  }

  Register scratch = output ? output->valueReg().scratchReg()
                            : callvm->outputValueReg().scratchReg();
  MOZ_ASSERT(scratch != obj, "scratchReg must not be typeReg");

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // Atomic operations are highly platform-dependent, for example x86/x64 has
  // specific requirements on which registers are used; MIPS needs multiple
  // additional temporaries. Therefore we're using either an ABI or VM call
  // here instead of handling each platform separately.

  if (Scalar::isBigIntType(elementType)) {
    callvm->prepare();

    masm.Push(replacement);
    masm.Push(expected);
    masm.Push(index);
    masm.Push(obj);

    using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t,
                           const BigInt*, const BigInt*);
    callvm->call<Fn, jit::AtomicsCompareExchange64>();
    return true;
  }

  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(output->valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(expected);
    masm.passABIArg(replacement);
    masm.callWithABI(DynamicFunction<AtomicsCompareExchangeFn>(
        AtomicsCompareExchange(elementType)));
    masm.storeCallInt32Result(scratch);

    masm.PopRegsInMask(volatileRegs);
  }

  if (elementType != Scalar::Uint32) {
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output->valueReg());
  } else {
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output->valueReg(), fpscratch);
  }

  return true;
}
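
// For reference, the per-element operation performed by the callee follows
// the usual compare-exchange contract. Sketch in portable C++ (assuming a
// matching element type T; the real helpers are specialized per type):
//
//   template <typename T>
//   T CompareExchangeSketch(std::atomic<T>* addr, T expected, T repl) {
//     T old = expected;
//     addr->compare_exchange_strong(old, repl, std::memory_order_seq_cst);
//     return old;  // Atomics.compareExchange returns the previous value
//   }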
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
    Scalar::Type elementType, AtomicsReadWriteModifyFn fn) {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  Register value = allocator.useRegister(masm, Int32OperandId(valueId));
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // See comment in emitAtomicsCompareExchange for why we use an ABI call.
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(output.valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(value);
    masm.callWithABI(DynamicFunction<AtomicsReadWriteModifyFn>(fn));
    masm.storeCallInt32Result(scratch);

    masm.PopRegsInMask(volatileRegs);
  }

  if (elementType != Scalar::Uint32) {
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  } else {
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  }

  return true;
}
template <CacheIRCompiler::AtomicsReadWriteModify64Fn fn>
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId) {
  AutoCallVM callvm(masm, this, allocator);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  Register value = allocator.useRegister(masm, BigIntOperandId(valueId));
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // See comment in emitAtomicsCompareExchange for why we use a VM call.

  callvm.prepare();
  masm.Push(value);
  masm.Push(index);
  masm.Push(obj);

  callvm.call<AtomicsReadWriteModify64Fn, fn>();
  return true;
}
bool CacheIRCompiler::emitAtomicsExchangeResult(ObjOperandId objId,
                                                IntPtrOperandId indexId,
                                                uint32_t valueId,
                                                Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsExchange64>(
        objId, indexId, valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsExchange(elementType));
}
bool CacheIRCompiler::emitAtomicsAddResult(ObjOperandId objId,
                                           IntPtrOperandId indexId,
                                           uint32_t valueId,
                                           Scalar::Type elementType,
                                           bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsAdd64>(objId, indexId,
                                                                 valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsAdd(elementType));
}
bool CacheIRCompiler::emitAtomicsSubResult(ObjOperandId objId,
                                           IntPtrOperandId indexId,
                                           uint32_t valueId,
                                           Scalar::Type elementType,
                                           bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsSub64>(objId, indexId,
                                                                 valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsSub(elementType));
}
bool CacheIRCompiler::emitAtomicsAndResult(ObjOperandId objId,
                                           IntPtrOperandId indexId,
                                           uint32_t valueId,
                                           Scalar::Type elementType,
                                           bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsAnd64>(objId, indexId,
                                                                 valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsAnd(elementType));
}
bool CacheIRCompiler::emitAtomicsOrResult(ObjOperandId objId,
                                          IntPtrOperandId indexId,
                                          uint32_t valueId,
                                          Scalar::Type elementType,
                                          bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsOr64>(objId, indexId,
                                                                valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsOr(elementType));
}
bool CacheIRCompiler::emitAtomicsXorResult(ObjOperandId objId,
                                           IntPtrOperandId indexId,
                                           uint32_t valueId,
                                           Scalar::Type elementType,
                                           bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsXor64>(objId, indexId,
                                                                 valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsXor(elementType));
}
bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
                                            IntPtrOperandId indexId,
                                            Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Maybe<AutoOutputRegister> output;
  Maybe<AutoCallVM> callvm;
  if (!Scalar::isBigIntType(elementType)) {
    output.emplace(*this);
  } else {
    callvm.emplace(masm, this, allocator);
  }
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm,
                                         output ? *output : callvm->output());
  AutoSpectreBoundsScratchRegister spectreTemp(allocator, masm);
  AutoAvailableFloatRegister floatReg(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // Atomic operations are highly platform-dependent, for example x86/arm32 has
  // specific requirements on which registers are used. Therefore we're using a
  // VM call here instead of handling each platform separately.
  if (Scalar::isBigIntType(elementType)) {
    callvm->prepare();

    masm.Push(index);
    masm.Push(obj);

    using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t);
    callvm->call<Fn, jit::AtomicsLoad64>();
    return true;
  }

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);

  // Load the value.
  BaseIndex source(scratch, index, ScaleFromScalarType(elementType));

  // NOTE: the generated code must match the assembly code in gen_load in
  // GenerateAtomicOperations.py
  auto sync = Synchronization::Load();

  masm.memoryBarrierBefore(sync);

  Label* failUint32 = nullptr;
  MacroAssembler::Uint32Mode mode = MacroAssembler::Uint32Mode::ForceDouble;
  masm.loadFromTypedArray(elementType, source, output->valueReg(), mode,
                          scratch, failUint32);
  masm.memoryBarrierAfter(sync);

  return true;
}
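
// The barrier pair above makes the load sequentially consistent and must
// stay in sync with the hand-written sequences in
// GenerateAtomicOperations.py: on x86 the load itself is a plain MOV (the
// barriers compile to nothing for loads), while weaker architectures emit
// real fences, which is why the barriers are expressed abstractly here.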
bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
                                             IntPtrOperandId indexId,
                                             uint32_t valueId,
                                             Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  Maybe<Register> valueInt32;
  Maybe<Register> valueBigInt;
  if (!Scalar::isBigIntType(elementType)) {
    valueInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
  } else {
    valueBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
  }
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  if (!Scalar::isBigIntType(elementType)) {
    // Load the elements vector.
    masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);

    // Store the value.
    BaseIndex dest(scratch, index, ScaleFromScalarType(elementType));

    // NOTE: the generated code must match the assembly code in gen_store in
    // GenerateAtomicOperations.py
    auto sync = Synchronization::Store();

    masm.memoryBarrierBefore(sync);
    masm.storeToTypedIntArray(elementType, *valueInt32, dest);
    masm.memoryBarrierAfter(sync);

    masm.tagValue(JSVAL_TYPE_INT32, *valueInt32, output.valueReg());
  } else {
    // See comment in emitAtomicsCompareExchange for why we use an ABI call.

    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(output.valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = void (*)(TypedArrayObject*, size_t, const BigInt*);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(*valueBigInt);
    masm.callWithABI<Fn, jit::AtomicsStore64>();

    masm.PopRegsInMask(volatileRegs);

    masm.tagValue(JSVAL_TYPE_BIGINT, *valueBigInt, output.valueReg());
  }

  return true;
}
bool CacheIRCompiler::emitAtomicsIsLockFreeResult(Int32OperandId valueId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register value = allocator.useRegister(masm, valueId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.atomicIsLockFreeJS(value, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());

  return true;
}
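
// Example: Atomics.isLockFree(4) is true on all supported platforms, and
// atomicIsLockFreeJS evaluates the spec predicate (the byte size is one of
// 1, 2, 4, or 8) entirely in registers, so no call is needed.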
bool CacheIRCompiler::emitBigIntAsIntNResult(Int32OperandId bitsId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register bits = allocator.useRegister(masm, bitsId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  callvm.prepare();
  masm.Push(bits);
  masm.Push(bigInt);

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  callvm.call<Fn, jit::BigIntAsIntN>();
  return true;
}
bool CacheIRCompiler::emitBigIntAsUintNResult(Int32OperandId bitsId,
                                              BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register bits = allocator.useRegister(masm, bitsId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  callvm.prepare();
  masm.Push(bits);
  masm.Push(bigInt);

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  callvm.call<Fn, jit::BigIntAsUintN>();
  return true;
}
bool CacheIRCompiler::emitSetHasResult(ObjOperandId setId, ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register set = allocator.useRegister(masm, setId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  callvm.prepare();
  masm.Push(val);
  masm.Push(set);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  callvm.call<Fn, jit::SetObjectHas>();
  return true;
}
bool CacheIRCompiler::emitSetHasNonGCThingResult(ObjOperandId setId,
                                                 ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
  masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);

  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
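
// Sketch of the fast path above: the value is first canonicalized so that
// SameValueZero-equal inputs hash identically (e.g. -0.0 becomes +0.0 and
// int-valued doubles become int32), then hashed, then looked up, roughly:
//
//   HashableValue key = ToHashableNonGCThing(v);   // canonicalize
//   HashNumber hash = HashValue(key, scrambler);   // scratch1/scratch2
//   bool found = table.has(key, hash);             // setObjectHasNonBigInt
//
// The names here are illustrative, not the exact MacroAssembler helpers.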
bool CacheIRCompiler::emitSetHasSymbolResult(ObjOperandId setId,
                                             SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register sym = allocator.useRegister(masm, symId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  masm.prepareHashSymbol(sym, scratch1);

  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitSetHasBigIntResult(ObjOperandId setId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);
#ifndef JS_CODEGEN_ARM
  AutoScratchRegister scratch6(allocator, masm);
#else
  // We don't have more registers available on ARM32.
  Register scratch6 = set;

  masm.push(set);
#endif

  masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
  masm.setObjectHasBigInt(set, output.valueReg(), scratch1, scratch2, scratch3,
                          scratch4, scratch5, scratch6);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());

#ifdef JS_CODEGEN_ARM
  masm.pop(set);
#endif
  return true;
}
bool CacheIRCompiler::emitSetHasObjectResult(ObjOperandId setId,
                                             ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);

  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  masm.prepareHashObject(set, output.valueReg(), scratch1, scratch2, scratch3,
                         scratch4, scratch5);

  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitSetSizeResult(ObjOperandId setId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadSetObjectSize(set, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitMapHasResult(ObjOperandId mapId, ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  callvm.prepare();
  masm.Push(val);
  masm.Push(map);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  callvm.call<Fn, jit::MapObjectHas>();
  return true;
}
bool CacheIRCompiler::emitMapHasNonGCThingResult(ObjOperandId mapId,
                                                 ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
  masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);

  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitMapHasSymbolResult(ObjOperandId mapId,
                                             SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register sym = allocator.useRegister(masm, symId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  masm.prepareHashSymbol(sym, scratch1);

  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitMapHasBigIntResult(ObjOperandId mapId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);
#ifndef JS_CODEGEN_ARM
  AutoScratchRegister scratch6(allocator, masm);
#else
  // We don't have more registers available on ARM32.
  Register scratch6 = map;

  masm.push(map);
#endif

  masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
  masm.mapObjectHasBigInt(map, output.valueReg(), scratch1, scratch2, scratch3,
                          scratch4, scratch5, scratch6);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());

#ifdef JS_CODEGEN_ARM
  masm.pop(map);
#endif
  return true;
}
bool CacheIRCompiler::emitMapHasObjectResult(ObjOperandId mapId,
                                             ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);

  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
                         scratch4, scratch5);

  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitMapGetResult(ObjOperandId mapId, ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  callvm.prepare();
  masm.Push(val);
  masm.Push(map);

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  callvm.call<Fn, jit::MapObjectGet>();
  return true;
}
bool CacheIRCompiler::emitMapGetNonGCThingResult(ObjOperandId mapId,
                                                 ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
  masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);

  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}
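
// Symbols cache their hash code, so prepareHashSymbol needs only a single
// scratch register before the inline lookup.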
bool CacheIRCompiler::emitMapGetSymbolResult(ObjOperandId mapId,
                                             SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register sym = allocator.useRegister(masm, symId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  masm.prepareHashSymbol(sym, scratch1);

  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}

bool CacheIRCompiler::emitMapGetBigIntResult(ObjOperandId mapId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);
#ifndef JS_CODEGEN_ARM
  AutoScratchRegister scratch6(allocator, masm);
#else
  // We don't have more registers available on ARM32, so reuse |map| and
  // preserve its value on the stack across the lookup.
  Register scratch6 = map;

  masm.push(map);
#endif

  masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
  masm.mapObjectGetBigInt(map, output.valueReg(), scratch1, output.valueReg(),
                          scratch2, scratch3, scratch4, scratch5, scratch6);

#ifdef JS_CODEGEN_ARM
  masm.pop(map);
#endif

  return true;
}

bool CacheIRCompiler::emitMapGetObjectResult(ObjOperandId mapId,
                                             ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);

  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
                         scratch4, scratch5);

  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}

bool CacheIRCompiler::emitMapSizeResult(ObjOperandId mapId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadMapObjectSize(map, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
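
// Materializes a dense array with the same elements as an arguments object.
// The element copy is done in C++, so this compiles to a plain VM call.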
bool CacheIRCompiler::emitArrayFromArgumentsObjectResult(ObjOperandId objId,
                                                         uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);

  callvm.prepare();
  masm.Push(obj);

  using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
  callvm.call<Fn, js::ArrayFromArgumentsObject>();
  return true;
}
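
// Guards that the global's generation counter still matches the value baked
// into this stub: the expected count is a RawInt32 stub field, the counter's
// location is a RawPointer stub field, and the guard is a single 32-bit
// compare against that memory.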
bool CacheIRCompiler::emitGuardGlobalGeneration(uint32_t expectedOffset,
                                                uint32_t generationAddrOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  StubFieldOffset expected(expectedOffset, StubField::Type::RawInt32);
  emitLoadStubField(expected, scratch);

  StubFieldOffset generationAddr(generationAddrOffset,
                                 StubField::Type::RawPointer);
  emitLoadStubField(generationAddr, scratch2);

  masm.branch32(Assembler::NotEqual, Address(scratch2, 0), scratch,
                failure->label());

  return true;
}
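
// The realm-fuse word is null while the fuse is intact, so any non-null load
// sends the stub to its failure path.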
bool CacheIRCompiler::emitGuardFuse(RealmFuses::FuseIndex fuseIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadRealmFuse(fuseIndex, scratch);
  masm.branchPtr(Assembler::NotEqual, scratch, ImmPtr(nullptr),
                 failure->label());
  return true;
}

bool CacheIRCompiler::emitBailout() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Generates no code.
  return true;
}

bool CacheIRCompiler::emitAssertRecoveredOnBailoutResult(ValOperandId valId,
                                                         bool mustBeRecovered) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);

  // NOP when not in IonMonkey.
  masm.moveValue(UndefinedValue(), output.valueReg());

  return true;
}
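
// Sanity check: re-runs the property lookup in C++ through an ABI call to
// AssertPropertyLookup and asserts it still maps |id| to |slot|. All volatile
// registers are preserved around the call.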
bool CacheIRCompiler::emitAssertPropertyLookup(ObjOperandId objId,
                                               uint32_t idOffset,
                                               uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister id(allocator, masm);
  AutoScratchRegister slot(allocator, masm);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(id);

  StubFieldOffset idField(idOffset, StubField::Type::Id);
  emitLoadStubField(idField, id);

  StubFieldOffset slotField(slotOffset, StubField::Type::RawInt32);
  emitLoadStubField(slotField, slot);

  masm.passABIArg(obj);
  masm.passABIArg(id);
  masm.passABIArg(slot);
  using Fn = void (*)(NativeObject*, PropertyKey, uint32_t);
  masm.callWithABI<Fn, js::jit::AssertPropertyLookup>();
  masm.PopRegsInMask(save);

  return true;
}
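
// Fuzzilli's differential fuzzing wants a deterministic hash of every value
// this IC observes. The dispatch below folds each primitive into a double
// (null hashes as 1.0, undefined as 2.0, booleans as 3.0/4.0), hashes BigInts
// and objects through out-of-line calls, and funnels everything into a
// rotate-and-add update of cx->executionHash.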
#ifdef FUZZING_JS_FUZZILLI
bool CacheIRCompiler::emitFuzzilliHashResult(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand input = allocator.useValueRegister(masm, valId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratchJSContext(allocator, masm);
  AutoScratchFloatRegister floatReg(this);
#  ifdef JS_PUNBOX64
  AutoScratchRegister64 scratch64(allocator, masm);
#  else
  AutoScratchRegister scratch2(allocator, masm);
#  endif

  Label addFloat, updateHash, done;

  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxInt32(input, scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notInt32);

    Label notDouble;
    masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxDouble(input, floatReg);
      masm.canonicalizeDouble(floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notDouble);

    Label notNull;
    masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
    {
      ScratchTagScopeRelease _(&tag);

      masm.move32(Imm32(1), scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notNull);

    Label notUndefined;
    masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
    {
      ScratchTagScopeRelease _(&tag);

      masm.move32(Imm32(2), scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notUndefined);

    Label notBoolean;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxBoolean(input, scratch);
      masm.add32(Imm32(3), scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notBoolean);

    Label notBigInt;
    masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxBigInt(input, scratch);

      LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                   liveVolatileFloatRegs());
      masm.PushRegsInMask(volatileRegs);
      // TODO: remove floatReg, scratch, scratchJS?

      using Fn = uint32_t (*)(BigInt* bigInt);
      masm.setupUnalignedABICall(scratchJSContext);
      masm.loadJSContext(scratchJSContext);
      masm.passABIArg(scratch);
      masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
      masm.storeCallInt32Result(scratch);

      LiveRegisterSet ignore;
      ignore.add(scratch);
      ignore.add(scratchJSContext);
      masm.PopRegsInMaskIgnore(volatileRegs, ignore);
      masm.jump(&updateHash);
    }
    masm.bind(&notBigInt);

    Label notObject;
    masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
    {
      ScratchTagScopeRelease _(&tag);

      AutoCallVM callvm(masm, this, allocator);
      Register obj = allocator.allocateRegister(masm);
      masm.unboxObject(input, obj);

      callvm.prepare();
      masm.Push(obj);

      using Fn = void (*)(JSContext* cx, JSObject* o);
      callvm.callNoResult<Fn, js::FuzzilliHashObject>();
      allocator.releaseRegister(obj);

      masm.jump(&done);
    }
    masm.bind(&notObject);
    {
      masm.move32(Imm32(0), scratch);
      masm.jump(&updateHash);
    }
  }

  {
    masm.bind(&addFloat);

    masm.loadJSContext(scratchJSContext);
    Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));

#  ifdef JS_PUNBOX64
    masm.moveDoubleToGPR64(floatReg, scratch64);
    masm.move32(scratch64.get().reg, scratch);
    masm.rshift64(Imm32(32), scratch64);
    masm.add32(scratch64.get().reg, scratch);
#  else
    Register64 scratch64(scratch, scratch2);
    masm.moveDoubleToGPR64(floatReg, scratch64);
    masm.add32(scratch2, scratch);
#  endif
  }

  {
    masm.bind(&updateHash);

    masm.loadJSContext(scratchJSContext);
    Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
    masm.load32(addrExecHash, scratchJSContext);
    masm.add32(scratchJSContext, scratch);
    masm.rotateLeft(Imm32(1), scratch, scratch);
    masm.loadJSContext(scratchJSContext);
    masm.store32(scratch, addrExecHash);

    // Bump the count of hashed inputs as well.
    Address addrExecHashInputs(scratchJSContext,
                               offsetof(JSContext, executionHashInputs));
    masm.load32(addrExecHashInputs, scratch);
    masm.add32(Imm32(1), scratch);
    masm.store32(scratch, addrExecHashInputs);
  }

  masm.bind(&done);

  AutoOutputRegister output(*this);
  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}
#endif

template <typename Fn, Fn fn>
void CacheIRCompiler::callVM(MacroAssembler& masm) {
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  callVMInternal(masm, id);
}
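
// Ion and Baseline enter VM wrappers differently. Ion pushes an IonICCall
// frame descriptor, calls the trampoline, then manually pops the exit frame,
// the explicit arguments, any local tracing slots, and the IonICCall frame
// itself; Baseline defers to the shared EmitBaselineCallVM helper.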
void CacheIRCompiler::callVMInternal(MacroAssembler& masm, VMFunctionId id) {
  MOZ_ASSERT(enteredStubFrame_);
  if (mode_ == Mode::Ion) {
    TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
    const VMFunctionData& fun = GetVMFunction(id);
    uint32_t frameSize = fun.explicitStackSlots() * sizeof(void*);
    masm.PushFrameDescriptor(FrameType::IonICCall);
    masm.callJit(code);

    // Pop rest of the exit frame and the arguments left on the stack.
    uint32_t framePop =
        sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
    masm.implicitPop(frameSize + framePop);

    masm.freeStack(asIon()->localTracingSlots() * sizeof(Value));

    // Pop IonICCallFrameLayout.
    masm.Pop(FramePointer);
    masm.freeStack(IonICCallFrameLayout::Size() - sizeof(void*));
    return;
  }

  MOZ_ASSERT(mode_ == Mode::Baseline);

  TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);

  EmitBaselineCallVM(code, masm);
}

bool CacheIRCompiler::isBaseline() { return mode_ == Mode::Baseline; }

bool CacheIRCompiler::isIon() { return mode_ == Mode::Ion; }

BaselineCacheIRCompiler* CacheIRCompiler::asBaseline() {
  MOZ_ASSERT(this->isBaseline());
  return static_cast<BaselineCacheIRCompiler*>(this);
}

IonCacheIRCompiler* CacheIRCompiler::asIon() {
  MOZ_ASSERT(this->isIon());
  return static_cast<IonCacheIRCompiler*>(this);
}

#ifdef DEBUG
void CacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
  if (isBaseline()) {
    // Baseline does not have any FloatRegisters live when calling an IC stub.
    return;
  }

  asIon()->assertFloatRegisterAvailable(reg);
}
#endif

AutoCallVM::AutoCallVM(MacroAssembler& masm, CacheIRCompiler* compiler,
                       CacheRegisterAllocator& allocator)
    : masm_(masm), compiler_(compiler), allocator_(allocator) {
  // Ion needs to `enterStubFrame` before it can callVM and it also needs to
  // initialize AutoSaveLiveRegisters.
  if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
    // Will need to use a downcast here as well, in order to pass the stub to
    // AutoSaveLiveRegisters.
    save_.emplace(*compiler_->asIon());
  }

  if (compiler->outputUnchecked_.isSome()) {
    output_.emplace(*compiler);
  }

  if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
    stubFrame_.emplace(*compiler_->asBaseline());
    if (output_.isSome()) {
      scratch_.emplace(allocator_, masm_, output_.ref());
    } else {
      scratch_.emplace(allocator_, masm_);
    }
  }
}

void AutoCallVM::prepare() {
  allocator_.discardStack(masm_);
  MOZ_ASSERT(compiler_ != nullptr);
  if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
    compiler_->asIon()->enterStubFrame(masm_, *save_.ptr());
    return;
  }
  MOZ_ASSERT(compiler_->mode_ == CacheIRCompiler::Mode::Baseline);
  stubFrame_->enter(masm_, scratch_.ref());
}

void AutoCallVM::storeResult(JSValueType returnType) {
  MOZ_ASSERT(returnType != JSVAL_TYPE_DOUBLE);
  if (returnType == JSVAL_TYPE_UNKNOWN) {
    masm_.storeCallResultValue(output_.ref());
  } else {
    if (output_->hasValue()) {
      masm_.tagValue(returnType, ReturnReg, output_->valueReg());
    } else {
      masm_.storeCallPointerResult(output_->typedReg().gpr());
    }
  }
}

void AutoCallVM::leaveBaselineStubFrame() {
  if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
    stubFrame_->leave(masm_);
  }
}

template <typename...>
struct VMFunctionReturnType;

template <class R, typename... Args>
struct VMFunctionReturnType<R (*)(JSContext*, Args...)> {
  using LastArgument = typename LastArg<Args...>::Type;

  // By convention VMFunctions returning `bool` use an output parameter.
  using ReturnType =
      std::conditional_t<std::is_same_v<R, bool>, LastArgument, R>;
};
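
// Worked example: for
//   using Fn = bool (*)(JSContext*, HandleObject, HandleValue,
//                       MutableHandleValue);
// R is bool, so ReturnType is the last argument type, MutableHandleValue. A
// non-bool function such as JSString* (*)(JSContext*, HandleString) uses R
// (JSString*) directly.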
template <class>
struct ReturnTypeToJSValueType;

// Definitions for the currently used return types.
template <>
struct ReturnTypeToJSValueType<MutableHandleValue> {
  static constexpr JSValueType result = JSVAL_TYPE_UNKNOWN;
};
template <>
struct ReturnTypeToJSValueType<bool*> {
  static constexpr JSValueType result = JSVAL_TYPE_BOOLEAN;
};
template <>
struct ReturnTypeToJSValueType<int32_t*> {
  static constexpr JSValueType result = JSVAL_TYPE_INT32;
};
template <>
struct ReturnTypeToJSValueType<JSString*> {
  static constexpr JSValueType result = JSVAL_TYPE_STRING;
};
template <>
struct ReturnTypeToJSValueType<BigInt*> {
  static constexpr JSValueType result = JSVAL_TYPE_BIGINT;
};
template <>
struct ReturnTypeToJSValueType<JSObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<PropertyIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<ArrayIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<StringIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<RegExpStringIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<PlainObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<ArrayObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<TypedArrayObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
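
// Compile-time restatement of the mapping above: boxed Value results are
// tagged JSVAL_TYPE_UNKNOWN; all object-like results collapse to
// JSVAL_TYPE_OBJECT.
static_assert(ReturnTypeToJSValueType<MutableHandleValue>::result ==
              JSVAL_TYPE_UNKNOWN);
static_assert(ReturnTypeToJSValueType<JSObject*>::result == JSVAL_TYPE_OBJECT);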
template <typename Fn>
void AutoCallVM::storeResult() {
  using ReturnType = typename VMFunctionReturnType<Fn>::ReturnType;
  storeResult(ReturnTypeToJSValueType<ReturnType>::result);
}
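
// FloatReg0 may hold live data in Ion, so it is spilled around this scope;
// Baseline ICs are entered with no live float registers and can use it
// directly (see the constructor below).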
AutoScratchFloatRegister::AutoScratchFloatRegister(CacheIRCompiler* compiler,
                                                   FailurePath* failure)
    : compiler_(compiler), failure_(failure) {
  // If we're compiling a Baseline IC, FloatReg0 is always available.
  if (!compiler_->isBaseline()) {
    MacroAssembler& masm = compiler_->masm;
    masm.push(FloatReg0);
    compiler->allocator.setHasAutoScratchFloatRegisterSpill(true);
  }

  if (failure_) {
    failure_->setHasAutoScratchFloatRegister();
  }
}

AutoScratchFloatRegister::~AutoScratchFloatRegister() {
  if (failure_) {
    failure_->clearHasAutoScratchFloatRegister();
  }

  if (!compiler_->isBaseline()) {
    MacroAssembler& masm = compiler_->masm;
    masm.pop(FloatReg0);
    compiler_->allocator.setHasAutoScratchFloatRegisterSpill(false);

    if (failure_) {
      Label done;
      masm.jump(&done);
      masm.bind(&failurePopReg_);
      masm.pop(FloatReg0);
      masm.jump(failure_->label());
      masm.bind(&done);
    }
  }
}

Label* AutoScratchFloatRegister::failure() {
  MOZ_ASSERT(failure_);

  if (!compiler_->isBaseline()) {
    return &failurePopReg_;
  }
  return failure_->labelUnchecked();
}