Bug 1873042 - Part 9: Combine Int32ToStringWithBase and StringConvertCase. r=jandem
[gecko.git] js/src/jit/CacheIRCompiler.cpp
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/CacheIRCompiler.h"

#include "mozilla/ArrayUtils.h"
#include "mozilla/FunctionTypeTraits.h"
#include "mozilla/MaybeOneOf.h"
#include "mozilla/ScopeExit.h"

#include <type_traits>
#include <utility>

#include "jslibmath.h"
#include "jsmath.h"

#include "builtin/DataViewObject.h"
#include "builtin/MapObject.h"
#include "builtin/Object.h"
#include "gc/GCEnum.h"
#include "jit/BaselineCacheIRCompiler.h"
#include "jit/CacheIRGenerator.h"
#include "jit/IonCacheIRCompiler.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/VMFunctions.h"
#include "js/friend/DOMProxy.h"     // JS::ExpandoAndGeneration
#include "js/friend/XrayJitInfo.h"  // js::jit::GetXrayJitInfo
#include "js/ScalarType.h"          // js::Scalar::Type
#include "js/SweepingAPI.h"
#include "proxy/DOMProxy.h"
#include "proxy/Proxy.h"
#include "proxy/ScriptedProxyHandler.h"
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BigIntType.h"
#include "vm/FunctionFlags.h"  // js::FunctionFlags
#include "vm/GeneratorObject.h"
#include "vm/GetterSetter.h"
#include "vm/Interpreter.h"
#include "vm/Uint8Clamped.h"

#include "builtin/Boolean-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "jit/VMFunctionList-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::BitwiseCast;
using mozilla::Maybe;

using JS::ExpandoAndGeneration;

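// Load the operand into a ValueOperand register, popping it from the stack
// or boxing a typed payload as needed, and record its new location.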
ValueOperand CacheRegisterAllocator::useValueRegister(MacroAssembler& masm,
                                                      ValOperandId op) {
  OperandLocation& loc = operandLocations_[op.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      currentOpRegs_.add(loc.valueReg());
      return loc.valueReg();

    case OperandLocation::ValueStack: {
      ValueOperand reg = allocateValueRegister(masm);
      popValue(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      ValueOperand reg = allocateValueRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Constant: {
      ValueOperand reg = allocateValueRegister(masm);
      masm.moveValue(loc.constant(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadReg: {
      // Temporarily add the payload register to currentOpRegs_ so
      // allocateValueRegister will stay away from it.
      currentOpRegs_.add(loc.payloadReg());
      ValueOperand reg = allocateValueRegister(masm);
      masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
      currentOpRegs_.take(loc.payloadReg());
      availableRegs_.add(loc.payloadReg());
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      ValueOperand reg = allocateValueRegister(masm);
      popPayload(masm, &loc, reg.scratchReg());
      masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::DoubleReg: {
      ValueOperand reg = allocateValueRegister(masm);
      {
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(loc.doubleReg(), reg, fpscratch);
      }
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

// Load a value operand directly into a float register. Caller must have
// guarded isNumber on the provided val.
void CacheRegisterAllocator::ensureDoubleRegister(MacroAssembler& masm,
                                                  NumberOperandId op,
                                                  FloatRegister dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[op.id()];

  Label failure, done;
  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      masm.ensureDouble(loc.valueReg(), dest, &failure);
      break;
    }

    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::DoubleReg: {
      masm.moveDouble(loc.doubleReg(), dest);
      return;
    }

    case OperandLocation::Constant: {
      MOZ_ASSERT(loc.constant().isNumber(),
                 "Caller must ensure the operand is a number value");
      masm.loadConstantDouble(loc.constant().toNumber(), dest);
      return;
    }

    case OperandLocation::PayloadReg: {
      // Doubles can't be stored in payload registers, so this must be an
      // int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      masm.convertInt32ToDouble(loc.payloadReg(), dest);
      return;
    }

    case OperandLocation::PayloadStack: {
      // Doubles can't be stored in payload stack slots, so this must be an
      // int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.convertInt32ToDouble(addr, dest);
      return;
    }

    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand type in ensureDoubleRegister");
      return;
  }
  masm.jump(&done);
  masm.bind(&failure);
  masm.assumeUnreachable(
      "Missing guard allowed non-number to hit ensureDoubleRegister");
  masm.bind(&done);
}

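// Copy the unboxed payload of |typedId| into |dest| without allocating any
// registers and without changing the operand's recorded location.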
void CacheRegisterAllocator::copyToScratchRegister(MacroAssembler& masm,
                                                   TypedOperandId typedId,
                                                   Register dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[typedId.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      masm.unboxNonDouble(loc.valueReg(), dest, typedId.type());
      break;
    }
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::PayloadReg: {
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      masm.mov(loc.payloadReg(), dest);
      return;
    }
    case OperandLocation::PayloadStack: {
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.loadPtr(addr, dest);
      return;
    }
    case OperandLocation::DoubleReg:
    case OperandLocation::Constant:
    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand location");
  }
}

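// Box or copy the value of |valId| into the scratch register |dest| without
// changing the operand's recorded location.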
void CacheRegisterAllocator::copyToScratchValueRegister(
    MacroAssembler& masm, ValOperandId valId, ValueOperand dest) const {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  const OperandLocation& loc = operandLocations_[valId.id()];
  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      masm.moveValue(loc.valueReg(), dest);
      break;
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::Constant:
      masm.moveValue(loc.constant(), dest);
      break;
    case OperandLocation::PayloadReg:
      masm.tagValue(loc.payloadType(), loc.payloadReg(), dest);
      break;
    case OperandLocation::PayloadStack: {
      Address addr = payloadAddress(masm, &loc);
      masm.loadPtr(addr, dest.scratchReg());
      masm.tagValue(loc.payloadType(), dest.scratchReg(), dest);
      break;
    }
    case OperandLocation::DoubleReg: {
      ScratchDoubleScope fpscratch(masm);
      masm.boxDouble(loc.doubleReg(), dest, fpscratch);
      break;
    }
    case OperandLocation::Uninitialized:
      MOZ_CRASH();
  }
}

Register CacheRegisterAllocator::useRegister(MacroAssembler& masm,
                                             TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  switch (loc.kind()) {
    case OperandLocation::PayloadReg:
      currentOpRegs_.add(loc.payloadReg());
      return loc.payloadReg();

    case OperandLocation::ValueReg: {
      // It's possible the value is still boxed: as an optimization, we unbox
      // the first time we use a value as object.
      ValueOperand val = loc.valueReg();
      availableRegs_.add(val);
      Register reg = val.scratchReg();
      availableRegs_.take(reg);
      masm.unboxNonDouble(val, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      currentOpRegs_.add(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      Register reg = allocateRegister(masm);
      popPayload(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::ValueStack: {
      // The value is on the stack, but boxed. If it's on top of the stack we
      // unbox it and then remove it from the stack, else we just unbox.
      Register reg = allocateRegister(masm);
      if (loc.valueStack() == stackPushed_) {
        masm.unboxNonDouble(Address(masm.getStackPointer(), 0), reg,
                            typedId.type());
        masm.addToStackPtr(Imm32(sizeof(js::Value)));
        MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
        stackPushed_ -= sizeof(js::Value);
      } else {
        MOZ_ASSERT(loc.valueStack() < stackPushed_);
        masm.unboxNonDouble(
            Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
            reg, typedId.type());
      }
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      Register reg = allocateRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.unboxNonDouble(addr, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::Constant: {
      Value v = loc.constant();
      Register reg = allocateRegister(masm);
      if (v.isString()) {
        masm.movePtr(ImmGCPtr(v.toString()), reg);
      } else if (v.isSymbol()) {
        masm.movePtr(ImmGCPtr(v.toSymbol()), reg);
      } else if (v.isBigInt()) {
        masm.movePtr(ImmGCPtr(v.toBigInt()), reg);
      } else {
        MOZ_CRASH("Unexpected Value");
      }
      loc.setPayloadReg(reg, v.extractNonDoubleType());
      return reg;
    }

    case OperandLocation::DoubleReg:
    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

ConstantOrRegister CacheRegisterAllocator::useConstantOrRegister(
    MacroAssembler& masm, ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[val.id()];
  switch (loc.kind()) {
    case OperandLocation::Constant:
      return loc.constant();

    case OperandLocation::PayloadReg:
    case OperandLocation::PayloadStack: {
      JSValueType payloadType = loc.payloadType();
      Register reg = useRegister(masm, TypedOperandId(val, payloadType));
      return TypedOrValueRegister(MIRTypeFromValueType(payloadType),
                                  AnyRegister(reg));
    }

    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      return TypedOrValueRegister(useValueRegister(masm, val));

    case OperandLocation::DoubleReg:
      return TypedOrValueRegister(MIRType::Double,
                                  AnyRegister(loc.doubleReg()));

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

Register CacheRegisterAllocator::defineRegister(MacroAssembler& masm,
                                                TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

  Register reg = allocateRegister(masm);
  loc.setPayloadReg(reg, typedId.type());
  return reg;
}

ValueOperand CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm,
                                                         ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[val.id()];
  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

  ValueOperand reg = allocateValueRegister(masm);
  loc.setValueReg(reg);
  return reg;
}

void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler& masm) {
  // See if any operands are dead so we can reuse their registers. Note that
  // we skip the input operands, as those are also used by failure paths, and
  // we currently don't track those uses.
  for (size_t i = writer_.numInputOperands(); i < operandLocations_.length();
       i++) {
    if (!writer_.operandIsDead(i, currentInstruction_)) {
      continue;
    }

    OperandLocation& loc = operandLocations_[i];
    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        availableRegs_.add(loc.payloadReg());
        break;
      case OperandLocation::ValueReg:
        availableRegs_.add(loc.valueReg());
        break;
      case OperandLocation::PayloadStack:
        masm.propagateOOM(freePayloadSlots_.append(loc.payloadStack()));
        break;
      case OperandLocation::ValueStack:
        masm.propagateOOM(freeValueSlots_.append(loc.valueStack()));
        break;
      case OperandLocation::Uninitialized:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        break;
    }
    loc.setUninitialized();
  }
}

void CacheRegisterAllocator::discardStack(MacroAssembler& masm) {
  // This should only be called when we are no longer using the operands,
  // as we're discarding everything from the native stack. Set all operand
  // locations to Uninitialized to catch bugs.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    operandLocations_[i].setUninitialized();
  }

  if (stackPushed_ > 0) {
    masm.addToStackPtr(Imm32(stackPushed_));
    stackPushed_ = 0;
  }
  freePayloadSlots_.clear();
  freeValueSlots_.clear();
}

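// Allocate a general-purpose register. If none are free, first try to reuse
// registers held by dead operands, then spill a live operand to the stack.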
Register CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  if (availableRegs_.empty()) {
    freeDeadOperandLocations(masm);
  }

  if (availableRegs_.empty()) {
    // Still no registers available, try to spill unused operands to
    // the stack.
    for (size_t i = 0; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.kind() == OperandLocation::PayloadReg) {
        Register reg = loc.payloadReg();
        if (currentOpRegs_.has(reg)) {
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // We got a register, so break out of the loop.
      }
      if (loc.kind() == OperandLocation::ValueReg) {
        ValueOperand reg = loc.valueReg();
        if (currentOpRegs_.aliases(reg)) {
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // Break out of the loop.
      }
    }
  }

  if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
    Register reg = availableRegsAfterSpill_.takeAny();
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));

    availableRegs_.add(reg);
  }

  // At this point, there must be a free register.
  MOZ_RELEASE_ASSERT(!availableRegs_.empty());

  Register reg = availableRegs_.takeAny();
  currentOpRegs_.add(reg);
  return reg;
}

void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm,
                                                   Register reg) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  // Fixed registers should be allocated first, to ensure they're
  // still available.
  MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");

  freeDeadOperandLocations(masm);

  if (availableRegs_.has(reg)) {
    availableRegs_.take(reg);
    currentOpRegs_.add(reg);
    return;
  }

  // Register may be available only after spilling contents.
  if (availableRegsAfterSpill_.has(reg)) {
    availableRegsAfterSpill_.take(reg);
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
    currentOpRegs_.add(reg);
    return;
  }

  // The register must be used by some operand. Spill it to the stack.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.kind() == OperandLocation::PayloadReg) {
      if (loc.payloadReg() != reg) {
        continue;
      }

      spillOperandToStackOrRegister(masm, &loc);
      currentOpRegs_.add(reg);
      return;
    }
    if (loc.kind() == OperandLocation::ValueReg) {
      if (!loc.valueReg().aliases(reg)) {
        continue;
      }

      ValueOperand valueReg = loc.valueReg();
      spillOperandToStackOrRegister(masm, &loc);

      availableRegs_.add(valueReg);
      availableRegs_.take(reg);
      currentOpRegs_.add(reg);
      return;
    }
  }

  MOZ_CRASH("Invalid register");
}

void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler& masm,
                                                        ValueOperand reg) {
#ifdef JS_NUNBOX32
  allocateFixedRegister(masm, reg.payloadReg());
  allocateFixedRegister(masm, reg.typeReg());
#else
  allocateFixedRegister(masm, reg.valueReg());
#endif
}

#ifdef JS_NUNBOX32
// Possible miscompilation in clang-12 (bug 1689641)
MOZ_NEVER_INLINE
#endif
ValueOperand CacheRegisterAllocator::allocateValueRegister(
    MacroAssembler& masm) {
#ifdef JS_NUNBOX32
  Register reg1 = allocateRegister(masm);
  Register reg2 = allocateRegister(masm);
  return ValueOperand(reg1, reg2);
#else
  Register reg = allocateRegister(masm);
  return ValueOperand(reg);
#endif
}

bool CacheRegisterAllocator::init() {
  if (!origInputLocations_.resize(writer_.numInputOperands())) {
    return false;
  }
  if (!operandLocations_.resize(writer_.numOperandIds())) {
    return false;
  }
  return true;
}

void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
  // Registers not in availableRegs_ and not used by input operands are
  // available after being spilled.
  availableRegsAfterSpill_.set() = GeneralRegisterSet::Intersect(
      GeneralRegisterSet::Not(availableRegs_.set()),
      GeneralRegisterSet::Not(inputRegisterSet()));
}

void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm) {
  // If IC inputs alias each other, make sure they are stored in different
  // locations so we don't have to deal with this complexity in the rest of
  // the allocator.

  // Note that this can happen in IonMonkey with something like |o.foo = o|
  // or |o[i] = i|.

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(origInputLocations_.length() == numInputs);

  for (size_t i = 1; i < numInputs; i++) {
    OperandLocation& loc1 = operandLocations_[i];
    if (!loc1.isInRegister()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      OperandLocation& loc2 = operandLocations_[j];
      if (!loc1.aliasesReg(loc2)) {
        continue;
      }

      // loc1 and loc2 alias so we spill one of them. If one is a
      // ValueReg and the other is a PayloadReg, we have to spill the
      // PayloadReg: spilling the ValueReg instead would leave its type
      // register unallocated on 32-bit platforms.
      if (loc1.kind() == OperandLocation::ValueReg) {
        spillOperandToStack(masm, &loc2);
      } else {
        MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
        spillOperandToStack(masm, &loc1);
        break;  // Spilled loc1, so nothing else will alias it.
      }
    }
  }

#ifdef DEBUG
  assertValidState();
#endif
}

GeneralRegisterSet CacheRegisterAllocator::inputRegisterSet() const {
  MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());

  AllocatableGeneralRegisterSet result;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(loc == origInputLocations_[i]);

    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        result.addUnchecked(loc.payloadReg());
        continue;
      case OperandLocation::ValueReg:
        result.addUnchecked(loc.valueReg());
        continue;
      case OperandLocation::PayloadStack:
      case OperandLocation::ValueStack:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        continue;
      case OperandLocation::Uninitialized:
        break;
    }
    MOZ_CRASH("Invalid kind");
  }

  return result.set();
}

JSValueType CacheRegisterAllocator::knownType(ValOperandId val) const {
  const OperandLocation& loc = operandLocations_[val.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      return JSVAL_TYPE_UNKNOWN;

    case OperandLocation::PayloadStack:
    case OperandLocation::PayloadReg:
      return loc.payloadType();

    case OperandLocation::Constant:
      return loc.constant().isDouble() ? JSVAL_TYPE_DOUBLE
                                       : loc.constant().extractNonDoubleType();

    case OperandLocation::DoubleReg:
      return JSVAL_TYPE_DOUBLE;

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}

void CacheRegisterAllocator::initInputLocation(
    size_t i, const TypedOrValueRegister& reg) {
  if (reg.hasValue()) {
    initInputLocation(i, reg.valueReg());
  } else if (reg.typedReg().isFloat()) {
    MOZ_ASSERT(reg.type() == MIRType::Double);
    initInputLocation(i, reg.typedReg().fpu());
  } else {
    initInputLocation(i, reg.typedReg().gpr(),
                      ValueTypeFromMIRType(reg.type()));
  }
}

void CacheRegisterAllocator::initInputLocation(
    size_t i, const ConstantOrRegister& value) {
  if (value.constant()) {
    initInputLocation(i, value.value());
  } else {
    initInputLocation(i, value.reg());
  }
}

void CacheRegisterAllocator::spillOperandToStack(MacroAssembler& masm,
                                                 OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  if (loc->kind() == OperandLocation::ValueReg) {
    if (!freeValueSlots_.empty()) {
      uint32_t stackPos = freeValueSlots_.popCopy();
      MOZ_ASSERT(stackPos <= stackPushed_);
      masm.storeValue(loc->valueReg(),
                      Address(masm.getStackPointer(), stackPushed_ - stackPos));
      loc->setValueStack(stackPos);
      return;
    }
    stackPushed_ += sizeof(js::Value);
    masm.pushValue(loc->valueReg());
    loc->setValueStack(stackPushed_);
    return;
  }

  MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);

  if (!freePayloadSlots_.empty()) {
    uint32_t stackPos = freePayloadSlots_.popCopy();
    MOZ_ASSERT(stackPos <= stackPushed_);
    masm.storePtr(loc->payloadReg(),
                  Address(masm.getStackPointer(), stackPushed_ - stackPos));
    loc->setPayloadStack(stackPos, loc->payloadType());
    return;
  }
  stackPushed_ += sizeof(uintptr_t);
  masm.push(loc->payloadReg());
  loc->setPayloadStack(stackPushed_, loc->payloadType());
}

void CacheRegisterAllocator::spillOperandToStackOrRegister(
    MacroAssembler& masm, OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  // If enough registers are available, use them.
  if (loc->kind() == OperandLocation::ValueReg) {
    static const size_t BoxPieces = sizeof(Value) / sizeof(uintptr_t);
    if (availableRegs_.set().size() >= BoxPieces) {
      ValueOperand reg = availableRegs_.takeAnyValue();
      masm.moveValue(loc->valueReg(), reg);
      loc->setValueReg(reg);
      return;
    }
  } else {
    MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
    if (!availableRegs_.empty()) {
      Register reg = availableRegs_.takeAny();
      masm.movePtr(loc->payloadReg(), reg);
      loc->setPayloadReg(reg, loc->payloadType());
      return;
    }
  }

  // Not enough registers available, spill to the stack.
  spillOperandToStack(masm, loc);
}

void CacheRegisterAllocator::popPayload(MacroAssembler& masm,
                                        OperandLocation* loc, Register dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

  // The payload is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->payloadStack() == stackPushed_) {
    masm.pop(dest);
    stackPushed_ -= sizeof(uintptr_t);
  } else {
    MOZ_ASSERT(loc->payloadStack() < stackPushed_);
    masm.loadPtr(payloadAddress(masm, loc), dest);
    masm.propagateOOM(freePayloadSlots_.append(loc->payloadStack()));
  }

  loc->setPayloadReg(dest, loc->payloadType());
}

Address CacheRegisterAllocator::valueAddress(MacroAssembler& masm,
                                             const OperandLocation* loc) const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  return Address(masm.getStackPointer(), stackPushed_ - loc->valueStack());
}

Address CacheRegisterAllocator::payloadAddress(
    MacroAssembler& masm, const OperandLocation* loc) const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  return Address(masm.getStackPointer(), stackPushed_ - loc->payloadStack());
}

void CacheRegisterAllocator::popValue(MacroAssembler& masm,
                                      OperandLocation* loc, ValueOperand dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));

  // The Value is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->valueStack() == stackPushed_) {
    masm.popValue(dest);
    stackPushed_ -= sizeof(js::Value);
  } else {
    MOZ_ASSERT(loc->valueStack() < stackPushed_);
    masm.loadValue(
        Address(masm.getStackPointer(), stackPushed_ - loc->valueStack()),
        dest);
    masm.propagateOOM(freeValueSlots_.append(loc->valueStack()));
  }

  loc->setValueReg(dest);
}

#ifdef DEBUG
void CacheRegisterAllocator::assertValidState() const {
  // Assert different operands don't have aliasing storage. We depend on this
  // when spilling registers, for instance.

  if (!JitOptions.fullDebugChecks) {
    return;
  }

  for (size_t i = 0; i < operandLocations_.length(); i++) {
    const auto& loc1 = operandLocations_[i];
    if (loc1.isUninitialized()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      const auto& loc2 = operandLocations_[j];
      if (loc2.isUninitialized()) {
        continue;
      }
      MOZ_ASSERT(!loc1.aliasesReg(loc2));
    }
  }
}
#endif

bool OperandLocation::aliasesReg(const OperandLocation& other) const {
  MOZ_ASSERT(&other != this);

  switch (other.kind_) {
    case PayloadReg:
      return aliasesReg(other.payloadReg());
    case ValueReg:
      return aliasesReg(other.valueReg());
    case PayloadStack:
    case ValueStack:
    case BaselineFrame:
    case Constant:
    case DoubleReg:
      return false;
    case Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}

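// Restore the IC input operands to their original locations, spilling to the
// stack to break any move cycles, and pop any registers that were spilled.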
void CacheRegisterAllocator::restoreInputState(MacroAssembler& masm,
                                               bool shouldDiscardStack) {
  size_t numInputOperands = origInputLocations_.length();
  MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);

  for (size_t j = 0; j < numInputOperands; j++) {
    const OperandLocation& dest = origInputLocations_[j];
    OperandLocation& cur = operandLocations_[j];
    if (dest == cur) {
      continue;
    }

    auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });

    // We have a cycle if a destination register will be used later
    // as source register. If that happens, just push the current value
    // on the stack and later get it from there.
    for (size_t k = j + 1; k < numInputOperands; k++) {
      OperandLocation& laterSource = operandLocations_[k];
      if (dest.aliasesReg(laterSource)) {
        spillOperandToStack(masm, &laterSource);
      }
    }

    if (dest.kind() == OperandLocation::ValueReg) {
      // We have to restore a Value register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          masm.moveValue(cur.valueReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadReg:
          masm.tagValue(cur.payloadType(), cur.payloadReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadStack: {
          Register scratch = dest.valueReg().scratchReg();
          popPayload(masm, &cur, scratch);
          masm.tagValue(cur.payloadType(), scratch, dest.valueReg());
          continue;
        }
        case OperandLocation::ValueStack:
          popValue(masm, &cur, dest.valueReg());
          continue;
        case OperandLocation::DoubleReg:
          masm.boxDouble(cur.doubleReg(), dest.valueReg(), cur.doubleReg());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::PayloadReg) {
      // We have to restore a payload register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(cur.valueReg(), dest.payloadReg(),
                              dest.payloadType());
          continue;
        case OperandLocation::PayloadReg:
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          masm.mov(cur.payloadReg(), dest.payloadReg());
          continue;
        case OperandLocation::PayloadStack: {
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          popPayload(masm, &cur, dest.payloadReg());
          continue;
        }
        case OperandLocation::ValueStack:
          MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
          MOZ_ASSERT(cur.valueStack() <= stackPushed_);
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(
              Address(masm.getStackPointer(), stackPushed_ - cur.valueStack()),
              dest.payloadReg(), dest.payloadType());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::DoubleReg:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::Constant ||
               dest.kind() == OperandLocation::BaselineFrame ||
               dest.kind() == OperandLocation::DoubleReg) {
      // Nothing to do.
      continue;
    }

    MOZ_CRASH("Invalid kind");
  }

  for (const SpilledRegister& spill : spilledRegs_) {
    MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

    if (spill.stackPushed == stackPushed_) {
      masm.pop(spill.reg);
      stackPushed_ -= sizeof(uintptr_t);
    } else {
      MOZ_ASSERT(spill.stackPushed < stackPushed_);
      masm.loadPtr(
          Address(masm.getStackPointer(), stackPushed_ - spill.stackPushed),
          spill.reg);
    }
  }

  if (shouldDiscardStack) {
    discardStack(masm);
  }
}

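// Walk the stub field types until the Limit terminator to compute the total
// size in bytes of a stub's data.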
size_t CacheIRStubInfo::stubDataSize() const {
  size_t field = 0;
  size_t size = 0;
  while (true) {
    StubField::Type type = fieldType(field++);
    if (type == StubField::Type::Limit) {
      return size;
    }
    size += StubField::sizeInBytes(type);
  }
}

template <typename T>
static GCPtr<T>* AsGCPtr(void* ptr) {
  return static_cast<GCPtr<T>*>(ptr);
}

void CacheIRStubInfo::replaceStubRawWord(uint8_t* stubData, uint32_t offset,
                                         uintptr_t oldWord,
                                         uintptr_t newWord) const {
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
  uintptr_t* addr = reinterpret_cast<uintptr_t*>(stubData + offset);
  MOZ_ASSERT(*addr == oldWord);
  *addr = newWord;
}

template <class Stub, StubField::Type type>
typename MapStubFieldToType<type>::WrappedType& CacheIRStubInfo::getStubField(
    Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  return *reinterpret_cast<WrappedType*>(stubData + offset);
}

#define INSTANTIATE_GET_STUB_FIELD(Type)                                   \
  template typename MapStubFieldToType<Type>::WrappedType&                 \
  CacheIRStubInfo::getStubField<ICCacheIRStub, Type>(ICCacheIRStub * stub, \
                                                     uint32_t offset) const;
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Shape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakShape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakGetterSetter)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::JSObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Symbol)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::String)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakBaseScript)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Value)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Id)
#undef INSTANTIATE_GET_STUB_FIELD

template <class Stub, class T>
T* CacheIRStubInfo::getPtrStubField(Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  return *reinterpret_cast<T**>(stubData + offset);
}

template gc::AllocSite* CacheIRStubInfo::getPtrStubField(ICCacheIRStub* stub,
                                                         uint32_t offset) const;

template <StubField::Type type, typename V>
static void InitWrappedPtr(void* ptr, V val) {
  using RawType = typename MapStubFieldToType<type>::RawType;
  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  auto* wrapped = static_cast<WrappedType*>(ptr);
  new (wrapped) WrappedType(mozilla::BitwiseCast<RawType>(val));
}

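// Initialize a word-sized stub data field in place, wrapping GC pointers in
// the barriered type that matches the field's StubField::Type.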
static void InitWordStubField(StubField::Type type, void* dest,
                              uintptr_t value) {
  MOZ_ASSERT(StubField::sizeIsWord(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uintptr_t)) == 0,
             "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
      *static_cast<uintptr_t*>(dest) = value;
      break;
    case StubField::Type::Shape:
      InitWrappedPtr<StubField::Type::Shape>(dest, value);
      break;
    case StubField::Type::WeakShape:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakShape>(dest, value);
      break;
    case StubField::Type::WeakGetterSetter:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakGetterSetter>(dest, value);
      break;
    case StubField::Type::JSObject:
      InitWrappedPtr<StubField::Type::JSObject>(dest, value);
      break;
    case StubField::Type::WeakObject:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakObject>(dest, value);
      break;
    case StubField::Type::Symbol:
      InitWrappedPtr<StubField::Type::Symbol>(dest, value);
      break;
    case StubField::Type::String:
      InitWrappedPtr<StubField::Type::String>(dest, value);
      break;
    case StubField::Type::WeakBaseScript:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakBaseScript>(dest, value);
      break;
    case StubField::Type::JitCode:
      InitWrappedPtr<StubField::Type::JitCode>(dest, value);
      break;
    case StubField::Type::Id:
      AsGCPtr<jsid>(dest)->init(jsid::fromRawBits(value));
      break;
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
    case StubField::Type::Value:
    case StubField::Type::Limit:
      MOZ_CRASH("Invalid type");
  }
}

static void InitInt64StubField(StubField::Type type, void* dest,
                               uint64_t value) {
  MOZ_ASSERT(StubField::sizeIsInt64(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uint64_t)) == 0, "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
      *static_cast<uint64_t*>(dest) = value;
      break;
    case StubField::Type::Value:
      AsGCPtr<Value>(dest)->init(Value::fromRawBits(value));
      break;
    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
    case StubField::Type::Shape:
    case StubField::Type::WeakShape:
    case StubField::Type::WeakGetterSetter:
    case StubField::Type::JSObject:
    case StubField::Type::WeakObject:
    case StubField::Type::Symbol:
    case StubField::Type::String:
    case StubField::Type::WeakBaseScript:
    case StubField::Type::JitCode:
    case StubField::Type::Id:
    case StubField::Type::Limit:
      MOZ_CRASH("Invalid type");
  }
}

void CacheIRWriter::copyStubData(uint8_t* dest) const {
  MOZ_ASSERT(!failed());

  for (const StubField& field : stubFields_) {
    if (field.sizeIsWord()) {
      InitWordStubField(field.type(), dest, field.asWord());
      dest += sizeof(uintptr_t);
    } else {
      InitInt64StubField(field.type(), dest, field.asInt64());
      dest += sizeof(uint64_t);
    }
  }
}

ICCacheIRStub* ICCacheIRStub::clone(JSRuntime* rt, ICStubSpace& newSpace) {
  const CacheIRStubInfo* info = stubInfo();
  MOZ_ASSERT(info->makesGCCalls());

  size_t bytesNeeded = info->stubDataOffset() + info->stubDataSize();

  AutoEnterOOMUnsafeRegion oomUnsafe;
  void* newStubMem = newSpace.alloc(bytesNeeded);
  if (!newStubMem) {
    oomUnsafe.crash("ICCacheIRStub::clone");
  }

  ICCacheIRStub* newStub = new (newStubMem) ICCacheIRStub(*this);

  const uint8_t* src = this->stubDataStart();
  uint8_t* dest = newStub->stubDataStart();

  // Because this can be called during sweeping when discarding JIT code, we
  // have to lock the store buffer.
  gc::AutoLockStoreBuffer lock(rt);

  uint32_t field = 0;
  while (true) {
    StubField::Type type = info->fieldType(field);
    if (type == StubField::Type::Limit) {
      break;  // Done.
    }

    if (StubField::sizeIsWord(type)) {
      const uintptr_t* srcField = reinterpret_cast<const uintptr_t*>(src);
      InitWordStubField(type, dest, *srcField);
      src += sizeof(uintptr_t);
      dest += sizeof(uintptr_t);
    } else {
      const uint64_t* srcField = reinterpret_cast<const uint64_t*>(src);
      InitInt64StubField(type, dest, *srcField);
      src += sizeof(uint64_t);
      dest += sizeof(uint64_t);
    }

    field++;
  }

  return newStub;
}

template <typename T>
static inline bool ShouldTraceWeakEdgeInStub(JSTracer* trc) {
  if constexpr (std::is_same_v<T, IonICStub>) {
    // 'Weak' edges are traced strongly in IonICs.
    return true;
  } else {
    static_assert(std::is_same_v<T, ICCacheIRStub>);
    return trc->traceWeakEdges();
  }
}

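// Trace every GC pointer in the stub's data, dispatching on the field types
// recorded in the CacheIRStubInfo.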
template <typename T>
void jit::TraceCacheIRStub(JSTracer* trc, T* stub,
                           const CacheIRStubInfo* stubInfo) {
  using Type = StubField::Type;

  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case Type::RawInt32:
      case Type::RawPointer:
      case Type::RawInt64:
      case Type::Double:
        break;
      case Type::Shape: {
        // For CCW IC stubs, we can store same-zone but cross-compartment
        // shapes. Use TraceSameZoneCrossCompartmentEdge to not assert in the
        // GC. Note: CacheIRWriter::writeShapeField asserts we never store
        // cross-zone shapes.
        GCPtr<Shape*>& shapeField =
            stubInfo->getStubField<T, Type::Shape>(stub, offset);
        TraceSameZoneCrossCompartmentEdge(trc, &shapeField, "cacheir-shape");
        break;
      }
      case Type::WeakShape:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          WeakHeapPtr<Shape*>& shapeField =
              stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
          if (shapeField) {
            TraceSameZoneCrossCompartmentEdge(trc, &shapeField,
                                              "cacheir-weak-shape");
          }
        }
        break;
      case Type::WeakGetterSetter:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc,
              &stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset),
              "cacheir-weak-getter-setter");
        }
        break;
      case Type::JSObject: {
        TraceEdge(trc, &stubInfo->getStubField<T, Type::JSObject>(stub, offset),
                  "cacheir-object");
        break;
      }
      case Type::WeakObject:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc, &stubInfo->getStubField<T, Type::WeakObject>(stub, offset),
              "cacheir-weak-object");
        }
        break;
      case Type::Symbol:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Symbol>(stub, offset),
                  "cacheir-symbol");
        break;
      case Type::String:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::String>(stub, offset),
                  "cacheir-string");
        break;
      case Type::WeakBaseScript:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc,
              &stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset),
              "cacheir-weak-script");
        }
        break;
      case Type::JitCode:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::JitCode>(stub, offset),
                  "cacheir-jitcode");
        break;
      case Type::Id:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Id>(stub, offset),
                  "cacheir-id");
        break;
      case Type::Value:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Value>(stub, offset),
                  "cacheir-value");
        break;
      case Type::AllocSite: {
        gc::AllocSite* site =
            stubInfo->getPtrStubField<T, gc::AllocSite>(stub, offset);
        site->trace(trc);
        break;
      }
      case Type::Limit:
        return;  // Done.
    }
    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template void jit::TraceCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                    const CacheIRStubInfo* stubInfo);

template void jit::TraceCacheIRStub(JSTracer* trc, IonICStub* stub,
                                    const CacheIRStubInfo* stubInfo);

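// Sweep the weak fields of the stub's data. Returns false if any weak edge
// is now dead, in which case the stub is no longer valid.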
template <typename T>
bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
                               const CacheIRStubInfo* stubInfo) {
  using Type = StubField::Type;

  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case Type::WeakShape: {
        WeakHeapPtr<Shape*>& shapeField =
            stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
        auto r = TraceWeakEdge(trc, &shapeField, "cacheir-weak-shape");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::WeakObject: {
        WeakHeapPtr<JSObject*>& objectField =
            stubInfo->getStubField<T, Type::WeakObject>(stub, offset);
        auto r = TraceWeakEdge(trc, &objectField, "cacheir-weak-object");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::WeakBaseScript: {
        WeakHeapPtr<BaseScript*>& scriptField =
            stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset);
        auto r = TraceWeakEdge(trc, &scriptField, "cacheir-weak-script");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::WeakGetterSetter: {
        WeakHeapPtr<GetterSetter*>& getterSetterField =
            stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset);
        auto r = TraceWeakEdge(trc, &getterSetterField,
                               "cacheir-weak-getter-setter");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::Limit:
        return true;  // Done.
      case Type::RawInt32:
      case Type::RawPointer:
      case Type::Shape:
      case Type::JSObject:
      case Type::Symbol:
      case Type::String:
      case Type::JitCode:
      case Type::Id:
      case Type::AllocSite:
      case Type::RawInt64:
      case Type::Value:
      case Type::Double:
        break;  // Skip non-weak fields.
    }
    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template bool jit::TraceWeakCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                        const CacheIRStubInfo* stubInfo);

template bool jit::TraceWeakCacheIRStub(JSTracer* trc, IonICStub* stub,
                                        const CacheIRStubInfo* stubInfo);

bool CacheIRWriter::stubDataEquals(const uint8_t* stubData) const {
  MOZ_ASSERT(!failed());

  const uintptr_t* stubDataWords = reinterpret_cast<const uintptr_t*>(stubData);

  for (const StubField& field : stubFields_) {
    if (field.sizeIsWord()) {
      if (field.asWord() != *stubDataWords) {
        return false;
      }
      stubDataWords++;
      continue;
    }

    if (field.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords)) {
      return false;
    }
    stubDataWords += sizeof(uint64_t) / sizeof(uintptr_t);
  }

  return true;
}

bool CacheIRWriter::stubDataEqualsIgnoring(const uint8_t* stubData,
                                           uint32_t ignoreOffset) const {
  MOZ_ASSERT(!failed());

  uint32_t offset = 0;
  for (const StubField& field : stubFields_) {
    if (offset != ignoreOffset) {
      if (field.sizeIsWord()) {
        uintptr_t raw = *reinterpret_cast<const uintptr_t*>(stubData + offset);
        if (field.asWord() != raw) {
          return false;
        }
      } else {
        uint64_t raw = *reinterpret_cast<const uint64_t*>(stubData + offset);
        if (field.asInt64() != raw) {
          return false;
        }
      }
    }
    offset += StubField::sizeInBytes(field.type());
  }

  return true;
}

HashNumber CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l) {
  HashNumber hash = mozilla::HashBytes(l.code, l.length);
  hash = mozilla::AddToHash(hash, uint32_t(l.kind));
  hash = mozilla::AddToHash(hash, uint32_t(l.engine));
  return hash;
}

bool CacheIRStubKey::match(const CacheIRStubKey& entry,
                           const CacheIRStubKey::Lookup& l) {
  if (entry.stubInfo->kind() != l.kind) {
    return false;
  }

  if (entry.stubInfo->engine() != l.engine) {
    return false;
  }

  if (entry.stubInfo->codeLength() != l.length) {
    return false;
  }

  if (!mozilla::ArrayEqual(entry.stubInfo->code(), l.code, l.length)) {
    return false;
  }

  return true;
}

CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
    : CacheIRReader(stubInfo->code(),
                    stubInfo->code() + stubInfo->codeLength()) {}

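// Allocate a CacheIRStubInfo. The CacheIR bytecode and the stub field types
// (terminated by StubField::Type::Limit) are stored inline, directly after
// the CacheIRStubInfo in the same allocation.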
CacheIRStubInfo* CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine,
                                      bool makesGCCalls,
                                      uint32_t stubDataOffset,
                                      const CacheIRWriter& writer) {
  size_t numStubFields = writer.numStubFields();
  size_t bytesNeeded =
      sizeof(CacheIRStubInfo) + writer.codeLength() +
      (numStubFields + 1);  // +1 for the StubField::Type::Limit terminator.
  uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
  if (!p) {
    return nullptr;
  }

  // Copy the CacheIR code.
  uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
  mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());

  static_assert(sizeof(StubField::Type) == sizeof(uint8_t),
                "StubField::Type must fit in uint8_t");

  // Copy the stub field types.
  uint8_t* fieldTypes = codeStart + writer.codeLength();
  for (size_t i = 0; i < numStubFields; i++) {
    fieldTypes[i] = uint8_t(writer.stubFieldType(i));
  }
  fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);

  return new (p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset,
                                 writer.codeLength());
}

bool OperandLocation::operator==(const OperandLocation& other) const {
  if (kind_ != other.kind_) {
    return false;
  }

  switch (kind()) {
    case Uninitialized:
      return true;
    case PayloadReg:
      return payloadReg() == other.payloadReg() &&
             payloadType() == other.payloadType();
    case ValueReg:
      return valueReg() == other.valueReg();
    case PayloadStack:
      return payloadStack() == other.payloadStack() &&
             payloadType() == other.payloadType();
    case ValueStack:
      return valueStack() == other.valueStack();
    case BaselineFrame:
      return baselineFrameSlot() == other.baselineFrameSlot();
    case Constant:
      return constant() == other.constant();
    case DoubleReg:
      return doubleReg() == other.doubleReg();
  }

  MOZ_CRASH("Invalid OperandLocation kind");
}

AutoOutputRegister::AutoOutputRegister(CacheIRCompiler& compiler)
    : output_(compiler.outputUnchecked_.ref()), alloc_(compiler.allocator) {
  if (output_.hasValue()) {
    alloc_.allocateFixedValueRegister(compiler.masm, output_.valueReg());
  } else if (!output_.typedReg().isFloat()) {
    alloc_.allocateFixedRegister(compiler.masm, output_.typedReg().gpr());
  }
}

AutoOutputRegister::~AutoOutputRegister() {
  if (output_.hasValue()) {
    alloc_.releaseValueRegister(output_.valueReg());
  } else if (!output_.typedReg().isFloat()) {
    alloc_.releaseRegister(output_.typedReg().gpr());
  }
}

bool FailurePath::canShareFailurePath(const FailurePath& other) const {
  if (stackPushed_ != other.stackPushed_) {
    return false;
  }

  if (spilledRegs_.length() != other.spilledRegs_.length()) {
    return false;
  }

  for (size_t i = 0; i < spilledRegs_.length(); i++) {
    if (spilledRegs_[i] != other.spilledRegs_[i]) {
      return false;
    }
  }

  MOZ_ASSERT(inputs_.length() == other.inputs_.length());

  for (size_t i = 0; i < inputs_.length(); i++) {
    if (inputs_[i] != other.inputs_[i]) {
      return false;
    }
  }
  return true;
}

bool CacheIRCompiler::addFailurePath(FailurePath** failure) {
#ifdef DEBUG
  allocator.setAddedFailurePath();
#endif
  MOZ_ASSERT(!allocator.hasAutoScratchFloatRegisterSpill());

  FailurePath newFailure;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    if (!newFailure.appendInput(allocator.operandLocation(i))) {
      return false;
    }
  }
  if (!newFailure.setSpilledRegs(allocator.spilledRegs())) {
    return false;
  }
  newFailure.setStackPushed(allocator.stackPushed());

  // Reuse the previous failure path if the current one is the same, to
  // avoid emitting duplicate code.
  if (failurePaths.length() > 0 &&
      failurePaths.back().canShareFailurePath(newFailure)) {
    *failure = &failurePaths.back();
    return true;
  }

  if (!failurePaths.append(std::move(newFailure))) {
    return false;
  }

  *failure = &failurePaths.back();
  return true;
}

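// Emit the out-of-line failure path at |index|: restore the allocator state
// recorded when the path was added, bind its label and restore the inputs.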
bool CacheIRCompiler::emitFailurePath(size_t index) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  FailurePath& failure = failurePaths[index];

  allocator.setStackPushed(failure.stackPushed());

  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    allocator.setOperandLocation(i, failure.input(i));
  }

  if (!allocator.setSpilledRegs(failure.spilledRegs())) {
    return false;
  }

  masm.bind(failure.label());
  allocator.restoreInputState(masm);
  return true;
}

bool CacheIRCompiler::emitGuardIsNumber(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);

  // Doubles and ints are numbers!
  if (knownType == JSVAL_TYPE_DOUBLE || knownType == JSVAL_TYPE_INT32) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardToObject(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_OBJECT) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestObject(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsNullOrUndefined(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_UNDEFINED || knownType == JSVAL_TYPE_NULL) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label success;
  masm.branchTestNull(Assembler::Equal, input, &success);
  masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());

  masm.bind(&success);
  return true;
}

bool CacheIRCompiler::emitGuardIsNull(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_NULL) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestNull(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsUndefined(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_UNDEFINED) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsNotUninitializedLexical(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand val = allocator.useValueRegister(masm, valId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestMagicValue(Assembler::Equal, val, JS_UNINITIALIZED_LEXICAL,
                            failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardBooleanToInt32(ValOperandId inputId,
                                              Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
    Register input =
        allocator.useRegister(masm, BooleanOperandId(inputId.id()));
    masm.move32(input, output);
    return true;
  }
  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.fallibleUnboxBoolean(input, output, failure->label());
  return true;
}

1795 bool CacheIRCompiler::emitGuardToString(ValOperandId inputId) {
1796 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1797 if (allocator.knownType(inputId) == JSVAL_TYPE_STRING) {
1798 return true;
1801 ValueOperand input = allocator.useValueRegister(masm, inputId);
1802 FailurePath* failure;
1803 if (!addFailurePath(&failure)) {
1804 return false;
1806 masm.branchTestString(Assembler::NotEqual, input, failure->label());
1807 return true;
1810 bool CacheIRCompiler::emitGuardToSymbol(ValOperandId inputId) {
1811 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1812 if (allocator.knownType(inputId) == JSVAL_TYPE_SYMBOL) {
1813 return true;
1816 ValueOperand input = allocator.useValueRegister(masm, inputId);
1817 FailurePath* failure;
1818 if (!addFailurePath(&failure)) {
1819 return false;
1821 masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
1822 return true;
1825 bool CacheIRCompiler::emitGuardToBigInt(ValOperandId inputId) {
1826 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1827 if (allocator.knownType(inputId) == JSVAL_TYPE_BIGINT) {
1828 return true;
1831 ValueOperand input = allocator.useValueRegister(masm, inputId);
1832 FailurePath* failure;
1833 if (!addFailurePath(&failure)) {
1834 return false;
1836 masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
1837 return true;
1840 bool CacheIRCompiler::emitGuardToBoolean(ValOperandId inputId) {
1841 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1843 if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
1844 return true;
1847 ValueOperand input = allocator.useValueRegister(masm, inputId);
1848 FailurePath* failure;
1849 if (!addFailurePath(&failure)) {
1850 return false;
1852 masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
1853 return true;
1856 bool CacheIRCompiler::emitGuardToInt32(ValOperandId inputId) {
1857 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1859 if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
1860 return true;
1863 ValueOperand input = allocator.useValueRegister(masm, inputId);
1865 FailurePath* failure;
1866 if (!addFailurePath(&failure)) {
1867 return false;
1870 masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
1871 return true;
1874 bool CacheIRCompiler::emitGuardToNonGCThing(ValOperandId inputId) {
1875 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1877 ValueOperand input = allocator.useValueRegister(masm, inputId);
1879 FailurePath* failure;
1880 if (!addFailurePath(&failure)) {
1881 return false;
1884 masm.branchTestGCThing(Assembler::Equal, input, failure->label());
1885 return true;
1888 // Infallible |emitDouble| emitters can use this implementation to avoid
1889 // generating extra clean-up instructions to restore the scratch float
1890 // register. To select this overload, simply omit the |Label* fail| parameter
1891 // from the emitter lambda.
1892 template <typename EmitDouble>
1893 static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 1,
1894 void>
1895 EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
1896 ValueOperand input, FailurePath* failure,
1897 EmitDouble emitDouble) {
1898 AutoScratchFloatRegister floatReg(compiler);
1900 masm.unboxDouble(input, floatReg);
1901 emitDouble(floatReg.get());
1904 template <typename EmitDouble>
1905 static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 2,
1906 void>
1907 EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
1908 ValueOperand input, FailurePath* failure,
1909 EmitDouble emitDouble) {
1910 AutoScratchFloatRegister floatReg(compiler, failure);
1912 masm.unboxDouble(input, floatReg);
1913 emitDouble(floatReg.get(), floatReg.failure());
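// Usage sketch for the two overloads above: the emitter lambda's arity does
// the selection. An infallible emitter omits the label,
//
//   EmitGuardDouble(this, masm, input, failure,
//                   [&](FloatRegister floatReg) { /* cannot bail */ });
//
// while a fallible emitter takes |Label* fail| as a second parameter and gets
// the failure-path float-register clean-up,
//
//   EmitGuardDouble(this, masm, input, failure,
//                   [&](FloatRegister floatReg, Label* fail) { /* may bail */ });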
1916 template <typename EmitInt32, typename EmitDouble>
1917 static void EmitGuardInt32OrDouble(CacheIRCompiler* compiler,
1918 MacroAssembler& masm, ValueOperand input,
1919 Register output, FailurePath* failure,
1920 EmitInt32 emitInt32, EmitDouble emitDouble) {
1921 Label done;
1924 ScratchTagScope tag(masm, input);
1925 masm.splitTagForTest(input, tag);
1927 Label notInt32;
1928 masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
1930 ScratchTagScopeRelease _(&tag);
1932 masm.unboxInt32(input, output);
1933 emitInt32();
1935 masm.jump(&done);
1937 masm.bind(&notInt32);
1939 masm.branchTestDouble(Assembler::NotEqual, tag, failure->label());
1941 ScratchTagScopeRelease _(&tag);
1943 EmitGuardDouble(compiler, masm, input, failure, emitDouble);
1947 masm.bind(&done);
1950 bool CacheIRCompiler::emitGuardToInt32Index(ValOperandId inputId,
1951 Int32OperandId resultId) {
1952 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1953 Register output = allocator.defineRegister(masm, resultId);
1955 if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
1956 Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
1957 masm.move32(input, output);
1958 return true;
1961 ValueOperand input = allocator.useValueRegister(masm, inputId);
1963 FailurePath* failure;
1964 if (!addFailurePath(&failure)) {
1965 return false;
1968 EmitGuardInt32OrDouble(
1969 this, masm, input, output, failure,
1970 []() {
1971 // No-op if the value is already an int32.
1973 [&](FloatRegister floatReg, Label* fail) {
1974 // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
1975 masm.convertDoubleToInt32(floatReg, output, fail, false);
1978 return true;
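// For example, the double 3.0 converts to the int32 index 3 and -0.0
// truncates to 0 (ToPropertyKey treats them identically), while 3.5 or NaN
// takes the failure path because it has no int32 index representation.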
1981 bool CacheIRCompiler::emitInt32ToIntPtr(Int32OperandId inputId,
1982 IntPtrOperandId resultId) {
1983 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1985 Register input = allocator.useRegister(masm, inputId);
1986 Register output = allocator.defineRegister(masm, resultId);
1988 masm.move32SignExtendToPtr(input, output);
1989 return true;
1992 bool CacheIRCompiler::emitGuardNumberToIntPtrIndex(NumberOperandId inputId,
1993 bool supportOOB,
1994 IntPtrOperandId resultId) {
1995 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1997 Register output = allocator.defineRegister(masm, resultId);
1999 FailurePath* failure = nullptr;
2000 if (!supportOOB) {
2001 if (!addFailurePath(&failure)) {
2002 return false;
2006 AutoScratchFloatRegister floatReg(this, failure);
2007 allocator.ensureDoubleRegister(masm, inputId, floatReg);
2009 // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
2010 if (supportOOB) {
2011 Label done, fail;
2012 masm.convertDoubleToPtr(floatReg, output, &fail, false);
2013 masm.jump(&done);
2015 // Substitute the invalid index with an arbitrary out-of-bounds index.
2016 masm.bind(&fail);
2017 masm.movePtr(ImmWord(-1), output);
2019 masm.bind(&done);
2020 } else {
2021 masm.convertDoubleToPtr(floatReg, output, floatReg.failure(), false);
2024 return true;
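// For example, with |supportOOB| an input like 4.5 or NaN is replaced by the
// out-of-bounds index -1 and left for a later bounds check to reject; without
// |supportOOB| the same input bails out of the stub instead.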
2027 bool CacheIRCompiler::emitGuardToInt32ModUint32(ValOperandId inputId,
2028 Int32OperandId resultId) {
2029 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2030 Register output = allocator.defineRegister(masm, resultId);
2032 if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
2033 ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
2034 if (input.constant()) {
2035 masm.move32(Imm32(input.value().toInt32()), output);
2036 } else {
2037 MOZ_ASSERT(input.reg().type() == MIRType::Int32);
2038 masm.move32(input.reg().typedReg().gpr(), output);
2040 return true;
2043 ValueOperand input = allocator.useValueRegister(masm, inputId);
2045 FailurePath* failure;
2046 if (!addFailurePath(&failure)) {
2047 return false;
2050 EmitGuardInt32OrDouble(
2051 this, masm, input, output, failure,
2052 []() {
2053 // No-op if the value is already an int32.
2055 [&](FloatRegister floatReg, Label* fail) {
2056 masm.branchTruncateDoubleMaybeModUint32(floatReg, output, fail);
2059 return true;
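// The "MaybeMod" truncation may wrap modulo 2^32 where the platform supports
// it, e.g. the double 4294967295.0 becomes the int32 -1, matching |x | 0|
// semantics; doubles it cannot handle inline take the failure path.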
2062 bool CacheIRCompiler::emitGuardToUint8Clamped(ValOperandId inputId,
2063 Int32OperandId resultId) {
2064 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2065 Register output = allocator.defineRegister(masm, resultId);
2067 if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
2068 ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
2069 if (input.constant()) {
2070 masm.move32(Imm32(ClampDoubleToUint8(input.value().toInt32())), output);
2071 } else {
2072 MOZ_ASSERT(input.reg().type() == MIRType::Int32);
2073 masm.move32(input.reg().typedReg().gpr(), output);
2074 masm.clampIntToUint8(output);
2076 return true;
2079 ValueOperand input = allocator.useValueRegister(masm, inputId);
2081 FailurePath* failure;
2082 if (!addFailurePath(&failure)) {
2083 return false;
2086 EmitGuardInt32OrDouble(
2087 this, masm, input, output, failure,
2088 [&]() {
2089 // |output| holds the unboxed int32 value.
2090 masm.clampIntToUint8(output);
2092 [&](FloatRegister floatReg) {
2093 masm.clampDoubleToUint8(floatReg, output);
2096 return true;
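// Clamping examples: -5 -> 0 and 300 -> 255, while fractional doubles round
// to the nearest integer with ties to even (e.g. 2.5 -> 2), per the
// Uint8ClampedArray conversion rules.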
2099 bool CacheIRCompiler::emitGuardNonDoubleType(ValOperandId inputId,
2100 ValueType type) {
2101 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2103 if (allocator.knownType(inputId) == JSValueType(type)) {
2104 return true;
2107 ValueOperand input = allocator.useValueRegister(masm, inputId);
2109 FailurePath* failure;
2110 if (!addFailurePath(&failure)) {
2111 return false;
2114 switch (type) {
2115 case ValueType::String:
2116 masm.branchTestString(Assembler::NotEqual, input, failure->label());
2117 break;
2118 case ValueType::Symbol:
2119 masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
2120 break;
2121 case ValueType::BigInt:
2122 masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
2123 break;
2124 case ValueType::Int32:
2125 masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
2126 break;
2127 case ValueType::Boolean:
2128 masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
2129 break;
2130 case ValueType::Undefined:
2131 masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
2132 break;
2133 case ValueType::Null:
2134 masm.branchTestNull(Assembler::NotEqual, input, failure->label());
2135 break;
2136 case ValueType::Double:
2137 case ValueType::Magic:
2138 case ValueType::PrivateGCThing:
2139 case ValueType::Object:
2140 #ifdef ENABLE_RECORD_TUPLE
2141 case ValueType::ExtendedPrimitive:
2142 #endif
2143 MOZ_CRASH("unexpected type");
2144 }
2146 return true;
2149 bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
2150 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2151 Register obj = allocator.useRegister(masm, objId);
2152 AutoScratchRegister scratch(allocator, masm);
2154 FailurePath* failure;
2155 if (!addFailurePath(&failure)) {
2156 return false;
2159 if (kind == GuardClassKind::JSFunction) {
2160 if (objectGuardNeedsSpectreMitigations(objId)) {
2161 masm.branchTestObjIsFunction(Assembler::NotEqual, obj, scratch, obj,
2162 failure->label());
2163 } else {
2164 masm.branchTestObjIsFunctionNoSpectreMitigations(
2165 Assembler::NotEqual, obj, scratch, failure->label());
2167 return true;
2170 const JSClass* clasp = nullptr;
2171 switch (kind) {
2172 case GuardClassKind::Array:
2173 clasp = &ArrayObject::class_;
2174 break;
2175 case GuardClassKind::PlainObject:
2176 clasp = &PlainObject::class_;
2177 break;
2178 case GuardClassKind::ArrayBuffer:
2179 clasp = &ArrayBufferObject::class_;
2180 break;
2181 case GuardClassKind::SharedArrayBuffer:
2182 clasp = &SharedArrayBufferObject::class_;
2183 break;
2184 case GuardClassKind::DataView:
2185 clasp = &DataViewObject::class_;
2186 break;
2187 case GuardClassKind::MappedArguments:
2188 clasp = &MappedArgumentsObject::class_;
2189 break;
2190 case GuardClassKind::UnmappedArguments:
2191 clasp = &UnmappedArgumentsObject::class_;
2192 break;
2193 case GuardClassKind::WindowProxy:
2194 clasp = cx_->runtime()->maybeWindowProxyClass();
2195 break;
2196 case GuardClassKind::Set:
2197 clasp = &SetObject::class_;
2198 break;
2199 case GuardClassKind::Map:
2200 clasp = &MapObject::class_;
2201 break;
2202 case GuardClassKind::BoundFunction:
2203 clasp = &BoundFunctionObject::class_;
2204 break;
2205 case GuardClassKind::JSFunction:
2206 MOZ_CRASH("JSFunction handled before switch");
2207 }
2208 MOZ_ASSERT(clasp);
2210 if (objectGuardNeedsSpectreMitigations(objId)) {
2211 masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
2212 failure->label());
2213 } else {
2214 masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
2215 scratch, failure->label());
2218 return true;
2221 bool CacheIRCompiler::emitGuardNullProto(ObjOperandId objId) {
2222 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2223 Register obj = allocator.useRegister(masm, objId);
2224 AutoScratchRegister scratch(allocator, masm);
2226 FailurePath* failure;
2227 if (!addFailurePath(&failure)) {
2228 return false;
2231 masm.loadObjProto(obj, scratch);
2232 masm.branchTestPtr(Assembler::NonZero, scratch, scratch, failure->label());
2233 return true;
2236 bool CacheIRCompiler::emitGuardIsExtensible(ObjOperandId objId) {
2237 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2238 Register obj = allocator.useRegister(masm, objId);
2239 AutoScratchRegister scratch(allocator, masm);
2241 FailurePath* failure;
2242 if (!addFailurePath(&failure)) {
2243 return false;
2246 masm.branchIfObjectNotExtensible(obj, scratch, failure->label());
2247 return true;
2250 bool CacheIRCompiler::emitGuardDynamicSlotIsSpecificObject(
2251 ObjOperandId objId, ObjOperandId expectedId, uint32_t slotOffset) {
2252 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2253 Register obj = allocator.useRegister(masm, objId);
2254 Register expectedObject = allocator.useRegister(masm, expectedId);
2256 // Allocate registers before the failure path to make sure they're registered
2257 // by addFailurePath.
2258 AutoScratchRegister scratch1(allocator, masm);
2259 AutoScratchRegister scratch2(allocator, masm);
2261 FailurePath* failure;
2262 if (!addFailurePath(&failure)) {
2263 return false;
2266 // Guard on the expected object.
2267 StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
2268 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
2269 emitLoadStubField(slot, scratch2);
2270 BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
2271 masm.fallibleUnboxObject(expectedSlot, scratch1, failure->label());
2272 masm.branchPtr(Assembler::NotEqual, expectedObject, scratch1,
2273 failure->label());
2275 return true;
2278 bool CacheIRCompiler::emitGuardDynamicSlotIsNotObject(ObjOperandId objId,
2279 uint32_t slotOffset) {
2280 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2281 Register obj = allocator.useRegister(masm, objId);
2283 AutoScratchRegister scratch1(allocator, masm);
2284 AutoScratchRegister scratch2(allocator, masm);
2286 FailurePath* failure;
2287 if (!addFailurePath(&failure)) {
2288 return false;
2291 // Guard that the slot isn't an object.
2292 StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
2293 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
2294 emitLoadStubField(slot, scratch2);
2295 BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
2296 masm.branchTestObject(Assembler::Equal, expectedSlot, failure->label());
2298 return true;
2301 bool CacheIRCompiler::emitGuardFixedSlotValue(ObjOperandId objId,
2302 uint32_t offsetOffset,
2303 uint32_t valOffset) {
2304 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2306 Register obj = allocator.useRegister(masm, objId);
2308 AutoScratchRegister scratch(allocator, masm);
2309 AutoScratchValueRegister scratchVal(allocator, masm);
2311 FailurePath* failure;
2312 if (!addFailurePath(&failure)) {
2313 return false;
2316 StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
2317 emitLoadStubField(offset, scratch);
2319 StubFieldOffset val(valOffset, StubField::Type::Value);
2320 emitLoadValueStubField(val, scratchVal);
2322 BaseIndex slotVal(obj, scratch, TimesOne);
2323 masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
2324 failure->label());
2325 return true;
2328 bool CacheIRCompiler::emitGuardDynamicSlotValue(ObjOperandId objId,
2329 uint32_t offsetOffset,
2330 uint32_t valOffset) {
2331 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2333 Register obj = allocator.useRegister(masm, objId);
2335 AutoScratchRegister scratch1(allocator, masm);
2336 AutoScratchRegister scratch2(allocator, masm);
2337 AutoScratchValueRegister scratchVal(allocator, masm);
2339 FailurePath* failure;
2340 if (!addFailurePath(&failure)) {
2341 return false;
2344 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
2346 StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
2347 emitLoadStubField(offset, scratch2);
2349 StubFieldOffset val(valOffset, StubField::Type::Value);
2350 emitLoadValueStubField(val, scratchVal);
2352 BaseIndex slotVal(scratch1, scratch2, TimesOne);
2353 masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
2354 failure->label());
2355 return true;
2358 bool CacheIRCompiler::emitLoadScriptedProxyHandler(ValOperandId resultId,
2359 ObjOperandId objId) {
2360 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2362 Register obj = allocator.useRegister(masm, objId);
2363 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2365 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
2366 output.scratchReg());
2367 masm.loadValue(
2368 Address(output.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
2369 ScriptedProxyHandler::HANDLER_EXTRA)),
2370 output);
2371 return true;
2374 bool CacheIRCompiler::emitIdToStringOrSymbol(ValOperandId resultId,
2375 ValOperandId idId) {
2376 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2378 ValueOperand id = allocator.useValueRegister(masm, idId);
2379 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2380 AutoScratchRegister scratch(allocator, masm);
2382 FailurePath* failure;
2383 if (!addFailurePath(&failure)) {
2384 return false;
2387 masm.moveValue(id, output);
2389 Label done, intDone, callVM;
2391 ScratchTagScope tag(masm, output);
2392 masm.splitTagForTest(output, tag);
2393 masm.branchTestString(Assembler::Equal, tag, &done);
2394 masm.branchTestSymbol(Assembler::Equal, tag, &done);
2395 masm.branchTestInt32(Assembler::NotEqual, tag, failure->label());
2398 Register intReg = output.scratchReg();
2399 masm.unboxInt32(output, intReg);
2401 // Fast path for small integers.
2402 masm.lookupStaticIntString(intReg, intReg, scratch, cx_->staticStrings(),
2403 &callVM);
2404 masm.jump(&intDone);
2406 masm.bind(&callVM);
2407 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
2408 liveVolatileFloatRegs());
2409 masm.PushRegsInMask(volatileRegs);
2411 using Fn = JSLinearString* (*)(JSContext* cx, int32_t i);
2412 masm.setupUnalignedABICall(scratch);
2413 masm.loadJSContext(scratch);
2414 masm.passABIArg(scratch);
2415 masm.passABIArg(intReg);
2416 masm.callWithABI<Fn, js::Int32ToStringPure>();
2418 masm.storeCallPointerResult(intReg);
2420 LiveRegisterSet ignore;
2421 ignore.add(intReg);
2422 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
2424 masm.branchPtr(Assembler::Equal, intReg, ImmPtr(nullptr), failure->label());
2426 masm.bind(&intDone);
2427 masm.tagValue(JSVAL_TYPE_STRING, intReg, output);
2428 masm.bind(&done);
2430 return true;
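// For example, an int32 id such as 5 becomes the string "5": small integers
// are looked up inline in the static-strings table, and everything else
// calls Int32ToStringPure, failing the stub only if that call returns null
// on OOM.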
2433 bool CacheIRCompiler::emitLoadFixedSlot(ValOperandId resultId,
2434 ObjOperandId objId,
2435 uint32_t offsetOffset) {
2436 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2438 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2439 Register obj = allocator.useRegister(masm, objId);
2440 AutoScratchRegister scratch(allocator, masm);
2442 StubFieldOffset slotIndex(offsetOffset, StubField::Type::RawInt32);
2443 emitLoadStubField(slotIndex, scratch);
2445 masm.loadValue(BaseIndex(obj, scratch, TimesOne), output);
2446 return true;
2449 bool CacheIRCompiler::emitLoadDynamicSlot(ValOperandId resultId,
2450 ObjOperandId objId,
2451 uint32_t slotOffset) {
2452 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2454 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2455 Register obj = allocator.useRegister(masm, objId);
2456 AutoScratchRegister scratch1(allocator, masm);
2457 Register scratch2 = output.scratchReg();
2459 StubFieldOffset slotIndex(slotOffset, StubField::Type::RawInt32);
2460 emitLoadStubField(slotIndex, scratch2);
2462 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
2463 masm.loadValue(BaseObjectSlotIndex(scratch1, scratch2), output);
2464 return true;
2467 bool CacheIRCompiler::emitGuardIsNativeObject(ObjOperandId objId) {
2468 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2470 Register obj = allocator.useRegister(masm, objId);
2471 AutoScratchRegister scratch(allocator, masm);
2473 FailurePath* failure;
2474 if (!addFailurePath(&failure)) {
2475 return false;
2478 masm.branchIfNonNativeObj(obj, scratch, failure->label());
2479 return true;
2482 bool CacheIRCompiler::emitGuardIsProxy(ObjOperandId objId) {
2483 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2485 Register obj = allocator.useRegister(masm, objId);
2486 AutoScratchRegister scratch(allocator, masm);
2488 FailurePath* failure;
2489 if (!addFailurePath(&failure)) {
2490 return false;
2493 masm.branchTestObjectIsProxy(false, obj, scratch, failure->label());
2494 return true;
2497 bool CacheIRCompiler::emitGuardIsNotProxy(ObjOperandId objId) {
2498 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2500 Register obj = allocator.useRegister(masm, objId);
2501 AutoScratchRegister scratch(allocator, masm);
2503 FailurePath* failure;
2504 if (!addFailurePath(&failure)) {
2505 return false;
2508 masm.branchTestObjectIsProxy(true, obj, scratch, failure->label());
2509 return true;
2512 bool CacheIRCompiler::emitGuardIsNotArrayBufferMaybeShared(ObjOperandId objId) {
2513 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2515 Register obj = allocator.useRegister(masm, objId);
2516 AutoScratchRegister scratch(allocator, masm);
2518 FailurePath* failure;
2519 if (!addFailurePath(&failure)) {
2520 return false;
2523 masm.loadObjClassUnsafe(obj, scratch);
2524 masm.branchPtr(Assembler::Equal, scratch, ImmPtr(&ArrayBufferObject::class_),
2525 failure->label());
2526 masm.branchPtr(Assembler::Equal, scratch,
2527 ImmPtr(&SharedArrayBufferObject::class_), failure->label());
2528 return true;
2531 bool CacheIRCompiler::emitGuardIsTypedArray(ObjOperandId objId) {
2532 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2534 Register obj = allocator.useRegister(masm, objId);
2535 AutoScratchRegister scratch(allocator, masm);
2537 FailurePath* failure;
2538 if (!addFailurePath(&failure)) {
2539 return false;
2542 masm.loadObjClassUnsafe(obj, scratch);
2543 masm.branchIfClassIsNotTypedArray(scratch, failure->label());
2544 return true;
2547 bool CacheIRCompiler::emitGuardIsNotDOMProxy(ObjOperandId objId) {
2548 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2549 Register obj = allocator.useRegister(masm, objId);
2550 AutoScratchRegister scratch(allocator, masm);
2552 FailurePath* failure;
2553 if (!addFailurePath(&failure)) {
2554 return false;
2557 masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, scratch,
2558 GetDOMProxyHandlerFamily(),
2559 failure->label());
2560 return true;
2563 bool CacheIRCompiler::emitGuardNoDenseElements(ObjOperandId objId) {
2564 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2565 Register obj = allocator.useRegister(masm, objId);
2566 AutoScratchRegister scratch(allocator, masm);
2568 FailurePath* failure;
2569 if (!addFailurePath(&failure)) {
2570 return false;
2573 // Load obj->elements.
2574 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
2576 // Make sure there are no dense elements.
2577 Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
2578 masm.branch32(Assembler::NotEqual, initLength, Imm32(0), failure->label());
2579 return true;
2582 bool CacheIRCompiler::emitGuardSpecificInt32(Int32OperandId numId,
2583 int32_t expected) {
2584 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2585 Register num = allocator.useRegister(masm, numId);
2587 FailurePath* failure;
2588 if (!addFailurePath(&failure)) {
2589 return false;
2592 masm.branch32(Assembler::NotEqual, num, Imm32(expected), failure->label());
2593 return true;
2596 bool CacheIRCompiler::emitGuardStringToInt32(StringOperandId strId,
2597 Int32OperandId resultId) {
2598 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2599 Register str = allocator.useRegister(masm, strId);
2600 Register output = allocator.defineRegister(masm, resultId);
2601 AutoScratchRegister scratch(allocator, masm);
2603 FailurePath* failure;
2604 if (!addFailurePath(&failure)) {
2605 return false;
2608 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
2609 liveVolatileFloatRegs());
2610 masm.guardStringToInt32(str, output, scratch, volatileRegs, failure->label());
2611 return true;
2614 bool CacheIRCompiler::emitGuardStringToNumber(StringOperandId strId,
2615 NumberOperandId resultId) {
2616 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2617 Register str = allocator.useRegister(masm, strId);
2618 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2619 AutoScratchRegister scratch(allocator, masm);
2621 FailurePath* failure;
2622 if (!addFailurePath(&failure)) {
2623 return false;
2626 Label vmCall, done;
2627 // Use indexed value as fast path if possible.
2628 masm.loadStringIndexValue(str, scratch, &vmCall);
2629 masm.tagValue(JSVAL_TYPE_INT32, scratch, output);
2630 masm.jump(&done);
2632 masm.bind(&vmCall);
2634 // Reserve stack for holding the result value of the call.
2635 masm.reserveStack(sizeof(double));
2636 masm.moveStackPtrTo(output.payloadOrValueReg());
2638 // We cannot use callVM here: callVM expects to be able to clobber all
2639 // operands. However, since this op is not the last one in the generated IC,
2640 // we still want to be able to reference other live values.
2641 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
2642 liveVolatileFloatRegs());
2643 masm.PushRegsInMask(volatileRegs);
2645 using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
2646 masm.setupUnalignedABICall(scratch);
2647 masm.loadJSContext(scratch);
2648 masm.passABIArg(scratch);
2649 masm.passABIArg(str);
2650 masm.passABIArg(output.payloadOrValueReg());
2651 masm.callWithABI<Fn, js::StringToNumberPure>();
2652 masm.storeCallPointerResult(scratch);
2654 LiveRegisterSet ignore;
2655 ignore.add(scratch);
2656 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
2658 Label ok;
2659 masm.branchIfTrueBool(scratch, &ok);
2661 // OOM path, recovered by StringToNumberPure.
2663 // Use addToStackPtr instead of freeStack as freeStack tracks stack height
2664 // flow-insensitively, and using it twice would confuse the stack height
2665 // tracking.
2666 masm.addToStackPtr(Imm32(sizeof(double)));
2667 masm.jump(failure->label());
2669 masm.bind(&ok);
2672 ScratchDoubleScope fpscratch(masm);
2673 masm.loadDouble(Address(output.payloadOrValueReg(), 0), fpscratch);
2674 masm.boxDouble(fpscratch, output, fpscratch);
2676 masm.freeStack(sizeof(double));
2678 masm.bind(&done);
2679 return true;
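// For example, a string like "12" with a cached index value is boxed as an
// int32 entirely inline, whereas "3.5" has no index value and goes through
// StringToNumberPure, coming back as a boxed double.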
2682 bool CacheIRCompiler::emitNumberParseIntResult(StringOperandId strId,
2683 Int32OperandId radixId) {
2684 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2686 AutoCallVM callvm(masm, this, allocator);
2688 Register str = allocator.useRegister(masm, strId);
2689 Register radix = allocator.useRegister(masm, radixId);
2690 AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());
2692 #ifdef DEBUG
2693 Label ok;
2694 masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
2695 masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
2696 masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
2697 masm.bind(&ok);
2698 #endif
2700 // Discard the stack to ensure it's balanced when we skip the vm-call.
2701 allocator.discardStack(masm);
2703 // Use indexed value as fast path if possible.
2704 Label vmCall, done;
2705 masm.loadStringIndexValue(str, scratch, &vmCall);
2706 masm.tagValue(JSVAL_TYPE_INT32, scratch, callvm.outputValueReg());
2707 masm.jump(&done);
2709 masm.bind(&vmCall);
2711 callvm.prepare();
2712 masm.Push(radix);
2713 masm.Push(str);
2715 using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
2716 callvm.call<Fn, js::NumberParseInt>();
2718 masm.bind(&done);
2719 return true;
2722 bool CacheIRCompiler::emitDoubleParseIntResult(NumberOperandId numId) {
2723 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2725 AutoOutputRegister output(*this);
2726 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2727 AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
2728 AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
2730 FailurePath* failure;
2731 if (!addFailurePath(&failure)) {
2732 return false;
2735 allocator.ensureDoubleRegister(masm, numId, floatScratch1);
2737 masm.branchDouble(Assembler::DoubleUnordered, floatScratch1, floatScratch1,
2738 failure->label());
2739 masm.branchTruncateDoubleToInt32(floatScratch1, scratch, failure->label());
2741 Label ok;
2742 masm.branch32(Assembler::NotEqual, scratch, Imm32(0), &ok);
2744 // Accept both +0 and -0 and return 0.
2745 masm.loadConstantDouble(0.0, floatScratch2);
2746 masm.branchDouble(Assembler::DoubleEqual, floatScratch1, floatScratch2,
2747 &ok);
2749 // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
2750 masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, floatScratch2);
2751 masm.branchDouble(Assembler::DoubleLessThan, floatScratch1, floatScratch2,
2752 failure->label());
2754 masm.bind(&ok);
2756 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2757 return true;
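// This mirrors |parseInt(ToString(d))| for the easy cases: 3.7 truncates to
// 3 and -0.0 yields 0, while tiny non-zero inputs such as 1e-7 bail out
// because ToString would produce exponential notation ("1e-7"), for which
// parseInt returns 1 rather than 0.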
2760 bool CacheIRCompiler::emitBooleanToNumber(BooleanOperandId booleanId,
2761 NumberOperandId resultId) {
2762 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2763 Register boolean = allocator.useRegister(masm, booleanId);
2764 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2765 masm.tagValue(JSVAL_TYPE_INT32, boolean, output);
2766 return true;
2769 bool CacheIRCompiler::emitGuardStringToIndex(StringOperandId strId,
2770 Int32OperandId resultId) {
2771 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2772 Register str = allocator.useRegister(masm, strId);
2773 Register output = allocator.defineRegister(masm, resultId);
2775 FailurePath* failure;
2776 if (!addFailurePath(&failure)) {
2777 return false;
2780 Label vmCall, done;
2781 masm.loadStringIndexValue(str, output, &vmCall);
2782 masm.jump(&done);
2785 masm.bind(&vmCall);
2786 LiveRegisterSet save(GeneralRegisterSet::Volatile(),
2787 liveVolatileFloatRegs());
2788 masm.PushRegsInMask(save);
2790 using Fn = int32_t (*)(JSString* str);
2791 masm.setupUnalignedABICall(output);
2792 masm.passABIArg(str);
2793 masm.callWithABI<Fn, GetIndexFromString>();
2794 masm.storeCallInt32Result(output);
2796 LiveRegisterSet ignore;
2797 ignore.add(output);
2798 masm.PopRegsInMaskIgnore(save, ignore);
2800 // GetIndexFromString returns a negative value on failure.
2801 masm.branchTest32(Assembler::Signed, output, output, failure->label());
2804 masm.bind(&done);
2805 return true;
2808 bool CacheIRCompiler::emitLoadProto(ObjOperandId objId, ObjOperandId resultId) {
2809 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2810 Register obj = allocator.useRegister(masm, objId);
2811 Register reg = allocator.defineRegister(masm, resultId);
2812 masm.loadObjProto(obj, reg);
2814 #ifdef DEBUG
2815 // We shouldn't encounter a null or lazy proto.
2816 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
2818 Label done;
2819 masm.branchPtr(Assembler::Above, reg, ImmWord(1), &done);
2820 masm.assumeUnreachable("Unexpected null or lazy proto in CacheIR LoadProto");
2821 masm.bind(&done);
2822 #endif
2823 return true;
2826 bool CacheIRCompiler::emitLoadEnclosingEnvironment(ObjOperandId objId,
2827 ObjOperandId resultId) {
2828 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2829 Register obj = allocator.useRegister(masm, objId);
2830 Register reg = allocator.defineRegister(masm, resultId);
2831 masm.unboxObject(
2832 Address(obj, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
2833 return true;
2836 bool CacheIRCompiler::emitLoadWrapperTarget(ObjOperandId objId,
2837 ObjOperandId resultId) {
2838 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2839 Register obj = allocator.useRegister(masm, objId);
2840 Register reg = allocator.defineRegister(masm, resultId);
2842 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);
2843 masm.unboxObject(
2844 Address(reg, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()), reg);
2845 return true;
2848 bool CacheIRCompiler::emitLoadValueTag(ValOperandId valId,
2849 ValueTagOperandId resultId) {
2850 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2851 ValueOperand val = allocator.useValueRegister(masm, valId);
2852 Register res = allocator.defineRegister(masm, resultId);
2854 Register tag = masm.extractTag(val, res);
2855 if (tag != res) {
2856 masm.mov(tag, res);
2858 return true;
2861 bool CacheIRCompiler::emitLoadDOMExpandoValue(ObjOperandId objId,
2862 ValOperandId resultId) {
2863 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2864 Register obj = allocator.useRegister(masm, objId);
2865 ValueOperand val = allocator.defineValueRegister(masm, resultId);
2867 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
2868 val.scratchReg());
2869 masm.loadValue(Address(val.scratchReg(),
2870 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
2871 val);
2872 return true;
2875 bool CacheIRCompiler::emitLoadDOMExpandoValueIgnoreGeneration(
2876 ObjOperandId objId, ValOperandId resultId) {
2877 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2878 Register obj = allocator.useRegister(masm, objId);
2879 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2881 // Determine the expando's Address.
2882 Register scratch = output.scratchReg();
2883 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
2884 Address expandoAddr(scratch,
2885 js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
2887 #ifdef DEBUG
2888 // Private values are stored as doubles, so assert we have a double.
2889 Label ok;
2890 masm.branchTestDouble(Assembler::Equal, expandoAddr, &ok);
2891 masm.assumeUnreachable("DOM expando is not a PrivateValue!");
2892 masm.bind(&ok);
2893 #endif
2895 // Load the ExpandoAndGeneration* from the PrivateValue.
2896 masm.loadPrivate(expandoAddr, scratch);
2898 // Load expandoAndGeneration->expando into the output Value register.
2899 masm.loadValue(Address(scratch, ExpandoAndGeneration::offsetOfExpando()),
2900 output);
2901 return true;
2904 bool CacheIRCompiler::emitLoadUndefinedResult() {
2905 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2906 AutoOutputRegister output(*this);
2907 masm.moveValue(UndefinedValue(), output.valueReg());
2908 return true;
2911 static void EmitStoreBoolean(MacroAssembler& masm, bool b,
2912 const AutoOutputRegister& output) {
2913 if (output.hasValue()) {
2914 Value val = BooleanValue(b);
2915 masm.moveValue(val, output.valueReg());
2916 } else {
2917 MOZ_ASSERT(output.type() == JSVAL_TYPE_BOOLEAN);
2918 masm.movePtr(ImmWord(b), output.typedReg().gpr());
2922 bool CacheIRCompiler::emitLoadBooleanResult(bool val) {
2923 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2924 AutoOutputRegister output(*this);
2925 EmitStoreBoolean(masm, val, output);
2926 return true;
2929 bool CacheIRCompiler::emitLoadOperandResult(ValOperandId inputId) {
2930 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2931 AutoOutputRegister output(*this);
2932 ValueOperand input = allocator.useValueRegister(masm, inputId);
2933 masm.moveValue(input, output.valueReg());
2934 return true;
2937 static void EmitStoreResult(MacroAssembler& masm, Register reg,
2938 JSValueType type,
2939 const AutoOutputRegister& output) {
2940 if (output.hasValue()) {
2941 masm.tagValue(type, reg, output.valueReg());
2942 return;
2944 if (type == JSVAL_TYPE_INT32 && output.typedReg().isFloat()) {
2945 masm.convertInt32ToDouble(reg, output.typedReg().fpu());
2946 return;
2948 if (type == output.type()) {
2949 masm.mov(reg, output.typedReg().gpr());
2950 return;
2952 masm.assumeUnreachable("Should have monitored result");
2955 bool CacheIRCompiler::emitLoadInt32ArrayLengthResult(ObjOperandId objId) {
2956 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2957 AutoOutputRegister output(*this);
2958 Register obj = allocator.useRegister(masm, objId);
2959 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2961 FailurePath* failure;
2962 if (!addFailurePath(&failure)) {
2963 return false;
2966 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
2967 masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);
2969 // Guard length fits in an int32.
2970 masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
2971 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2972 return true;
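// Array lengths are stored as uint32 and can be as large as 2^32 - 1; the
// sign-bit test rejects lengths of 2^31 or more, which do not fit in an
// int32.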
2975 bool CacheIRCompiler::emitLoadInt32ArrayLength(ObjOperandId objId,
2976 Int32OperandId resultId) {
2977 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2978 Register obj = allocator.useRegister(masm, objId);
2979 Register res = allocator.defineRegister(masm, resultId);
2981 FailurePath* failure;
2982 if (!addFailurePath(&failure)) {
2983 return false;
2986 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), res);
2987 masm.load32(Address(res, ObjectElements::offsetOfLength()), res);
2989 // Guard length fits in an int32.
2990 masm.branchTest32(Assembler::Signed, res, res, failure->label());
2991 return true;
2994 bool CacheIRCompiler::emitDoubleAddResult(NumberOperandId lhsId,
2995 NumberOperandId rhsId) {
2996 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2997 AutoOutputRegister output(*this);
2999 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
3000 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
3002 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
3003 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
3005 masm.addDouble(floatScratch1, floatScratch0);
3006 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
3008 return true;
3010 bool CacheIRCompiler::emitDoubleSubResult(NumberOperandId lhsId,
3011 NumberOperandId rhsId) {
3012 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3013 AutoOutputRegister output(*this);
3015 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
3016 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
3018 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
3019 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
3021 masm.subDouble(floatScratch1, floatScratch0);
3022 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
3024 return true;
3026 bool CacheIRCompiler::emitDoubleMulResult(NumberOperandId lhsId,
3027 NumberOperandId rhsId) {
3028 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3029 AutoOutputRegister output(*this);
3031 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
3032 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
3034 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
3035 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
3037 masm.mulDouble(floatScratch1, floatScratch0);
3038 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
3040 return true;
3042 bool CacheIRCompiler::emitDoubleDivResult(NumberOperandId lhsId,
3043 NumberOperandId rhsId) {
3044 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3045 AutoOutputRegister output(*this);
3047 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
3048 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
3050 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
3051 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
3053 masm.divDouble(floatScratch1, floatScratch0);
3054 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
3056 return true;
3058 bool CacheIRCompiler::emitDoubleModResult(NumberOperandId lhsId,
3059 NumberOperandId rhsId) {
3060 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3061 AutoOutputRegister output(*this);
3062 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3064 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
3065 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
3067 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
3068 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
3070 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
3071 masm.PushRegsInMask(save);
3073 using Fn = double (*)(double a, double b);
3074 masm.setupUnalignedABICall(scratch);
3075 masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
3076 masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
3077 masm.callWithABI<Fn, js::NumberMod>(MoveOp::DOUBLE);
3078 masm.storeCallFloatResult(floatScratch0);
3080 LiveRegisterSet ignore;
3081 ignore.add(floatScratch0);
3082 masm.PopRegsInMaskIgnore(save, ignore);
3084 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
3086 return true;
3088 bool CacheIRCompiler::emitDoublePowResult(NumberOperandId lhsId,
3089 NumberOperandId rhsId) {
3090 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3091 AutoOutputRegister output(*this);
3092 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3094 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
3095 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
3097 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
3098 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
3100 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
3101 masm.PushRegsInMask(save);
3103 using Fn = double (*)(double x, double y);
3104 masm.setupUnalignedABICall(scratch);
3105 masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
3106 masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
3107 masm.callWithABI<Fn, js::ecmaPow>(MoveOp::DOUBLE);
3108 masm.storeCallFloatResult(floatScratch0);
3110 LiveRegisterSet ignore;
3111 ignore.add(floatScratch0);
3112 masm.PopRegsInMaskIgnore(save, ignore);
3114 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
3116 return true;
3119 bool CacheIRCompiler::emitInt32AddResult(Int32OperandId lhsId,
3120 Int32OperandId rhsId) {
3121 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3122 AutoOutputRegister output(*this);
3123 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3125 Register lhs = allocator.useRegister(masm, lhsId);
3126 Register rhs = allocator.useRegister(masm, rhsId);
3128 FailurePath* failure;
3129 if (!addFailurePath(&failure)) {
3130 return false;
3133 masm.mov(rhs, scratch);
3134 masm.branchAdd32(Assembler::Overflow, lhs, scratch, failure->label());
3135 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3137 return true;
3139 bool CacheIRCompiler::emitInt32SubResult(Int32OperandId lhsId,
3140 Int32OperandId rhsId) {
3141 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3142 AutoOutputRegister output(*this);
3143 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3144 Register lhs = allocator.useRegister(masm, lhsId);
3145 Register rhs = allocator.useRegister(masm, rhsId);
3147 FailurePath* failure;
3148 if (!addFailurePath(&failure)) {
3149 return false;
3152 masm.mov(lhs, scratch);
3153 masm.branchSub32(Assembler::Overflow, rhs, scratch, failure->label());
3154 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3156 return true;
3159 bool CacheIRCompiler::emitInt32MulResult(Int32OperandId lhsId,
3160 Int32OperandId rhsId) {
3161 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3162 AutoOutputRegister output(*this);
3163 Register lhs = allocator.useRegister(masm, lhsId);
3164 Register rhs = allocator.useRegister(masm, rhsId);
3165 AutoScratchRegister scratch(allocator, masm);
3166 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
3168 FailurePath* failure;
3169 if (!addFailurePath(&failure)) {
3170 return false;
3173 Label maybeNegZero, done;
3174 masm.mov(lhs, scratch);
3175 masm.branchMul32(Assembler::Overflow, rhs, scratch, failure->label());
3176 masm.branchTest32(Assembler::Zero, scratch, scratch, &maybeNegZero);
3177 masm.jump(&done);
3179 masm.bind(&maybeNegZero);
3180 masm.mov(lhs, scratch2);
3181 // Result is -0 if exactly one of lhs or rhs is negative.
3182 masm.or32(rhs, scratch2);
3183 masm.branchTest32(Assembler::Signed, scratch2, scratch2, failure->label());
3185 masm.bind(&done);
3186 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3187 return true;
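// The negative-zero check matters for cases like (-5) * 0: the int32 product
// is 0, but the JS result is -0, which an int32 cannot represent, so the
// stub bails out to a double-typed path.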
3190 bool CacheIRCompiler::emitInt32DivResult(Int32OperandId lhsId,
3191 Int32OperandId rhsId) {
3192 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3193 AutoOutputRegister output(*this);
3194 Register lhs = allocator.useRegister(masm, lhsId);
3195 Register rhs = allocator.useRegister(masm, rhsId);
3196 AutoScratchRegister rem(allocator, masm);
3197 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3199 FailurePath* failure;
3200 if (!addFailurePath(&failure)) {
3201 return false;
3204 // Prevent division by 0.
3205 masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());
3207 // Prevent -2147483648 / -1.
3208 Label notOverflow;
3209 masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
3210 masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
3211 masm.bind(&notOverflow);
3213 // Prevent negative 0.
3214 Label notZero;
3215 masm.branchTest32(Assembler::NonZero, lhs, lhs, &notZero);
3216 masm.branchTest32(Assembler::Signed, rhs, rhs, failure->label());
3217 masm.bind(&notZero);
3219 masm.mov(lhs, scratch);
3220 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
3221 liveVolatileFloatRegs());
3222 masm.flexibleDivMod32(rhs, scratch, rem, false, volatileRegs);
3224 // A remainder implies a double result.
3225 masm.branchTest32(Assembler::NonZero, rem, rem, failure->label());
3226 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3227 return true;
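// For example, 7 / 2 bails out because the remainder forces a double result
// (3.5), INT32_MIN / -1 bails because the quotient overflows int32, and
// 0 / -3 bails because the JS result is -0.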
3230 bool CacheIRCompiler::emitInt32ModResult(Int32OperandId lhsId,
3231 Int32OperandId rhsId) {
3232 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3233 AutoOutputRegister output(*this);
3234 Register lhs = allocator.useRegister(masm, lhsId);
3235 Register rhs = allocator.useRegister(masm, rhsId);
3236 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3238 FailurePath* failure;
3239 if (!addFailurePath(&failure)) {
3240 return false;
3243 // x % 0 results in NaN.
3244 masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());
3246 // Prevent -2147483648 % -1.
3248 // Traps on x86 and has undefined behavior on ARM32 (when __aeabi_idivmod is
3249 // called).
3250 Label notOverflow;
3251 masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
3252 masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
3253 masm.bind(&notOverflow);
3255 masm.mov(lhs, scratch);
3256 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
3257 liveVolatileFloatRegs());
3258 masm.flexibleRemainder32(rhs, scratch, false, volatileRegs);
3260 // Modulo takes the sign of the dividend; we can't return negative zero here.
3261 Label notZero;
3262 masm.branchTest32(Assembler::NonZero, scratch, scratch, &notZero);
3263 masm.branchTest32(Assembler::Signed, lhs, lhs, failure->label());
3264 masm.bind(&notZero);
3266 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3268 return true;
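// For example, 5 % 3 stays on the int32 path, but -8 % 4 bails out: the
// remainder is zero and the dividend is negative, so the JS result is -0.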
3271 bool CacheIRCompiler::emitInt32PowResult(Int32OperandId lhsId,
3272 Int32OperandId rhsId) {
3273 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3274 AutoOutputRegister output(*this);
3275 Register base = allocator.useRegister(masm, lhsId);
3276 Register power = allocator.useRegister(masm, rhsId);
3277 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
3278 AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
3279 AutoScratchRegister scratch3(allocator, masm);
3281 FailurePath* failure;
3282 if (!addFailurePath(&failure)) {
3283 return false;
3286 masm.pow32(base, power, scratch1, scratch2, scratch3, failure->label());
3288 masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
3289 return true;
3292 bool CacheIRCompiler::emitInt32BitOrResult(Int32OperandId lhsId,
3293 Int32OperandId rhsId) {
3294 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3295 AutoOutputRegister output(*this);
3296 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3298 Register lhs = allocator.useRegister(masm, lhsId);
3299 Register rhs = allocator.useRegister(masm, rhsId);
3301 masm.mov(rhs, scratch);
3302 masm.or32(lhs, scratch);
3303 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3305 return true;
3307 bool CacheIRCompiler::emitInt32BitXorResult(Int32OperandId lhsId,
3308 Int32OperandId rhsId) {
3309 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3310 AutoOutputRegister output(*this);
3311 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3313 Register lhs = allocator.useRegister(masm, lhsId);
3314 Register rhs = allocator.useRegister(masm, rhsId);
3316 masm.mov(rhs, scratch);
3317 masm.xor32(lhs, scratch);
3318 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3320 return true;
3322 bool CacheIRCompiler::emitInt32BitAndResult(Int32OperandId lhsId,
3323 Int32OperandId rhsId) {
3324 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3325 AutoOutputRegister output(*this);
3326 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3328 Register lhs = allocator.useRegister(masm, lhsId);
3329 Register rhs = allocator.useRegister(masm, rhsId);
3331 masm.mov(rhs, scratch);
3332 masm.and32(lhs, scratch);
3333 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3335 return true;
3337 bool CacheIRCompiler::emitInt32LeftShiftResult(Int32OperandId lhsId,
3338 Int32OperandId rhsId) {
3339 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3340 AutoOutputRegister output(*this);
3341 Register lhs = allocator.useRegister(masm, lhsId);
3342 Register rhs = allocator.useRegister(masm, rhsId);
3343 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3345 masm.mov(lhs, scratch);
3346 masm.flexibleLshift32(rhs, scratch);
3347 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3349 return true;
3352 bool CacheIRCompiler::emitInt32RightShiftResult(Int32OperandId lhsId,
3353 Int32OperandId rhsId) {
3354 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3355 AutoOutputRegister output(*this);
3356 Register lhs = allocator.useRegister(masm, lhsId);
3357 Register rhs = allocator.useRegister(masm, rhsId);
3358 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3360 masm.mov(lhs, scratch);
3361 masm.flexibleRshift32Arithmetic(rhs, scratch);
3362 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3364 return true;
3367 bool CacheIRCompiler::emitInt32URightShiftResult(Int32OperandId lhsId,
3368 Int32OperandId rhsId,
3369 bool forceDouble) {
3370 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3371 AutoOutputRegister output(*this);
3373 Register lhs = allocator.useRegister(masm, lhsId);
3374 Register rhs = allocator.useRegister(masm, rhsId);
3375 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3377 FailurePath* failure;
3378 if (!addFailurePath(&failure)) {
3379 return false;
3382 masm.mov(lhs, scratch);
3383 masm.flexibleRshift32(rhs, scratch);
3384 if (forceDouble) {
3385 ScratchDoubleScope fpscratch(masm);
3386 masm.convertUInt32ToDouble(scratch, fpscratch);
3387 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
3388 } else {
3389 masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
3390 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3392 return true;
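// For example, -1 >>> 0 is 4294967295, which does not fit in an int32: with
// |forceDouble| the result is boxed as a double, otherwise the sign-bit
// check fails and the stub bails out.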
3395 bool CacheIRCompiler::emitInt32NegationResult(Int32OperandId inputId) {
3396 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3397 AutoOutputRegister output(*this);
3398 Register val = allocator.useRegister(masm, inputId);
3399 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3401 FailurePath* failure;
3402 if (!addFailurePath(&failure)) {
3403 return false;
3406 // Guard against 0 and MIN_INT by checking if low 31-bits are all zero.
3407 // Both of these result in a double.
3408 masm.branchTest32(Assembler::Zero, val, Imm32(0x7fffffff), failure->label());
3409 masm.mov(val, scratch);
3410 masm.neg32(scratch);
3411 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3412 return true;
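// The single bit test catches both exceptional inputs, 0 (negating it gives
// -0) and INT32_MIN (negating it overflows int32), since they are the only
// int32 values whose low 31 bits are all zero.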
3415 bool CacheIRCompiler::emitInt32IncResult(Int32OperandId inputId) {
3416 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3417 AutoOutputRegister output(*this);
3418 Register input = allocator.useRegister(masm, inputId);
3419 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3421 FailurePath* failure;
3422 if (!addFailurePath(&failure)) {
3423 return false;
3426 masm.mov(input, scratch);
3427 masm.branchAdd32(Assembler::Overflow, Imm32(1), scratch, failure->label());
3428 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3430 return true;
3433 bool CacheIRCompiler::emitInt32DecResult(Int32OperandId inputId) {
3434 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3435 AutoOutputRegister output(*this);
3436 Register input = allocator.useRegister(masm, inputId);
3437 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3439 FailurePath* failure;
3440 if (!addFailurePath(&failure)) {
3441 return false;
3444 masm.mov(input, scratch);
3445 masm.branchSub32(Assembler::Overflow, Imm32(1), scratch, failure->label());
3446 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3448 return true;
3451 bool CacheIRCompiler::emitInt32NotResult(Int32OperandId inputId) {
3452 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3453 AutoOutputRegister output(*this);
3454 Register val = allocator.useRegister(masm, inputId);
3455 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3457 masm.mov(val, scratch);
3458 masm.not32(scratch);
3459 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3460 return true;
3463 bool CacheIRCompiler::emitDoubleNegationResult(NumberOperandId inputId) {
3464 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3465 AutoOutputRegister output(*this);
3467 AutoScratchFloatRegister floatReg(this);
3469 allocator.ensureDoubleRegister(masm, inputId, floatReg);
3471 masm.negateDouble(floatReg);
3472 masm.boxDouble(floatReg, output.valueReg(), floatReg);
3474 return true;
3477 bool CacheIRCompiler::emitDoubleIncDecResult(bool isInc,
3478 NumberOperandId inputId) {
3479 AutoOutputRegister output(*this);
3481 AutoScratchFloatRegister floatReg(this);
3483 allocator.ensureDoubleRegister(masm, inputId, floatReg);
3486 ScratchDoubleScope fpscratch(masm);
3487 masm.loadConstantDouble(1.0, fpscratch);
3488 if (isInc) {
3489 masm.addDouble(fpscratch, floatReg);
3490 } else {
3491 masm.subDouble(fpscratch, floatReg);
3494 masm.boxDouble(floatReg, output.valueReg(), floatReg);
3496 return true;
3499 bool CacheIRCompiler::emitDoubleIncResult(NumberOperandId inputId) {
3500 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3501 return emitDoubleIncDecResult(true, inputId);
3504 bool CacheIRCompiler::emitDoubleDecResult(NumberOperandId inputId) {
3505 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3506 return emitDoubleIncDecResult(false, inputId);
3509 template <typename Fn, Fn fn>
3510 bool CacheIRCompiler::emitBigIntBinaryOperationShared(BigIntOperandId lhsId,
3511 BigIntOperandId rhsId) {
3512 AutoCallVM callvm(masm, this, allocator);
3513 Register lhs = allocator.useRegister(masm, lhsId);
3514 Register rhs = allocator.useRegister(masm, rhsId);
3516 callvm.prepare();
3518 masm.Push(rhs);
3519 masm.Push(lhs);
3521 callvm.call<Fn, fn>();
3522 return true;
3525 bool CacheIRCompiler::emitBigIntAddResult(BigIntOperandId lhsId,
3526 BigIntOperandId rhsId) {
3527 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3528 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3529 return emitBigIntBinaryOperationShared<Fn, BigInt::add>(lhsId, rhsId);
3532 bool CacheIRCompiler::emitBigIntSubResult(BigIntOperandId lhsId,
3533 BigIntOperandId rhsId) {
3534 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3535 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3536 return emitBigIntBinaryOperationShared<Fn, BigInt::sub>(lhsId, rhsId);
3539 bool CacheIRCompiler::emitBigIntMulResult(BigIntOperandId lhsId,
3540 BigIntOperandId rhsId) {
3541 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3542 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3543 return emitBigIntBinaryOperationShared<Fn, BigInt::mul>(lhsId, rhsId);
3546 bool CacheIRCompiler::emitBigIntDivResult(BigIntOperandId lhsId,
3547 BigIntOperandId rhsId) {
3548 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3549 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3550 return emitBigIntBinaryOperationShared<Fn, BigInt::div>(lhsId, rhsId);
3553 bool CacheIRCompiler::emitBigIntModResult(BigIntOperandId lhsId,
3554 BigIntOperandId rhsId) {
3555 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3556 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3557 return emitBigIntBinaryOperationShared<Fn, BigInt::mod>(lhsId, rhsId);
3560 bool CacheIRCompiler::emitBigIntPowResult(BigIntOperandId lhsId,
3561 BigIntOperandId rhsId) {
3562 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3563 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3564 return emitBigIntBinaryOperationShared<Fn, BigInt::pow>(lhsId, rhsId);
3567 bool CacheIRCompiler::emitBigIntBitAndResult(BigIntOperandId lhsId,
3568 BigIntOperandId rhsId) {
3569 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3570 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3571 return emitBigIntBinaryOperationShared<Fn, BigInt::bitAnd>(lhsId, rhsId);
3574 bool CacheIRCompiler::emitBigIntBitOrResult(BigIntOperandId lhsId,
3575 BigIntOperandId rhsId) {
3576 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3577 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3578 return emitBigIntBinaryOperationShared<Fn, BigInt::bitOr>(lhsId, rhsId);
3581 bool CacheIRCompiler::emitBigIntBitXorResult(BigIntOperandId lhsId,
3582 BigIntOperandId rhsId) {
3583 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3584 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3585 return emitBigIntBinaryOperationShared<Fn, BigInt::bitXor>(lhsId, rhsId);
3588 bool CacheIRCompiler::emitBigIntLeftShiftResult(BigIntOperandId lhsId,
3589 BigIntOperandId rhsId) {
3590 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3591 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3592 return emitBigIntBinaryOperationShared<Fn, BigInt::lsh>(lhsId, rhsId);
3595 bool CacheIRCompiler::emitBigIntRightShiftResult(BigIntOperandId lhsId,
3596 BigIntOperandId rhsId) {
3597 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3598 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3599 return emitBigIntBinaryOperationShared<Fn, BigInt::rsh>(lhsId, rhsId);
3602 template <typename Fn, Fn fn>
3603 bool CacheIRCompiler::emitBigIntUnaryOperationShared(BigIntOperandId inputId) {
3604 AutoCallVM callvm(masm, this, allocator);
3605 Register val = allocator.useRegister(masm, inputId);
3607 callvm.prepare();
3609 masm.Push(val);
3611 callvm.call<Fn, fn>();
3612 return true;
3615 bool CacheIRCompiler::emitBigIntNotResult(BigIntOperandId inputId) {
3616 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3617 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3618 return emitBigIntUnaryOperationShared<Fn, BigInt::bitNot>(inputId);
3621 bool CacheIRCompiler::emitBigIntNegationResult(BigIntOperandId inputId) {
3622 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3623 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3624 return emitBigIntUnaryOperationShared<Fn, BigInt::neg>(inputId);
3627 bool CacheIRCompiler::emitBigIntIncResult(BigIntOperandId inputId) {
3628 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3629 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3630 return emitBigIntUnaryOperationShared<Fn, BigInt::inc>(inputId);
3633 bool CacheIRCompiler::emitBigIntDecResult(BigIntOperandId inputId) {
3634 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3635 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3636 return emitBigIntUnaryOperationShared<Fn, BigInt::dec>(inputId);
3639 bool CacheIRCompiler::emitTruncateDoubleToUInt32(NumberOperandId inputId,
3640 Int32OperandId resultId) {
3641 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3642 Register res = allocator.defineRegister(masm, resultId);
3644 AutoScratchFloatRegister floatReg(this);
3646 allocator.ensureDoubleRegister(masm, inputId, floatReg);
3648 Label done, truncateABICall;
3650 masm.branchTruncateDoubleMaybeModUint32(floatReg, res, &truncateABICall);
3651 masm.jump(&done);
3653 masm.bind(&truncateABICall);
3654 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
3655 save.takeUnchecked(floatReg);
3656 // Bug 1451976
3657 save.takeUnchecked(floatReg.get().asSingle());
3658 masm.PushRegsInMask(save);
3660 using Fn = int32_t (*)(double);
3661 masm.setupUnalignedABICall(res);
3662 masm.passABIArg(floatReg, MoveOp::DOUBLE);
3663 masm.callWithABI<Fn, JS::ToInt32>(MoveOp::GENERAL,
3664 CheckUnsafeCallWithABI::DontCheckOther);
3665 masm.storeCallInt32Result(res);
3667 LiveRegisterSet ignore;
3668 ignore.add(res);
3669 masm.PopRegsInMaskIgnore(save, ignore);
3671 masm.bind(&done);
3672 return true;
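// The inline branchTruncateDoubleMaybeModUint32 fast path only covers
// doubles the hardware can truncate directly; everything else falls back
// to the out-of-line JS::ToInt32 ABI call, which applies the ECMAScript
// modulo-2**32 semantics. Illustrative example: truncating 4294967296.5
// (that is, 2**32 + 0.5) yields 0 rather than failing.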
3675 bool CacheIRCompiler::emitLoadArgumentsObjectLengthResult(ObjOperandId objId) {
3676 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3677 AutoOutputRegister output(*this);
3678 Register obj = allocator.useRegister(masm, objId);
3679 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3681 FailurePath* failure;
3682 if (!addFailurePath(&failure)) {
3683 return false;
3686 masm.loadArgumentsObjectLength(obj, scratch, failure->label());
3688 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3689 return true;
3692 bool CacheIRCompiler::emitLoadArgumentsObjectLength(ObjOperandId objId,
3693 Int32OperandId resultId) {
3694 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3695 Register obj = allocator.useRegister(masm, objId);
3696 Register res = allocator.defineRegister(masm, resultId);
3698 FailurePath* failure;
3699 if (!addFailurePath(&failure)) {
3700 return false;
3703 masm.loadArgumentsObjectLength(obj, res, failure->label());
3704 return true;
3707 bool CacheIRCompiler::emitLoadArrayBufferByteLengthInt32Result(
3708 ObjOperandId objId) {
3709 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3710 AutoOutputRegister output(*this);
3711 Register obj = allocator.useRegister(masm, objId);
3712 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3714 FailurePath* failure;
3715 if (!addFailurePath(&failure)) {
3716 return false;
3719 masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
3720 masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
3721 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3722 return true;
3725 bool CacheIRCompiler::emitLoadArrayBufferByteLengthDoubleResult(
3726 ObjOperandId objId) {
3727 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3728 AutoOutputRegister output(*this);
3729 Register obj = allocator.useRegister(masm, objId);
3730 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3732 ScratchDoubleScope fpscratch(masm);
3733 masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
3734 masm.convertIntPtrToDouble(scratch, fpscratch);
3735 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
3736 return true;
3739 bool CacheIRCompiler::emitLoadArrayBufferViewLengthInt32Result(
3740 ObjOperandId objId) {
3741 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3742 AutoOutputRegister output(*this);
3743 Register obj = allocator.useRegister(masm, objId);
3744 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3746 FailurePath* failure;
3747 if (!addFailurePath(&failure)) {
3748 return false;
3751 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
3752 masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
3753 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3754 return true;
3757 bool CacheIRCompiler::emitLoadArrayBufferViewLengthDoubleResult(
3758 ObjOperandId objId) {
3759 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3760 AutoOutputRegister output(*this);
3761 Register obj = allocator.useRegister(masm, objId);
3762 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3764 ScratchDoubleScope fpscratch(masm);
3765 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
3766 masm.convertIntPtrToDouble(scratch, fpscratch);
3767 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
3768 return true;
3771 bool CacheIRCompiler::emitLoadBoundFunctionNumArgs(ObjOperandId objId,
3772 Int32OperandId resultId) {
3773 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3775 Register obj = allocator.useRegister(masm, objId);
3776 Register output = allocator.defineRegister(masm, resultId);
3778 masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
3779 output);
3780 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
3781 return true;
3784 bool CacheIRCompiler::emitLoadBoundFunctionTarget(ObjOperandId objId,
3785 ObjOperandId resultId) {
3786 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3788 Register obj = allocator.useRegister(masm, objId);
3789 Register output = allocator.defineRegister(masm, resultId);
3791 masm.unboxObject(Address(obj, BoundFunctionObject::offsetOfTargetSlot()),
3792 output);
3793 return true;
3796 bool CacheIRCompiler::emitGuardBoundFunctionIsConstructor(ObjOperandId objId) {
3797 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3799 Register obj = allocator.useRegister(masm, objId);
3801 FailurePath* failure;
3802 if (!addFailurePath(&failure)) {
3803 return false;
3806 Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
3807 masm.branchTest32(Assembler::Zero, flagsSlot,
3808 Imm32(BoundFunctionObject::IsConstructorFlag),
3809 failure->label());
3810 return true;
3813 bool CacheIRCompiler::emitGuardObjectIdentity(ObjOperandId obj1Id,
3814 ObjOperandId obj2Id) {
3815 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3817 Register obj1 = allocator.useRegister(masm, obj1Id);
3818 Register obj2 = allocator.useRegister(masm, obj2Id);
3820 FailurePath* failure;
3821 if (!addFailurePath(&failure)) {
3822 return false;
3825 masm.branchPtr(Assembler::NotEqual, obj1, obj2, failure->label());
3826 return true;
3829 bool CacheIRCompiler::emitLoadFunctionLengthResult(ObjOperandId objId) {
3830 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3831 AutoOutputRegister output(*this);
3832 Register obj = allocator.useRegister(masm, objId);
3833 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3835 FailurePath* failure;
3836 if (!addFailurePath(&failure)) {
3837 return false;
3840 // Get the JSFunction flags and arg count.
3841 masm.load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), scratch);
3843 // Functions with a SelfHostedLazyScript must be compiled with the slow-path
3844 // before the function length is known. If the length was previously resolved,
3845 // the length property may be shadowed.
3846 masm.branchTest32(
3847 Assembler::NonZero, scratch,
3848 Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
3849 failure->label());
3851 masm.loadFunctionLength(obj, scratch, scratch, failure->label());
3852 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3853 return true;
3856 bool CacheIRCompiler::emitLoadFunctionNameResult(ObjOperandId objId) {
3857 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3858 AutoOutputRegister output(*this);
3859 Register obj = allocator.useRegister(masm, objId);
3860 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3862 FailurePath* failure;
3863 if (!addFailurePath(&failure)) {
3864 return false;
3867 masm.loadFunctionName(obj, scratch, ImmGCPtr(cx_->names().empty_),
3868 failure->label());
3870 masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
3871 return true;
3874 bool CacheIRCompiler::emitLinearizeForCharAccess(StringOperandId strId,
3875 Int32OperandId indexId,
3876 StringOperandId resultId) {
3877 Register str = allocator.useRegister(masm, strId);
3878 Register index = allocator.useRegister(masm, indexId);
3879 Register result = allocator.defineRegister(masm, resultId);
3880 AutoScratchRegister scratch(allocator, masm);
3882 FailurePath* failure;
3883 if (!addFailurePath(&failure)) {
3884 return false;
3887 Label done;
3888 masm.movePtr(str, result);
3890 // We can omit the bounds check, because we only compare the index against the
3891 // string length. In the worst case we unnecessarily linearize the string
3892 // when the index is out-of-bounds.
3894 masm.branchIfCanLoadStringChar(str, index, scratch, &done);
3896 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
3897 liveVolatileFloatRegs());
3898 masm.PushRegsInMask(volatileRegs);
3900 using Fn = JSLinearString* (*)(JSString*);
3901 masm.setupUnalignedABICall(scratch);
3902 masm.passABIArg(str);
3903 masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
3904 masm.storeCallPointerResult(result);
3906 LiveRegisterSet ignore;
3907 ignore.add(result);
3908 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
3910 masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
3913 masm.bind(&done);
3914 return true;
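// Hedged note: rope strings (built lazily by concatenation, e.g. repeated
// `s += t` in JS) don't expose a flat character buffer, so char access ops
// linearize the string first. LinearizeForCharAccessPure is called without
// a VM wrapper, so it cannot GC and signals failure by returning nullptr,
// which we translate into an IC bailout instead of an exception.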
3917 bool CacheIRCompiler::emitLoadStringLengthResult(StringOperandId strId) {
3918 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3919 AutoOutputRegister output(*this);
3920 Register str = allocator.useRegister(masm, strId);
3921 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3923 masm.loadStringLength(str, scratch);
3924 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3925 return true;
3928 bool CacheIRCompiler::emitLoadStringCharCodeResult(StringOperandId strId,
3929 Int32OperandId indexId,
3930 bool handleOOB) {
3931 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3932 AutoOutputRegister output(*this);
3933 Register str = allocator.useRegister(masm, strId);
3934 Register index = allocator.useRegister(masm, indexId);
3935 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
3936 AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
3937 AutoScratchRegister scratch3(allocator, masm);
3939 // Bounds check, load string char.
3940 Label done;
3941 if (!handleOOB) {
3942 FailurePath* failure;
3943 if (!addFailurePath(&failure)) {
3944 return false;
3947 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
3948 scratch1, failure->label());
3949 masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
3950 failure->label());
3951 } else {
3952 // Return NaN for out-of-bounds access.
3953 masm.moveValue(JS::NaNValue(), output.valueReg());
3955 // The bounds check mustn't use a scratch register which aliases the output.
3956 MOZ_ASSERT(!output.valueReg().aliases(scratch3));
3958 // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
3959 // guaranteed to see no nested ropes.
3960 Label loadFailed;
3961 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
3962 scratch3, &done);
3963 masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);
3965 Label loadedChar;
3966 masm.jump(&loadedChar);
3967 masm.bind(&loadFailed);
3968 masm.assumeUnreachable("loadStringChar can't fail for linear strings");
3969 masm.bind(&loadedChar);
3970 }
3972 masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
3973 masm.bind(&done);
3974 return true;
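// With handleOOB=true this implements charCodeAt-style semantics where an
// out-of-range index produces NaN; illustratively, "abc".charCodeAt(10)
// returns NaN in JS rather than throwing.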
3977 bool CacheIRCompiler::emitNewStringObjectResult(uint32_t templateObjectOffset,
3978 StringOperandId strId) {
3979 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3981 AutoCallVM callvm(masm, this, allocator);
3983 Register str = allocator.useRegister(masm, strId);
3985 callvm.prepare();
3986 masm.Push(str);
3988 using Fn = JSObject* (*)(JSContext*, HandleString);
3989 callvm.call<Fn, NewStringObject>();
3990 return true;
3993 bool CacheIRCompiler::emitStringIncludesResult(StringOperandId strId,
3994 StringOperandId searchStrId) {
3995 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3997 AutoCallVM callvm(masm, this, allocator);
3999 Register str = allocator.useRegister(masm, strId);
4000 Register searchStr = allocator.useRegister(masm, searchStrId);
4002 callvm.prepare();
4003 masm.Push(searchStr);
4004 masm.Push(str);
4006 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
4007 callvm.call<Fn, js::StringIncludes>();
4008 return true;
4011 bool CacheIRCompiler::emitStringIndexOfResult(StringOperandId strId,
4012 StringOperandId searchStrId) {
4013 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4015 AutoCallVM callvm(masm, this, allocator);
4017 Register str = allocator.useRegister(masm, strId);
4018 Register searchStr = allocator.useRegister(masm, searchStrId);
4020 callvm.prepare();
4021 masm.Push(searchStr);
4022 masm.Push(str);
4024 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
4025 callvm.call<Fn, js::StringIndexOf>();
4026 return true;
4029 bool CacheIRCompiler::emitStringLastIndexOfResult(StringOperandId strId,
4030 StringOperandId searchStrId) {
4031 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4033 AutoCallVM callvm(masm, this, allocator);
4035 Register str = allocator.useRegister(masm, strId);
4036 Register searchStr = allocator.useRegister(masm, searchStrId);
4038 callvm.prepare();
4039 masm.Push(searchStr);
4040 masm.Push(str);
4042 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
4043 callvm.call<Fn, js::StringLastIndexOf>();
4044 return true;
4047 bool CacheIRCompiler::emitStringStartsWithResult(StringOperandId strId,
4048 StringOperandId searchStrId) {
4049 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4051 AutoCallVM callvm(masm, this, allocator);
4053 Register str = allocator.useRegister(masm, strId);
4054 Register searchStr = allocator.useRegister(masm, searchStrId);
4056 callvm.prepare();
4057 masm.Push(searchStr);
4058 masm.Push(str);
4060 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
4061 callvm.call<Fn, js::StringStartsWith>();
4062 return true;
4065 bool CacheIRCompiler::emitStringEndsWithResult(StringOperandId strId,
4066 StringOperandId searchStrId) {
4067 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4069 AutoCallVM callvm(masm, this, allocator);
4071 Register str = allocator.useRegister(masm, strId);
4072 Register searchStr = allocator.useRegister(masm, searchStrId);
4074 callvm.prepare();
4075 masm.Push(searchStr);
4076 masm.Push(str);
4078 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
4079 callvm.call<Fn, js::StringEndsWith>();
4080 return true;
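// A note on the VM-call emitters above: arguments are pushed in reverse
// parameter order (the search string first, the receiver string last), so
// the first declared parameter ends up on top of the stack where the VM
// wrapper expects it. This reads as the general AutoCallVM convention
// rather than anything string-specific (hedged observation).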
4083 bool CacheIRCompiler::emitStringToLowerCaseResult(StringOperandId strId) {
4084 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4086 AutoCallVM callvm(masm, this, allocator);
4088 Register str = allocator.useRegister(masm, strId);
4090 callvm.prepare();
4091 masm.Push(str);
4093 using Fn = JSString* (*)(JSContext*, HandleString);
4094 callvm.call<Fn, js::StringToLowerCase>();
4095 return true;
4098 bool CacheIRCompiler::emitStringToUpperCaseResult(StringOperandId strId) {
4099 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4101 AutoCallVM callvm(masm, this, allocator);
4103 Register str = allocator.useRegister(masm, strId);
4105 callvm.prepare();
4106 masm.Push(str);
4108 using Fn = JSString* (*)(JSContext*, HandleString);
4109 callvm.call<Fn, js::StringToUpperCase>();
4110 return true;
4113 bool CacheIRCompiler::emitStringTrimResult(StringOperandId strId) {
4114 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4116 AutoCallVM callvm(masm, this, allocator);
4118 Register str = allocator.useRegister(masm, strId);
4120 callvm.prepare();
4121 masm.Push(str);
4123 using Fn = JSString* (*)(JSContext*, HandleString);
4124 callvm.call<Fn, js::StringTrim>();
4125 return true;
4128 bool CacheIRCompiler::emitStringTrimStartResult(StringOperandId strId) {
4129 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4131 AutoCallVM callvm(masm, this, allocator);
4133 Register str = allocator.useRegister(masm, strId);
4135 callvm.prepare();
4136 masm.Push(str);
4138 using Fn = JSString* (*)(JSContext*, HandleString);
4139 callvm.call<Fn, js::StringTrimStart>();
4140 return true;
4143 bool CacheIRCompiler::emitStringTrimEndResult(StringOperandId strId) {
4144 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4146 AutoCallVM callvm(masm, this, allocator);
4148 Register str = allocator.useRegister(masm, strId);
4150 callvm.prepare();
4151 masm.Push(str);
4153 using Fn = JSString* (*)(JSContext*, HandleString);
4154 callvm.call<Fn, js::StringTrimEnd>();
4155 return true;
4158 bool CacheIRCompiler::emitLoadArgumentsObjectArgResult(ObjOperandId objId,
4159 Int32OperandId indexId) {
4160 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4161 AutoOutputRegister output(*this);
4162 Register obj = allocator.useRegister(masm, objId);
4163 Register index = allocator.useRegister(masm, indexId);
4164 AutoScratchRegister scratch(allocator, masm);
4166 FailurePath* failure;
4167 if (!addFailurePath(&failure)) {
4168 return false;
4171 masm.loadArgumentsObjectElement(obj, index, output.valueReg(), scratch,
4172 failure->label());
4173 return true;
4176 bool CacheIRCompiler::emitLoadArgumentsObjectArgHoleResult(
4177 ObjOperandId objId, Int32OperandId indexId) {
4178 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4179 AutoOutputRegister output(*this);
4180 Register obj = allocator.useRegister(masm, objId);
4181 Register index = allocator.useRegister(masm, indexId);
4182 AutoScratchRegister scratch(allocator, masm);
4184 FailurePath* failure;
4185 if (!addFailurePath(&failure)) {
4186 return false;
4189 masm.loadArgumentsObjectElementHole(obj, index, output.valueReg(), scratch,
4190 failure->label());
4191 return true;
4194 bool CacheIRCompiler::emitLoadArgumentsObjectArgExistsResult(
4195 ObjOperandId objId, Int32OperandId indexId) {
4196 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4197 AutoOutputRegister output(*this);
4198 Register obj = allocator.useRegister(masm, objId);
4199 Register index = allocator.useRegister(masm, indexId);
4200 AutoScratchRegister scratch1(allocator, masm);
4201 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
4203 FailurePath* failure;
4204 if (!addFailurePath(&failure)) {
4205 return false;
4208 masm.loadArgumentsObjectElementExists(obj, index, scratch2, scratch1,
4209 failure->label());
4210 EmitStoreResult(masm, scratch2, JSVAL_TYPE_BOOLEAN, output);
4211 return true;
4214 bool CacheIRCompiler::emitLoadDenseElementResult(ObjOperandId objId,
4215 Int32OperandId indexId) {
4216 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4217 AutoOutputRegister output(*this);
4218 Register obj = allocator.useRegister(masm, objId);
4219 Register index = allocator.useRegister(masm, indexId);
4220 AutoScratchRegister scratch1(allocator, masm);
4221 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
4223 FailurePath* failure;
4224 if (!addFailurePath(&failure)) {
4225 return false;
4228 // Load obj->elements.
4229 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
4231 // Bounds check.
4232 Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
4233 masm.spectreBoundsCheck32(index, initLength, scratch2, failure->label());
4235 // Hole check.
4236 BaseObjectElementIndex element(scratch1, index);
4237 masm.branchTestMagic(Assembler::Equal, element, failure->label());
4238 masm.loadTypedOrValue(element, output);
4239 return true;
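// Dense-element holes are represented by a magic value, so the hole check
// above is a single branchTestMagic. Illustratively, for the JS array
// `[0, , 2]` a read of index 1 hits the magic value and bails out, since
// the real result may come from the prototype chain.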
4242 bool CacheIRCompiler::emitGuardInt32IsNonNegative(Int32OperandId indexId) {
4243 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4244 Register index = allocator.useRegister(masm, indexId);
4246 FailurePath* failure;
4247 if (!addFailurePath(&failure)) {
4248 return false;
4251 masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
4252 return true;
4255 bool CacheIRCompiler::emitGuardIndexIsNotDenseElement(ObjOperandId objId,
4256 Int32OperandId indexId) {
4257 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4258 Register obj = allocator.useRegister(masm, objId);
4259 Register index = allocator.useRegister(masm, indexId);
4260 AutoScratchRegister scratch(allocator, masm);
4261 AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);
4263 FailurePath* failure;
4264 if (!addFailurePath(&failure)) {
4265 return false;
4268 // Load obj->elements.
4269 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
4271 // Ensure index >= initLength or the element is a hole.
4272 Label notDense;
4273 Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
4274 masm.spectreBoundsCheck32(index, initLength, spectreScratch, &notDense);
4276 BaseValueIndex element(scratch, index);
4277 masm.branchTestMagic(Assembler::Equal, element, &notDense);
4279 masm.jump(failure->label());
4281 masm.bind(&notDense);
4282 return true;
4285 bool CacheIRCompiler::emitGuardIndexIsValidUpdateOrAdd(ObjOperandId objId,
4286 Int32OperandId indexId) {
4287 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4288 Register obj = allocator.useRegister(masm, objId);
4289 Register index = allocator.useRegister(masm, indexId);
4290 AutoScratchRegister scratch(allocator, masm);
4291 AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);
4293 FailurePath* failure;
4294 if (!addFailurePath(&failure)) {
4295 return false;
4298 // Load obj->elements.
4299 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
4301 Label success;
4303 // If the length is writable, all indices are writable too; branch to &success.
4304 Address flags(scratch, ObjectElements::offsetOfFlags());
4305 masm.branchTest32(Assembler::Zero, flags,
4306 Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
4307 &success);
4309 // Otherwise, ensure index is in bounds.
4310 Address length(scratch, ObjectElements::offsetOfLength());
4311 masm.spectreBoundsCheck32(index, length, spectreScratch,
4312 /* failure = */ failure->label());
4313 masm.bind(&success);
4314 return true;
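// The slow case here is arrays whose length was made non-writable, e.g.
// (illustrative JS) Object.defineProperty(arr, "length", {writable: false})
// or Object.freeze(arr); for those, adding an element at index >= length
// must not succeed, hence the extra bounds check.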
4317 bool CacheIRCompiler::emitGuardTagNotEqual(ValueTagOperandId lhsId,
4318 ValueTagOperandId rhsId) {
4319 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4320 Register lhs = allocator.useRegister(masm, lhsId);
4321 Register rhs = allocator.useRegister(masm, rhsId);
4323 FailurePath* failure;
4324 if (!addFailurePath(&failure)) {
4325 return false;
4328 Label done;
4329 masm.branch32(Assembler::Equal, lhs, rhs, failure->label());
4331 // If both lhs and rhs are numbers, unequal tags (Int32 vs. Double) don't
4332 // imply unequal values, so the tag comparison can't prove inequality here.
4333 masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
4334 masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
4335 masm.jump(failure->label());
4337 masm.bind(&done);
4338 return true;
4341 bool CacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto(
4342 ObjOperandId objId, uint32_t shapeWrapperOffset) {
4343 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4345 Register obj = allocator.useRegister(masm, objId);
4346 StubFieldOffset shapeWrapper(shapeWrapperOffset, StubField::Type::JSObject);
4348 AutoScratchRegister scratch(allocator, masm);
4349 AutoScratchRegister scratch2(allocator, masm);
4350 AutoScratchRegister scratch3(allocator, masm);
4352 FailurePath* failure;
4353 if (!addFailurePath(&failure)) {
4354 return false;
4357 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
4358 Address holderAddress(scratch,
4359 sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
4360 Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
4361 GetXrayJitInfo()->holderExpandoSlot));
4363 masm.fallibleUnboxObject(holderAddress, scratch, failure->label());
4364 masm.fallibleUnboxObject(expandoAddress, scratch, failure->label());
4366 // Unwrap the expando before checking its shape.
4367 masm.loadPtr(Address(scratch, ProxyObject::offsetOfReservedSlots()), scratch);
4368 masm.unboxObject(
4369 Address(scratch, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
4370 scratch);
4372 emitLoadStubField(shapeWrapper, scratch2);
4373 LoadShapeWrapperContents(masm, scratch2, scratch2, failure->label());
4374 masm.branchTestObjShape(Assembler::NotEqual, scratch, scratch2, scratch3,
4375 scratch, failure->label());
4377 // The reserved slots on the expando should all be in fixed slots.
4378 Address protoAddress(scratch, NativeObject::getFixedSlotOffset(
4379 GetXrayJitInfo()->expandoProtoSlot));
4380 masm.branchTestUndefined(Assembler::NotEqual, protoAddress, failure->label());
4382 return true;
4385 bool CacheIRCompiler::emitGuardXrayNoExpando(ObjOperandId objId) {
4386 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4388 Register obj = allocator.useRegister(masm, objId);
4389 AutoScratchRegister scratch(allocator, masm);
4391 FailurePath* failure;
4392 if (!addFailurePath(&failure)) {
4393 return false;
4396 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
4397 Address holderAddress(scratch,
4398 sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
4399 Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
4400 GetXrayJitInfo()->holderExpandoSlot));
4402 Label done;
4403 masm.fallibleUnboxObject(holderAddress, scratch, &done);
4404 masm.branchTestObject(Assembler::Equal, expandoAddress, failure->label());
4405 masm.bind(&done);
4407 return true;
4410 bool CacheIRCompiler::emitGuardNoAllocationMetadataBuilder(
4411 uint32_t builderAddrOffset) {
4412 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4413 AutoScratchRegister scratch(allocator, masm);
4415 FailurePath* failure;
4416 if (!addFailurePath(&failure)) {
4417 return false;
4420 StubFieldOffset builderField(builderAddrOffset, StubField::Type::RawPointer);
4421 emitLoadStubField(builderField, scratch);
4422 masm.branchPtr(Assembler::NotEqual, Address(scratch, 0), ImmWord(0),
4423 failure->label());
4425 return true;
4428 bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId,
4429 bool constructing) {
4430 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4431 Register fun = allocator.useRegister(masm, funId);
4433 FailurePath* failure;
4434 if (!addFailurePath(&failure)) {
4435 return false;
4438 masm.branchIfFunctionHasNoJitEntry(fun, constructing, failure->label());
4439 return true;
4442 bool CacheIRCompiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId) {
4443 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4444 Register obj = allocator.useRegister(masm, funId);
4445 AutoScratchRegister scratch(allocator, masm);
4447 FailurePath* failure;
4448 if (!addFailurePath(&failure)) {
4449 return false;
4452 masm.branchIfFunctionHasJitEntry(obj, /*isConstructing =*/false,
4453 failure->label());
4454 return true;
4457 bool CacheIRCompiler::emitGuardFunctionIsNonBuiltinCtor(ObjOperandId funId) {
4458 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4460 Register fun = allocator.useRegister(masm, funId);
4461 AutoScratchRegister scratch(allocator, masm);
4463 FailurePath* failure;
4464 if (!addFailurePath(&failure)) {
4465 return false;
4468 masm.branchIfNotFunctionIsNonBuiltinCtor(fun, scratch, failure->label());
4469 return true;
4472 bool CacheIRCompiler::emitGuardFunctionIsConstructor(ObjOperandId funId) {
4473 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4474 Register funcReg = allocator.useRegister(masm, funId);
4475 AutoScratchRegister scratch(allocator, masm);
4477 FailurePath* failure;
4478 if (!addFailurePath(&failure)) {
4479 return false;
4482 // Ensure the callee function is a constructor.
4483 masm.branchTestFunctionFlags(funcReg, FunctionFlags::CONSTRUCTOR,
4484 Assembler::Zero, failure->label());
4485 return true;
4488 bool CacheIRCompiler::emitGuardNotClassConstructor(ObjOperandId funId) {
4489 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4490 Register fun = allocator.useRegister(masm, funId);
4491 AutoScratchRegister scratch(allocator, masm);
4493 FailurePath* failure;
4494 if (!addFailurePath(&failure)) {
4495 return false;
4498 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
4499 fun, scratch, failure->label());
4500 return true;
4503 bool CacheIRCompiler::emitGuardArrayIsPacked(ObjOperandId arrayId) {
4504 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4505 Register array = allocator.useRegister(masm, arrayId);
4506 AutoScratchRegister scratch(allocator, masm);
4507 AutoScratchRegister scratch2(allocator, masm);
4509 FailurePath* failure;
4510 if (!addFailurePath(&failure)) {
4511 return false;
4514 masm.branchArrayIsNotPacked(array, scratch, scratch2, failure->label());
4515 return true;
4518 bool CacheIRCompiler::emitGuardArgumentsObjectFlags(ObjOperandId objId,
4519 uint8_t flags) {
4520 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4521 Register obj = allocator.useRegister(masm, objId);
4522 AutoScratchRegister scratch(allocator, masm);
4524 FailurePath* failure;
4525 if (!addFailurePath(&failure)) {
4526 return false;
4529 masm.branchTestArgumentsObjectFlags(obj, scratch, flags, Assembler::NonZero,
4530 failure->label());
4531 return true;
4534 bool CacheIRCompiler::emitLoadDenseElementHoleResult(ObjOperandId objId,
4535 Int32OperandId indexId) {
4536 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4537 AutoOutputRegister output(*this);
4538 Register obj = allocator.useRegister(masm, objId);
4539 Register index = allocator.useRegister(masm, indexId);
4540 AutoScratchRegister scratch1(allocator, masm);
4541 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
4543 FailurePath* failure;
4544 if (!addFailurePath(&failure)) {
4545 return false;
4548 // Make sure the index is nonnegative.
4549 masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
4551 // Load obj->elements.
4552 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
4554 // Guard on the initialized length.
4555 Label hole;
4556 Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
4557 masm.spectreBoundsCheck32(index, initLength, scratch2, &hole);
4559 // Load the value.
4560 Label done;
4561 masm.loadValue(BaseObjectElementIndex(scratch1, index), output.valueReg());
4562 masm.branchTestMagic(Assembler::NotEqual, output.valueReg(), &done);
4564 // Load undefined for the hole.
4565 masm.bind(&hole);
4566 masm.moveValue(UndefinedValue(), output.valueReg());
4568 masm.bind(&done);
4569 return true;
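// Unlike emitLoadDenseElementResult above, this variant maps a hole or an
// out-of-bounds index to `undefined` instead of bailing out, matching e.g.
// `[,1][0]` evaluating to undefined when the element is absent and nothing
// on the prototype chain shadows the index (illustrative JS).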
4572 bool CacheIRCompiler::emitLoadTypedArrayElementExistsResult(
4573 ObjOperandId objId, IntPtrOperandId indexId) {
4574 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4575 AutoOutputRegister output(*this);
4576 Register obj = allocator.useRegister(masm, objId);
4577 Register index = allocator.useRegister(masm, indexId);
4578 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4580 Label outOfBounds, done;
4582 // Bounds check.
4583 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
4584 masm.branchPtr(Assembler::BelowOrEqual, scratch, index, &outOfBounds);
4585 EmitStoreBoolean(masm, true, output);
4586 masm.jump(&done);
4588 masm.bind(&outOfBounds);
4589 EmitStoreBoolean(masm, false, output);
4591 masm.bind(&done);
4592 return true;
4595 bool CacheIRCompiler::emitLoadDenseElementExistsResult(ObjOperandId objId,
4596 Int32OperandId indexId) {
4597 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4598 AutoOutputRegister output(*this);
4599 Register obj = allocator.useRegister(masm, objId);
4600 Register index = allocator.useRegister(masm, indexId);
4601 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4603 FailurePath* failure;
4604 if (!addFailurePath(&failure)) {
4605 return false;
4608 // Load obj->elements.
4609 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
4611 // Bounds check. The unsigned compare also sends negative indices to the next IC.
4612 Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
4613 masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());
4615 // Hole check.
4616 BaseObjectElementIndex element(scratch, index);
4617 masm.branchTestMagic(Assembler::Equal, element, failure->label());
4619 EmitStoreBoolean(masm, true, output);
4620 return true;
4623 bool CacheIRCompiler::emitLoadDenseElementHoleExistsResult(
4624 ObjOperandId objId, Int32OperandId indexId) {
4625 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4626 AutoOutputRegister output(*this);
4627 Register obj = allocator.useRegister(masm, objId);
4628 Register index = allocator.useRegister(masm, indexId);
4629 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4631 FailurePath* failure;
4632 if (!addFailurePath(&failure)) {
4633 return false;
4636 // Make sure the index is nonnegative.
4637 masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
4639 // Load obj->elements.
4640 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
4642 // Guard on the initialized length.
4643 Label hole;
4644 Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
4645 masm.branch32(Assembler::BelowOrEqual, initLength, index, &hole);
4647 // Check for a hole; if the element is present, the result is true.
4648 Label done;
4649 BaseObjectElementIndex element(scratch, index);
4650 masm.branchTestMagic(Assembler::Equal, element, &hole);
4651 EmitStoreBoolean(masm, true, output);
4652 masm.jump(&done);
4654 // Store false for the hole.
4655 masm.bind(&hole);
4656 EmitStoreBoolean(masm, false, output);
4658 masm.bind(&done);
4659 return true;
4662 bool CacheIRCompiler::emitPackedArrayPopResult(ObjOperandId arrayId) {
4663 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4665 AutoOutputRegister output(*this);
4666 Register array = allocator.useRegister(masm, arrayId);
4667 AutoScratchRegister scratch1(allocator, masm);
4668 AutoScratchRegister scratch2(allocator, masm);
4670 FailurePath* failure;
4671 if (!addFailurePath(&failure)) {
4672 return false;
4675 masm.packedArrayPop(array, output.valueReg(), scratch1, scratch2,
4676 failure->label());
4677 return true;
4680 bool CacheIRCompiler::emitPackedArrayShiftResult(ObjOperandId arrayId) {
4681 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4683 AutoOutputRegister output(*this);
4684 Register array = allocator.useRegister(masm, arrayId);
4685 AutoScratchRegister scratch1(allocator, masm);
4686 AutoScratchRegister scratch2(allocator, masm);
4688 FailurePath* failure;
4689 if (!addFailurePath(&failure)) {
4690 return false;
4693 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
4694 liveVolatileFloatRegs());
4695 masm.packedArrayShift(array, output.valueReg(), scratch1, scratch2,
4696 volatileRegs, failure->label());
4697 return true;
4700 bool CacheIRCompiler::emitIsObjectResult(ValOperandId inputId) {
4701 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4703 AutoOutputRegister output(*this);
4704 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4706 ValueOperand val = allocator.useValueRegister(masm, inputId);
4708 masm.testObjectSet(Assembler::Equal, val, scratch);
4710 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
4711 return true;
4714 bool CacheIRCompiler::emitIsPackedArrayResult(ObjOperandId objId) {
4715 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4717 AutoOutputRegister output(*this);
4718 Register obj = allocator.useRegister(masm, objId);
4719 AutoScratchRegister scratch(allocator, masm);
4721 Register outputScratch = output.valueReg().scratchReg();
4722 masm.setIsPackedArray(obj, outputScratch, scratch);
4723 masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
4724 return true;
4727 bool CacheIRCompiler::emitIsCallableResult(ValOperandId inputId) {
4728 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4730 AutoOutputRegister output(*this);
4731 AutoScratchRegister scratch1(allocator, masm);
4732 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
4734 ValueOperand val = allocator.useValueRegister(masm, inputId);
4736 Label isObject, done;
4737 masm.branchTestObject(Assembler::Equal, val, &isObject);
4738 // Primitives are never callable.
4739 masm.move32(Imm32(0), scratch2);
4740 masm.jump(&done);
4742 masm.bind(&isObject);
4743 masm.unboxObject(val, scratch1);
4745 Label isProxy;
4746 masm.isCallable(scratch1, scratch2, &isProxy);
4747 masm.jump(&done);
4749 masm.bind(&isProxy);
4750 {
4751 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
4752 liveVolatileFloatRegs());
4753 masm.PushRegsInMask(volatileRegs);
4755 using Fn = bool (*)(JSObject* obj);
4756 masm.setupUnalignedABICall(scratch2);
4757 masm.passABIArg(scratch1);
4758 masm.callWithABI<Fn, ObjectIsCallable>();
4759 masm.storeCallBoolResult(scratch2);
4761 LiveRegisterSet ignore;
4762 ignore.add(scratch2);
4763 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
4764 }
4766 masm.bind(&done);
4767 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
4768 return true;
4771 bool CacheIRCompiler::emitIsConstructorResult(ObjOperandId objId) {
4772 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4774 AutoOutputRegister output(*this);
4775 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4777 Register obj = allocator.useRegister(masm, objId);
4779 Label isProxy, done;
4780 masm.isConstructor(obj, scratch, &isProxy);
4781 masm.jump(&done);
4783 masm.bind(&isProxy);
4784 {
4785 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
4786 liveVolatileFloatRegs());
4787 masm.PushRegsInMask(volatileRegs);
4789 using Fn = bool (*)(JSObject* obj);
4790 masm.setupUnalignedABICall(scratch);
4791 masm.passABIArg(obj);
4792 masm.callWithABI<Fn, ObjectIsConstructor>();
4793 masm.storeCallBoolResult(scratch);
4795 LiveRegisterSet ignore;
4796 ignore.add(scratch);
4797 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
4798 }
4800 masm.bind(&done);
4801 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
4802 return true;
4805 bool CacheIRCompiler::emitIsCrossRealmArrayConstructorResult(
4806 ObjOperandId objId) {
4807 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4809 AutoOutputRegister output(*this);
4810 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4811 Register obj = allocator.useRegister(masm, objId);
4813 masm.setIsCrossRealmArrayConstructor(obj, scratch);
4814 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
4815 return true;
4818 bool CacheIRCompiler::emitArrayBufferViewByteOffsetInt32Result(
4819 ObjOperandId objId) {
4820 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4822 AutoOutputRegister output(*this);
4823 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4824 Register obj = allocator.useRegister(masm, objId);
4826 FailurePath* failure;
4827 if (!addFailurePath(&failure)) {
4828 return false;
4831 masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
4832 masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
4833 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
4834 return true;
4837 bool CacheIRCompiler::emitArrayBufferViewByteOffsetDoubleResult(
4838 ObjOperandId objId) {
4839 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4841 AutoOutputRegister output(*this);
4842 Register obj = allocator.useRegister(masm, objId);
4843 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4845 ScratchDoubleScope fpscratch(masm);
4846 masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
4847 masm.convertIntPtrToDouble(scratch, fpscratch);
4848 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
4849 return true;
4852 bool CacheIRCompiler::emitTypedArrayByteLengthInt32Result(ObjOperandId objId) {
4853 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4855 AutoOutputRegister output(*this);
4856 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
4857 AutoScratchRegister scratch2(allocator, masm);
4858 Register obj = allocator.useRegister(masm, objId);
4860 FailurePath* failure;
4861 if (!addFailurePath(&failure)) {
4862 return false;
4865 masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
4866 masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
4867 masm.typedArrayElementSize(obj, scratch2);
4869 masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
4870 failure->label());
4872 masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
4873 return true;
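// byteLength = length * elementSize can overflow int32 even when the
// length itself fits: illustratively, a Float64Array with 2**28 elements
// has a byteLength of 2**31, one past INT32_MAX. The branchMul32 overflow
// check bails so the double-result variant below can handle such sizes.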
4876 bool CacheIRCompiler::emitTypedArrayByteLengthDoubleResult(ObjOperandId objId) {
4877 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4879 AutoOutputRegister output(*this);
4880 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
4881 AutoScratchRegister scratch2(allocator, masm);
4882 Register obj = allocator.useRegister(masm, objId);
4884 masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
4885 masm.typedArrayElementSize(obj, scratch2);
4886 masm.mulPtr(scratch2, scratch1);
4888 ScratchDoubleScope fpscratch(masm);
4889 masm.convertIntPtrToDouble(scratch1, fpscratch);
4890 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
4891 return true;
4894 bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId) {
4895 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4897 AutoOutputRegister output(*this);
4898 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4899 Register obj = allocator.useRegister(masm, objId);
4901 masm.typedArrayElementSize(obj, scratch);
4902 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
4903 return true;
4906 bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId) {
4907 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4909 AutoScratchRegister scratch(allocator, masm);
4910 Register obj = allocator.useRegister(masm, objId);
4912 FailurePath* failure;
4913 if (!addFailurePath(&failure)) {
4914 return false;
4917 masm.branchIfHasDetachedArrayBuffer(obj, scratch, failure->label());
4918 return true;
4921 bool CacheIRCompiler::emitIsTypedArrayConstructorResult(ObjOperandId objId) {
4922 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4924 AutoOutputRegister output(*this);
4925 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4926 Register obj = allocator.useRegister(masm, objId);
4928 masm.setIsDefinitelyTypedArrayConstructor(obj, scratch);
4929 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
4930 return true;
4933 bool CacheIRCompiler::emitGetNextMapSetEntryForIteratorResult(
4934 ObjOperandId iterId, ObjOperandId resultArrId, bool isMap) {
4935 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4937 AutoOutputRegister output(*this);
4938 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4939 Register iter = allocator.useRegister(masm, iterId);
4940 Register resultArr = allocator.useRegister(masm, resultArrId);
4942 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
4943 save.takeUnchecked(output.valueReg());
4944 save.takeUnchecked(scratch);
4945 masm.PushRegsInMask(save);
4947 masm.setupUnalignedABICall(scratch);
4948 masm.passABIArg(iter);
4949 masm.passABIArg(resultArr);
4950 if (isMap) {
4951 using Fn = bool (*)(MapIteratorObject* iter, ArrayObject* resultPairObj);
4952 masm.callWithABI<Fn, MapIteratorObject::next>();
4953 } else {
4954 using Fn = bool (*)(SetIteratorObject* iter, ArrayObject* resultObj);
4955 masm.callWithABI<Fn, SetIteratorObject::next>();
4956 }
4957 masm.storeCallBoolResult(scratch);
4959 masm.PopRegsInMask(save);
4961 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
4962 return true;
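// The pattern above is the standard recipe for calling C++ from IC code
// without a full VM wrapper: push all volatile registers, make an
// unaligned ABI call, then pop everything except the register holding the
// result. This is only safe because MapIteratorObject::next and
// SetIteratorObject::next neither GC nor throw (hedged: that contract is
// enforced on the callee side and not visible here).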
4965 void CacheIRCompiler::emitActivateIterator(Register objBeingIterated,
4966 Register iterObject,
4967 Register nativeIter,
4968 Register scratch, Register scratch2,
4969 uint32_t enumeratorsAddrOffset) {
4970 // 'objectBeingIterated_' must be nullptr, so we don't need a pre-barrier.
4971 Address iterObjAddr(nativeIter,
4972 NativeIterator::offsetOfObjectBeingIterated());
4973 #ifdef DEBUG
4974 Label ok;
4975 masm.branchPtr(Assembler::Equal, iterObjAddr, ImmPtr(nullptr), &ok);
4976 masm.assumeUnreachable("iterator with non-null object");
4977 masm.bind(&ok);
4978 #endif
4980 // Mark iterator as active.
4981 Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
4982 masm.storePtr(objBeingIterated, iterObjAddr);
4983 masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
4985 // Post-write barrier for stores to 'objectBeingIterated_'.
4986 emitPostBarrierSlot(
4987 iterObject,
4988 TypedOrValueRegister(MIRType::Object, AnyRegister(objBeingIterated)),
4989 scratch);
4991 // Chain onto the active iterator stack.
4992 StubFieldOffset enumeratorsAddr(enumeratorsAddrOffset,
4993 StubField::Type::RawPointer);
4994 emitLoadStubField(enumeratorsAddr, scratch);
4995 masm.registerIterator(scratch, nativeIter, scratch2);
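// Hedged rationale for the barriers above: storing objBeingIterated may
// create an edge from tenured iterator state to a nursery object, so a
// post-write barrier is needed to keep the store buffer accurate, while no
// pre-barrier is needed because the old value is asserted to be null.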
4998 bool CacheIRCompiler::emitObjectToIteratorResult(
4999 ObjOperandId objId, uint32_t enumeratorsAddrOffset) {
5000 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5002 AutoCallVM callvm(masm, this, allocator);
5003 Register obj = allocator.useRegister(masm, objId);
5005 AutoScratchRegister iterObj(allocator, masm);
5006 AutoScratchRegister scratch(allocator, masm);
5007 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, callvm.output());
5008 AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, callvm.output());
5010 Label callVM, done;
5011 masm.maybeLoadIteratorFromShape(obj, iterObj, scratch, scratch2, scratch3,
5012 &callVM);
5014 masm.loadPrivate(
5015 Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
5016 scratch);
5018 emitActivateIterator(obj, iterObj, scratch, scratch2, scratch3,
5019 enumeratorsAddrOffset);
5020 masm.jump(&done);
5022 masm.bind(&callVM);
5023 callvm.prepare();
5024 masm.Push(obj);
5025 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
5026 callvm.call<Fn, GetIterator>();
5027 masm.storeCallPointerResult(iterObj);
5029 masm.bind(&done);
5030 EmitStoreResult(masm, iterObj, JSVAL_TYPE_OBJECT, callvm.output());
5031 return true;
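// Hedged summary: maybeLoadIteratorFromShape consults a shape-keyed cache
// of reusable PropertyIteratorObjects, and only a cache miss takes the
// GetIterator VM call. Both paths leave the iterator in iterObj, which is
// then boxed as the result.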
5034 bool CacheIRCompiler::emitValueToIteratorResult(ValOperandId valId) {
5035 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5037 AutoCallVM callvm(masm, this, allocator);
5039 ValueOperand val = allocator.useValueRegister(masm, valId);
5041 callvm.prepare();
5043 masm.Push(val);
5045 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
5046 callvm.call<Fn, ValueToIterator>();
5047 return true;
5050 bool CacheIRCompiler::emitNewArrayIteratorResult(
5051 uint32_t templateObjectOffset) {
5052 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5054 AutoCallVM callvm(masm, this, allocator);
5056 callvm.prepare();
5058 using Fn = ArrayIteratorObject* (*)(JSContext*);
5059 callvm.call<Fn, NewArrayIterator>();
5060 return true;
5063 bool CacheIRCompiler::emitNewStringIteratorResult(
5064 uint32_t templateObjectOffset) {
5065 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5067 AutoCallVM callvm(masm, this, allocator);
5069 callvm.prepare();
5071 using Fn = StringIteratorObject* (*)(JSContext*);
5072 callvm.call<Fn, NewStringIterator>();
5073 return true;
5076 bool CacheIRCompiler::emitNewRegExpStringIteratorResult(
5077 uint32_t templateObjectOffset) {
5078 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5080 AutoCallVM callvm(masm, this, allocator);
5082 callvm.prepare();
5084 using Fn = RegExpStringIteratorObject* (*)(JSContext*);
5085 callvm.call<Fn, NewRegExpStringIterator>();
5086 return true;
5089 bool CacheIRCompiler::emitObjectCreateResult(uint32_t templateObjectOffset) {
5090 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5092 AutoCallVM callvm(masm, this, allocator);
5093 AutoScratchRegister scratch(allocator, masm);
5095 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5096 emitLoadStubField(objectField, scratch);
5098 callvm.prepare();
5099 masm.Push(scratch);
5101 using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
5102 callvm.call<Fn, ObjectCreateWithTemplate>();
5103 return true;
5106 bool CacheIRCompiler::emitObjectKeysResult(ObjOperandId objId) {
5107 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5109 AutoCallVM callvm(masm, this, allocator);
5110 Register obj = allocator.useRegister(masm, objId);
5112 // Our goal is only to record calls to Object.keys so they can be elided
5113 // when the result is only partially used, not to provide an alternative implementation.
5115 callvm.prepare();
5116 masm.Push(obj);
5118 using Fn = JSObject* (*)(JSContext*, HandleObject);
5119 callvm.call<Fn, jit::ObjectKeys>();
5122 return true;
5125 bool CacheIRCompiler::emitNewArrayFromLengthResult(
5126 uint32_t templateObjectOffset, Int32OperandId lengthId) {
5127 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5129 AutoCallVM callvm(masm, this, allocator);
5130 AutoScratchRegister scratch(allocator, masm);
5131 Register length = allocator.useRegister(masm, lengthId);
5133 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5134 emitLoadStubField(objectField, scratch);
5136 callvm.prepare();
5137 masm.Push(length);
5138 masm.Push(scratch);
5140 using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
5141 callvm.call<Fn, ArrayConstructorOneArg>();
5142 return true;
5145 bool CacheIRCompiler::emitNewTypedArrayFromLengthResult(
5146 uint32_t templateObjectOffset, Int32OperandId lengthId) {
5147 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5149 AutoCallVM callvm(masm, this, allocator);
5150 AutoScratchRegister scratch(allocator, masm);
5151 Register length = allocator.useRegister(masm, lengthId);
5153 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5154 emitLoadStubField(objectField, scratch);
5156 callvm.prepare();
5157 masm.Push(length);
5158 masm.Push(scratch);
5160 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
5161 callvm.call<Fn, NewTypedArrayWithTemplateAndLength>();
5162 return true;
5165 bool CacheIRCompiler::emitNewTypedArrayFromArrayBufferResult(
5166 uint32_t templateObjectOffset, ObjOperandId bufferId,
5167 ValOperandId byteOffsetId, ValOperandId lengthId) {
5168 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5170 #ifdef JS_CODEGEN_X86
5171 MOZ_CRASH("Instruction not supported on 32-bit x86, not enough registers");
5172 #endif
5174 AutoCallVM callvm(masm, this, allocator);
5175 AutoScratchRegister scratch(allocator, masm);
5176 Register buffer = allocator.useRegister(masm, bufferId);
5177 ValueOperand byteOffset = allocator.useValueRegister(masm, byteOffsetId);
5178 ValueOperand length = allocator.useValueRegister(masm, lengthId);
5180 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5181 emitLoadStubField(objectField, scratch);
5183 callvm.prepare();
5184 masm.Push(length);
5185 masm.Push(byteOffset);
5186 masm.Push(buffer);
5187 masm.Push(scratch);
5189 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
5190 HandleValue, HandleValue);
5191 callvm.call<Fn, NewTypedArrayWithTemplateAndBuffer>();
5192 return true;
5195 bool CacheIRCompiler::emitNewTypedArrayFromArrayResult(
5196 uint32_t templateObjectOffset, ObjOperandId arrayId) {
5197 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5199 AutoCallVM callvm(masm, this, allocator);
5200 AutoScratchRegister scratch(allocator, masm);
5201 Register array = allocator.useRegister(masm, arrayId);
5203 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5204 emitLoadStubField(objectField, scratch);
5206 callvm.prepare();
5207 masm.Push(array);
5208 masm.Push(scratch);
5210 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
5211 callvm.call<Fn, NewTypedArrayWithTemplateAndArray>();
5212 return true;
5215 bool CacheIRCompiler::emitAddSlotAndCallAddPropHook(ObjOperandId objId,
5216 ValOperandId rhsId,
5217 uint32_t newShapeOffset) {
5218 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5220 AutoCallVM callvm(masm, this, allocator);
5222 AutoScratchRegister scratch(allocator, masm);
5223 Register obj = allocator.useRegister(masm, objId);
5224 ValueOperand rhs = allocator.useValueRegister(masm, rhsId);
5226 StubFieldOffset shapeField(newShapeOffset, StubField::Type::Shape);
5227 emitLoadStubField(shapeField, scratch);
5229 callvm.prepare();
5231 masm.Push(scratch);
5232 masm.Push(rhs);
5233 masm.Push(obj);
5235 using Fn =
5236 bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
5237 callvm.callNoResult<Fn, AddSlotAndCallAddPropHook>();
5238 return true;
5241 bool CacheIRCompiler::emitMathAbsInt32Result(Int32OperandId inputId) {
5242 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5244 AutoOutputRegister output(*this);
5245 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5247 Register input = allocator.useRegister(masm, inputId);
5249 FailurePath* failure;
5250 if (!addFailurePath(&failure)) {
5251 return false;
5254 masm.mov(input, scratch);
5255 // Don't negate already positive values.
5256 Label positive;
5257 masm.branchTest32(Assembler::NotSigned, scratch, scratch, &positive);
5258 // neg32 might overflow for INT_MIN.
5259 masm.branchNeg32(Assembler::Overflow, scratch, failure->label());
5260 masm.bind(&positive);
5262 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5263 return true;
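// The overflow bailout exists because |INT32_MIN| has no int32
// representation: Math.abs(-2147483648) must produce the double
// 2147483648, so that single case is left to a slower stub (illustrative
// values).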
5266 bool CacheIRCompiler::emitMathAbsNumberResult(NumberOperandId inputId) {
5267 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5269 AutoOutputRegister output(*this);
5270 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5272 allocator.ensureDoubleRegister(masm, inputId, scratch);
5274 masm.absDouble(scratch, scratch);
5275 masm.boxDouble(scratch, output.valueReg(), scratch);
5276 return true;
5279 bool CacheIRCompiler::emitMathClz32Result(Int32OperandId inputId) {
5280 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5282 AutoOutputRegister output(*this);
5283 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5284 Register input = allocator.useRegister(masm, inputId);
5286 masm.clz32(input, scratch, /* knownNotZero = */ false);
5287 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5288 return true;
5291 bool CacheIRCompiler::emitMathSignInt32Result(Int32OperandId inputId) {
5292 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5294 AutoOutputRegister output(*this);
5295 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5296 Register input = allocator.useRegister(masm, inputId);
5298 masm.signInt32(input, scratch);
5299 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5300 return true;
5303 bool CacheIRCompiler::emitMathSignNumberResult(NumberOperandId inputId) {
5304 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5306 AutoOutputRegister output(*this);
5307 AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
5308 AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
5310 allocator.ensureDoubleRegister(masm, inputId, floatScratch1);
5312 masm.signDouble(floatScratch1, floatScratch2);
5313 masm.boxDouble(floatScratch2, output.valueReg(), floatScratch2);
5314 return true;
5317 bool CacheIRCompiler::emitMathSignNumberToInt32Result(NumberOperandId inputId) {
5318 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5320 AutoOutputRegister output(*this);
5321 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5322 AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
5323 AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
5325 FailurePath* failure;
5326 if (!addFailurePath(&failure)) {
5327 return false;
5330 allocator.ensureDoubleRegister(masm, inputId, floatScratch1);
5332 masm.signDoubleToInt32(floatScratch1, scratch, floatScratch2,
5333 failure->label());
5334 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5335 return true;
bool CacheIRCompiler::emitMathImulResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  masm.mov(lhs, scratch);
  masm.mul32(rhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathSqrtNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  masm.sqrtDouble(scratch, scratch);
  masm.boxDouble(scratch, output.valueReg(), scratch);
  return true;
}
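
// The Math.floor/ceil/trunc emitters below use a single hardware rounding
// instruction when the target supports it, and otherwise fall back to an
// ABI call through emitMathFunctionNumberResultShared.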
bool CacheIRCompiler::emitMathFloorNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::Down)) {
    masm.nearbyIntDouble(RoundingMode::Down, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Floor, scratch,
                                            output.valueReg());
}

bool CacheIRCompiler::emitMathCeilNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::Up)) {
    masm.nearbyIntDouble(RoundingMode::Up, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Ceil, scratch,
                                            output.valueReg());
}

bool CacheIRCompiler::emitMathTruncNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::TowardsZero)) {
    masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Trunc, scratch,
                                            output.valueReg());
}
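
// Math.fround: converting to float32 and back to double rounds the input
// to float32 precision, which is exactly the specified behavior.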
bool CacheIRCompiler::emitMathFRoundNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);
  FloatRegister scratchFloat32 = scratch.get().asSingle();

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  masm.convertDoubleToFloat32(scratch, scratchFloat32);
  masm.convertFloat32ToDouble(scratchFloat32, scratch);

  masm.boxDouble(scratch, output.valueReg(), scratch);
  return true;
}
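
// Math.hypot has no inline codegen, so the two-, three- and four-argument
// specializations below call the C++ helpers through an unaligned ABI call,
// saving and restoring volatile registers around the call.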
bool CacheIRCompiler::emitMathHypot2NumberResult(NumberOperandId first,
                                                 NumberOperandId second) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);

  masm.callWithABI<Fn, ecmaHypot>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}

bool CacheIRCompiler::emitMathHypot3NumberResult(NumberOperandId first,
                                                 NumberOperandId second,
                                                 NumberOperandId third) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);
  allocator.ensureDoubleRegister(masm, third, floatScratch2);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y, double z);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch2, MoveOp::DOUBLE);

  masm.callWithABI<Fn, hypot3>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}

bool CacheIRCompiler::emitMathHypot4NumberResult(NumberOperandId first,
                                                 NumberOperandId second,
                                                 NumberOperandId third,
                                                 NumberOperandId fourth) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
  AutoAvailableFloatRegister floatScratch3(*this, FloatReg3);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);
  allocator.ensureDoubleRegister(masm, third, floatScratch2);
  allocator.ensureDoubleRegister(masm, fourth, floatScratch3);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y, double z, double w);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch2, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch3, MoveOp::DOUBLE);

  masm.callWithABI<Fn, hypot4>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}

bool CacheIRCompiler::emitMathAtan2NumberResult(NumberOperandId yId,
                                                NumberOperandId xId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, yId, floatScratch0);
  allocator.ensureDoubleRegister(masm, xId, floatScratch1);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.callWithABI<Fn, js::ecmaAtan2>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
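
// The *ToInt32 variants below round and then convert to an int32 in one
// step, transferring to the failure path when the result is not exactly
// representable as an int32 (out of range or negative zero).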
bool CacheIRCompiler::emitMathFloorToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.floorDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathCeilToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.ceilDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathTruncToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.truncDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathRoundToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat0(*this, FloatReg0);
  AutoAvailableFloatRegister scratchFloat1(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat0);

  masm.roundDoubleToInt32(scratchFloat0, scratch, scratchFloat1,
                          failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitInt32MinMax(bool isMax, Int32OperandId firstId,
                                      Int32OperandId secondId,
                                      Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register first = allocator.useRegister(masm, firstId);
  Register second = allocator.useRegister(masm, secondId);
  Register result = allocator.defineRegister(masm, resultId);

  Assembler::Condition cond =
      isMax ? Assembler::GreaterThan : Assembler::LessThan;
  masm.move32(first, result);
  masm.cmp32Move32(cond, second, first, second, result);
  return true;
}
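
// Double min/max must propagate NaN and order -0 before +0, so
// minDouble/maxDouble are invoked with handleNaN = true.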
bool CacheIRCompiler::emitNumberMinMax(bool isMax, NumberOperandId firstId,
                                       NumberOperandId secondId,
                                       NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  AutoAvailableFloatRegister scratch1(*this, FloatReg0);
  AutoAvailableFloatRegister scratch2(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, firstId, scratch1);
  allocator.ensureDoubleRegister(masm, secondId, scratch2);

  if (isMax) {
    masm.maxDouble(scratch2, scratch1, /* handleNaN = */ true);
  } else {
    masm.minDouble(scratch2, scratch1, /* handleNaN = */ true);
  }

  masm.boxDouble(scratch1, output, scratch1);
  return true;
}

bool CacheIRCompiler::emitInt32MinMaxArrayResult(ObjOperandId arrayId,
                                                 bool isMax) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);
  AutoScratchRegisterMaybeOutput result(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.minMaxArrayInt32(array, result, scratch, scratch2, scratch3, isMax,
                        failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, result, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitNumberMinMaxArrayResult(ObjOperandId arrayId,
                                                  bool isMax) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);

  AutoAvailableFloatRegister result(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch(*this, FloatReg1);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.minMaxArrayNumber(array, result, floatScratch, scratch1, scratch2, isMax,
                         failure->label());
  masm.boxDouble(result, output.valueReg(), result);
  return true;
}
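
// Shared tail for unary Math functions without inline codegen: call the
// C++ implementation through an ABI call and box the double result.
// |inputScratch| both passes the argument and receives the result, so it
// is excluded from the save set.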
bool CacheIRCompiler::emitMathFunctionNumberResultShared(
    UnaryMathFunction fun, FloatRegister inputScratch, ValueOperand output) {
  UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(inputScratch);
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(output.scratchReg());
  masm.passABIArg(inputScratch, MoveOp::DOUBLE);
  masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
                   MoveOp::DOUBLE);
  masm.storeCallFloatResult(inputScratch);

  masm.PopRegsInMask(save);

  masm.boxDouble(inputScratch, output, inputScratch);
  return true;
}

bool CacheIRCompiler::emitMathFunctionNumberResult(NumberOperandId inputId,
                                                   UnaryMathFunction fun) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  return emitMathFunctionNumberResultShared(fun, scratch, output.valueReg());
}

static void EmitStoreDenseElement(MacroAssembler& masm,
                                  const ConstantOrRegister& value,
                                  BaseObjectElementIndex target) {
  if (value.constant()) {
    Value v = value.value();
    masm.storeValue(v, target);
    return;
  }

  TypedOrValueRegister reg = value.reg();
  masm.storeTypedOrValue(reg, target);
}
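
// Store to a dense element that is known to exist (enforced by the bounds
// and hole checks below), with a pre-barrier for incremental GC and a
// post-barrier for generational GC.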
bool CacheIRCompiler::emitStoreDenseElement(ObjOperandId objId,
                                            Int32OperandId indexId,
                                            ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Bounds check. Unfortunately we don't have more registers available on
  // x86, so use InvalidReg and emit slightly slower code on x86.
  Register spectreTemp = InvalidReg;
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());

  // Perform the store.
  EmitPreBarrier(masm, element, MIRType::Value);
  EmitStoreDenseElement(masm, val, element);

  emitPostBarrierElement(obj, val, scratch, index);
  return true;
}

static void EmitAssertExtensibleElements(MacroAssembler& masm,
                                         Register elementsReg) {
#ifdef DEBUG
  // Preceding shape guards ensure the object elements are extensible.
  Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
  Label ok;
  masm.branchTest32(Assembler::Zero, elementsFlags,
                    Imm32(ObjectElements::Flags::NOT_EXTENSIBLE), &ok);
  masm.assumeUnreachable("Unexpected non-extensible elements");
  masm.bind(&ok);
#endif
}

static void EmitAssertWritableArrayLengthElements(MacroAssembler& masm,
                                                  Register elementsReg) {
#ifdef DEBUG
  // Preceding shape guards ensure the array length is writable.
  Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
  Label ok;
  masm.branchTest32(Assembler::Zero, elementsFlags,
                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
                    &ok);
  masm.assumeUnreachable("Unexpected non-writable array length elements");
  masm.bind(&ok);
#endif
}
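
// Store to a dense element, optionally handling the add case: when
// |handleAdd| is true and the index equals the initialized length, a new
// element is appended, growing the elements allocation through
// NativeObject::addDenseElementPure if the capacity is exhausted.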
bool CacheIRCompiler::emitStoreDenseElementHole(ObjOperandId objId,
                                                Int32OperandId indexId,
                                                ValOperandId rhsId,
                                                bool handleAdd) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  EmitAssertExtensibleElements(masm, scratch);
  if (handleAdd) {
    EmitAssertWritableArrayLengthElements(masm, scratch);
  }

  BaseObjectElementIndex element(scratch, index);
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  Address elementsFlags(scratch, ObjectElements::offsetOfFlags());

  // We don't have enough registers on x86 so use InvalidReg. This will emit
  // slightly less efficient code on x86.
  Register spectreTemp = InvalidReg;

  Label storeSkipPreBarrier;
  if (handleAdd) {
    // Bounds check.
    Label inBounds, outOfBounds;
    masm.spectreBoundsCheck32(index, initLength, spectreTemp, &outOfBounds);
    masm.jump(&inBounds);

    // If we're out-of-bounds, only handle the index == initLength case.
    masm.bind(&outOfBounds);
    masm.branch32(Assembler::NotEqual, initLength, index, failure->label());

    // If index < capacity, we can add a dense element inline. If not we
    // need to allocate more elements.
    Label allocElement, addNewElement;
    Address capacity(scratch, ObjectElements::offsetOfCapacity());
    masm.spectreBoundsCheck32(index, capacity, spectreTemp, &allocElement);
    masm.jump(&addNewElement);

    masm.bind(&allocElement);

    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    save.takeUnchecked(scratch);
    masm.PushRegsInMask(save);

    using Fn = bool (*)(JSContext* cx, NativeObject* obj);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
    masm.storeCallPointerResult(scratch);

    masm.PopRegsInMask(save);
    masm.branchIfFalseBool(scratch, failure->label());

    // Load the reallocated elements pointer.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    masm.bind(&addNewElement);

    // Increment initLength.
    masm.add32(Imm32(1), initLength);

    // If length is now <= index, increment length too.
    Label skipIncrementLength;
    Address length(scratch, ObjectElements::offsetOfLength());
    masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
    masm.add32(Imm32(1), length);
    masm.bind(&skipIncrementLength);

    // Skip EmitPreBarrier as the memory is uninitialized.
    masm.jump(&storeSkipPreBarrier);

    masm.bind(&inBounds);
  } else {
    // Fail if index >= initLength.
    masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());
  }

  EmitPreBarrier(masm, element, MIRType::Value);

  masm.bind(&storeSkipPreBarrier);
  EmitStoreDenseElement(masm, val, element);

  emitPostBarrierElement(obj, val, scratch, index);
  return true;
}

bool CacheIRCompiler::emitArrayPush(ObjOperandId objId, ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratchLength(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  EmitAssertExtensibleElements(masm, scratch);
  EmitAssertWritableArrayLengthElements(masm, scratch);

  Address elementsInitLength(scratch,
                             ObjectElements::offsetOfInitializedLength());
  Address elementsLength(scratch, ObjectElements::offsetOfLength());
  Address capacity(scratch, ObjectElements::offsetOfCapacity());

  // Fail if length != initLength.
  masm.load32(elementsInitLength, scratchLength);
  masm.branch32(Assembler::NotEqual, elementsLength, scratchLength,
                failure->label());

  // If scratchLength < capacity, we can add a dense element inline. If not we
  // need to allocate more elements.
  Label allocElement, addNewElement;
  masm.spectreBoundsCheck32(scratchLength, capacity, InvalidReg, &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(scratch);
  masm.PushRegsInMask(save);

  using Fn = bool (*)(JSContext* cx, NativeObject* obj);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(scratch);

  masm.PopRegsInMask(save);
  masm.branchIfFalseBool(scratch, failure->label());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  masm.bind(&addNewElement);

  // Increment initLength and length.
  masm.add32(Imm32(1), elementsInitLength);
  masm.add32(Imm32(1), elementsLength);

  // Store the value.
  BaseObjectElementIndex element(scratch, scratchLength);
  masm.storeValue(val, element);
  emitPostBarrierElement(obj, val, scratch, scratchLength);

  // Return value is new length.
  masm.add32(Imm32(1), scratchLength);
  masm.tagValue(JSVAL_TYPE_INT32, scratchLength, output.valueReg());

  return true;
}
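
// Store to a typed array element. The RHS operand is interpreted according
// to |elementType|; with |handleOOB| set, out-of-bounds writes are silently
// ignored instead of transferring to a failure path.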
bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
                                                 Scalar::Type elementType,
                                                 IntPtrOperandId indexId,
                                                 uint32_t rhsId,
                                                 bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  Maybe<Register> valInt32;
  Maybe<Register> valBigInt;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Uint8Clamped:
      valInt32.emplace(allocator.useRegister(masm, Int32OperandId(rhsId)));
      break;

    case Scalar::Float32:
    case Scalar::Float64:
      allocator.ensureDoubleRegister(masm, NumberOperandId(rhsId),
                                     floatScratch0);
      break;

    case Scalar::BigInt64:
    case Scalar::BigUint64:
      valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(rhsId)));
      break;

    case Scalar::MaxTypedArrayViewType:
    case Scalar::Int64:
    case Scalar::Simd128:
      MOZ_CRASH("Unsupported TypedArray type");
  }

  AutoScratchRegister scratch1(allocator, masm);
  Maybe<AutoScratchRegister> scratch2;
  Maybe<AutoSpectreBoundsScratchRegister> spectreScratch;
  if (Scalar::isBigIntType(elementType)) {
    scratch2.emplace(allocator, masm);
  } else {
    spectreScratch.emplace(allocator, masm);
  }

  FailurePath* failure = nullptr;
  if (!handleOOB) {
    if (!addFailurePath(&failure)) {
      return false;
    }
  }

  // Bounds check.
  Label done;
  Register spectreTemp = scratch2 ? scratch2->get() : spectreScratch->get();
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  masm.spectreBoundsCheckPtr(index, scratch1, spectreTemp,
                             handleOOB ? &done : failure->label());

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);

  BaseIndex dest(scratch1, index, ScaleFromScalarType(elementType));

  if (Scalar::isBigIntType(elementType)) {
#ifdef JS_PUNBOX64
    Register64 temp(scratch2->get());
#else
    // We don't have more registers available on x86, so spill |obj|.
    masm.push(obj);
    Register64 temp(scratch2->get(), obj);
#endif

    masm.loadBigInt64(*valBigInt, temp);
    masm.storeToTypedBigIntArray(elementType, temp, dest);

#ifndef JS_PUNBOX64
    masm.pop(obj);
#endif
  } else if (elementType == Scalar::Float32) {
    ScratchFloat32Scope fpscratch(masm);
    masm.convertDoubleToFloat32(floatScratch0, fpscratch);
    masm.storeToTypedFloatArray(elementType, fpscratch, dest);
  } else if (elementType == Scalar::Float64) {
    masm.storeToTypedFloatArray(elementType, floatScratch0, dest);
  } else {
    masm.storeToTypedIntArray(elementType, *valInt32, dest);
  }

  masm.bind(&done);
  return true;
}
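
// BigInt results are allocated eagerly, before the infallible part of the
// stub: InitialBigIntHeap picks the nursery or tenured heap based on the
// zone's settings, and EmitAllocateBigInt tries an inline allocation first,
// falling back to an ABI call that may request a minor GC.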
static gc::Heap InitialBigIntHeap(JSContext* cx) {
  JS::Zone* zone = cx->zone();
  return zone->allocNurseryBigInts() ? gc::Heap::Default : gc::Heap::Tenured;
}

static void EmitAllocateBigInt(MacroAssembler& masm, Register result,
                               Register temp, const LiveRegisterSet& liveSet,
                               gc::Heap initialHeap, Label* fail) {
  Label fallback, done;
  masm.newGCBigInt(result, temp, initialHeap, &fallback);
  masm.jump(&done);
  {
    masm.bind(&fallback);

    // Request a minor collection at a later time if nursery allocation failed.
    bool requestMinorGC = initialHeap == gc::Heap::Default;

    masm.PushRegsInMask(liveSet);
    using Fn = void* (*)(JSContext* cx, bool requestMinorGC);
    masm.setupUnalignedABICall(temp);
    masm.loadJSContext(temp);
    masm.passABIArg(temp);
    masm.move32(Imm32(requestMinorGC), result);
    masm.passABIArg(result);
    masm.callWithABI<Fn, jit::AllocateBigIntNoGC>();
    masm.storeCallPointerResult(result);

    masm.PopRegsInMask(liveSet);
    masm.branchPtr(Assembler::Equal, result, ImmWord(0), fail);
  }
  masm.bind(&done);
}

bool CacheIRCompiler::emitLoadTypedArrayElementResult(
    ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
    bool handleOOB, bool forceDoubleForUint32) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoScratchRegister scratch1(allocator, masm);
#ifdef JS_PUNBOX64
  AutoScratchRegister scratch2(allocator, masm);
#else
  // There are too few registers available on x86, so we may need to reuse the
  // output's scratch register.
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  Label outOfBounds;
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  masm.spectreBoundsCheckPtr(index, scratch1, scratch2,
                             handleOOB ? &outOfBounds : failure->label());

  // Allocate BigInt if needed. The code after this should be infallible.
  Maybe<Register> bigInt;
  if (Scalar::isBigIntType(elementType)) {
    bigInt.emplace(output.valueReg().scratchReg());

    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    save.takeUnchecked(scratch1);
    save.takeUnchecked(scratch2);
    save.takeUnchecked(output);

    gc::Heap initialHeap = InitialBigIntHeap(cx_);
    EmitAllocateBigInt(masm, *bigInt, scratch1, save, initialHeap,
                       failure->label());
  }

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);

  // Load the value.
  BaseIndex source(scratch1, index, ScaleFromScalarType(elementType));

  if (Scalar::isBigIntType(elementType)) {
#ifdef JS_PUNBOX64
    Register64 temp(scratch2);
#else
    // We don't have more registers available on x86, so spill |obj| and
    // additionally use the output's type register.
    MOZ_ASSERT(output.valueReg().scratchReg() != output.valueReg().typeReg());
    masm.push(obj);
    Register64 temp(output.valueReg().typeReg(), obj);
#endif

    masm.loadFromTypedBigIntArray(elementType, source, *bigInt, temp);

#ifndef JS_PUNBOX64
    masm.pop(obj);
#endif

    masm.tagValue(JSVAL_TYPE_BIGINT, *bigInt, output.valueReg());
  } else {
    MacroAssembler::Uint32Mode uint32Mode =
        forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
                             : MacroAssembler::Uint32Mode::FailOnDouble;
    masm.loadFromTypedArray(elementType, source, output.valueReg(), uint32Mode,
                            scratch1, failure->label());
  }

  if (handleOOB) {
    Label done;
    masm.jump(&done);

    masm.bind(&outOfBounds);
    masm.moveValue(UndefinedValue(), output.valueReg());

    masm.bind(&done);
  }

  return true;
}
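
// DataView accesses take a byte offset, so the bounds check must ensure
// that the whole |byteSize|-wide access, not just its first byte, fits
// within the view's length.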
static void EmitDataViewBoundsCheck(MacroAssembler& masm, size_t byteSize,
                                    Register obj, Register offset,
                                    Register scratch, Label* fail) {
  // Ensure both offset < length and offset + (byteSize - 1) < length.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  if (byteSize == 1) {
    masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
  } else {
    // temp := length - (byteSize - 1)
    // if temp < 0: fail
    // if offset >= temp: fail
    masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), scratch, fail);
    masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
  }
}

bool CacheIRCompiler::emitLoadDataViewValueResult(
    ObjOperandId objId, IntPtrOperandId offsetId,
    BooleanOperandId littleEndianId, Scalar::Type elementType,
    bool forceDoubleForUint32) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register offset = allocator.useRegister(masm, offsetId);
  Register littleEndian = allocator.useRegister(masm, littleEndianId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  Register64 outputReg64 = output.valueReg().toRegister64();
  Register outputScratch = outputReg64.scratchReg();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  const size_t byteSize = Scalar::byteSize(elementType);

  EmitDataViewBoundsCheck(masm, byteSize, obj, offset, outputScratch,
                          failure->label());

  masm.loadPtr(Address(obj, DataViewObject::dataOffset()), outputScratch);

  // Load the value.
  BaseIndex source(outputScratch, offset, TimesOne);
  switch (elementType) {
    case Scalar::Int8:
      masm.load8SignExtend(source, outputScratch);
      break;
    case Scalar::Uint8:
      masm.load8ZeroExtend(source, outputScratch);
      break;
    case Scalar::Int16:
      masm.load16UnalignedSignExtend(source, outputScratch);
      break;
    case Scalar::Uint16:
      masm.load16UnalignedZeroExtend(source, outputScratch);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.load32Unaligned(source, outputScratch);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.load64Unaligned(source, outputReg64);
      break;
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  // Swap the bytes in the loaded value.
  if (byteSize > 1) {
    Label skip;
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  littleEndian, Imm32(0), &skip);

    switch (elementType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(outputScratch);
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(outputScratch);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
        masm.byteSwap32(outputScratch);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(outputReg64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid type");
    }

    masm.bind(&skip);
  }

  // Move the value into the output register.
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      masm.tagValue(JSVAL_TYPE_INT32, outputScratch, output.valueReg());
      break;
    case Scalar::Uint32: {
      MacroAssembler::Uint32Mode uint32Mode =
          forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
                               : MacroAssembler::Uint32Mode::FailOnDouble;
      masm.boxUint32(outputScratch, output.valueReg(), uint32Mode,
                     failure->label());
      break;
    }
    case Scalar::Float32: {
      FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
      masm.moveGPRToFloat32(outputScratch, scratchFloat32);
      masm.canonicalizeFloat(scratchFloat32);
      masm.convertFloat32ToDouble(scratchFloat32, floatScratch0);
      masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
      break;
    }
    case Scalar::Float64:
      masm.moveGPR64ToDouble(outputReg64, floatScratch0);
      masm.canonicalizeDouble(floatScratch0);
      masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64: {
      // We need two extra registers. Reuse the obj/littleEndian registers.
      Register bigInt = obj;
      Register bigIntScratch = littleEndian;
      masm.push(bigInt);
      masm.push(bigIntScratch);
      Label fail, done;
      LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                           liveVolatileFloatRegs());
      save.takeUnchecked(bigInt);
      save.takeUnchecked(bigIntScratch);
      gc::Heap initialHeap = InitialBigIntHeap(cx_);
      EmitAllocateBigInt(masm, bigInt, bigIntScratch, save, initialHeap, &fail);
      masm.jump(&done);

      masm.bind(&fail);
      masm.pop(bigIntScratch);
      masm.pop(bigInt);
      masm.jump(failure->label());

      masm.bind(&done);
      masm.initializeBigInt64(elementType, bigInt, outputReg64);
      masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
      masm.pop(bigIntScratch);
      masm.pop(bigInt);
      break;
    }
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  return true;
}
bool CacheIRCompiler::emitStoreDataViewValueResult(
    ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
    BooleanOperandId littleEndianId, Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
#ifdef JS_CODEGEN_X86
  // Use a scratch register to avoid running out of the registers.
  Register obj = output.valueReg().typeReg();
  allocator.copyToScratchRegister(masm, objId, obj);
#else
  Register obj = allocator.useRegister(masm, objId);
#endif
  Register offset = allocator.useRegister(masm, offsetId);
  Register littleEndian = allocator.useRegister(masm, littleEndianId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  Maybe<Register> valInt32;
  Maybe<Register> valBigInt;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Uint8Clamped:
      valInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
      break;

    case Scalar::Float32:
    case Scalar::Float64:
      allocator.ensureDoubleRegister(masm, NumberOperandId(valueId),
                                     floatScratch0);
      break;

    case Scalar::BigInt64:
    case Scalar::BigUint64:
      valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
      break;

    case Scalar::MaxTypedArrayViewType:
    case Scalar::Int64:
    case Scalar::Simd128:
      MOZ_CRASH("Unsupported type");
  }

  Register scratch1 = output.valueReg().scratchReg();
  MOZ_ASSERT(scratch1 != obj, "scratchReg must not be typeReg");

  // On platforms with enough registers, |scratch2| is an extra scratch register
  // (pair) used for byte-swapping the value.
#ifndef JS_CODEGEN_X86
  mozilla::MaybeOneOf<AutoScratchRegister, AutoScratchRegister64> scratch2;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
      break;
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      scratch2.construct<AutoScratchRegister>(allocator, masm);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      scratch2.construct<AutoScratchRegister64>(allocator, masm);
      break;
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  const size_t byteSize = Scalar::byteSize(elementType);

  EmitDataViewBoundsCheck(masm, byteSize, obj, offset, scratch1,
                          failure->label());

  masm.loadPtr(Address(obj, DataViewObject::dataOffset()), scratch1);
  BaseIndex dest(scratch1, offset, TimesOne);

  if (byteSize == 1) {
    // Byte swapping has no effect, so just do the byte store.
    masm.store8(*valInt32, dest);
    masm.moveValue(UndefinedValue(), output.valueReg());
    return true;
  }

  // On 32-bit x86, |obj| is already a scratch register so use that. If we need
  // a Register64 we also use the littleEndian register and use the stack
  // location for the check below.
  bool pushedLittleEndian = false;
#ifdef JS_CODEGEN_X86
  if (byteSize == 8) {
    masm.push(littleEndian);
    pushedLittleEndian = true;
  }
  auto valScratch32 = [&]() -> Register { return obj; };
  auto valScratch64 = [&]() -> Register64 {
    return Register64(obj, littleEndian);
  };
#else
  auto valScratch32 = [&]() -> Register {
    return scratch2.ref<AutoScratchRegister>();
  };
  auto valScratch64 = [&]() -> Register64 {
    return scratch2.ref<AutoScratchRegister64>();
  };
#endif

  // Load the value into a gpr register.
  switch (elementType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      masm.move32(*valInt32, valScratch32());
      break;
    case Scalar::Float32: {
      FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
      masm.convertDoubleToFloat32(floatScratch0, scratchFloat32);
      masm.canonicalizeFloatIfDeterministic(scratchFloat32);
      masm.moveFloat32ToGPR(scratchFloat32, valScratch32());
      break;
    }
    case Scalar::Float64: {
      masm.canonicalizeDoubleIfDeterministic(floatScratch0);
      masm.moveDoubleToGPR64(floatScratch0, valScratch64());
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.loadBigInt64(*valBigInt, valScratch64());
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }

  // Swap the bytes in the loaded value.
  Label skip;
  if (pushedLittleEndian) {
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  Address(masm.getStackPointer(), 0), Imm32(0), &skip);
  } else {
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  littleEndian, Imm32(0), &skip);
  }
  switch (elementType) {
    case Scalar::Int16:
      masm.byteSwap16SignExtend(valScratch32());
      break;
    case Scalar::Uint16:
      masm.byteSwap16ZeroExtend(valScratch32());
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.byteSwap32(valScratch32());
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.byteSwap64(valScratch64());
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }
  masm.bind(&skip);

  // Store the value.
  switch (elementType) {
    case Scalar::Int16:
    case Scalar::Uint16:
      masm.store16Unaligned(valScratch32(), dest);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.store32Unaligned(valScratch32(), dest);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.store64Unaligned(valScratch64(), dest);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

#ifdef JS_CODEGEN_X86
  // Restore registers.
  if (pushedLittleEndian) {
    masm.pop(littleEndian);
  }
#endif

  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}
bool CacheIRCompiler::emitStoreFixedSlotUndefinedResult(ObjOperandId objId,
                                                        uint32_t offsetOffset,
                                                        ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(offset, scratch);

  BaseIndex slot(obj, scratch, TimesOne);
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeValue(val, slot);
  emitPostBarrierSlot(obj, val, scratch);

  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadObjectResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);

  EmitStoreResult(masm, obj, JSVAL_TYPE_OBJECT, output);

  return true;
}

bool CacheIRCompiler::emitLoadStringResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);

  masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadSymbolResult(SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register sym = allocator.useRegister(masm, symId);

  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadInt32Result(Int32OperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, valId);

  masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadBigIntResult(BigIntOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, valId);

  masm.tagValue(JSVAL_TYPE_BIGINT, val, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadDoubleResult(NumberOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand val = allocator.useValueRegister(masm, valId);

#ifdef DEBUG
  Label ok;
  masm.branchTestDouble(Assembler::Equal, val, &ok);
  masm.branchTestInt32(Assembler::Equal, val, &ok);
  masm.assumeUnreachable("input must be double or int32");
  masm.bind(&ok);
#endif

  masm.moveValue(val, output.valueReg());
  masm.convertInt32ValueToDouble(output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadTypeOfObjectResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label slowCheck, isObject, isCallable, isUndefined, done;
  masm.typeOfObject(obj, scratch, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.moveValue(StringValue(cx_->names().function), output.valueReg());
  masm.jump(&done);

  masm.bind(&isUndefined);
  masm.moveValue(StringValue(cx_->names().undefined), output.valueReg());
  masm.jump(&done);

  masm.bind(&isObject);
  masm.moveValue(StringValue(cx_->names().object), output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&slowCheck);
    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = JSString* (*)(JSObject* obj, JSRuntime* rt);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.movePtr(ImmPtr(cx_->runtime()), scratch);
    masm.passABIArg(scratch);
    masm.callWithABI<Fn, TypeOfNameObject>();
    masm.storeCallPointerResult(scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadInt32TruthyResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand val = allocator.useValueRegister(masm, inputId);

  Label ifFalse, done;
  masm.branchTestInt32Truthy(false, val, &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadStringTruthyResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);

  Label ifFalse, done;
  masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
                Imm32(0), &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadDoubleTruthyResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  Label ifFalse, done;

  masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadObjectTruthyResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label emulatesUndefined, slowPath, done;
  masm.branchIfObjectEmulatesUndefined(obj, scratch, &slowPath,
                                       &emulatesUndefined);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&emulatesUndefined);
  masm.moveValue(BooleanValue(false), output.valueReg());
  masm.jump(&done);

  masm.bind(&slowPath);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject* obj);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, js::EmulatesUndefined>();
    masm.storeCallBoolResult(scratch);
    masm.xor32(Imm32(1), scratch);

    masm.PopRegsInMask(volatileRegs);

    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadBigIntTruthyResult(BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  Label ifFalse, done;
  masm.branch32(Assembler::Equal,
                Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(0),
                &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}
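
// ToBoolean on a boxed value: dispatch on the value tag and reuse the
// per-type truthiness checks. Objects may require a VM call to handle
// "emulates undefined" objects such as document.all.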
bool CacheIRCompiler::emitLoadValueTruthyResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  ValueOperand value = allocator.useValueRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchFloatRegister floatReg(this);

  Label ifFalse, ifTrue, done;

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestUndefined(Assembler::Equal, tag, &ifFalse);
    masm.branchTestNull(Assembler::Equal, tag, &ifFalse);

    Label notBoolean;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestBooleanTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notBoolean);

    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestInt32Truthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notInt32);

    Label notObject;
    masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
    {
      ScratchTagScopeRelease _(&tag);

      Register obj = masm.extractObject(value, scratch1);

      Label slowPath;
      masm.branchIfObjectEmulatesUndefined(obj, scratch2, &slowPath, &ifFalse);
      masm.jump(&ifTrue);

      masm.bind(&slowPath);
      {
        LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                     liveVolatileFloatRegs());
        volatileRegs.takeUnchecked(scratch1);
        volatileRegs.takeUnchecked(scratch2);
        volatileRegs.takeUnchecked(output);
        masm.PushRegsInMask(volatileRegs);

        using Fn = bool (*)(JSObject* obj);
        masm.setupUnalignedABICall(scratch2);
        masm.passABIArg(obj);
        masm.callWithABI<Fn, js::EmulatesUndefined>();
        masm.storeCallPointerResult(scratch2);

        masm.PopRegsInMask(volatileRegs);

        masm.branchIfTrueBool(scratch2, &ifFalse);
        masm.jump(&ifTrue);
      }
    }
    masm.bind(&notObject);

    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestStringTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notString);

    Label notBigInt;
    masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestBigIntTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notBigInt);

    masm.branchTestSymbol(Assembler::Equal, tag, &ifTrue);

#ifdef DEBUG
    Label isDouble;
    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.assumeUnreachable("Unexpected value type");
    masm.bind(&isDouble);
#endif

    {
      ScratchTagScopeRelease _(&tag);
      masm.unboxDouble(value, floatReg);
      masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
    }

    // Fall through to true case.
  }

  masm.bind(&ifTrue);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}
7010 bool CacheIRCompiler::emitComparePointerResultShared(JSOp op,
7011 TypedOperandId lhsId,
7012 TypedOperandId rhsId) {
7013 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7014 AutoOutputRegister output(*this);
7016 Register left = allocator.useRegister(masm, lhsId);
7017 Register right = allocator.useRegister(masm, rhsId);
7019 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
7021 Label ifTrue, done;
7022 masm.branchPtr(JSOpToCondition(op, /* signed = */ true), left, right,
7023 &ifTrue);
7025 EmitStoreBoolean(masm, false, output);
7026 masm.jump(&done);
7028 masm.bind(&ifTrue);
7029 EmitStoreBoolean(masm, true, output);
7030 masm.bind(&done);
7031 return true;
7034 bool CacheIRCompiler::emitCompareObjectResult(JSOp op, ObjOperandId lhsId,
7035 ObjOperandId rhsId) {
7036 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7037 return emitComparePointerResultShared(op, lhsId, rhsId);
7040 bool CacheIRCompiler::emitCompareSymbolResult(JSOp op, SymbolOperandId lhsId,
7041 SymbolOperandId rhsId) {
7042 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7043 return emitComparePointerResultShared(op, lhsId, rhsId);
7046 bool CacheIRCompiler::emitCompareInt32Result(JSOp op, Int32OperandId lhsId,
7047 Int32OperandId rhsId) {
7048 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7049 AutoOutputRegister output(*this);
7050 Register left = allocator.useRegister(masm, lhsId);
7051 Register right = allocator.useRegister(masm, rhsId);
7053 Label ifTrue, done;
7054 masm.branch32(JSOpToCondition(op, /* signed = */ true), left, right, &ifTrue);
7056 EmitStoreBoolean(masm, false, output);
7057 masm.jump(&done);
7059 masm.bind(&ifTrue);
7060 EmitStoreBoolean(masm, true, output);
7061 masm.bind(&done);
7062 return true;
7065 bool CacheIRCompiler::emitCompareDoubleResult(JSOp op, NumberOperandId lhsId,
7066 NumberOperandId rhsId) {
7067 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7068 AutoOutputRegister output(*this);
7070 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
7071 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
7073 FailurePath* failure;
7074 if (!addFailurePath(&failure)) {
7075 return false;
7078 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
7079 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
7081 Label done, ifTrue;
7082 masm.branchDouble(JSOpToDoubleCondition(op), floatScratch0, floatScratch1,
7083 &ifTrue);
7084 EmitStoreBoolean(masm, false, output);
7085 masm.jump(&done);
7087 masm.bind(&ifTrue);
7088 EmitStoreBoolean(masm, true, output);
7089 masm.bind(&done);
7090 return true;
bool CacheIRCompiler::emitCompareBigIntResult(JSOp op, BigIntOperandId lhsId,
                                              BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(rhs);
    masm.passABIArg(lhs);
  } else {
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
  }

  using Fn = bool (*)(BigInt*, BigInt*);
  Fn fn;
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    fn = jit::BigIntEqual<EqualityKind::Equal>;
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    fn = jit::BigIntEqual<EqualityKind::NotEqual>;
  } else if (op == JSOp::Lt || op == JSOp::Gt) {
    fn = jit::BigIntCompare<ComparisonKind::LessThan>;
  } else {
    MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
    fn = jit::BigIntCompare<ComparisonKind::GreaterThanOrEqual>;
  }

  masm.callWithABI(DynamicFunction<Fn>(fn));
  masm.storeCallBoolResult(scratch);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntInt32Result(JSOp op,
                                                   BigIntOperandId lhsId,
                                                   Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register bigInt = allocator.useRegister(masm, lhsId);
  Register int32 = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  Label ifTrue, ifFalse;
  masm.compareBigIntAndInt32(op, bigInt, int32, scratch1, scratch2, &ifTrue,
                             &ifFalse);

  Label done;
  masm.bind(&ifFalse);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntNumberResult(JSOp op,
                                                    BigIntOperandId lhsId,
                                                    NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  Register lhs = allocator.useRegister(masm, lhsId);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch0);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
    masm.passABIArg(lhs);
  } else {
    masm.passABIArg(lhs);
    masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  }
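
  // The argument order chosen above also determines which helper signature
  // applies below: the BigInt comes first for Eq/Ne/Lt/Ge and second for
  // Gt/Le.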
  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(scratch);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
  return true;
}
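
// Unlike the pure BigInt helpers above, a BigInt/string comparison has to
// convert the string first (StringToBigInt), which can allocate and fail, so
// it goes through a VM call.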
bool CacheIRCompiler::emitCompareBigIntStringResult(JSOp op,
                                                    BigIntOperandId lhsId,
                                                    StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.Push(lhs);
    masm.Push(rhs);
  } else {
    masm.Push(rhs);
    masm.Push(lhs);
  }

  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);

  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callvm.call<FnBigIntString, BigIntStringEqual<Equal>>();
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callvm.call<FnBigIntString, BigIntStringEqual<NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callvm.call<FnBigIntString, BigIntStringCompare<LessThan>>();
      break;
    }
    case JSOp::Gt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callvm.call<FnStringBigInt, StringBigIntCompare<LessThan>>();
      break;
    }
    case JSOp::Le: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callvm.call<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callvm.call<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }
  return true;
}

bool CacheIRCompiler::emitCompareNullUndefinedResult(JSOp op, bool isUndefined,
                                                     ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  ValueOperand input = allocator.useValueRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  if (IsStrictEqualityOp(op)) {
    if (isUndefined) {
      masm.testUndefinedSet(JSOpToCondition(op, false), input, scratch);
    } else {
      masm.testNullSet(JSOpToCondition(op, false), input, scratch);
    }
    EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
    return true;
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  MOZ_ASSERT(IsLooseEqualityOp(op));
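
  // Under loose equality, |null| and |undefined| compare equal to each other
  // and to objects that emulate undefined (e.g. document.all), but to nothing
  // else.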
  Label nullOrLikeUndefined, notNullOrLikeUndefined, done;
  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    if (isUndefined) {
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
    } else {
      masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
    }
    masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);

    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxObject(input, scratch);
      masm.branchIfObjectEmulatesUndefined(scratch, scratch, failure->label(),
                                           &nullOrLikeUndefined);
      masm.jump(&notNullOrLikeUndefined);
    }
  }

  masm.bind(&nullOrLikeUndefined);
  EmitStoreBoolean(masm, op == JSOp::Eq, output);
  masm.jump(&done);

  masm.bind(&notNullOrLikeUndefined);
  EmitStoreBoolean(masm, op == JSOp::Ne, output);

  masm.bind(&done);
  return true;
}
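
// SameValue on doubles differs from |===|: NaN compares equal to NaN, and +0
// does not compare equal to -0. masm.sameValueDouble implements exactly these
// semantics.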
bool CacheIRCompiler::emitCompareDoubleSameValueResult(NumberOperandId lhsId,
                                                       NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.sameValueDouble(floatScratch0, floatScratch1, floatScratch2, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitIndirectTruncateInt32Result(Int32OperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, valId);

  if (output.hasValue()) {
    masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
  } else {
    masm.mov(val, output.typedReg().gpr());
  }
  return true;
}

bool CacheIRCompiler::emitCallPrintString(const char* str) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  masm.printf(str);
  return true;
}

bool CacheIRCompiler::emitBreakpoint() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  masm.breakpoint();
  return true;
}
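
// Emit a generational-GC post-write barrier: if a nursery-allocated cell was
// just stored into |obj| and |obj| itself is tenured, the store must be
// recorded so the next minor GC can trace the new tenured-to-nursery edge.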
void CacheIRCompiler::emitPostBarrierShared(Register obj,
                                            const ConstantOrRegister& val,
                                            Register scratch,
                                            Register maybeIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (val.constant()) {
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    return;
  }

  Label skipBarrier;
  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::NotEqual, reg.valueReg(), scratch,
                                  &skipBarrier);
  } else {
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, reg.typedReg().gpr(),
                                 scratch, &skipBarrier);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);

  // Check one element cache to avoid VM call.
  auto* lastCellAddr = cx_->runtime()->gc.addressOfLastBufferedWholeCell();
  masm.branchPtr(Assembler::Equal, AbsoluteAddress(lastCellAddr), obj,
                 &skipBarrier);

  // Call one of these, depending on maybeIndex:
  //
  //   void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
  //   void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj,
  //                                int32_t index);
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);
  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(cx_->runtime()), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  if (maybeIndex != InvalidReg) {
    masm.passABIArg(maybeIndex);
    using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
    masm.callWithABI<Fn, PostWriteElementBarrier>();
  } else {
    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }
  masm.PopRegsInMask(save);

  masm.bind(&skipBarrier);
}

bool CacheIRCompiler::emitWrapResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  // We only have to wrap objects, because we are in the same zone.
  masm.branchTestObject(Assembler::NotEqual, output.valueReg(), &done);

  Register obj = output.valueReg().scratchReg();
  masm.unboxObject(output.valueReg(), obj);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = JSObject* (*)(JSContext* cx, JSObject* obj);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, WrapObjectPure>();
  masm.storeCallPointerResult(obj);

  LiveRegisterSet ignore;
  ignore.add(obj);
  masm.PopRegsInMaskIgnore(save, ignore);

  // If we could not get a wrapper for this object, fail.
  masm.branchTestPtr(Assembler::Zero, obj, obj, failure->label());

  // We clobbered the output register, so we have to retag.
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitMegamorphicLoadSlotByValueResult(ObjOperandId objId,
                                                           ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);
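
  // On x86 there are not enough registers for the inline megamorphic-cache
  // lookup, so that path is skipped and a null cache entry is passed to the
  // C++ helper instead (see the xorPtr below).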
#ifdef JS_CODEGEN_X86
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
#else
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifdef JS_CODEGEN_X86
  masm.xorPtr(scratch2, scratch2);
#else
  Label cacheHit;
  masm.emitMegamorphicCacheLookupByValue(
      idVal, obj, scratch1, scratch3, scratch2, output.valueReg(), &cacheHit);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(scratch2);
  masm.passABIArg(idVal.scratchReg());
  masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();

  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.Pop(idVal);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

#ifndef JS_CODEGEN_X86
  masm.bind(&cacheHit);
#endif
  return true;
}

bool CacheIRCompiler::emitMegamorphicHasPropResult(ObjOperandId objId,
                                                   ValOperandId idId,
                                                   bool hasOwn) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

#ifdef JS_CODEGEN_X86
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
#else
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifndef JS_CODEGEN_X86
  Label cacheHit, done;
  masm.emitMegamorphicCacheLookupExists(idVal, obj, scratch1, scratch3,
                                        scratch2, output.maybeReg(), &cacheHit,
                                        hasOwn);
#else
  masm.xorPtr(scratch2, scratch2);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(scratch2);
  masm.passABIArg(idVal.scratchReg());
  if (hasOwn) {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
  } else {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
  }
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.Pop(idVal);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

#ifndef JS_CODEGEN_X86
  masm.jump(&done);
  masm.bind(&cacheHit);
  if (output.hasValue()) {
    masm.tagValue(JSVAL_TYPE_BOOLEAN, output.valueReg().scratchReg(),
                  output.valueReg());
  }
  masm.bind(&done);
#endif
  return true;
}

bool CacheIRCompiler::emitCallObjectHasSparseElementResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(scratch2.get());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(index);
  masm.PushRegsInMask(volatileRegs);

  using Fn =
      bool (*)(JSContext* cx, NativeObject* obj, int32_t index, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(index);
  masm.passABIArg(scratch2);
  masm.callWithABI<Fn, HasNativeElementPure>();
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));
  return true;
}

/*
 * Move a constant value into register dest.
 */
void CacheIRCompiler::emitLoadStubFieldConstant(StubFieldOffset val,
                                                Register dest) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  MOZ_ASSERT(mode_ == Mode::Ion);
  switch (val.getStubFieldType()) {
    case StubField::Type::Shape:
      masm.movePtr(ImmGCPtr(shapeStubField(val.getOffset())), dest);
      break;
    case StubField::Type::WeakGetterSetter:
      masm.movePtr(ImmGCPtr(weakGetterSetterStubField(val.getOffset())), dest);
      break;
    case StubField::Type::String:
      masm.movePtr(ImmGCPtr(stringStubField(val.getOffset())), dest);
      break;
    case StubField::Type::JSObject:
      masm.movePtr(ImmGCPtr(objectStubField(val.getOffset())), dest);
      break;
    case StubField::Type::RawPointer:
      masm.movePtr(ImmPtr(pointerStubField(val.getOffset())), dest);
      break;
    case StubField::Type::RawInt32:
      masm.move32(Imm32(int32StubField(val.getOffset())), dest);
      break;
    case StubField::Type::Id:
      masm.movePropertyKey(idStubField(val.getOffset()), dest);
      break;
    default:
      MOZ_CRASH("Unhandled stub field constant type");
  }
}

/*
 * After this is done executing, dest contains the value; either through a
 * constant load or through the load from the stub data.
 *
 * The current policy is that Baseline will use loads from the stub data (to
 * allow IC sharing), whereas Ion doesn't share ICs, and so we can safely use
 * constants in the IC.
 */
void CacheIRCompiler::emitLoadStubField(StubFieldOffset val, Register dest) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    emitLoadStubFieldConstant(val, dest);
  } else {
    Address load(ICStubReg, stubDataOffset_ + val.getOffset());

    switch (val.getStubFieldType()) {
      case StubField::Type::RawPointer:
      case StubField::Type::Shape:
      case StubField::Type::WeakGetterSetter:
      case StubField::Type::JSObject:
      case StubField::Type::Symbol:
      case StubField::Type::String:
      case StubField::Type::Id:
        masm.loadPtr(load, dest);
        break;
      case StubField::Type::RawInt32:
        masm.load32(load, dest);
        break;
      default:
        MOZ_CRASH("Unhandled stub field constant type");
    }
  }
}

void CacheIRCompiler::emitLoadValueStubField(StubFieldOffset val,
                                             ValueOperand dest) {
  MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Value);

  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    MOZ_ASSERT(mode_ == Mode::Ion);
    masm.moveValue(valueStubField(val.getOffset()), dest);
  } else {
    Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
    masm.loadValue(addr, dest);
  }
}

void CacheIRCompiler::emitLoadDoubleValueStubField(StubFieldOffset val,
                                                   ValueOperand dest,
                                                   FloatRegister scratch) {
  MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Double);

  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    MOZ_ASSERT(mode_ == Mode::Ion);
    double d = doubleStubField(val.getOffset());
    masm.moveValue(DoubleValue(d), dest);
  } else {
    Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
    masm.loadDouble(addr, scratch);
    masm.boxDouble(scratch, dest, scratch);
  }
}
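
// Implements the core of |lhs instanceof proto|: walk lhs's prototype chain
// looking for |proto|. A lazily-computed prototype (TaggedProto::LazyProto,
// used by some proxies) cannot be resolved inline, so that case bails to the
// failure path.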
bool CacheIRCompiler::emitLoadInstanceOfObjectResult(ValOperandId lhsId,
                                                     ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
  Register proto = allocator.useRegister(masm, protoId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label returnFalse, returnTrue, done;
  masm.fallibleUnboxObject(lhs, scratch, &returnFalse);

  // LHS is an object. Load its proto.
  masm.loadObjProto(scratch, scratch);
  {
    // Walk the proto chain until we either reach the target object,
    // nullptr or LazyProto.
    Label loop;
    masm.bind(&loop);

    masm.branchPtr(Assembler::Equal, scratch, proto, &returnTrue);
    masm.branchTestPtr(Assembler::Zero, scratch, scratch, &returnFalse);

    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
    masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), failure->label());

    masm.loadObjProto(scratch, scratch);
    masm.jump(&loop);
  }

  masm.bind(&returnFalse);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&returnTrue);
  EmitStoreBoolean(masm, true, output);
  // fallthrough
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitMegamorphicLoadSlotResult(ObjOperandId objId,
                                                    uint32_t idOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset id(idOffset, StubField::Type::Id);

  AutoScratchRegisterMaybeOutput idReg(allocator, masm, output);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifdef JS_CODEGEN_X86
  masm.xorPtr(scratch3, scratch3);
#else
  Label cacheHit;
  emitLoadStubField(id, idReg);
  masm.emitMegamorphicCacheLookupByValue(idReg.get(), obj, scratch1, scratch2,
                                         scratch3, output.valueReg(),
                                         &cacheHit);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(idReg.get());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  volatileRegs.takeUnchecked(scratch3);
  volatileRegs.takeUnchecked(idReg);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(id, scratch2);
  masm.passABIArg(scratch2);
  masm.passABIArg(scratch3);
  masm.passABIArg(idReg);

#ifdef JS_CODEGEN_X86
  masm.callWithABI<Fn, GetNativeDataPropertyPureWithCacheLookup>();
#else
  masm.callWithABI<Fn, GetNativeDataPropertyPure>();
#endif

  masm.storeCallPointerResult(scratch2);
  masm.PopRegsInMask(volatileRegs);

  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

  masm.branchIfFalseBool(scratch2, failure->label());
#ifndef JS_CODEGEN_X86
  masm.bind(&cacheHit);
#endif

  return true;
}

bool CacheIRCompiler::emitMegamorphicStoreSlot(ObjOperandId objId,
                                               uint32_t idOffset,
                                               ValOperandId rhsId,
                                               bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  StubFieldOffset id(idOffset, StubField::Type::Id);
  AutoScratchRegister scratch(allocator, masm);

  callvm.prepare();

  masm.Push(Imm32(strict));
  masm.Push(val);
  emitLoadStubField(id, scratch);
  masm.Push(scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callvm.callNoResult<Fn, SetPropertyMegamorphic<false>>();
  return true;
}

bool CacheIRCompiler::emitGuardHasGetterSetter(ObjOperandId objId,
                                               uint32_t idOffset,
                                               uint32_t getterSetterOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  StubFieldOffset id(idOffset, StubField::Type::Id);
  StubFieldOffset getterSetter(getterSetterOffset,
                               StubField::Type::WeakGetterSetter);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
                      GetterSetter* getterSetter);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(id, scratch2);
  masm.passABIArg(scratch2);
  emitLoadStubField(getterSetter, scratch3);
  masm.passABIArg(scratch3);
  masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.branchIfFalseBool(scratch1, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardWasmArg(ValOperandId argId,
                                       wasm::ValType::Kind kind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // All values can be boxed as AnyRef.
  if (kind == wasm::ValType::Ref) {
    return true;
  }
  MOZ_ASSERT(kind != wasm::ValType::V128);

  ValueOperand arg = allocator.useValueRegister(masm, argId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Check that the argument can be converted to the Wasm type in Warp code
  // without bailing out.
  Label done;
  switch (kind) {
    case wasm::ValType::I32:
    case wasm::ValType::F32:
    case wasm::ValType::F64: {
      // Argument must be number, bool, or undefined.
      masm.branchTestNumber(Assembler::Equal, arg, &done);
      masm.branchTestBoolean(Assembler::Equal, arg, &done);
      masm.branchTestUndefined(Assembler::NotEqual, arg, failure->label());
      break;
    }
    case wasm::ValType::I64: {
      // Argument must be bigint, bool, or string.
      masm.branchTestBigInt(Assembler::Equal, arg, &done);
      masm.branchTestBoolean(Assembler::Equal, arg, &done);
      masm.branchTestString(Assembler::NotEqual, arg, failure->label());
      break;
    }
    default:
      MOZ_CRASH("Unexpected kind");
  }
  masm.bind(&done);

  return true;
}

bool CacheIRCompiler::emitGuardMultipleShapes(ObjOperandId objId,
                                              uint32_t shapesOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister shapes(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  Register spectreScratch = InvalidReg;
  Maybe<AutoScratchRegister> maybeSpectreScratch;
  if (needSpectreMitigations) {
    maybeSpectreScratch.emplace(allocator, masm);
    spectreScratch = *maybeSpectreScratch;
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The stub field contains a ListObject. Load its elements.
  StubFieldOffset shapeArray(shapesOffset, StubField::Type::JSObject);
  emitLoadStubField(shapeArray, shapes);
  masm.loadPtr(Address(shapes, NativeObject::offsetOfElements()), shapes);

  masm.branchTestObjShapeList(Assembler::NotEqual, obj, shapes, scratch,
                              scratch2, spectreScratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadObject(ObjOperandId resultId,
                                     uint32_t objOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset obj(objOffset, StubField::Type::JSObject);
  emitLoadStubField(obj, reg);
  return true;
}

bool CacheIRCompiler::emitLoadProtoObject(ObjOperandId resultId,
                                          uint32_t objOffset,
                                          ObjOperandId receiverObjId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset obj(objOffset, StubField::Type::JSObject);
  emitLoadStubField(obj, reg);
  return true;
}

bool CacheIRCompiler::emitLoadInt32Constant(uint32_t valOffset,
                                            Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset val(valOffset, StubField::Type::RawInt32);
  emitLoadStubField(val, reg);
  return true;
}

bool CacheIRCompiler::emitLoadBooleanConstant(bool val,
                                              BooleanOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.move32(Imm32(val), reg);
  return true;
}

bool CacheIRCompiler::emitLoadDoubleConstant(uint32_t valOffset,
                                             NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  StubFieldOffset val(valOffset, StubField::Type::Double);

  AutoScratchFloatRegister floatReg(this);

  emitLoadDoubleValueStubField(val, output, floatReg);
  return true;
}

bool CacheIRCompiler::emitLoadUndefined(ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand reg = allocator.defineValueRegister(masm, resultId);
  masm.moveValue(UndefinedValue(), reg);
  return true;
}

bool CacheIRCompiler::emitLoadConstantString(uint32_t strOffset,
                                             StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset str(strOffset, StubField::Type::String);
  emitLoadStubField(str, reg);
  return true;
}
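
// The *Pure helpers below signal failure by returning null instead of
// throwing; the stub turns that into a jump to the failure path (and thus the
// next stub in the chain).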
bool CacheIRCompiler::emitCallInt32ToString(Int32OperandId inputId,
                                            StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register input = allocator.useRegister(masm, inputId);
  Register result = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(result);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSLinearString* (*)(JSContext* cx, int32_t i);
  masm.setupUnalignedABICall(result);
  masm.loadJSContext(result);
  masm.passABIArg(result);
  masm.passABIArg(input);
  masm.callWithABI<Fn, js::Int32ToStringPure>();

  masm.storeCallPointerResult(result);
  masm.PopRegsInMask(volatileRegs);

  masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
  return true;
}

bool CacheIRCompiler::emitCallNumberToString(NumberOperandId inputId,
                                             StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, floatScratch0);
  Register result = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(result);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSString* (*)(JSContext* cx, double d);
  masm.setupUnalignedABICall(result);
  masm.loadJSContext(result);
  masm.passABIArg(result);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.callWithABI<Fn, js::NumberToStringPure>();

  masm.storeCallPointerResult(result);
  masm.PopRegsInMask(volatileRegs);

  masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
  return true;
}

bool CacheIRCompiler::emitInt32ToStringWithBaseResult(Int32OperandId inputId,
                                                      Int32OperandId baseId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  Register input = allocator.useRegister(masm, inputId);
  Register base = allocator.useRegister(masm, baseId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
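
  // Number.prototype.toString only accepts radix values in [2, 36]; bail out
  // for anything else and let the fallback path handle it.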
  masm.branch32(Assembler::LessThan, base, Imm32(2), failure->label());
  masm.branch32(Assembler::GreaterThan, base, Imm32(36), failure->label());

  // Use lower-case characters by default.
  constexpr bool lowerCase = true;

  callvm.prepare();

  masm.Push(Imm32(lowerCase));
  masm.Push(base);
  masm.Push(input);

  using Fn = JSString* (*)(JSContext*, int32_t, int32_t, bool);
  callvm.call<Fn, js::Int32ToStringWithBase>();
  return true;
}

bool CacheIRCompiler::emitBooleanToString(BooleanOperandId inputId,
                                          StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register boolean = allocator.useRegister(masm, inputId);
  Register result = allocator.defineRegister(masm, resultId);
  const JSAtomState& names = cx_->names();
  Label true_, done;

  masm.branchTest32(Assembler::NonZero, boolean, boolean, &true_);

  // False case
  masm.movePtr(ImmGCPtr(names.false_), result);
  masm.jump(&done);

  // True case
  masm.bind(&true_);
  masm.movePtr(ImmGCPtr(names.true_), result);
  masm.bind(&done);

  return true;
}

bool CacheIRCompiler::emitObjectToStringResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(output.valueReg());
  volatileRegs.takeUnchecked(scratch);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSString* (*)(JSContext*, JSObject*);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::ObjectClassToString>();
  masm.storeCallPointerResult(scratch);

  masm.PopRegsInMask(volatileRegs);

  masm.branchPtr(Assembler::Equal, scratch, ImmPtr(nullptr), failure->label());
  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitCallStringConcatResult(StringOperandId lhsId,
                                                 StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  masm.Push(static_cast<js::jit::Imm32>(int32_t(js::gc::Heap::Default)));
  masm.Push(rhs);
  masm.Push(lhs);

  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
  callvm.call<Fn, ConcatStrings<CanGC>>();

  return true;
}

bool CacheIRCompiler::emitCallIsSuspendedGeneratorResult(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  ValueOperand input = allocator.useValueRegister(masm, valId);

  // Test if it's an object.
  Label returnFalse, done;
  masm.fallibleUnboxObject(input, scratch, &returnFalse);

  // Test if it's a GeneratorObject.
  masm.branchTestObjClass(Assembler::NotEqual, scratch,
                          &GeneratorObject::class_, scratch2, scratch,
                          &returnFalse);

  // If the resumeIndex slot holds an int32 value < RESUME_INDEX_RUNNING,
  // the generator is suspended.
  Address addr(scratch, AbstractGeneratorObject::offsetOfResumeIndexSlot());
  masm.fallibleUnboxInt32(addr, scratch, &returnFalse);
  masm.branch32(Assembler::AboveOrEqual, scratch,
                Imm32(AbstractGeneratorObject::RESUME_INDEX_RUNNING),
                &returnFalse);

  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&returnFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

// This op generates no code. It is consumed by the transpiler.
bool CacheIRCompiler::emitMetaScriptedThisShape(uint32_t) { return true; }

bool CacheIRCompiler::emitCallNativeGetElementResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  callvm.prepare();

  masm.Push(index);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
                      MutableHandleValue);
  callvm.call<Fn, NativeGetElement>();

  return true;
}

bool CacheIRCompiler::emitCallNativeGetElementSuperResult(
    ObjOperandId objId, Int32OperandId indexId, ValOperandId receiverId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);

  callvm.prepare();

  masm.Push(index);
  masm.Push(receiver);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
                      MutableHandleValue);
  callvm.call<Fn, NativeGetElement>();

  return true;
}

bool CacheIRCompiler::emitProxyHasPropResult(ObjOperandId objId,
                                             ValOperandId idId, bool hasOwn) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

  callvm.prepare();

  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  if (hasOwn) {
    callvm.call<Fn, ProxyHasOwn>();
  } else {
    callvm.call<Fn, ProxyHas>();
  }
  return true;
}

bool CacheIRCompiler::emitProxyGetByValueResult(ObjOperandId objId,
                                                ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

  callvm.prepare();
  masm.Push(idVal);
  masm.Push(obj);

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  callvm.call<Fn, ProxyGetPropertyByValue>();
  return true;
}

bool CacheIRCompiler::emitCallGetSparseElementResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register id = allocator.useRegister(masm, indexId);

  callvm.prepare();
  masm.Push(id);
  masm.Push(obj);

  using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
                      MutableHandleValue result);
  callvm.call<Fn, GetSparseElementHelper>();
  return true;
}

bool CacheIRCompiler::emitRegExpSearcherLastLimitResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  masm.loadAndClearRegExpSearcherLastLimit(scratch1, scratch2);

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}
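
// RegExp flags are stored as an Int32Value in a fixed slot of the
// RegExpObject, so testing |flagsMask| against that word directly yields the
// boolean result.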
bool CacheIRCompiler::emitRegExpFlagResult(ObjOperandId regexpId,
                                           int32_t flagsMask) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register regexp = allocator.useRegister(masm, regexpId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Address flagsAddr(
      regexp, NativeObject::getFixedSlotOffset(RegExpObject::flagsSlot()));
  masm.unboxInt32(flagsAddr, scratch);

  Label ifFalse, done;
  masm.branchTest32(Assembler::Zero, scratch, Imm32(flagsMask), &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCallSubstringKernelResult(StringOperandId strId,
                                                    Int32OperandId beginId,
                                                    Int32OperandId lengthId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register begin = allocator.useRegister(masm, beginId);
  Register length = allocator.useRegister(masm, lengthId);

  callvm.prepare();
  masm.Push(length);
  masm.Push(begin);
  masm.Push(str);

  using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
                           int32_t len);
  callvm.call<Fn, SubstringKernel>();
  return true;
}

bool CacheIRCompiler::emitStringReplaceStringResult(
    StringOperandId strId, StringOperandId patternId,
    StringOperandId replacementId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register pattern = allocator.useRegister(masm, patternId);
  Register replacement = allocator.useRegister(masm, replacementId);

  callvm.prepare();
  masm.Push(replacement);
  masm.Push(pattern);
  masm.Push(str);

  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
  callvm.call<Fn, jit::StringReplace>();
  return true;
}

bool CacheIRCompiler::emitStringSplitStringResult(StringOperandId strId,
                                                  StringOperandId separatorId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register separator = allocator.useRegister(masm, separatorId);

  callvm.prepare();
  masm.Push(Imm32(INT32_MAX));
  masm.Push(separator);
  masm.Push(str);

  using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
  callvm.call<Fn, js::StringSplitString>();
  return true;
}

bool CacheIRCompiler::emitRegExpPrototypeOptimizableResult(
    ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register proto = allocator.useRegister(masm, protoId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label slow, done;
  masm.branchIfNotRegExpPrototypeOptimizable(
      proto, scratch, /* maybeGlobal = */ nullptr, &slow);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&slow);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSObject* proto);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(proto);
    masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
    masm.storeCallBoolResult(scratch);

    masm.PopRegsInMask(volatileRegs);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitRegExpInstanceOptimizableResult(
    ObjOperandId regexpId, ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register regexp = allocator.useRegister(masm, regexpId);
  Register proto = allocator.useRegister(masm, protoId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label slow, done;
  masm.branchIfNotRegExpInstanceOptimizable(regexp, scratch,
                                            /* maybeGlobal = */ nullptr, &slow);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&slow);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(regexp);
    masm.passABIArg(proto);
    masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
    masm.storeCallBoolResult(scratch);

    masm.PopRegsInMask(volatileRegs);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitGetFirstDollarIndexResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = bool (*)(JSContext*, JSString*, int32_t*);
  callvm.call<Fn, GetFirstDollarIndexRaw>();
  return true;
}

bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
    uint32_t replacementId, Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Maybe<AutoOutputRegister> output;
  Maybe<AutoCallVM> callvm;
  if (!Scalar::isBigIntType(elementType)) {
    output.emplace(*this);
  } else {
    callvm.emplace(masm, this, allocator);
  }
#ifdef JS_CODEGEN_X86
  // Use a scratch register to avoid running out of registers.
  Register obj = output ? output->valueReg().typeReg()
                        : callvm->outputValueReg().typeReg();
  allocator.copyToScratchRegister(masm, objId, obj);
#else
  Register obj = allocator.useRegister(masm, objId);
#endif
  Register index = allocator.useRegister(masm, indexId);
  Register expected;
  Register replacement;
  if (!Scalar::isBigIntType(elementType)) {
    expected = allocator.useRegister(masm, Int32OperandId(expectedId));
    replacement = allocator.useRegister(masm, Int32OperandId(replacementId));
  } else {
    expected = allocator.useRegister(masm, BigIntOperandId(expectedId));
    replacement = allocator.useRegister(masm, BigIntOperandId(replacementId));
  }

  Register scratch = output ? output->valueReg().scratchReg()
                            : callvm->outputValueReg().scratchReg();
  MOZ_ASSERT(scratch != obj, "scratchReg must not be typeReg");

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // Atomic operations are highly platform-dependent, for example x86/x64 has
  // specific requirements on which registers are used; MIPS needs multiple
  // additional temporaries. Therefore we're using either an ABI or VM call here
  // instead of handling each platform separately.
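
  // The 64-bit (BigInt) variants additionally have to allocate the result
  // BigInt, which can GC, so they must take the VM-call path.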
  if (Scalar::isBigIntType(elementType)) {
    callvm->prepare();

    masm.Push(replacement);
    masm.Push(expected);
    masm.Push(index);
    masm.Push(obj);

    using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t, const BigInt*,
                           const BigInt*);
    callvm->call<Fn, jit::AtomicsCompareExchange64>();
    return true;
  }

  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(output->valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(expected);
    masm.passABIArg(replacement);
    masm.callWithABI(DynamicFunction<AtomicsCompareExchangeFn>(
        AtomicsCompareExchange(elementType)));
    masm.storeCallInt32Result(scratch);

    masm.PopRegsInMask(volatileRegs);
  }
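
  // A Uint32 result may exceed INT32_MAX, so box it as a double in that case.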
  if (elementType != Scalar::Uint32) {
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output->valueReg());
  } else {
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output->valueReg(), fpscratch);
  }

  return true;
}

bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
    Scalar::Type elementType, AtomicsReadWriteModifyFn fn) {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  Register value = allocator.useRegister(masm, Int32OperandId(valueId));
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // See comment in emitAtomicsCompareExchange for why we use an ABI call.
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(output.valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(value);
    masm.callWithABI(DynamicFunction<AtomicsReadWriteModifyFn>(fn));
    masm.storeCallInt32Result(scratch);

    masm.PopRegsInMask(volatileRegs);
  }

  if (elementType != Scalar::Uint32) {
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  } else {
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  }

  return true;
}

template <CacheIRCompiler::AtomicsReadWriteModify64Fn fn>
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId) {
  AutoCallVM callvm(masm, this, allocator);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  Register value = allocator.useRegister(masm, BigIntOperandId(valueId));
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // See comment in emitAtomicsCompareExchange for why we use a VM call.

  callvm.prepare();

  masm.Push(value);
  masm.Push(index);
  masm.Push(obj);

  callvm.call<AtomicsReadWriteModify64Fn, fn>();
  return true;
}
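
// The read-modify-write Atomics ops below all share the two helpers above:
// BigInt element types dispatch to the 64-bit VM-call helper, all other
// element types to the 32-bit ABI-call helper.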
bool CacheIRCompiler::emitAtomicsExchangeResult(ObjOperandId objId,
                                                IntPtrOperandId indexId,
                                                uint32_t valueId,
                                                Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsExchange64>(
        objId, indexId, valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsExchange(elementType));
}

bool CacheIRCompiler::emitAtomicsAddResult(ObjOperandId objId,
                                           IntPtrOperandId indexId,
                                           uint32_t valueId,
                                           Scalar::Type elementType,
                                           bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsAdd64>(objId, indexId,
                                                                 valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsAdd(elementType));
}

bool CacheIRCompiler::emitAtomicsSubResult(ObjOperandId objId,
                                           IntPtrOperandId indexId,
                                           uint32_t valueId,
                                           Scalar::Type elementType,
                                           bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsSub64>(objId, indexId,
                                                                 valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsSub(elementType));
}

bool CacheIRCompiler::emitAtomicsAndResult(ObjOperandId objId,
                                           IntPtrOperandId indexId,
                                           uint32_t valueId,
                                           Scalar::Type elementType,
                                           bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsAnd64>(objId, indexId,
                                                                 valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsAnd(elementType));
}

bool CacheIRCompiler::emitAtomicsOrResult(ObjOperandId objId,
                                          IntPtrOperandId indexId,
                                          uint32_t valueId,
                                          Scalar::Type elementType,
                                          bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsOr64>(objId, indexId,
                                                                valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsOr(elementType));
}

bool CacheIRCompiler::emitAtomicsXorResult(ObjOperandId objId,
                                           IntPtrOperandId indexId,
                                           uint32_t valueId,
                                           Scalar::Type elementType,
                                           bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsXor64>(objId, indexId,
                                                                 valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsXor(elementType));
}
bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
                                            IntPtrOperandId indexId,
                                            Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Maybe<AutoOutputRegister> output;
  Maybe<AutoCallVM> callvm;
  if (!Scalar::isBigIntType(elementType)) {
    output.emplace(*this);
  } else {
    callvm.emplace(masm, this, allocator);
  }
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm,
                                         output ? *output : callvm->output());
  AutoSpectreBoundsScratchRegister spectreTemp(allocator, masm);
  AutoAvailableFloatRegister floatReg(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // Atomic operations are highly platform-dependent; for example, x86 and
  // ARM32 have specific requirements on which registers are used. Therefore
  // we use a VM call here instead of handling each platform separately.
  if (Scalar::isBigIntType(elementType)) {
    callvm->prepare();

    masm.Push(index);
    masm.Push(obj);

    using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t);
    callvm->call<Fn, jit::AtomicsLoad64>();
    return true;
  }

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);

  // Load the value.
  BaseIndex source(scratch, index, ScaleFromScalarType(elementType));

  // NOTE: the generated code must match the assembly code in gen_load in
  // GenerateAtomicOperations.py
  auto sync = Synchronization::Load();

  masm.memoryBarrierBefore(sync);

  Label* failUint32 = nullptr;
  MacroAssembler::Uint32Mode mode = MacroAssembler::Uint32Mode::ForceDouble;
  masm.loadFromTypedArray(elementType, source, output->valueReg(), mode,
                          scratch, failUint32);
  masm.memoryBarrierAfter(sync);

  return true;
}

bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
                                             IntPtrOperandId indexId,
                                             uint32_t valueId,
                                             Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  Maybe<Register> valueInt32;
  Maybe<Register> valueBigInt;
  if (!Scalar::isBigIntType(elementType)) {
    valueInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
  } else {
    valueBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
  }
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  if (!Scalar::isBigIntType(elementType)) {
    // Load the elements vector.
    masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);

    // Store the value.
    BaseIndex dest(scratch, index, ScaleFromScalarType(elementType));

    // NOTE: the generated code must match the assembly code in gen_store in
    // GenerateAtomicOperations.py
    auto sync = Synchronization::Store();

    masm.memoryBarrierBefore(sync);
    masm.storeToTypedIntArray(elementType, *valueInt32, dest);
    masm.memoryBarrierAfter(sync);

    masm.tagValue(JSVAL_TYPE_INT32, *valueInt32, output.valueReg());
  } else {
    // See comment in emitAtomicsCompareExchange for why we use an ABI call.

    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(output.valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = void (*)(TypedArrayObject*, size_t, const BigInt*);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(*valueBigInt);
    masm.callWithABI<Fn, jit::AtomicsStore64>();

    masm.PopRegsInMask(volatileRegs);

    masm.tagValue(JSVAL_TYPE_BIGINT, *valueBigInt, output.valueReg());
  }

  return true;
}

bool CacheIRCompiler::emitAtomicsIsLockFreeResult(Int32OperandId valueId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register value = allocator.useRegister(masm, valueId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.atomicIsLockFreeJS(value, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitBigIntAsIntNResult(Int32OperandId bitsId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register bits = allocator.useRegister(masm, bitsId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  callvm.prepare();
  masm.Push(bits);
  masm.Push(bigInt);

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  callvm.call<Fn, jit::BigIntAsIntN>();
  return true;
}

bool CacheIRCompiler::emitBigIntAsUintNResult(Int32OperandId bitsId,
                                              BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register bits = allocator.useRegister(masm, bitsId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  callvm.prepare();
  masm.Push(bits);
  masm.Push(bigInt);

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  callvm.call<Fn, jit::BigIntAsUintN>();
  return true;
}

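// Set.prototype.has on an arbitrary value goes through a VM call. The
// specialized emitters that follow (non-GC things, symbols, BigInts, and
// objects) instead compute the key's hash with masm.prepareHash* and search
// the set's hash table inline via masm.setObjectHas*, so those cases never
// leave jitcode.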
bool CacheIRCompiler::emitSetHasResult(ObjOperandId setId, ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register set = allocator.useRegister(masm, setId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  callvm.prepare();
  masm.Push(val);
  masm.Push(set);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  callvm.call<Fn, jit::SetObjectHas>();
  return true;
}

bool CacheIRCompiler::emitSetHasNonGCThingResult(ObjOperandId setId,
                                                 ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
  masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);

  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitSetHasSymbolResult(ObjOperandId setId,
                                             SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register sym = allocator.useRegister(masm, symId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  masm.prepareHashSymbol(sym, scratch1);

  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitSetHasBigIntResult(ObjOperandId setId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);
#ifndef JS_CODEGEN_ARM
  AutoScratchRegister scratch6(allocator, masm);
#else
  // We don't have more registers available on ARM32.
  Register scratch6 = set;

  masm.push(set);
#endif

  masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
  masm.setObjectHasBigInt(set, output.valueReg(), scratch1, scratch2, scratch3,
                          scratch4, scratch5, scratch6);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());

#ifdef JS_CODEGEN_ARM
  masm.pop(set);
#endif
  return true;
}

bool CacheIRCompiler::emitSetHasObjectResult(ObjOperandId setId,
                                             ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);

  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  masm.prepareHashObject(set, output.valueReg(), scratch1, scratch2, scratch3,
                         scratch4, scratch5);

  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitSetSizeResult(ObjOperandId setId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadSetObjectSize(set, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMapHasResult(ObjOperandId mapId, ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  callvm.prepare();
  masm.Push(val);
  masm.Push(map);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  callvm.call<Fn, jit::MapObjectHas>();
  return true;
}

bool CacheIRCompiler::emitMapHasNonGCThingResult(ObjOperandId mapId,
                                                 ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
  masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);

  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMapHasSymbolResult(ObjOperandId mapId,
                                             SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register sym = allocator.useRegister(masm, symId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  masm.prepareHashSymbol(sym, scratch1);

  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMapHasBigIntResult(ObjOperandId mapId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);
#ifndef JS_CODEGEN_ARM
  AutoScratchRegister scratch6(allocator, masm);
#else
  // We don't have more registers available on ARM32.
  Register scratch6 = map;

  masm.push(map);
#endif

  masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
  masm.mapObjectHasBigInt(map, output.valueReg(), scratch1, scratch2, scratch3,
                          scratch4, scratch5, scratch6);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());

#ifdef JS_CODEGEN_ARM
  masm.pop(map);
#endif
  return true;
}

bool CacheIRCompiler::emitMapHasObjectResult(ObjOperandId mapId,
                                             ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);

  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
                         scratch4, scratch5);

  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

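// The Map.prototype.get emitters mirror the `has` emitters above, but instead
// of tagging a boolean they let masm.mapObjectGet* write the stored value
// into the output register (undefined when the key is absent, matching
// Map.prototype.get).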
bool CacheIRCompiler::emitMapGetResult(ObjOperandId mapId, ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  callvm.prepare();
  masm.Push(val);
  masm.Push(map);

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  callvm.call<Fn, jit::MapObjectGet>();
  return true;
}

bool CacheIRCompiler::emitMapGetNonGCThingResult(ObjOperandId mapId,
                                                 ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
  masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);

  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}

bool CacheIRCompiler::emitMapGetSymbolResult(ObjOperandId mapId,
                                             SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register sym = allocator.useRegister(masm, symId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  masm.prepareHashSymbol(sym, scratch1);

  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}

bool CacheIRCompiler::emitMapGetBigIntResult(ObjOperandId mapId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);
#ifndef JS_CODEGEN_ARM
  AutoScratchRegister scratch6(allocator, masm);
#else
  // We don't have more registers available on ARM32.
  Register scratch6 = map;

  masm.push(map);
#endif

  masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
  masm.mapObjectGetBigInt(map, output.valueReg(), scratch1, output.valueReg(),
                          scratch2, scratch3, scratch4, scratch5, scratch6);

#ifdef JS_CODEGEN_ARM
  masm.pop(map);
#endif
  return true;
}

bool CacheIRCompiler::emitMapGetObjectResult(ObjOperandId mapId,
                                             ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);

  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
                         scratch4, scratch5);

  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}

bool CacheIRCompiler::emitMapSizeResult(ObjOperandId mapId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadMapObjectSize(map, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitArrayFromArgumentsObjectResult(ObjOperandId objId,
                                                         uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);

  callvm.prepare();
  masm.Push(obj);

  using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
  callvm.call<Fn, js::ArrayFromArgumentsObject>();
  return true;
}

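// Guard that the global's generation counter still matches the value recorded
// in the stub: the expected count is loaded from stub data, the address of
// the live counter is loaded as a raw pointer, and the guard fails if the two
// differ.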
bool CacheIRCompiler::emitGuardGlobalGeneration(uint32_t expectedOffset,
                                                uint32_t generationAddrOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  StubFieldOffset expected(expectedOffset, StubField::Type::RawInt32);
  emitLoadStubField(expected, scratch);

  StubFieldOffset generationAddr(generationAddrOffset,
                                 StubField::Type::RawPointer);
  emitLoadStubField(generationAddr, scratch2);

  masm.branch32(Assembler::NotEqual, Address(scratch2, 0), scratch,
                failure->label());

  return true;
}

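// Guard on a realm fuse: load the fuse word for `fuseIndex` and fail the stub
// unless it is still null, i.e. unless the fuse is still intact.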
bool CacheIRCompiler::emitGuardFuse(RealmFuses::FuseIndex fuseIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadRealmFuse(fuseIndex, scratch);
  masm.branchPtr(Assembler::NotEqual, scratch, ImmPtr(nullptr),
                 failure->label());
  return true;
}

bool CacheIRCompiler::emitBailout() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Generates no code.

  return true;
}

bool CacheIRCompiler::emitAssertRecoveredOnBailoutResult(ValOperandId valId,
                                                         bool mustBeRecovered) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);

  // NOP when not in IonMonkey
  masm.moveValue(UndefinedValue(), output.valueReg());

  return true;
}

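// Sanity check: re-run the property lookup in C++ (AssertPropertyLookup) with
// the id and slot recorded in the stub, to verify that looking up the id on
// the object still yields the recorded slot. All volatile registers are saved
// around the ABI call because this runs in the middle of IC code.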
bool CacheIRCompiler::emitAssertPropertyLookup(ObjOperandId objId,
                                               uint32_t idOffset,
                                               uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister id(allocator, masm);
  AutoScratchRegister slot(allocator, masm);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(id);

  StubFieldOffset idField(idOffset, StubField::Type::Id);
  emitLoadStubField(idField, id);

  StubFieldOffset slotField(slotOffset, StubField::Type::RawInt32);
  emitLoadStubField(slotField, slot);

  masm.passABIArg(obj);
  masm.passABIArg(id);
  masm.passABIArg(slot);
  using Fn = void (*)(NativeObject*, PropertyKey, uint32_t);
  masm.callWithABI<Fn, js::jit::AssertPropertyLookup>();
  masm.PopRegsInMask(save);

  return true;
}

#ifdef FUZZING_JS_FUZZILLI
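// Fuzzilli coverage hash: each primitive is converted to a double and folded
// into JSContext::executionHash with a rotate-left-and-add, BigInts are
// hashed through an ABI call, and objects take a VM call
// (FuzzilliHashObject). executionHashInputs counts how many values have been
// hashed.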
bool CacheIRCompiler::emitFuzzilliHashResult(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand input = allocator.useValueRegister(masm, valId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratchJSContext(allocator, masm);
  AutoScratchFloatRegister floatReg(this);
#  ifdef JS_PUNBOX64
  AutoScratchRegister64 scratch64(allocator, masm);
#  else
  AutoScratchRegister scratch2(allocator, masm);
#  endif

  Label addFloat, updateHash, done;

  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxInt32(input, scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notInt32);

    Label notDouble;
    masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxDouble(input, floatReg);
      masm.canonicalizeDouble(floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notDouble);

    Label notNull;
    masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
    {
      ScratchTagScopeRelease _(&tag);

      masm.move32(Imm32(1), scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notNull);

    Label notUndefined;
    masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
    {
      ScratchTagScopeRelease _(&tag);

      masm.move32(Imm32(2), scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notUndefined);

    Label notBoolean;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxBoolean(input, scratch);
      masm.add32(Imm32(3), scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notBoolean);

    Label notBigInt;
    masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxBigInt(input, scratch);

      LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                   liveVolatileFloatRegs());
      masm.PushRegsInMask(volatileRegs);
      // TODO: remove floatReg, scratch, scratchJS?

      using Fn = uint32_t (*)(BigInt* bigInt);
      masm.setupUnalignedABICall(scratchJSContext);
      masm.loadJSContext(scratchJSContext);
      masm.passABIArg(scratch);
      masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
      masm.storeCallInt32Result(scratch);

      LiveRegisterSet ignore;
      ignore.add(scratch);
      ignore.add(scratchJSContext);
      masm.PopRegsInMaskIgnore(volatileRegs, ignore);
      masm.jump(&updateHash);
    }
    masm.bind(&notBigInt);

    Label notObject;
    masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
    {
      ScratchTagScopeRelease _(&tag);

      AutoCallVM callvm(masm, this, allocator);
      Register obj = allocator.allocateRegister(masm);
      masm.unboxObject(input, obj);

      callvm.prepare();
      masm.Push(obj);

      using Fn = void (*)(JSContext* cx, JSObject* o);
      callvm.callNoResult<Fn, js::FuzzilliHashObject>();
      allocator.releaseRegister(obj);

      masm.jump(&done);
    }
    masm.bind(&notObject);
    {
      masm.move32(Imm32(0), scratch);
      masm.jump(&updateHash);
    }
  }

  {
    masm.bind(&addFloat);

    masm.loadJSContext(scratchJSContext);
    Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));

#  ifdef JS_PUNBOX64
    masm.moveDoubleToGPR64(floatReg, scratch64);
    masm.move32(scratch64.get().reg, scratch);
    masm.rshift64(Imm32(32), scratch64);
    masm.add32(scratch64.get().reg, scratch);
#  else
    Register64 scratch64(scratch, scratch2);
    masm.moveDoubleToGPR64(floatReg, scratch64);
    masm.add32(scratch2, scratch);
#  endif
  }

  {
    masm.bind(&updateHash);

    masm.loadJSContext(scratchJSContext);
    Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
    masm.load32(addrExecHash, scratchJSContext);
    masm.add32(scratchJSContext, scratch);
    masm.rotateLeft(Imm32(1), scratch, scratch);
    masm.loadJSContext(scratchJSContext);
    masm.store32(scratch, addrExecHash);

    // stats
    Address addrExecHashInputs(scratchJSContext,
                               offsetof(JSContext, executionHashInputs));
    masm.load32(addrExecHashInputs, scratch);
    masm.add32(Imm32(1), scratch);
    masm.store32(scratch, addrExecHashInputs);
  }

  masm.bind(&done);

  AutoOutputRegister output(*this);
  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}
#endif

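// VM call plumbing shared by the emitters above. callVMInternal dispatches on
// the compilation mode: Ion ICs call through the VM wrapper and then tear
// down the IonICCall exit frame by hand, while Baseline defers to
// EmitBaselineCallVM.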
template <typename Fn, Fn fn>
void CacheIRCompiler::callVM(MacroAssembler& masm) {
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  callVMInternal(masm, id);
}

void CacheIRCompiler::callVMInternal(MacroAssembler& masm, VMFunctionId id) {
  MOZ_ASSERT(enteredStubFrame_);
  if (mode_ == Mode::Ion) {
    TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
    const VMFunctionData& fun = GetVMFunction(id);
    uint32_t frameSize = fun.explicitStackSlots() * sizeof(void*);
    masm.PushFrameDescriptor(FrameType::IonICCall);
    masm.callJit(code);

    // Pop rest of the exit frame and the arguments left on the stack.
    int framePop =
        sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
    masm.implicitPop(frameSize + framePop);

    masm.freeStack(asIon()->localTracingSlots() * sizeof(Value));

    // Pop IonICCallFrameLayout.
    masm.Pop(FramePointer);
    masm.freeStack(IonICCallFrameLayout::Size() - sizeof(void*));
    return;
  }

  MOZ_ASSERT(mode_ == Mode::Baseline);

  TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);

  EmitBaselineCallVM(code, masm);
}

bool CacheIRCompiler::isBaseline() { return mode_ == Mode::Baseline; }

bool CacheIRCompiler::isIon() { return mode_ == Mode::Ion; }

BaselineCacheIRCompiler* CacheIRCompiler::asBaseline() {
  MOZ_ASSERT(this->isBaseline());
  return static_cast<BaselineCacheIRCompiler*>(this);
}

IonCacheIRCompiler* CacheIRCompiler::asIon() {
  MOZ_ASSERT(this->isIon());
  return static_cast<IonCacheIRCompiler*>(this);
}

#ifdef DEBUG
void CacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
  if (isBaseline()) {
    // Baseline does not have any FloatRegisters live when calling an IC stub.
    return;
  }

  asIon()->assertFloatRegisterAvailable(reg);
}
#endif

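// AutoCallVM ties together the pieces a CacheIR VM call needs: on Ion it
// saves live registers and (in prepare()) enters a stub frame, on Baseline it
// manages the Baseline stub frame and a scratch register. The expected usage
// is: construct, prepare(), push the arguments, then call<Fn, fn>(), which
// performs the VM call and, for functions with a result, stores it into the
// output register via storeResult<Fn>() below.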
AutoCallVM::AutoCallVM(MacroAssembler& masm, CacheIRCompiler* compiler,
                       CacheRegisterAllocator& allocator)
    : masm_(masm), compiler_(compiler), allocator_(allocator) {
  // Ion needs to `enterStubFrame` before it can callVM and it also needs to
  // initialize AutoSaveLiveRegisters.
  if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
    // A downcast is needed here as well, in order to pass the stub to
    // AutoSaveLiveRegisters.
    save_.emplace(*compiler_->asIon());
  }

  if (compiler->outputUnchecked_.isSome()) {
    output_.emplace(*compiler);
  }

  if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
    stubFrame_.emplace(*compiler_->asBaseline());
    if (output_.isSome()) {
      scratch_.emplace(allocator_, masm_, output_.ref());
    } else {
      scratch_.emplace(allocator_, masm_);
    }
  }
}

void AutoCallVM::prepare() {
  allocator_.discardStack(masm_);
  MOZ_ASSERT(compiler_ != nullptr);
  if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
    compiler_->asIon()->enterStubFrame(masm_, *save_.ptr());
    return;
  }
  MOZ_ASSERT(compiler_->mode_ == CacheIRCompiler::Mode::Baseline);
  stubFrame_->enter(masm_, scratch_.ref());
}

void AutoCallVM::storeResult(JSValueType returnType) {
  MOZ_ASSERT(returnType != JSVAL_TYPE_DOUBLE);
  if (returnType == JSVAL_TYPE_UNKNOWN) {
    masm_.storeCallResultValue(output_.ref());
  } else {
    if (output_->hasValue()) {
      masm_.tagValue(returnType, ReturnReg, output_->valueReg());
    } else {
      masm_.storeCallPointerResult(output_->typedReg().gpr());
    }
  }
}

void AutoCallVM::leaveBaselineStubFrame() {
  if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
    stubFrame_->leave(masm_);
  }
}

template <typename...>
struct VMFunctionReturnType;

template <class R, typename... Args>
struct VMFunctionReturnType<R (*)(JSContext*, Args...)> {
  using LastArgument = typename LastArg<Args...>::Type;

  // By convention VMFunctions returning `bool` use an output parameter.
  using ReturnType =
      std::conditional_t<std::is_same_v<R, bool>, LastArgument, R>;
};

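// For example, given
//   using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
// the raw return type is bool, so ReturnType resolves to the last argument,
// bool*, which ReturnTypeToJSValueType below maps to JSVAL_TYPE_BOOLEAN.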
template <class>
struct ReturnTypeToJSValueType;

// Definitions for the currently used return types.
template <>
struct ReturnTypeToJSValueType<MutableHandleValue> {
  static constexpr JSValueType result = JSVAL_TYPE_UNKNOWN;
};
template <>
struct ReturnTypeToJSValueType<bool*> {
  static constexpr JSValueType result = JSVAL_TYPE_BOOLEAN;
};
template <>
struct ReturnTypeToJSValueType<int32_t*> {
  static constexpr JSValueType result = JSVAL_TYPE_INT32;
};
template <>
struct ReturnTypeToJSValueType<JSString*> {
  static constexpr JSValueType result = JSVAL_TYPE_STRING;
};
template <>
struct ReturnTypeToJSValueType<BigInt*> {
  static constexpr JSValueType result = JSVAL_TYPE_BIGINT;
};
template <>
struct ReturnTypeToJSValueType<JSObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<PropertyIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<ArrayIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<StringIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<RegExpStringIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<PlainObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<ArrayObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<TypedArrayObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};

template <typename Fn>
void AutoCallVM::storeResult() {
  using ReturnType = typename VMFunctionReturnType<Fn>::ReturnType;
  storeResult(ReturnTypeToJSValueType<ReturnType>::result);
}

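// AutoScratchFloatRegister hands out FloatReg0. In Baseline ICs no float
// registers are live, so it can be used directly; in Ion the register may
// hold live data, so the constructor spills it and the destructor reloads it.
// Failure paths must therefore jump through failure(), which routes Ion code
// through failurePopReg_ so the spilled value is restored first.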
AutoScratchFloatRegister::AutoScratchFloatRegister(CacheIRCompiler* compiler,
                                                   FailurePath* failure)
    : compiler_(compiler), failure_(failure) {
  // If we're compiling a Baseline IC, FloatReg0 is always available.
  if (!compiler_->isBaseline()) {
    MacroAssembler& masm = compiler_->masm;
    masm.push(FloatReg0);
    compiler->allocator.setHasAutoScratchFloatRegisterSpill(true);
  }

  if (failure_) {
    failure_->setHasAutoScratchFloatRegister();
  }
}

AutoScratchFloatRegister::~AutoScratchFloatRegister() {
  if (failure_) {
    failure_->clearHasAutoScratchFloatRegister();
  }

  if (!compiler_->isBaseline()) {
    MacroAssembler& masm = compiler_->masm;
    masm.pop(FloatReg0);
    compiler_->allocator.setHasAutoScratchFloatRegisterSpill(false);

    if (failure_) {
      Label done;
      masm.jump(&done);
      masm.bind(&failurePopReg_);
      masm.pop(FloatReg0);
      masm.jump(failure_->label());
      masm.bind(&done);
    }
  }
}

Label* AutoScratchFloatRegister::failure() {
  MOZ_ASSERT(failure_);

  if (!compiler_->isBaseline()) {
    return &failurePopReg_;
  }
  return failure_->labelUnchecked();