/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/CacheIRCompiler.h"

#include "mozilla/ArrayUtils.h"
#include "mozilla/FunctionTypeTraits.h"
#include "mozilla/MaybeOneOf.h"
#include "mozilla/ScopeExit.h"

#include <type_traits>
#include <utility>

#include "jslibmath.h"
#include "jsmath.h"

#include "builtin/DataViewObject.h"
#include "builtin/MapObject.h"
#include "builtin/Object.h"
#include "gc/GCEnum.h"
#include "gc/SweepingAPI.h"  // js::gc::AutoLockStoreBuffer
#include "jit/BaselineCacheIRCompiler.h"
#include "jit/CacheIRGenerator.h"
#include "jit/IonCacheIRCompiler.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/VMFunctions.h"
#include "js/friend/DOMProxy.h"     // JS::ExpandoAndGeneration
#include "js/friend/XrayJitInfo.h"  // js::jit::GetXrayJitInfo
#include "js/ScalarType.h"          // js::Scalar::Type
#include "proxy/DOMProxy.h"
#include "proxy/Proxy.h"
#include "proxy/ScriptedProxyHandler.h"
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BigIntType.h"
#include "vm/FunctionFlags.h"  // js::FunctionFlags
#include "vm/GeneratorObject.h"
#include "vm/GetterSetter.h"
#include "vm/Interpreter.h"
#include "vm/Uint8Clamped.h"

#include "builtin/Boolean-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "jit/VMFunctionList-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::BitwiseCast;
using mozilla::Maybe;

using JS::ExpandoAndGeneration;

ValueOperand CacheRegisterAllocator::useValueRegister(MacroAssembler& masm,
                                                      ValOperandId op) {
  OperandLocation& loc = operandLocations_[op.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      currentOpRegs_.add(loc.valueReg());
      return loc.valueReg();

    case OperandLocation::ValueStack: {
      ValueOperand reg = allocateValueRegister(masm);
      popValue(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      ValueOperand reg = allocateValueRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Constant: {
      ValueOperand reg = allocateValueRegister(masm);
      masm.moveValue(loc.constant(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadReg: {
      // Temporarily add the payload register to currentOpRegs_ so
      // allocateValueRegister will stay away from it.
      currentOpRegs_.add(loc.payloadReg());
      ValueOperand reg = allocateValueRegister(masm);
      masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
      currentOpRegs_.take(loc.payloadReg());
      availableRegs_.add(loc.payloadReg());
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      ValueOperand reg = allocateValueRegister(masm);
      popPayload(masm, &loc, reg.scratchReg());
      masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::DoubleReg: {
      ValueOperand reg = allocateValueRegister(masm);
      {
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(loc.doubleReg(), reg, fpscratch);
      }
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

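// A note on the cases above: an operand can live in a boxed Value register,
// on the native stack (boxed or as a raw payload), in a baseline frame slot,
// in a double register, or be a compile-time constant. Every case
// materializes the operand as a boxed ValueOperand and, where possible,
// updates the OperandLocation so later uses of the same operand take the
// cheap ValueReg path.
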
// Load a value operand directly into a float register. The caller must have
// guarded isNumber on the provided val.
void CacheRegisterAllocator::ensureDoubleRegister(MacroAssembler& masm,
                                                  NumberOperandId op,
                                                  FloatRegister dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[op.id()];

  Label failure, done;
  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      masm.ensureDouble(loc.valueReg(), dest, &failure);
      break;
    }

    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::DoubleReg: {
      masm.moveDouble(loc.doubleReg(), dest);
      return;
    }

    case OperandLocation::Constant: {
      MOZ_ASSERT(loc.constant().isNumber(),
                 "Caller must ensure the operand is a number value");
      masm.loadConstantDouble(loc.constant().toNumber(), dest);
      return;
    }

    case OperandLocation::PayloadReg: {
      // Doubles can't be stored in payload registers, so this must be an
      // int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      masm.convertInt32ToDouble(loc.payloadReg(), dest);
      return;
    }

    case OperandLocation::PayloadStack: {
      // Doubles can't be stored in payload registers, so this must be an
      // int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.convertInt32ToDouble(addr, dest);
      return;
    }

    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand type in ensureDoubleRegister");
      return;
  }
  masm.jump(&done);
  masm.bind(&failure);
  masm.assumeUnreachable(
      "Missing guard allowed non-number to hit ensureDoubleRegister");
  masm.bind(&done);
}

void CacheRegisterAllocator::copyToScratchRegister(MacroAssembler& masm,
                                                   TypedOperandId typedId,
                                                   Register dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[typedId.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      masm.unboxNonDouble(loc.valueReg(), dest, typedId.type());
      break;
    }
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::PayloadReg: {
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      masm.mov(loc.payloadReg(), dest);
      return;
    }
    case OperandLocation::PayloadStack: {
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.loadPtr(addr, dest);
      return;
    }
    case OperandLocation::DoubleReg:
    case OperandLocation::Constant:
    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand location");
  }
}

void CacheRegisterAllocator::copyToScratchValueRegister(
    MacroAssembler& masm, ValOperandId valId, ValueOperand dest) const {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  const OperandLocation& loc = operandLocations_[valId.id()];
  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      masm.moveValue(loc.valueReg(), dest);
      break;
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::Constant:
      masm.moveValue(loc.constant(), dest);
      break;
    case OperandLocation::PayloadReg:
      masm.tagValue(loc.payloadType(), loc.payloadReg(), dest);
      break;
    case OperandLocation::PayloadStack: {
      Address addr = payloadAddress(masm, &loc);
      masm.loadPtr(addr, dest.scratchReg());
      masm.tagValue(loc.payloadType(), dest.scratchReg(), dest);
      break;
    }
    case OperandLocation::DoubleReg: {
      ScratchDoubleScope fpscratch(masm);
      masm.boxDouble(loc.doubleReg(), dest, fpscratch);
      break;
    }
    case OperandLocation::Uninitialized:
      MOZ_CRASH();
  }
}

Register CacheRegisterAllocator::useRegister(MacroAssembler& masm,
                                             TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  switch (loc.kind()) {
    case OperandLocation::PayloadReg:
      currentOpRegs_.add(loc.payloadReg());
      return loc.payloadReg();

    case OperandLocation::ValueReg: {
      // It's possible the value is still boxed: as an optimization, we unbox
      // the first time we use a value as object.
      ValueOperand val = loc.valueReg();
      availableRegs_.add(val);
      Register reg = val.scratchReg();
      availableRegs_.take(reg);
      masm.unboxNonDouble(val, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      currentOpRegs_.add(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      Register reg = allocateRegister(masm);
      popPayload(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::ValueStack: {
      // The value is on the stack, but boxed. If it's on top of the stack we
      // unbox it and then remove it from the stack, else we just unbox.
      Register reg = allocateRegister(masm);
      if (loc.valueStack() == stackPushed_) {
        masm.unboxNonDouble(Address(masm.getStackPointer(), 0), reg,
                            typedId.type());
        masm.addToStackPtr(Imm32(sizeof(js::Value)));
        MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
        stackPushed_ -= sizeof(js::Value);
      } else {
        MOZ_ASSERT(loc.valueStack() < stackPushed_);
        masm.unboxNonDouble(
            Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
            reg, typedId.type());
      }
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      Register reg = allocateRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.unboxNonDouble(addr, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::Constant: {
      Value v = loc.constant();
      Register reg = allocateRegister(masm);
      if (v.isString()) {
        masm.movePtr(ImmGCPtr(v.toString()), reg);
      } else if (v.isSymbol()) {
        masm.movePtr(ImmGCPtr(v.toSymbol()), reg);
      } else if (v.isBigInt()) {
        masm.movePtr(ImmGCPtr(v.toBigInt()), reg);
      } else {
        MOZ_CRASH("Unexpected Value");
      }
      loc.setPayloadReg(reg, v.extractNonDoubleType());
      return reg;
    }

    case OperandLocation::DoubleReg:
    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

ConstantOrRegister CacheRegisterAllocator::useConstantOrRegister(
    MacroAssembler& masm, ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[val.id()];
  switch (loc.kind()) {
    case OperandLocation::Constant:
      return loc.constant();

    case OperandLocation::PayloadReg:
    case OperandLocation::PayloadStack: {
      JSValueType payloadType = loc.payloadType();
      Register reg = useRegister(masm, TypedOperandId(val, payloadType));
      return TypedOrValueRegister(MIRTypeFromValueType(payloadType),
                                  AnyRegister(reg));
    }

    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      return TypedOrValueRegister(useValueRegister(masm, val));

    case OperandLocation::DoubleReg:
      return TypedOrValueRegister(MIRType::Double,
                                  AnyRegister(loc.doubleReg()));

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

Register CacheRegisterAllocator::defineRegister(MacroAssembler& masm,
                                                TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

  Register reg = allocateRegister(masm);
  loc.setPayloadReg(reg, typedId.type());
  return reg;
}

ValueOperand CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm,
                                                         ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[val.id()];
  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

  ValueOperand reg = allocateValueRegister(masm);
  loc.setValueReg(reg);
  return reg;
}

void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler& masm) {
  // See if any operands are dead so we can reuse their registers. Note that
  // we skip the input operands, as those are also used by failure paths, and
  // we currently don't track those uses.
  for (size_t i = writer_.numInputOperands(); i < operandLocations_.length();
       i++) {
    if (!writer_.operandIsDead(i, currentInstruction_)) {
      continue;
    }

    OperandLocation& loc = operandLocations_[i];
    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        availableRegs_.add(loc.payloadReg());
        break;
      case OperandLocation::ValueReg:
        availableRegs_.add(loc.valueReg());
        break;
      case OperandLocation::PayloadStack:
        masm.propagateOOM(freePayloadSlots_.append(loc.payloadStack()));
        break;
      case OperandLocation::ValueStack:
        masm.propagateOOM(freeValueSlots_.append(loc.valueStack()));
        break;
      case OperandLocation::Uninitialized:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        break;
    }
    loc.setUninitialized();
  }
}

void CacheRegisterAllocator::discardStack(MacroAssembler& masm) {
  // This should only be called when we are no longer using the operands,
  // as we're discarding everything from the native stack. Set all operand
  // locations to Uninitialized to catch bugs.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    operandLocations_[i].setUninitialized();
  }

  if (stackPushed_ > 0) {
    masm.addToStackPtr(Imm32(stackPushed_));
    stackPushed_ = 0;
  }
  freePayloadSlots_.clear();
  freeValueSlots_.clear();
}

Register CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  if (availableRegs_.empty()) {
    freeDeadOperandLocations(masm);
  }

  if (availableRegs_.empty()) {
    // Still no registers available, try to spill unused operands to
    // the stack.
    for (size_t i = 0; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.kind() == OperandLocation::PayloadReg) {
        Register reg = loc.payloadReg();
        if (currentOpRegs_.has(reg)) {
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // We got a register, so break out of the loop.
      }
      if (loc.kind() == OperandLocation::ValueReg) {
        ValueOperand reg = loc.valueReg();
        if (currentOpRegs_.aliases(reg)) {
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // Break out of the loop.
      }
    }
  }

  if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
    Register reg = availableRegsAfterSpill_.takeAny();
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));

    availableRegs_.add(reg);
  }

  // At this point, there must be a free register.
  MOZ_RELEASE_ASSERT(!availableRegs_.empty());

  Register reg = availableRegs_.takeAny();
  currentOpRegs_.add(reg);
  return reg;
}

void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm,
                                                   Register reg) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  // Fixed registers should be allocated first, to ensure they're
  // still available.
  MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");

  freeDeadOperandLocations(masm);

  if (availableRegs_.has(reg)) {
    availableRegs_.take(reg);
    currentOpRegs_.add(reg);
    return;
  }

  // Register may be available only after spilling contents.
  if (availableRegsAfterSpill_.has(reg)) {
    availableRegsAfterSpill_.take(reg);
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
    currentOpRegs_.add(reg);
    return;
  }

  // The register must be used by some operand. Spill it to the stack.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.kind() == OperandLocation::PayloadReg) {
      if (loc.payloadReg() != reg) {
        continue;
      }

      spillOperandToStackOrRegister(masm, &loc);
      currentOpRegs_.add(reg);
      return;
    }
    if (loc.kind() == OperandLocation::ValueReg) {
      if (!loc.valueReg().aliases(reg)) {
        continue;
      }

      ValueOperand valueReg = loc.valueReg();
      spillOperandToStackOrRegister(masm, &loc);

      availableRegs_.add(valueReg);
      availableRegs_.take(reg);
      currentOpRegs_.add(reg);
      return;
    }
  }

  MOZ_CRASH("Invalid register");
}

void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler& masm,
                                                        ValueOperand reg) {
#ifdef JS_NUNBOX32
  allocateFixedRegister(masm, reg.payloadReg());
  allocateFixedRegister(masm, reg.typeReg());
#else
  allocateFixedRegister(masm, reg.valueReg());
#endif
}

#ifdef JS_NUNBOX32
// Possible miscompilation in clang-12 (bug 1689641)
MOZ_NEVER_INLINE
#endif
ValueOperand CacheRegisterAllocator::allocateValueRegister(
    MacroAssembler& masm) {
#ifdef JS_NUNBOX32
  Register reg1 = allocateRegister(masm);
  Register reg2 = allocateRegister(masm);
  return ValueOperand(reg1, reg2);
#else
  Register reg = allocateRegister(masm);
  return ValueOperand(reg);
#endif
}

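// Value registers differ by platform: on 64-bit targets (JS_PUNBOX64) a
// ValueOperand is a single GPR holding the whole boxed Value, while on
// 32-bit targets (JS_NUNBOX32) it is a pair of GPRs, one for the type tag
// and one for the payload. That is why the NUNBOX32 paths above allocate
// two registers.
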
bool CacheRegisterAllocator::init() {
  if (!origInputLocations_.resize(writer_.numInputOperands())) {
    return false;
  }
  if (!operandLocations_.resize(writer_.numOperandIds())) {
    return false;
  }
  return true;
}

void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
  // Registers not in availableRegs_ and not used by input operands are
  // available after being spilled.
  availableRegsAfterSpill_.set() = GeneralRegisterSet::Intersect(
      GeneralRegisterSet::Not(availableRegs_.set()),
      GeneralRegisterSet::Not(inputRegisterSet()));
}

void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm) {
  // If IC inputs alias each other, make sure they are stored in different
  // locations so we don't have to deal with this complexity in the rest of
  // the allocator.
  //
  // Note that this can happen in IonMonkey with something like |o.foo = o|
  // or |o[i] = i|.

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(origInputLocations_.length() == numInputs);

  for (size_t i = 1; i < numInputs; i++) {
    OperandLocation& loc1 = operandLocations_[i];
    if (!loc1.isInRegister()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      OperandLocation& loc2 = operandLocations_[j];
      if (!loc1.aliasesReg(loc2)) {
        continue;
      }

      // loc1 and loc2 alias so we spill one of them. If one is a
      // ValueReg and the other is a PayloadReg, we have to spill the
      // PayloadReg: spilling the ValueReg instead would leave its type
      // register unallocated on 32-bit platforms.
      if (loc1.kind() == OperandLocation::ValueReg) {
        spillOperandToStack(masm, &loc2);
      } else {
        MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
        spillOperandToStack(masm, &loc1);
        break;  // Spilled loc1, so nothing else will alias it.
      }
    }
  }

#ifdef DEBUG
  assertValidState();
#endif
}

GeneralRegisterSet CacheRegisterAllocator::inputRegisterSet() const {
  MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());

  AllocatableGeneralRegisterSet result;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(loc == origInputLocations_[i]);

    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        result.addUnchecked(loc.payloadReg());
        continue;
      case OperandLocation::ValueReg:
        result.addUnchecked(loc.valueReg());
        continue;
      case OperandLocation::PayloadStack:
      case OperandLocation::ValueStack:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        continue;
      case OperandLocation::Uninitialized:
        break;
    }
    MOZ_CRASH("Invalid kind");
  }

  return result.set();
}

JSValueType CacheRegisterAllocator::knownType(ValOperandId val) const {
  const OperandLocation& loc = operandLocations_[val.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      return JSVAL_TYPE_UNKNOWN;

    case OperandLocation::PayloadStack:
    case OperandLocation::PayloadReg:
      return loc.payloadType();

    case OperandLocation::Constant:
      return loc.constant().isDouble() ? JSVAL_TYPE_DOUBLE
                                       : loc.constant().extractNonDoubleType();

    case OperandLocation::DoubleReg:
      return JSVAL_TYPE_DOUBLE;

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}

void CacheRegisterAllocator::initInputLocation(
    size_t i, const TypedOrValueRegister& reg) {
  if (reg.hasValue()) {
    initInputLocation(i, reg.valueReg());
  } else if (reg.typedReg().isFloat()) {
    MOZ_ASSERT(reg.type() == MIRType::Double);
    initInputLocation(i, reg.typedReg().fpu());
  } else {
    initInputLocation(i, reg.typedReg().gpr(),
                      ValueTypeFromMIRType(reg.type()));
  }
}

void CacheRegisterAllocator::initInputLocation(
    size_t i, const ConstantOrRegister& value) {
  if (value.constant()) {
    initInputLocation(i, value.value());
  } else {
    initInputLocation(i, value.reg());
  }
}

void CacheRegisterAllocator::spillOperandToStack(MacroAssembler& masm,
                                                 OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  if (loc->kind() == OperandLocation::ValueReg) {
    if (!freeValueSlots_.empty()) {
      uint32_t stackPos = freeValueSlots_.popCopy();
      MOZ_ASSERT(stackPos <= stackPushed_);
      masm.storeValue(loc->valueReg(),
                      Address(masm.getStackPointer(), stackPushed_ - stackPos));
      loc->setValueStack(stackPos);
      return;
    }
    stackPushed_ += sizeof(js::Value);
    masm.pushValue(loc->valueReg());
    loc->setValueStack(stackPushed_);
    return;
  }

  MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);

  if (!freePayloadSlots_.empty()) {
    uint32_t stackPos = freePayloadSlots_.popCopy();
    MOZ_ASSERT(stackPos <= stackPushed_);
    masm.storePtr(loc->payloadReg(),
                  Address(masm.getStackPointer(), stackPushed_ - stackPos));
    loc->setPayloadStack(stackPos, loc->payloadType());
    return;
  }
  stackPushed_ += sizeof(uintptr_t);
  masm.push(loc->payloadReg());
  loc->setPayloadStack(stackPushed_, loc->payloadType());
}

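// Spill slot addressing convention: stackPushed_ tracks the total bytes
// pushed by the stub so far, and each slot records the value of stackPushed_
// at the time it was pushed. A slot's current address is therefore
// (sp + stackPushed_ - slotOffset), which stays correct as more data is
// pushed later; valueAddress and payloadAddress below implement this.
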
void CacheRegisterAllocator::spillOperandToStackOrRegister(
    MacroAssembler& masm, OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  // If enough registers are available, use them.
  if (loc->kind() == OperandLocation::ValueReg) {
    static const size_t BoxPieces = sizeof(Value) / sizeof(uintptr_t);
    if (availableRegs_.set().size() >= BoxPieces) {
      ValueOperand reg = availableRegs_.takeAnyValue();
      masm.moveValue(loc->valueReg(), reg);
      loc->setValueReg(reg);
      return;
    }
  } else {
    MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
    if (!availableRegs_.empty()) {
      Register reg = availableRegs_.takeAny();
      masm.movePtr(loc->payloadReg(), reg);
      loc->setPayloadReg(reg, loc->payloadType());
      return;
    }
  }

  // Not enough registers available, spill to the stack.
  spillOperandToStack(masm, loc);
}

void CacheRegisterAllocator::popPayload(MacroAssembler& masm,
                                        OperandLocation* loc, Register dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

  // The payload is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->payloadStack() == stackPushed_) {
    masm.pop(dest);
    stackPushed_ -= sizeof(uintptr_t);
  } else {
    MOZ_ASSERT(loc->payloadStack() < stackPushed_);
    masm.loadPtr(payloadAddress(masm, loc), dest);
    masm.propagateOOM(freePayloadSlots_.append(loc->payloadStack()));
  }

  loc->setPayloadReg(dest, loc->payloadType());
}

Address CacheRegisterAllocator::valueAddress(MacroAssembler& masm,
                                             const OperandLocation* loc) const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  return Address(masm.getStackPointer(), stackPushed_ - loc->valueStack());
}

Address CacheRegisterAllocator::payloadAddress(
    MacroAssembler& masm, const OperandLocation* loc) const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  return Address(masm.getStackPointer(), stackPushed_ - loc->payloadStack());
}

void CacheRegisterAllocator::popValue(MacroAssembler& masm,
                                      OperandLocation* loc, ValueOperand dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));

  // The Value is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->valueStack() == stackPushed_) {
    masm.popValue(dest);
    stackPushed_ -= sizeof(js::Value);
  } else {
    MOZ_ASSERT(loc->valueStack() < stackPushed_);
    masm.loadValue(
        Address(masm.getStackPointer(), stackPushed_ - loc->valueStack()),
        dest);
    masm.propagateOOM(freeValueSlots_.append(loc->valueStack()));
  }

  loc->setValueReg(dest);
}

#ifdef DEBUG
void CacheRegisterAllocator::assertValidState() const {
  // Assert different operands don't have aliasing storage. We depend on this
  // when spilling registers, for instance.

  if (!JitOptions.fullDebugChecks) {
    return;
  }

  for (size_t i = 0; i < operandLocations_.length(); i++) {
    const auto& loc1 = operandLocations_[i];
    if (loc1.isUninitialized()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      const auto& loc2 = operandLocations_[j];
      if (loc2.isUninitialized()) {
        continue;
      }
      MOZ_ASSERT(!loc1.aliasesReg(loc2));
    }
  }
}
#endif

bool OperandLocation::aliasesReg(const OperandLocation& other) const {
  MOZ_ASSERT(&other != this);

  switch (other.kind_) {
    case PayloadReg:
      return aliasesReg(other.payloadReg());
    case ValueReg:
      return aliasesReg(other.valueReg());
    case PayloadStack:
    case ValueStack:
    case BaselineFrame:
    case Constant:
    case DoubleReg:
      return false;
    case Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}

void CacheRegisterAllocator::restoreInputState(MacroAssembler& masm,
                                               bool shouldDiscardStack) {
  size_t numInputOperands = origInputLocations_.length();
  MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);

  for (size_t j = 0; j < numInputOperands; j++) {
    const OperandLocation& dest = origInputLocations_[j];
    OperandLocation& cur = operandLocations_[j];
    if (dest == cur) {
      continue;
    }

    auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });

    // We have a cycle if a destination register will be used later
    // as source register. If that happens, just push the current value
    // on the stack and later get it from there.
    for (size_t k = j + 1; k < numInputOperands; k++) {
      OperandLocation& laterSource = operandLocations_[k];
      if (dest.aliasesReg(laterSource)) {
        spillOperandToStack(masm, &laterSource);
      }
    }

    if (dest.kind() == OperandLocation::ValueReg) {
      // We have to restore a Value register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          masm.moveValue(cur.valueReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadReg:
          masm.tagValue(cur.payloadType(), cur.payloadReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadStack: {
          Register scratch = dest.valueReg().scratchReg();
          popPayload(masm, &cur, scratch);
          masm.tagValue(cur.payloadType(), scratch, dest.valueReg());
          continue;
        }
        case OperandLocation::ValueStack:
          popValue(masm, &cur, dest.valueReg());
          continue;
        case OperandLocation::DoubleReg:
          masm.boxDouble(cur.doubleReg(), dest.valueReg(), cur.doubleReg());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::PayloadReg) {
      // We have to restore a payload register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(cur.valueReg(), dest.payloadReg(),
                              dest.payloadType());
          continue;
        case OperandLocation::PayloadReg:
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          masm.mov(cur.payloadReg(), dest.payloadReg());
          continue;
        case OperandLocation::PayloadStack: {
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          popPayload(masm, &cur, dest.payloadReg());
          continue;
        }
        case OperandLocation::ValueStack:
          MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
          MOZ_ASSERT(cur.valueStack() <= stackPushed_);
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(
              Address(masm.getStackPointer(), stackPushed_ - cur.valueStack()),
              dest.payloadReg(), dest.payloadType());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::DoubleReg:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::Constant ||
               dest.kind() == OperandLocation::BaselineFrame ||
               dest.kind() == OperandLocation::DoubleReg) {
      // Nothing to do.
      continue;
    }

    MOZ_CRASH("Invalid kind");
  }

  for (const SpilledRegister& spill : spilledRegs_) {
    MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

    if (spill.stackPushed == stackPushed_) {
      masm.pop(spill.reg);
      stackPushed_ -= sizeof(uintptr_t);
    } else {
      MOZ_ASSERT(spill.stackPushed < stackPushed_);
      masm.loadPtr(
          Address(masm.getStackPointer(), stackPushed_ - spill.stackPushed),
          spill.reg);
    }
  }

  if (shouldDiscardStack) {
    discardStack(masm);
  }
}

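// Illustration of the cycle handling above: if input 0 must be restored into
// the register that currently holds input 1 (and vice versa), the inner loop
// spills input 1 to the stack before its register is clobbered; input 1 is
// then restored from that stack slot on a later iteration.
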
size_t CacheIRStubInfo::stubDataSize() const {
  size_t field = 0;
  size_t size = 0;
  while (true) {
    StubField::Type type = fieldType(field++);
    if (type == StubField::Type::Limit) {
      return size;
    }
    size += StubField::sizeInBytes(type);
  }
}

template <typename T>
static GCPtr<T>* AsGCPtr(void* ptr) {
  return static_cast<GCPtr<T>*>(ptr);
}

void CacheIRStubInfo::replaceStubRawWord(uint8_t* stubData, uint32_t offset,
                                         uintptr_t oldWord,
                                         uintptr_t newWord) const {
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
  uintptr_t* addr = reinterpret_cast<uintptr_t*>(stubData + offset);
  MOZ_ASSERT(*addr == oldWord);
  *addr = newWord;
}

template <class Stub, StubField::Type type>
typename MapStubFieldToType<type>::WrappedType& CacheIRStubInfo::getStubField(
    Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  return *reinterpret_cast<WrappedType*>(stubData + offset);
}

#define INSTANTIATE_GET_STUB_FIELD(Type)                                   \
  template typename MapStubFieldToType<Type>::WrappedType&                 \
  CacheIRStubInfo::getStubField<ICCacheIRStub, Type>(ICCacheIRStub * stub, \
                                                     uint32_t offset) const;
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Shape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakShape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakGetterSetter)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::JSObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Symbol)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::String)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakBaseScript)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Value)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Id)
#undef INSTANTIATE_GET_STUB_FIELD

template <class Stub, class T>
T* CacheIRStubInfo::getPtrStubField(Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  return *reinterpret_cast<T**>(stubData + offset);
}

template gc::AllocSite* CacheIRStubInfo::getPtrStubField(ICCacheIRStub* stub,
                                                         uint32_t offset) const;

template <StubField::Type type, typename V>
static void InitWrappedPtr(void* ptr, V val) {
  using RawType = typename MapStubFieldToType<type>::RawType;
  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  auto* wrapped = static_cast<WrappedType*>(ptr);
  new (wrapped) WrappedType(mozilla::BitwiseCast<RawType>(val));
}

static void InitWordStubField(StubField::Type type, void* dest,
                              uintptr_t value) {
  MOZ_ASSERT(StubField::sizeIsWord(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uintptr_t)) == 0,
             "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
      *static_cast<uintptr_t*>(dest) = value;
      break;
    case StubField::Type::Shape:
      InitWrappedPtr<StubField::Type::Shape>(dest, value);
      break;
    case StubField::Type::WeakShape:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakShape>(dest, value);
      break;
    case StubField::Type::WeakGetterSetter:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakGetterSetter>(dest, value);
      break;
    case StubField::Type::JSObject:
      InitWrappedPtr<StubField::Type::JSObject>(dest, value);
      break;
    case StubField::Type::WeakObject:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakObject>(dest, value);
      break;
    case StubField::Type::Symbol:
      InitWrappedPtr<StubField::Type::Symbol>(dest, value);
      break;
    case StubField::Type::String:
      InitWrappedPtr<StubField::Type::String>(dest, value);
      break;
    case StubField::Type::WeakBaseScript:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakBaseScript>(dest, value);
      break;
    case StubField::Type::JitCode:
      InitWrappedPtr<StubField::Type::JitCode>(dest, value);
      break;
    case StubField::Type::Id:
      AsGCPtr<jsid>(dest)->init(jsid::fromRawBits(value));
      break;
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
    case StubField::Type::Value:
    case StubField::Type::Limit:
      MOZ_CRASH("Invalid type");
  }
}

static void InitInt64StubField(StubField::Type type, void* dest,
                               uint64_t value) {
  MOZ_ASSERT(StubField::sizeIsInt64(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uint64_t)) == 0, "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
      *static_cast<uint64_t*>(dest) = value;
      break;
    case StubField::Type::Value:
      AsGCPtr<Value>(dest)->init(Value::fromRawBits(value));
      break;
    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
    case StubField::Type::Shape:
    case StubField::Type::WeakShape:
    case StubField::Type::WeakGetterSetter:
    case StubField::Type::JSObject:
    case StubField::Type::WeakObject:
    case StubField::Type::Symbol:
    case StubField::Type::String:
    case StubField::Type::WeakBaseScript:
    case StubField::Type::JitCode:
    case StubField::Type::Id:
    case StubField::Type::Limit:
      MOZ_CRASH("Invalid type");
  }
}

void CacheIRWriter::copyStubData(uint8_t* dest) const {
  MOZ_ASSERT(!failed());

  for (const StubField& field : stubFields_) {
    if (field.sizeIsWord()) {
      InitWordStubField(field.type(), dest, field.asWord());
      dest += sizeof(uintptr_t);
    } else {
      InitInt64StubField(field.type(), dest, field.asInt64());
      dest += sizeof(uint64_t);
    }
  }
}

ICCacheIRStub* ICCacheIRStub::clone(JSRuntime* rt, ICStubSpace& newSpace) {
  const CacheIRStubInfo* info = stubInfo();
  MOZ_ASSERT(info->makesGCCalls());

  size_t bytesNeeded = info->stubDataOffset() + info->stubDataSize();

  AutoEnterOOMUnsafeRegion oomUnsafe;
  void* newStubMem = newSpace.alloc(bytesNeeded);
  if (!newStubMem) {
    oomUnsafe.crash("ICCacheIRStub::clone");
  }

  ICCacheIRStub* newStub = new (newStubMem) ICCacheIRStub(*this);

  const uint8_t* src = this->stubDataStart();
  uint8_t* dest = newStub->stubDataStart();

  // Because this can be called during sweeping when discarding JIT code, we
  // have to lock the store buffer.
  gc::AutoLockStoreBuffer lock(rt);

  uint32_t field = 0;
  while (true) {
    StubField::Type type = info->fieldType(field);
    if (type == StubField::Type::Limit) {
      break;  // Done.
    }

    if (StubField::sizeIsWord(type)) {
      const uintptr_t* srcField = reinterpret_cast<const uintptr_t*>(src);
      InitWordStubField(type, dest, *srcField);
      src += sizeof(uintptr_t);
      dest += sizeof(uintptr_t);
    } else {
      const uint64_t* srcField = reinterpret_cast<const uint64_t*>(src);
      InitInt64StubField(type, dest, *srcField);
      src += sizeof(uint64_t);
      dest += sizeof(uint64_t);
    }

    field++;
  }

  return newStub;
}

template <typename T>
static inline bool ShouldTraceWeakEdgeInStub(JSTracer* trc) {
  if constexpr (std::is_same_v<T, IonICStub>) {
    // 'Weak' edges are traced strongly in IonICs.
    return true;
  } else {
    static_assert(std::is_same_v<T, ICCacheIRStub>);
    return trc->traceWeakEdges();
  }
}

template <typename T>
void jit::TraceCacheIRStub(JSTracer* trc, T* stub,
                           const CacheIRStubInfo* stubInfo) {
  using Type = StubField::Type;

  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case Type::RawInt32:
      case Type::RawPointer:
      case Type::RawInt64:
      case Type::Double:
        break;
      case Type::Shape: {
        // For CCW IC stubs, we can store same-zone but cross-compartment
        // shapes. Use TraceSameZoneCrossCompartmentEdge to not assert in the
        // GC. Note: CacheIRWriter::writeShapeField asserts we never store
        // cross-zone shapes.
        GCPtr<Shape*>& shapeField =
            stubInfo->getStubField<T, Type::Shape>(stub, offset);
        TraceSameZoneCrossCompartmentEdge(trc, &shapeField, "cacheir-shape");
        break;
      }
      case Type::WeakShape:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          WeakHeapPtr<Shape*>& shapeField =
              stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
          if (shapeField) {
            TraceSameZoneCrossCompartmentEdge(trc, &shapeField,
                                              "cacheir-weak-shape");
          }
        }
        break;
      case Type::WeakGetterSetter:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc,
              &stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset),
              "cacheir-weak-getter-setter");
        }
        break;
      case Type::JSObject: {
        TraceEdge(trc, &stubInfo->getStubField<T, Type::JSObject>(stub, offset),
                  "cacheir-object");
        break;
      }
      case Type::WeakObject:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc, &stubInfo->getStubField<T, Type::WeakObject>(stub, offset),
              "cacheir-weak-object");
        }
        break;
      case Type::Symbol:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Symbol>(stub, offset),
                  "cacheir-symbol");
        break;
      case Type::String:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::String>(stub, offset),
                  "cacheir-string");
        break;
      case Type::WeakBaseScript:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc,
              &stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset),
              "cacheir-weak-script");
        }
        break;
      case Type::JitCode:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::JitCode>(stub, offset),
                  "cacheir-jitcode");
        break;
      case Type::Id:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Id>(stub, offset),
                  "cacheir-id");
        break;
      case Type::Value:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Value>(stub, offset),
                  "cacheir-value");
        break;
      case Type::AllocSite: {
        gc::AllocSite* site =
            stubInfo->getPtrStubField<T, gc::AllocSite>(stub, offset);
        site->trace(trc);
        break;
      }
      case Type::Limit:
        return;  // Done.
    }
    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template void jit::TraceCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                    const CacheIRStubInfo* stubInfo);

template void jit::TraceCacheIRStub(JSTracer* trc, IonICStub* stub,
                                    const CacheIRStubInfo* stubInfo);

template <typename T>
bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
                               const CacheIRStubInfo* stubInfo) {
  using Type = StubField::Type;

  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case Type::WeakShape: {
        WeakHeapPtr<Shape*>& shapeField =
            stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
        auto r = TraceWeakEdge(trc, &shapeField, "cacheir-weak-shape");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::WeakObject: {
        WeakHeapPtr<JSObject*>& objectField =
            stubInfo->getStubField<T, Type::WeakObject>(stub, offset);
        auto r = TraceWeakEdge(trc, &objectField, "cacheir-weak-object");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::WeakBaseScript: {
        WeakHeapPtr<BaseScript*>& scriptField =
            stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset);
        auto r = TraceWeakEdge(trc, &scriptField, "cacheir-weak-script");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::WeakGetterSetter: {
        WeakHeapPtr<GetterSetter*>& getterSetterField =
            stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset);
        auto r = TraceWeakEdge(trc, &getterSetterField,
                               "cacheir-weak-getter-setter");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::Limit:
        return true;  // Done.
      case Type::RawInt32:
      case Type::RawPointer:
      case Type::Shape:
      case Type::JSObject:
      case Type::Symbol:
      case Type::String:
      case Type::JitCode:
      case Type::Id:
      case Type::AllocSite:
      case Type::RawInt64:
      case Type::Value:
      case Type::Double:
        break;  // Skip non-weak fields.
    }
    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template bool jit::TraceWeakCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                        const CacheIRStubInfo* stubInfo);

template bool jit::TraceWeakCacheIRStub(JSTracer* trc, IonICStub* stub,
                                        const CacheIRStubInfo* stubInfo);

bool CacheIRWriter::stubDataEquals(const uint8_t* stubData) const {
  MOZ_ASSERT(!failed());

  const uintptr_t* stubDataWords = reinterpret_cast<const uintptr_t*>(stubData);

  for (const StubField& field : stubFields_) {
    if (field.sizeIsWord()) {
      if (field.asWord() != *stubDataWords) {
        return false;
      }
      stubDataWords++;
      continue;
    }

    if (field.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords)) {
      return false;
    }
    stubDataWords += sizeof(uint64_t) / sizeof(uintptr_t);
  }

  return true;
}

bool CacheIRWriter::stubDataEqualsIgnoring(const uint8_t* stubData,
                                           uint32_t ignoreOffset) const {
  MOZ_ASSERT(!failed());

  uint32_t offset = 0;
  for (const StubField& field : stubFields_) {
    if (offset != ignoreOffset) {
      if (field.sizeIsWord()) {
        uintptr_t raw = *reinterpret_cast<const uintptr_t*>(stubData + offset);
        if (field.asWord() != raw) {
          return false;
        }
      } else {
        uint64_t raw = *reinterpret_cast<const uint64_t*>(stubData + offset);
        if (field.asInt64() != raw) {
          return false;
        }
      }
    }
    offset += StubField::sizeInBytes(field.type());
  }

  return true;
}

HashNumber CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l) {
  HashNumber hash = mozilla::HashBytes(l.code, l.length);
  hash = mozilla::AddToHash(hash, uint32_t(l.kind));
  hash = mozilla::AddToHash(hash, uint32_t(l.engine));
  return hash;
}

bool CacheIRStubKey::match(const CacheIRStubKey& entry,
                           const CacheIRStubKey::Lookup& l) {
  if (entry.stubInfo->kind() != l.kind) {
    return false;
  }

  if (entry.stubInfo->engine() != l.engine) {
    return false;
  }

  if (entry.stubInfo->codeLength() != l.length) {
    return false;
  }

  if (!mozilla::ArrayEqual(entry.stubInfo->code(), l.code, l.length)) {
    return false;
  }

  return true;
}

CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
    : CacheIRReader(stubInfo->code(),
                    stubInfo->code() + stubInfo->codeLength()) {}

CacheIRStubInfo* CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine,
                                      bool makesGCCalls,
                                      uint32_t stubDataOffset,
                                      const CacheIRWriter& writer) {
  size_t numStubFields = writer.numStubFields();
  size_t bytesNeeded =
      sizeof(CacheIRStubInfo) + writer.codeLength() +
      (numStubFields + 1);  // +1 for the StubField::Type::Limit terminator.
  uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
  if (!p) {
    return nullptr;
  }

  // Copy the CacheIR code.
  uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
  mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());

  static_assert(sizeof(StubField::Type) == sizeof(uint8_t),
                "StubField::Type must fit in uint8_t");

  // Copy the stub field types.
  uint8_t* fieldTypes = codeStart + writer.codeLength();
  for (size_t i = 0; i < numStubFields; i++) {
    fieldTypes[i] = uint8_t(writer.stubFieldType(i));
  }
  fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);

  return new (p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset,
                                 writer.codeLength());
}

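// The allocation above is a single contiguous buffer laid out as:
//
//   [CacheIRStubInfo][CacheIR code bytes][field types..., Type::Limit]
//
// so the stub info's code and field-type accessors can simply index into the
// trailing data instead of holding separate pointers.
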
bool OperandLocation::operator==(const OperandLocation& other) const {
  if (kind_ != other.kind_) {
    return false;
  }

  switch (kind()) {
    case Uninitialized:
      return true;
    case PayloadReg:
      return payloadReg() == other.payloadReg() &&
             payloadType() == other.payloadType();
    case ValueReg:
      return valueReg() == other.valueReg();
    case PayloadStack:
      return payloadStack() == other.payloadStack() &&
             payloadType() == other.payloadType();
    case ValueStack:
      return valueStack() == other.valueStack();
    case BaselineFrame:
      return baselineFrameSlot() == other.baselineFrameSlot();
    case Constant:
      return constant() == other.constant();
    case DoubleReg:
      return doubleReg() == other.doubleReg();
  }

  MOZ_CRASH("Invalid OperandLocation kind");
}

AutoOutputRegister::AutoOutputRegister(CacheIRCompiler& compiler)
    : output_(compiler.outputUnchecked_.ref()), alloc_(compiler.allocator) {
  if (output_.hasValue()) {
    alloc_.allocateFixedValueRegister(compiler.masm, output_.valueReg());
  } else if (!output_.typedReg().isFloat()) {
    alloc_.allocateFixedRegister(compiler.masm, output_.typedReg().gpr());
  }
}

AutoOutputRegister::~AutoOutputRegister() {
  if (output_.hasValue()) {
    alloc_.releaseValueRegister(output_.valueReg());
  } else if (!output_.typedReg().isFloat()) {
    alloc_.releaseRegister(output_.typedReg().gpr());
  }
}

bool FailurePath::canShareFailurePath(const FailurePath& other) const {
  if (stackPushed_ != other.stackPushed_) {
    return false;
  }

  if (spilledRegs_.length() != other.spilledRegs_.length()) {
    return false;
  }

  for (size_t i = 0; i < spilledRegs_.length(); i++) {
    if (spilledRegs_[i] != other.spilledRegs_[i]) {
      return false;
    }
  }

  MOZ_ASSERT(inputs_.length() == other.inputs_.length());

  for (size_t i = 0; i < inputs_.length(); i++) {
    if (inputs_[i] != other.inputs_[i]) {
      return false;
    }
  }
  return true;
}

bool CacheIRCompiler::addFailurePath(FailurePath** failure) {
#ifdef DEBUG
  allocator.setAddedFailurePath();
#endif
  MOZ_ASSERT(!allocator.hasAutoScratchFloatRegisterSpill());

  FailurePath newFailure;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    if (!newFailure.appendInput(allocator.operandLocation(i))) {
      return false;
    }
  }
  if (!newFailure.setSpilledRegs(allocator.spilledRegs())) {
    return false;
  }
  newFailure.setStackPushed(allocator.stackPushed());

  // Reuse the previous failure path if the current one is the same, to
  // avoid emitting duplicate code.
  if (failurePaths.length() > 0 &&
      failurePaths.back().canShareFailurePath(newFailure)) {
    *failure = &failurePaths.back();
    return true;
  }

  if (!failurePaths.append(std::move(newFailure))) {
    return false;
  }

  *failure = &failurePaths.back();
  return true;
}

bool CacheIRCompiler::emitFailurePath(size_t index) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  FailurePath& failure = failurePaths[index];

  allocator.setStackPushed(failure.stackPushed());

  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    allocator.setOperandLocation(i, failure.input(i));
  }

  if (!allocator.setSpilledRegs(failure.spilledRegs())) {
    return false;
  }

  masm.bind(failure.label());
  allocator.restoreInputState(masm);
  return true;
}

bool CacheIRCompiler::emitGuardIsNumber(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);

  // Doubles and ints are numbers!
  if (knownType == JSVAL_TYPE_DOUBLE || knownType == JSVAL_TYPE_INT32) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardToObject(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_OBJECT) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestObject(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsNullOrUndefined(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_UNDEFINED || knownType == JSVAL_TYPE_NULL) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label success;
  masm.branchTestNull(Assembler::Equal, input, &success);
  masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());

  masm.bind(&success);
  return true;
}

bool CacheIRCompiler::emitGuardIsNull(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_NULL) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestNull(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsUndefined(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_UNDEFINED) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsNotUninitializedLexical(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand val = allocator.useValueRegister(masm, valId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestMagicValue(Assembler::Equal, val, JS_UNINITIALIZED_LEXICAL,
                            failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardBooleanToInt32(ValOperandId inputId,
                                              Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
    Register input =
        allocator.useRegister(masm, BooleanOperandId(inputId.id()));
    masm.move32(input, output);
    return true;
  }
  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.fallibleUnboxBoolean(input, output, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardToString(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_STRING) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestString(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardToSymbol(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_SYMBOL) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardToBigInt(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_BIGINT) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardToBoolean(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardToInt32(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardToNonGCThing(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestGCThing(Assembler::Equal, input, failure->label());
  return true;
}

// Infallible |emitDouble| emitters can use this implementation to avoid
// generating extra clean-up instructions to restore the scratch float
// register. To select this overload, simply omit the |Label* fail| parameter
// from the emitter lambda.
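//
// For example (illustrative only, mirroring the callers further below): a
// fallible emitter takes the extra label,
//
//   [&](FloatRegister floatReg, Label* fail) {
//     masm.convertDoubleToInt32(floatReg, output, fail, false);
//   }
//
// while an infallible emitter, such as the clamping lambda in
// emitGuardToUint8Clamped, omits the |Label* fail| parameter entirely.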
template <typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 1,
                        void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
                ValueOperand input, FailurePath* failure,
                EmitDouble emitDouble) {
  AutoScratchFloatRegister floatReg(compiler);

  masm.unboxDouble(input, floatReg);
  emitDouble(floatReg.get());
}

template <typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 2,
                        void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
                ValueOperand input, FailurePath* failure,
                EmitDouble emitDouble) {
  AutoScratchFloatRegister floatReg(compiler, failure);

  masm.unboxDouble(input, floatReg);
  emitDouble(floatReg.get(), floatReg.failure());
}
template <typename EmitInt32, typename EmitDouble>
static void EmitGuardInt32OrDouble(CacheIRCompiler* compiler,
                                   MacroAssembler& masm, ValueOperand input,
                                   Register output, FailurePath* failure,
                                   EmitInt32 emitInt32, EmitDouble emitDouble) {
  Label done;

  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxInt32(input, output);
      emitInt32();

      masm.jump(&done);
    }
    masm.bind(&notInt32);

    masm.branchTestDouble(Assembler::NotEqual, tag, failure->label());
    {
      ScratchTagScopeRelease _(&tag);

      EmitGuardDouble(compiler, masm, input, failure, emitDouble);
    }
  }
  masm.bind(&done);
}
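
// Note: EmitGuardInt32OrDouble leaves the (possibly converted) int32 in
// |output| and falls through on success; any value that is neither an int32
// nor a double branches to the failure path. The callers below differ only
// in how their |emitDouble| lambda narrows the double to an int32.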
bool CacheIRCompiler::emitGuardToInt32Index(ValOperandId inputId,
                                            Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
    masm.move32(input, output);
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  EmitGuardInt32OrDouble(
      this, masm, input, output, failure,
      []() {
        // No-op if the value is already an int32.
      },
      [&](FloatRegister floatReg, Label* fail) {
        // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
        masm.convertDoubleToInt32(floatReg, output, fail, false);
      });

  return true;
}

bool CacheIRCompiler::emitInt32ToIntPtr(Int32OperandId inputId,
                                        IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register input = allocator.useRegister(masm, inputId);
  Register output = allocator.defineRegister(masm, resultId);

  masm.move32SignExtendToPtr(input, output);
  return true;
}
bool CacheIRCompiler::emitGuardNumberToIntPtrIndex(NumberOperandId inputId,
                                                   bool supportOOB,
                                                   IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure = nullptr;
  if (!supportOOB) {
    if (!addFailurePath(&failure)) {
      return false;
    }
  }

  AutoScratchFloatRegister floatReg(this, failure);
  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
  if (supportOOB) {
    Label done, fail;
    masm.convertDoubleToPtr(floatReg, output, &fail, false);
    masm.jump(&done);

    // Substitute the invalid index with an arbitrary out-of-bounds index.
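    // -1 works here because later bounds checks compare the index unsigned
    // against the length, so it can never appear to be in bounds.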
    masm.bind(&fail);
    masm.movePtr(ImmWord(-1), output);

    masm.bind(&done);
  } else {
    masm.convertDoubleToPtr(floatReg, output, floatReg.failure(), false);
  }

  return true;
}
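
// GuardToInt32ModUint32 performs the |modulo 2**32| truncation: ToInt32 and
// ToUint32 agree on the low 32 bits of any number, so a single truncation
// serves both signed and unsigned consumers of the result.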
bool CacheIRCompiler::emitGuardToInt32ModUint32(ValOperandId inputId,
                                                Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
    if (input.constant()) {
      masm.move32(Imm32(input.value().toInt32()), output);
    } else {
      MOZ_ASSERT(input.reg().type() == MIRType::Int32);
      masm.move32(input.reg().typedReg().gpr(), output);
    }
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  EmitGuardInt32OrDouble(
      this, masm, input, output, failure,
      []() {
        // No-op if the value is already an int32.
      },
      [&](FloatRegister floatReg, Label* fail) {
        masm.branchTruncateDoubleMaybeModUint32(floatReg, output, fail);
      });

  return true;
}
bool CacheIRCompiler::emitGuardToUint8Clamped(ValOperandId inputId,
                                              Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
    if (input.constant()) {
      masm.move32(Imm32(ClampDoubleToUint8(input.value().toInt32())), output);
    } else {
      MOZ_ASSERT(input.reg().type() == MIRType::Int32);
      masm.move32(input.reg().typedReg().gpr(), output);
      masm.clampIntToUint8(output);
    }
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  EmitGuardInt32OrDouble(
      this, masm, input, output, failure,
      [&]() {
        // |output| holds the unboxed int32 value.
        masm.clampIntToUint8(output);
      },
      [&](FloatRegister floatReg) {
        masm.clampDoubleToUint8(floatReg, output);
      });

  return true;
}
bool CacheIRCompiler::emitGuardNonDoubleType(ValOperandId inputId,
                                             ValueType type) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (allocator.knownType(inputId) == JSValueType(type)) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  switch (type) {
    case ValueType::String:
      masm.branchTestString(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Symbol:
      masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::BigInt:
      masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Int32:
      masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Boolean:
      masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Undefined:
      masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Null:
      masm.branchTestNull(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Double:
    case ValueType::Magic:
    case ValueType::PrivateGCThing:
    case ValueType::Object:
#ifdef ENABLE_RECORD_TUPLE
    case ValueType::ExtendedPrimitive:
#endif
      MOZ_CRASH("unexpected type");
  }

  return true;
}
bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (kind == GuardClassKind::JSFunction) {
    if (objectGuardNeedsSpectreMitigations(objId)) {
      masm.branchTestObjIsFunction(Assembler::NotEqual, obj, scratch, obj,
                                   failure->label());
    } else {
      masm.branchTestObjIsFunctionNoSpectreMitigations(
          Assembler::NotEqual, obj, scratch, failure->label());
    }
    return true;
  }

  const JSClass* clasp = nullptr;
  switch (kind) {
    case GuardClassKind::Array:
      clasp = &ArrayObject::class_;
      break;
    case GuardClassKind::PlainObject:
      clasp = &PlainObject::class_;
      break;
    case GuardClassKind::FixedLengthArrayBuffer:
      clasp = &FixedLengthArrayBufferObject::class_;
      break;
    case GuardClassKind::FixedLengthSharedArrayBuffer:
      clasp = &FixedLengthSharedArrayBufferObject::class_;
      break;
    case GuardClassKind::FixedLengthDataView:
      clasp = &FixedLengthDataViewObject::class_;
      break;
    case GuardClassKind::MappedArguments:
      clasp = &MappedArgumentsObject::class_;
      break;
    case GuardClassKind::UnmappedArguments:
      clasp = &UnmappedArgumentsObject::class_;
      break;
    case GuardClassKind::WindowProxy:
      clasp = cx_->runtime()->maybeWindowProxyClass();
      break;
    case GuardClassKind::Set:
      clasp = &SetObject::class_;
      break;
    case GuardClassKind::Map:
      clasp = &MapObject::class_;
      break;
    case GuardClassKind::BoundFunction:
      clasp = &BoundFunctionObject::class_;
      break;
    case GuardClassKind::JSFunction:
      MOZ_CRASH("JSFunction handled before switch");
  }
  MOZ_ASSERT(clasp);

  if (objectGuardNeedsSpectreMitigations(objId)) {
    masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                                scratch, failure->label());
  }

  return true;
}
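
// Note: when Spectre mitigations are required, the class guards above also
// pass |obj| as the register to zero on a mispredicted branch, so speculative
// execution past a failed guard cannot dereference the object.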
bool CacheIRCompiler::emitGuardNullProto(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjProto(obj, scratch);
  masm.branchTestPtr(Assembler::NonZero, scratch, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsExtensible(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfObjectNotExtensible(obj, scratch, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardDynamicSlotIsSpecificObject(
    ObjOperandId objId, ObjOperandId expectedId, uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register expectedObject = allocator.useRegister(masm, expectedId);

  // Allocate registers before the failure path to make sure they're registered
  // by addFailurePath.
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Guard on the expected object.
  StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  emitLoadStubField(slot, scratch2);
  BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
  masm.fallibleUnboxObject(expectedSlot, scratch1, failure->label());
  masm.branchPtr(Assembler::NotEqual, expectedObject, scratch1,
                 failure->label());

  return true;
}

bool CacheIRCompiler::emitGuardDynamicSlotIsNotObject(ObjOperandId objId,
                                                      uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Guard that the slot isn't an object.
  StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  emitLoadStubField(slot, scratch2);
  BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
  masm.branchTestObject(Assembler::Equal, expectedSlot, failure->label());

  return true;
}
bool CacheIRCompiler::emitGuardFixedSlotValue(ObjOperandId objId,
                                              uint32_t offsetOffset,
                                              uint32_t valOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchValueRegister scratchVal(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(offset, scratch);

  StubFieldOffset val(valOffset, StubField::Type::Value);
  emitLoadValueStubField(val, scratchVal);

  BaseIndex slotVal(obj, scratch, TimesOne);
  masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
                       failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardDynamicSlotValue(ObjOperandId objId,
                                                uint32_t offsetOffset,
                                                uint32_t valOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchValueRegister scratchVal(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);

  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(offset, scratch2);

  StubFieldOffset val(valOffset, StubField::Type::Value);
  emitLoadValueStubField(val, scratchVal);

  BaseIndex slotVal(scratch1, scratch2, TimesOne);
  masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
                       failure->label());
  return true;
}
bool CacheIRCompiler::emitLoadScriptedProxyHandler(ValOperandId resultId,
                                                   ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
               output.scratchReg());
  masm.loadValue(
      Address(output.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
                                       ScriptedProxyHandler::HANDLER_EXTRA)),
      output);
  return true;
}
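
// IdToStringOrSymbol normalizes a property-key value: strings and symbols
// pass through unchanged, int32 indexes are converted to strings (via the
// static-strings cache when possible, otherwise through an ABI call), and
// anything else bails to the failure path.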
bool CacheIRCompiler::emitIdToStringOrSymbol(ValOperandId resultId,
                                             ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand id = allocator.useValueRegister(masm, idId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.moveValue(id, output);

  Label done, intDone, callVM;
  {
    ScratchTagScope tag(masm, output);
    masm.splitTagForTest(output, tag);
    masm.branchTestString(Assembler::Equal, tag, &done);
    masm.branchTestSymbol(Assembler::Equal, tag, &done);
    masm.branchTestInt32(Assembler::NotEqual, tag, failure->label());
  }

  Register intReg = output.scratchReg();
  masm.unboxInt32(output, intReg);

  // Fast path for small integers.
  masm.lookupStaticIntString(intReg, intReg, scratch, cx_->staticStrings(),
                             &callVM);
  masm.jump(&intDone);

  masm.bind(&callVM);
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSLinearString* (*)(JSContext* cx, int32_t i);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(intReg);
  masm.callWithABI<Fn, js::Int32ToStringPure>();

  masm.storeCallPointerResult(intReg);

  LiveRegisterSet ignore;
  ignore.add(intReg);
  masm.PopRegsInMaskIgnore(volatileRegs, ignore);

  masm.branchPtr(Assembler::Equal, intReg, ImmPtr(nullptr), failure->label());

  masm.bind(&intDone);
  masm.tagValue(JSVAL_TYPE_STRING, intReg, output);
  masm.bind(&done);

  return true;
}
bool CacheIRCompiler::emitLoadFixedSlot(ValOperandId resultId,
                                        ObjOperandId objId,
                                        uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  StubFieldOffset slotIndex(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(slotIndex, scratch);

  masm.loadValue(BaseIndex(obj, scratch, TimesOne), output);
  return true;
}

bool CacheIRCompiler::emitLoadDynamicSlot(ValOperandId resultId,
                                          ObjOperandId objId,
                                          uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch1(allocator, masm);
  Register scratch2 = output.scratchReg();

  StubFieldOffset slotIndex(slotOffset, StubField::Type::RawInt32);
  emitLoadStubField(slotIndex, scratch2);

  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  masm.loadValue(BaseObjectSlotIndex(scratch1, scratch2), output);
  return true;
}
bool CacheIRCompiler::emitGuardIsNativeObject(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfNonNativeObj(obj, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestObjectIsProxy(false, obj, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsNotProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestObjectIsProxy(true, obj, scratch, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardIsNotArrayBufferMaybeShared(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchPtr(Assembler::Equal, scratch,
                 ImmPtr(&FixedLengthArrayBufferObject::class_),
                 failure->label());
  masm.branchPtr(Assembler::Equal, scratch,
                 ImmPtr(&FixedLengthSharedArrayBufferObject::class_),
                 failure->label());
  masm.branchPtr(Assembler::Equal, scratch,
                 ImmPtr(&ResizableArrayBufferObject::class_), failure->label());
  masm.branchPtr(Assembler::Equal, scratch,
                 ImmPtr(&GrowableSharedArrayBufferObject::class_),
                 failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardIsTypedArray(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchIfClassIsNotTypedArray(scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsFixedLengthTypedArray(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchIfClassIsNotFixedLengthTypedArray(scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsNotDOMProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, scratch,
                                    GetDOMProxyHandlerFamily(),
                                    failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardNoDenseElements(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Make sure there are no dense elements.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::NotEqual, initLength, Imm32(0), failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardSpecificInt32(Int32OperandId numId,
                                             int32_t expected) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register num = allocator.useRegister(masm, numId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branch32(Assembler::NotEqual, num, Imm32(expected), failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardStringToInt32(StringOperandId strId,
                                             Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  Register output = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.guardStringToInt32(str, output, scratch, volatileRegs, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardStringToNumber(StringOperandId strId,
                                              NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label vmCall, done;
  // Use indexed value as fast path if possible.
  masm.loadStringIndexValue(str, scratch, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output);
  masm.jump(&done);

  masm.bind(&vmCall);
  {
    // Reserve stack for holding the result value of the call.
    masm.reserveStack(sizeof(double));
    masm.moveStackPtrTo(output.payloadOrValueReg());

    // We cannot use callVM, as callVM expects to be able to clobber all
    // operands. However, since this op is not the last in the generated IC, we
    // want to be able to reference other live values.
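    // Instead, call js::StringToNumberPure through the ABI: save the volatile
    // registers, pass a pointer to the reserved stack slot, and unbox the
    // double result from that slot afterwards.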
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(str);
    masm.passABIArg(output.payloadOrValueReg());
    masm.callWithABI<Fn, js::StringToNumberPure>();
    masm.storeCallPointerResult(scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);

    Label ok;
    masm.branchIfTrueBool(scratch, &ok);
    {
      // OOM path, recovered by StringToNumberPure.
      //
      // Use addToStackPtr instead of freeStack as freeStack tracks stack height
      // flow-insensitively, and using it twice would confuse the stack height
      // tracking.
      masm.addToStackPtr(Imm32(sizeof(double)));
      masm.jump(failure->label());
    }
    masm.bind(&ok);

    {
      ScratchDoubleScope fpscratch(masm);
      masm.loadDouble(Address(output.payloadOrValueReg(), 0), fpscratch);
      masm.boxDouble(fpscratch, output, fpscratch);
    }
    masm.freeStack(sizeof(double));
  }
  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitNumberParseIntResult(StringOperandId strId,
                                               Int32OperandId radixId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register radix = allocator.useRegister(masm, radixId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
  masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
  masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
  masm.bind(&ok);
#endif

  // Discard the stack to ensure it's balanced when we skip the vm-call.
  allocator.discardStack(masm);

  // Use indexed value as fast path if possible.
  Label vmCall, done;
  masm.loadStringIndexValue(str, scratch, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, callvm.outputValueReg());
  masm.jump(&done);

  masm.bind(&vmCall);

  callvm.prepare();
  masm.Push(radix);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
  callvm.call<Fn, js::NumberParseInt>();

  masm.bind(&done);
  return true;
}
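
// DoubleParseIntResult implements parseInt(number) without going through a
// string. It fails whenever simple truncation would be observably wrong: for
// NaN, for results outside int32 range, for negative inputs that truncate to
// zero (parseInt("-0.5") is -0, which int32 cannot represent), and for tiny
// positive values below 1.0e-6, which ToString renders in exponential
// notation (parseInt("1e-7") is 1, not 0).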
bool CacheIRCompiler::emitDoubleParseIntResult(NumberOperandId numId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, numId, floatScratch1);

  masm.branchDouble(Assembler::DoubleUnordered, floatScratch1, floatScratch1,
                    failure->label());
  masm.branchTruncateDoubleToInt32(floatScratch1, scratch, failure->label());

  Label ok;
  masm.branch32(Assembler::NotEqual, scratch, Imm32(0), &ok);

  // Accept both +0 and -0 and return 0.
  masm.loadConstantDouble(0.0, floatScratch2);
  masm.branchDouble(Assembler::DoubleEqual, floatScratch1, floatScratch2,
                    &ok);

  // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
  masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, floatScratch2);
  masm.branchDouble(Assembler::DoubleLessThan, floatScratch1, floatScratch2,
                    failure->label());

  masm.bind(&ok);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitBooleanToNumber(BooleanOperandId booleanId,
                                          NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register boolean = allocator.useRegister(masm, booleanId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  masm.tagValue(JSVAL_TYPE_INT32, boolean, output);
  return true;
}

bool CacheIRCompiler::emitGuardStringToIndex(StringOperandId strId,
                                             Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label vmCall, done;
  masm.loadStringIndexValue(str, output, &vmCall);
  masm.jump(&done);

  {
    masm.bind(&vmCall);
    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = int32_t (*)(JSString* str);
    masm.setupUnalignedABICall(output);
    masm.passABIArg(str);
    masm.callWithABI<Fn, GetIndexFromString>();
    masm.storeCallInt32Result(output);

    LiveRegisterSet ignore;
    ignore.add(output);
    masm.PopRegsInMaskIgnore(save, ignore);

    // GetIndexFromString returns a negative value on failure.
    masm.branchTest32(Assembler::Signed, output, output, failure->label());
  }

  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitLoadProto(ObjOperandId objId, ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.loadObjProto(obj, reg);

#ifdef DEBUG
  // We shouldn't encounter a null or lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label done;
  masm.branchPtr(Assembler::Above, reg, ImmWord(1), &done);
  masm.assumeUnreachable("Unexpected null or lazy proto in CacheIR LoadProto");
  masm.bind(&done);
#endif
  return true;
}

bool CacheIRCompiler::emitLoadEnclosingEnvironment(ObjOperandId objId,
                                                   ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.unboxObject(
      Address(obj, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
  return true;
}

bool CacheIRCompiler::emitLoadWrapperTarget(ObjOperandId objId,
                                            ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);
  masm.unboxObject(
      Address(reg, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()), reg);
  return true;
}
bool CacheIRCompiler::emitLoadValueTag(ValOperandId valId,
                                       ValueTagOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand val = allocator.useValueRegister(masm, valId);
  Register res = allocator.defineRegister(masm, resultId);

  Register tag = masm.extractTag(val, res);
  if (tag != res) {
    masm.mov(tag, res);
  }
  return true;
}

bool CacheIRCompiler::emitLoadDOMExpandoValue(ObjOperandId objId,
                                              ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.defineValueRegister(masm, resultId);

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
               val.scratchReg());
  masm.loadValue(Address(val.scratchReg(),
                         js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
                 val);
  return true;
}

bool CacheIRCompiler::emitLoadDOMExpandoValueIgnoreGeneration(
    ObjOperandId objId, ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  // Determine the expando's Address.
  Register scratch = output.scratchReg();
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address expandoAddr(scratch,
                      js::detail::ProxyReservedSlots::offsetOfPrivateSlot());

#ifdef DEBUG
  // Private values are stored as doubles, so assert we have a double.
  Label ok;
  masm.branchTestDouble(Assembler::Equal, expandoAddr, &ok);
  masm.assumeUnreachable("DOM expando is not a PrivateValue!");
  masm.bind(&ok);
#endif

  // Load the ExpandoAndGeneration* from the PrivateValue.
  masm.loadPrivate(expandoAddr, scratch);

  // Load expandoAndGeneration->expando into the output Value register.
  masm.loadValue(Address(scratch, ExpandoAndGeneration::offsetOfExpando()),
                 output);
  return true;
}
bool CacheIRCompiler::emitLoadUndefinedResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}

static void EmitStoreBoolean(MacroAssembler& masm, bool b,
                             const AutoOutputRegister& output) {
  if (output.hasValue()) {
    Value val = BooleanValue(b);
    masm.moveValue(val, output.valueReg());
  } else {
    MOZ_ASSERT(output.type() == JSVAL_TYPE_BOOLEAN);
    masm.movePtr(ImmWord(b), output.typedReg().gpr());
  }
}

bool CacheIRCompiler::emitLoadBooleanResult(bool val) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  EmitStoreBoolean(masm, val, output);
  return true;
}

bool CacheIRCompiler::emitLoadOperandResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand input = allocator.useValueRegister(masm, inputId);
  masm.moveValue(input, output.valueReg());
  return true;
}

static void EmitStoreResult(MacroAssembler& masm, Register reg,
                            JSValueType type,
                            const AutoOutputRegister& output) {
  if (output.hasValue()) {
    masm.tagValue(type, reg, output.valueReg());
    return;
  }
  if (type == JSVAL_TYPE_INT32 && output.typedReg().isFloat()) {
    masm.convertInt32ToDouble(reg, output.typedReg().fpu());
    return;
  }
  if (type == output.type()) {
    masm.mov(reg, output.typedReg().gpr());
    return;
  }
  masm.assumeUnreachable("Should have monitored result");
}
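
// Array |length| is stored as a uint32; values above INT32_MAX are legal
// (e.g. after setting arr.length = 2**32 - 1) but cannot be boxed as an
// Int32Value, so both length emitters below treat a set sign bit as failure.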
bool CacheIRCompiler::emitLoadInt32ArrayLengthResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
  masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);

  // Guard length fits in an int32.
  masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadInt32ArrayLength(ObjOperandId objId,
                                               Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register res = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), res);
  masm.load32(Address(res, ObjectElements::offsetOfLength()), res);

  // Guard length fits in an int32.
  masm.branchTest32(Assembler::Signed, res, res, failure->label());
  return true;
}
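
// The Double{Add,Sub,Mul,Div}Result emitters below share one shape: load
// both operands into float scratch registers, combine them in place, and box
// the resulting double into the output value register.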
bool CacheIRCompiler::emitDoubleAddResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.addDouble(floatScratch1, floatScratch0);
  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
bool CacheIRCompiler::emitDoubleSubResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.subDouble(floatScratch1, floatScratch0);
  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
bool CacheIRCompiler::emitDoubleMulResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.mulDouble(floatScratch1, floatScratch0);
  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
bool CacheIRCompiler::emitDoubleDivResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.divDouble(floatScratch1, floatScratch0);
  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
bool CacheIRCompiler::emitDoubleModResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double a, double b);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);
  masm.callWithABI<Fn, js::NumberMod>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
bool CacheIRCompiler::emitDoublePowResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);
  masm.callWithABI<Fn, js::ecmaPow>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
bool CacheIRCompiler::emitInt32AddResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(rhs, scratch);
  masm.branchAdd32(Assembler::Overflow, lhs, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
bool CacheIRCompiler::emitInt32SubResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(lhs, scratch);
  masm.branchSub32(Assembler::Overflow, rhs, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
bool CacheIRCompiler::emitInt32MulResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label maybeNegZero, done;
  masm.mov(lhs, scratch);
  masm.branchMul32(Assembler::Overflow, rhs, scratch, failure->label());
  masm.branchTest32(Assembler::Zero, scratch, scratch, &maybeNegZero);
  masm.jump(&done);

  masm.bind(&maybeNegZero);
  masm.mov(lhs, scratch2);
  // Result is -0 if exactly one of lhs or rhs is negative.
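  // (Or-ing the operands makes the sign bit of scratch2 the logical-or of
  // both sign bits. A zero product with both operands negative is impossible
  // once overflow has been ruled out, so this test is exact.)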
  masm.or32(rhs, scratch2);
  masm.branchTest32(Assembler::Signed, scratch2, scratch2, failure->label());

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
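
// Int32 division bails out whenever the quotient is not representable as an
// int32: division by zero (the result would be NaN or an infinity),
// INT32_MIN / -1 (which overflows int32 and traps on x86), 0 divided by a
// negative number (the result is -0), and any division with a non-zero
// remainder.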
bool CacheIRCompiler::emitInt32DivResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegister rem(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Prevent division by 0.
  masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());

  // Prevent -2147483648 / -1.
  Label notOverflow;
  masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
  masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
  masm.bind(&notOverflow);

  // Prevent negative 0.
  Label notZero;
  masm.branchTest32(Assembler::NonZero, lhs, lhs, &notZero);
  masm.branchTest32(Assembler::Signed, rhs, rhs, failure->label());
  masm.bind(&notZero);

  masm.mov(lhs, scratch);
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.flexibleDivMod32(rhs, scratch, rem, false, volatileRegs);

  // A remainder implies a double result.
  masm.branchTest32(Assembler::NonZero, rem, rem, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitInt32ModResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // x % 0 results in NaN.
  masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());

  // Prevent -2147483648 % -1.
  //
  // Traps on x86 and has undefined behavior on ARM32 (when __aeabi_idivmod is
  // called).
  Label notOverflow;
  masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
  masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
  masm.bind(&notOverflow);

  masm.mov(lhs, scratch);
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.flexibleRemainder32(rhs, scratch, false, volatileRegs);

  // Modulo takes the sign of the dividend; we can't return negative zero here.
  Label notZero;
  masm.branchTest32(Assembler::NonZero, scratch, scratch, &notZero);
  masm.branchTest32(Assembler::Signed, lhs, lhs, failure->label());
  masm.bind(&notZero);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
bool CacheIRCompiler::emitInt32PowResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register base = allocator.useRegister(masm, lhsId);
  Register power = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.pow32(base, power, scratch1, scratch2, scratch3, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitInt32BitOrResult(Int32OperandId lhsId,
                                           Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  masm.mov(rhs, scratch);
  masm.or32(lhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
bool CacheIRCompiler::emitInt32BitXorResult(Int32OperandId lhsId,
                                            Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  masm.mov(rhs, scratch);
  masm.xor32(lhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
bool CacheIRCompiler::emitInt32BitAndResult(Int32OperandId lhsId,
                                            Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  masm.mov(rhs, scratch);
  masm.and32(lhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
bool CacheIRCompiler::emitInt32LeftShiftResult(Int32OperandId lhsId,
                                               Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.mov(lhs, scratch);
  masm.flexibleLshift32(rhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32RightShiftResult(Int32OperandId lhsId,
                                                Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.mov(lhs, scratch);
  masm.flexibleRshift32Arithmetic(rhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32URightShiftResult(Int32OperandId lhsId,
                                                 Int32OperandId rhsId,
                                                 bool forceDouble) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(lhs, scratch);
  masm.flexibleRshift32(rhs, scratch);
  if (forceDouble) {
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  } else {
    masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  }
  return true;
}
bool CacheIRCompiler::emitInt32NegationResult(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Guard against 0 and MIN_INT by checking if low 31-bits are all zero.
  // Both of these result in a double.
  masm.branchTest32(Assembler::Zero, val, Imm32(0x7fffffff), failure->label());
  masm.mov(val, scratch);
  masm.neg32(scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitInt32IncResult(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register input = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(input, scratch);
  masm.branchAdd32(Assembler::Overflow, Imm32(1), scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32DecResult(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register input = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(input, scratch);
  masm.branchSub32(Assembler::Overflow, Imm32(1), scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32NotResult(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.mov(val, scratch);
  masm.not32(scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitDoubleNegationResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  masm.negateDouble(floatReg);
  masm.boxDouble(floatReg, output.valueReg(), floatReg);

  return true;
}

bool CacheIRCompiler::emitDoubleIncDecResult(bool isInc,
                                             NumberOperandId inputId) {
  AutoOutputRegister output(*this);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  {
    ScratchDoubleScope fpscratch(masm);
    masm.loadConstantDouble(1.0, fpscratch);
    if (isInc) {
      masm.addDouble(fpscratch, floatReg);
    } else {
      masm.subDouble(fpscratch, floatReg);
    }
  }
  masm.boxDouble(floatReg, output.valueReg(), floatReg);

  return true;
}

bool CacheIRCompiler::emitDoubleIncResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitDoubleIncDecResult(true, inputId);
}

bool CacheIRCompiler::emitDoubleDecResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitDoubleIncDecResult(false, inputId);
}
template <typename Fn, Fn fn>
bool CacheIRCompiler::emitBigIntBinaryOperationShared(BigIntOperandId lhsId,
                                                      BigIntOperandId rhsId) {
  AutoCallVM callvm(masm, this, allocator);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  masm.Push(rhs);
  masm.Push(lhs);

  callvm.call<Fn, fn>();
  return true;
}
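
// All BigInt arithmetic below goes through a VM call: the operands are
// pushed last-argument-first (rhs, then lhs), matching the usual VM calling
// convention, and AutoCallVM routes the returned BigInt into the IC output.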
bool CacheIRCompiler::emitBigIntAddResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::add>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntSubResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::sub>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntMulResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::mul>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntDivResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::div>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntModResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::mod>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntPowResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::pow>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntBitAndResult(BigIntOperandId lhsId,
                                             BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::bitAnd>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntBitOrResult(BigIntOperandId lhsId,
                                            BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::bitOr>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntBitXorResult(BigIntOperandId lhsId,
                                             BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::bitXor>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntLeftShiftResult(BigIntOperandId lhsId,
                                                BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::lsh>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntRightShiftResult(BigIntOperandId lhsId,
                                                 BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::rsh>(lhsId, rhsId);
}
3625 template <typename Fn, Fn fn>
3626 bool CacheIRCompiler::emitBigIntUnaryOperationShared(BigIntOperandId inputId) {
3627 AutoCallVM callvm(masm, this, allocator);
3628 Register val = allocator.useRegister(masm, inputId);
3630 callvm.prepare();
3632 masm.Push(val);
3634 callvm.call<Fn, fn>();
3635 return true;
3638 bool CacheIRCompiler::emitBigIntNotResult(BigIntOperandId inputId) {
3639 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3640 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3641 return emitBigIntUnaryOperationShared<Fn, BigInt::bitNot>(inputId);
3644 bool CacheIRCompiler::emitBigIntNegationResult(BigIntOperandId inputId) {
3645 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3646 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3647 return emitBigIntUnaryOperationShared<Fn, BigInt::neg>(inputId);
3650 bool CacheIRCompiler::emitBigIntIncResult(BigIntOperandId inputId) {
3651 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3652 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3653 return emitBigIntUnaryOperationShared<Fn, BigInt::inc>(inputId);
3656 bool CacheIRCompiler::emitBigIntDecResult(BigIntOperandId inputId) {
3657 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3658 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3659 return emitBigIntUnaryOperationShared<Fn, BigInt::dec>(inputId);
3662 bool CacheIRCompiler::emitTruncateDoubleToUInt32(NumberOperandId inputId,
3663 Int32OperandId resultId) {
3664 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3665 Register res = allocator.defineRegister(masm, resultId);
3667 AutoScratchFloatRegister floatReg(this);
3669 allocator.ensureDoubleRegister(masm, inputId, floatReg);
3671 Label done, truncateABICall;
3673 masm.branchTruncateDoubleMaybeModUint32(floatReg, res, &truncateABICall);
3674 masm.jump(&done);
3676 masm.bind(&truncateABICall);
3677 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
3678 save.takeUnchecked(floatReg);
3679 // Bug 1451976
3680 save.takeUnchecked(floatReg.get().asSingle());
3681 masm.PushRegsInMask(save);
3683 using Fn = int32_t (*)(double);
3684 masm.setupUnalignedABICall(res);
3685 masm.passABIArg(floatReg, ABIType::Float64);
3686 masm.callWithABI<Fn, JS::ToInt32>(ABIType::General,
3687 CheckUnsafeCallWithABI::DontCheckOther);
3688 masm.storeCallInt32Result(res);
3690 LiveRegisterSet ignore;
3691 ignore.add(res);
3692 masm.PopRegsInMaskIgnore(save, ignore);
3694 masm.bind(&done);
3695 return true;
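// The ABI fallback above calls JS::ToInt32, which truncates modulo 2^32; the
// resulting bit pattern is exactly what the uint32 interpretation needs, only
// the signedness of the view differs. E.g. in JS terms:
//
//   -1 >>> 0          // 4294967295 (same 32 bits as int32 -1)
//   4294967296.5 | 0  // 0 (fraction discarded, then wrapped mod 2^32)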
3698 bool CacheIRCompiler::emitLoadArgumentsObjectLengthResult(ObjOperandId objId) {
3699 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3700 AutoOutputRegister output(*this);
3701 Register obj = allocator.useRegister(masm, objId);
3702 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3704 FailurePath* failure;
3705 if (!addFailurePath(&failure)) {
3706 return false;
3709 masm.loadArgumentsObjectLength(obj, scratch, failure->label());
3711 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3712 return true;
3715 bool CacheIRCompiler::emitLoadArgumentsObjectLength(ObjOperandId objId,
3716 Int32OperandId resultId) {
3717 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3718 Register obj = allocator.useRegister(masm, objId);
3719 Register res = allocator.defineRegister(masm, resultId);
3721 FailurePath* failure;
3722 if (!addFailurePath(&failure)) {
3723 return false;
3726 masm.loadArgumentsObjectLength(obj, res, failure->label());
3727 return true;
3730 bool CacheIRCompiler::emitLoadArrayBufferByteLengthInt32Result(
3731 ObjOperandId objId) {
3732 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3733 AutoOutputRegister output(*this);
3734 Register obj = allocator.useRegister(masm, objId);
3735 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3737 FailurePath* failure;
3738 if (!addFailurePath(&failure)) {
3739 return false;
3742 masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
3743 masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
3744 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3745 return true;
3748 bool CacheIRCompiler::emitLoadArrayBufferByteLengthDoubleResult(
3749 ObjOperandId objId) {
3750 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3751 AutoOutputRegister output(*this);
3752 Register obj = allocator.useRegister(masm, objId);
3753 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3755 ScratchDoubleScope fpscratch(masm);
3756 masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
3757 masm.convertIntPtrToDouble(scratch, fpscratch);
3758 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
3759 return true;
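// Two byteLength variants exist because on 64-bit targets an ArrayBuffer's
// byteLength can exceed INT32_MAX: the Int32 variant above bails for such
// buffers, while this Double variant converts the intptr length exactly
// (buffer lengths stay far below 2^53, the double's integer-exact range).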
3762 bool CacheIRCompiler::emitLoadArrayBufferViewLengthInt32Result(
3763 ObjOperandId objId) {
3764 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3765 AutoOutputRegister output(*this);
3766 Register obj = allocator.useRegister(masm, objId);
3767 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3769 FailurePath* failure;
3770 if (!addFailurePath(&failure)) {
3771 return false;
3774 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
3775 masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
3776 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3777 return true;
3780 bool CacheIRCompiler::emitLoadArrayBufferViewLengthDoubleResult(
3781 ObjOperandId objId) {
3782 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3783 AutoOutputRegister output(*this);
3784 Register obj = allocator.useRegister(masm, objId);
3785 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3787 ScratchDoubleScope fpscratch(masm);
3788 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
3789 masm.convertIntPtrToDouble(scratch, fpscratch);
3790 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
3791 return true;
3794 bool CacheIRCompiler::emitLoadBoundFunctionNumArgs(ObjOperandId objId,
3795 Int32OperandId resultId) {
3796 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3798 Register obj = allocator.useRegister(masm, objId);
3799 Register output = allocator.defineRegister(masm, resultId);
3801 masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
3802 output);
3803 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
3804 return true;
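// The flags slot packs the bound-argument count into the bits above
// NumBoundArgsShift; the low bits hold flags like IsConstructorFlag (tested
// in emitGuardBoundFunctionIsConstructor below). So the count is recovered
// with a single unboxed load and shift:
//
//   numBoundArgs = flagsSlotValue >> NumBoundArgsShift;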
3807 bool CacheIRCompiler::emitLoadBoundFunctionTarget(ObjOperandId objId,
3808 ObjOperandId resultId) {
3809 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3811 Register obj = allocator.useRegister(masm, objId);
3812 Register output = allocator.defineRegister(masm, resultId);
3814 masm.unboxObject(Address(obj, BoundFunctionObject::offsetOfTargetSlot()),
3815 output);
3816 return true;
3819 bool CacheIRCompiler::emitGuardBoundFunctionIsConstructor(ObjOperandId objId) {
3820 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3822 Register obj = allocator.useRegister(masm, objId);
3824 FailurePath* failure;
3825 if (!addFailurePath(&failure)) {
3826 return false;
3829 Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
3830 masm.branchTest32(Assembler::Zero, flagsSlot,
3831 Imm32(BoundFunctionObject::IsConstructorFlag),
3832 failure->label());
3833 return true;
3836 bool CacheIRCompiler::emitGuardObjectIdentity(ObjOperandId obj1Id,
3837 ObjOperandId obj2Id) {
3838 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3840 Register obj1 = allocator.useRegister(masm, obj1Id);
3841 Register obj2 = allocator.useRegister(masm, obj2Id);
3843 FailurePath* failure;
3844 if (!addFailurePath(&failure)) {
3845 return false;
3848 masm.branchPtr(Assembler::NotEqual, obj1, obj2, failure->label());
3849 return true;
3852 bool CacheIRCompiler::emitLoadFunctionLengthResult(ObjOperandId objId) {
3853 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3854 AutoOutputRegister output(*this);
3855 Register obj = allocator.useRegister(masm, objId);
3856 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3858 FailurePath* failure;
3859 if (!addFailurePath(&failure)) {
3860 return false;
3863 // Get the JSFunction flags and arg count.
3864 masm.load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), scratch);
3866 // Functions with a SelfHostedLazyScript must take the slow path until they
3867 // are compiled, because the function length isn't known before that. If the
3868 // length was previously resolved, the length property may be shadowed.
3869 masm.branchTest32(
3870 Assembler::NonZero, scratch,
3871 Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
3872 failure->label());
3874 masm.loadFunctionLength(obj, scratch, scratch, failure->label());
3875 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3876 return true;
3879 bool CacheIRCompiler::emitLoadFunctionNameResult(ObjOperandId objId) {
3880 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3881 AutoOutputRegister output(*this);
3882 Register obj = allocator.useRegister(masm, objId);
3883 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3885 FailurePath* failure;
3886 if (!addFailurePath(&failure)) {
3887 return false;
3890 masm.loadFunctionName(obj, scratch, ImmGCPtr(cx_->names().empty_),
3891 failure->label());
3893 masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
3894 return true;
3897 bool CacheIRCompiler::emitLinearizeForCharAccess(StringOperandId strId,
3898 Int32OperandId indexId,
3899 StringOperandId resultId) {
3900 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3901 Register str = allocator.useRegister(masm, strId);
3902 Register index = allocator.useRegister(masm, indexId);
3903 Register result = allocator.defineRegister(masm, resultId);
3904 AutoScratchRegister scratch(allocator, masm);
3906 FailurePath* failure;
3907 if (!addFailurePath(&failure)) {
3908 return false;
3911 Label done;
3912 masm.movePtr(str, result);
3914 // We can omit the bounds check, because we only compare the index against the
3915 // string length. In the worst case we unnecessarily linearize the string
3916 // when the index is out-of-bounds.
3918 masm.branchIfCanLoadStringChar(str, index, scratch, &done);
3920 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
3921 liveVolatileFloatRegs());
3922 masm.PushRegsInMask(volatileRegs);
3924 using Fn = JSLinearString* (*)(JSString*);
3925 masm.setupUnalignedABICall(scratch);
3926 masm.passABIArg(str);
3927 masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
3928 masm.storeCallPointerResult(result);
3930 LiveRegisterSet ignore;
3931 ignore.add(result);
3932 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
3934 masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
3937 masm.bind(&done);
3938 return true;
3941 bool CacheIRCompiler::emitLinearizeForCodePointAccess(
3942 StringOperandId strId, Int32OperandId indexId, StringOperandId resultId) {
3943 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3944 Register str = allocator.useRegister(masm, strId);
3945 Register index = allocator.useRegister(masm, indexId);
3946 Register result = allocator.defineRegister(masm, resultId);
3947 AutoScratchRegister scratch1(allocator, masm);
3948 AutoScratchRegister scratch2(allocator, masm);
3950 FailurePath* failure;
3951 if (!addFailurePath(&failure)) {
3952 return false;
3955 Label done;
3956 masm.movePtr(str, result);
3958 // We can omit the bounds check, because we only compare the index against the
3959 // string length. In the worst case we unnecessarily linearize the string
3960 // when the index is out-of-bounds.
3962 masm.branchIfCanLoadStringCodePoint(str, index, scratch1, scratch2, &done);
3964 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
3965 liveVolatileFloatRegs());
3966 masm.PushRegsInMask(volatileRegs);
3968 using Fn = JSLinearString* (*)(JSString*);
3969 masm.setupUnalignedABICall(scratch1);
3970 masm.passABIArg(str);
3971 masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
3972 masm.storeCallPointerResult(result);
3974 LiveRegisterSet ignore;
3975 ignore.add(result);
3976 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
3978 masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
3981 masm.bind(&done);
3982 return true;
3985 bool CacheIRCompiler::emitToRelativeStringIndex(Int32OperandId indexId,
3986 StringOperandId strId,
3987 Int32OperandId resultId) {
3988 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3989 Register index = allocator.useRegister(masm, indexId);
3990 Register str = allocator.useRegister(masm, strId);
3991 Register result = allocator.defineRegister(masm, resultId);
3993 // If |index| is non-negative, it's an index relative to the start of the
3994 // string. Otherwise it's an index relative to the end of the string.
3995 masm.move32(Imm32(0), result);
3996 masm.cmp32Load32(Assembler::LessThan, index, Imm32(0),
3997 Address(str, JSString::offsetOfLength()), result);
3998 masm.add32(index, result);
3999 return true;
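// This is the relative-index mapping used by at()-style lookups:
// non-negative indices count from the start, negative ones from the end.
// For a length-5 string:
//
//   index  2  ->  0      + 2    ->  2
//   index -2  ->  length + (-2) ->  3
//
// cmp32Load32 selects 0 or the length branchlessly before the add.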
4002 bool CacheIRCompiler::emitLoadStringLengthResult(StringOperandId strId) {
4003 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4004 AutoOutputRegister output(*this);
4005 Register str = allocator.useRegister(masm, strId);
4006 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4008 masm.loadStringLength(str, scratch);
4009 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
4010 return true;
4013 bool CacheIRCompiler::emitLoadStringCharCodeResult(StringOperandId strId,
4014 Int32OperandId indexId,
4015 bool handleOOB) {
4016 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4017 AutoOutputRegister output(*this);
4018 Register str = allocator.useRegister(masm, strId);
4019 Register index = allocator.useRegister(masm, indexId);
4020 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
4021 AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
4022 AutoScratchRegister scratch3(allocator, masm);
4024 // Bounds check, load string char.
4025 Label done;
4026 if (!handleOOB) {
4027 FailurePath* failure;
4028 if (!addFailurePath(&failure)) {
4029 return false;
4032 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
4033 scratch1, failure->label());
4034 masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
4035 failure->label());
4036 } else {
4037 // Return NaN for out-of-bounds access.
4038 masm.moveValue(JS::NaNValue(), output.valueReg());
4040 // The bounds check mustn't use a scratch register which aliases the output.
4041 MOZ_ASSERT(!output.valueReg().aliases(scratch3));
4043 // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
4044 // guaranteed to see no nested ropes.
4045 Label loadFailed;
4046 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
4047 scratch3, &done);
4048 masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);
4050 Label loadedChar;
4051 masm.jump(&loadedChar);
4052 masm.bind(&loadFailed);
4053 masm.assumeUnreachable("loadStringChar can't fail for linear strings");
4054 masm.bind(&loadedChar);
4057 masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
4058 masm.bind(&done);
4059 return true;
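// With handleOOB=true this matches charCodeAt's semantics, where an
// out-of-bounds index produces NaN instead of a guard failure:
//
//   "ab".charCodeAt(1)  // 98
//   "ab".charCodeAt(5)  // NaN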
4062 bool CacheIRCompiler::emitLoadStringCodePointResult(StringOperandId strId,
4063 Int32OperandId indexId,
4064 bool handleOOB) {
4065 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4066 AutoOutputRegister output(*this);
4067 Register str = allocator.useRegister(masm, strId);
4068 Register index = allocator.useRegister(masm, indexId);
4069 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
4070 AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
4071 AutoScratchRegister scratch3(allocator, masm);
4073 // Bounds check, load string char.
4074 Label done;
4075 if (!handleOOB) {
4076 FailurePath* failure;
4077 if (!addFailurePath(&failure)) {
4078 return false;
4081 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
4082 scratch1, failure->label());
4083 masm.loadStringCodePoint(str, index, scratch1, scratch2, scratch3,
4084 failure->label());
4085 } else {
4086 // Return undefined for out-of-bounds access.
4087 masm.moveValue(JS::UndefinedValue(), output.valueReg());
4089 // The bounds check mustn't use a scratch register which aliases the output.
4090 MOZ_ASSERT(!output.valueReg().aliases(scratch3));
4092 // This CacheIR op is always preceded by |LinearizeForCodePointAccess|, so
4093 // we're guaranteed to see no nested ropes or split surrogates.
4094 Label loadFailed;
4095 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
4096 scratch3, &done);
4097 masm.loadStringCodePoint(str, index, scratch1, scratch2, scratch3,
4098 &loadFailed);
4100 Label loadedChar;
4101 masm.jump(&loadedChar);
4102 masm.bind(&loadFailed);
4103 masm.assumeUnreachable("loadStringCodePoint can't fail for linear strings");
4104 masm.bind(&loadedChar);
4107 masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
4108 masm.bind(&done);
4109 return true;
4112 bool CacheIRCompiler::emitNewStringObjectResult(uint32_t templateObjectOffset,
4113 StringOperandId strId) {
4114 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4116 AutoCallVM callvm(masm, this, allocator);
4118 Register str = allocator.useRegister(masm, strId);
4120 callvm.prepare();
4121 masm.Push(str);
4123 using Fn = JSObject* (*)(JSContext*, HandleString);
4124 callvm.call<Fn, NewStringObject>();
4125 return true;
4128 bool CacheIRCompiler::emitStringIncludesResult(StringOperandId strId,
4129 StringOperandId searchStrId) {
4130 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4132 AutoCallVM callvm(masm, this, allocator);
4134 Register str = allocator.useRegister(masm, strId);
4135 Register searchStr = allocator.useRegister(masm, searchStrId);
4137 callvm.prepare();
4138 masm.Push(searchStr);
4139 masm.Push(str);
4141 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
4142 callvm.call<Fn, js::StringIncludes>();
4143 return true;
4146 bool CacheIRCompiler::emitStringIndexOfResult(StringOperandId strId,
4147 StringOperandId searchStrId) {
4148 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4150 AutoCallVM callvm(masm, this, allocator);
4152 Register str = allocator.useRegister(masm, strId);
4153 Register searchStr = allocator.useRegister(masm, searchStrId);
4155 callvm.prepare();
4156 masm.Push(searchStr);
4157 masm.Push(str);
4159 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
4160 callvm.call<Fn, js::StringIndexOf>();
4161 return true;
4164 bool CacheIRCompiler::emitStringLastIndexOfResult(StringOperandId strId,
4165 StringOperandId searchStrId) {
4166 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4168 AutoCallVM callvm(masm, this, allocator);
4170 Register str = allocator.useRegister(masm, strId);
4171 Register searchStr = allocator.useRegister(masm, searchStrId);
4173 callvm.prepare();
4174 masm.Push(searchStr);
4175 masm.Push(str);
4177 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
4178 callvm.call<Fn, js::StringLastIndexOf>();
4179 return true;
4182 bool CacheIRCompiler::emitStringStartsWithResult(StringOperandId strId,
4183 StringOperandId searchStrId) {
4184 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4186 AutoCallVM callvm(masm, this, allocator);
4188 Register str = allocator.useRegister(masm, strId);
4189 Register searchStr = allocator.useRegister(masm, searchStrId);
4191 callvm.prepare();
4192 masm.Push(searchStr);
4193 masm.Push(str);
4195 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
4196 callvm.call<Fn, js::StringStartsWith>();
4197 return true;
4200 bool CacheIRCompiler::emitStringEndsWithResult(StringOperandId strId,
4201 StringOperandId searchStrId) {
4202 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4204 AutoCallVM callvm(masm, this, allocator);
4206 Register str = allocator.useRegister(masm, strId);
4207 Register searchStr = allocator.useRegister(masm, searchStrId);
4209 callvm.prepare();
4210 masm.Push(searchStr);
4211 masm.Push(str);
4213 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
4214 callvm.call<Fn, js::StringEndsWith>();
4215 return true;
4218 bool CacheIRCompiler::emitStringToLowerCaseResult(StringOperandId strId) {
4219 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4221 AutoCallVM callvm(masm, this, allocator);
4223 Register str = allocator.useRegister(masm, strId);
4225 callvm.prepare();
4226 masm.Push(str);
4228 using Fn = JSString* (*)(JSContext*, HandleString);
4229 callvm.call<Fn, js::StringToLowerCase>();
4230 return true;
4233 bool CacheIRCompiler::emitStringToUpperCaseResult(StringOperandId strId) {
4234 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4236 AutoCallVM callvm(masm, this, allocator);
4238 Register str = allocator.useRegister(masm, strId);
4240 callvm.prepare();
4241 masm.Push(str);
4243 using Fn = JSString* (*)(JSContext*, HandleString);
4244 callvm.call<Fn, js::StringToUpperCase>();
4245 return true;
4248 bool CacheIRCompiler::emitStringTrimResult(StringOperandId strId) {
4249 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4251 AutoCallVM callvm(masm, this, allocator);
4253 Register str = allocator.useRegister(masm, strId);
4255 callvm.prepare();
4256 masm.Push(str);
4258 using Fn = JSString* (*)(JSContext*, HandleString);
4259 callvm.call<Fn, js::StringTrim>();
4260 return true;
4263 bool CacheIRCompiler::emitStringTrimStartResult(StringOperandId strId) {
4264 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4266 AutoCallVM callvm(masm, this, allocator);
4268 Register str = allocator.useRegister(masm, strId);
4270 callvm.prepare();
4271 masm.Push(str);
4273 using Fn = JSString* (*)(JSContext*, HandleString);
4274 callvm.call<Fn, js::StringTrimStart>();
4275 return true;
4278 bool CacheIRCompiler::emitStringTrimEndResult(StringOperandId strId) {
4279 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4281 AutoCallVM callvm(masm, this, allocator);
4283 Register str = allocator.useRegister(masm, strId);
4285 callvm.prepare();
4286 masm.Push(str);
4288 using Fn = JSString* (*)(JSContext*, HandleString);
4289 callvm.call<Fn, js::StringTrimEnd>();
4290 return true;
4293 bool CacheIRCompiler::emitLoadArgumentsObjectArgResult(ObjOperandId objId,
4294 Int32OperandId indexId) {
4295 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4296 AutoOutputRegister output(*this);
4297 Register obj = allocator.useRegister(masm, objId);
4298 Register index = allocator.useRegister(masm, indexId);
4299 AutoScratchRegister scratch(allocator, masm);
4301 FailurePath* failure;
4302 if (!addFailurePath(&failure)) {
4303 return false;
4306 masm.loadArgumentsObjectElement(obj, index, output.valueReg(), scratch,
4307 failure->label());
4308 return true;
4311 bool CacheIRCompiler::emitLoadArgumentsObjectArgHoleResult(
4312 ObjOperandId objId, Int32OperandId indexId) {
4313 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4314 AutoOutputRegister output(*this);
4315 Register obj = allocator.useRegister(masm, objId);
4316 Register index = allocator.useRegister(masm, indexId);
4317 AutoScratchRegister scratch(allocator, masm);
4319 FailurePath* failure;
4320 if (!addFailurePath(&failure)) {
4321 return false;
4324 masm.loadArgumentsObjectElementHole(obj, index, output.valueReg(), scratch,
4325 failure->label());
4326 return true;
4329 bool CacheIRCompiler::emitLoadArgumentsObjectArgExistsResult(
4330 ObjOperandId objId, Int32OperandId indexId) {
4331 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4332 AutoOutputRegister output(*this);
4333 Register obj = allocator.useRegister(masm, objId);
4334 Register index = allocator.useRegister(masm, indexId);
4335 AutoScratchRegister scratch1(allocator, masm);
4336 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
4338 FailurePath* failure;
4339 if (!addFailurePath(&failure)) {
4340 return false;
4343 masm.loadArgumentsObjectElementExists(obj, index, scratch2, scratch1,
4344 failure->label());
4345 EmitStoreResult(masm, scratch2, JSVAL_TYPE_BOOLEAN, output);
4346 return true;
4349 bool CacheIRCompiler::emitLoadDenseElementResult(ObjOperandId objId,
4350 Int32OperandId indexId) {
4351 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4352 AutoOutputRegister output(*this);
4353 Register obj = allocator.useRegister(masm, objId);
4354 Register index = allocator.useRegister(masm, indexId);
4355 AutoScratchRegister scratch1(allocator, masm);
4356 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
4358 FailurePath* failure;
4359 if (!addFailurePath(&failure)) {
4360 return false;
4363 // Load obj->elements.
4364 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
4366 // Bounds check.
4367 Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
4368 masm.spectreBoundsCheck32(index, initLength, scratch2, failure->label());
4370 // Hole check.
4371 BaseObjectElementIndex element(scratch1, index);
4372 masm.branchTestMagic(Assembler::Equal, element, failure->label());
4373 masm.loadTypedOrValue(element, output);
4374 return true;
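// Dense-element holes are stored as a magic value (JS_ELEMENTS_HOLE), not as
// undefined. Reading a hole, e.g. index 1 of [0, , 2], may require a
// prototype-chain lookup, so this op bails on holes; the *HoleResult variant
// further down returns undefined directly instead.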
4377 bool CacheIRCompiler::emitGuardInt32IsNonNegative(Int32OperandId indexId) {
4378 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4379 Register index = allocator.useRegister(masm, indexId);
4381 FailurePath* failure;
4382 if (!addFailurePath(&failure)) {
4383 return false;
4386 masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
4387 return true;
4390 bool CacheIRCompiler::emitGuardIndexIsNotDenseElement(ObjOperandId objId,
4391 Int32OperandId indexId) {
4392 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4393 Register obj = allocator.useRegister(masm, objId);
4394 Register index = allocator.useRegister(masm, indexId);
4395 AutoScratchRegister scratch(allocator, masm);
4396 AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);
4398 FailurePath* failure;
4399 if (!addFailurePath(&failure)) {
4400 return false;
4403 // Load obj->elements.
4404 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
4406 // Ensure index >= initLength or the element is a hole.
4407 Label notDense;
4408 Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
4409 masm.spectreBoundsCheck32(index, initLength, spectreScratch, &notDense);
4411 BaseValueIndex element(scratch, index);
4412 masm.branchTestMagic(Assembler::Equal, element, &notDense);
4414 masm.jump(failure->label());
4416 masm.bind(&notDense);
4417 return true;
4420 bool CacheIRCompiler::emitGuardIndexIsValidUpdateOrAdd(ObjOperandId objId,
4421 Int32OperandId indexId) {
4422 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4423 Register obj = allocator.useRegister(masm, objId);
4424 Register index = allocator.useRegister(masm, indexId);
4425 AutoScratchRegister scratch(allocator, masm);
4426 AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);
4428 FailurePath* failure;
4429 if (!addFailurePath(&failure)) {
4430 return false;
4433 // Load obj->elements.
4434 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
4436 Label success;
4438 // If the array length is writable, every index is writable: branch to &success.
4439 Address flags(scratch, ObjectElements::offsetOfFlags());
4440 masm.branchTest32(Assembler::Zero, flags,
4441 Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
4442 &success);
4444 // Otherwise, ensure index is in bounds.
4445 Address length(scratch, ObjectElements::offsetOfLength());
4446 masm.spectreBoundsCheck32(index, length, spectreScratch,
4447 /* failure = */ failure->label());
4448 masm.bind(&success);
4449 return true;
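// A non-writable array length arises from, e.g.:
//
//   Object.defineProperty(arr, "length", { writable: false });
//   Object.freeze(arr);  // also makes length non-writable
//
// Adding an element at index >= length would then have to grow the array,
// which is disallowed, so in that case only in-bounds indices pass the guard.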
4452 bool CacheIRCompiler::emitGuardTagNotEqual(ValueTagOperandId lhsId,
4453 ValueTagOperandId rhsId) {
4454 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4455 Register lhs = allocator.useRegister(masm, lhsId);
4456 Register rhs = allocator.useRegister(masm, rhsId);
4458 FailurePath* failure;
4459 if (!addFailurePath(&failure)) {
4460 return false;
4463 Label done;
4464 masm.branch32(Assembler::Equal, lhs, rhs, failure->label());
4466 // If both lhs and rhs are numbers, differing tags (int32 vs. double) don't
4467 // imply differing values, so the tag comparison can't prove inequality.
4468 masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
4469 masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
4470 masm.jump(failure->label());
4472 masm.bind(&done);
4473 return true;
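// The number caveat above: 1 (int32 tag) and 1.0 (double tag) are equal as
// JS values despite their different tags, so when both operands are numbers
// the guard conservatively fails even though the tags differ.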
4476 bool CacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto(
4477 ObjOperandId objId, uint32_t shapeWrapperOffset) {
4478 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4480 Register obj = allocator.useRegister(masm, objId);
4481 StubFieldOffset shapeWrapper(shapeWrapperOffset, StubField::Type::JSObject);
4483 AutoScratchRegister scratch(allocator, masm);
4484 AutoScratchRegister scratch2(allocator, masm);
4485 AutoScratchRegister scratch3(allocator, masm);
4487 FailurePath* failure;
4488 if (!addFailurePath(&failure)) {
4489 return false;
4492 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
4493 Address holderAddress(scratch,
4494 sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
4495 Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
4496 GetXrayJitInfo()->holderExpandoSlot));
4498 masm.fallibleUnboxObject(holderAddress, scratch, failure->label());
4499 masm.fallibleUnboxObject(expandoAddress, scratch, failure->label());
4501 // Unwrap the expando before checking its shape.
4502 masm.loadPtr(Address(scratch, ProxyObject::offsetOfReservedSlots()), scratch);
4503 masm.unboxObject(
4504 Address(scratch, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
4505 scratch);
4507 emitLoadStubField(shapeWrapper, scratch2);
4508 LoadShapeWrapperContents(masm, scratch2, scratch2, failure->label());
4509 masm.branchTestObjShape(Assembler::NotEqual, scratch, scratch2, scratch3,
4510 scratch, failure->label());
4512 // The reserved slots on the expando should all be in fixed slots.
4513 Address protoAddress(scratch, NativeObject::getFixedSlotOffset(
4514 GetXrayJitInfo()->expandoProtoSlot));
4515 masm.branchTestUndefined(Assembler::NotEqual, protoAddress, failure->label());
4517 return true;
4520 bool CacheIRCompiler::emitGuardXrayNoExpando(ObjOperandId objId) {
4521 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4523 Register obj = allocator.useRegister(masm, objId);
4524 AutoScratchRegister scratch(allocator, masm);
4526 FailurePath* failure;
4527 if (!addFailurePath(&failure)) {
4528 return false;
4531 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
4532 Address holderAddress(scratch,
4533 sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
4534 Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
4535 GetXrayJitInfo()->holderExpandoSlot));
4537 Label done;
4538 masm.fallibleUnboxObject(holderAddress, scratch, &done);
4539 masm.branchTestObject(Assembler::Equal, expandoAddress, failure->label());
4540 masm.bind(&done);
4542 return true;
4545 bool CacheIRCompiler::emitGuardNoAllocationMetadataBuilder(
4546 uint32_t builderAddrOffset) {
4547 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4548 AutoScratchRegister scratch(allocator, masm);
4550 FailurePath* failure;
4551 if (!addFailurePath(&failure)) {
4552 return false;
4555 StubFieldOffset builderField(builderAddrOffset, StubField::Type::RawPointer);
4556 emitLoadStubField(builderField, scratch);
4557 masm.branchPtr(Assembler::NotEqual, Address(scratch, 0), ImmWord(0),
4558 failure->label());
4560 return true;
4563 bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId,
4564 bool constructing) {
4565 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4566 Register fun = allocator.useRegister(masm, funId);
4568 FailurePath* failure;
4569 if (!addFailurePath(&failure)) {
4570 return false;
4573 masm.branchIfFunctionHasNoJitEntry(fun, constructing, failure->label());
4574 return true;
4577 bool CacheIRCompiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId) {
4578 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4579 Register obj = allocator.useRegister(masm, funId);
4580 AutoScratchRegister scratch(allocator, masm);
4582 FailurePath* failure;
4583 if (!addFailurePath(&failure)) {
4584 return false;
4587 masm.branchIfFunctionHasJitEntry(obj, /*isConstructing =*/false,
4588 failure->label());
4589 return true;
4592 bool CacheIRCompiler::emitGuardFunctionIsNonBuiltinCtor(ObjOperandId funId) {
4593 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4595 Register fun = allocator.useRegister(masm, funId);
4596 AutoScratchRegister scratch(allocator, masm);
4598 FailurePath* failure;
4599 if (!addFailurePath(&failure)) {
4600 return false;
4603 masm.branchIfNotFunctionIsNonBuiltinCtor(fun, scratch, failure->label());
4604 return true;
4607 bool CacheIRCompiler::emitGuardFunctionIsConstructor(ObjOperandId funId) {
4608 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4609 Register funcReg = allocator.useRegister(masm, funId);
4610 AutoScratchRegister scratch(allocator, masm);
4612 FailurePath* failure;
4613 if (!addFailurePath(&failure)) {
4614 return false;
4617 // Ensure the callee is a constructor.
4618 masm.branchTestFunctionFlags(funcReg, FunctionFlags::CONSTRUCTOR,
4619 Assembler::Zero, failure->label());
4620 return true;
4623 bool CacheIRCompiler::emitGuardNotClassConstructor(ObjOperandId funId) {
4624 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4625 Register fun = allocator.useRegister(masm, funId);
4626 AutoScratchRegister scratch(allocator, masm);
4628 FailurePath* failure;
4629 if (!addFailurePath(&failure)) {
4630 return false;
4633 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
4634 fun, scratch, failure->label());
4635 return true;
4638 bool CacheIRCompiler::emitGuardArrayIsPacked(ObjOperandId arrayId) {
4639 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4640 Register array = allocator.useRegister(masm, arrayId);
4641 AutoScratchRegister scratch(allocator, masm);
4642 AutoScratchRegister scratch2(allocator, masm);
4644 FailurePath* failure;
4645 if (!addFailurePath(&failure)) {
4646 return false;
4649 masm.branchArrayIsNotPacked(array, scratch, scratch2, failure->label());
4650 return true;
4653 bool CacheIRCompiler::emitGuardArgumentsObjectFlags(ObjOperandId objId,
4654 uint8_t flags) {
4655 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4656 Register obj = allocator.useRegister(masm, objId);
4657 AutoScratchRegister scratch(allocator, masm);
4659 FailurePath* failure;
4660 if (!addFailurePath(&failure)) {
4661 return false;
4664 masm.branchTestArgumentsObjectFlags(obj, scratch, flags, Assembler::NonZero,
4665 failure->label());
4666 return true;
4669 bool CacheIRCompiler::emitLoadDenseElementHoleResult(ObjOperandId objId,
4670 Int32OperandId indexId) {
4671 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4672 AutoOutputRegister output(*this);
4673 Register obj = allocator.useRegister(masm, objId);
4674 Register index = allocator.useRegister(masm, indexId);
4675 AutoScratchRegister scratch1(allocator, masm);
4676 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
4678 FailurePath* failure;
4679 if (!addFailurePath(&failure)) {
4680 return false;
4683 // Make sure the index is nonnegative.
4684 masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
4686 // Load obj->elements.
4687 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
4689 // Guard on the initialized length.
4690 Label hole;
4691 Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
4692 masm.spectreBoundsCheck32(index, initLength, scratch2, &hole);
4694 // Load the value.
4695 Label done;
4696 masm.loadValue(BaseObjectElementIndex(scratch1, index), output.valueReg());
4697 masm.branchTestMagic(Assembler::NotEqual, output.valueReg(), &done);
4699 // Load undefined for the hole.
4700 masm.bind(&hole);
4701 masm.moveValue(UndefinedValue(), output.valueReg());
4703 masm.bind(&done);
4704 return true;
4707 bool CacheIRCompiler::emitLoadTypedArrayElementExistsResult(
4708 ObjOperandId objId, IntPtrOperandId indexId) {
4709 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4710 AutoOutputRegister output(*this);
4711 Register obj = allocator.useRegister(masm, objId);
4712 Register index = allocator.useRegister(masm, indexId);
4713 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4715 Label outOfBounds, done;
4717 // Bounds check.
4718 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
4719 masm.branchPtr(Assembler::BelowOrEqual, scratch, index, &outOfBounds);
4720 EmitStoreBoolean(masm, true, output);
4721 masm.jump(&done);
4723 masm.bind(&outOfBounds);
4724 EmitStoreBoolean(masm, false, output);
4726 masm.bind(&done);
4727 return true;
4730 bool CacheIRCompiler::emitLoadDenseElementExistsResult(ObjOperandId objId,
4731 Int32OperandId indexId) {
4732 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4733 AutoOutputRegister output(*this);
4734 Register obj = allocator.useRegister(masm, objId);
4735 Register index = allocator.useRegister(masm, indexId);
4736 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4738 FailurePath* failure;
4739 if (!addFailurePath(&failure)) {
4740 return false;
4743 // Load obj->elements.
4744 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
4746 // Bounds check. Unsigned compare sends negative indices to next IC.
4747 Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
4748 masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());
4750 // Hole check.
4751 BaseObjectElementIndex element(scratch, index);
4752 masm.branchTestMagic(Assembler::Equal, element, failure->label());
4754 EmitStoreBoolean(masm, true, output);
4755 return true;
4758 bool CacheIRCompiler::emitLoadDenseElementHoleExistsResult(
4759 ObjOperandId objId, Int32OperandId indexId) {
4760 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4761 AutoOutputRegister output(*this);
4762 Register obj = allocator.useRegister(masm, objId);
4763 Register index = allocator.useRegister(masm, indexId);
4764 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4766 FailurePath* failure;
4767 if (!addFailurePath(&failure)) {
4768 return false;
4771 // Make sure the index is nonnegative.
4772 masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
4774 // Load obj->elements.
4775 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
4777 // Guard on the initialized length.
4778 Label hole;
4779 Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
4780 masm.branch32(Assembler::BelowOrEqual, initLength, index, &hole);
4782 // Load value and replace with true.
4783 Label done;
4784 BaseObjectElementIndex element(scratch, index);
4785 masm.branchTestMagic(Assembler::Equal, element, &hole);
4786 EmitStoreBoolean(masm, true, output);
4787 masm.jump(&done);
4789 // Load false for the hole.
4790 masm.bind(&hole);
4791 EmitStoreBoolean(masm, false, output);
4793 masm.bind(&done);
4794 return true;
4797 bool CacheIRCompiler::emitPackedArrayPopResult(ObjOperandId arrayId) {
4798 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4800 AutoOutputRegister output(*this);
4801 Register array = allocator.useRegister(masm, arrayId);
4802 AutoScratchRegister scratch1(allocator, masm);
4803 AutoScratchRegister scratch2(allocator, masm);
4805 FailurePath* failure;
4806 if (!addFailurePath(&failure)) {
4807 return false;
4810 masm.packedArrayPop(array, output.valueReg(), scratch1, scratch2,
4811 failure->label());
4812 return true;
4815 bool CacheIRCompiler::emitPackedArrayShiftResult(ObjOperandId arrayId) {
4816 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4818 AutoOutputRegister output(*this);
4819 Register array = allocator.useRegister(masm, arrayId);
4820 AutoScratchRegister scratch1(allocator, masm);
4821 AutoScratchRegister scratch2(allocator, masm);
4823 FailurePath* failure;
4824 if (!addFailurePath(&failure)) {
4825 return false;
4828 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
4829 liveVolatileFloatRegs());
4830 masm.packedArrayShift(array, output.valueReg(), scratch1, scratch2,
4831 volatileRegs, failure->label());
4832 return true;
4835 bool CacheIRCompiler::emitIsObjectResult(ValOperandId inputId) {
4836 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4838 AutoOutputRegister output(*this);
4839 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4841 ValueOperand val = allocator.useValueRegister(masm, inputId);
4843 masm.testObjectSet(Assembler::Equal, val, scratch);
4845 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
4846 return true;
4849 bool CacheIRCompiler::emitIsPackedArrayResult(ObjOperandId objId) {
4850 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4852 AutoOutputRegister output(*this);
4853 Register obj = allocator.useRegister(masm, objId);
4854 AutoScratchRegister scratch(allocator, masm);
4856 Register outputScratch = output.valueReg().scratchReg();
4857 masm.setIsPackedArray(obj, outputScratch, scratch);
4858 masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
4859 return true;
4862 bool CacheIRCompiler::emitIsCallableResult(ValOperandId inputId) {
4863 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4865 AutoOutputRegister output(*this);
4866 AutoScratchRegister scratch1(allocator, masm);
4867 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
4869 ValueOperand val = allocator.useValueRegister(masm, inputId);
4871 Label isObject, done;
4872 masm.branchTestObject(Assembler::Equal, val, &isObject);
4873 // Primitives are never callable.
4874 masm.move32(Imm32(0), scratch2);
4875 masm.jump(&done);
4877 masm.bind(&isObject);
4878 masm.unboxObject(val, scratch1);
4880 Label isProxy;
4881 masm.isCallable(scratch1, scratch2, &isProxy);
4882 masm.jump(&done);
4884 masm.bind(&isProxy);
4886 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
4887 liveVolatileFloatRegs());
4888 masm.PushRegsInMask(volatileRegs);
4890 using Fn = bool (*)(JSObject* obj);
4891 masm.setupUnalignedABICall(scratch2);
4892 masm.passABIArg(scratch1);
4893 masm.callWithABI<Fn, ObjectIsCallable>();
4894 masm.storeCallBoolResult(scratch2);
4896 LiveRegisterSet ignore;
4897 ignore.add(scratch2);
4898 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
4901 masm.bind(&done);
4902 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
4903 return true;
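// masm.isCallable answers inline for ordinary objects but branches here for
// proxies, whose callability depends on their target rather than the class:
//
//   typeof new Proxy(function(){}, {})  // "function" (callable)
//   typeof new Proxy({}, {})            // "object" (not callable)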
4906 bool CacheIRCompiler::emitIsConstructorResult(ObjOperandId objId) {
4907 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4909 AutoOutputRegister output(*this);
4910 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4912 Register obj = allocator.useRegister(masm, objId);
4914 Label isProxy, done;
4915 masm.isConstructor(obj, scratch, &isProxy);
4916 masm.jump(&done);
4918 masm.bind(&isProxy);
4920 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
4921 liveVolatileFloatRegs());
4922 masm.PushRegsInMask(volatileRegs);
4924 using Fn = bool (*)(JSObject* obj);
4925 masm.setupUnalignedABICall(scratch);
4926 masm.passABIArg(obj);
4927 masm.callWithABI<Fn, ObjectIsConstructor>();
4928 masm.storeCallBoolResult(scratch);
4930 LiveRegisterSet ignore;
4931 ignore.add(scratch);
4932 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
4935 masm.bind(&done);
4936 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
4937 return true;
4940 bool CacheIRCompiler::emitIsCrossRealmArrayConstructorResult(
4941 ObjOperandId objId) {
4942 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4944 AutoOutputRegister output(*this);
4945 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4946 Register obj = allocator.useRegister(masm, objId);
4948 masm.setIsCrossRealmArrayConstructor(obj, scratch);
4949 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
4950 return true;
4953 bool CacheIRCompiler::emitArrayBufferViewByteOffsetInt32Result(
4954 ObjOperandId objId) {
4955 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4957 AutoOutputRegister output(*this);
4958 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4959 Register obj = allocator.useRegister(masm, objId);
4961 FailurePath* failure;
4962 if (!addFailurePath(&failure)) {
4963 return false;
4966 masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
4967 masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
4968 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
4969 return true;
4972 bool CacheIRCompiler::emitArrayBufferViewByteOffsetDoubleResult(
4973 ObjOperandId objId) {
4974 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4976 AutoOutputRegister output(*this);
4977 Register obj = allocator.useRegister(masm, objId);
4978 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4980 ScratchDoubleScope fpscratch(masm);
4981 masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
4982 masm.convertIntPtrToDouble(scratch, fpscratch);
4983 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
4984 return true;
4987 bool CacheIRCompiler::emitTypedArrayByteLengthInt32Result(ObjOperandId objId) {
4988 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4990 AutoOutputRegister output(*this);
4991 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
4992 AutoScratchRegister scratch2(allocator, masm);
4993 Register obj = allocator.useRegister(masm, objId);
4995 FailurePath* failure;
4996 if (!addFailurePath(&failure)) {
4997 return false;
5000 masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
5001 masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
5002 masm.typedArrayElementSize(obj, scratch2);
5004 masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
5005 failure->label());
5007 masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
5008 return true;
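// byteLength is length * bytesPerElement, and branchMul32 bails if the
// product overflows int32; the Double variant below handles that case. E.g.
// a Float64Array of 2^28 elements has a 2^31-byte byteLength, which doesn't
// fit in int32.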
5011 bool CacheIRCompiler::emitTypedArrayByteLengthDoubleResult(ObjOperandId objId) {
5012 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5014 AutoOutputRegister output(*this);
5015 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
5016 AutoScratchRegister scratch2(allocator, masm);
5017 Register obj = allocator.useRegister(masm, objId);
5019 masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
5020 masm.typedArrayElementSize(obj, scratch2);
5021 masm.mulPtr(scratch2, scratch1);
5023 ScratchDoubleScope fpscratch(masm);
5024 masm.convertIntPtrToDouble(scratch1, fpscratch);
5025 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
5026 return true;
5029 bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId) {
5030 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5032 AutoOutputRegister output(*this);
5033 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5034 Register obj = allocator.useRegister(masm, objId);
5036 masm.typedArrayElementSize(obj, scratch);
5037 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5038 return true;
5041 bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId) {
5042 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5044 AutoScratchRegister scratch(allocator, masm);
5045 Register obj = allocator.useRegister(masm, objId);
5047 FailurePath* failure;
5048 if (!addFailurePath(&failure)) {
5049 return false;
5052 masm.branchIfHasDetachedArrayBuffer(obj, scratch, failure->label());
5053 return true;
5056 bool CacheIRCompiler::emitIsTypedArrayConstructorResult(ObjOperandId objId) {
5057 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5059 AutoOutputRegister output(*this);
5060 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5061 Register obj = allocator.useRegister(masm, objId);
5063 masm.setIsDefinitelyTypedArrayConstructor(obj, scratch);
5064 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
5065 return true;
5068 bool CacheIRCompiler::emitGetNextMapSetEntryForIteratorResult(
5069 ObjOperandId iterId, ObjOperandId resultArrId, bool isMap) {
5070 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5072 AutoOutputRegister output(*this);
5073 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5074 Register iter = allocator.useRegister(masm, iterId);
5075 Register resultArr = allocator.useRegister(masm, resultArrId);
5077 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5078 save.takeUnchecked(output.valueReg());
5079 save.takeUnchecked(scratch);
5080 masm.PushRegsInMask(save);
5082 masm.setupUnalignedABICall(scratch);
5083 masm.passABIArg(iter);
5084 masm.passABIArg(resultArr);
5085 if (isMap) {
5086 using Fn = bool (*)(MapIteratorObject* iter, ArrayObject* resultPairObj);
5087 masm.callWithABI<Fn, MapIteratorObject::next>();
5088 } else {
5089 using Fn = bool (*)(SetIteratorObject* iter, ArrayObject* resultObj);
5090 masm.callWithABI<Fn, SetIteratorObject::next>();
5092 masm.storeCallBoolResult(scratch);
5094 masm.PopRegsInMask(save);
5096 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
5097 return true;
5100 void CacheIRCompiler::emitActivateIterator(Register objBeingIterated,
5101 Register iterObject,
5102 Register nativeIter,
5103 Register scratch, Register scratch2,
5104 uint32_t enumeratorsAddrOffset) {
5105 // 'objectBeingIterated_' must be nullptr, so we don't need a pre-barrier.
5106 Address iterObjAddr(nativeIter,
5107 NativeIterator::offsetOfObjectBeingIterated());
5108 #ifdef DEBUG
5109 Label ok;
5110 masm.branchPtr(Assembler::Equal, iterObjAddr, ImmPtr(nullptr), &ok);
5111 masm.assumeUnreachable("iterator with non-null object");
5112 masm.bind(&ok);
5113 #endif
5115 // Mark iterator as active.
5116 Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
5117 masm.storePtr(objBeingIterated, iterObjAddr);
5118 masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
5120 // Post-write barrier for stores to 'objectBeingIterated_'.
5121 emitPostBarrierSlot(
5122 iterObject,
5123 TypedOrValueRegister(MIRType::Object, AnyRegister(objBeingIterated)),
5124 scratch);
5126 // Chain onto the active iterator stack.
5127 StubFieldOffset enumeratorsAddr(enumeratorsAddrOffset,
5128 StubField::Type::RawPointer);
5129 emitLoadStubField(enumeratorsAddr, scratch);
5130 masm.registerIterator(scratch, nativeIter, scratch2);
5133 bool CacheIRCompiler::emitObjectToIteratorResult(
5134 ObjOperandId objId, uint32_t enumeratorsAddrOffset) {
5135 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5137 AutoCallVM callvm(masm, this, allocator);
5138 Register obj = allocator.useRegister(masm, objId);
5140 AutoScratchRegister iterObj(allocator, masm);
5141 AutoScratchRegister scratch(allocator, masm);
5142 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, callvm.output());
5143 AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, callvm.output());
5145 Label callVM, done;
5146 masm.maybeLoadIteratorFromShape(obj, iterObj, scratch, scratch2, scratch3,
5147 &callVM);
5149 masm.loadPrivate(
5150 Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
5151 scratch);
5153 emitActivateIterator(obj, iterObj, scratch, scratch2, scratch3,
5154 enumeratorsAddrOffset);
5155 masm.jump(&done);
5157 masm.bind(&callVM);
5158 callvm.prepare();
5159 masm.Push(obj);
5160 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
5161 callvm.call<Fn, GetIterator>();
5162 masm.storeCallPointerResult(iterObj);
5164 masm.bind(&done);
5165 EmitStoreResult(masm, iterObj, JSVAL_TYPE_OBJECT, callvm.output());
5166 return true;
5169 bool CacheIRCompiler::emitValueToIteratorResult(ValOperandId valId) {
5170 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5172 AutoCallVM callvm(masm, this, allocator);
5174 ValueOperand val = allocator.useValueRegister(masm, valId);
5176 callvm.prepare();
5178 masm.Push(val);
5180 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
5181 callvm.call<Fn, ValueToIterator>();
5182 return true;
5185 bool CacheIRCompiler::emitNewArrayIteratorResult(
5186 uint32_t templateObjectOffset) {
5187 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5189 AutoCallVM callvm(masm, this, allocator);
5191 callvm.prepare();
5193 using Fn = ArrayIteratorObject* (*)(JSContext*);
5194 callvm.call<Fn, NewArrayIterator>();
5195 return true;
5198 bool CacheIRCompiler::emitNewStringIteratorResult(
5199 uint32_t templateObjectOffset) {
5200 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5202 AutoCallVM callvm(masm, this, allocator);
5204 callvm.prepare();
5206 using Fn = StringIteratorObject* (*)(JSContext*);
5207 callvm.call<Fn, NewStringIterator>();
5208 return true;
5211 bool CacheIRCompiler::emitNewRegExpStringIteratorResult(
5212 uint32_t templateObjectOffset) {
5213 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5215 AutoCallVM callvm(masm, this, allocator);
5217 callvm.prepare();
5219 using Fn = RegExpStringIteratorObject* (*)(JSContext*);
5220 callvm.call<Fn, NewRegExpStringIterator>();
5221 return true;
5224 bool CacheIRCompiler::emitObjectCreateResult(uint32_t templateObjectOffset) {
5225 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5227 AutoCallVM callvm(masm, this, allocator);
5228 AutoScratchRegister scratch(allocator, masm);
5230 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5231 emitLoadStubField(objectField, scratch);
5233 callvm.prepare();
5234 masm.Push(scratch);
5236 using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
5237 callvm.call<Fn, ObjectCreateWithTemplate>();
5238 return true;
5241 bool CacheIRCompiler::emitObjectKeysResult(ObjOperandId objId) {
5242 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5244 AutoCallVM callvm(masm, this, allocator);
5245 Register obj = allocator.useRegister(masm, objId);
5247 // We only record calls to Object.keys here so they can be elided when the
5248 // result is only partially used; this is not an alternative implementation.
5250 callvm.prepare();
5251 masm.Push(obj);
5253 using Fn = JSObject* (*)(JSContext*, HandleObject);
5254 callvm.call<Fn, jit::ObjectKeys>();
5257 return true;
5260 bool CacheIRCompiler::emitNewArrayFromLengthResult(
5261 uint32_t templateObjectOffset, Int32OperandId lengthId) {
5262 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5264 AutoCallVM callvm(masm, this, allocator);
5265 AutoScratchRegister scratch(allocator, masm);
5266 Register length = allocator.useRegister(masm, lengthId);
5268 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5269 emitLoadStubField(objectField, scratch);
5271 callvm.prepare();
5272 masm.Push(length);
5273 masm.Push(scratch);
5275 using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
5276 callvm.call<Fn, ArrayConstructorOneArg>();
5277 return true;
5280 bool CacheIRCompiler::emitNewTypedArrayFromLengthResult(
5281 uint32_t templateObjectOffset, Int32OperandId lengthId) {
5282 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5284 AutoCallVM callvm(masm, this, allocator);
5285 AutoScratchRegister scratch(allocator, masm);
5286 Register length = allocator.useRegister(masm, lengthId);
5288 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5289 emitLoadStubField(objectField, scratch);
5291 callvm.prepare();
5292 masm.Push(length);
5293 masm.Push(scratch);
5295 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
5296 callvm.call<Fn, NewTypedArrayWithTemplateAndLength>();
5297 return true;
5300 bool CacheIRCompiler::emitNewTypedArrayFromArrayBufferResult(
5301 uint32_t templateObjectOffset, ObjOperandId bufferId,
5302 ValOperandId byteOffsetId, ValOperandId lengthId) {
5303 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5305 #ifdef JS_CODEGEN_X86
5306 MOZ_CRASH("Instruction not supported on 32-bit x86, not enough registers");
5307 #endif
5309 AutoCallVM callvm(masm, this, allocator);
5310 AutoScratchRegister scratch(allocator, masm);
5311 Register buffer = allocator.useRegister(masm, bufferId);
5312 ValueOperand byteOffset = allocator.useValueRegister(masm, byteOffsetId);
5313 ValueOperand length = allocator.useValueRegister(masm, lengthId);
5315 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5316 emitLoadStubField(objectField, scratch);
5318 callvm.prepare();
5319 masm.Push(length);
5320 masm.Push(byteOffset);
5321 masm.Push(buffer);
5322 masm.Push(scratch);
5324 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
5325 HandleValue, HandleValue);
5326 callvm.call<Fn, NewTypedArrayWithTemplateAndBuffer>();
5327 return true;
5330 bool CacheIRCompiler::emitNewTypedArrayFromArrayResult(
5331 uint32_t templateObjectOffset, ObjOperandId arrayId) {
5332 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5334 AutoCallVM callvm(masm, this, allocator);
5335 AutoScratchRegister scratch(allocator, masm);
5336 Register array = allocator.useRegister(masm, arrayId);
5338 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5339 emitLoadStubField(objectField, scratch);
5341 callvm.prepare();
5342 masm.Push(array);
5343 masm.Push(scratch);
5345 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
5346 callvm.call<Fn, NewTypedArrayWithTemplateAndArray>();
5347 return true;
5348 }
5350 bool CacheIRCompiler::emitAddSlotAndCallAddPropHook(ObjOperandId objId,
5351 ValOperandId rhsId,
5352 uint32_t newShapeOffset) {
5353 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5355 AutoCallVM callvm(masm, this, allocator);
5357 AutoScratchRegister scratch(allocator, masm);
5358 Register obj = allocator.useRegister(masm, objId);
5359 ValueOperand rhs = allocator.useValueRegister(masm, rhsId);
5361 StubFieldOffset shapeField(newShapeOffset, StubField::Type::Shape);
5362 emitLoadStubField(shapeField, scratch);
5364 callvm.prepare();
5366 masm.Push(scratch);
5367 masm.Push(rhs);
5368 masm.Push(obj);
5370 using Fn =
5371 bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
5372 callvm.callNoResult<Fn, AddSlotAndCallAddPropHook>();
5373 return true;
5374 }
5376 bool CacheIRCompiler::emitMathAbsInt32Result(Int32OperandId inputId) {
5377 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5379 AutoOutputRegister output(*this);
5380 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5382 Register input = allocator.useRegister(masm, inputId);
5384 FailurePath* failure;
5385 if (!addFailurePath(&failure)) {
5386 return false;
5387 }
5389 masm.mov(input, scratch);
5390 // Don't negate already positive values.
5391 Label positive;
5392 masm.branchTest32(Assembler::NotSigned, scratch, scratch, &positive);
5393 // neg32 might overflow for INT_MIN.
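// For example, Math.abs(INT32_MIN) is 2147483648, which is not representable
// as an int32, so the negation below sets the overflow flag and we bail out.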
5394 masm.branchNeg32(Assembler::Overflow, scratch, failure->label());
5395 masm.bind(&positive);
5397 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5398 return true;
5399 }
5401 bool CacheIRCompiler::emitMathAbsNumberResult(NumberOperandId inputId) {
5402 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5404 AutoOutputRegister output(*this);
5405 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5407 allocator.ensureDoubleRegister(masm, inputId, scratch);
5409 masm.absDouble(scratch, scratch);
5410 masm.boxDouble(scratch, output.valueReg(), scratch);
5411 return true;
5412 }
5414 bool CacheIRCompiler::emitMathClz32Result(Int32OperandId inputId) {
5415 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5417 AutoOutputRegister output(*this);
5418 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5419 Register input = allocator.useRegister(masm, inputId);
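// Pass knownNotZero = false: the input may be 0, for which Math.clz32 is
// specified to return 32.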
5421 masm.clz32(input, scratch, /* knownNotZero = */ false);
5422 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5423 return true;
5424 }
5426 bool CacheIRCompiler::emitMathSignInt32Result(Int32OperandId inputId) {
5427 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5429 AutoOutputRegister output(*this);
5430 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5431 Register input = allocator.useRegister(masm, inputId);
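// Math.sign on an int32 input yields -1 for negative values, 0 for zero, and
// +1 for positive values, so the result always fits in an int32.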
5433 masm.signInt32(input, scratch);
5434 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5435 return true;
5436 }
5438 bool CacheIRCompiler::emitMathSignNumberResult(NumberOperandId inputId) {
5439 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5441 AutoOutputRegister output(*this);
5442 AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
5443 AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
5445 allocator.ensureDoubleRegister(masm, inputId, floatScratch1);
5447 masm.signDouble(floatScratch1, floatScratch2);
5448 masm.boxDouble(floatScratch2, output.valueReg(), floatScratch2);
5449 return true;
5450 }
5452 bool CacheIRCompiler::emitMathSignNumberToInt32Result(NumberOperandId inputId) {
5453 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5455 AutoOutputRegister output(*this);
5456 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5457 AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
5458 AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
5460 FailurePath* failure;
5461 if (!addFailurePath(&failure)) {
5462 return false;
5463 }
5465 allocator.ensureDoubleRegister(masm, inputId, floatScratch1);
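// The failure path is taken when the result is not representable as an int32;
// e.g. Math.sign(NaN) is NaN and Math.sign(-0) is -0.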
5467 masm.signDoubleToInt32(floatScratch1, scratch, floatScratch2,
5468 failure->label());
5469 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5470 return true;
5471 }
5473 bool CacheIRCompiler::emitMathImulResult(Int32OperandId lhsId,
5474 Int32OperandId rhsId) {
5475 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5477 AutoOutputRegister output(*this);
5478 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5479 Register lhs = allocator.useRegister(masm, lhsId);
5480 Register rhs = allocator.useRegister(masm, rhsId);
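// Math.imul keeps only the low 32 bits of the product, so mul32 overflow is
// not a failure case here.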
5482 masm.mov(lhs, scratch);
5483 masm.mul32(rhs, scratch);
5484 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5485 return true;
5486 }
5488 bool CacheIRCompiler::emitMathSqrtNumberResult(NumberOperandId inputId) {
5489 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5491 AutoOutputRegister output(*this);
5492 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5494 allocator.ensureDoubleRegister(masm, inputId, scratch);
5496 masm.sqrtDouble(scratch, scratch);
5497 masm.boxDouble(scratch, output.valueReg(), scratch);
5498 return true;
5499 }
5501 bool CacheIRCompiler::emitMathFloorNumberResult(NumberOperandId inputId) {
5502 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5504 AutoOutputRegister output(*this);
5505 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5507 allocator.ensureDoubleRegister(masm, inputId, scratch);
5509 if (Assembler::HasRoundInstruction(RoundingMode::Down)) {
5510 masm.nearbyIntDouble(RoundingMode::Down, scratch, scratch);
5511 masm.boxDouble(scratch, output.valueReg(), scratch);
5512 return true;
5513 }
5515 return emitMathFunctionNumberResultShared(UnaryMathFunction::Floor, scratch,
5516 output.valueReg());
5517 }
5519 bool CacheIRCompiler::emitMathCeilNumberResult(NumberOperandId inputId) {
5520 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5522 AutoOutputRegister output(*this);
5523 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5525 allocator.ensureDoubleRegister(masm, inputId, scratch);
5527 if (Assembler::HasRoundInstruction(RoundingMode::Up)) {
5528 masm.nearbyIntDouble(RoundingMode::Up, scratch, scratch);
5529 masm.boxDouble(scratch, output.valueReg(), scratch);
5530 return true;
5531 }
5533 return emitMathFunctionNumberResultShared(UnaryMathFunction::Ceil, scratch,
5534 output.valueReg());
5535 }
5537 bool CacheIRCompiler::emitMathTruncNumberResult(NumberOperandId inputId) {
5538 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5540 AutoOutputRegister output(*this);
5541 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5543 allocator.ensureDoubleRegister(masm, inputId, scratch);
5545 if (Assembler::HasRoundInstruction(RoundingMode::TowardsZero)) {
5546 masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
5547 masm.boxDouble(scratch, output.valueReg(), scratch);
5548 return true;
5549 }
5551 return emitMathFunctionNumberResultShared(UnaryMathFunction::Trunc, scratch,
5552 output.valueReg());
5553 }
5555 bool CacheIRCompiler::emitMathFRoundNumberResult(NumberOperandId inputId) {
5556 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5558 AutoOutputRegister output(*this);
5559 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5560 FloatRegister scratchFloat32 = scratch.get().asSingle();
5562 allocator.ensureDoubleRegister(masm, inputId, scratch);
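// Math.fround: narrowing to float32 and widening back to double rounds the
// input to the nearest single-precision value.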
5564 masm.convertDoubleToFloat32(scratch, scratchFloat32);
5565 masm.convertFloat32ToDouble(scratchFloat32, scratch);
5567 masm.boxDouble(scratch, output.valueReg(), scratch);
5568 return true;
5569 }
5571 bool CacheIRCompiler::emitMathHypot2NumberResult(NumberOperandId first,
5572 NumberOperandId second) {
5573 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5574 AutoOutputRegister output(*this);
5575 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5577 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
5578 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
5580 allocator.ensureDoubleRegister(masm, first, floatScratch0);
5581 allocator.ensureDoubleRegister(masm, second, floatScratch1);
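// The ABI call below clobbers volatile registers, so save them here and
// restore all of them afterwards except the float register holding the result.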
5583 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5584 masm.PushRegsInMask(save);
5586 using Fn = double (*)(double x, double y);
5587 masm.setupUnalignedABICall(scratch);
5588 masm.passABIArg(floatScratch0, ABIType::Float64);
5589 masm.passABIArg(floatScratch1, ABIType::Float64);
5591 masm.callWithABI<Fn, ecmaHypot>(ABIType::Float64);
5592 masm.storeCallFloatResult(floatScratch0);
5594 LiveRegisterSet ignore;
5595 ignore.add(floatScratch0);
5596 masm.PopRegsInMaskIgnore(save, ignore);
5598 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
5599 return true;
5600 }
5602 bool CacheIRCompiler::emitMathHypot3NumberResult(NumberOperandId first,
5603 NumberOperandId second,
5604 NumberOperandId third) {
5605 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5606 AutoOutputRegister output(*this);
5607 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5609 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
5610 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
5611 AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
5613 allocator.ensureDoubleRegister(masm, first, floatScratch0);
5614 allocator.ensureDoubleRegister(masm, second, floatScratch1);
5615 allocator.ensureDoubleRegister(masm, third, floatScratch2);
5617 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5618 masm.PushRegsInMask(save);
5620 using Fn = double (*)(double x, double y, double z);
5621 masm.setupUnalignedABICall(scratch);
5622 masm.passABIArg(floatScratch0, ABIType::Float64);
5623 masm.passABIArg(floatScratch1, ABIType::Float64);
5624 masm.passABIArg(floatScratch2, ABIType::Float64);
5626 masm.callWithABI<Fn, hypot3>(ABIType::Float64);
5627 masm.storeCallFloatResult(floatScratch0);
5629 LiveRegisterSet ignore;
5630 ignore.add(floatScratch0);
5631 masm.PopRegsInMaskIgnore(save, ignore);
5633 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
5634 return true;
5635 }
5637 bool CacheIRCompiler::emitMathHypot4NumberResult(NumberOperandId first,
5638 NumberOperandId second,
5639 NumberOperandId third,
5640 NumberOperandId fourth) {
5641 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5642 AutoOutputRegister output(*this);
5643 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5645 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
5646 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
5647 AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
5648 AutoAvailableFloatRegister floatScratch3(*this, FloatReg3);
5650 allocator.ensureDoubleRegister(masm, first, floatScratch0);
5651 allocator.ensureDoubleRegister(masm, second, floatScratch1);
5652 allocator.ensureDoubleRegister(masm, third, floatScratch2);
5653 allocator.ensureDoubleRegister(masm, fourth, floatScratch3);
5655 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5656 masm.PushRegsInMask(save);
5658 using Fn = double (*)(double x, double y, double z, double w);
5659 masm.setupUnalignedABICall(scratch);
5660 masm.passABIArg(floatScratch0, ABIType::Float64);
5661 masm.passABIArg(floatScratch1, ABIType::Float64);
5662 masm.passABIArg(floatScratch2, ABIType::Float64);
5663 masm.passABIArg(floatScratch3, ABIType::Float64);
5665 masm.callWithABI<Fn, hypot4>(ABIType::Float64);
5666 masm.storeCallFloatResult(floatScratch0);
5668 LiveRegisterSet ignore;
5669 ignore.add(floatScratch0);
5670 masm.PopRegsInMaskIgnore(save, ignore);
5672 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
5673 return true;
5674 }
5676 bool CacheIRCompiler::emitMathAtan2NumberResult(NumberOperandId yId,
5677 NumberOperandId xId) {
5678 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5679 AutoOutputRegister output(*this);
5680 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5682 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
5683 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
5685 allocator.ensureDoubleRegister(masm, yId, floatScratch0);
5686 allocator.ensureDoubleRegister(masm, xId, floatScratch1);
5688 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5689 masm.PushRegsInMask(save);
5691 using Fn = double (*)(double x, double y);
5692 masm.setupUnalignedABICall(scratch);
5693 masm.passABIArg(floatScratch0, ABIType::Float64);
5694 masm.passABIArg(floatScratch1, ABIType::Float64);
5695 masm.callWithABI<Fn, js::ecmaAtan2>(ABIType::Float64);
5696 masm.storeCallFloatResult(floatScratch0);
5698 LiveRegisterSet ignore;
5699 ignore.add(floatScratch0);
5700 masm.PopRegsInMaskIgnore(save, ignore);
5702 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
5704 return true;
5705 }
5707 bool CacheIRCompiler::emitMathFloorToInt32Result(NumberOperandId inputId) {
5708 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5710 AutoOutputRegister output(*this);
5711 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5713 AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
5715 FailurePath* failure;
5716 if (!addFailurePath(&failure)) {
5717 return false;
5718 }
5720 allocator.ensureDoubleRegister(masm, inputId, scratchFloat);
5722 masm.floorDoubleToInt32(scratchFloat, scratch, failure->label());
5724 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5725 return true;
5726 }
5728 bool CacheIRCompiler::emitMathCeilToInt32Result(NumberOperandId inputId) {
5729 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5731 AutoOutputRegister output(*this);
5732 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5734 AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
5736 FailurePath* failure;
5737 if (!addFailurePath(&failure)) {
5738 return false;
5739 }
5741 allocator.ensureDoubleRegister(masm, inputId, scratchFloat);
5743 masm.ceilDoubleToInt32(scratchFloat, scratch, failure->label());
5745 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5746 return true;
5747 }
5749 bool CacheIRCompiler::emitMathTruncToInt32Result(NumberOperandId inputId) {
5750 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5752 AutoOutputRegister output(*this);
5753 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5755 AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
5757 FailurePath* failure;
5758 if (!addFailurePath(&failure)) {
5759 return false;
5760 }
5762 allocator.ensureDoubleRegister(masm, inputId, scratchFloat);
5764 masm.truncDoubleToInt32(scratchFloat, scratch, failure->label());
5766 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5767 return true;
5768 }
5770 bool CacheIRCompiler::emitMathRoundToInt32Result(NumberOperandId inputId) {
5771 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5773 AutoOutputRegister output(*this);
5774 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5776 AutoAvailableFloatRegister scratchFloat0(*this, FloatReg0);
5777 AutoAvailableFloatRegister scratchFloat1(*this, FloatReg1);
5779 FailurePath* failure;
5780 if (!addFailurePath(&failure)) {
5781 return false;
5782 }
5784 allocator.ensureDoubleRegister(masm, inputId, scratchFloat0);
5786 masm.roundDoubleToInt32(scratchFloat0, scratch, scratchFloat1,
5787 failure->label());
5789 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5790 return true;
5791 }
5793 bool CacheIRCompiler::emitInt32MinMax(bool isMax, Int32OperandId firstId,
5794 Int32OperandId secondId,
5795 Int32OperandId resultId) {
5796 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5798 Register first = allocator.useRegister(masm, firstId);
5799 Register second = allocator.useRegister(masm, secondId);
5800 Register result = allocator.defineRegister(masm, resultId);
5802 Assembler::Condition cond =
5803 isMax ? Assembler::GreaterThan : Assembler::LessThan;
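// cmp32Move32 overwrites |result| (initialized to |first|) with |second| when
// |second| <cond> |first|, yielding the min or max without a taken branch on
// targets with conditional moves.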
5804 masm.move32(first, result);
5805 masm.cmp32Move32(cond, second, first, second, result);
5806 return true;
5807 }
5809 bool CacheIRCompiler::emitNumberMinMax(bool isMax, NumberOperandId firstId,
5810 NumberOperandId secondId,
5811 NumberOperandId resultId) {
5812 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5814 ValueOperand output = allocator.defineValueRegister(masm, resultId);
5816 AutoAvailableFloatRegister scratch1(*this, FloatReg0);
5817 AutoAvailableFloatRegister scratch2(*this, FloatReg1);
5819 allocator.ensureDoubleRegister(masm, firstId, scratch1);
5820 allocator.ensureDoubleRegister(masm, secondId, scratch2);
5822 if (isMax) {
5823 masm.maxDouble(scratch2, scratch1, /* handleNaN = */ true);
5824 } else {
5825 masm.minDouble(scratch2, scratch1, /* handleNaN = */ true);
5826 }
5828 masm.boxDouble(scratch1, output, scratch1);
5829 return true;
5830 }
5832 bool CacheIRCompiler::emitInt32MinMaxArrayResult(ObjOperandId arrayId,
5833 bool isMax) {
5834 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5836 AutoOutputRegister output(*this);
5837 Register array = allocator.useRegister(masm, arrayId);
5839 AutoScratchRegister scratch(allocator, masm);
5840 AutoScratchRegister scratch2(allocator, masm);
5841 AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);
5842 AutoScratchRegisterMaybeOutput result(allocator, masm, output);
5844 FailurePath* failure;
5845 if (!addFailurePath(&failure)) {
5846 return false;
5847 }
5849 masm.minMaxArrayInt32(array, result, scratch, scratch2, scratch3, isMax,
5850 failure->label());
5851 masm.tagValue(JSVAL_TYPE_INT32, result, output.valueReg());
5852 return true;
5853 }
5855 bool CacheIRCompiler::emitNumberMinMaxArrayResult(ObjOperandId arrayId,
5856 bool isMax) {
5857 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5859 AutoOutputRegister output(*this);
5860 Register array = allocator.useRegister(masm, arrayId);
5862 AutoAvailableFloatRegister result(*this, FloatReg0);
5863 AutoAvailableFloatRegister floatScratch(*this, FloatReg1);
5865 AutoScratchRegister scratch1(allocator, masm);
5866 AutoScratchRegister scratch2(allocator, masm);
5868 FailurePath* failure;
5869 if (!addFailurePath(&failure)) {
5870 return false;
5871 }
5873 masm.minMaxArrayNumber(array, result, floatScratch, scratch1, scratch2, isMax,
5874 failure->label());
5875 masm.boxDouble(result, output.valueReg(), result);
5876 return true;
5877 }
5879 bool CacheIRCompiler::emitMathFunctionNumberResultShared(
5880 UnaryMathFunction fun, FloatRegister inputScratch, ValueOperand output) {
5881 UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);
5883 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5884 save.takeUnchecked(inputScratch);
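// |inputScratch| also receives the call result below, so keep it out of the
// save set; otherwise the restore would clobber the result.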
5885 masm.PushRegsInMask(save);
5887 masm.setupUnalignedABICall(output.scratchReg());
5888 masm.passABIArg(inputScratch, ABIType::Float64);
5889 masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
5890 ABIType::Float64);
5891 masm.storeCallFloatResult(inputScratch);
5893 masm.PopRegsInMask(save);
5895 masm.boxDouble(inputScratch, output, inputScratch);
5896 return true;
5897 }
5899 bool CacheIRCompiler::emitMathFunctionNumberResult(NumberOperandId inputId,
5900 UnaryMathFunction fun) {
5901 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5903 AutoOutputRegister output(*this);
5904 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5906 allocator.ensureDoubleRegister(masm, inputId, scratch);
5908 return emitMathFunctionNumberResultShared(fun, scratch, output.valueReg());
5909 }
5911 static void EmitStoreDenseElement(MacroAssembler& masm,
5912 const ConstantOrRegister& value,
5913 BaseObjectElementIndex target) {
5914 if (value.constant()) {
5915 Value v = value.value();
5916 masm.storeValue(v, target);
5917 return;
5918 }
5920 TypedOrValueRegister reg = value.reg();
5921 masm.storeTypedOrValue(reg, target);
5922 }
5924 bool CacheIRCompiler::emitStoreDenseElement(ObjOperandId objId,
5925 Int32OperandId indexId,
5926 ValOperandId rhsId) {
5927 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5929 Register obj = allocator.useRegister(masm, objId);
5930 Register index = allocator.useRegister(masm, indexId);
5931 ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
5933 AutoScratchRegister scratch(allocator, masm);
5935 FailurePath* failure;
5936 if (!addFailurePath(&failure)) {
5937 return false;
5938 }
5940 // Load obj->elements in scratch.
5941 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
5943 // Bounds check. Unfortunately we don't have more registers available on
5944 // x86, so use InvalidReg there and accept slightly slower code.
5945 Register spectreTemp = InvalidReg;
5946 Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
5947 masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());
5949 // Hole check.
5950 BaseObjectElementIndex element(scratch, index);
5951 masm.branchTestMagic(Assembler::Equal, element, failure->label());
5953 // Perform the store.
5954 EmitPreBarrier(masm, element, MIRType::Value);
5955 EmitStoreDenseElement(masm, val, element);
5957 emitPostBarrierElement(obj, val, scratch, index);
5958 return true;
5959 }
5961 static void EmitAssertExtensibleElements(MacroAssembler& masm,
5962 Register elementsReg) {
5963 #ifdef DEBUG
5964 // Preceding shape guards ensure the object elements are extensible.
5965 Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
5966 Label ok;
5967 masm.branchTest32(Assembler::Zero, elementsFlags,
5968 Imm32(ObjectElements::Flags::NOT_EXTENSIBLE), &ok);
5969 masm.assumeUnreachable("Unexpected non-extensible elements");
5970 masm.bind(&ok);
5971 #endif
5972 }
5974 static void EmitAssertWritableArrayLengthElements(MacroAssembler& masm,
5975 Register elementsReg) {
5976 #ifdef DEBUG
5977 // Preceding shape guards ensure the array length is writable.
5978 Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
5979 Label ok;
5980 masm.branchTest32(Assembler::Zero, elementsFlags,
5981 Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
5982 &ok);
5983 masm.assumeUnreachable("Unexpected non-writable array length elements");
5984 masm.bind(&ok);
5985 #endif
5986 }
5988 bool CacheIRCompiler::emitStoreDenseElementHole(ObjOperandId objId,
5989 Int32OperandId indexId,
5990 ValOperandId rhsId,
5991 bool handleAdd) {
5992 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5994 Register obj = allocator.useRegister(masm, objId);
5995 Register index = allocator.useRegister(masm, indexId);
5996 ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
5998 AutoScratchRegister scratch(allocator, masm);
6000 FailurePath* failure;
6001 if (!addFailurePath(&failure)) {
6002 return false;
6003 }
6005 // Load obj->elements in scratch.
6006 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
6008 EmitAssertExtensibleElements(masm, scratch);
6009 if (handleAdd) {
6010 EmitAssertWritableArrayLengthElements(masm, scratch);
6011 }
6013 BaseObjectElementIndex element(scratch, index);
6014 Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
6015 Address elementsFlags(scratch, ObjectElements::offsetOfFlags());
6017 // We don't have enough registers on x86, so use InvalidReg. This emits
6018 // slightly less efficient code there.
6019 Register spectreTemp = InvalidReg;
6021 Label storeSkipPreBarrier;
6022 if (handleAdd) {
6023 // Bounds check.
6024 Label inBounds, outOfBounds;
6025 masm.spectreBoundsCheck32(index, initLength, spectreTemp, &outOfBounds);
6026 masm.jump(&inBounds);
6028 // If we're out-of-bounds, only handle the index == initLength case.
6029 masm.bind(&outOfBounds);
6030 masm.branch32(Assembler::NotEqual, initLength, index, failure->label());
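// (Appending at index == initLength keeps the elements dense; any index past
// that would create a hole, which this stub does not support.)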
6032 // If index < capacity, we can add a dense element inline. If not we
6033 // need to allocate more elements.
6034 Label allocElement, addNewElement;
6035 Address capacity(scratch, ObjectElements::offsetOfCapacity());
6036 masm.spectreBoundsCheck32(index, capacity, spectreTemp, &allocElement);
6037 masm.jump(&addNewElement);
6039 masm.bind(&allocElement);
6040 {
6041 LiveRegisterSet save(GeneralRegisterSet::Volatile(),
6042 liveVolatileFloatRegs());
6043 save.takeUnchecked(scratch);
6044 masm.PushRegsInMask(save);
6046 using Fn = bool (*)(JSContext* cx, NativeObject* obj);
6047 masm.setupUnalignedABICall(scratch);
6048 masm.loadJSContext(scratch);
6049 masm.passABIArg(scratch);
6050 masm.passABIArg(obj);
6051 masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
6052 masm.storeCallPointerResult(scratch);
6054 masm.PopRegsInMask(save);
6055 masm.branchIfFalseBool(scratch, failure->label());
6057 // Load the reallocated elements pointer.
6058 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
6059 }
6060 masm.bind(&addNewElement);
6062 // Increment initLength.
6063 masm.add32(Imm32(1), initLength);
6065 // If length is now <= index, increment length too.
6066 Label skipIncrementLength;
6067 Address length(scratch, ObjectElements::offsetOfLength());
6068 masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
6069 masm.add32(Imm32(1), length);
6070 masm.bind(&skipIncrementLength);
6072 // Skip EmitPreBarrier as the memory is uninitialized.
6073 masm.jump(&storeSkipPreBarrier);
6075 masm.bind(&inBounds);
6076 } else {
6077 // Fail if index >= initLength.
6078 masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());
6079 }
6081 EmitPreBarrier(masm, element, MIRType::Value);
6083 masm.bind(&storeSkipPreBarrier);
6084 EmitStoreDenseElement(masm, val, element);
6086 emitPostBarrierElement(obj, val, scratch, index);
6087 return true;
6088 }
6090 bool CacheIRCompiler::emitArrayPush(ObjOperandId objId, ValOperandId rhsId) {
6091 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6093 AutoOutputRegister output(*this);
6094 Register obj = allocator.useRegister(masm, objId);
6095 ValueOperand val = allocator.useValueRegister(masm, rhsId);
6097 AutoScratchRegisterMaybeOutput scratchLength(allocator, masm, output);
6098 AutoScratchRegisterMaybeOutputType scratch(allocator, masm, output);
6100 FailurePath* failure;
6101 if (!addFailurePath(&failure)) {
6102 return false;
6105 // Load obj->elements in scratch.
6106 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
6108 EmitAssertExtensibleElements(masm, scratch);
6109 EmitAssertWritableArrayLengthElements(masm, scratch);
6111 Address elementsInitLength(scratch,
6112 ObjectElements::offsetOfInitializedLength());
6113 Address elementsLength(scratch, ObjectElements::offsetOfLength());
6114 Address capacity(scratch, ObjectElements::offsetOfCapacity());
6116 // Fail if length != initLength.
6117 masm.load32(elementsInitLength, scratchLength);
6118 masm.branch32(Assembler::NotEqual, elementsLength, scratchLength,
6119 failure->label());
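// (length == initLength means there are no trailing holes, so the push below
// can simply append at the end.)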
6121 // If scratchLength < capacity, we can add a dense element inline. If not we
6122 // need to allocate more elements.
6123 Label allocElement, addNewElement;
6124 masm.spectreBoundsCheck32(scratchLength, capacity, InvalidReg, &allocElement);
6125 masm.jump(&addNewElement);
6127 masm.bind(&allocElement);
6128 {
6129 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
6130 save.takeUnchecked(scratch);
6131 masm.PushRegsInMask(save);
6133 using Fn = bool (*)(JSContext* cx, NativeObject* obj);
6134 masm.setupUnalignedABICall(scratch);
6135 masm.loadJSContext(scratch);
6136 masm.passABIArg(scratch);
6137 masm.passABIArg(obj);
6138 masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
6139 masm.storeCallPointerResult(scratch);
6141 masm.PopRegsInMask(save);
6142 masm.branchIfFalseBool(scratch, failure->label());
6144 // Load the reallocated elements pointer.
6145 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
6146 }
6147 masm.bind(&addNewElement);
6149 // Increment initLength and length.
6150 masm.add32(Imm32(1), elementsInitLength);
6151 masm.add32(Imm32(1), elementsLength);
6153 // Store the value.
6154 BaseObjectElementIndex element(scratch, scratchLength);
6155 masm.storeValue(val, element);
6156 emitPostBarrierElement(obj, val, scratch, scratchLength);
6158 // Return value is new length.
6159 masm.add32(Imm32(1), scratchLength);
6160 masm.tagValue(JSVAL_TYPE_INT32, scratchLength, output.valueReg());
6162 return true;
6163 }
6165 bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
6166 Scalar::Type elementType,
6167 IntPtrOperandId indexId,
6168 uint32_t rhsId,
6169 bool handleOOB) {
6170 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6171 Register obj = allocator.useRegister(masm, objId);
6172 Register index = allocator.useRegister(masm, indexId);
6174 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
6176 Maybe<Register> valInt32;
6177 Maybe<Register> valBigInt;
6178 switch (elementType) {
6179 case Scalar::Int8:
6180 case Scalar::Uint8:
6181 case Scalar::Int16:
6182 case Scalar::Uint16:
6183 case Scalar::Int32:
6184 case Scalar::Uint32:
6185 case Scalar::Uint8Clamped:
6186 valInt32.emplace(allocator.useRegister(masm, Int32OperandId(rhsId)));
6187 break;
6189 case Scalar::Float32:
6190 case Scalar::Float64:
6191 allocator.ensureDoubleRegister(masm, NumberOperandId(rhsId),
6192 floatScratch0);
6193 break;
6195 case Scalar::BigInt64:
6196 case Scalar::BigUint64:
6197 valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(rhsId)));
6198 break;
6200 case Scalar::MaxTypedArrayViewType:
6201 case Scalar::Int64:
6202 case Scalar::Simd128:
6203 MOZ_CRASH("Unsupported TypedArray type");
6204 }
6206 AutoScratchRegister scratch1(allocator, masm);
6207 Maybe<AutoScratchRegister> scratch2;
6208 Maybe<AutoSpectreBoundsScratchRegister> spectreScratch;
6209 if (Scalar::isBigIntType(elementType)) {
6210 scratch2.emplace(allocator, masm);
6211 } else {
6212 spectreScratch.emplace(allocator, masm);
6213 }
6215 FailurePath* failure = nullptr;
6216 if (!handleOOB) {
6217 if (!addFailurePath(&failure)) {
6218 return false;
6219 }
6220 }
6222 // Bounds check.
6223 Label done;
6224 Register spectreTemp = scratch2 ? scratch2->get() : spectreScratch->get();
6225 masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
6226 masm.spectreBoundsCheckPtr(index, scratch1, spectreTemp,
6227 handleOOB ? &done : failure->label());
6229 // Load the elements vector.
6230 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);
6232 BaseIndex dest(scratch1, index, ScaleFromScalarType(elementType));
6234 if (Scalar::isBigIntType(elementType)) {
6235 #ifdef JS_PUNBOX64
6236 Register64 temp(scratch2->get());
6237 #else
6238 // We don't have more registers available on x86, so spill |obj|.
6239 masm.push(obj);
6240 Register64 temp(scratch2->get(), obj);
6241 #endif
6243 masm.loadBigInt64(*valBigInt, temp);
6244 masm.storeToTypedBigIntArray(elementType, temp, dest);
6246 #ifndef JS_PUNBOX64
6247 masm.pop(obj);
6248 #endif
6249 } else if (elementType == Scalar::Float32) {
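// The RHS was computed as a double; round it to single precision before
// storing into the Float32 array.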
6250 ScratchFloat32Scope fpscratch(masm);
6251 masm.convertDoubleToFloat32(floatScratch0, fpscratch);
6252 masm.storeToTypedFloatArray(elementType, fpscratch, dest);
6253 } else if (elementType == Scalar::Float64) {
6254 masm.storeToTypedFloatArray(elementType, floatScratch0, dest);
6255 } else {
6256 masm.storeToTypedIntArray(elementType, *valInt32, dest);
6257 }
6259 masm.bind(&done);
6260 return true;
6261 }
6263 static gc::Heap InitialBigIntHeap(JSContext* cx) {
6264 JS::Zone* zone = cx->zone();
6265 return zone->allocNurseryBigInts() ? gc::Heap::Default : gc::Heap::Tenured;
6266 }
6268 static void EmitAllocateBigInt(MacroAssembler& masm, Register result,
6269 Register temp, const LiveRegisterSet& liveSet,
6270 gc::Heap initialHeap, Label* fail) {
6271 Label fallback, done;
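// Fast path: try to allocate the BigInt inline; if that fails, fall back to
// the VM allocation helper below.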
6272 masm.newGCBigInt(result, temp, initialHeap, &fallback);
6273 masm.jump(&done);
6275 masm.bind(&fallback);
6277 // Request a minor collection at a later time if nursery allocation failed.
6278 bool requestMinorGC = initialHeap == gc::Heap::Default;
6280 masm.PushRegsInMask(liveSet);
6281 using Fn = void* (*)(JSContext* cx, bool requestMinorGC);
6282 masm.setupUnalignedABICall(temp);
6283 masm.loadJSContext(temp);
6284 masm.passABIArg(temp);
6285 masm.move32(Imm32(requestMinorGC), result);
6286 masm.passABIArg(result);
6287 masm.callWithABI<Fn, jit::AllocateBigIntNoGC>();
6288 masm.storeCallPointerResult(result);
6290 masm.PopRegsInMask(liveSet);
6291 masm.branchPtr(Assembler::Equal, result, ImmWord(0), fail);
6293 masm.bind(&done);
6294 }
6296 bool CacheIRCompiler::emitLoadTypedArrayElementResult(
6297 ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
6298 bool handleOOB, bool forceDoubleForUint32) {
6299 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6300 AutoOutputRegister output(*this);
6301 Register obj = allocator.useRegister(masm, objId);
6302 Register index = allocator.useRegister(masm, indexId);
6304 AutoScratchRegister scratch1(allocator, masm);
6305 #ifdef JS_PUNBOX64
6306 AutoScratchRegister scratch2(allocator, masm);
6307 #else
6308 // There are too few registers available on x86, so we may need to reuse the
6309 // output's scratch register.
6310 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
6311 #endif
6313 FailurePath* failure;
6314 if (!addFailurePath(&failure)) {
6315 return false;
6316 }
6318 // Bounds check.
6319 Label outOfBounds;
6320 masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
6321 masm.spectreBoundsCheckPtr(index, scratch1, scratch2,
6322 handleOOB ? &outOfBounds : failure->label());
6324 // Allocate BigInt if needed. The code after this should be infallible.
6325 Maybe<Register> bigInt;
6326 if (Scalar::isBigIntType(elementType)) {
6327 bigInt.emplace(output.valueReg().scratchReg());
6329 LiveRegisterSet save(GeneralRegisterSet::Volatile(),
6330 liveVolatileFloatRegs());
6331 save.takeUnchecked(scratch1);
6332 save.takeUnchecked(scratch2);
6333 save.takeUnchecked(output);
6335 gc::Heap initialHeap = InitialBigIntHeap(cx_);
6336 EmitAllocateBigInt(masm, *bigInt, scratch1, save, initialHeap,
6337 failure->label());
6338 }
6340 // Load the elements vector.
6341 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);
6343 // Load the value.
6344 BaseIndex source(scratch1, index, ScaleFromScalarType(elementType));
6346 if (Scalar::isBigIntType(elementType)) {
6347 #ifdef JS_PUNBOX64
6348 Register64 temp(scratch2);
6349 #else
6350 // We don't have more registers available on x86, so spill |obj| and
6351 // additionally use the output's type register.
6352 MOZ_ASSERT(output.valueReg().scratchReg() != output.valueReg().typeReg());
6353 masm.push(obj);
6354 Register64 temp(output.valueReg().typeReg(), obj);
6355 #endif
6357 masm.loadFromTypedBigIntArray(elementType, source, *bigInt, temp);
6359 #ifndef JS_PUNBOX64
6360 masm.pop(obj);
6361 #endif
6363 masm.tagValue(JSVAL_TYPE_BIGINT, *bigInt, output.valueReg());
6364 } else {
6365 MacroAssembler::Uint32Mode uint32Mode =
6366 forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
6367 : MacroAssembler::Uint32Mode::FailOnDouble;
6368 masm.loadFromTypedArray(elementType, source, output.valueReg(), uint32Mode,
6369 scratch1, failure->label());
6370 }
6372 if (handleOOB) {
6373 Label done;
6374 masm.jump(&done);
6376 masm.bind(&outOfBounds);
6377 masm.moveValue(UndefinedValue(), output.valueReg());
6379 masm.bind(&done);
6380 }
6382 return true;
6383 }
6385 static void EmitDataViewBoundsCheck(MacroAssembler& masm, size_t byteSize,
6386 Register obj, Register offset,
6387 Register scratch, Label* fail) {
6388 // Ensure both offset < length and offset + (byteSize - 1) < length.
6389 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
6390 if (byteSize == 1) {
6391 masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
6392 } else {
6393 // temp := length - (byteSize - 1)
6394 // if temp < 0: fail
6395 // if offset >= temp: fail
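// Example: byteSize == 8 on a 16-byte view gives temp = 16 - 7 = 9, so
// offsets 0..8 pass the check, and 8 is the last offset where all 8 bytes fit.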
6396 masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), scratch, fail);
6397 masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
6398 }
6399 }
6401 bool CacheIRCompiler::emitLoadDataViewValueResult(
6402 ObjOperandId objId, IntPtrOperandId offsetId,
6403 BooleanOperandId littleEndianId, Scalar::Type elementType,
6404 bool forceDoubleForUint32) {
6405 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6407 AutoOutputRegister output(*this);
6408 Register obj = allocator.useRegister(masm, objId);
6409 Register offset = allocator.useRegister(masm, offsetId);
6410 Register littleEndian = allocator.useRegister(masm, littleEndianId);
6412 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
6414 Register64 outputReg64 = output.valueReg().toRegister64();
6415 Register outputScratch = outputReg64.scratchReg();
6417 FailurePath* failure;
6418 if (!addFailurePath(&failure)) {
6419 return false;
6420 }
6422 const size_t byteSize = Scalar::byteSize(elementType);
6424 EmitDataViewBoundsCheck(masm, byteSize, obj, offset, outputScratch,
6425 failure->label());
6427 masm.loadPtr(Address(obj, DataViewObject::dataOffset()), outputScratch);
6429 // Load the value.
6430 BaseIndex source(outputScratch, offset, TimesOne);
6431 switch (elementType) {
6432 case Scalar::Int8:
6433 masm.load8SignExtend(source, outputScratch);
6434 break;
6435 case Scalar::Uint8:
6436 masm.load8ZeroExtend(source, outputScratch);
6437 break;
6438 case Scalar::Int16:
6439 masm.load16UnalignedSignExtend(source, outputScratch);
6440 break;
6441 case Scalar::Uint16:
6442 masm.load16UnalignedZeroExtend(source, outputScratch);
6443 break;
6444 case Scalar::Int32:
6445 case Scalar::Uint32:
6446 case Scalar::Float32:
6447 masm.load32Unaligned(source, outputScratch);
6448 break;
6449 case Scalar::Float64:
6450 case Scalar::BigInt64:
6451 case Scalar::BigUint64:
6452 masm.load64Unaligned(source, outputReg64);
6453 break;
6454 case Scalar::Uint8Clamped:
6455 default:
6456 MOZ_CRASH("Invalid typed array type");
6457 }
6459 // Swap the bytes in the loaded value.
6460 if (byteSize > 1) {
6461 Label skip;
6462 masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
6463 littleEndian, Imm32(0), &skip);
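// Example: a getUint32 read of bytes 11 22 33 44 with littleEndian == false
// on a little-endian host loads 0x44332211, and byteSwap32 below produces the
// expected 0x11223344.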
6465 switch (elementType) {
6466 case Scalar::Int16:
6467 masm.byteSwap16SignExtend(outputScratch);
6468 break;
6469 case Scalar::Uint16:
6470 masm.byteSwap16ZeroExtend(outputScratch);
6471 break;
6472 case Scalar::Int32:
6473 case Scalar::Uint32:
6474 case Scalar::Float32:
6475 masm.byteSwap32(outputScratch);
6476 break;
6477 case Scalar::Float64:
6478 case Scalar::BigInt64:
6479 case Scalar::BigUint64:
6480 masm.byteSwap64(outputReg64);
6481 break;
6482 case Scalar::Int8:
6483 case Scalar::Uint8:
6484 case Scalar::Uint8Clamped:
6485 default:
6486 MOZ_CRASH("Invalid type");
6487 }
6489 masm.bind(&skip);
6490 }
6492 // Move the value into the output register.
6493 switch (elementType) {
6494 case Scalar::Int8:
6495 case Scalar::Uint8:
6496 case Scalar::Int16:
6497 case Scalar::Uint16:
6498 case Scalar::Int32:
6499 masm.tagValue(JSVAL_TYPE_INT32, outputScratch, output.valueReg());
6500 break;
6501 case Scalar::Uint32: {
6502 MacroAssembler::Uint32Mode uint32Mode =
6503 forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
6504 : MacroAssembler::Uint32Mode::FailOnDouble;
6505 masm.boxUint32(outputScratch, output.valueReg(), uint32Mode,
6506 failure->label());
6507 break;
6508 }
6509 case Scalar::Float32: {
6510 FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
6511 masm.moveGPRToFloat32(outputScratch, scratchFloat32);
6512 masm.canonicalizeFloat(scratchFloat32);
6513 masm.convertFloat32ToDouble(scratchFloat32, floatScratch0);
6514 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
6515 break;
6516 }
6517 case Scalar::Float64:
6518 masm.moveGPR64ToDouble(outputReg64, floatScratch0);
6519 masm.canonicalizeDouble(floatScratch0);
6520 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
6521 break;
6522 case Scalar::BigInt64:
6523 case Scalar::BigUint64: {
6524 // We need two extra registers. Reuse the obj/littleEndian registers.
6525 Register bigInt = obj;
6526 Register bigIntScratch = littleEndian;
6527 masm.push(bigInt);
6528 masm.push(bigIntScratch);
6529 Label fail, done;
6530 LiveRegisterSet save(GeneralRegisterSet::Volatile(),
6531 liveVolatileFloatRegs());
6532 save.takeUnchecked(bigInt);
6533 save.takeUnchecked(bigIntScratch);
6534 gc::Heap initialHeap = InitialBigIntHeap(cx_);
6535 EmitAllocateBigInt(masm, bigInt, bigIntScratch, save, initialHeap, &fail);
6536 masm.jump(&done);
6538 masm.bind(&fail);
6539 masm.pop(bigIntScratch);
6540 masm.pop(bigInt);
6541 masm.jump(failure->label());
6543 masm.bind(&done);
6544 masm.initializeBigInt64(elementType, bigInt, outputReg64);
6545 masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
6546 masm.pop(bigIntScratch);
6547 masm.pop(bigInt);
6548 break;
6549 }
6550 case Scalar::Uint8Clamped:
6551 default:
6552 MOZ_CRASH("Invalid typed array type");
6553 }
6555 return true;
6556 }
6558 bool CacheIRCompiler::emitStoreDataViewValueResult(
6559 ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
6560 BooleanOperandId littleEndianId, Scalar::Type elementType) {
6561 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6563 AutoOutputRegister output(*this);
6564 #ifdef JS_CODEGEN_X86
6565 // Use a scratch register to avoid running out of registers.
6566 Register obj = output.valueReg().typeReg();
6567 allocator.copyToScratchRegister(masm, objId, obj);
6568 #else
6569 Register obj = allocator.useRegister(masm, objId);
6570 #endif
6571 Register offset = allocator.useRegister(masm, offsetId);
6572 Register littleEndian = allocator.useRegister(masm, littleEndianId);
6574 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
6575 Maybe<Register> valInt32;
6576 Maybe<Register> valBigInt;
6577 switch (elementType) {
6578 case Scalar::Int8:
6579 case Scalar::Uint8:
6580 case Scalar::Int16:
6581 case Scalar::Uint16:
6582 case Scalar::Int32:
6583 case Scalar::Uint32:
6584 case Scalar::Uint8Clamped:
6585 valInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
6586 break;
6588 case Scalar::Float32:
6589 case Scalar::Float64:
6590 allocator.ensureDoubleRegister(masm, NumberOperandId(valueId),
6591 floatScratch0);
6592 break;
6594 case Scalar::BigInt64:
6595 case Scalar::BigUint64:
6596 valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
6597 break;
6599 case Scalar::MaxTypedArrayViewType:
6600 case Scalar::Int64:
6601 case Scalar::Simd128:
6602 MOZ_CRASH("Unsupported type");
6603 }
6605 Register scratch1 = output.valueReg().scratchReg();
6606 MOZ_ASSERT(scratch1 != obj, "scratchReg must not be typeReg");
6608 // On platforms with enough registers, |scratch2| is an extra scratch register
6609 // (pair) used for byte-swapping the value.
6610 #ifndef JS_CODEGEN_X86
6611 mozilla::MaybeOneOf<AutoScratchRegister, AutoScratchRegister64> scratch2;
6612 switch (elementType) {
6613 case Scalar::Int8:
6614 case Scalar::Uint8:
6615 break;
6616 case Scalar::Int16:
6617 case Scalar::Uint16:
6618 case Scalar::Int32:
6619 case Scalar::Uint32:
6620 case Scalar::Float32:
6621 scratch2.construct<AutoScratchRegister>(allocator, masm);
6622 break;
6623 case Scalar::Float64:
6624 case Scalar::BigInt64:
6625 case Scalar::BigUint64:
6626 scratch2.construct<AutoScratchRegister64>(allocator, masm);
6627 break;
6628 case Scalar::Uint8Clamped:
6629 default:
6630 MOZ_CRASH("Invalid type");
6631 }
6632 #endif
6634 FailurePath* failure;
6635 if (!addFailurePath(&failure)) {
6636 return false;
6637 }
6639 const size_t byteSize = Scalar::byteSize(elementType);
6641 EmitDataViewBoundsCheck(masm, byteSize, obj, offset, scratch1,
6642 failure->label());
6644 masm.loadPtr(Address(obj, DataViewObject::dataOffset()), scratch1);
6645 BaseIndex dest(scratch1, offset, TimesOne);
6647 if (byteSize == 1) {
6648 // Byte swapping has no effect, so just do the byte store.
6649 masm.store8(*valInt32, dest);
6650 masm.moveValue(UndefinedValue(), output.valueReg());
6651 return true;
6652 }
6654 // On 32-bit x86, |obj| is already a scratch register, so reuse it. If we
6655 // need a Register64 we also take the littleEndian register; its value is
6656 // pushed so the endianness check below can read it from the stack.
6657 bool pushedLittleEndian = false;
6658 #ifdef JS_CODEGEN_X86
6659 if (byteSize == 8) {
6660 masm.push(littleEndian);
6661 pushedLittleEndian = true;
6662 }
6663 auto valScratch32 = [&]() -> Register { return obj; };
6664 auto valScratch64 = [&]() -> Register64 {
6665 return Register64(obj, littleEndian);
6666 };
6667 #else
6668 auto valScratch32 = [&]() -> Register {
6669 return scratch2.ref<AutoScratchRegister>();
6670 };
6671 auto valScratch64 = [&]() -> Register64 {
6672 return scratch2.ref<AutoScratchRegister64>();
6673 };
6674 #endif
6676 // Load the value into a gpr register.
6677 switch (elementType) {
6678 case Scalar::Int16:
6679 case Scalar::Uint16:
6680 case Scalar::Int32:
6681 case Scalar::Uint32:
6682 masm.move32(*valInt32, valScratch32());
6683 break;
6684 case Scalar::Float32: {
6685 FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
6686 masm.convertDoubleToFloat32(floatScratch0, scratchFloat32);
6687 masm.canonicalizeFloatIfDeterministic(scratchFloat32);
6688 masm.moveFloat32ToGPR(scratchFloat32, valScratch32());
6689 break;
6690 }
6691 case Scalar::Float64: {
6692 masm.canonicalizeDoubleIfDeterministic(floatScratch0);
6693 masm.moveDoubleToGPR64(floatScratch0, valScratch64());
6694 break;
6695 }
6696 case Scalar::BigInt64:
6697 case Scalar::BigUint64:
6698 masm.loadBigInt64(*valBigInt, valScratch64());
6699 break;
6700 case Scalar::Int8:
6701 case Scalar::Uint8:
6702 case Scalar::Uint8Clamped:
6703 default:
6704 MOZ_CRASH("Invalid type");
6705 }
6707 // Swap the bytes in the loaded value.
6708 Label skip;
6709 if (pushedLittleEndian) {
6710 masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
6711 Address(masm.getStackPointer(), 0), Imm32(0), &skip);
6712 } else {
6713 masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
6714 littleEndian, Imm32(0), &skip);
6715 }
6716 switch (elementType) {
6717 case Scalar::Int16:
6718 masm.byteSwap16SignExtend(valScratch32());
6719 break;
6720 case Scalar::Uint16:
6721 masm.byteSwap16ZeroExtend(valScratch32());
6722 break;
6723 case Scalar::Int32:
6724 case Scalar::Uint32:
6725 case Scalar::Float32:
6726 masm.byteSwap32(valScratch32());
6727 break;
6728 case Scalar::Float64:
6729 case Scalar::BigInt64:
6730 case Scalar::BigUint64:
6731 masm.byteSwap64(valScratch64());
6732 break;
6733 case Scalar::Int8:
6734 case Scalar::Uint8:
6735 case Scalar::Uint8Clamped:
6736 default:
6737 MOZ_CRASH("Invalid type");
6738 }
6739 masm.bind(&skip);
6741 // Store the value.
6742 switch (elementType) {
6743 case Scalar::Int16:
6744 case Scalar::Uint16:
6745 masm.store16Unaligned(valScratch32(), dest);
6746 break;
6747 case Scalar::Int32:
6748 case Scalar::Uint32:
6749 case Scalar::Float32:
6750 masm.store32Unaligned(valScratch32(), dest);
6751 break;
6752 case Scalar::Float64:
6753 case Scalar::BigInt64:
6754 case Scalar::BigUint64:
6755 masm.store64Unaligned(valScratch64(), dest);
6756 break;
6757 case Scalar::Int8:
6758 case Scalar::Uint8:
6759 case Scalar::Uint8Clamped:
6760 default:
6761 MOZ_CRASH("Invalid typed array type");
6762 }
6764 #ifdef JS_CODEGEN_X86
6765 // Restore registers.
6766 if (pushedLittleEndian) {
6767 masm.pop(littleEndian);
6768 }
6769 #endif
6771 masm.moveValue(UndefinedValue(), output.valueReg());
6772 return true;
6773 }
6775 bool CacheIRCompiler::emitStoreFixedSlotUndefinedResult(ObjOperandId objId,
6776 uint32_t offsetOffset,
6777 ValOperandId rhsId) {
6778 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6780 AutoOutputRegister output(*this);
6781 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
6782 Register obj = allocator.useRegister(masm, objId);
6783 ValueOperand val = allocator.useValueRegister(masm, rhsId);
6785 StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
6786 emitLoadStubField(offset, scratch);
6788 BaseIndex slot(obj, scratch, TimesOne);
6789 EmitPreBarrier(masm, slot, MIRType::Value);
6790 masm.storeValue(val, slot);
6791 emitPostBarrierSlot(obj, val, scratch);
6793 masm.moveValue(UndefinedValue(), output.valueReg());
6794 return true;
6795 }
6797 bool CacheIRCompiler::emitLoadObjectResult(ObjOperandId objId) {
6798 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6799 AutoOutputRegister output(*this);
6800 Register obj = allocator.useRegister(masm, objId);
6802 EmitStoreResult(masm, obj, JSVAL_TYPE_OBJECT, output);
6804 return true;
6805 }
6807 bool CacheIRCompiler::emitLoadStringResult(StringOperandId strId) {
6808 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6809 AutoOutputRegister output(*this);
6810 Register str = allocator.useRegister(masm, strId);
6812 masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
6814 return true;
6815 }
6817 bool CacheIRCompiler::emitLoadSymbolResult(SymbolOperandId symId) {
6818 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6819 AutoOutputRegister output(*this);
6820 Register sym = allocator.useRegister(masm, symId);
6822 masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
6824 return true;
6825 }
6827 bool CacheIRCompiler::emitLoadInt32Result(Int32OperandId valId) {
6828 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6829 AutoOutputRegister output(*this);
6830 Register val = allocator.useRegister(masm, valId);
6832 masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
6834 return true;
6835 }
6837 bool CacheIRCompiler::emitLoadBigIntResult(BigIntOperandId valId) {
6838 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6839 AutoOutputRegister output(*this);
6840 Register val = allocator.useRegister(masm, valId);
6842 masm.tagValue(JSVAL_TYPE_BIGINT, val, output.valueReg());
6844 return true;
6845 }
6847 bool CacheIRCompiler::emitLoadDoubleResult(NumberOperandId valId) {
6848 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6849 AutoOutputRegister output(*this);
6850 ValueOperand val = allocator.useValueRegister(masm, valId);
6852 #ifdef DEBUG
6853 Label ok;
6854 masm.branchTestDouble(Assembler::Equal, val, &ok);
6855 masm.branchTestInt32(Assembler::Equal, val, &ok);
6856 masm.assumeUnreachable("input must be double or int32");
6857 masm.bind(&ok);
6858 #endif
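// The input may be a tagged int32; convert it in place so callers always
// observe a double value.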
6860 masm.moveValue(val, output.valueReg());
6861 masm.convertInt32ValueToDouble(output.valueReg());
6863 return true;
6864 }
6866 bool CacheIRCompiler::emitLoadTypeOfObjectResult(ObjOperandId objId) {
6867 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6868 AutoOutputRegister output(*this);
6869 Register obj = allocator.useRegister(masm, objId);
6870 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
6872 Label slowCheck, isObject, isCallable, isUndefined, done;
6873 masm.typeOfObject(obj, scratch, &slowCheck, &isObject, &isCallable,
6874 &isUndefined);
6876 masm.bind(&isCallable);
6877 masm.moveValue(StringValue(cx_->names().function), output.valueReg());
6878 masm.jump(&done);
6880 masm.bind(&isUndefined);
6881 masm.moveValue(StringValue(cx_->names().undefined), output.valueReg());
6882 masm.jump(&done);
6884 masm.bind(&isObject);
6885 masm.moveValue(StringValue(cx_->names().object), output.valueReg());
6886 masm.jump(&done);
6888 {
6889 masm.bind(&slowCheck);
6890 LiveRegisterSet save(GeneralRegisterSet::Volatile(),
6891 liveVolatileFloatRegs());
6892 masm.PushRegsInMask(save);
6894 using Fn = JSString* (*)(JSObject* obj, JSRuntime* rt);
6895 masm.setupUnalignedABICall(scratch);
6896 masm.passABIArg(obj);
6897 masm.movePtr(ImmPtr(cx_->runtime()), scratch);
6898 masm.passABIArg(scratch);
6899 masm.callWithABI<Fn, TypeOfNameObject>();
6900 masm.storeCallPointerResult(scratch);
6902 LiveRegisterSet ignore;
6903 ignore.add(scratch);
6904 masm.PopRegsInMaskIgnore(save, ignore);
6906 masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
6907 }
6909 masm.bind(&done);
6910 return true;
6911 }
6913 bool CacheIRCompiler::emitLoadInt32TruthyResult(ValOperandId inputId) {
6914 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6915 AutoOutputRegister output(*this);
6916 ValueOperand val = allocator.useValueRegister(masm, inputId);
6918 Label ifFalse, done;
6919 masm.branchTestInt32Truthy(false, val, &ifFalse);
6920 masm.moveValue(BooleanValue(true), output.valueReg());
6921 masm.jump(&done);
6923 masm.bind(&ifFalse);
6924 masm.moveValue(BooleanValue(false), output.valueReg());
6926 masm.bind(&done);
6927 return true;
6928 }
6930 bool CacheIRCompiler::emitLoadStringTruthyResult(StringOperandId strId) {
6931 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6932 AutoOutputRegister output(*this);
6933 Register str = allocator.useRegister(masm, strId);
6935 Label ifFalse, done;
6936 masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
6937 Imm32(0), &ifFalse);
6938 masm.moveValue(BooleanValue(true), output.valueReg());
6939 masm.jump(&done);
6941 masm.bind(&ifFalse);
6942 masm.moveValue(BooleanValue(false), output.valueReg());
6944 masm.bind(&done);
6945 return true;
6946 }
6948 bool CacheIRCompiler::emitLoadDoubleTruthyResult(NumberOperandId inputId) {
6949 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6950 AutoOutputRegister output(*this);
6952 AutoScratchFloatRegister floatReg(this);
6954 allocator.ensureDoubleRegister(masm, inputId, floatReg);
6956 Label ifFalse, done;
6958 masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
6959 masm.moveValue(BooleanValue(true), output.valueReg());
6960 masm.jump(&done);
6962 masm.bind(&ifFalse);
6963 masm.moveValue(BooleanValue(false), output.valueReg());
6965 masm.bind(&done);
6966 return true;
6967 }
6969 bool CacheIRCompiler::emitLoadObjectTruthyResult(ObjOperandId objId) {
6970 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6971 AutoOutputRegister output(*this);
6972 Register obj = allocator.useRegister(masm, objId);
6973 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
6975 Label emulatesUndefined, slowPath, done;
6976 masm.branchIfObjectEmulatesUndefined(obj, scratch, &slowPath,
6977 &emulatesUndefined);
6978 masm.moveValue(BooleanValue(true), output.valueReg());
6979 masm.jump(&done);
6981 masm.bind(&emulatesUndefined);
6982 masm.moveValue(BooleanValue(false), output.valueReg());
6983 masm.jump(&done);
6985 masm.bind(&slowPath);
6986 {
6987 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
6988 liveVolatileFloatRegs());
6989 volatileRegs.takeUnchecked(scratch);
6990 volatileRegs.takeUnchecked(output);
6991 masm.PushRegsInMask(volatileRegs);
6993 using Fn = bool (*)(JSObject* obj);
6994 masm.setupUnalignedABICall(scratch);
6995 masm.passABIArg(obj);
6996 masm.callWithABI<Fn, js::EmulatesUndefined>();
6997 masm.storeCallBoolResult(scratch);
6998 masm.xor32(Imm32(1), scratch);
7000 masm.PopRegsInMask(volatileRegs);
7002 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
7003 }
7005 masm.bind(&done);
7006 return true;
7007 }
7009 bool CacheIRCompiler::emitLoadBigIntTruthyResult(BigIntOperandId bigIntId) {
7010 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7011 AutoOutputRegister output(*this);
7012 Register bigInt = allocator.useRegister(masm, bigIntId);
7014 Label ifFalse, done;
7015 masm.branch32(Assembler::Equal,
7016 Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(0),
7017 &ifFalse);
7018 masm.moveValue(BooleanValue(true), output.valueReg());
7019 masm.jump(&done);
7021 masm.bind(&ifFalse);
7022 masm.moveValue(BooleanValue(false), output.valueReg());
7024 masm.bind(&done);
7025 return true;
7026 }
7028 bool CacheIRCompiler::emitLoadValueTruthyResult(ValOperandId inputId) {
7029 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7031 AutoOutputRegister output(*this);
7032 ValueOperand value = allocator.useValueRegister(masm, inputId);
7033 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
7034 AutoScratchRegister scratch2(allocator, masm);
7035 AutoScratchFloatRegister floatReg(this);
7037 Label ifFalse, ifTrue, done;
7039 {
7040 ScratchTagScope tag(masm, value);
7041 masm.splitTagForTest(value, tag);
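// ToBoolean dispatch: undefined and null are false; booleans, int32 values,
// strings, and BigInts test their payloads; objects are true unless they
// emulate undefined; symbols are always true; doubles are false only for NaN
// and +/-0.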
7043 masm.branchTestUndefined(Assembler::Equal, tag, &ifFalse);
7044 masm.branchTestNull(Assembler::Equal, tag, &ifFalse);
7046 Label notBoolean;
7047 masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
7048 {
7049 ScratchTagScopeRelease _(&tag);
7050 masm.branchTestBooleanTruthy(false, value, &ifFalse);
7051 masm.jump(&ifTrue);
7052 }
7053 masm.bind(&notBoolean);
7055 Label notInt32;
7056 masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
7057 {
7058 ScratchTagScopeRelease _(&tag);
7059 masm.branchTestInt32Truthy(false, value, &ifFalse);
7060 masm.jump(&ifTrue);
7061 }
7062 masm.bind(&notInt32);
7064 Label notObject;
7065 masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
7066 {
7067 ScratchTagScopeRelease _(&tag);
7069 Register obj = masm.extractObject(value, scratch1);
7071 Label slowPath;
7072 masm.branchIfObjectEmulatesUndefined(obj, scratch2, &slowPath, &ifFalse);
7073 masm.jump(&ifTrue);
7075 masm.bind(&slowPath);
7076 {
7077 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
7078 liveVolatileFloatRegs());
7079 volatileRegs.takeUnchecked(scratch1);
7080 volatileRegs.takeUnchecked(scratch2);
7081 volatileRegs.takeUnchecked(output);
7082 masm.PushRegsInMask(volatileRegs);
7084 using Fn = bool (*)(JSObject* obj);
7085 masm.setupUnalignedABICall(scratch2);
7086 masm.passABIArg(obj);
7087 masm.callWithABI<Fn, js::EmulatesUndefined>();
7088 masm.storeCallPointerResult(scratch2);
7090 masm.PopRegsInMask(volatileRegs);
7092 masm.branchIfTrueBool(scratch2, &ifFalse);
7093 masm.jump(&ifTrue);
7094 }
7095 }
7096 masm.bind(&notObject);
7098 Label notString;
7099 masm.branchTestString(Assembler::NotEqual, tag, &notString);
7101 ScratchTagScopeRelease _(&tag);
7102 masm.branchTestStringTruthy(false, value, &ifFalse);
7103 masm.jump(&ifTrue);
7105 masm.bind(&notString);
7107 Label notBigInt;
7108 masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
7110 ScratchTagScopeRelease _(&tag);
7111 masm.branchTestBigIntTruthy(false, value, &ifFalse);
7112 masm.jump(&ifTrue);
7114 masm.bind(&notBigInt);
7116 masm.branchTestSymbol(Assembler::Equal, tag, &ifTrue);
7118 #ifdef DEBUG
7119 Label isDouble;
7120 masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
7121 masm.assumeUnreachable("Unexpected value type");
7122 masm.bind(&isDouble);
7123 #endif
7126 ScratchTagScopeRelease _(&tag);
7127 masm.unboxDouble(value, floatReg);
7128 masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
7131 // Fall through to true case.
7134 masm.bind(&ifTrue);
7135 masm.moveValue(BooleanValue(true), output.valueReg());
7136 masm.jump(&done);
7138 masm.bind(&ifFalse);
7139 masm.moveValue(BooleanValue(false), output.valueReg());
7141 masm.bind(&done);
7142 return true;

bool CacheIRCompiler::emitComparePointerResultShared(JSOp op,
                                                     TypedOperandId lhsId,
                                                     TypedOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register left = allocator.useRegister(masm, lhsId);
  Register right = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label ifTrue, done;
  masm.branchPtr(JSOpToCondition(op, /* signed = */ true), left, right,
                 &ifTrue);

  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareObjectResult(JSOp op, ObjOperandId lhsId,
                                              ObjOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitComparePointerResultShared(op, lhsId, rhsId);
}

bool CacheIRCompiler::emitCompareSymbolResult(JSOp op, SymbolOperandId lhsId,
                                              SymbolOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitComparePointerResultShared(op, lhsId, rhsId);
}

bool CacheIRCompiler::emitCompareInt32Result(JSOp op, Int32OperandId lhsId,
                                             Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register left = allocator.useRegister(masm, lhsId);
  Register right = allocator.useRegister(masm, rhsId);

  Label ifTrue, done;
  masm.branch32(JSOpToCondition(op, /* signed = */ true), left, right, &ifTrue);

  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareDoubleResult(JSOp op, NumberOperandId lhsId,
                                              NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  Label done, ifTrue;
  masm.branchDouble(JSOpToDoubleCondition(op), floatScratch0, floatScratch1,
                    &ifTrue);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntResult(JSOp op, BigIntOperandId lhsId,
                                              BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(rhs);
    masm.passABIArg(lhs);
  } else {
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
  }

  using Fn = bool (*)(BigInt*, BigInt*);
  Fn fn;
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    fn = jit::BigIntEqual<EqualityKind::Equal>;
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    fn = jit::BigIntEqual<EqualityKind::NotEqual>;
  } else if (op == JSOp::Lt || op == JSOp::Gt) {
    fn = jit::BigIntCompare<ComparisonKind::LessThan>;
  } else {
    MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
    fn = jit::BigIntCompare<ComparisonKind::GreaterThanOrEqual>;
  }

  masm.callWithABI(DynamicFunction<Fn>(fn));
  masm.storeCallBoolResult(scratch);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntInt32Result(JSOp op,
                                                   BigIntOperandId lhsId,
                                                   Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register bigInt = allocator.useRegister(masm, lhsId);
  Register int32 = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  Label ifTrue, ifFalse;
  masm.compareBigIntAndInt32(op, bigInt, int32, scratch1, scratch2, &ifTrue,
                             &ifFalse);

  Label done;
  masm.bind(&ifFalse);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntNumberResult(JSOp op,
                                                    BigIntOperandId lhsId,
                                                    NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  Register lhs = allocator.useRegister(masm, lhsId);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch0);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(floatScratch0, ABIType::Float64);
    masm.passABIArg(lhs);
  } else {
    masm.passABIArg(lhs);
    masm.passABIArg(floatScratch0, ABIType::Float64);
  }

  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(scratch);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntStringResult(JSOp op,
                                                    BigIntOperandId lhsId,
                                                    StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.Push(lhs);
    masm.Push(rhs);
  } else {
    masm.Push(rhs);
    masm.Push(lhs);
  }

  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);
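
  // The string operand is converted to a numeric value before the
  // comparison, so e.g. |1n < "2"| evaluates to true.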
  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callvm.call<FnBigIntString, BigIntStringEqual<Equal>>();
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callvm.call<FnBigIntString, BigIntStringEqual<NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callvm.call<FnBigIntString, BigIntStringCompare<LessThan>>();
      break;
    }
    case JSOp::Gt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callvm.call<FnStringBigInt, StringBigIntCompare<LessThan>>();
      break;
    }
    case JSOp::Le: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callvm.call<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callvm.call<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }
  return true;
}

bool CacheIRCompiler::emitCompareNullUndefinedResult(JSOp op, bool isUndefined,
                                                     ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  ValueOperand input = allocator.useValueRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  if (IsStrictEqualityOp(op)) {
    if (isUndefined) {
      masm.testUndefinedSet(JSOpToCondition(op, false), input, scratch);
    } else {
      masm.testNullSet(JSOpToCondition(op, false), input, scratch);
    }
    EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
    return true;
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  MOZ_ASSERT(IsLooseEqualityOp(op));
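
  // Under loose equality, null and undefined compare equal to each other and
  // to objects that emulate |undefined|, but to nothing else.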
  Label nullOrLikeUndefined, notNullOrLikeUndefined, done;
  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    if (isUndefined) {
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
    } else {
      masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
    }
    masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);

    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxObject(input, scratch);
      masm.branchIfObjectEmulatesUndefined(scratch, scratch, failure->label(),
                                           &nullOrLikeUndefined);
      masm.jump(&notNullOrLikeUndefined);
    }
  }

  masm.bind(&nullOrLikeUndefined);
  EmitStoreBoolean(masm, op == JSOp::Eq, output);
  masm.jump(&done);

  masm.bind(&notNullOrLikeUndefined);
  EmitStoreBoolean(masm, op == JSOp::Ne, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareDoubleSameValueResult(NumberOperandId lhsId,
                                                       NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
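
  // SameValue differs from |==| on NaN (NaN is SameValue to itself) and on
  // signed zeroes (+0 is not SameValue to -0); sameValueDouble implements
  // those semantics.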
  masm.sameValueDouble(floatScratch0, floatScratch1, floatScratch2, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitIndirectTruncateInt32Result(Int32OperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, valId);

  if (output.hasValue()) {
    masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
  } else {
    masm.mov(val, output.typedReg().gpr());
  }
  return true;
}

bool CacheIRCompiler::emitCallPrintString(const char* str) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  masm.printf(str);
  return true;
}

bool CacheIRCompiler::emitBreakpoint() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  masm.breakpoint();
  return true;
}

void CacheIRCompiler::emitPostBarrierShared(Register obj,
                                            const ConstantOrRegister& val,
                                            Register scratch,
                                            Register maybeIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (val.constant()) {
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    return;
  }
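
  // A post barrier is only needed when a nursery cell is stored into a
  // tenured object, so filter out the stores where that cannot happen before
  // making the runtime call.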
  Label skipBarrier;
  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::NotEqual, reg.valueReg(), scratch,
                                  &skipBarrier);
  } else {
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, reg.typedReg().gpr(),
                                 scratch, &skipBarrier);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);

  // Check the one-element cache to avoid a VM call.
  auto* lastCellAddr = cx_->runtime()->gc.addressOfLastBufferedWholeCell();
  masm.branchPtr(Assembler::Equal, AbsoluteAddress(lastCellAddr), obj,
                 &skipBarrier);

  // Call one of these, depending on maybeIndex:
  //
  //   void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
  //   void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj,
  //                                int32_t index);
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);
  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(cx_->runtime()), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  if (maybeIndex != InvalidReg) {
    masm.passABIArg(maybeIndex);
    using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
    masm.callWithABI<Fn, PostWriteElementBarrier>();
  } else {
    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }
  masm.PopRegsInMask(save);

  masm.bind(&skipBarrier);
}

bool CacheIRCompiler::emitWrapResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  // We only have to wrap objects, because we are in the same zone.
  masm.branchTestObject(Assembler::NotEqual, output.valueReg(), &done);

  Register obj = output.valueReg().scratchReg();
  masm.unboxObject(output.valueReg(), obj);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = JSObject* (*)(JSContext* cx, JSObject* obj);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, WrapObjectPure>();
  masm.storeCallPointerResult(obj);

  LiveRegisterSet ignore;
  ignore.add(obj);
  masm.PopRegsInMaskIgnore(save, ignore);

  // We could not get a wrapper for this object.
  masm.branchTestPtr(Assembler::Zero, obj, obj, failure->label());

  // We clobbered the output register, so we have to retag.
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitMegamorphicLoadSlotByValueResult(ObjOperandId objId,
                                                           ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

#ifdef JS_CODEGEN_X86
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
#else
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifdef JS_CODEGEN_X86
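  // Note: x86-32 is too register-starved for the inline megamorphic cache
  // probe, so scratch2 is zeroed to pass a null MegamorphicCache::Entry* and
  // the lookup is done entirely in the C++ helper below.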
  masm.xorPtr(scratch2, scratch2);
#else
  Label cacheHit;
  masm.emitMegamorphicCacheLookupByValue(
      idVal, obj, scratch1, scratch3, scratch2, output.valueReg(), &cacheHit);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(scratch2);
  masm.passABIArg(idVal.scratchReg());
  masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();

  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.Pop(idVal);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

#ifndef JS_CODEGEN_X86
  masm.bind(&cacheHit);
#endif
  return true;
}

bool CacheIRCompiler::emitMegamorphicHasPropResult(ObjOperandId objId,
                                                   ValOperandId idId,
                                                   bool hasOwn) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

#ifdef JS_CODEGEN_X86
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
#else
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifndef JS_CODEGEN_X86
  Label cacheHit, done;
  masm.emitMegamorphicCacheLookupExists(idVal, obj, scratch1, scratch3,
                                        scratch2, output.maybeReg(), &cacheHit,
                                        hasOwn);
#else
  masm.xorPtr(scratch2, scratch2);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(scratch2);
  masm.passABIArg(idVal.scratchReg());
  if (hasOwn) {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
  } else {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
  }
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.Pop(idVal);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

#ifndef JS_CODEGEN_X86
  masm.jump(&done);
  masm.bind(&cacheHit);
  if (output.hasValue()) {
    masm.tagValue(JSVAL_TYPE_BOOLEAN, output.valueReg().scratchReg(),
                  output.valueReg());
  }
  masm.bind(&done);
#endif
  return true;
}

bool CacheIRCompiler::emitCallObjectHasSparseElementResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(scratch2.get());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(index);
  masm.PushRegsInMask(volatileRegs);

  using Fn =
      bool (*)(JSContext* cx, NativeObject* obj, int32_t index, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(index);
  masm.passABIArg(scratch2);
  masm.callWithABI<Fn, HasNativeElementPure>();
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));
  return true;
}

/*
 * Move a constant value into register dest.
 */
void CacheIRCompiler::emitLoadStubFieldConstant(StubFieldOffset val,
                                                Register dest) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  MOZ_ASSERT(mode_ == Mode::Ion);
  switch (val.getStubFieldType()) {
    case StubField::Type::Shape:
      masm.movePtr(ImmGCPtr(shapeStubField(val.getOffset())), dest);
      break;
    case StubField::Type::WeakGetterSetter:
      masm.movePtr(ImmGCPtr(weakGetterSetterStubField(val.getOffset())), dest);
      break;
    case StubField::Type::String:
      masm.movePtr(ImmGCPtr(stringStubField(val.getOffset())), dest);
      break;
    case StubField::Type::JSObject:
      masm.movePtr(ImmGCPtr(objectStubField(val.getOffset())), dest);
      break;
    case StubField::Type::RawPointer:
      masm.movePtr(ImmPtr(pointerStubField(val.getOffset())), dest);
      break;
    case StubField::Type::RawInt32:
      masm.move32(Imm32(int32StubField(val.getOffset())), dest);
      break;
    case StubField::Type::Id:
      masm.movePropertyKey(idStubField(val.getOffset()), dest);
      break;
    default:
      MOZ_CRASH("Unhandled stub field constant type");
  }
}

/*
 * After this is done executing, dest contains the value; either through a
 * constant load or through the load from the stub data.
 *
 * The current policy is that Baseline will use loads from the stub data (to
 * allow IC sharing), whereas Ion doesn't share ICs, and so we can safely use
 * constants in the IC.
 */
void CacheIRCompiler::emitLoadStubField(StubFieldOffset val, Register dest) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    emitLoadStubFieldConstant(val, dest);
  } else {
    Address load(ICStubReg, stubDataOffset_ + val.getOffset());

    switch (val.getStubFieldType()) {
      case StubField::Type::RawPointer:
      case StubField::Type::Shape:
      case StubField::Type::WeakGetterSetter:
      case StubField::Type::JSObject:
      case StubField::Type::Symbol:
      case StubField::Type::String:
      case StubField::Type::Id:
        masm.loadPtr(load, dest);
        break;
      case StubField::Type::RawInt32:
        masm.load32(load, dest);
        break;
      default:
        MOZ_CRASH("Unhandled stub field constant type");
    }
  }
}

void CacheIRCompiler::emitLoadValueStubField(StubFieldOffset val,
                                             ValueOperand dest) {
  MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Value);

  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    MOZ_ASSERT(mode_ == Mode::Ion);
    masm.moveValue(valueStubField(val.getOffset()), dest);
  } else {
    Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
    masm.loadValue(addr, dest);
  }
}

void CacheIRCompiler::emitLoadDoubleValueStubField(StubFieldOffset val,
                                                   ValueOperand dest,
                                                   FloatRegister scratch) {
  MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Double);

  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    MOZ_ASSERT(mode_ == Mode::Ion);
    double d = doubleStubField(val.getOffset());
    masm.moveValue(DoubleValue(d), dest);
  } else {
    Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
    masm.loadDouble(addr, scratch);
    masm.boxDouble(scratch, dest, scratch);
  }
}

bool CacheIRCompiler::emitLoadInstanceOfObjectResult(ValOperandId lhsId,
                                                     ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
  Register proto = allocator.useRegister(masm, protoId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label returnFalse, returnTrue, done;
  masm.fallibleUnboxObject(lhs, scratch, &returnFalse);

  // LHS is an object. Load its proto.
  masm.loadObjProto(scratch, scratch);
  {
    // Walk the proto chain until we either reach the target object,
    // nullptr or LazyProto.
    Label loop;
    masm.bind(&loop);

    masm.branchPtr(Assembler::Equal, scratch, proto, &returnTrue);
    masm.branchTestPtr(Assembler::Zero, scratch, scratch, &returnFalse);
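
    // A lazily-computed proto (e.g. for a proxy) is represented by the
    // tagged pointer 1; bail out and let the VM resolve it.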
    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
    masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), failure->label());

    masm.loadObjProto(scratch, scratch);
    masm.jump(&loop);
  }

  masm.bind(&returnFalse);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&returnTrue);
  EmitStoreBoolean(masm, true, output);
  // fallthrough
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitMegamorphicLoadSlotResult(ObjOperandId objId,
                                                    uint32_t idOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset id(idOffset, StubField::Type::Id);

  AutoScratchRegisterMaybeOutput idReg(allocator, masm, output);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifdef JS_CODEGEN_X86
  masm.xorPtr(scratch3, scratch3);
#else
  Label cacheHit;
  emitLoadStubField(id, idReg);
  masm.emitMegamorphicCacheLookupByValue(idReg.get(), obj, scratch1, scratch2,
                                         scratch3, output.valueReg(),
                                         &cacheHit);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(idReg.get());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  volatileRegs.takeUnchecked(scratch3);
  volatileRegs.takeUnchecked(idReg);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(id, scratch2);
  masm.passABIArg(scratch2);
  masm.passABIArg(scratch3);
  masm.passABIArg(idReg);

#ifdef JS_CODEGEN_X86
  masm.callWithABI<Fn, GetNativeDataPropertyPureWithCacheLookup>();
#else
  masm.callWithABI<Fn, GetNativeDataPropertyPure>();
#endif

  masm.storeCallPointerResult(scratch2);
  masm.PopRegsInMask(volatileRegs);

  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

  masm.branchIfFalseBool(scratch2, failure->label());
#ifndef JS_CODEGEN_X86
  masm.bind(&cacheHit);
#endif

  return true;
}

bool CacheIRCompiler::emitMegamorphicStoreSlot(ObjOperandId objId,
                                               uint32_t idOffset,
                                               ValOperandId rhsId,
                                               bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  StubFieldOffset id(idOffset, StubField::Type::Id);
  AutoScratchRegister scratch(allocator, masm);

  callvm.prepare();

  masm.Push(Imm32(strict));
  masm.Push(val);
  emitLoadStubField(id, scratch);
  masm.Push(scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callvm.callNoResult<Fn, SetPropertyMegamorphic<false>>();
  return true;
}

bool CacheIRCompiler::emitGuardHasGetterSetter(ObjOperandId objId,
                                               uint32_t idOffset,
                                               uint32_t getterSetterOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  StubFieldOffset id(idOffset, StubField::Type::Id);
  StubFieldOffset getterSetter(getterSetterOffset,
                               StubField::Type::WeakGetterSetter);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
                      GetterSetter* getterSetter);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(id, scratch2);
  masm.passABIArg(scratch2);
  emitLoadStubField(getterSetter, scratch3);
  masm.passABIArg(scratch3);
  masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.branchIfFalseBool(scratch1, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardWasmArg(ValOperandId argId,
                                       wasm::ValType::Kind kind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // All values can be boxed as AnyRef.
  if (kind == wasm::ValType::Ref) {
    return true;
  }
  MOZ_ASSERT(kind != wasm::ValType::V128);

  ValueOperand arg = allocator.useValueRegister(masm, argId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Check that the argument can be converted to the Wasm type in Warp code
  // without bailing out.
  Label done;
  switch (kind) {
    case wasm::ValType::I32:
    case wasm::ValType::F32:
    case wasm::ValType::F64: {
      // Argument must be number, bool, or undefined.
      masm.branchTestNumber(Assembler::Equal, arg, &done);
      masm.branchTestBoolean(Assembler::Equal, arg, &done);
      masm.branchTestUndefined(Assembler::NotEqual, arg, failure->label());
      break;
    }
    case wasm::ValType::I64: {
      // Argument must be bigint, bool, or string.
      masm.branchTestBigInt(Assembler::Equal, arg, &done);
      masm.branchTestBoolean(Assembler::Equal, arg, &done);
      masm.branchTestString(Assembler::NotEqual, arg, failure->label());
      break;
    }
    default:
      MOZ_CRASH("Unexpected kind");
  }
  masm.bind(&done);

  return true;
}

bool CacheIRCompiler::emitGuardMultipleShapes(ObjOperandId objId,
                                              uint32_t shapesOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister shapes(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  Register spectreScratch = InvalidReg;
  Maybe<AutoScratchRegister> maybeSpectreScratch;
  if (needSpectreMitigations) {
    maybeSpectreScratch.emplace(allocator, masm);
    spectreScratch = *maybeSpectreScratch;
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The stub field contains a ListObject. Load its elements.
  StubFieldOffset shapeArray(shapesOffset, StubField::Type::JSObject);
  emitLoadStubField(shapeArray, shapes);
  masm.loadPtr(Address(shapes, NativeObject::offsetOfElements()), shapes);

  masm.branchTestObjShapeList(Assembler::NotEqual, obj, shapes, scratch,
                              scratch2, spectreScratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadObject(ObjOperandId resultId,
                                     uint32_t objOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset obj(objOffset, StubField::Type::JSObject);
  emitLoadStubField(obj, reg);
  return true;
}

bool CacheIRCompiler::emitLoadProtoObject(ObjOperandId resultId,
                                          uint32_t objOffset,
                                          ObjOperandId receiverObjId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset obj(objOffset, StubField::Type::JSObject);
  emitLoadStubField(obj, reg);
  return true;
}

bool CacheIRCompiler::emitLoadInt32Constant(uint32_t valOffset,
                                            Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset val(valOffset, StubField::Type::RawInt32);
  emitLoadStubField(val, reg);
  return true;
}

bool CacheIRCompiler::emitLoadBooleanConstant(bool val,
                                              BooleanOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.move32(Imm32(val), reg);
  return true;
}

bool CacheIRCompiler::emitLoadDoubleConstant(uint32_t valOffset,
                                             NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  StubFieldOffset val(valOffset, StubField::Type::Double);

  AutoScratchFloatRegister floatReg(this);

  emitLoadDoubleValueStubField(val, output, floatReg);
  return true;
}

bool CacheIRCompiler::emitLoadUndefined(ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand reg = allocator.defineValueRegister(masm, resultId);
  masm.moveValue(UndefinedValue(), reg);
  return true;
}

bool CacheIRCompiler::emitLoadConstantString(uint32_t strOffset,
                                             StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset str(strOffset, StubField::Type::String);
  emitLoadStubField(str, reg);
  return true;
}

bool CacheIRCompiler::emitCallInt32ToString(Int32OperandId inputId,
                                            StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register input = allocator.useRegister(masm, inputId);
  Register result = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(result);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSLinearString* (*)(JSContext* cx, int32_t i);
  masm.setupUnalignedABICall(result);
  masm.loadJSContext(result);
  masm.passABIArg(result);
  masm.passABIArg(input);
  masm.callWithABI<Fn, js::Int32ToStringPure>();

  masm.storeCallPointerResult(result);
  masm.PopRegsInMask(volatileRegs);
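
  // Int32ToStringPure returns nullptr on failure without reporting an
  // exception, so the IC can simply jump to the failure path.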
  masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
  return true;
}

bool CacheIRCompiler::emitCallNumberToString(NumberOperandId inputId,
                                             StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, floatScratch0);
  Register result = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(result);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSString* (*)(JSContext* cx, double d);
  masm.setupUnalignedABICall(result);
  masm.loadJSContext(result);
  masm.passABIArg(result);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.callWithABI<Fn, js::NumberToStringPure>();

  masm.storeCallPointerResult(result);
  masm.PopRegsInMask(volatileRegs);

  masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
  return true;
}

bool CacheIRCompiler::emitInt32ToStringWithBaseResult(Int32OperandId inputId,
                                                      Int32OperandId baseId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  Register input = allocator.useRegister(masm, inputId);
  Register base = allocator.useRegister(masm, baseId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  masm.branch32(Assembler::LessThan, base, Imm32(2), failure->label());
  masm.branch32(Assembler::GreaterThan, base, Imm32(36), failure->label());
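
  // The base is restricted to [2, 36], as for Number.prototype.toString;
  // bases outside that range bail out of this stub.
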
  // Use lower-case characters by default.
  constexpr bool lowerCase = true;

  callvm.prepare();

  masm.Push(Imm32(lowerCase));
  masm.Push(base);
  masm.Push(input);

  using Fn = JSString* (*)(JSContext*, int32_t, int32_t, bool);
  callvm.call<Fn, js::Int32ToStringWithBase>();
  return true;
}

bool CacheIRCompiler::emitBooleanToString(BooleanOperandId inputId,
                                          StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register boolean = allocator.useRegister(masm, inputId);
  Register result = allocator.defineRegister(masm, resultId);
  const JSAtomState& names = cx_->names();
  Label true_, done;

  masm.branchTest32(Assembler::NonZero, boolean, boolean, &true_);

  // False case
  masm.movePtr(ImmGCPtr(names.false_), result);
  masm.jump(&done);

  // True case
  masm.bind(&true_);
  masm.movePtr(ImmGCPtr(names.true_), result);
  masm.bind(&done);

  return true;
}

bool CacheIRCompiler::emitObjectToStringResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(output.valueReg());
  volatileRegs.takeUnchecked(scratch);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSString* (*)(JSContext*, JSObject*);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::ObjectClassToString>();
  masm.storeCallPointerResult(scratch);

  masm.PopRegsInMask(volatileRegs);

  masm.branchPtr(Assembler::Equal, scratch, ImmPtr(nullptr), failure->label());
  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitCallStringConcatResult(StringOperandId lhsId,
                                                 StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();
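
  // gc::Heap::Default allows the result string to be allocated in the
  // nursery.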
  masm.Push(static_cast<js::jit::Imm32>(int32_t(js::gc::Heap::Default)));
  masm.Push(rhs);
  masm.Push(lhs);

  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
  callvm.call<Fn, ConcatStrings<CanGC>>();

  return true;
}

bool CacheIRCompiler::emitCallIsSuspendedGeneratorResult(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  ValueOperand input = allocator.useValueRegister(masm, valId);

  // Test if it's an object.
  Label returnFalse, done;
  masm.fallibleUnboxObject(input, scratch, &returnFalse);

  // Test if it's a GeneratorObject.
  masm.branchTestObjClass(Assembler::NotEqual, scratch,
                          &GeneratorObject::class_, scratch2, scratch,
                          &returnFalse);

  // If the resumeIndex slot holds an int32 value < RESUME_INDEX_RUNNING,
  // the generator is suspended.
  Address addr(scratch, AbstractGeneratorObject::offsetOfResumeIndexSlot());
  masm.fallibleUnboxInt32(addr, scratch, &returnFalse);
  masm.branch32(Assembler::AboveOrEqual, scratch,
                Imm32(AbstractGeneratorObject::RESUME_INDEX_RUNNING),
                &returnFalse);

  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&returnFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

// This op generates no code. It is consumed by the transpiler.
bool CacheIRCompiler::emitMetaScriptedThisShape(uint32_t) { return true; }

bool CacheIRCompiler::emitCallNativeGetElementResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  callvm.prepare();

  masm.Push(index);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
                      MutableHandleValue);
  callvm.call<Fn, NativeGetElement>();

  return true;
}

bool CacheIRCompiler::emitCallNativeGetElementSuperResult(
    ObjOperandId objId, Int32OperandId indexId, ValOperandId receiverId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);

  callvm.prepare();

  masm.Push(index);
  masm.Push(receiver);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
                      MutableHandleValue);
  callvm.call<Fn, NativeGetElement>();

  return true;
}

bool CacheIRCompiler::emitProxyHasPropResult(ObjOperandId objId,
                                             ValOperandId idId, bool hasOwn) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

  callvm.prepare();

  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  if (hasOwn) {
    callvm.call<Fn, ProxyHasOwn>();
  } else {
    callvm.call<Fn, ProxyHas>();
  }
  return true;
}

bool CacheIRCompiler::emitProxyGetByValueResult(ObjOperandId objId,
                                                ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

  callvm.prepare();
  masm.Push(idVal);
  masm.Push(obj);

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  callvm.call<Fn, ProxyGetPropertyByValue>();
  return true;
}

bool CacheIRCompiler::emitCallGetSparseElementResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register id = allocator.useRegister(masm, indexId);

  callvm.prepare();
  masm.Push(id);
  masm.Push(obj);

  using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
                      MutableHandleValue result);
  callvm.call<Fn, GetSparseElementHelper>();
  return true;
}

bool CacheIRCompiler::emitRegExpSearcherLastLimitResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  masm.loadAndClearRegExpSearcherLastLimit(scratch1, scratch2);

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitRegExpFlagResult(ObjOperandId regexpId,
                                           int32_t flagsMask) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register regexp = allocator.useRegister(masm, regexpId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Address flagsAddr(
      regexp, NativeObject::getFixedSlotOffset(RegExpObject::flagsSlot()));
  masm.unboxInt32(flagsAddr, scratch);

  Label ifFalse, done;
  masm.branchTest32(Assembler::Zero, scratch, Imm32(flagsMask), &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCallSubstringKernelResult(StringOperandId strId,
                                                    Int32OperandId beginId,
                                                    Int32OperandId lengthId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register begin = allocator.useRegister(masm, beginId);
  Register length = allocator.useRegister(masm, lengthId);

  callvm.prepare();
  masm.Push(length);
  masm.Push(begin);
  masm.Push(str);

  using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
                           int32_t len);
  callvm.call<Fn, SubstringKernel>();
  return true;
}

bool CacheIRCompiler::emitStringReplaceStringResult(
    StringOperandId strId, StringOperandId patternId,
    StringOperandId replacementId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register pattern = allocator.useRegister(masm, patternId);
  Register replacement = allocator.useRegister(masm, replacementId);

  callvm.prepare();
  masm.Push(replacement);
  masm.Push(pattern);
  masm.Push(str);

  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
  callvm.call<Fn, jit::StringReplace>();
  return true;
}

bool CacheIRCompiler::emitStringSplitStringResult(StringOperandId strId,
                                                  StringOperandId separatorId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register separator = allocator.useRegister(masm, separatorId);

  callvm.prepare();
  masm.Push(Imm32(INT32_MAX));
  masm.Push(separator);
  masm.Push(str);

  using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
  callvm.call<Fn, js::StringSplitString>();
  return true;
}

bool CacheIRCompiler::emitRegExpPrototypeOptimizableResult(
    ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register proto = allocator.useRegister(masm, protoId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label slow, done;
  masm.branchIfNotRegExpPrototypeOptimizable(
      proto, scratch, /* maybeGlobal = */ nullptr, &slow);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&slow);

    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSObject* proto);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(proto);
    masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
    masm.storeCallBoolResult(scratch);

    masm.PopRegsInMask(volatileRegs);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitRegExpInstanceOptimizableResult(
    ObjOperandId regexpId, ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register regexp = allocator.useRegister(masm, regexpId);
  Register proto = allocator.useRegister(masm, protoId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label slow, done;
  masm.branchIfNotRegExpInstanceOptimizable(regexp, scratch,
                                            /* maybeGlobal = */ nullptr, &slow);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&slow);

    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(regexp);
    masm.passABIArg(proto);
    masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
    masm.storeCallBoolResult(scratch);

    masm.PopRegsInMask(volatileRegs);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitGetFirstDollarIndexResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = bool (*)(JSContext*, JSString*, int32_t*);
  callvm.call<Fn, GetFirstDollarIndexRaw>();
  return true;
}

bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
    uint32_t replacementId, Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Maybe<AutoOutputRegister> output;
  Maybe<AutoCallVM> callvm;
  if (!Scalar::isBigIntType(elementType)) {
    output.emplace(*this);
  } else {
    callvm.emplace(masm, this, allocator);
  }
#ifdef JS_CODEGEN_X86
  // Use a scratch register to avoid running out of registers.
  Register obj = output ? output->valueReg().typeReg()
                        : callvm->outputValueReg().typeReg();
  allocator.copyToScratchRegister(masm, objId, obj);
#else
  Register obj = allocator.useRegister(masm, objId);
#endif
  Register index = allocator.useRegister(masm, indexId);
  Register expected;
  Register replacement;
  if (!Scalar::isBigIntType(elementType)) {
    expected = allocator.useRegister(masm, Int32OperandId(expectedId));
    replacement = allocator.useRegister(masm, Int32OperandId(replacementId));
  } else {
    expected = allocator.useRegister(masm, BigIntOperandId(expectedId));
    replacement = allocator.useRegister(masm, BigIntOperandId(replacementId));
  }

  Register scratch = output ? output->valueReg().scratchReg()
                            : callvm->outputValueReg().scratchReg();
  MOZ_ASSERT(scratch != obj, "scratchReg must not be typeReg");

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // Atomic operations are highly platform-dependent, for example x86/x64 has
  // specific requirements on which registers are used; MIPS needs multiple
  // additional temporaries. Therefore we're using either an ABI or VM call here
  // instead of handling each platform separately.

  if (Scalar::isBigIntType(elementType)) {
    callvm->prepare();

    masm.Push(replacement);
    masm.Push(expected);
    masm.Push(index);
    masm.Push(obj);

    using Fn = BigInt* (*)(JSContext*, FixedLengthTypedArrayObject*, size_t,
                           const BigInt*, const BigInt*);
    callvm->call<Fn, jit::AtomicsCompareExchange64>();
    return true;
  }

  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(output->valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(expected);
    masm.passABIArg(replacement);
    masm.callWithABI(DynamicFunction<AtomicsCompareExchangeFn>(
        AtomicsCompareExchange(elementType)));
    masm.storeCallInt32Result(scratch);

    masm.PopRegsInMask(volatileRegs);
  }
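
  // A Uint32 result does not necessarily fit in an int32 Value: values above
  // INT32_MAX must be boxed as a double.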
8884 if (elementType != Scalar::Uint32) {
8885 masm.tagValue(JSVAL_TYPE_INT32, scratch, output->valueReg());
8886 } else {
8887 ScratchDoubleScope fpscratch(masm);
8888 masm.convertUInt32ToDouble(scratch, fpscratch);
8889 masm.boxDouble(fpscratch, output->valueReg(), fpscratch);
8890 }
8892 return true;
8893 }
8895 bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
8896 ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
8897 Scalar::Type elementType, AtomicsReadWriteModifyFn fn) {
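// Shared int32 path for the Atomics read-modify-write operations
// (Atomics.add/sub/and/or/xor/exchange on non-BigInt element types).
// `fn` is the per-operation C++ helper invoked via an ABI call below;
// BigInt element types take the separate VM-call path in
// emitAtomicsReadModifyWriteResult64.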
8898 AutoOutputRegister output(*this);
8899 Register obj = allocator.useRegister(masm, objId);
8900 Register index = allocator.useRegister(masm, indexId);
8901 Register value = allocator.useRegister(masm, Int32OperandId(valueId));
8902 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
8904 // Not enough registers on X86.
8905 Register spectreTemp = Register::Invalid();
8907 FailurePath* failure;
8908 if (!addFailurePath(&failure)) {
8909 return false;
8910 }
8912 // Bounds check.
8913 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
8914 masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
8916 // See comment in emitAtomicsCompareExchangeResult for why we use an ABI call.
8917 {
8918 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
8919 liveVolatileFloatRegs());
8920 volatileRegs.takeUnchecked(output.valueReg());
8921 volatileRegs.takeUnchecked(scratch);
8922 masm.PushRegsInMask(volatileRegs);
8924 masm.setupUnalignedABICall(scratch);
8925 masm.passABIArg(obj);
8926 masm.passABIArg(index);
8927 masm.passABIArg(value);
8928 masm.callWithABI(DynamicFunction<AtomicsReadWriteModifyFn>(fn));
8929 masm.storeCallInt32Result(scratch);
8931 masm.PopRegsInMask(volatileRegs);
8932 }
8934 if (elementType != Scalar::Uint32) {
8935 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
8936 } else {
8937 ScratchDoubleScope fpscratch(masm);
8938 masm.convertUInt32ToDouble(scratch, fpscratch);
8939 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
8940 }
8942 return true;
8943 }
8945 template <CacheIRCompiler::AtomicsReadWriteModify64Fn fn>
8946 bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
8947 ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId) {
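// 64-bit (BigInt64/BigUint64) read-modify-write path. The result must be
// allocated as a BigInt on the GC heap, so this always performs a VM call
// instead of emitting an inline atomic sequence.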
8948 AutoCallVM callvm(masm, this, allocator);
8949 Register obj = allocator.useRegister(masm, objId);
8950 Register index = allocator.useRegister(masm, indexId);
8951 Register value = allocator.useRegister(masm, BigIntOperandId(valueId));
8952 AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());
8954 // Not enough registers on X86.
8955 Register spectreTemp = Register::Invalid();
8957 FailurePath* failure;
8958 if (!addFailurePath(&failure)) {
8959 return false;
8960 }
8962 // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
8963 // we can't use both at the same time. This isn't an issue here, because Ion
8964 // doesn't support CallICs. If that ever changes, this code must be updated.
8965 MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
8967 // Bounds check.
8968 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
8969 masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
8971 // See comment in emitAtomicsCompareExchangeResult for why we use a VM call.
8973 callvm.prepare();
8975 masm.Push(value);
8976 masm.Push(index);
8977 masm.Push(obj);
8979 callvm.call<AtomicsReadWriteModify64Fn, fn>();
8980 return true;
8981 }
8983 bool CacheIRCompiler::emitAtomicsExchangeResult(ObjOperandId objId,
8984 IntPtrOperandId indexId,
8985 uint32_t valueId,
8986 Scalar::Type elementType) {
8987 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
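// Dispatch on the element type: BigInt64/BigUint64 use the 64-bit VM-call
// path above, all other element types the int32 ABI-call path. The
// Add/Sub/And/Or/Xor emitters below follow the same pattern.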
8989 if (Scalar::isBigIntType(elementType)) {
8990 return emitAtomicsReadModifyWriteResult64<jit::AtomicsExchange64>(
8991 objId, indexId, valueId);
8992 }
8993 return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
8994 AtomicsExchange(elementType));
8995 }
8997 bool CacheIRCompiler::emitAtomicsAddResult(ObjOperandId objId,
8998 IntPtrOperandId indexId,
8999 uint32_t valueId,
9000 Scalar::Type elementType,
9001 bool forEffect) {
9002 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9004 if (Scalar::isBigIntType(elementType)) {
9005 return emitAtomicsReadModifyWriteResult64<jit::AtomicsAdd64>(objId, indexId,
9006 valueId);
9007 }
9008 return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
9009 AtomicsAdd(elementType));
9010 }
9012 bool CacheIRCompiler::emitAtomicsSubResult(ObjOperandId objId,
9013 IntPtrOperandId indexId,
9014 uint32_t valueId,
9015 Scalar::Type elementType,
9016 bool forEffect) {
9017 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9019 if (Scalar::isBigIntType(elementType)) {
9020 return emitAtomicsReadModifyWriteResult64<jit::AtomicsSub64>(objId, indexId,
9021 valueId);
9022 }
9023 return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
9024 AtomicsSub(elementType));
9025 }
9027 bool CacheIRCompiler::emitAtomicsAndResult(ObjOperandId objId,
9028 IntPtrOperandId indexId,
9029 uint32_t valueId,
9030 Scalar::Type elementType,
9031 bool forEffect) {
9032 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9034 if (Scalar::isBigIntType(elementType)) {
9035 return emitAtomicsReadModifyWriteResult64<jit::AtomicsAnd64>(objId, indexId,
9036 valueId);
9037 }
9038 return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
9039 AtomicsAnd(elementType));
9040 }
9042 bool CacheIRCompiler::emitAtomicsOrResult(ObjOperandId objId,
9043 IntPtrOperandId indexId,
9044 uint32_t valueId,
9045 Scalar::Type elementType,
9046 bool forEffect) {
9047 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9049 if (Scalar::isBigIntType(elementType)) {
9050 return emitAtomicsReadModifyWriteResult64<jit::AtomicsOr64>(objId, indexId,
9051 valueId);
9052 }
9053 return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
9054 AtomicsOr(elementType));
9055 }
9057 bool CacheIRCompiler::emitAtomicsXorResult(ObjOperandId objId,
9058 IntPtrOperandId indexId,
9059 uint32_t valueId,
9060 Scalar::Type elementType,
9061 bool forEffect) {
9062 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9064 if (Scalar::isBigIntType(elementType)) {
9065 return emitAtomicsReadModifyWriteResult64<jit::AtomicsXor64>(objId, indexId,
9066 valueId);
9067 }
9068 return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
9069 AtomicsXor(elementType));
9070 }
9072 bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
9073 IntPtrOperandId indexId,
9074 Scalar::Type elementType) {
9075 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
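// Implements Atomics.load(ta, idx). Uint32Mode::ForceDouble below makes
// uint32 loads box as doubles, because a uint32 value can exceed INT32_MAX
// and then no longer fits in an int32-tagged Value.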
9077 Maybe<AutoOutputRegister> output;
9078 Maybe<AutoCallVM> callvm;
9079 if (!Scalar::isBigIntType(elementType)) {
9080 output.emplace(*this);
9081 } else {
9082 callvm.emplace(masm, this, allocator);
9083 }
9084 Register obj = allocator.useRegister(masm, objId);
9085 Register index = allocator.useRegister(masm, indexId);
9086 AutoScratchRegisterMaybeOutput scratch(allocator, masm,
9087 output ? *output : callvm->output());
9088 AutoSpectreBoundsScratchRegister spectreTemp(allocator, masm);
9089 AutoAvailableFloatRegister floatReg(*this, FloatReg0);
9091 FailurePath* failure;
9092 if (!addFailurePath(&failure)) {
9093 return false;
9094 }
9096 // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
9097 // we can't use both at the same time. This isn't an issue here, because Ion
9098 // doesn't support CallICs. If that ever changes, this code must be updated.
9099 MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
9101 // Bounds check.
9102 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
9103 masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
9105 // Atomic operations are highly platform-dependent, for example x86/arm32 has
9106 // specific requirements on which registers are used. Therefore we're using a
9107 // VM call here instead of handling each platform separately.
9108 if (Scalar::isBigIntType(elementType)) {
9109 callvm->prepare();
9111 masm.Push(index);
9112 masm.Push(obj);
9114 using Fn = BigInt* (*)(JSContext*, FixedLengthTypedArrayObject*, size_t);
9115 callvm->call<Fn, jit::AtomicsLoad64>();
9116 return true;
9117 }
9119 // Load the elements vector.
9120 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);
9122 // Load the value.
9123 BaseIndex source(scratch, index, ScaleFromScalarType(elementType));
9125 // NOTE: the generated code must match the assembly code in gen_load in
9126 // GenerateAtomicOperations.py
9127 auto sync = Synchronization::Load();
9129 masm.memoryBarrierBefore(sync);
9131 Label* failUint32 = nullptr;
9132 MacroAssembler::Uint32Mode mode = MacroAssembler::Uint32Mode::ForceDouble;
9133 masm.loadFromTypedArray(elementType, source, output->valueReg(), mode,
9134 scratch, failUint32);
9135 masm.memoryBarrierAfter(sync);
9137 return true;
9138 }
9140 bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
9141 IntPtrOperandId indexId,
9142 uint32_t valueId,
9143 Scalar::Type elementType) {
9144 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
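// Implements Atomics.store(ta, idx, value). Atomics.store returns the
// value that was stored, which is why both branches below tag the input
// value into the output register.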
9146 AutoOutputRegister output(*this);
9147 Register obj = allocator.useRegister(masm, objId);
9148 Register index = allocator.useRegister(masm, indexId);
9149 Maybe<Register> valueInt32;
9150 Maybe<Register> valueBigInt;
9151 if (!Scalar::isBigIntType(elementType)) {
9152 valueInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
9153 } else {
9154 valueBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
9155 }
9156 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
9158 // Not enough registers on X86.
9159 Register spectreTemp = Register::Invalid();
9161 FailurePath* failure;
9162 if (!addFailurePath(&failure)) {
9163 return false;
9164 }
9166 // Bounds check.
9167 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
9168 masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
9170 if (!Scalar::isBigIntType(elementType)) {
9171 // Load the elements vector.
9172 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);
9174 // Store the value.
9175 BaseIndex dest(scratch, index, ScaleFromScalarType(elementType));
9177 // NOTE: the generated code must match the assembly code in gen_store in
9178 // GenerateAtomicOperations.py
9179 auto sync = Synchronization::Store();
9181 masm.memoryBarrierBefore(sync);
9182 masm.storeToTypedIntArray(elementType, *valueInt32, dest);
9183 masm.memoryBarrierAfter(sync);
9185 masm.tagValue(JSVAL_TYPE_INT32, *valueInt32, output.valueReg());
9186 } else {
9187 // See comment in emitAtomicsCompareExchangeResult for why we use an ABI call.
9189 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
9190 liveVolatileFloatRegs());
9191 volatileRegs.takeUnchecked(output.valueReg());
9192 volatileRegs.takeUnchecked(scratch);
9193 masm.PushRegsInMask(volatileRegs);
9195 using Fn = void (*)(FixedLengthTypedArrayObject*, size_t, const BigInt*);
9196 masm.setupUnalignedABICall(scratch);
9197 masm.passABIArg(obj);
9198 masm.passABIArg(index);
9199 masm.passABIArg(*valueBigInt);
9200 masm.callWithABI<Fn, jit::AtomicsStore64>();
9202 masm.PopRegsInMask(volatileRegs);
9204 masm.tagValue(JSVAL_TYPE_BIGINT, *valueBigInt, output.valueReg());
9205 }
9207 return true;
9208 }
9210 bool CacheIRCompiler::emitAtomicsIsLockFreeResult(Int32OperandId valueId) {
9211 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
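// Implements Atomics.isLockFree(n). atomicIsLockFreeJS computes the
// JS-observable answer for the byte size in `value`; per spec the answer
// for a given size is constant for the lifetime of the runtime (and
// Atomics.isLockFree(4) is always true).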
9213 AutoOutputRegister output(*this);
9214 Register value = allocator.useRegister(masm, valueId);
9215 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
9217 masm.atomicIsLockFreeJS(value, scratch);
9218 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
9220 return true;
9221 }
9223 bool CacheIRCompiler::emitBigIntAsIntNResult(Int32OperandId bitsId,
9224 BigIntOperandId bigIntId) {
9225 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
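// Implements BigInt.asIntN(bits, bigint): wrap the BigInt modulo 2^bits
// and reinterpret the result as signed, e.g. BigInt.asIntN(8, 257n) is 1n
// and BigInt.asIntN(8, 128n) is -128n. This is always a VM call because
// the result is a heap-allocated BigInt.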
9227 AutoCallVM callvm(masm, this, allocator);
9229 Register bits = allocator.useRegister(masm, bitsId);
9230 Register bigInt = allocator.useRegister(masm, bigIntId);
9232 callvm.prepare();
9233 masm.Push(bits);
9234 masm.Push(bigInt);
9236 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
9237 callvm.call<Fn, jit::BigIntAsIntN>();
9238 return true;
9239 }
9241 bool CacheIRCompiler::emitBigIntAsUintNResult(Int32OperandId bitsId,
9242 BigIntOperandId bigIntId) {
9243 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9245 AutoCallVM callvm(masm, this, allocator);
9247 Register bits = allocator.useRegister(masm, bitsId);
9248 Register bigInt = allocator.useRegister(masm, bigIntId);
9250 callvm.prepare();
9251 masm.Push(bits);
9252 masm.Push(bigInt);
9254 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
9255 callvm.call<Fn, jit::BigIntAsUintN>();
9256 return true;
9257 }
9259 bool CacheIRCompiler::emitSetHasResult(ObjOperandId setId, ValOperandId valId) {
9260 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9262 AutoCallVM callvm(masm, this, allocator);
9264 Register set = allocator.useRegister(masm, setId);
9265 ValueOperand val = allocator.useValueRegister(masm, valId);
9267 callvm.prepare();
9268 masm.Push(val);
9269 masm.Push(set);
9271 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
9272 callvm.call<Fn, jit::SetObjectHas>();
9273 return true;
9274 }
9276 bool CacheIRCompiler::emitSetHasNonGCThingResult(ObjOperandId setId,
9277 ValOperandId valId) {
9278 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
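// Fast path for Set.prototype.has with a key that is not a GC thing
// (int32, double, boolean, undefined, or null): canonicalize the value,
// hash it inline, and walk the set's hash table without leaving jitcode.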
9280 AutoOutputRegister output(*this);
9281 Register set = allocator.useRegister(masm, setId);
9282 ValueOperand val = allocator.useValueRegister(masm, valId);
9284 AutoScratchRegister scratch1(allocator, masm);
9285 AutoScratchRegister scratch2(allocator, masm);
9286 AutoScratchRegister scratch3(allocator, masm);
9287 AutoScratchRegister scratch4(allocator, masm);
9288 AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
9290 masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
9291 masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);
9293 masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
9294 scratch3, scratch4);
9295 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9296 return true;
9297 }
9299 bool CacheIRCompiler::emitSetHasSymbolResult(ObjOperandId setId,
9300 SymbolOperandId symId) {
9301 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9303 AutoOutputRegister output(*this);
9304 Register set = allocator.useRegister(masm, setId);
9305 Register sym = allocator.useRegister(masm, symId);
9307 AutoScratchRegister scratch1(allocator, masm);
9308 AutoScratchRegister scratch2(allocator, masm);
9309 AutoScratchRegister scratch3(allocator, masm);
9310 AutoScratchRegister scratch4(allocator, masm);
9312 masm.prepareHashSymbol(sym, scratch1);
9314 masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
9315 masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
9316 scratch3, scratch4);
9317 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9318 return true;
9319 }
9321 bool CacheIRCompiler::emitSetHasBigIntResult(ObjOperandId setId,
9322 BigIntOperandId bigIntId) {
9323 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9325 AutoOutputRegister output(*this);
9326 Register set = allocator.useRegister(masm, setId);
9327 Register bigInt = allocator.useRegister(masm, bigIntId);
9329 AutoScratchRegister scratch1(allocator, masm);
9330 AutoScratchRegister scratch2(allocator, masm);
9331 AutoScratchRegister scratch3(allocator, masm);
9332 AutoScratchRegister scratch4(allocator, masm);
9333 AutoScratchRegister scratch5(allocator, masm);
9334 #ifndef JS_CODEGEN_ARM
9335 AutoScratchRegister scratch6(allocator, masm);
9336 #else
9337 // We don't have more registers available on ARM32.
9338 Register scratch6 = set;
9340 masm.push(set);
9341 #endif
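// Note: on ARM32 the lookup below clobbers `set`, which doubles as
// scratch6; the register was pushed above and is popped again once the
// lookup is done.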
9343 masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);
9345 masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
9346 masm.setObjectHasBigInt(set, output.valueReg(), scratch1, scratch2, scratch3,
9347 scratch4, scratch5, scratch6);
9348 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9350 #ifdef JS_CODEGEN_ARM
9351 masm.pop(set);
9352 #endif
9353 return true;
9354 }
9356 bool CacheIRCompiler::emitSetHasObjectResult(ObjOperandId setId,
9357 ObjOperandId objId) {
9358 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9360 AutoOutputRegister output(*this);
9361 Register set = allocator.useRegister(masm, setId);
9362 Register obj = allocator.useRegister(masm, objId);
9364 AutoScratchRegister scratch1(allocator, masm);
9365 AutoScratchRegister scratch2(allocator, masm);
9366 AutoScratchRegister scratch3(allocator, masm);
9367 AutoScratchRegister scratch4(allocator, masm);
9368 AutoScratchRegister scratch5(allocator, masm);
9370 masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
9371 masm.prepareHashObject(set, output.valueReg(), scratch1, scratch2, scratch3,
9372 scratch4, scratch5);
9374 masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
9375 scratch3, scratch4);
9376 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9377 return true;
9378 }
9380 bool CacheIRCompiler::emitSetSizeResult(ObjOperandId setId) {
9381 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9383 AutoOutputRegister output(*this);
9384 Register set = allocator.useRegister(masm, setId);
9385 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
9387 masm.loadSetObjectSize(set, scratch);
9388 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
9389 return true;
9390 }
9392 bool CacheIRCompiler::emitMapHasResult(ObjOperandId mapId, ValOperandId valId) {
9393 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9395 AutoCallVM callvm(masm, this, allocator);
9397 Register map = allocator.useRegister(masm, mapId);
9398 ValueOperand val = allocator.useValueRegister(masm, valId);
9400 callvm.prepare();
9401 masm.Push(val);
9402 masm.Push(map);
9404 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
9405 callvm.call<Fn, jit::MapObjectHas>();
9406 return true;
9407 }
9409 bool CacheIRCompiler::emitMapHasNonGCThingResult(ObjOperandId mapId,
9410 ValOperandId valId) {
9411 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9413 AutoOutputRegister output(*this);
9414 Register map = allocator.useRegister(masm, mapId);
9415 ValueOperand val = allocator.useValueRegister(masm, valId);
9417 AutoScratchRegister scratch1(allocator, masm);
9418 AutoScratchRegister scratch2(allocator, masm);
9419 AutoScratchRegister scratch3(allocator, masm);
9420 AutoScratchRegister scratch4(allocator, masm);
9421 AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
9423 masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
9424 masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);
9426 masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
9427 scratch3, scratch4);
9428 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9429 return true;
9430 }
9432 bool CacheIRCompiler::emitMapHasSymbolResult(ObjOperandId mapId,
9433 SymbolOperandId symId) {
9434 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9436 AutoOutputRegister output(*this);
9437 Register map = allocator.useRegister(masm, mapId);
9438 Register sym = allocator.useRegister(masm, symId);
9440 AutoScratchRegister scratch1(allocator, masm);
9441 AutoScratchRegister scratch2(allocator, masm);
9442 AutoScratchRegister scratch3(allocator, masm);
9443 AutoScratchRegister scratch4(allocator, masm);
9445 masm.prepareHashSymbol(sym, scratch1);
9447 masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
9448 masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
9449 scratch3, scratch4);
9450 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9451 return true;
9452 }
9454 bool CacheIRCompiler::emitMapHasBigIntResult(ObjOperandId mapId,
9455 BigIntOperandId bigIntId) {
9456 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9458 AutoOutputRegister output(*this);
9459 Register map = allocator.useRegister(masm, mapId);
9460 Register bigInt = allocator.useRegister(masm, bigIntId);
9462 AutoScratchRegister scratch1(allocator, masm);
9463 AutoScratchRegister scratch2(allocator, masm);
9464 AutoScratchRegister scratch3(allocator, masm);
9465 AutoScratchRegister scratch4(allocator, masm);
9466 AutoScratchRegister scratch5(allocator, masm);
9467 #ifndef JS_CODEGEN_ARM
9468 AutoScratchRegister scratch6(allocator, masm);
9469 #else
9470 // We don't have more registers available on ARM32.
9471 Register scratch6 = map;
9473 masm.push(map);
9474 #endif
9476 masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);
9478 masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
9479 masm.mapObjectHasBigInt(map, output.valueReg(), scratch1, scratch2, scratch3,
9480 scratch4, scratch5, scratch6);
9481 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9483 #ifdef JS_CODEGEN_ARM
9484 masm.pop(map);
9485 #endif
9486 return true;
9487 }
9489 bool CacheIRCompiler::emitMapHasObjectResult(ObjOperandId mapId,
9490 ObjOperandId objId) {
9491 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9493 AutoOutputRegister output(*this);
9494 Register map = allocator.useRegister(masm, mapId);
9495 Register obj = allocator.useRegister(masm, objId);
9497 AutoScratchRegister scratch1(allocator, masm);
9498 AutoScratchRegister scratch2(allocator, masm);
9499 AutoScratchRegister scratch3(allocator, masm);
9500 AutoScratchRegister scratch4(allocator, masm);
9501 AutoScratchRegister scratch5(allocator, masm);
9503 masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
9504 masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
9505 scratch4, scratch5);
9507 masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
9508 scratch3, scratch4);
9509 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9510 return true;
9511 }
9513 bool CacheIRCompiler::emitMapGetResult(ObjOperandId mapId, ValOperandId valId) {
9514 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
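// Generic Map.prototype.get path: a VM call that boxes the mapped value
// (or undefined if the key is absent) directly into the IC output.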
9516 AutoCallVM callvm(masm, this, allocator);
9518 Register map = allocator.useRegister(masm, mapId);
9519 ValueOperand val = allocator.useValueRegister(masm, valId);
9521 callvm.prepare();
9522 masm.Push(val);
9523 masm.Push(map);
9525 using Fn =
9526 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
9527 callvm.call<Fn, jit::MapObjectGet>();
9528 return true;
9529 }
9531 bool CacheIRCompiler::emitMapGetNonGCThingResult(ObjOperandId mapId,
9532 ValOperandId valId) {
9533 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
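// Inline Map.prototype.get for non-GC-thing keys. Unlike the `has` fast
// paths above, mapObjectGetNonBigInt writes the fetched value (or
// undefined) into output.valueReg() rather than producing a boolean.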
9535 AutoOutputRegister output(*this);
9536 Register map = allocator.useRegister(masm, mapId);
9537 ValueOperand val = allocator.useValueRegister(masm, valId);
9539 AutoScratchRegister scratch1(allocator, masm);
9540 AutoScratchRegister scratch2(allocator, masm);
9541 AutoScratchRegister scratch3(allocator, masm);
9542 AutoScratchRegister scratch4(allocator, masm);
9543 AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
9545 masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
9546 masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);
9548 masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
9549 output.valueReg(), scratch2, scratch3, scratch4);
9550 return true;
9551 }
9553 bool CacheIRCompiler::emitMapGetSymbolResult(ObjOperandId mapId,
9554 SymbolOperandId symId) {
9555 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9557 AutoOutputRegister output(*this);
9558 Register map = allocator.useRegister(masm, mapId);
9559 Register sym = allocator.useRegister(masm, symId);
9561 AutoScratchRegister scratch1(allocator, masm);
9562 AutoScratchRegister scratch2(allocator, masm);
9563 AutoScratchRegister scratch3(allocator, masm);
9564 AutoScratchRegister scratch4(allocator, masm);
9566 masm.prepareHashSymbol(sym, scratch1);
9568 masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
9569 masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
9570 output.valueReg(), scratch2, scratch3, scratch4);
9571 return true;
9572 }
9574 bool CacheIRCompiler::emitMapGetBigIntResult(ObjOperandId mapId,
9575 BigIntOperandId bigIntId) {
9576 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9578 AutoOutputRegister output(*this);
9579 Register map = allocator.useRegister(masm, mapId);
9580 Register bigInt = allocator.useRegister(masm, bigIntId);
9582 AutoScratchRegister scratch1(allocator, masm);
9583 AutoScratchRegister scratch2(allocator, masm);
9584 AutoScratchRegister scratch3(allocator, masm);
9585 AutoScratchRegister scratch4(allocator, masm);
9586 AutoScratchRegister scratch5(allocator, masm);
9587 #ifndef JS_CODEGEN_ARM
9588 AutoScratchRegister scratch6(allocator, masm);
9589 #else
9590 // We don't have more registers available on ARM32.
9591 Register scratch6 = map;
9593 masm.push(map);
9594 #endif
9596 masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);
9598 masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
9599 masm.mapObjectGetBigInt(map, output.valueReg(), scratch1, output.valueReg(),
9600 scratch2, scratch3, scratch4, scratch5, scratch6);
9602 #ifdef JS_CODEGEN_ARM
9603 masm.pop(map);
9604 #endif
9605 return true;
9606 }
9608 bool CacheIRCompiler::emitMapGetObjectResult(ObjOperandId mapId,
9609 ObjOperandId objId) {
9610 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9612 AutoOutputRegister output(*this);
9613 Register map = allocator.useRegister(masm, mapId);
9614 Register obj = allocator.useRegister(masm, objId);
9616 AutoScratchRegister scratch1(allocator, masm);
9617 AutoScratchRegister scratch2(allocator, masm);
9618 AutoScratchRegister scratch3(allocator, masm);
9619 AutoScratchRegister scratch4(allocator, masm);
9620 AutoScratchRegister scratch5(allocator, masm);
9622 masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
9623 masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
9624 scratch4, scratch5);
9626 masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
9627 output.valueReg(), scratch2, scratch3, scratch4);
9628 return true;
9629 }
9631 bool CacheIRCompiler::emitMapSizeResult(ObjOperandId mapId) {
9632 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9634 AutoOutputRegister output(*this);
9635 Register map = allocator.useRegister(masm, mapId);
9636 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
9638 masm.loadMapObjectSize(map, scratch);
9639 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
9640 return true;
9641 }
9643 bool CacheIRCompiler::emitArrayFromArgumentsObjectResult(ObjOperandId objId,
9644 uint32_t shapeOffset) {
9645 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9647 AutoCallVM callvm(masm, this, allocator);
9649 Register obj = allocator.useRegister(masm, objId);
9651 callvm.prepare();
9652 masm.Push(obj);
9654 using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
9655 callvm.call<Fn, js::ArrayFromArgumentsObject>();
9656 return true;
9657 }
9659 bool CacheIRCompiler::emitGuardGlobalGeneration(uint32_t expectedOffset,
9660 uint32_t generationAddrOffset) {
9661 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
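// Guard that the global's generation counter still matches the value this
// stub was compiled against: the expected generation is baked in as an
// int32 stub field and the current one is loaded through a pointer stub
// field; any mismatch sends us to the failure path.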
9663 AutoScratchRegister scratch(allocator, masm);
9664 AutoScratchRegister scratch2(allocator, masm);
9666 FailurePath* failure;
9667 if (!addFailurePath(&failure)) {
9668 return false;
9669 }
9671 StubFieldOffset expected(expectedOffset, StubField::Type::RawInt32);
9672 emitLoadStubField(expected, scratch);
9674 StubFieldOffset generationAddr(generationAddrOffset,
9675 StubField::Type::RawPointer);
9676 emitLoadStubField(generationAddr, scratch2);
9678 masm.branch32(Assembler::NotEqual, Address(scratch2, 0), scratch,
9679 failure->label());
9681 return true;
9682 }
9684 bool CacheIRCompiler::emitGuardFuse(RealmFuses::FuseIndex fuseIndex) {
9685 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
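// Guard on a realm fuse. Judging from the null compare below, the guard
// only holds while the fuse word is still null (the fuse is intact); once
// the fuse has been popped, the stub fails and must be regenerated.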
9686 AutoScratchRegister scratch(allocator, masm);
9688 FailurePath* failure;
9689 if (!addFailurePath(&failure)) {
9690 return false;
9691 }
9693 masm.loadRealmFuse(fuseIndex, scratch);
9694 masm.branchPtr(Assembler::NotEqual, scratch, ImmPtr(nullptr),
9695 failure->label());
9696 return true;
9697 }
9699 bool CacheIRCompiler::emitBailout() {
9700 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9702 // Generates no code.
9704 return true;
9705 }
9707 bool CacheIRCompiler::emitAssertRecoveredOnBailoutResult(ValOperandId valId,
9708 bool mustBeRecovered) {
9709 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9711 AutoOutputRegister output(*this);
9713 // No-op outside of IonMonkey; just produce undefined.
9714 masm.moveValue(UndefinedValue(), output.valueReg());
9716 return true;
9717 }
9719 bool CacheIRCompiler::emitAssertPropertyLookup(ObjOperandId objId,
9720 uint32_t idOffset,
9721 uint32_t slotOffset) {
9722 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9724 Register obj = allocator.useRegister(masm, objId);
9726 AutoScratchRegister id(allocator, masm);
9727 AutoScratchRegister slot(allocator, masm);
9729 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
9730 masm.PushRegsInMask(save);
9732 masm.setupUnalignedABICall(id);
9734 StubFieldOffset idField(idOffset, StubField::Type::Id);
9735 emitLoadStubField(idField, id);
9737 StubFieldOffset slotField(slotOffset, StubField::Type::RawInt32);
9738 emitLoadStubField(slotField, slot);
9740 masm.passABIArg(obj);
9741 masm.passABIArg(id);
9742 masm.passABIArg(slot);
9743 using Fn = void (*)(NativeObject*, PropertyKey, uint32_t);
9744 masm.callWithABI<Fn, js::jit::AssertPropertyLookup>();
9745 masm.PopRegsInMask(save);
9747 return true;
9748 }
9750 #ifdef FUZZING_JS_FUZZILLI
9751 bool CacheIRCompiler::emitFuzzilliHashResult(ValOperandId valId) {
9752 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
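// Fuzzilli differential-fuzzing support: fold the input value into the
// per-context executionHash. Each type tag contributes in a canonical way
// (numbers funnel through a double, BigInts and objects through
// out-of-line calls), and executionHashInputs counts the values mixed in.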
9754 ValueOperand input = allocator.useValueRegister(masm, valId);
9755 AutoScratchRegister scratch(allocator, masm);
9756 AutoScratchRegister scratchJSContext(allocator, masm);
9757 AutoScratchFloatRegister floatReg(this);
9758 # ifdef JS_PUNBOX64
9759 AutoScratchRegister64 scratch64(allocator, masm);
9760 # else
9761 AutoScratchRegister scratch2(allocator, masm);
9762 # endif
9764 Label addFloat, updateHash, done;
9766 {
9767 ScratchTagScope tag(masm, input);
9768 masm.splitTagForTest(input, tag);
9770 Label notInt32;
9771 masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
9772 {
9773 ScratchTagScopeRelease _(&tag);
9775 masm.unboxInt32(input, scratch);
9776 masm.convertInt32ToDouble(scratch, floatReg);
9777 masm.jump(&addFloat);
9778 }
9779 masm.bind(&notInt32);
9781 Label notDouble;
9782 masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
9783 {
9784 ScratchTagScopeRelease _(&tag);
9786 masm.unboxDouble(input, floatReg);
9787 masm.canonicalizeDouble(floatReg);
9788 masm.jump(&addFloat);
9789 }
9790 masm.bind(&notDouble);
9792 Label notNull;
9793 masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
9794 {
9795 ScratchTagScopeRelease _(&tag);
9797 masm.move32(Imm32(1), scratch);
9798 masm.convertInt32ToDouble(scratch, floatReg);
9799 masm.jump(&addFloat);
9800 }
9801 masm.bind(&notNull);
9803 Label notUndefined;
9804 masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
9805 {
9806 ScratchTagScopeRelease _(&tag);
9808 masm.move32(Imm32(2), scratch);
9809 masm.convertInt32ToDouble(scratch, floatReg);
9810 masm.jump(&addFloat);
9811 }
9812 masm.bind(&notUndefined);
9814 Label notBoolean;
9815 masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
9816 {
9817 ScratchTagScopeRelease _(&tag);
9819 masm.unboxBoolean(input, scratch);
9820 masm.add32(Imm32(3), scratch);
9821 masm.convertInt32ToDouble(scratch, floatReg);
9822 masm.jump(&addFloat);
9823 }
9824 masm.bind(&notBoolean);
9826 Label notBigInt;
9827 masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
9828 {
9829 ScratchTagScopeRelease _(&tag);
9831 masm.unboxBigInt(input, scratch);
9833 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
9834 liveVolatileFloatRegs());
9835 masm.PushRegsInMask(volatileRegs);
9836 // TODO: remove floatReg, scratch, scratchJS?
9838 using Fn = uint32_t (*)(BigInt* bigInt);
9839 masm.setupUnalignedABICall(scratchJSContext);
9840 masm.loadJSContext(scratchJSContext);
9841 masm.passABIArg(scratch);
9842 masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
9843 masm.storeCallInt32Result(scratch);
9845 LiveRegisterSet ignore;
9846 ignore.add(scratch);
9847 ignore.add(scratchJSContext);
9848 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
9849 masm.jump(&updateHash);
9850 }
9851 masm.bind(&notBigInt);
9853 Label notObject;
9854 masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
9855 {
9856 ScratchTagScopeRelease _(&tag);
9858 AutoCallVM callvm(masm, this, allocator);
9859 Register obj = allocator.allocateRegister(masm);
9860 masm.unboxObject(input, obj);
9862 callvm.prepare();
9863 masm.Push(obj);
9865 using Fn = void (*)(JSContext* cx, JSObject* o);
9866 callvm.callNoResult<Fn, js::FuzzilliHashObject>();
9867 allocator.releaseRegister(obj);
9869 masm.jump(&done);
9870 }
9871 masm.bind(&notObject);
9872 {
9873 masm.move32(Imm32(0), scratch);
9874 masm.jump(&updateHash);
9875 }
9876 }
9878 {
9879 masm.bind(&addFloat);
9881 masm.loadJSContext(scratchJSContext);
9882 Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
9884 # ifdef JS_PUNBOX64
9885 masm.moveDoubleToGPR64(floatReg, scratch64);
9886 masm.move32(scratch64.get().reg, scratch);
9887 masm.rshift64(Imm32(32), scratch64);
9888 masm.add32(scratch64.get().reg, scratch);
9889 # else
9890 Register64 scratch64(scratch, scratch2);
9891 masm.moveDoubleToGPR64(floatReg, scratch64);
9892 masm.add32(scratch2, scratch);
9893 # endif
9894 }
9896 {
9897 masm.bind(&updateHash);
9899 masm.loadJSContext(scratchJSContext);
9900 Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
9901 masm.load32(addrExecHash, scratchJSContext);
9902 masm.add32(scratchJSContext, scratch);
9903 masm.rotateLeft(Imm32(1), scratch, scratch);
9904 masm.loadJSContext(scratchJSContext);
9905 masm.store32(scratch, addrExecHash);
9907 // Stats: count how many values have been folded into the hash.
9908 Address addrExecHashInputs(scratchJSContext,
9909 offsetof(JSContext, executionHashInputs));
9910 masm.load32(addrExecHashInputs, scratch);
9911 masm.add32(Imm32(1), scratch);
9912 masm.store32(scratch, addrExecHashInputs);
9913 }
9915 masm.bind(&done);
9917 AutoOutputRegister output(*this);
9918 masm.moveValue(UndefinedValue(), output.valueReg());
9919 return true;
9920 }
9921 #endif
9923 template <typename Fn, Fn fn>
9924 void CacheIRCompiler::callVM(MacroAssembler& masm) {
9925 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
9926 callVMInternal(masm, id);
9927 }
9929 void CacheIRCompiler::callVMInternal(MacroAssembler& masm, VMFunctionId id) {
9930 MOZ_ASSERT(enteredStubFrame_);
9931 if (mode_ == Mode::Ion) {
9932 TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
9933 const VMFunctionData& fun = GetVMFunction(id);
9934 uint32_t frameSize = fun.explicitStackSlots() * sizeof(void*);
9935 masm.PushFrameDescriptor(FrameType::IonICCall);
9936 masm.callJit(code);
9938 // Pop rest of the exit frame and the arguments left on the stack.
9939 int framePop =
9940 sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
9941 masm.implicitPop(frameSize + framePop);
9943 masm.freeStack(asIon()->localTracingSlots() * sizeof(Value));
9945 // Pop IonICCallFrameLayout.
9946 masm.Pop(FramePointer);
9947 masm.freeStack(IonICCallFrameLayout::Size() - sizeof(void*));
9948 return;
9949 }
9951 MOZ_ASSERT(mode_ == Mode::Baseline);
9953 TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
9955 EmitBaselineCallVM(code, masm);
9956 }
9958 bool CacheIRCompiler::isBaseline() { return mode_ == Mode::Baseline; }
9960 bool CacheIRCompiler::isIon() { return mode_ == Mode::Ion; }
9962 BaselineCacheIRCompiler* CacheIRCompiler::asBaseline() {
9963 MOZ_ASSERT(this->isBaseline());
9964 return static_cast<BaselineCacheIRCompiler*>(this);
9965 }
9967 IonCacheIRCompiler* CacheIRCompiler::asIon() {
9968 MOZ_ASSERT(this->isIon());
9969 return static_cast<IonCacheIRCompiler*>(this);
9970 }
9972 #ifdef DEBUG
9973 void CacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
9974 if (isBaseline()) {
9975 // Baseline does not have any FloatRegisters live when calling an IC stub.
9976 return;
9977 }
9979 asIon()->assertFloatRegisterAvailable(reg);
9980 }
9981 #endif
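// AutoCallVM bundles everything needed for a VM call from CacheIR code.
// The usage pattern throughout this file is, as a sketch (SomeVMFunction
// stands for any function from the VM function list):
//
//   AutoCallVM callvm(masm, this, allocator);
//   Register arg = allocator.useRegister(masm, argId);
//   callvm.prepare();    // discard the stack, enter the stub frame
//   masm.Push(arg);      // arguments are pushed in reverse order
//   using Fn = JSObject* (*)(JSContext*, HandleObject);
//   callvm.call<Fn, SomeVMFunction>();  // call and store the result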
9983 AutoCallVM::AutoCallVM(MacroAssembler& masm, CacheIRCompiler* compiler,
9984 CacheRegisterAllocator& allocator)
9985 : masm_(masm), compiler_(compiler), allocator_(allocator) {
9986 // Ion needs to `enterStubFrame` before it can callVM and it also needs to
9987 // initialize AutoSaveLiveRegisters.
9988 if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
9989 // A downcast is needed here as well, in order to pass the stub to
9990 // AutoSaveLiveRegisters.
9991 save_.emplace(*compiler_->asIon());
9992 }
9994 if (compiler->outputUnchecked_.isSome()) {
9995 output_.emplace(*compiler);
9996 }
9998 if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
9999 stubFrame_.emplace(*compiler_->asBaseline());
10000 if (output_.isSome()) {
10001 scratch_.emplace(allocator_, masm_, output_.ref());
10002 } else {
10003 scratch_.emplace(allocator_, masm_);
10004 }
10005 }
10006 }
10008 void AutoCallVM::prepare() {
10009 allocator_.discardStack(masm_);
10010 MOZ_ASSERT(compiler_ != nullptr);
10011 if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
10012 compiler_->asIon()->enterStubFrame(masm_, *save_.ptr());
10013 return;
10014 }
10015 MOZ_ASSERT(compiler_->mode_ == CacheIRCompiler::Mode::Baseline);
10016 stubFrame_->enter(masm_, scratch_.ref());
10017 }
10019 void AutoCallVM::storeResult(JSValueType returnType) {
10020 MOZ_ASSERT(returnType != JSVAL_TYPE_DOUBLE);
10022 if (returnType == JSVAL_TYPE_UNKNOWN) {
10023 masm_.storeCallResultValue(output_.ref());
10024 } else {
10025 if (output_->hasValue()) {
10026 masm_.tagValue(returnType, ReturnReg, output_->valueReg());
10027 } else {
10028 masm_.storeCallPointerResult(output_->typedReg().gpr());
10029 }
10030 }
10031 }
10033 void AutoCallVM::leaveBaselineStubFrame() {
10034 if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
10035 stubFrame_->leave(masm_);
10036 }
10037 }
10039 template <typename...>
10040 struct VMFunctionReturnType;
10042 template <class R, typename... Args>
10043 struct VMFunctionReturnType<R (*)(JSContext*, Args...)> {
10044 using LastArgument = typename LastArg<Args...>::Type;
10046 // By convention VMFunctions returning `bool` use an output parameter.
10047 using ReturnType =
10048 std::conditional_t<std::is_same_v<R, bool>, LastArgument, R>;
10049 };
10051 template <class>
10052 struct ReturnTypeToJSValueType;
10054 // Definitions for the currently used return types.
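// For example, with Fn = bool (*)(JSContext*, HandleObject, HandleValue,
// bool*), VMFunctionReturnType<Fn>::ReturnType is bool* (the output
// parameter), which the specializations below map to JSVAL_TYPE_BOOLEAN.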
10055 template <>
10056 struct ReturnTypeToJSValueType<MutableHandleValue> {
10057 static constexpr JSValueType result = JSVAL_TYPE_UNKNOWN;
10058 };
10059 template <>
10060 struct ReturnTypeToJSValueType<bool*> {
10061 static constexpr JSValueType result = JSVAL_TYPE_BOOLEAN;
10062 };
10063 template <>
10064 struct ReturnTypeToJSValueType<int32_t*> {
10065 static constexpr JSValueType result = JSVAL_TYPE_INT32;
10066 };
10067 template <>
10068 struct ReturnTypeToJSValueType<JSString*> {
10069 static constexpr JSValueType result = JSVAL_TYPE_STRING;
10070 };
10071 template <>
10072 struct ReturnTypeToJSValueType<BigInt*> {
10073 static constexpr JSValueType result = JSVAL_TYPE_BIGINT;
10074 };
10075 template <>
10076 struct ReturnTypeToJSValueType<JSObject*> {
10077 static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
10078 };
10079 template <>
10080 struct ReturnTypeToJSValueType<PropertyIteratorObject*> {
10081 static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
10082 };
10083 template <>
10084 struct ReturnTypeToJSValueType<ArrayIteratorObject*> {
10085 static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
10086 };
10087 template <>
10088 struct ReturnTypeToJSValueType<StringIteratorObject*> {
10089 static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
10090 };
10091 template <>
10092 struct ReturnTypeToJSValueType<RegExpStringIteratorObject*> {
10093 static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
10094 };
10095 template <>
10096 struct ReturnTypeToJSValueType<PlainObject*> {
10097 static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
10098 };
10099 template <>
10100 struct ReturnTypeToJSValueType<ArrayObject*> {
10101 static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
10102 };
10103 template <>
10104 struct ReturnTypeToJSValueType<TypedArrayObject*> {
10105 static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
10106 };
10108 template <typename Fn>
10109 void AutoCallVM::storeResult() {
10110 using ReturnType = typename VMFunctionReturnType<Fn>::ReturnType;
10111 storeResult(ReturnTypeToJSValueType<ReturnType>::result);
10112 }
10114 AutoScratchFloatRegister::AutoScratchFloatRegister(CacheIRCompiler* compiler,
10115 FailurePath* failure)
10116 : compiler_(compiler), failure_(failure) {
10117 // If we're compiling a Baseline IC, FloatReg0 is always available.
10118 if (!compiler_->isBaseline()) {
10119 MacroAssembler& masm = compiler_->masm;
10120 masm.push(FloatReg0);
10121 compiler->allocator.setHasAutoScratchFloatRegisterSpill(true);
10122 }
10124 if (failure_) {
10125 failure_->setHasAutoScratchFloatRegister();
10126 }
10127 }
10129 AutoScratchFloatRegister::~AutoScratchFloatRegister() {
10130 if (failure_) {
10131 failure_->clearHasAutoScratchFloatRegister();
10132 }
10134 if (!compiler_->isBaseline()) {
10135 MacroAssembler& masm = compiler_->masm;
10136 masm.pop(FloatReg0);
10137 compiler_->allocator.setHasAutoScratchFloatRegisterSpill(false);
10139 if (failure_) {
10140 Label done;
10141 masm.jump(&done);
10142 masm.bind(&failurePopReg_);
10143 masm.pop(FloatReg0);
10144 masm.jump(failure_->label());
10145 masm.bind(&done);
10146 }
10147 }
10148 }
10150 Label* AutoScratchFloatRegister::failure() {
10151 MOZ_ASSERT(failure_);
10153 if (!compiler_->isBaseline()) {
10154 return &failurePopReg_;
10155 }
10156 return failure_->labelUnchecked();