/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/CacheIRCompiler.h"

#include "mozilla/ArrayUtils.h"
#include "mozilla/FunctionTypeTraits.h"
#include "mozilla/MaybeOneOf.h"
#include "mozilla/ScopeExit.h"

#include <type_traits>
#include <utility>

#include "jslibmath.h"
#include "jsmath.h"

#include "builtin/DataViewObject.h"
#include "builtin/Object.h"
#include "gc/GCEnum.h"
#include "gc/SweepingAPI.h"  // js::gc::AutoLockStoreBuffer
#include "jit/BaselineCacheIRCompiler.h"
#include "jit/CacheIRGenerator.h"
#include "jit/IonCacheIRCompiler.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/VMFunctions.h"
#include "js/friend/DOMProxy.h"     // JS::ExpandoAndGeneration
#include "js/friend/XrayJitInfo.h"  // js::jit::GetXrayJitInfo
#include "js/ScalarType.h"          // js::Scalar::Type
#include "proxy/DOMProxy.h"
#include "proxy/Proxy.h"
#include "proxy/ScriptedProxyHandler.h"
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BigIntType.h"
#include "vm/FunctionFlags.h"  // js::FunctionFlags
#include "vm/GeneratorObject.h"
#include "vm/GetterSetter.h"
#include "vm/Interpreter.h"
#include "vm/Uint8Clamped.h"

#include "builtin/Boolean-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "jit/VMFunctionList-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::BitwiseCast;
using mozilla::Maybe;

using JS::ExpandoAndGeneration;

ValueOperand CacheRegisterAllocator::useValueRegister(MacroAssembler& masm,
                                                      ValOperandId op) {
  OperandLocation& loc = operandLocations_[op.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      currentOpRegs_.add(loc.valueReg());
      return loc.valueReg();

    case OperandLocation::ValueStack: {
      ValueOperand reg = allocateValueRegister(masm);
      popValue(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      ValueOperand reg = allocateValueRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Constant: {
      ValueOperand reg = allocateValueRegister(masm);
      masm.moveValue(loc.constant(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadReg: {
      // Temporarily add the payload register to currentOpRegs_ so
      // allocateValueRegister will stay away from it.
      currentOpRegs_.add(loc.payloadReg());
      ValueOperand reg = allocateValueRegister(masm);
      masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
      currentOpRegs_.take(loc.payloadReg());
      availableRegs_.add(loc.payloadReg());
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      ValueOperand reg = allocateValueRegister(masm);
      popPayload(masm, &loc, reg.scratchReg());
      masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::DoubleReg: {
      ValueOperand reg = allocateValueRegister(masm);
      {
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(loc.doubleReg(), reg, fpscratch);
      }
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

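// Illustrative sketch (comment only, not compiled): the guard emitters later
// in this file pair useValueRegister with a failure path like so:
//
//   ValueOperand input = allocator.useValueRegister(masm, inputId);
//   FailurePath* failure;
//   if (!addFailurePath(&failure)) {
//     return false;
//   }
//   masm.branchTestObject(Assembler::NotEqual, input, failure->label());
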
// Load a value operand directly into a float register. Caller must have
// guarded isNumber on the provided val.
void CacheRegisterAllocator::ensureDoubleRegister(MacroAssembler& masm,
                                                  NumberOperandId op,
                                                  FloatRegister dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[op.id()];

  Label failure, done;
  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      masm.ensureDouble(loc.valueReg(), dest, &failure);
      break;
    }

    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::DoubleReg: {
      masm.moveDouble(loc.doubleReg(), dest);
      return;
    }

    case OperandLocation::Constant: {
      MOZ_ASSERT(loc.constant().isNumber(),
                 "Caller must ensure the operand is a number value");
      masm.loadConstantDouble(loc.constant().toNumber(), dest);
      return;
    }

    case OperandLocation::PayloadReg: {
      // Doubles can't be stored in payload registers, so this must be an
      // int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      masm.convertInt32ToDouble(loc.payloadReg(), dest);
      return;
    }

    case OperandLocation::PayloadStack: {
      // Doubles can't be stored in payload registers, so this must be an
      // int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.convertInt32ToDouble(addr, dest);
      return;
    }

    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand type in ensureDoubleRegister");
      return;
  }
  masm.jump(&done);

  masm.bind(&failure);
  masm.assumeUnreachable(
      "Missing guard allowed non-number to hit ensureDoubleRegister");
  masm.bind(&done);
}

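// Illustrative sketch (comment only): callers satisfy the isNumber
// precondition by emitting a number guard first, so the assumeUnreachable
// path above is never taken at runtime. A hypothetical unary math op:
//
//   CacheIR:  GuardIsNumber val  ->  (consume val as a double)
//
//   allocator.ensureDoubleRegister(masm, numberId, floatScratch);
//   // ... operate on floatScratch ...
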
void CacheRegisterAllocator::copyToScratchRegister(MacroAssembler& masm,
                                                   TypedOperandId typedId,
                                                   Register dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[typedId.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      masm.unboxNonDouble(loc.valueReg(), dest, typedId.type());
      break;
    }
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::PayloadReg: {
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      masm.mov(loc.payloadReg(), dest);
      return;
    }
    case OperandLocation::PayloadStack: {
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.loadPtr(addr, dest);
      return;
    }
    case OperandLocation::DoubleReg:
    case OperandLocation::Constant:
    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand location");
  }
}

void CacheRegisterAllocator::copyToScratchValueRegister(
    MacroAssembler& masm, ValOperandId valId, ValueOperand dest) const {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  const OperandLocation& loc = operandLocations_[valId.id()];
  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      masm.moveValue(loc.valueReg(), dest);
      break;
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::Constant:
      masm.moveValue(loc.constant(), dest);
      break;
    case OperandLocation::PayloadReg:
      masm.tagValue(loc.payloadType(), loc.payloadReg(), dest);
      break;
    case OperandLocation::PayloadStack: {
      Address addr = payloadAddress(masm, &loc);
      masm.loadPtr(addr, dest.scratchReg());
      masm.tagValue(loc.payloadType(), dest.scratchReg(), dest);
      break;
    }
    case OperandLocation::DoubleReg: {
      ScratchDoubleScope fpscratch(masm);
      masm.boxDouble(loc.doubleReg(), dest, fpscratch);
      break;
    }
    case OperandLocation::Uninitialized:
      MOZ_CRASH();
  }
}

Register CacheRegisterAllocator::useRegister(MacroAssembler& masm,
                                             TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  switch (loc.kind()) {
    case OperandLocation::PayloadReg:
      currentOpRegs_.add(loc.payloadReg());
      return loc.payloadReg();

    case OperandLocation::ValueReg: {
      // It's possible the value is still boxed: as an optimization, we unbox
      // the first time we use a value as object.
      ValueOperand val = loc.valueReg();
      availableRegs_.add(val);
      Register reg = val.scratchReg();
      availableRegs_.take(reg);
      masm.unboxNonDouble(val, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      currentOpRegs_.add(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      Register reg = allocateRegister(masm);
      popPayload(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::ValueStack: {
      // The value is on the stack, but boxed. If it's on top of the stack we
      // unbox it and then remove it from the stack, else we just unbox.
      Register reg = allocateRegister(masm);
      if (loc.valueStack() == stackPushed_) {
        masm.unboxNonDouble(Address(masm.getStackPointer(), 0), reg,
                            typedId.type());
        masm.addToStackPtr(Imm32(sizeof(js::Value)));
        MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
        stackPushed_ -= sizeof(js::Value);
      } else {
        MOZ_ASSERT(loc.valueStack() < stackPushed_);
        masm.unboxNonDouble(
            Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
            reg, typedId.type());
      }
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      Register reg = allocateRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.unboxNonDouble(addr, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::Constant: {
      Value v = loc.constant();
      Register reg = allocateRegister(masm);
      if (v.isString()) {
        masm.movePtr(ImmGCPtr(v.toString()), reg);
      } else if (v.isSymbol()) {
        masm.movePtr(ImmGCPtr(v.toSymbol()), reg);
      } else if (v.isBigInt()) {
        masm.movePtr(ImmGCPtr(v.toBigInt()), reg);
      } else if (v.isBoolean()) {
        masm.movePtr(ImmWord(v.toBoolean() ? 1 : 0), reg);
      } else {
        MOZ_CRASH("Unexpected Value");
      }
      loc.setPayloadReg(reg, v.extractNonDoubleType());
      return reg;
    }

    case OperandLocation::DoubleReg:
    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

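// Summary of the location transitions useRegister performs (illustrative):
//
//   ValueReg      -> PayloadReg  (unbox in place, reusing the scratch reg)
//   ValueStack    -> PayloadReg  (unbox from the stack; pop if on top)
//   BaselineFrame -> PayloadReg  (unbox from the frame slot)
//   Constant      -> PayloadReg  (materialize the GC pointer or boolean)
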
ConstantOrRegister CacheRegisterAllocator::useConstantOrRegister(
    MacroAssembler& masm, ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[val.id()];
  switch (loc.kind()) {
    case OperandLocation::Constant:
      return loc.constant();

    case OperandLocation::PayloadReg:
    case OperandLocation::PayloadStack: {
      JSValueType payloadType = loc.payloadType();
      Register reg = useRegister(masm, TypedOperandId(val, payloadType));
      return TypedOrValueRegister(MIRTypeFromValueType(payloadType),
                                  AnyRegister(reg));
    }

    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      return TypedOrValueRegister(useValueRegister(masm, val));

    case OperandLocation::DoubleReg:
      return TypedOrValueRegister(MIRType::Double,
                                  AnyRegister(loc.doubleReg()));

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

Register CacheRegisterAllocator::defineRegister(MacroAssembler& masm,
                                                TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

  Register reg = allocateRegister(masm);
  loc.setPayloadReg(reg, typedId.type());
  return reg;
}

ValueOperand CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm,
                                                         ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[val.id()];
  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

  ValueOperand reg = allocateValueRegister(masm);
  loc.setValueReg(reg);
  return reg;
}

void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler& masm) {
  // See if any operands are dead so we can reuse their registers. Note that
  // we skip the input operands, as those are also used by failure paths, and
  // we currently don't track those uses.
  for (size_t i = writer_.numInputOperands(); i < operandLocations_.length();
       i++) {
    if (!writer_.operandIsDead(i, currentInstruction_)) {
      continue;
    }

    OperandLocation& loc = operandLocations_[i];
    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        availableRegs_.add(loc.payloadReg());
        break;
      case OperandLocation::ValueReg:
        availableRegs_.add(loc.valueReg());
        break;
      case OperandLocation::PayloadStack:
        masm.propagateOOM(freePayloadSlots_.append(loc.payloadStack()));
        break;
      case OperandLocation::ValueStack:
        masm.propagateOOM(freeValueSlots_.append(loc.valueStack()));
        break;
      case OperandLocation::Uninitialized:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        break;
    }
    loc.setUninitialized();
  }
}

void CacheRegisterAllocator::discardStack(MacroAssembler& masm) {
  // This should only be called when we are no longer using the operands,
  // as we're discarding everything from the native stack. Set all operand
  // locations to Uninitialized to catch bugs.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    operandLocations_[i].setUninitialized();
  }

  if (stackPushed_ > 0) {
    masm.addToStackPtr(Imm32(stackPushed_));
    stackPushed_ = 0;
  }
  freePayloadSlots_.clear();
  freeValueSlots_.clear();
}

Register CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  if (availableRegs_.empty()) {
    freeDeadOperandLocations(masm);
  }

  if (availableRegs_.empty()) {
    // Still no registers available, try to spill unused operands to
    // the stack.
    for (size_t i = 0; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.kind() == OperandLocation::PayloadReg) {
        Register reg = loc.payloadReg();
        if (currentOpRegs_.has(reg)) {
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // We got a register, so break out of the loop.
      }
      if (loc.kind() == OperandLocation::ValueReg) {
        ValueOperand reg = loc.valueReg();
        if (currentOpRegs_.aliases(reg)) {
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // Break out of the loop.
      }
    }
  }

  if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
    Register reg = availableRegsAfterSpill_.takeAny();
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));

    availableRegs_.add(reg);
  }

  // At this point, there must be a free register.
  MOZ_RELEASE_ASSERT(!availableRegs_.empty());

  Register reg = availableRegs_.takeAny();
  currentOpRegs_.add(reg);
  return reg;
}

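// Allocation strategy recap (illustrative): allocateRegister tries, in order,
//   1. registers freed by dead operands (freeDeadOperandLocations),
//   2. spilling an operand register not used by the current instruction,
//   3. pushing a live register from availableRegsAfterSpill_; it is saved on
//      the stack here and restored later by restoreInputState.
// Step 3 is why the MOZ_RELEASE_ASSERT above holds even on register-poor
// targets.
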
void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm,
                                                   Register reg) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  // Fixed registers should be allocated first, to ensure they're
  // still available.
  MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");

  freeDeadOperandLocations(masm);

  if (availableRegs_.has(reg)) {
    availableRegs_.take(reg);
    currentOpRegs_.add(reg);
    return;
  }

  // Register may be available only after spilling contents.
  if (availableRegsAfterSpill_.has(reg)) {
    availableRegsAfterSpill_.take(reg);
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
    currentOpRegs_.add(reg);
    return;
  }

  // The register must be used by some operand. Spill it to the stack.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.kind() == OperandLocation::PayloadReg) {
      if (loc.payloadReg() != reg) {
        continue;
      }

      spillOperandToStackOrRegister(masm, &loc);
      currentOpRegs_.add(reg);
      return;
    }
    if (loc.kind() == OperandLocation::ValueReg) {
      if (!loc.valueReg().aliases(reg)) {
        continue;
      }

      ValueOperand valueReg = loc.valueReg();
      spillOperandToStackOrRegister(masm, &loc);

      availableRegs_.add(valueReg);
      availableRegs_.take(reg);
      currentOpRegs_.add(reg);
      return;
    }
  }

  MOZ_CRASH("Invalid register");
}

void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler& masm,
                                                        ValueOperand reg) {
#ifdef JS_NUNBOX32
  allocateFixedRegister(masm, reg.payloadReg());
  allocateFixedRegister(masm, reg.typeReg());
#else
  allocateFixedRegister(masm, reg.valueReg());
#endif
}

#ifdef JS_NUNBOX32
// Possible miscompilation in clang-12 (bug 1689641)
MOZ_NEVER_INLINE
#endif
ValueOperand CacheRegisterAllocator::allocateValueRegister(
    MacroAssembler& masm) {
#ifdef JS_NUNBOX32
  Register reg1 = allocateRegister(masm);
  Register reg2 = allocateRegister(masm);
  return ValueOperand(reg1, reg2);
#else
  Register reg = allocateRegister(masm);
  return ValueOperand(reg);
#endif
}

bool CacheRegisterAllocator::init() {
  if (!origInputLocations_.resize(writer_.numInputOperands())) {
    return false;
  }
  if (!operandLocations_.resize(writer_.numOperandIds())) {
    return false;
  }
  return true;
}

void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
  // Registers not in availableRegs_ and not used by input operands are
  // available after being spilled.
  availableRegsAfterSpill_.set() = GeneralRegisterSet::Intersect(
      GeneralRegisterSet::Not(availableRegs_.set()),
      GeneralRegisterSet::Not(inputRegisterSet()));
}

void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm) {
  // If IC inputs alias each other, make sure they are stored in different
  // locations so we don't have to deal with this complexity in the rest of
  // the allocator.
  //
  // Note that this can happen in IonMonkey with something like |o.foo = o|
  // or |o[i] = i|.

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(origInputLocations_.length() == numInputs);

  for (size_t i = 1; i < numInputs; i++) {
    OperandLocation& loc1 = operandLocations_[i];
    if (!loc1.isInRegister()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      OperandLocation& loc2 = operandLocations_[j];
      if (!loc1.aliasesReg(loc2)) {
        continue;
      }

      // loc1 and loc2 alias so we spill one of them. If one is a
      // ValueReg and the other is a PayloadReg, we have to spill the
      // PayloadReg: spilling the ValueReg instead would leave its type
      // register unallocated on 32-bit platforms.
      if (loc1.kind() == OperandLocation::ValueReg) {
        spillOperandToStack(masm, &loc2);
      } else {
        MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
        spillOperandToStack(masm, &loc1);
        break;  // Spilled loc1, so nothing else will alias it.
      }
    }
  }

#ifdef DEBUG
  assertValidState();
#endif
}

GeneralRegisterSet CacheRegisterAllocator::inputRegisterSet() const {
  MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());

  AllocatableGeneralRegisterSet result;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(loc == origInputLocations_[i]);

    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        result.addUnchecked(loc.payloadReg());
        continue;
      case OperandLocation::ValueReg:
        result.addUnchecked(loc.valueReg());
        continue;
      case OperandLocation::PayloadStack:
      case OperandLocation::ValueStack:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        continue;
      case OperandLocation::Uninitialized:
        break;
    }
    MOZ_CRASH("Invalid kind");
  }

  return result.set();
}

JSValueType CacheRegisterAllocator::knownType(ValOperandId val) const {
  const OperandLocation& loc = operandLocations_[val.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      return JSVAL_TYPE_UNKNOWN;

    case OperandLocation::PayloadStack:
    case OperandLocation::PayloadReg:
      return loc.payloadType();

    case OperandLocation::Constant:
      return loc.constant().isDouble() ? JSVAL_TYPE_DOUBLE
                                       : loc.constant().extractNonDoubleType();

    case OperandLocation::DoubleReg:
      return JSVAL_TYPE_DOUBLE;

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}

void CacheRegisterAllocator::initInputLocation(
    size_t i, const TypedOrValueRegister& reg) {
  if (reg.hasValue()) {
    initInputLocation(i, reg.valueReg());
  } else if (reg.typedReg().isFloat()) {
    MOZ_ASSERT(reg.type() == MIRType::Double);
    initInputLocation(i, reg.typedReg().fpu());
  } else {
    initInputLocation(i, reg.typedReg().gpr(),
                      ValueTypeFromMIRType(reg.type()));
  }
}

void CacheRegisterAllocator::initInputLocation(
    size_t i, const ConstantOrRegister& value) {
  if (value.constant()) {
    initInputLocation(i, value.value());
  } else {
    initInputLocation(i, value.reg());
  }
}

void CacheRegisterAllocator::spillOperandToStack(MacroAssembler& masm,
                                                 OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  if (loc->kind() == OperandLocation::ValueReg) {
    if (!freeValueSlots_.empty()) {
      uint32_t stackPos = freeValueSlots_.popCopy();
      MOZ_ASSERT(stackPos <= stackPushed_);
      masm.storeValue(loc->valueReg(),
                      Address(masm.getStackPointer(), stackPushed_ - stackPos));
      loc->setValueStack(stackPos);
      return;
    }
    stackPushed_ += sizeof(js::Value);
    masm.pushValue(loc->valueReg());
    loc->setValueStack(stackPushed_);
    return;
  }

  MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);

  if (!freePayloadSlots_.empty()) {
    uint32_t stackPos = freePayloadSlots_.popCopy();
    MOZ_ASSERT(stackPos <= stackPushed_);
    masm.storePtr(loc->payloadReg(),
                  Address(masm.getStackPointer(), stackPushed_ - stackPos));
    loc->setPayloadStack(stackPos, loc->payloadType());
    return;
  }
  stackPushed_ += sizeof(uintptr_t);
  masm.push(loc->payloadReg());
  loc->setPayloadStack(stackPushed_, loc->payloadType());
}

void CacheRegisterAllocator::spillOperandToStackOrRegister(
    MacroAssembler& masm, OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  // If enough registers are available, use them.
  if (loc->kind() == OperandLocation::ValueReg) {
    static const size_t BoxPieces = sizeof(Value) / sizeof(uintptr_t);
    if (availableRegs_.set().size() >= BoxPieces) {
      ValueOperand reg = availableRegs_.takeAnyValue();
      masm.moveValue(loc->valueReg(), reg);
      loc->setValueReg(reg);
      return;
    }
  } else {
    MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
    if (!availableRegs_.empty()) {
      Register reg = availableRegs_.takeAny();
      masm.movePtr(loc->payloadReg(), reg);
      loc->setPayloadReg(reg, loc->payloadType());
      return;
    }
  }

  // Not enough registers available, spill to the stack.
  spillOperandToStack(masm, loc);
}

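// Note (illustrative): BoxPieces above is 1 on 64-bit platforms (a boxed
// Value fits in one register) and 2 on 32-bit NUNBOX platforms (type tag +
// payload), so a ValueReg is moved to registers only when a whole boxed
// Value's worth of spare registers is available.
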
void CacheRegisterAllocator::popPayload(MacroAssembler& masm,
                                        OperandLocation* loc, Register dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

  // The payload is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->payloadStack() == stackPushed_) {
    masm.pop(dest);
    stackPushed_ -= sizeof(uintptr_t);
  } else {
    MOZ_ASSERT(loc->payloadStack() < stackPushed_);
    masm.loadPtr(payloadAddress(masm, loc), dest);
    masm.propagateOOM(freePayloadSlots_.append(loc->payloadStack()));
  }

  loc->setPayloadReg(dest, loc->payloadType());
}

Address CacheRegisterAllocator::valueAddress(MacroAssembler& masm,
                                             const OperandLocation* loc) const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  return Address(masm.getStackPointer(), stackPushed_ - loc->valueStack());
}

Address CacheRegisterAllocator::payloadAddress(
    MacroAssembler& masm, const OperandLocation* loc) const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  return Address(masm.getStackPointer(), stackPushed_ - loc->payloadStack());
}

void CacheRegisterAllocator::popValue(MacroAssembler& masm,
                                      OperandLocation* loc, ValueOperand dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));

  // The Value is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->valueStack() == stackPushed_) {
    masm.popValue(dest);
    stackPushed_ -= sizeof(js::Value);
  } else {
    MOZ_ASSERT(loc->valueStack() < stackPushed_);
    masm.loadValue(
        Address(masm.getStackPointer(), stackPushed_ - loc->valueStack()),
        dest);
    masm.propagateOOM(freeValueSlots_.append(loc->valueStack()));
  }

  loc->setValueReg(dest);
}

#ifdef DEBUG
void CacheRegisterAllocator::assertValidState() const {
  // Assert different operands don't have aliasing storage. We depend on this
  // when spilling registers, for instance.

  if (!JitOptions.fullDebugChecks) {
    return;
  }

  for (size_t i = 0; i < operandLocations_.length(); i++) {
    const auto& loc1 = operandLocations_[i];
    if (loc1.isUninitialized()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      const auto& loc2 = operandLocations_[j];
      if (loc2.isUninitialized()) {
        continue;
      }
      MOZ_ASSERT(!loc1.aliasesReg(loc2));
    }
  }
}
#endif

bool OperandLocation::aliasesReg(const OperandLocation& other) const {
  MOZ_ASSERT(&other != this);

  switch (other.kind_) {
    case PayloadReg:
      return aliasesReg(other.payloadReg());
    case ValueReg:
      return aliasesReg(other.valueReg());
    case PayloadStack:
    case ValueStack:
    case BaselineFrame:
    case Constant:
    case DoubleReg:
      return false;
    case Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}

void CacheRegisterAllocator::restoreInputState(MacroAssembler& masm,
                                               bool shouldDiscardStack) {
  size_t numInputOperands = origInputLocations_.length();
  MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);

  for (size_t j = 0; j < numInputOperands; j++) {
    const OperandLocation& dest = origInputLocations_[j];
    OperandLocation& cur = operandLocations_[j];
    if (dest == cur) {
      continue;
    }

    auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });

    // We have a cycle if a destination register will be used later
    // as source register. If that happens, just push the current value
    // on the stack and later get it from there.
    for (size_t k = j + 1; k < numInputOperands; k++) {
      OperandLocation& laterSource = operandLocations_[k];
      if (dest.aliasesReg(laterSource)) {
        spillOperandToStack(masm, &laterSource);
      }
    }

    if (dest.kind() == OperandLocation::ValueReg) {
      // We have to restore a Value register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          masm.moveValue(cur.valueReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadReg:
          masm.tagValue(cur.payloadType(), cur.payloadReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadStack: {
          Register scratch = dest.valueReg().scratchReg();
          popPayload(masm, &cur, scratch);
          masm.tagValue(cur.payloadType(), scratch, dest.valueReg());
          continue;
        }
        case OperandLocation::ValueStack:
          popValue(masm, &cur, dest.valueReg());
          continue;
        case OperandLocation::DoubleReg:
          masm.boxDouble(cur.doubleReg(), dest.valueReg(), cur.doubleReg());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::PayloadReg) {
      // We have to restore a payload register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(cur.valueReg(), dest.payloadReg(),
                              dest.payloadType());
          continue;
        case OperandLocation::PayloadReg:
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          masm.mov(cur.payloadReg(), dest.payloadReg());
          continue;
        case OperandLocation::PayloadStack: {
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          popPayload(masm, &cur, dest.payloadReg());
          continue;
        }
        case OperandLocation::ValueStack:
          MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
          MOZ_ASSERT(cur.valueStack() <= stackPushed_);
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(
              Address(masm.getStackPointer(), stackPushed_ - cur.valueStack()),
              dest.payloadReg(), dest.payloadType());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::DoubleReg:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::Constant ||
               dest.kind() == OperandLocation::BaselineFrame ||
               dest.kind() == OperandLocation::DoubleReg) {
      // Nothing to do.
      continue;
    }

    MOZ_CRASH("Invalid kind");
  }

  for (const SpilledRegister& spill : spilledRegs_) {
    MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

    if (spill.stackPushed == stackPushed_) {
      masm.pop(spill.reg);
      stackPushed_ -= sizeof(uintptr_t);
    } else {
      MOZ_ASSERT(spill.stackPushed < stackPushed_);
      masm.loadPtr(
          Address(masm.getStackPointer(), stackPushed_ - spill.stackPushed),
          spill.reg);
    }
  }

  if (shouldDiscardStack) {
    discardStack(masm);
  }
}

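// Example of the cycle handling above (illustrative): suppose input 0 must
// move r2 -> r1 while input 1 must move r1 -> r2. Restoring input 0 first
// would clobber input 1's current value in r1, so the inner loop spills
// input 1 to the stack; it is then restored from that stack slot on the
// next iteration.
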
size_t CacheIRStubInfo::stubDataSize() const {
  size_t field = 0;
  size_t size = 0;
  while (true) {
    StubField::Type type = fieldType(field++);
    if (type == StubField::Type::Limit) {
      return size;
    }
    size += StubField::sizeInBytes(type);
  }
}

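// Illustrative layout (assuming a 64-bit platform): for the field sequence
// (Shape, RawInt32, Value, Limit), stubDataSize() returns 8 + 8 + 8 = 24
// bytes; RawInt32 is stored word-sized and the Limit terminator contributes
// no data.
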
template <typename T>
static GCPtr<T>* AsGCPtr(void* ptr) {
  return static_cast<GCPtr<T>*>(ptr);
}

void CacheIRStubInfo::replaceStubRawWord(uint8_t* stubData, uint32_t offset,
                                         uintptr_t oldWord,
                                         uintptr_t newWord) const {
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
  uintptr_t* addr = reinterpret_cast<uintptr_t*>(stubData + offset);
  MOZ_ASSERT(*addr == oldWord);
  *addr = newWord;
}

template <class Stub, StubField::Type type>
typename MapStubFieldToType<type>::WrappedType& CacheIRStubInfo::getStubField(
    Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  return *reinterpret_cast<WrappedType*>(stubData + offset);
}

#define INSTANTIATE_GET_STUB_FIELD(Type)                                   \
  template typename MapStubFieldToType<Type>::WrappedType&                 \
  CacheIRStubInfo::getStubField<ICCacheIRStub, Type>(ICCacheIRStub * stub, \
                                                     uint32_t offset) const;
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Shape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakShape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakGetterSetter)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::JSObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Symbol)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::String)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakBaseScript)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Value)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Id)
#undef INSTANTIATE_GET_STUB_FIELD

template <class Stub, class T>
T* CacheIRStubInfo::getPtrStubField(Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  return *reinterpret_cast<T**>(stubData + offset);
}

template gc::AllocSite* CacheIRStubInfo::getPtrStubField(ICCacheIRStub* stub,
                                                         uint32_t offset) const;

template <StubField::Type type, typename V>
static void InitWrappedPtr(void* ptr, V val) {
  using RawType = typename MapStubFieldToType<type>::RawType;
  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  auto* wrapped = static_cast<WrappedType*>(ptr);
  new (wrapped) WrappedType(mozilla::BitwiseCast<RawType>(val));
}

static void InitWordStubField(StubField::Type type, void* dest,
                              uintptr_t value) {
  MOZ_ASSERT(StubField::sizeIsWord(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uintptr_t)) == 0,
             "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
      *static_cast<uintptr_t*>(dest) = value;
      break;
    case StubField::Type::Shape:
      InitWrappedPtr<StubField::Type::Shape>(dest, value);
      break;
    case StubField::Type::WeakShape:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakShape>(dest, value);
      break;
    case StubField::Type::WeakGetterSetter:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakGetterSetter>(dest, value);
      break;
    case StubField::Type::JSObject:
      InitWrappedPtr<StubField::Type::JSObject>(dest, value);
      break;
    case StubField::Type::WeakObject:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakObject>(dest, value);
      break;
    case StubField::Type::Symbol:
      InitWrappedPtr<StubField::Type::Symbol>(dest, value);
      break;
    case StubField::Type::String:
      InitWrappedPtr<StubField::Type::String>(dest, value);
      break;
    case StubField::Type::WeakBaseScript:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakBaseScript>(dest, value);
      break;
    case StubField::Type::JitCode:
      InitWrappedPtr<StubField::Type::JitCode>(dest, value);
      break;
    case StubField::Type::Id:
      AsGCPtr<jsid>(dest)->init(jsid::fromRawBits(value));
      break;
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
    case StubField::Type::Value:
    case StubField::Type::Limit:
      MOZ_CRASH("Invalid type");
  }
}

static void InitInt64StubField(StubField::Type type, void* dest,
                               uint64_t value) {
  MOZ_ASSERT(StubField::sizeIsInt64(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uint64_t)) == 0, "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
      *static_cast<uint64_t*>(dest) = value;
      break;
    case StubField::Type::Value:
      AsGCPtr<Value>(dest)->init(Value::fromRawBits(value));
      break;
    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
    case StubField::Type::Shape:
    case StubField::Type::WeakShape:
    case StubField::Type::WeakGetterSetter:
    case StubField::Type::JSObject:
    case StubField::Type::WeakObject:
    case StubField::Type::Symbol:
    case StubField::Type::String:
    case StubField::Type::WeakBaseScript:
    case StubField::Type::JitCode:
    case StubField::Type::Id:
    case StubField::Type::Limit:
      MOZ_CRASH("Invalid type");
  }
}

void CacheIRWriter::copyStubData(uint8_t* dest) const {
  MOZ_ASSERT(!failed());

  for (const StubField& field : stubFields_) {
    if (field.sizeIsWord()) {
      InitWordStubField(field.type(), dest, field.asWord());
      dest += sizeof(uintptr_t);
    } else {
      InitInt64StubField(field.type(), dest, field.asInt64());
      dest += sizeof(uint64_t);
    }
  }
}

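// Illustrative walk of copyStubData (same field sequence as above): a Shape
// field is written via InitWordStubField and advances dest by
// sizeof(uintptr_t); a Value field is written via InitInt64StubField and
// advances dest by sizeof(uint64_t). The total written matches
// CacheIRStubInfo::stubDataSize().
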
ICCacheIRStub* ICCacheIRStub::clone(JSRuntime* rt, ICStubSpace& newSpace) {
  const CacheIRStubInfo* info = stubInfo();
  MOZ_ASSERT(info->makesGCCalls());

  size_t bytesNeeded = info->stubDataOffset() + info->stubDataSize();

  AutoEnterOOMUnsafeRegion oomUnsafe;
  void* newStubMem = newSpace.alloc(bytesNeeded);
  if (!newStubMem) {
    oomUnsafe.crash("ICCacheIRStub::clone");
  }

  ICCacheIRStub* newStub = new (newStubMem) ICCacheIRStub(*this);

  const uint8_t* src = this->stubDataStart();
  uint8_t* dest = newStub->stubDataStart();

  // Because this can be called during sweeping when discarding JIT code, we
  // have to lock the store buffer.
  gc::AutoLockStoreBuffer lock(rt);

  uint32_t field = 0;
  while (true) {
    StubField::Type type = info->fieldType(field);
    if (type == StubField::Type::Limit) {
      break;  // Done.
    }

    if (StubField::sizeIsWord(type)) {
      const uintptr_t* srcField = reinterpret_cast<const uintptr_t*>(src);
      InitWordStubField(type, dest, *srcField);
      src += sizeof(uintptr_t);
      dest += sizeof(uintptr_t);
    } else {
      const uint64_t* srcField = reinterpret_cast<const uint64_t*>(src);
      InitInt64StubField(type, dest, *srcField);
      src += sizeof(uint64_t);
      dest += sizeof(uint64_t);
    }

    field++;
  }

  return newStub;
}

template <typename T>
static inline bool ShouldTraceWeakEdgeInStub(JSTracer* trc) {
  if constexpr (std::is_same_v<T, IonICStub>) {
    // 'Weak' edges are traced strongly in IonICs.
    return true;
  } else {
    static_assert(std::is_same_v<T, ICCacheIRStub>);
    return trc->traceWeakEdges();
  }
}

template <typename T>
void jit::TraceCacheIRStub(JSTracer* trc, T* stub,
                           const CacheIRStubInfo* stubInfo) {
  using Type = StubField::Type;

  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case Type::RawInt32:
      case Type::RawPointer:
      case Type::RawInt64:
      case Type::Double:
        break;
      case Type::Shape: {
        // For CCW IC stubs, we can store same-zone but cross-compartment
        // shapes. Use TraceSameZoneCrossCompartmentEdge to not assert in the
        // GC. Note: CacheIRWriter::writeShapeField asserts we never store
        // cross-zone shapes.
        GCPtr<Shape*>& shapeField =
            stubInfo->getStubField<T, Type::Shape>(stub, offset);
        TraceSameZoneCrossCompartmentEdge(trc, &shapeField, "cacheir-shape");
        break;
      }
      case Type::WeakShape:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          WeakHeapPtr<Shape*>& shapeField =
              stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
          if (shapeField) {
            TraceSameZoneCrossCompartmentEdge(trc, &shapeField,
                                              "cacheir-weak-shape");
          }
        }
        break;
      case Type::WeakGetterSetter:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc,
              &stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset),
              "cacheir-weak-getter-setter");
        }
        break;
      case Type::JSObject: {
        TraceEdge(trc, &stubInfo->getStubField<T, Type::JSObject>(stub, offset),
                  "cacheir-object");
        break;
      }
      case Type::WeakObject:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc, &stubInfo->getStubField<T, Type::WeakObject>(stub, offset),
              "cacheir-weak-object");
        }
        break;
      case Type::Symbol:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Symbol>(stub, offset),
                  "cacheir-symbol");
        break;
      case Type::String:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::String>(stub, offset),
                  "cacheir-string");
        break;
      case Type::WeakBaseScript:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc,
              &stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset),
              "cacheir-weak-script");
        }
        break;
      case Type::JitCode:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::JitCode>(stub, offset),
                  "cacheir-jitcode");
        break;
      case Type::Id:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Id>(stub, offset),
                  "cacheir-id");
        break;
      case Type::Value:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Value>(stub, offset),
                  "cacheir-value");
        break;
      case Type::AllocSite: {
        gc::AllocSite* site =
            stubInfo->getPtrStubField<T, gc::AllocSite>(stub, offset);
        site->trace(trc);
        break;
      }
      case Type::Limit:
        return;  // Done.
    }
    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template void jit::TraceCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                    const CacheIRStubInfo* stubInfo);

template void jit::TraceCacheIRStub(JSTracer* trc, IonICStub* stub,
                                    const CacheIRStubInfo* stubInfo);

template <typename T>
bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
                               const CacheIRStubInfo* stubInfo) {
  using Type = StubField::Type;

  bool isDead = false;

  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case Type::WeakShape: {
        WeakHeapPtr<Shape*>& shapeField =
            stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
        auto r = TraceWeakEdge(trc, &shapeField, "cacheir-weak-shape");
        if (r.isDead()) {
          isDead = true;
        }
        break;
      }
      case Type::WeakObject: {
        WeakHeapPtr<JSObject*>& objectField =
            stubInfo->getStubField<T, Type::WeakObject>(stub, offset);
        auto r = TraceWeakEdge(trc, &objectField, "cacheir-weak-object");
        if (r.isDead()) {
          isDead = true;
        }
        break;
      }
      case Type::WeakBaseScript: {
        WeakHeapPtr<BaseScript*>& scriptField =
            stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset);
        auto r = TraceWeakEdge(trc, &scriptField, "cacheir-weak-script");
        if (r.isDead()) {
          isDead = true;
        }
        break;
      }
      case Type::WeakGetterSetter: {
        WeakHeapPtr<GetterSetter*>& getterSetterField =
            stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset);
        auto r = TraceWeakEdge(trc, &getterSetterField,
                               "cacheir-weak-getter-setter");
        if (r.isDead()) {
          isDead = true;
        }
        break;
      }
      case Type::Limit:
        // Done.
        return !isDead;
      case Type::RawInt32:
      case Type::RawPointer:
      case Type::Shape:
      case Type::JSObject:
      case Type::Symbol:
      case Type::String:
      case Type::JitCode:
      case Type::Id:
      case Type::AllocSite:
      case Type::RawInt64:
      case Type::Value:
      case Type::Double:
        break;  // Skip non-weak fields.
    }
    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template bool jit::TraceWeakCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                        const CacheIRStubInfo* stubInfo);

template bool jit::TraceWeakCacheIRStub(JSTracer* trc, IonICStub* stub,
                                        const CacheIRStubInfo* stubInfo);

bool CacheIRWriter::stubDataEquals(const uint8_t* stubData) const {
  MOZ_ASSERT(!failed());

  const uintptr_t* stubDataWords = reinterpret_cast<const uintptr_t*>(stubData);

  for (const StubField& field : stubFields_) {
    if (field.sizeIsWord()) {
      if (field.asWord() != *stubDataWords) {
        return false;
      }
      stubDataWords++;
      continue;
    }

    if (field.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords)) {
      return false;
    }
    stubDataWords += sizeof(uint64_t) / sizeof(uintptr_t);
  }

  return true;
}

bool CacheIRWriter::stubDataEqualsIgnoring(const uint8_t* stubData,
                                           uint32_t ignoreOffset) const {
  MOZ_ASSERT(!failed());

  uint32_t offset = 0;
  for (const StubField& field : stubFields_) {
    if (offset != ignoreOffset) {
      if (field.sizeIsWord()) {
        uintptr_t raw = *reinterpret_cast<const uintptr_t*>(stubData + offset);
        if (field.asWord() != raw) {
          return false;
        }
      } else {
        uint64_t raw = *reinterpret_cast<const uint64_t*>(stubData + offset);
        if (field.asInt64() != raw) {
          return false;
        }
      }
    }
    offset += StubField::sizeInBytes(field.type());
  }

  return true;
}

HashNumber CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l) {
  HashNumber hash = mozilla::HashBytes(l.code, l.length);
  hash = mozilla::AddToHash(hash, uint32_t(l.kind));
  hash = mozilla::AddToHash(hash, uint32_t(l.engine));
  return hash;
}

bool CacheIRStubKey::match(const CacheIRStubKey& entry,
                           const CacheIRStubKey::Lookup& l) {
  if (entry.stubInfo->kind() != l.kind) {
    return false;
  }

  if (entry.stubInfo->engine() != l.engine) {
    return false;
  }

  if (entry.stubInfo->codeLength() != l.length) {
    return false;
  }

  if (!mozilla::ArrayEqual(entry.stubInfo->code(), l.code, l.length)) {
    return false;
  }

  return true;
}

CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
    : CacheIRReader(stubInfo->code(),
                    stubInfo->code() + stubInfo->codeLength()) {}

CacheIRStubInfo* CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine,
                                      bool makesGCCalls,
                                      uint32_t stubDataOffset,
                                      const CacheIRWriter& writer) {
  size_t numStubFields = writer.numStubFields();
  size_t bytesNeeded =
      sizeof(CacheIRStubInfo) + writer.codeLength() +
      (numStubFields + 1);  // +1 for the GCType::Limit terminator.
  uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
  if (!p) {
    return nullptr;
  }

  // Copy the CacheIR code.
  uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
  mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());

  static_assert(sizeof(StubField::Type) == sizeof(uint8_t),
                "StubField::Type must fit in uint8_t");

  // Copy the stub field types.
  uint8_t* fieldTypes = codeStart + writer.codeLength();
  for (size_t i = 0; i < numStubFields; i++) {
    fieldTypes[i] = uint8_t(writer.stubFieldType(i));
  }
  fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);

  return new (p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset,
                                 writer.codeLength());
}

bool OperandLocation::operator==(const OperandLocation& other) const {
  if (kind_ != other.kind_) {
    return false;
  }

  switch (kind()) {
    case Uninitialized:
      return true;
    case PayloadReg:
      return payloadReg() == other.payloadReg() &&
             payloadType() == other.payloadType();
    case ValueReg:
      return valueReg() == other.valueReg();
    case PayloadStack:
      return payloadStack() == other.payloadStack() &&
             payloadType() == other.payloadType();
    case ValueStack:
      return valueStack() == other.valueStack();
    case BaselineFrame:
      return baselineFrameSlot() == other.baselineFrameSlot();
    case Constant:
      return constant() == other.constant();
    case DoubleReg:
      return doubleReg() == other.doubleReg();
  }

  MOZ_CRASH("Invalid OperandLocation kind");
}

AutoOutputRegister::AutoOutputRegister(CacheIRCompiler& compiler)
    : output_(compiler.outputUnchecked_.ref()), alloc_(compiler.allocator) {
  if (output_.hasValue()) {
    alloc_.allocateFixedValueRegister(compiler.masm, output_.valueReg());
  } else if (!output_.typedReg().isFloat()) {
    alloc_.allocateFixedRegister(compiler.masm, output_.typedReg().gpr());
  }
}

AutoOutputRegister::~AutoOutputRegister() {
  if (output_.hasValue()) {
    alloc_.releaseValueRegister(output_.valueReg());
  } else if (!output_.typedReg().isFloat()) {
    alloc_.releaseRegister(output_.typedReg().gpr());
  }
}

bool FailurePath::canShareFailurePath(const FailurePath& other) const {
  if (stackPushed_ != other.stackPushed_) {
    return false;
  }

  if (spilledRegs_.length() != other.spilledRegs_.length()) {
    return false;
  }

  for (size_t i = 0; i < spilledRegs_.length(); i++) {
    if (spilledRegs_[i] != other.spilledRegs_[i]) {
      return false;
    }
  }

  MOZ_ASSERT(inputs_.length() == other.inputs_.length());

  for (size_t i = 0; i < inputs_.length(); i++) {
    if (inputs_[i] != other.inputs_[i]) {
      return false;
    }
  }
  return true;
}

bool CacheIRCompiler::addFailurePath(FailurePath** failure) {
#ifdef DEBUG
  allocator.setAddedFailurePath();
#endif
  MOZ_ASSERT(!allocator.hasAutoScratchFloatRegisterSpill());

  FailurePath newFailure;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    if (!newFailure.appendInput(allocator.operandLocation(i))) {
      return false;
    }
  }
  if (!newFailure.setSpilledRegs(allocator.spilledRegs())) {
    return false;
  }
  newFailure.setStackPushed(allocator.stackPushed());

  // Reuse the previous failure path if the current one is the same, to
  // avoid emitting duplicate code.
  if (failurePaths.length() > 0 &&
      failurePaths.back().canShareFailurePath(newFailure)) {
    *failure = &failurePaths.back();
    return true;
  }

  if (!failurePaths.append(std::move(newFailure))) {
    return false;
  }

  *failure = &failurePaths.back();
  return true;
}

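// Illustrative use of addFailurePath (the pattern every fallible guard below
// follows):
//
//   FailurePath* failure;
//   if (!addFailurePath(&failure)) {
//     return false;  // OOM while snapshotting the allocator state.
//   }
//   masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
//
// Consecutive guards that don't move any operands share one FailurePath via
// canShareFailurePath, so the restore code is emitted only once.
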
bool CacheIRCompiler::emitFailurePath(size_t index) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  FailurePath& failure = failurePaths[index];

  allocator.setStackPushed(failure.stackPushed());

  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    allocator.setOperandLocation(i, failure.input(i));
  }

  if (!allocator.setSpilledRegs(failure.spilledRegs())) {
    return false;
  }

  masm.bind(failure.label());
  allocator.restoreInputState(masm);
  return true;
}

bool CacheIRCompiler::emitGuardIsNumber(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);

  // Doubles and ints are numbers!
  if (knownType == JSVAL_TYPE_DOUBLE || knownType == JSVAL_TYPE_INT32) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
  return true;
}

1692 bool CacheIRCompiler::emitGuardToObject(ValOperandId inputId) {
1693 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1694 if (allocator.knownType(inputId) == JSVAL_TYPE_OBJECT) {
1695 return true;
1698 ValueOperand input = allocator.useValueRegister(masm, inputId);
1699 FailurePath* failure;
1700 if (!addFailurePath(&failure)) {
1701 return false;
1703 masm.branchTestObject(Assembler::NotEqual, input, failure->label());
1704 return true;
1707 bool CacheIRCompiler::emitGuardIsNullOrUndefined(ValOperandId inputId) {
1708 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1709 JSValueType knownType = allocator.knownType(inputId);
1710 if (knownType == JSVAL_TYPE_UNDEFINED || knownType == JSVAL_TYPE_NULL) {
1711 return true;
1714 ValueOperand input = allocator.useValueRegister(masm, inputId);
1715 FailurePath* failure;
1716 if (!addFailurePath(&failure)) {
1717 return false;
1720 Label success;
1721 masm.branchTestNull(Assembler::Equal, input, &success);
1722 masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
1724 masm.bind(&success);
1725 return true;
1728 bool CacheIRCompiler::emitGuardIsNull(ValOperandId inputId) {
1729 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1730 JSValueType knownType = allocator.knownType(inputId);
1731 if (knownType == JSVAL_TYPE_NULL) {
1732 return true;
1735 ValueOperand input = allocator.useValueRegister(masm, inputId);
1736 FailurePath* failure;
1737 if (!addFailurePath(&failure)) {
1738 return false;
1741 masm.branchTestNull(Assembler::NotEqual, input, failure->label());
1742 return true;
1745 bool CacheIRCompiler::emitGuardIsUndefined(ValOperandId inputId) {
1746 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1747 JSValueType knownType = allocator.knownType(inputId);
1748 if (knownType == JSVAL_TYPE_UNDEFINED) {
1749 return true;
1752 ValueOperand input = allocator.useValueRegister(masm, inputId);
1753 FailurePath* failure;
1754 if (!addFailurePath(&failure)) {
1755 return false;
1758 masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
1759 return true;
1762 bool CacheIRCompiler::emitGuardIsNotUninitializedLexical(ValOperandId valId) {
1763 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1765 ValueOperand val = allocator.useValueRegister(masm, valId);
1767 FailurePath* failure;
1768 if (!addFailurePath(&failure)) {
1769 return false;
1772 masm.branchTestMagicValue(Assembler::Equal, val, JS_UNINITIALIZED_LEXICAL,
1773 failure->label());
1774 return true;
1777 bool CacheIRCompiler::emitGuardBooleanToInt32(ValOperandId inputId,
1778 Int32OperandId resultId) {
1779 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1780 Register output = allocator.defineRegister(masm, resultId);
1782 if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
1783 Register input =
1784 allocator.useRegister(masm, BooleanOperandId(inputId.id()));
1785 masm.move32(input, output);
1786 return true;
1788 ValueOperand input = allocator.useValueRegister(masm, inputId);
1790 FailurePath* failure;
1791 if (!addFailurePath(&failure)) {
1792 return false;
1795 masm.fallibleUnboxBoolean(input, output, failure->label());
1796 return true;
bool CacheIRCompiler::emitGuardToString(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_STRING) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestString(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardToSymbol(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_SYMBOL) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardToBigInt(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_BIGINT) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardToBoolean(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardToInt32(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardToNonGCThing(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestGCThing(Assembler::Equal, input, failure->label());
  return true;
}
// Infallible |emitDouble| emitters can use this implementation to avoid
// generating extra clean-up instructions to restore the scratch float
// register. To select this function, simply omit the |Label* fail| parameter
// for the emitter lambda function.
template <typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 1,
                        void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
                ValueOperand input, FailurePath* failure,
                EmitDouble emitDouble) {
  AutoScratchFloatRegister floatReg(compiler);

  masm.unboxDouble(input, floatReg);
  emitDouble(floatReg.get());
}

template <typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 2,
                        void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
                ValueOperand input, FailurePath* failure,
                EmitDouble emitDouble) {
  AutoScratchFloatRegister floatReg(compiler, failure);

  masm.unboxDouble(input, floatReg);
  emitDouble(floatReg.get(), floatReg.failure());
}
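
// Illustrative only (a sketch, not part of the original file): how the two
// overloads above are selected by the emitter lambda's arity. These mirror
// the lambdas passed through EmitGuardInt32OrDouble further below; |output|
// stands for a Register already allocated at the call site.
//
//   // Arity 1 (no |Label* fail|): picks the infallible overload, so no
//   // clean-up code for the scratch float register is generated.
//   EmitGuardDouble(compiler, masm, input, failure,
//                   [&](FloatRegister floatReg) {
//                     masm.clampDoubleToUint8(floatReg, output);
//                   });
//
//   // Arity 2 (takes |Label* fail|): picks the fallible overload, which
//   // routes conversion failures through AutoScratchFloatRegister::failure().
//   EmitGuardDouble(compiler, masm, input, failure,
//                   [&](FloatRegister floatReg, Label* fail) {
//                     masm.convertDoubleToInt32(floatReg, output, fail, false);
//                   });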
template <typename EmitInt32, typename EmitDouble>
static void EmitGuardInt32OrDouble(CacheIRCompiler* compiler,
                                   MacroAssembler& masm, ValueOperand input,
                                   Register output, FailurePath* failure,
                                   EmitInt32 emitInt32, EmitDouble emitDouble) {
  Label done;

  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxInt32(input, output);
      emitInt32();

      masm.jump(&done);
    }
    masm.bind(&notInt32);

    masm.branchTestDouble(Assembler::NotEqual, tag, failure->label());
    {
      ScratchTagScopeRelease _(&tag);

      EmitGuardDouble(compiler, masm, input, failure, emitDouble);
    }
  }

  masm.bind(&done);
}
bool CacheIRCompiler::emitGuardToInt32Index(ValOperandId inputId,
                                            Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
    masm.move32(input, output);
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  EmitGuardInt32OrDouble(
      this, masm, input, output, failure,
      []() {
        // No-op if the value is already an int32.
      },
      [&](FloatRegister floatReg, Label* fail) {
        // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
        masm.convertDoubleToInt32(floatReg, output, fail, false);
      });

  return true;
}
bool CacheIRCompiler::emitInt32ToIntPtr(Int32OperandId inputId,
                                        IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register input = allocator.useRegister(masm, inputId);
  Register output = allocator.defineRegister(masm, resultId);

  masm.move32SignExtendToPtr(input, output);
  return true;
}
bool CacheIRCompiler::emitGuardNumberToIntPtrIndex(NumberOperandId inputId,
                                                   bool supportOOB,
                                                   IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure = nullptr;
  if (!supportOOB) {
    if (!addFailurePath(&failure)) {
      return false;
    }
  }

  AutoScratchFloatRegister floatReg(this, failure);
  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
  if (supportOOB) {
    Label done, fail;
    masm.convertDoubleToPtr(floatReg, output, &fail, false);
    masm.jump(&done);

    // Substitute the invalid index with an arbitrary out-of-bounds index.
    masm.bind(&fail);
    masm.movePtr(ImmWord(-1), output);

    masm.bind(&done);
  } else {
    masm.convertDoubleToPtr(floatReg, output, floatReg.failure(), false);
  }

  return true;
}
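
// Worked example for the |supportOOB| case above (a sketch of the JS-level
// behavior, not new compiler code): for a typed array |ta|, an index such as
// |ta[1.5]| or |ta[NaN]| has no IntPtr representation. With supportOOB=true
// the op does not bail out; it loads -1 into the result register instead, so
// the later bounds check fails and the access takes the out-of-bounds path.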
bool CacheIRCompiler::emitGuardToInt32ModUint32(ValOperandId inputId,
                                                Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
    if (input.constant()) {
      masm.move32(Imm32(input.value().toInt32()), output);
    } else {
      MOZ_ASSERT(input.reg().type() == MIRType::Int32);
      masm.move32(input.reg().typedReg().gpr(), output);
    }
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  EmitGuardInt32OrDouble(
      this, masm, input, output, failure,
      []() {
        // No-op if the value is already an int32.
      },
      [&](FloatRegister floatReg, Label* fail) {
        masm.branchTruncateDoubleMaybeModUint32(floatReg, output, fail);
      });

  return true;
}
bool CacheIRCompiler::emitGuardToUint8Clamped(ValOperandId inputId,
                                              Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
    if (input.constant()) {
      masm.move32(Imm32(ClampDoubleToUint8(input.value().toInt32())), output);
    } else {
      MOZ_ASSERT(input.reg().type() == MIRType::Int32);
      masm.move32(input.reg().typedReg().gpr(), output);
      masm.clampIntToUint8(output);
    }
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  EmitGuardInt32OrDouble(
      this, masm, input, output, failure,
      [&]() {
        // |output| holds the unboxed int32 value.
        masm.clampIntToUint8(output);
      },
      [&](FloatRegister floatReg) {
        masm.clampDoubleToUint8(floatReg, output);
      });

  return true;
}
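
// Examples of the clamping used above (per the spec's ToUint8Clamp;
// illustrative, not new compiler code): out-of-range values saturate, and
// in-range doubles round to the nearest integer with ties going to even.
//
//   clamp(-5)  == 0    // below the range: saturate to 0
//   clamp(300) == 255  // above the range: saturate to 255
//   clamp(2.5) == 2    // tie rounds to even
//   clamp(3.5) == 4    // tie rounds to even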
bool CacheIRCompiler::emitGuardNonDoubleType(ValOperandId inputId,
                                             ValueType type) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (allocator.knownType(inputId) == JSValueType(type)) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  switch (type) {
    case ValueType::String:
      masm.branchTestString(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Symbol:
      masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::BigInt:
      masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Int32:
      masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Boolean:
      masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Undefined:
      masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Null:
      masm.branchTestNull(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Double:
    case ValueType::Magic:
    case ValueType::PrivateGCThing:
    case ValueType::Object:
#ifdef ENABLE_RECORD_TUPLE
    case ValueType::ExtendedPrimitive:
#endif
      MOZ_CRASH("unexpected type");
  }

  return true;
}
static const JSClass* ClassFor(JSContext* cx, GuardClassKind kind) {
  switch (kind) {
    case GuardClassKind::Array:
    case GuardClassKind::PlainObject:
    case GuardClassKind::FixedLengthArrayBuffer:
    case GuardClassKind::ResizableArrayBuffer:
    case GuardClassKind::FixedLengthSharedArrayBuffer:
    case GuardClassKind::GrowableSharedArrayBuffer:
    case GuardClassKind::FixedLengthDataView:
    case GuardClassKind::ResizableDataView:
    case GuardClassKind::MappedArguments:
    case GuardClassKind::UnmappedArguments:
    case GuardClassKind::Set:
    case GuardClassKind::Map:
    case GuardClassKind::BoundFunction:
      return ClassFor(kind);
    case GuardClassKind::WindowProxy:
      return cx->runtime()->maybeWindowProxyClass();
    case GuardClassKind::JSFunction:
      MOZ_CRASH("must be handled by caller");
  }
  MOZ_CRASH("unexpected kind");
}
bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (kind == GuardClassKind::JSFunction) {
    if (objectGuardNeedsSpectreMitigations(objId)) {
      masm.branchTestObjIsFunction(Assembler::NotEqual, obj, scratch, obj,
                                   failure->label());
    } else {
      masm.branchTestObjIsFunctionNoSpectreMitigations(
          Assembler::NotEqual, obj, scratch, failure->label());
    }
    return true;
  }

  const JSClass* clasp = ClassFor(cx_, kind);
  MOZ_ASSERT(clasp);

  if (objectGuardNeedsSpectreMitigations(objId)) {
    masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                                scratch, failure->label());
  }

  return true;
}
bool CacheIRCompiler::emitGuardEitherClass(ObjOperandId objId,
                                           GuardClassKind kind1,
                                           GuardClassKind kind2) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // We don't yet need this case, so it's unsupported for now.
  MOZ_ASSERT(kind1 != GuardClassKind::JSFunction &&
             kind2 != GuardClassKind::JSFunction);

  const JSClass* clasp1 = ClassFor(cx_, kind1);
  MOZ_ASSERT(clasp1);

  const JSClass* clasp2 = ClassFor(cx_, kind2);
  MOZ_ASSERT(clasp2);

  if (objectGuardNeedsSpectreMitigations(objId)) {
    masm.branchTestObjClass(Assembler::NotEqual, obj, {clasp1, clasp2}, scratch,
                            obj, failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(
        Assembler::NotEqual, obj, {clasp1, clasp2}, scratch, failure->label());
  }

  return true;
}
bool CacheIRCompiler::emitGuardNullProto(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjProto(obj, scratch);
  masm.branchTestPtr(Assembler::NonZero, scratch, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsExtensible(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfObjectNotExtensible(obj, scratch, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardDynamicSlotIsSpecificObject(
    ObjOperandId objId, ObjOperandId expectedId, uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register expectedObject = allocator.useRegister(masm, expectedId);

  // Allocate registers before the failure path to make sure they're registered
  // by addFailurePath.
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Guard on the expected object.
  StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  emitLoadStubField(slot, scratch2);
  BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
  masm.fallibleUnboxObject(expectedSlot, scratch1, failure->label());
  masm.branchPtr(Assembler::NotEqual, expectedObject, scratch1,
                 failure->label());

  return true;
}

bool CacheIRCompiler::emitGuardDynamicSlotIsNotObject(ObjOperandId objId,
                                                      uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Guard that the slot isn't an object.
  StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  emitLoadStubField(slot, scratch2);
  BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
  masm.branchTestObject(Assembler::Equal, expectedSlot, failure->label());

  return true;
}
bool CacheIRCompiler::emitGuardFixedSlotValue(ObjOperandId objId,
                                              uint32_t offsetOffset,
                                              uint32_t valOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchValueRegister scratchVal(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(offset, scratch);

  StubFieldOffset val(valOffset, StubField::Type::Value);
  emitLoadValueStubField(val, scratchVal);

  BaseIndex slotVal(obj, scratch, TimesOne);
  masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
                       failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardDynamicSlotValue(ObjOperandId objId,
                                                uint32_t offsetOffset,
                                                uint32_t valOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchValueRegister scratchVal(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);

  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(offset, scratch2);

  StubFieldOffset val(valOffset, StubField::Type::Value);
  emitLoadValueStubField(val, scratchVal);

  BaseIndex slotVal(scratch1, scratch2, TimesOne);
  masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
                       failure->label());
  return true;
}
bool CacheIRCompiler::emitLoadScriptedProxyHandler(ObjOperandId resultId,
                                                   ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);
  Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
                                  ScriptedProxyHandler::HANDLER_EXTRA));
  masm.fallibleUnboxObject(handlerAddr, output, failure->label());

  return true;
}
bool CacheIRCompiler::emitIdToStringOrSymbol(ValOperandId resultId,
                                             ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand id = allocator.useValueRegister(masm, idId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.moveValue(id, output);

  Label done, intDone, callVM;
  {
    ScratchTagScope tag(masm, output);
    masm.splitTagForTest(output, tag);
    masm.branchTestString(Assembler::Equal, tag, &done);
    masm.branchTestSymbol(Assembler::Equal, tag, &done);
    masm.branchTestInt32(Assembler::NotEqual, tag, failure->label());
  }

  Register intReg = output.scratchReg();
  masm.unboxInt32(output, intReg);

  // Fast path for small integers.
  masm.lookupStaticIntString(intReg, intReg, scratch, cx_->staticStrings(),
                             &callVM);
  masm.jump(&intDone);

  masm.bind(&callVM);
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSLinearString* (*)(JSContext* cx, int32_t i);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(intReg);
  masm.callWithABI<Fn, js::Int32ToStringPure>();

  masm.storeCallPointerResult(intReg);

  LiveRegisterSet ignore;
  ignore.add(intReg);
  masm.PopRegsInMaskIgnore(volatileRegs, ignore);

  masm.branchPtr(Assembler::Equal, intReg, ImmPtr(nullptr), failure->label());

  masm.bind(&intDone);
  masm.tagValue(JSVAL_TYPE_STRING, intReg, output);
  masm.bind(&done);

  return true;
}
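
// Note on the fast path above (a summary, not a new guarantee): the runtime
// caches strings for small integers, so lookupStaticIntString can map e.g.
// the int32 id 42 to the cached string "42" without leaving jitcode; only
// ids outside that cache fall through to the Int32ToStringPure call.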
bool CacheIRCompiler::emitLoadFixedSlot(ValOperandId resultId,
                                        ObjOperandId objId,
                                        uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  StubFieldOffset slotIndex(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(slotIndex, scratch);

  masm.loadValue(BaseIndex(obj, scratch, TimesOne), output);
  return true;
}

bool CacheIRCompiler::emitLoadDynamicSlot(ValOperandId resultId,
                                          ObjOperandId objId,
                                          uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch1(allocator, masm);
  Register scratch2 = output.scratchReg();

  StubFieldOffset slotIndex(slotOffset, StubField::Type::RawInt32);
  emitLoadStubField(slotIndex, scratch2);

  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  masm.loadValue(BaseObjectSlotIndex(scratch1, scratch2), output);
  return true;
}
bool CacheIRCompiler::emitGuardIsNativeObject(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfNonNativeObj(obj, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestObjectIsProxy(false, obj, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsNotProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestObjectIsProxy(true, obj, scratch, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardIsNotArrayBufferMaybeShared(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchPtr(Assembler::Equal, scratch,
                 ImmPtr(&FixedLengthArrayBufferObject::class_),
                 failure->label());
  masm.branchPtr(Assembler::Equal, scratch,
                 ImmPtr(&FixedLengthSharedArrayBufferObject::class_),
                 failure->label());
  masm.branchPtr(Assembler::Equal, scratch,
                 ImmPtr(&ResizableArrayBufferObject::class_), failure->label());
  masm.branchPtr(Assembler::Equal, scratch,
                 ImmPtr(&GrowableSharedArrayBufferObject::class_),
                 failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardIsTypedArray(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchIfClassIsNotTypedArray(scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsFixedLengthTypedArray(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchIfClassIsNotFixedLengthTypedArray(scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsResizableTypedArray(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchIfClassIsNotResizableTypedArray(scratch, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardIsNotDOMProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, scratch,
                                    GetDOMProxyHandlerFamily(),
                                    failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardNoDenseElements(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Make sure there are no dense elements.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::NotEqual, initLength, Imm32(0), failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardSpecificInt32(Int32OperandId numId,
                                             int32_t expected) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register num = allocator.useRegister(masm, numId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branch32(Assembler::NotEqual, num, Imm32(expected), failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardStringToInt32(StringOperandId strId,
                                             Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  Register output = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.guardStringToInt32(str, output, scratch, volatileRegs, failure->label());
  return true;
}
bool CacheIRCompiler::emitGuardStringToNumber(StringOperandId strId,
                                              NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label vmCall, done;
  // Use indexed value as fast path if possible.
  masm.loadStringIndexValue(str, scratch, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output);
  masm.jump(&done);

  masm.bind(&vmCall);
  {
    // Reserve stack for holding the result value of the call.
    masm.reserveStack(sizeof(double));
    masm.moveStackPtrTo(output.payloadOrValueReg());

    // We cannot use callVM, as callVM expects to be able to clobber all
    // operands, however, since this op is not the last in the generated IC, we
    // want to be able to reference other live values.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(str);
    masm.passABIArg(output.payloadOrValueReg());
    masm.callWithABI<Fn, js::StringToNumberPure>();
    masm.storeCallPointerResult(scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  }

  Label ok;
  masm.branchIfTrueBool(scratch, &ok);
  {
    // OOM path, recovered by StringToNumberPure.
    //
    // Use addToStackPtr instead of freeStack as freeStack tracks stack height
    // flow-insensitively, and using it twice would confuse the stack height
    // tracking.
    masm.addToStackPtr(Imm32(sizeof(double)));
    masm.jump(failure->label());
  }
  masm.bind(&ok);

  {
    ScratchDoubleScope fpscratch(masm);
    masm.loadDouble(Address(output.payloadOrValueReg(), 0), fpscratch);
    masm.boxDouble(fpscratch, output, fpscratch);
  }
  masm.freeStack(sizeof(double));

  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitNumberParseIntResult(StringOperandId strId,
                                               Int32OperandId radixId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register radix = allocator.useRegister(masm, radixId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
  masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
  masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
  masm.bind(&ok);
#endif

  // Discard the stack to ensure it's balanced when we skip the vm-call.
  allocator.discardStack(masm);

  // Use indexed value as fast path if possible.
  Label vmCall, done;
  masm.loadStringIndexValue(str, scratch, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, callvm.outputValueReg());
  masm.jump(&done);

  masm.bind(&vmCall);

  callvm.prepare();
  masm.Push(radix);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
  callvm.call<Fn, js::NumberParseInt>();

  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitDoubleParseIntResult(NumberOperandId numId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, numId, floatScratch1);

  masm.branchDouble(Assembler::DoubleUnordered, floatScratch1, floatScratch1,
                    failure->label());
  masm.branchTruncateDoubleToInt32(floatScratch1, scratch, failure->label());

  Label ok;
  masm.branch32(Assembler::NotEqual, scratch, Imm32(0), &ok);
  {
    // Accept both +0 and -0 and return 0.
    masm.loadConstantDouble(0.0, floatScratch2);
    masm.branchDouble(Assembler::DoubleEqual, floatScratch1, floatScratch2,
                      &ok);

    // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
    masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, floatScratch2);
    masm.branchDouble(Assembler::DoubleLessThan, floatScratch1, floatScratch2,
                      failure->label());
  }
  masm.bind(&ok);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
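
// Worked examples for the checks above (JS semantics, not new compiler code):
// parseInt first converts its argument to a string, so truncating the double
// directly is only valid when it matches that string round-trip.
//
//   parseInt(3.7)  == 3    // "3.7" parses to 3; plain truncation is fine
//   parseInt(-0.0) == 0    // +0 and -0 are both accepted explicitly above
//   parseInt(1e-7) == 1    // String(1e-7) is "1e-7", so truncating to 0
//                          // would be wrong; the range check bails instead
//   parseInt(-0.5) is -0   // a double result, so (-1, 0) has to bail too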
bool CacheIRCompiler::emitStringToAtom(StringOperandId stringId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, stringId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done, vmCall;
  masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), &done);

  masm.lookupStringInAtomCacheLastLookups(str, scratch, str, &vmCall);
  masm.jump(&done);

  masm.bind(&vmCall);
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = JSAtom* (*)(JSContext* cx, JSString* str);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(str);
  masm.callWithABI<Fn, jit::AtomizeStringNoGC>();
  masm.storeCallPointerResult(scratch);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.branchPtr(Assembler::Equal, scratch, Imm32(0), failure->label());
  masm.movePtr(scratch.get(), str);

  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitBooleanToNumber(BooleanOperandId booleanId,
                                          NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register boolean = allocator.useRegister(masm, booleanId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  masm.tagValue(JSVAL_TYPE_INT32, boolean, output);
  return true;
}
bool CacheIRCompiler::emitGuardStringToIndex(StringOperandId strId,
                                             Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label vmCall, done;
  masm.loadStringIndexValue(str, output, &vmCall);
  masm.jump(&done);

  {
    masm.bind(&vmCall);
    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = int32_t (*)(JSString* str);
    masm.setupUnalignedABICall(output);
    masm.passABIArg(str);
    masm.callWithABI<Fn, GetIndexFromString>();
    masm.storeCallInt32Result(output);

    LiveRegisterSet ignore;
    ignore.add(output);
    masm.PopRegsInMaskIgnore(save, ignore);

    // GetIndexFromString returns a negative value on failure.
    masm.branchTest32(Assembler::Signed, output, output, failure->label());
  }

  masm.bind(&done);
  return true;
}
bool CacheIRCompiler::emitLoadProto(ObjOperandId objId, ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.loadObjProto(obj, reg);

#ifdef DEBUG
  // We shouldn't encounter a null or lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label done;
  masm.branchPtr(Assembler::Above, reg, ImmWord(1), &done);
  masm.assumeUnreachable("Unexpected null or lazy proto in CacheIR LoadProto");
  masm.bind(&done);
#endif
  return true;
}
bool CacheIRCompiler::emitLoadEnclosingEnvironment(ObjOperandId objId,
                                                   ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.unboxObject(
      Address(obj, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
  return true;
}

bool CacheIRCompiler::emitLoadWrapperTarget(ObjOperandId objId,
                                            ObjOperandId resultId,
                                            bool fallible) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (fallible && !addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);

  Address targetAddr(reg,
                     js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
  if (fallible) {
    masm.fallibleUnboxObject(targetAddr, reg, failure->label());
  } else {
    masm.unboxObject(targetAddr, reg);
  }

  return true;
}
bool CacheIRCompiler::emitLoadValueTag(ValOperandId valId,
                                       ValueTagOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand val = allocator.useValueRegister(masm, valId);
  Register res = allocator.defineRegister(masm, resultId);

  Register tag = masm.extractTag(val, res);
  if (tag != res) {
    masm.mov(tag, res);
  }
  return true;
}

bool CacheIRCompiler::emitLoadDOMExpandoValue(ObjOperandId objId,
                                              ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.defineValueRegister(masm, resultId);

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
               val.scratchReg());
  masm.loadValue(Address(val.scratchReg(),
                         js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
                 val);
  return true;
}
bool CacheIRCompiler::emitLoadDOMExpandoValueIgnoreGeneration(
    ObjOperandId objId, ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  // Determine the expando's Address.
  Register scratch = output.scratchReg();
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address expandoAddr(scratch,
                      js::detail::ProxyReservedSlots::offsetOfPrivateSlot());

#ifdef DEBUG
  // Private values are stored as doubles, so assert we have a double.
  Label ok;
  masm.branchTestDouble(Assembler::Equal, expandoAddr, &ok);
  masm.assumeUnreachable("DOM expando is not a PrivateValue!");
  masm.bind(&ok);
#endif

  // Load the ExpandoAndGeneration* from the PrivateValue.
  masm.loadPrivate(expandoAddr, scratch);

  // Load expandoAndGeneration->expando into the output Value register.
  masm.loadValue(Address(scratch, ExpandoAndGeneration::offsetOfExpando()),
                 output);
  return true;
}
bool CacheIRCompiler::emitLoadUndefinedResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}

static void EmitStoreBoolean(MacroAssembler& masm, bool b,
                             const AutoOutputRegister& output) {
  if (output.hasValue()) {
    Value val = BooleanValue(b);
    masm.moveValue(val, output.valueReg());
  } else {
    MOZ_ASSERT(output.type() == JSVAL_TYPE_BOOLEAN);
    masm.movePtr(ImmWord(b), output.typedReg().gpr());
  }
}

bool CacheIRCompiler::emitLoadBooleanResult(bool val) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  EmitStoreBoolean(masm, val, output);
  return true;
}

bool CacheIRCompiler::emitLoadOperandResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand input = allocator.useValueRegister(masm, inputId);
  masm.moveValue(input, output.valueReg());
  return true;
}

static void EmitStoreResult(MacroAssembler& masm, Register reg,
                            JSValueType type,
                            const AutoOutputRegister& output) {
  if (output.hasValue()) {
    masm.tagValue(type, reg, output.valueReg());
    return;
  }
  if (type == JSVAL_TYPE_INT32 && output.typedReg().isFloat()) {
    masm.convertInt32ToDouble(reg, output.typedReg().fpu());
    return;
  }
  if (type == output.type()) {
    masm.mov(reg, output.typedReg().gpr());
    return;
  }
  masm.assumeUnreachable("Should have monitored result");
}
bool CacheIRCompiler::emitLoadInt32ArrayLengthResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
  masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);

  // Guard length fits in an int32.
  masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadInt32ArrayLength(ObjOperandId objId,
                                               Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register res = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), res);
  masm.load32(Address(res, ObjectElements::offsetOfLength()), res);

  // Guard length fits in an int32.
  masm.branchTest32(Assembler::Signed, res, res, failure->label());
  return true;
}
bool CacheIRCompiler::emitDoubleAddResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.addDouble(floatScratch1, floatScratch0);
  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}

bool CacheIRCompiler::emitDoubleSubResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.subDouble(floatScratch1, floatScratch0);
  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}

bool CacheIRCompiler::emitDoubleMulResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.mulDouble(floatScratch1, floatScratch0);
  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}

bool CacheIRCompiler::emitDoubleDivResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.divDouble(floatScratch1, floatScratch0);
  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
bool CacheIRCompiler::emitDoubleModResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double a, double b);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);
  masm.callWithABI<Fn, js::NumberMod>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}

bool CacheIRCompiler::emitDoublePowResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);
  masm.callWithABI<Fn, js::ecmaPow>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
bool CacheIRCompiler::emitInt32AddResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(rhs, scratch);
  masm.branchAdd32(Assembler::Overflow, lhs, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32SubResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(lhs, scratch);
  masm.branchSub32(Assembler::Overflow, rhs, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
bool CacheIRCompiler::emitInt32MulResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label maybeNegZero, done;
  masm.mov(lhs, scratch);
  masm.branchMul32(Assembler::Overflow, rhs, scratch, failure->label());
  masm.branchTest32(Assembler::Zero, scratch, scratch, &maybeNegZero);
  masm.jump(&done);

  masm.bind(&maybeNegZero);
  masm.mov(lhs, scratch2);
  // Result is -0 if exactly one of lhs or rhs is negative.
  masm.or32(rhs, scratch2);
  masm.branchTest32(Assembler::Signed, scratch2, scratch2, failure->label());

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
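
// Worked example for the negative-zero check above (JS semantics, not new
// compiler code): an int32 cannot represent -0, so a zero product with
// exactly one negative operand must bail out to produce a double.
//
//   (-3) * 0 == -0   // scratch is 0 and (lhs | rhs) has the sign bit set
//    3 * 0   ==  0   // (lhs | rhs) is non-negative; int32 0 is correct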
bool CacheIRCompiler::emitInt32DivResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegister rem(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Prevent division by 0.
  masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());

  // Prevent -2147483648 / -1.
  Label notOverflow;
  masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
  masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
  masm.bind(&notOverflow);

  // Prevent negative 0.
  Label notZero;
  masm.branchTest32(Assembler::NonZero, lhs, lhs, &notZero);
  masm.branchTest32(Assembler::Signed, rhs, rhs, failure->label());
  masm.bind(&notZero);

  masm.mov(lhs, scratch);
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.flexibleDivMod32(rhs, scratch, rem, false, volatileRegs);

  // A remainder implies a double result.
  masm.branchTest32(Assembler::NonZero, rem, rem, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitInt32ModResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // x % 0 results in NaN.
  masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());

  // Prevent -2147483648 % -1.
  //
  // Traps on x86 and has undefined behavior on ARM32 (when __aeabi_idivmod is
  // called).
  Label notOverflow;
  masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
  masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
  masm.bind(&notOverflow);

  masm.mov(lhs, scratch);
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.flexibleRemainder32(rhs, scratch, false, volatileRegs);

  // Modulo takes the sign of the dividend; we can't return negative zero here.
  Label notZero;
  masm.branchTest32(Assembler::NonZero, scratch, scratch, &notZero);
  masm.branchTest32(Assembler::Signed, lhs, lhs, failure->label());
  masm.bind(&notZero);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
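
// Worked example for the sign handling above (JS semantics, not new compiler
// code): the remainder takes the sign of the dividend, so a zero remainder
// with a negative dividend is -0 and cannot be returned as an int32.
//
//   (-4) % 2 == -0   // remainder 0 with negative lhs: bail to double
//    4 % -2 ==  0    // sign follows the dividend; int32 0 is correct
//   (-5) % 3 == -2   // nonzero remainder: no extra check needed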
bool CacheIRCompiler::emitInt32PowResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register base = allocator.useRegister(masm, lhsId);
  Register power = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.pow32(base, power, scratch1, scratch2, scratch3, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitInt32BitOrResult(Int32OperandId lhsId,
                                           Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  masm.mov(rhs, scratch);
  masm.or32(lhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32BitXorResult(Int32OperandId lhsId,
                                            Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  masm.mov(rhs, scratch);
  masm.xor32(lhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32BitAndResult(Int32OperandId lhsId,
                                            Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  masm.mov(rhs, scratch);
  masm.and32(lhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
bool CacheIRCompiler::emitInt32LeftShiftResult(Int32OperandId lhsId,
                                               Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.mov(lhs, scratch);
  masm.flexibleLshift32(rhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32RightShiftResult(Int32OperandId lhsId,
                                                Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.mov(lhs, scratch);
  masm.flexibleRshift32Arithmetic(rhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
bool CacheIRCompiler::emitInt32URightShiftResult(Int32OperandId lhsId,
                                                 Int32OperandId rhsId,
                                                 bool forceDouble) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(lhs, scratch);
  masm.flexibleRshift32(rhs, scratch);
  if (forceDouble) {
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  } else {
    masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  }
  return true;
}
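
// Worked example for the |forceDouble| distinction above (JS semantics, not
// new compiler code): |>>>| produces a uint32, which only fits in an int32
// result when it is below 2**31.
//
//   (-1) >>> 0 == 4294967295   // sign bit set: box as a double, or bail
//                              // when forceDouble is false
//    8 >>> 1   == 4            // fits in int32 and is tagged directly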
bool CacheIRCompiler::emitInt32NegationResult(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Guard against 0 and MIN_INT by checking if low 31 bits are all zero.
  // Both of these result in a double.
  masm.branchTest32(Assembler::Zero, val, Imm32(0x7fffffff), failure->label());
  masm.mov(val, scratch);
  masm.neg32(scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitInt32IncResult(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register input = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(input, scratch);
  masm.branchAdd32(Assembler::Overflow, Imm32(1), scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitInt32DecResult(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register input = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(input, scratch);
  masm.branchSub32(Assembler::Overflow, Imm32(1), scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
bool CacheIRCompiler::emitInt32NotResult(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.mov(val, scratch);
  masm.not32(scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
bool CacheIRCompiler::emitDoubleNegationResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  masm.negateDouble(floatReg);
  masm.boxDouble(floatReg, output.valueReg(), floatReg);

  return true;
}

bool CacheIRCompiler::emitDoubleIncDecResult(bool isInc,
                                             NumberOperandId inputId) {
  AutoOutputRegister output(*this);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  {
    ScratchDoubleScope fpscratch(masm);
    masm.loadConstantDouble(1.0, fpscratch);
    if (isInc) {
      masm.addDouble(fpscratch, floatReg);
    } else {
      masm.subDouble(fpscratch, floatReg);
    }
  }
  masm.boxDouble(floatReg, output.valueReg(), floatReg);

  return true;
}

bool CacheIRCompiler::emitDoubleIncResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitDoubleIncDecResult(true, inputId);
}

bool CacheIRCompiler::emitDoubleDecResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitDoubleIncDecResult(false, inputId);
}
template <typename Fn, Fn fn>
bool CacheIRCompiler::emitBigIntBinaryOperationShared(BigIntOperandId lhsId,
                                                      BigIntOperandId rhsId) {
  AutoCallVM callvm(masm, this, allocator);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  masm.Push(rhs);
  masm.Push(lhs);

  callvm.call<Fn, fn>();
  return true;
}
3645 bool CacheIRCompiler::emitBigIntAddResult(BigIntOperandId lhsId,
3646 BigIntOperandId rhsId) {
3647 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3648 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3649 return emitBigIntBinaryOperationShared<Fn, BigInt::add>(lhsId, rhsId);
3652 bool CacheIRCompiler::emitBigIntSubResult(BigIntOperandId lhsId,
3653 BigIntOperandId rhsId) {
3654 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3655 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3656 return emitBigIntBinaryOperationShared<Fn, BigInt::sub>(lhsId, rhsId);
3659 bool CacheIRCompiler::emitBigIntMulResult(BigIntOperandId lhsId,
3660 BigIntOperandId rhsId) {
3661 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3662 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3663 return emitBigIntBinaryOperationShared<Fn, BigInt::mul>(lhsId, rhsId);
3666 bool CacheIRCompiler::emitBigIntDivResult(BigIntOperandId lhsId,
3667 BigIntOperandId rhsId) {
3668 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3669 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3670 return emitBigIntBinaryOperationShared<Fn, BigInt::div>(lhsId, rhsId);
3673 bool CacheIRCompiler::emitBigIntModResult(BigIntOperandId lhsId,
3674 BigIntOperandId rhsId) {
3675 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3676 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3677 return emitBigIntBinaryOperationShared<Fn, BigInt::mod>(lhsId, rhsId);
3680 bool CacheIRCompiler::emitBigIntPowResult(BigIntOperandId lhsId,
3681 BigIntOperandId rhsId) {
3682 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3683 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3684 return emitBigIntBinaryOperationShared<Fn, BigInt::pow>(lhsId, rhsId);
3687 bool CacheIRCompiler::emitBigIntBitAndResult(BigIntOperandId lhsId,
3688 BigIntOperandId rhsId) {
3689 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3690 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3691 return emitBigIntBinaryOperationShared<Fn, BigInt::bitAnd>(lhsId, rhsId);
3694 bool CacheIRCompiler::emitBigIntBitOrResult(BigIntOperandId lhsId,
3695 BigIntOperandId rhsId) {
3696 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3697 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3698 return emitBigIntBinaryOperationShared<Fn, BigInt::bitOr>(lhsId, rhsId);
3701 bool CacheIRCompiler::emitBigIntBitXorResult(BigIntOperandId lhsId,
3702 BigIntOperandId rhsId) {
3703 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3704 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3705 return emitBigIntBinaryOperationShared<Fn, BigInt::bitXor>(lhsId, rhsId);
3708 bool CacheIRCompiler::emitBigIntLeftShiftResult(BigIntOperandId lhsId,
3709 BigIntOperandId rhsId) {
3710 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3711 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3712 return emitBigIntBinaryOperationShared<Fn, BigInt::lsh>(lhsId, rhsId);
3715 bool CacheIRCompiler::emitBigIntRightShiftResult(BigIntOperandId lhsId,
3716 BigIntOperandId rhsId) {
3717 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3718 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3719 return emitBigIntBinaryOperationShared<Fn, BigInt::rsh>(lhsId, rhsId);
3722 template <typename Fn, Fn fn>
3723 bool CacheIRCompiler::emitBigIntUnaryOperationShared(BigIntOperandId inputId) {
3724 AutoCallVM callvm(masm, this, allocator);
3725 Register val = allocator.useRegister(masm, inputId);
3727 callvm.prepare();
3729 masm.Push(val);
3731 callvm.call<Fn, fn>();
3732 return true;
3735 bool CacheIRCompiler::emitBigIntNotResult(BigIntOperandId inputId) {
3736 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3737 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3738 return emitBigIntUnaryOperationShared<Fn, BigInt::bitNot>(inputId);
3741 bool CacheIRCompiler::emitBigIntNegationResult(BigIntOperandId inputId) {
3742 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3743 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3744 return emitBigIntUnaryOperationShared<Fn, BigInt::neg>(inputId);
3747 bool CacheIRCompiler::emitBigIntIncResult(BigIntOperandId inputId) {
3748 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3749 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3750 return emitBigIntUnaryOperationShared<Fn, BigInt::inc>(inputId);
3753 bool CacheIRCompiler::emitBigIntDecResult(BigIntOperandId inputId) {
3754 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3755 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3756 return emitBigIntUnaryOperationShared<Fn, BigInt::dec>(inputId);
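// Truncate a double to a uint32 with ToInt32-style (modulo 2^32) semantics.
// The fast path uses the platform truncate instruction; when that can't
// represent the result, it falls back to an ABI call to JS::ToInt32 with the
// volatile registers saved around the call.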
bool CacheIRCompiler::emitTruncateDoubleToUInt32(NumberOperandId inputId,
                                                 Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register res = allocator.defineRegister(masm, resultId);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  Label done, truncateABICall;

  masm.branchTruncateDoubleMaybeModUint32(floatReg, res, &truncateABICall);
  masm.jump(&done);

  masm.bind(&truncateABICall);
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(floatReg);
  // Bug 1451976
  save.takeUnchecked(floatReg.get().asSingle());
  masm.PushRegsInMask(save);

  using Fn = int32_t (*)(double);
  masm.setupUnalignedABICall(res);
  masm.passABIArg(floatReg, ABIType::Float64);
  masm.callWithABI<Fn, JS::ToInt32>(ABIType::General,
                                    CheckUnsafeCallWithABI::DontCheckOther);
  masm.storeCallInt32Result(res);

  LiveRegisterSet ignore;
  ignore.add(res);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.bind(&done);
  return true;
}

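// Convert a double to an integer in [0, 255] for Uint8ClampedArray stores;
// clampDoubleToUint8 implements the ToUint8Clamp clamping and rounding
// behavior.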
bool CacheIRCompiler::emitDoubleToUint8Clamped(NumberOperandId inputId,
                                               Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register res = allocator.defineRegister(masm, resultId);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  masm.clampDoubleToUint8(floatReg, res);
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectLengthResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectLength(obj, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectLength(ObjOperandId objId,
                                                    Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register res = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectLength(obj, res, failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadArrayBufferByteLengthInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadArrayBufferByteLengthDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ScratchDoubleScope fpscratch(masm);
  masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
  masm.convertIntPtrToDouble(scratch, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}

bool CacheIRCompiler::emitLoadArrayBufferViewLengthInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadArrayBufferViewLengthDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ScratchDoubleScope fpscratch(masm);
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.convertIntPtrToDouble(scratch, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}

bool CacheIRCompiler::emitLoadBoundFunctionNumArgs(ObjOperandId objId,
                                                   Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register output = allocator.defineRegister(masm, resultId);

  masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
                  output);
  masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
  return true;
}

bool CacheIRCompiler::emitLoadBoundFunctionTarget(ObjOperandId objId,
                                                  ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register output = allocator.defineRegister(masm, resultId);

  masm.unboxObject(Address(obj, BoundFunctionObject::offsetOfTargetSlot()),
                   output);
  return true;
}

bool CacheIRCompiler::emitGuardBoundFunctionIsConstructor(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(BoundFunctionObject::IsConstructorFlag),
                    failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardObjectIdentity(ObjOperandId obj1Id,
                                              ObjOperandId obj2Id) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj1 = allocator.useRegister(masm, obj1Id);
  Register obj2 = allocator.useRegister(masm, obj2Id);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchPtr(Assembler::NotEqual, obj1, obj2, failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadFunctionLengthResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Get the JSFunction flags and arg count.
  masm.load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), scratch);

  // Functions with a SelfHostedLazyScript must be compiled with the slow-path
  // before the function length is known. If the length was previously resolved,
  // the length property may be shadowed.
  masm.branchTest32(
      Assembler::NonZero, scratch,
      Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
      failure->label());

  masm.loadFunctionLength(obj, scratch, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadFunctionNameResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadFunctionName(obj, scratch, ImmGCPtr(cx_->names().empty_),
                        failure->label());

  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  return true;
}

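// Rope strings don't store their characters inline, so indexed char access
// first linearizes the string. The pure ABI call returns nullptr when
// linearization fails, which sends us to the failure path.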
bool CacheIRCompiler::emitLinearizeForCharAccess(StringOperandId strId,
                                                 Int32OperandId indexId,
                                                 StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  Register result = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.movePtr(str, result);

  // We can omit the bounds check, because we only compare the index against the
  // string length. In the worst case we unnecessarily linearize the string
  // when the index is out-of-bounds.

  masm.branchIfCanLoadStringChar(str, index, scratch, &done);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSString*);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(str);
    masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
    masm.storeCallPointerResult(result);

    LiveRegisterSet ignore;
    ignore.add(result);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);

    masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLinearizeForCodePointAccess(
    StringOperandId strId, Int32OperandId indexId, StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  Register result = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.movePtr(str, result);

  // We can omit the bounds check, because we only compare the index against the
  // string length. In the worst case we unnecessarily linearize the string
  // when the index is out-of-bounds.

  masm.branchIfCanLoadStringCodePoint(str, index, scratch1, scratch2, &done);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSString*);
    masm.setupUnalignedABICall(scratch1);
    masm.passABIArg(str);
    masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
    masm.storeCallPointerResult(result);

    LiveRegisterSet ignore;
    ignore.add(result);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);

    masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitToRelativeStringIndex(Int32OperandId indexId,
                                                StringOperandId strId,
                                                Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register index = allocator.useRegister(masm, indexId);
  Register str = allocator.useRegister(masm, strId);
  Register result = allocator.defineRegister(masm, resultId);

  // If |index| is non-negative, it's an index relative to the start of the
  // string. Otherwise it's an index relative to the end of the string.
  masm.move32(Imm32(0), result);
  masm.cmp32Load32(Assembler::LessThan, index, Imm32(0),
                   Address(str, JSString::offsetOfLength()), result);
  masm.add32(index, result);
  return true;
}

bool CacheIRCompiler::emitLoadStringLengthResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadStringLength(str, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

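// String charCodeAt/codePointAt. With |handleOOB| the IC returns NaN
// (resp. undefined) for out-of-bounds indices instead of bailing out; the
// preceding Linearize op guarantees the char load itself cannot fail.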
bool CacheIRCompiler::emitLoadStringCharCodeResult(StringOperandId strId,
                                                   Int32OperandId indexId,
                                                   bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  // Bounds check, load string char.
  Label done;
  if (!handleOOB) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, failure->label());
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
                        failure->label());
  } else {
    // Return NaN for out-of-bounds access.
    masm.moveValue(JS::NaNValue(), output.valueReg());

    // The bounds check mustn't use a scratch register which aliases the output.
    MOZ_ASSERT(!output.valueReg().aliases(scratch3));

    // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
    // guaranteed to see no nested ropes.
    Label loadFailed;
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch3, &done);
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);

    Label loadedChar;
    masm.jump(&loadedChar);
    masm.bind(&loadFailed);
    masm.assumeUnreachable("loadStringChar can't fail for linear strings");
    masm.bind(&loadedChar);
  }

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadStringCodePointResult(StringOperandId strId,
                                                    Int32OperandId indexId,
                                                    bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  // Bounds check, load string char.
  Label done;
  if (!handleOOB) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, failure->label());
    masm.loadStringCodePoint(str, index, scratch1, scratch2, scratch3,
                             failure->label());
  } else {
    // Return undefined for out-of-bounds access.
    masm.moveValue(JS::UndefinedValue(), output.valueReg());

    // The bounds check mustn't use a scratch register which aliases the output.
    MOZ_ASSERT(!output.valueReg().aliases(scratch3));

    // This CacheIR op is always preceded by |LinearizeForCodePointAccess|, so
    // we're guaranteed to see no nested ropes or split surrogates.
    Label loadFailed;
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch3, &done);
    masm.loadStringCodePoint(str, index, scratch1, scratch2, scratch3,
                             &loadFailed);

    Label loadedChar;
    masm.jump(&loadedChar);
    masm.bind(&loadFailed);
    masm.assumeUnreachable("loadStringCodePoint can't fail for linear strings");
    masm.bind(&loadedChar);
  }

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitNewStringObjectResult(uint32_t templateObjectOffset,
                                                StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSObject* (*)(JSContext*, HandleString);
  callvm.call<Fn, NewStringObject>();
  return true;
}

bool CacheIRCompiler::emitStringIncludesResult(StringOperandId strId,
                                               StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callvm.call<Fn, js::StringIncludes>();
  return true;
}

bool CacheIRCompiler::emitStringIndexOfResult(StringOperandId strId,
                                              StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
  callvm.call<Fn, js::StringIndexOf>();
  return true;
}

bool CacheIRCompiler::emitStringLastIndexOfResult(StringOperandId strId,
                                                  StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
  callvm.call<Fn, js::StringLastIndexOf>();
  return true;
}

bool CacheIRCompiler::emitStringStartsWithResult(StringOperandId strId,
                                                 StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callvm.call<Fn, js::StringStartsWith>();
  return true;
}

bool CacheIRCompiler::emitStringEndsWithResult(StringOperandId strId,
                                               StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callvm.call<Fn, js::StringEndsWith>();
  return true;
}

bool CacheIRCompiler::emitStringToLowerCaseResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringToLowerCase>();
  return true;
}

bool CacheIRCompiler::emitStringToUpperCaseResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringToUpperCase>();
  return true;
}

bool CacheIRCompiler::emitStringTrimResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringTrim>();
  return true;
}

bool CacheIRCompiler::emitStringTrimStartResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringTrimStart>();
  return true;
}

bool CacheIRCompiler::emitStringTrimEndResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringTrimEnd>();
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectArgResult(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElement(obj, index, output.valueReg(), scratch,
                                  failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectArgHoleResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElementHole(obj, index, output.valueReg(), scratch,
                                      failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectArgExistsResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElementExists(obj, index, scratch2, scratch1,
                                        failure->label());
  EmitStoreResult(masm, scratch2, JSVAL_TYPE_BOOLEAN, output);
  return true;
}

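// Dense element loads bounds-check against the initialized length (with a
// Spectre mitigation) and bail out on the magic hole value, so only plain
// in-bounds elements are handled by this IC.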
bool CacheIRCompiler::emitLoadDenseElementResult(ObjOperandId objId,
                                                 Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);

  // Bounds check.
  Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, scratch2, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch1, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());
  masm.loadTypedOrValue(element, output);
  return true;
}

bool CacheIRCompiler::emitGuardInt32IsNonNegative(Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register index = allocator.useRegister(masm, indexId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIndexIsNotDenseElement(ObjOperandId objId,
                                                      Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);
  AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Ensure index >= initLength or the element is a hole.
  Label notDense;
  Address capacity(scratch, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, capacity, spectreScratch, &notDense);

  BaseValueIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, &notDense);

  masm.jump(failure->label());

  masm.bind(&notDense);
  return true;
}

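// For stores that may update an existing element or append a new one: any
// index is acceptable while the array length is writable; otherwise the index
// must be below the current length.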
bool CacheIRCompiler::emitGuardIndexIsValidUpdateOrAdd(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);
  AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  Label success;

  // If length is writable, branch to &success. All indices are writable.
  Address flags(scratch, ObjectElements::offsetOfFlags());
  masm.branchTest32(Assembler::Zero, flags,
                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
                    &success);

  // Otherwise, ensure index is in bounds.
  Address length(scratch, ObjectElements::offsetOfLength());
  masm.spectreBoundsCheck32(index, length, spectreScratch,
                            /* failure = */ failure->label());
  masm.bind(&success);
  return true;
}

bool CacheIRCompiler::emitGuardTagNotEqual(ValueTagOperandId lhsId,
                                           ValueTagOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.branch32(Assembler::Equal, lhs, rhs, failure->label());

  // If both lhs and rhs are numbers, can't use tag comparison to do inequality
  // comparison
  masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
  masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
  masm.jump(failure->label());

  masm.bind(&done);
  return true;
}

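// Xray wrappers keep cached properties on a holder object; the slot indices
// used below come from XrayJitInfo. These guards validate the expando's shape
// (and that no expando proto is set) before a cached lookup is trusted.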
bool CacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto(
    ObjOperandId objId, uint32_t shapeWrapperOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset shapeWrapper(shapeWrapperOffset, StubField::Type::JSObject);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address holderAddress(scratch,
                        sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
  Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
                                      GetXrayJitInfo()->holderExpandoSlot));

  masm.fallibleUnboxObject(holderAddress, scratch, failure->label());
  masm.fallibleUnboxObject(expandoAddress, scratch, failure->label());

  // Unwrap the expando before checking its shape.
  masm.loadPtr(Address(scratch, ProxyObject::offsetOfReservedSlots()), scratch);
  masm.unboxObject(
      Address(scratch, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
      scratch);

  emitLoadStubField(shapeWrapper, scratch2);
  LoadShapeWrapperContents(masm, scratch2, scratch2, failure->label());
  masm.branchTestObjShape(Assembler::NotEqual, scratch, scratch2, scratch3,
                          scratch, failure->label());

  // The reserved slots on the expando should all be in fixed slots.
  Address protoAddress(scratch, NativeObject::getFixedSlotOffset(
                                    GetXrayJitInfo()->expandoProtoSlot));
  masm.branchTestUndefined(Assembler::NotEqual, protoAddress, failure->label());

  return true;
}

bool CacheIRCompiler::emitGuardXrayNoExpando(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address holderAddress(scratch,
                        sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
  Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
                                      GetXrayJitInfo()->holderExpandoSlot));

  Label done;
  masm.fallibleUnboxObject(holderAddress, scratch, &done);
  masm.branchTestObject(Assembler::Equal, expandoAddress, failure->label());
  masm.bind(&done);

  return true;
}

bool CacheIRCompiler::emitGuardNoAllocationMetadataBuilder(
    uint32_t builderAddrOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  StubFieldOffset builderField(builderAddrOffset, StubField::Type::RawPointer);
  emitLoadStubField(builderField, scratch);
  masm.branchPtr(Assembler::NotEqual, Address(scratch, 0), ImmWord(0),
                 failure->label());

  return true;
}

bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register fun = allocator.useRegister(masm, funId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfFunctionHasNoJitEntry(fun, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfFunctionHasJitEntry(obj, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardFunctionIsNonBuiltinCtor(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfNotFunctionIsNonBuiltinCtor(fun, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardFunctionIsConstructor(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register funcReg = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Ensure obj is a constructor
  masm.branchTestFunctionFlags(funcReg, FunctionFlags::CONSTRUCTOR,
                               Assembler::Zero, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardNotClassConstructor(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
                          fun, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardArrayIsPacked(ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchArrayIsNotPacked(array, scratch, scratch2, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardArgumentsObjectFlags(ObjOperandId objId,
                                                    uint8_t flags) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestArgumentsObjectFlags(obj, scratch, flags, Assembler::NonZero,
                                      failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadDenseElementHoleResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Make sure the index is nonnegative.
  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);

  // Guard on the initialized length.
  Label hole;
  Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, scratch2, &hole);

  // Load the value.
  Label done;
  masm.loadValue(BaseObjectElementIndex(scratch1, index), output.valueReg());
  masm.branchTestMagic(Assembler::NotEqual, output.valueReg(), &done);

  // Load undefined for the hole.
  masm.bind(&hole);
  masm.moveValue(UndefinedValue(), output.valueReg());

  masm.bind(&done);
  return true;
}

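// Typed array "in bounds" test. Fixed-length views can read the length slot
// directly; resizable views derive the length from the buffer, and that load
// needs no synchronization because bounds checks use unordered reads (see the
// IsValidIntegerIndex abstract operation).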
bool CacheIRCompiler::emitLoadTypedArrayElementExistsResult(
    ObjOperandId objId, IntPtrOperandId indexId, ArrayBufferViewKind viewKind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Maybe<AutoScratchRegister> scratch2;
  if (viewKind == ArrayBufferViewKind::Resizable) {
    scratch2.emplace(allocator, masm);
  }

  Label outOfBounds, done;

  // Bounds check.
  if (viewKind == ArrayBufferViewKind::FixedLength) {
    masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  } else {
    // Bounds check doesn't require synchronization. See IsValidIntegerIndex
    // abstract operation which reads the underlying buffer byte length using
    // "unordered" memory order.
    auto sync = Synchronization::None();

    masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch, *scratch2);
  }
  masm.branchPtr(Assembler::BelowOrEqual, scratch, index, &outOfBounds);
  EmitStoreBoolean(masm, true, output);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  EmitStoreBoolean(masm, false, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadDenseElementExistsResult(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Bounds check. Unsigned compare sends negative indices to next IC.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());

  EmitStoreBoolean(masm, true, output);
  return true;
}

bool CacheIRCompiler::emitLoadDenseElementHoleExistsResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Make sure the index is nonnegative.
  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Guard on the initialized length.
  Label hole;
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::BelowOrEqual, initLength, index, &hole);

  // Load value and replace with true.
  Label done;
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, &hole);
  EmitStoreBoolean(masm, true, output);
  masm.jump(&done);

  // Load false for the hole.
  masm.bind(&hole);
  EmitStoreBoolean(masm, false, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitPackedArrayPopResult(ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.packedArrayPop(array, output.valueReg(), scratch1, scratch2,
                      failure->label());
  return true;
}

bool CacheIRCompiler::emitPackedArrayShiftResult(ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.packedArrayShift(array, output.valueReg(), scratch1, scratch2,
                        volatileRegs, failure->label());
  return true;
}

bool CacheIRCompiler::emitIsObjectResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ValueOperand val = allocator.useValueRegister(masm, inputId);

  masm.testObjectSet(Assembler::Equal, val, scratch);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitIsPackedArrayResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  Register outputScratch = output.valueReg().scratchReg();
  masm.setIsPackedArray(obj, outputScratch, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
  return true;
}

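// IsCallable/IsConstructor resolve inline for ordinary objects, but proxies
// defer to their handler, so the proxy case makes an out-of-line ABI call
// (ObjectIsCallable/ObjectIsConstructor) with the volatile registers saved.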
bool CacheIRCompiler::emitIsCallableResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  ValueOperand val = allocator.useValueRegister(masm, inputId);

  Label isObject, done;
  masm.branchTestObject(Assembler::Equal, val, &isObject);
  // Primitives are never callable.
  masm.move32(Imm32(0), scratch2);
  masm.jump(&done);

  masm.bind(&isObject);
  masm.unboxObject(val, scratch1);

  Label isProxy;
  masm.isCallable(scratch1, scratch2, &isProxy);
  masm.jump(&done);

  masm.bind(&isProxy);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject* obj);
    masm.setupUnalignedABICall(scratch2);
    masm.passABIArg(scratch1);
    masm.callWithABI<Fn, ObjectIsCallable>();
    masm.storeCallBoolResult(scratch2);

    LiveRegisterSet ignore;
    ignore.add(scratch2);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitIsConstructorResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register obj = allocator.useRegister(masm, objId);

  Label isProxy, done;
  masm.isConstructor(obj, scratch, &isProxy);
  masm.jump(&done);

  masm.bind(&isProxy);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject* obj);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, ObjectIsConstructor>();
    masm.storeCallBoolResult(scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitIsCrossRealmArrayConstructorResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  masm.setIsCrossRealmArrayConstructor(obj, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitArrayBufferViewByteOffsetInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitArrayBufferViewByteOffsetDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ScratchDoubleScope fpscratch(masm);
  masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
  masm.convertIntPtrToDouble(scratch, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}

bool CacheIRCompiler::
    emitResizableTypedArrayByteOffsetMaybeOutOfBoundsInt32Result(
        ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, scratch1,
                                                               scratch2);
  masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}

bool CacheIRCompiler::
    emitResizableTypedArrayByteOffsetMaybeOutOfBoundsDoubleResult(
        ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  ScratchDoubleScope fpscratch(masm);
  masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, scratch1,
                                                               scratch2);
  masm.convertIntPtrToDouble(scratch1, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}

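// byteLength is computed as length * elementSize. The int32 variants check
// the multiply for overflow and bail out; the double variants can represent
// any valid length, so they multiply without a check.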
5148 bool CacheIRCompiler::emitTypedArrayByteLengthInt32Result(ObjOperandId objId) {
5149 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5151 AutoOutputRegister output(*this);
5152 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
5153 AutoScratchRegister scratch2(allocator, masm);
5154 Register obj = allocator.useRegister(masm, objId);
5156 FailurePath* failure;
5157 if (!addFailurePath(&failure)) {
5158 return false;
5161 masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
5162 masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
5163 masm.typedArrayElementSize(obj, scratch2);
5165 masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
5166 failure->label());
5168 masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
5169 return true;
5172 bool CacheIRCompiler::emitTypedArrayByteLengthDoubleResult(ObjOperandId objId) {
5173 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5175 AutoOutputRegister output(*this);
5176 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
5177 AutoScratchRegister scratch2(allocator, masm);
5178 Register obj = allocator.useRegister(masm, objId);
5180 masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
5181 masm.typedArrayElementSize(obj, scratch2);
5182 masm.mulPtr(scratch2, scratch1);
5184 ScratchDoubleScope fpscratch(masm);
5185 masm.convertIntPtrToDouble(scratch1, fpscratch);
5186 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
5187 return true;
5190 bool CacheIRCompiler::emitResizableTypedArrayByteLengthInt32Result(
5191 ObjOperandId objId) {
5192 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5194 AutoOutputRegister output(*this);
5195 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
5196 AutoScratchRegister scratch2(allocator, masm);
5197 Register obj = allocator.useRegister(masm, objId);
5199 FailurePath* failure;
5200 if (!addFailurePath(&failure)) {
5201 return false;
5204 // Explicit |byteLength| accesses are seq-consistent atomic loads.
5205 auto sync = Synchronization::Load();
5207 masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
5208 masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
5209 masm.typedArrayElementSize(obj, scratch2);
5211 masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
5212 failure->label());
5214 masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
5215 return true;
5218 bool CacheIRCompiler::emitResizableTypedArrayByteLengthDoubleResult(
5219 ObjOperandId objId) {
5220 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5222 AutoOutputRegister output(*this);
5223 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
5224 AutoScratchRegister scratch2(allocator, masm);
5225 Register obj = allocator.useRegister(masm, objId);
5227 // Explicit |byteLength| accesses are seq-consistent atomic loads.
5228 auto sync = Synchronization::Load();
5230 masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
5231 masm.typedArrayElementSize(obj, scratch2);
5232 masm.mulPtr(scratch2, scratch1);
5234 ScratchDoubleScope fpscratch(masm);
5235 masm.convertIntPtrToDouble(scratch1, fpscratch);
5236 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
5237 return true;
5240 bool CacheIRCompiler::emitResizableTypedArrayLengthInt32Result(
5241 ObjOperandId objId) {
5242 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5244 AutoOutputRegister output(*this);
5245 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
5246 AutoScratchRegister scratch2(allocator, masm);
5247 Register obj = allocator.useRegister(masm, objId);
5249 FailurePath* failure;
5250 if (!addFailurePath(&failure)) {
5251 return false;
5254 // Explicit |length| accesses are seq-consistent atomic loads.
5255 auto sync = Synchronization::Load();
5257 masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
5258 masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
5260 masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
5261 return true;
5264 bool CacheIRCompiler::emitResizableTypedArrayLengthDoubleResult(
5265 ObjOperandId objId) {
5266 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5268 AutoOutputRegister output(*this);
5269 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
5270 AutoScratchRegister scratch2(allocator, masm);
5271 Register obj = allocator.useRegister(masm, objId);
5273 // Explicit |length| accesses are seq-consistent atomic loads.
5274 auto sync = Synchronization::Load();
5276 masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
5278 ScratchDoubleScope fpscratch(masm);
5279 masm.convertIntPtrToDouble(scratch1, fpscratch);
5280 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
5281 return true;
5284 bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId) {
5285 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5287 AutoOutputRegister output(*this);
5288 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5289 Register obj = allocator.useRegister(masm, objId);
5291 masm.typedArrayElementSize(obj, scratch);
5292 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5293 return true;
bool CacheIRCompiler::emitResizableDataViewByteLengthInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Explicit |byteLength| accesses are seq-consistent atomic loads.
  auto sync = Synchronization::Load();

  masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch1, scratch2);
  masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitResizableDataViewByteLengthDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  // Explicit |byteLength| accesses are seq-consistent atomic loads.
  auto sync = Synchronization::Load();

  masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch1, scratch2);

  ScratchDoubleScope fpscratch(masm);
  masm.convertIntPtrToDouble(scratch1, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}

bool CacheIRCompiler::emitGrowableSharedArrayBufferByteLengthInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Explicit |byteLength| accesses are seq-consistent atomic loads.
  auto sync = Synchronization::Load();

  masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, scratch);
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitGrowableSharedArrayBufferByteLengthDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  // Explicit |byteLength| accesses are seq-consistent atomic loads.
  auto sync = Synchronization::Load();

  masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, scratch);

  ScratchDoubleScope fpscratch(masm);
  masm.convertIntPtrToDouble(scratch, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}

bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfHasDetachedArrayBuffer(obj, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardResizableArrayBufferViewInBounds(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfResizableArrayBufferViewOutOfBounds(obj, scratch,
                                                   failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardResizableArrayBufferViewInBoundsOrDetached(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.branchIfResizableArrayBufferViewInBounds(obj, scratch, &done);
  masm.branchIfHasAttachedArrayBuffer(obj, scratch, failure->label());
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitIsTypedArrayConstructorResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  masm.setIsDefinitelyTypedArrayConstructor(obj, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitGetNextMapSetEntryForIteratorResult(
    ObjOperandId iterId, ObjOperandId resultArrId, bool isMap) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register iter = allocator.useRegister(masm, iterId);
  Register resultArr = allocator.useRegister(masm, resultArrId);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(output.valueReg());
  save.takeUnchecked(scratch);
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(iter);
  masm.passABIArg(resultArr);
  if (isMap) {
    using Fn = bool (*)(MapIteratorObject* iter, ArrayObject* resultPairObj);
    masm.callWithABI<Fn, MapIteratorObject::next>();
  } else {
    using Fn = bool (*)(SetIteratorObject* iter, ArrayObject* resultObj);
    masm.callWithABI<Fn, SetIteratorObject::next>();
  }
  masm.storeCallBoolResult(scratch);

  masm.PopRegsInMask(save);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

void CacheIRCompiler::emitActivateIterator(Register objBeingIterated,
                                           Register iterObject,
                                           Register nativeIter,
                                           Register scratch, Register scratch2,
                                           uint32_t enumeratorsAddrOffset) {
  // 'objectBeingIterated_' must be nullptr, so we don't need a pre-barrier.
  Address iterObjAddr(nativeIter,
                      NativeIterator::offsetOfObjectBeingIterated());
#ifdef DEBUG
  Label ok;
  masm.branchPtr(Assembler::Equal, iterObjAddr, ImmPtr(nullptr), &ok);
  masm.assumeUnreachable("iterator with non-null object");
  masm.bind(&ok);
#endif

  // Mark iterator as active.
  Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
  masm.storePtr(objBeingIterated, iterObjAddr);
  masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);

  // Post-write barrier for stores to 'objectBeingIterated_'.
  emitPostBarrierSlot(
      iterObject,
      TypedOrValueRegister(MIRType::Object, AnyRegister(objBeingIterated)),
      scratch);

  // Chain onto the active iterator stack.
  StubFieldOffset enumeratorsAddr(enumeratorsAddrOffset,
                                  StubField::Type::RawPointer);
  emitLoadStubField(enumeratorsAddr, scratch);
  masm.registerIterator(scratch, nativeIter, scratch2);
}

bool CacheIRCompiler::emitObjectToIteratorResult(
    ObjOperandId objId, uint32_t enumeratorsAddrOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister iterObj(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, callvm.output());
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, callvm.output());

  Label callVM, done;
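  // Fast path: reuse an iterator that is cached on the object's shape, if
  // there is one; otherwise fall back to the GetIterator VM call below.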
  masm.maybeLoadIteratorFromShape(obj, iterObj, scratch, scratch2, scratch3,
                                  &callVM);

  masm.loadPrivate(
      Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
      scratch);

  emitActivateIterator(obj, iterObj, scratch, scratch2, scratch3,
                       enumeratorsAddrOffset);
  masm.jump(&done);

  masm.bind(&callVM);
  callvm.prepare();
  masm.Push(obj);
  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
  callvm.call<Fn, GetIterator>();
  masm.storeCallPointerResult(iterObj);

  masm.bind(&done);
  EmitStoreResult(masm, iterObj, JSVAL_TYPE_OBJECT, callvm.output());
  return true;
}

bool CacheIRCompiler::emitValueToIteratorResult(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  ValueOperand val = allocator.useValueRegister(masm, valId);

  callvm.prepare();

  masm.Push(val);

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
  callvm.call<Fn, ValueToIterator>();
  return true;
}

bool CacheIRCompiler::emitNewArrayIteratorResult(
    uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  callvm.prepare();

  using Fn = ArrayIteratorObject* (*)(JSContext*);
  callvm.call<Fn, NewArrayIterator>();
  return true;
}

bool CacheIRCompiler::emitNewStringIteratorResult(
    uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  callvm.prepare();

  using Fn = StringIteratorObject* (*)(JSContext*);
  callvm.call<Fn, NewStringIterator>();
  return true;
}

bool CacheIRCompiler::emitNewRegExpStringIteratorResult(
    uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  callvm.prepare();

  using Fn = RegExpStringIteratorObject* (*)(JSContext*);
  callvm.call<Fn, NewRegExpStringIterator>();
  return true;
}

bool CacheIRCompiler::emitObjectCreateResult(uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);

  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(scratch);

  using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
  callvm.call<Fn, ObjectCreateWithTemplate>();
  return true;
}

bool CacheIRCompiler::emitObjectKeysResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  Register obj = allocator.useRegister(masm, objId);

  // Our goal is only to record calls to Object.keys, to elide it when
  // partially used, not to provide an alternative implementation.
  callvm.prepare();
  masm.Push(obj);

  using Fn = JSObject* (*)(JSContext*, HandleObject);
  callvm.call<Fn, jit::ObjectKeys>();

  return true;
}

bool CacheIRCompiler::emitNewArrayFromLengthResult(
    uint32_t templateObjectOffset, Int32OperandId lengthId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);
  Register length = allocator.useRegister(masm, lengthId);

  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(length);
  masm.Push(scratch);

  using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
  callvm.call<Fn, ArrayConstructorOneArg>();
  return true;
}

bool CacheIRCompiler::emitNewTypedArrayFromLengthResult(
    uint32_t templateObjectOffset, Int32OperandId lengthId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);
  Register length = allocator.useRegister(masm, lengthId);

  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(length);
  masm.Push(scratch);

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
  callvm.call<Fn, NewTypedArrayWithTemplateAndLength>();
  return true;
}

bool CacheIRCompiler::emitNewTypedArrayFromArrayBufferResult(
    uint32_t templateObjectOffset, ObjOperandId bufferId,
    ValOperandId byteOffsetId, ValOperandId lengthId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

#ifdef JS_CODEGEN_X86
  MOZ_CRASH("Instruction not supported on 32-bit x86, not enough registers");
#endif

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);
  Register buffer = allocator.useRegister(masm, bufferId);
  ValueOperand byteOffset = allocator.useValueRegister(masm, byteOffsetId);
  ValueOperand length = allocator.useValueRegister(masm, lengthId);

  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(length);
  masm.Push(byteOffset);
  masm.Push(buffer);
  masm.Push(scratch);

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
                                   HandleValue, HandleValue);
  callvm.call<Fn, NewTypedArrayWithTemplateAndBuffer>();
  return true;
}

bool CacheIRCompiler::emitNewTypedArrayFromArrayResult(
    uint32_t templateObjectOffset, ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);
  Register array = allocator.useRegister(masm, arrayId);

  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(array);
  masm.Push(scratch);

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
  callvm.call<Fn, NewTypedArrayWithTemplateAndArray>();
  return true;
}

bool CacheIRCompiler::emitAddSlotAndCallAddPropHook(ObjOperandId objId,
                                                    ValOperandId rhsId,
                                                    uint32_t newShapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand rhs = allocator.useValueRegister(masm, rhsId);

  StubFieldOffset shapeField(newShapeOffset, StubField::Type::Shape);
  emitLoadStubField(shapeField, scratch);

  callvm.prepare();

  masm.Push(scratch);
  masm.Push(rhs);
  masm.Push(obj);

  using Fn =
      bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
  callvm.callNoResult<Fn, AddSlotAndCallAddPropHook>();
  return true;
}

bool CacheIRCompiler::emitMathAbsInt32Result(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register input = allocator.useRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(input, scratch);
  // Don't negate already positive values.
  Label positive;
  masm.branchTest32(Assembler::NotSigned, scratch, scratch, &positive);
  // neg32 might overflow for INT_MIN.
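  // (|-2147483648| == 2147483648 does not fit in an int32, so INT_MIN is the
  // one input that takes the failure path.)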
  masm.branchNeg32(Assembler::Overflow, scratch, failure->label());
  masm.bind(&positive);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathAbsNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  masm.absDouble(scratch, scratch);
  masm.boxDouble(scratch, output.valueReg(), scratch);
  return true;
}

bool CacheIRCompiler::emitMathClz32Result(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register input = allocator.useRegister(masm, inputId);

  masm.clz32(input, scratch, /* knownNotZero = */ false);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathSignInt32Result(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register input = allocator.useRegister(masm, inputId);

  masm.signInt32(input, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathSignNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, inputId, floatScratch1);

  masm.signDouble(floatScratch1, floatScratch2);
  masm.boxDouble(floatScratch2, output.valueReg(), floatScratch2);
  return true;
}

bool CacheIRCompiler::emitMathSignNumberToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, floatScratch1);

  masm.signDoubleToInt32(floatScratch1, scratch, floatScratch2,
                         failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathImulResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
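
  // mul32 wraps modulo 2^32, matching Math.imul's C-style int32
  // multiplication, e.g. Math.imul(0x7fffffff, 2) == -2.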
  masm.mov(lhs, scratch);
  masm.mul32(rhs, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathSqrtNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  masm.sqrtDouble(scratch, scratch);
  masm.boxDouble(scratch, output.valueReg(), scratch);
  return true;
}

bool CacheIRCompiler::emitMathFloorNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::Down)) {
    masm.nearbyIntDouble(RoundingMode::Down, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Floor, scratch,
                                            output.valueReg());
}

bool CacheIRCompiler::emitMathCeilNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::Up)) {
    masm.nearbyIntDouble(RoundingMode::Up, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Ceil, scratch,
                                            output.valueReg());
}

bool CacheIRCompiler::emitMathTruncNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::TowardsZero)) {
    masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Trunc, scratch,
                                            output.valueReg());
}

bool CacheIRCompiler::emitMathFRoundNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);
  FloatRegister scratchFloat32 = scratch.get().asSingle();

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  masm.convertDoubleToFloat32(scratch, scratchFloat32);
  masm.convertFloat32ToDouble(scratchFloat32, scratch);

  masm.boxDouble(scratch, output.valueReg(), scratch);
  return true;
}

bool CacheIRCompiler::emitMathHypot2NumberResult(NumberOperandId first,
                                                 NumberOperandId second) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);

  masm.callWithABI<Fn, ecmaHypot>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);
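
  // Keep |floatScratch0| out of the restore set so the call result isn't
  // clobbered when the saved registers are popped.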
  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}

bool CacheIRCompiler::emitMathHypot3NumberResult(NumberOperandId first,
                                                 NumberOperandId second,
                                                 NumberOperandId third) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);
  allocator.ensureDoubleRegister(masm, third, floatScratch2);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y, double z);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);
  masm.passABIArg(floatScratch2, ABIType::Float64);

  masm.callWithABI<Fn, hypot3>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}

bool CacheIRCompiler::emitMathHypot4NumberResult(NumberOperandId first,
                                                 NumberOperandId second,
                                                 NumberOperandId third,
                                                 NumberOperandId fourth) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
  AutoAvailableFloatRegister floatScratch3(*this, FloatReg3);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);
  allocator.ensureDoubleRegister(masm, third, floatScratch2);
  allocator.ensureDoubleRegister(masm, fourth, floatScratch3);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y, double z, double w);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);
  masm.passABIArg(floatScratch2, ABIType::Float64);
  masm.passABIArg(floatScratch3, ABIType::Float64);

  masm.callWithABI<Fn, hypot4>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}

bool CacheIRCompiler::emitMathAtan2NumberResult(NumberOperandId yId,
                                                NumberOperandId xId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, yId, floatScratch0);
  allocator.ensureDoubleRegister(masm, xId, floatScratch1);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);
  masm.callWithABI<Fn, js::ecmaAtan2>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}

bool CacheIRCompiler::emitMathFloorToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.floorDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathCeilToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.ceilDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathTruncToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.truncDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMathRoundToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat0(*this, FloatReg0);
  AutoAvailableFloatRegister scratchFloat1(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat0);

  masm.roundDoubleToInt32(scratchFloat0, scratch, scratchFloat1,
                          failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitInt32MinMax(bool isMax, Int32OperandId firstId,
                                      Int32OperandId secondId,
                                      Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register first = allocator.useRegister(masm, firstId);
  Register second = allocator.useRegister(masm, secondId);
  Register result = allocator.defineRegister(masm, resultId);

  Assembler::Condition cond =
      isMax ? Assembler::GreaterThan : Assembler::LessThan;
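  // Branchless select: start from |first| and conditionally overwrite the
  // result with |second| when |second cond first| holds.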
  masm.move32(first, result);
  masm.cmp32Move32(cond, second, first, second, result);
  return true;
}

bool CacheIRCompiler::emitNumberMinMax(bool isMax, NumberOperandId firstId,
                                       NumberOperandId secondId,
                                       NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  AutoAvailableFloatRegister scratch1(*this, FloatReg0);
  AutoAvailableFloatRegister scratch2(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, firstId, scratch1);
  allocator.ensureDoubleRegister(masm, secondId, scratch2);

  if (isMax) {
    masm.maxDouble(scratch2, scratch1, /* handleNaN = */ true);
  } else {
    masm.minDouble(scratch2, scratch1, /* handleNaN = */ true);
  }

  masm.boxDouble(scratch1, output, scratch1);
  return true;
}

bool CacheIRCompiler::emitInt32MinMaxArrayResult(ObjOperandId arrayId,
                                                 bool isMax) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);
  AutoScratchRegisterMaybeOutput result(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.minMaxArrayInt32(array, result, scratch, scratch2, scratch3, isMax,
                        failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, result, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitNumberMinMaxArrayResult(ObjOperandId arrayId,
                                                  bool isMax) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);

  AutoAvailableFloatRegister result(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch(*this, FloatReg1);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.minMaxArrayNumber(array, result, floatScratch, scratch1, scratch2, isMax,
                         failure->label());
  masm.boxDouble(result, output.valueReg(), result);
  return true;
}
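
// Shared tail for unary Math functions without inline codegen: boxes the
// double produced by calling the C++ implementation through an ABI call.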
bool CacheIRCompiler::emitMathFunctionNumberResultShared(
    UnaryMathFunction fun, FloatRegister inputScratch, ValueOperand output) {
  UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(inputScratch);
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(output.scratchReg());
  masm.passABIArg(inputScratch, ABIType::Float64);
  masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
                   ABIType::Float64);
  masm.storeCallFloatResult(inputScratch);

  masm.PopRegsInMask(save);

  masm.boxDouble(inputScratch, output, inputScratch);
  return true;
}

bool CacheIRCompiler::emitMathFunctionNumberResult(NumberOperandId inputId,
                                                   UnaryMathFunction fun) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  return emitMathFunctionNumberResultShared(fun, scratch, output.valueReg());
}

static void EmitStoreDenseElement(MacroAssembler& masm,
                                  const ConstantOrRegister& value,
                                  BaseObjectElementIndex target) {
  if (value.constant()) {
    Value v = value.value();
    masm.storeValue(v, target);
    return;
  }

  TypedOrValueRegister reg = value.reg();
  masm.storeTypedOrValue(reg, target);
}

bool CacheIRCompiler::emitStoreDenseElement(ObjOperandId objId,
                                            Int32OperandId indexId,
                                            ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Bounds check. Unfortunately we don't have more registers available on
  // x86, so use InvalidReg and emit slightly slower code on x86.
  Register spectreTemp = InvalidReg;
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());

  // Perform the store.
  EmitPreBarrier(masm, element, MIRType::Value);
  EmitStoreDenseElement(masm, val, element);

  emitPostBarrierElement(obj, val, scratch, index);
  return true;
}

static void EmitAssertExtensibleElements(MacroAssembler& masm,
                                         Register elementsReg) {
#ifdef DEBUG
  // Preceding shape guards ensure the object elements are extensible.
  Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
  Label ok;
  masm.branchTest32(Assembler::Zero, elementsFlags,
                    Imm32(ObjectElements::Flags::NOT_EXTENSIBLE), &ok);
  masm.assumeUnreachable("Unexpected non-extensible elements");
  masm.bind(&ok);
#endif
}

static void EmitAssertWritableArrayLengthElements(MacroAssembler& masm,
                                                  Register elementsReg) {
#ifdef DEBUG
  // Preceding shape guards ensure the array length is writable.
  Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
  Label ok;
  masm.branchTest32(Assembler::Zero, elementsFlags,
                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
                    &ok);
  masm.assumeUnreachable("Unexpected non-writable array length elements");
  masm.bind(&ok);
#endif
}

bool CacheIRCompiler::emitStoreDenseElementHole(ObjOperandId objId,
                                                Int32OperandId indexId,
                                                ValOperandId rhsId,
                                                bool handleAdd) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  EmitAssertExtensibleElements(masm, scratch);
  if (handleAdd) {
    EmitAssertWritableArrayLengthElements(masm, scratch);
  }

  BaseObjectElementIndex element(scratch, index);
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  Address elementsFlags(scratch, ObjectElements::offsetOfFlags());

  // We don't have enough registers on x86 so use InvalidReg. This will emit
  // slightly less efficient code on x86.
  Register spectreTemp = InvalidReg;

  Label storeSkipPreBarrier;
  if (handleAdd) {
    // Bounds check.
    Label inBounds, outOfBounds;
    masm.spectreBoundsCheck32(index, initLength, spectreTemp, &outOfBounds);
    masm.jump(&inBounds);

    // If we're out-of-bounds, only handle the index == initLength case.
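    // Writing at any larger index would create a hole, which dense elements
    // can't represent, so those stores are left to the generic path.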
    masm.bind(&outOfBounds);
    masm.branch32(Assembler::NotEqual, initLength, index, failure->label());

    // If index < capacity, we can add a dense element inline. If not we
    // need to allocate more elements.
    Label allocElement, addNewElement;
    Address capacity(scratch, ObjectElements::offsetOfCapacity());
    masm.spectreBoundsCheck32(index, capacity, spectreTemp, &allocElement);
    masm.jump(&addNewElement);

    masm.bind(&allocElement);

    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    save.takeUnchecked(scratch);
    masm.PushRegsInMask(save);

    using Fn = bool (*)(JSContext* cx, NativeObject* obj);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
    masm.storeCallPointerResult(scratch);

    masm.PopRegsInMask(save);
    masm.branchIfFalseBool(scratch, failure->label());

    // Load the reallocated elements pointer.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    masm.bind(&addNewElement);

    // Increment initLength.
    masm.add32(Imm32(1), initLength);

    // If length is now <= index, increment length too.
    Label skipIncrementLength;
    Address length(scratch, ObjectElements::offsetOfLength());
    masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
    masm.add32(Imm32(1), length);
    masm.bind(&skipIncrementLength);

    // Skip EmitPreBarrier as the memory is uninitialized.
    masm.jump(&storeSkipPreBarrier);

    masm.bind(&inBounds);
  } else {
    // Fail if index >= initLength.
    masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());
  }

  EmitPreBarrier(masm, element, MIRType::Value);

  masm.bind(&storeSkipPreBarrier);
  EmitStoreDenseElement(masm, val, element);

  emitPostBarrierElement(obj, val, scratch, index);
  return true;
}

bool CacheIRCompiler::emitArrayPush(ObjOperandId objId, ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratchLength(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  EmitAssertExtensibleElements(masm, scratch);
  EmitAssertWritableArrayLengthElements(masm, scratch);

  Address elementsInitLength(scratch,
                             ObjectElements::offsetOfInitializedLength());
  Address elementsLength(scratch, ObjectElements::offsetOfLength());
  Address capacity(scratch, ObjectElements::offsetOfCapacity());

  // Fail if length != initLength.
  masm.load32(elementsInitLength, scratchLength);
  masm.branch32(Assembler::NotEqual, elementsLength, scratchLength,
                failure->label());

  // If scratchLength < capacity, we can add a dense element inline. If not we
  // need to allocate more elements.
  Label allocElement, addNewElement;
  masm.spectreBoundsCheck32(scratchLength, capacity, InvalidReg, &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(scratch);
  masm.PushRegsInMask(save);

  using Fn = bool (*)(JSContext* cx, NativeObject* obj);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(scratch);

  masm.PopRegsInMask(save);
  masm.branchIfFalseBool(scratch, failure->label());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  masm.bind(&addNewElement);

  // Increment initLength and length.
  masm.add32(Imm32(1), elementsInitLength);
  masm.add32(Imm32(1), elementsLength);

  // Store the value.
  BaseObjectElementIndex element(scratch, scratchLength);
  masm.storeValue(val, element);
  emitPostBarrierElement(obj, val, scratch, scratchLength);

  // Return value is new length.
  masm.add32(Imm32(1), scratchLength);
  masm.tagValue(JSVAL_TYPE_INT32, scratchLength, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
                                                 Scalar::Type elementType,
                                                 IntPtrOperandId indexId,
                                                 uint32_t rhsId, bool handleOOB,
                                                 ArrayBufferViewKind viewKind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  Maybe<Register> valInt32;
  Maybe<Register> valBigInt;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Uint8Clamped:
      valInt32.emplace(allocator.useRegister(masm, Int32OperandId(rhsId)));
      break;

    case Scalar::Float32:
    case Scalar::Float64:
      allocator.ensureDoubleRegister(masm, NumberOperandId(rhsId),
                                     floatScratch0);
      break;

    case Scalar::BigInt64:
    case Scalar::BigUint64:
      valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(rhsId)));
      break;

    case Scalar::MaxTypedArrayViewType:
    case Scalar::Int64:
    case Scalar::Simd128:
      MOZ_CRASH("Unsupported TypedArray type");
  }

  AutoScratchRegister scratch1(allocator, masm);
  Maybe<AutoScratchRegister> scratch2;
  Maybe<AutoSpectreBoundsScratchRegister> spectreScratch;
  if (Scalar::isBigIntType(elementType) ||
      viewKind == ArrayBufferViewKind::Resizable) {
    scratch2.emplace(allocator, masm);
  } else {
    spectreScratch.emplace(allocator, masm);
  }

  FailurePath* failure = nullptr;
  if (!handleOOB) {
    if (!addFailurePath(&failure)) {
      return false;
    }
  }

  // Bounds check.
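  // With |handleOOB|, an out-of-bounds store is silently ignored, matching
  // the spec'ed behavior for typed array element writes; without it we bail
  // to the failure path.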
  Label done;
  emitTypedArrayBoundsCheck(viewKind, obj, index, scratch1, scratch2,
                            spectreScratch,
                            handleOOB ? &done : failure->label());

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);

  BaseIndex dest(scratch1, index, ScaleFromScalarType(elementType));

  if (Scalar::isBigIntType(elementType)) {
#ifdef JS_PUNBOX64
    Register64 temp(scratch2->get());
#else
    // We don't have more registers available on x86, so spill |obj|.
    masm.push(obj);
    Register64 temp(scratch2->get(), obj);
#endif

    masm.loadBigInt64(*valBigInt, temp);
    masm.storeToTypedBigIntArray(elementType, temp, dest);

#ifndef JS_PUNBOX64
    masm.pop(obj);
#endif
  } else if (elementType == Scalar::Float32) {
    ScratchFloat32Scope fpscratch(masm);
    masm.convertDoubleToFloat32(floatScratch0, fpscratch);
    masm.storeToTypedFloatArray(elementType, fpscratch, dest);
  } else if (elementType == Scalar::Float64) {
    masm.storeToTypedFloatArray(elementType, floatScratch0, dest);
  } else {
    masm.storeToTypedIntArray(elementType, *valInt32, dest);
  }

  masm.bind(&done);
  return true;
}

static gc::Heap InitialBigIntHeap(JSContext* cx) {
  JS::Zone* zone = cx->zone();
  return zone->allocNurseryBigInts() ? gc::Heap::Default : gc::Heap::Tenured;
}

static void EmitAllocateBigInt(MacroAssembler& masm, Register result,
                               Register temp, const LiveRegisterSet& liveSet,
                               gc::Heap initialHeap, Label* fail) {
  Label fallback, done;
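  // Try the inline (nursery) allocation path first and only call into the VM
  // below when it fails.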
  masm.newGCBigInt(result, temp, initialHeap, &fallback);
  masm.jump(&done);

  masm.bind(&fallback);

  // Request a minor collection at a later time if nursery allocation failed.
  bool requestMinorGC = initialHeap == gc::Heap::Default;

  masm.PushRegsInMask(liveSet);
  using Fn = void* (*)(JSContext* cx, bool requestMinorGC);
  masm.setupUnalignedABICall(temp);
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.move32(Imm32(requestMinorGC), result);
  masm.passABIArg(result);
  masm.callWithABI<Fn, jit::AllocateBigIntNoGC>();
  masm.storeCallPointerResult(result);

  masm.PopRegsInMask(liveSet);
  masm.branchPtr(Assembler::Equal, result, ImmWord(0), fail);

  masm.bind(&done);
}

void CacheIRCompiler::emitTypedArrayBoundsCheck(ArrayBufferViewKind viewKind,
                                                Register obj, Register index,
                                                Register scratch,
                                                Register maybeScratch,
                                                Register spectreScratch,
                                                Label* fail) {
  // |index| must not alias any scratch register.
  MOZ_ASSERT(index != scratch);
  MOZ_ASSERT(index != maybeScratch);
  MOZ_ASSERT(index != spectreScratch);

  if (viewKind == ArrayBufferViewKind::FixedLength) {
    masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
    masm.spectreBoundsCheckPtr(index, scratch, spectreScratch, fail);
  } else {
    if (maybeScratch == InvalidReg) {
      // Spill |index| to use it as an additional scratch register.
      masm.push(index);

      maybeScratch = index;
    } else {
      // Use |maybeScratch| when no explicit |spectreScratch| is present.
      if (spectreScratch == InvalidReg) {
        spectreScratch = maybeScratch;
      }
    }

    // Bounds check doesn't require synchronization. See IsValidIntegerIndex
    // abstract operation which reads the underlying buffer byte length using
    // "unordered" memory order.
    auto sync = Synchronization::None();

    masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch, maybeScratch);

    if (maybeScratch == index) {
      // Restore |index|.
      masm.pop(index);
    }

    masm.spectreBoundsCheckPtr(index, scratch, spectreScratch, fail);
  }
}

void CacheIRCompiler::emitTypedArrayBoundsCheck(
    ArrayBufferViewKind viewKind, Register obj, Register index,
    Register scratch, mozilla::Maybe<Register> maybeScratch,
    mozilla::Maybe<Register> spectreScratch, Label* fail) {
  emitTypedArrayBoundsCheck(viewKind, obj, index, scratch,
                            maybeScratch.valueOr(InvalidReg),
                            spectreScratch.valueOr(InvalidReg), fail);
}

bool CacheIRCompiler::emitLoadTypedArrayElementResult(
    ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
    bool handleOOB, bool forceDoubleForUint32, ArrayBufferViewKind viewKind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoScratchRegister scratch1(allocator, masm);
#ifdef JS_PUNBOX64
  AutoScratchRegister scratch2(allocator, masm);
#else
  // There are too few registers available on x86, so we may need to reuse the
  // output's scratch register.
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  Label outOfBounds;
  emitTypedArrayBoundsCheck(viewKind, obj, index, scratch1, scratch2, scratch2,
                            handleOOB ? &outOfBounds : failure->label());

  // Allocate BigInt if needed. The code after this should be infallible.
  Maybe<Register> bigInt;
  if (Scalar::isBigIntType(elementType)) {
    bigInt.emplace(output.valueReg().scratchReg());

    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    save.takeUnchecked(scratch1);
    save.takeUnchecked(scratch2);
    save.takeUnchecked(output);

    gc::Heap initialHeap = InitialBigIntHeap(cx_);
    EmitAllocateBigInt(masm, *bigInt, scratch1, save, initialHeap,
                       failure->label());
  }

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);

  // Load the value.
  BaseIndex source(scratch1, index, ScaleFromScalarType(elementType));

  if (Scalar::isBigIntType(elementType)) {
#ifdef JS_PUNBOX64
    Register64 temp(scratch2);
#else
    // We don't have more registers available on x86, so spill |obj| and
    // additionally use the output's type register.
    MOZ_ASSERT(output.valueReg().scratchReg() != output.valueReg().typeReg());
    masm.push(obj);
    Register64 temp(output.valueReg().typeReg(), obj);
#endif

    masm.loadFromTypedBigIntArray(elementType, source, *bigInt, temp);

#ifndef JS_PUNBOX64
    masm.pop(obj);
#endif

    masm.tagValue(JSVAL_TYPE_BIGINT, *bigInt, output.valueReg());
  } else {
    MacroAssembler::Uint32Mode uint32Mode =
        forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
                             : MacroAssembler::Uint32Mode::FailOnDouble;
    masm.loadFromTypedArray(elementType, source, output.valueReg(), uint32Mode,
                            scratch1, failure->label());
  }

  if (handleOOB) {
    Label done;
    masm.jump(&done);

    masm.bind(&outOfBounds);
    masm.moveValue(UndefinedValue(), output.valueReg());

    masm.bind(&done);
  }

  return true;
}

void CacheIRCompiler::emitDataViewBoundsCheck(ArrayBufferViewKind viewKind,
                                              size_t byteSize, Register obj,
                                              Register offset, Register scratch,
                                              Register maybeScratch,
                                              Label* fail) {
  // |offset| must not alias any scratch register.
  MOZ_ASSERT(offset != scratch);
  MOZ_ASSERT(offset != maybeScratch);

  if (viewKind == ArrayBufferViewKind::FixedLength) {
    masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  } else {
    if (maybeScratch == InvalidReg) {
      // Spill |offset| to use it as an additional scratch register.
      masm.push(offset);

      maybeScratch = offset;
    }

    // Bounds check doesn't require synchronization. See GetViewValue and
    // SetViewValue abstract operations which read the underlying buffer byte
    // length using "unordered" memory order.
    auto sync = Synchronization::None();

    masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch,
                                               maybeScratch);

    if (maybeScratch == offset) {
      // Restore |offset|.
      masm.pop(offset);
    }
  }

  // Ensure both offset < length and offset + (byteSize - 1) < length.
  if (byteSize == 1) {
    masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
  } else {
    // temp := length - (byteSize - 1)
    // if temp < 0: fail
    // if offset >= temp: fail
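    // e.g. byteSize == 4 and length == 10 give temp == 7: offsets 0..6 are
    // accepted, while offset 7 would touch bytes 7..10 and fails.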
    masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), scratch, fail);
    masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
  }
}

bool CacheIRCompiler::emitLoadDataViewValueResult(
    ObjOperandId objId, IntPtrOperandId offsetId,
    BooleanOperandId littleEndianId, Scalar::Type elementType,
    bool forceDoubleForUint32, ArrayBufferViewKind viewKind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register offset = allocator.useRegister(masm, offsetId);
  Register littleEndian = allocator.useRegister(masm, littleEndianId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  Register64 outputReg64 = output.valueReg().toRegister64();
  Register outputScratch = outputReg64.scratchReg();

  Register boundsCheckScratch;
#ifndef JS_CODEGEN_X86
  Maybe<AutoScratchRegister> maybeBoundsCheckScratch;
  if (viewKind == ArrayBufferViewKind::Resizable) {
    maybeBoundsCheckScratch.emplace(allocator, masm);
    boundsCheckScratch = *maybeBoundsCheckScratch;
  }
#else
  // Not enough registers on x86, so use the other part of outputReg64.
  boundsCheckScratch = outputReg64.secondScratchReg();
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  const size_t byteSize = Scalar::byteSize(elementType);

  emitDataViewBoundsCheck(viewKind, byteSize, obj, offset, outputScratch,
                          boundsCheckScratch, failure->label());

  masm.loadPtr(Address(obj, DataViewObject::dataOffset()), outputScratch);

  // Load the value.
  BaseIndex source(outputScratch, offset, TimesOne);
  switch (elementType) {
    case Scalar::Int8:
      masm.load8SignExtend(source, outputScratch);
      break;
    case Scalar::Uint8:
      masm.load8ZeroExtend(source, outputScratch);
      break;
    case Scalar::Int16:
      masm.load16UnalignedSignExtend(source, outputScratch);
      break;
    case Scalar::Uint16:
      masm.load16UnalignedZeroExtend(source, outputScratch);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.load32Unaligned(source, outputScratch);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.load64Unaligned(source, outputReg64);
      break;
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  // Swap the bytes in the loaded value.
  if (byteSize > 1) {
    Label skip;
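    // Skip the swap when the requested endianness matches the native one: on
    // little-endian hardware we only swap when |littleEndian| is false, on
    // big-endian hardware only when it's true.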
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  littleEndian, Imm32(0), &skip);

    switch (elementType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(outputScratch);
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(outputScratch);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
        masm.byteSwap32(outputScratch);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(outputReg64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid type");
    }

    masm.bind(&skip);
  }

  // Move the value into the output register.
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      masm.tagValue(JSVAL_TYPE_INT32, outputScratch, output.valueReg());
      break;
    case Scalar::Uint32: {
      MacroAssembler::Uint32Mode uint32Mode =
          forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
                               : MacroAssembler::Uint32Mode::FailOnDouble;
      masm.boxUint32(outputScratch, output.valueReg(), uint32Mode,
                     failure->label());
      break;
    }
    case Scalar::Float32: {
      FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
      masm.moveGPRToFloat32(outputScratch, scratchFloat32);
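      // Canonicalize the NaN bit pattern before boxing: NaN-boxed JS::Values
      // must never contain a non-canonical NaN.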
      masm.canonicalizeFloat(scratchFloat32);
      masm.convertFloat32ToDouble(scratchFloat32, floatScratch0);
      masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
      break;
    }
    case Scalar::Float64:
      masm.moveGPR64ToDouble(outputReg64, floatScratch0);
      masm.canonicalizeDouble(floatScratch0);
      masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64: {
      // We need two extra registers. Reuse the obj/littleEndian registers.
      Register bigInt = obj;
      Register bigIntScratch = littleEndian;
      masm.push(bigInt);
      masm.push(bigIntScratch);
      Label fail, done;
      LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                           liveVolatileFloatRegs());
      save.takeUnchecked(bigInt);
      save.takeUnchecked(bigIntScratch);
      gc::Heap initialHeap = InitialBigIntHeap(cx_);
      EmitAllocateBigInt(masm, bigInt, bigIntScratch, save, initialHeap, &fail);
      masm.jump(&done);

      masm.bind(&fail);
      masm.pop(bigIntScratch);
      masm.pop(bigInt);
      masm.jump(failure->label());

      masm.bind(&done);
      masm.initializeBigInt64(elementType, bigInt, outputReg64);
      masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
      masm.pop(bigIntScratch);
      masm.pop(bigInt);
      break;
    }
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  return true;
}


bool CacheIRCompiler::emitStoreDataViewValueResult(
    ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
    BooleanOperandId littleEndianId, Scalar::Type elementType,
    ArrayBufferViewKind viewKind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
#ifdef JS_CODEGEN_X86
  // Use a scratch register to avoid running out of registers.
  Register obj = output.valueReg().typeReg();
  allocator.copyToScratchRegister(masm, objId, obj);
#else
  Register obj = allocator.useRegister(masm, objId);
#endif
  Register offset = allocator.useRegister(masm, offsetId);
  Register littleEndian = allocator.useRegister(masm, littleEndianId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  Maybe<Register> valInt32;
  Maybe<Register> valBigInt;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Uint8Clamped:
      valInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
      break;

    case Scalar::Float32:
    case Scalar::Float64:
      allocator.ensureDoubleRegister(masm, NumberOperandId(valueId),
                                     floatScratch0);
      break;

    case Scalar::BigInt64:
    case Scalar::BigUint64:
      valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
      break;

    case Scalar::MaxTypedArrayViewType:
    case Scalar::Int64:
    case Scalar::Simd128:
      MOZ_CRASH("Unsupported type");
  }

  Register scratch1 = output.valueReg().scratchReg();
  MOZ_ASSERT(scratch1 != obj, "scratchReg must not be typeReg");

  // On platforms with enough registers, |scratch2| is an extra scratch register
  // (pair) used for byte-swapping the value.
#ifndef JS_CODEGEN_X86
  mozilla::MaybeOneOf<AutoScratchRegister, AutoScratchRegister64> scratch2;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
      break;
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      scratch2.construct<AutoScratchRegister>(allocator, masm);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      scratch2.construct<AutoScratchRegister64>(allocator, masm);
      break;
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }
#endif

  Register boundsCheckScratch;
#ifndef JS_CODEGEN_X86
  Maybe<AutoScratchRegister> maybeBoundsCheckScratch;
  if (viewKind == ArrayBufferViewKind::Resizable) {
    if (scratch2.constructed<AutoScratchRegister>()) {
      boundsCheckScratch = scratch2.ref<AutoScratchRegister>().get();
    } else if (scratch2.constructed<AutoScratchRegister64>()) {
      boundsCheckScratch =
          scratch2.ref<AutoScratchRegister64>().get().scratchReg();
    } else {
      maybeBoundsCheckScratch.emplace(allocator, masm);
      boundsCheckScratch = *maybeBoundsCheckScratch;
    }
  }
#else
  // Not enough registers on x86.
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  const size_t byteSize = Scalar::byteSize(elementType);

  emitDataViewBoundsCheck(viewKind, byteSize, obj, offset, scratch1,
                          boundsCheckScratch, failure->label());

  masm.loadPtr(Address(obj, DataViewObject::dataOffset()), scratch1);
  BaseIndex dest(scratch1, offset, TimesOne);

  if (byteSize == 1) {
    // Byte swapping has no effect, so just do the byte store.
    masm.store8(*valInt32, dest);
    masm.moveValue(UndefinedValue(), output.valueReg());
    return true;
  }

  // On 32-bit x86, |obj| is already a scratch register so use that. If we need
  // a Register64 we also use the littleEndian register and use the stack
  // location for the check below.
  bool pushedLittleEndian = false;
#ifdef JS_CODEGEN_X86
  if (byteSize == 8) {
    masm.push(littleEndian);
    pushedLittleEndian = true;
  }
  auto valScratch32 = [&]() -> Register { return obj; };
  auto valScratch64 = [&]() -> Register64 {
    return Register64(obj, littleEndian);
  };
#else
  auto valScratch32 = [&]() -> Register {
    return scratch2.ref<AutoScratchRegister>();
  };
  auto valScratch64 = [&]() -> Register64 {
    return scratch2.ref<AutoScratchRegister64>();
  };
#endif

  // Load the value into a gpr register.
  switch (elementType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      masm.move32(*valInt32, valScratch32());
      break;
    case Scalar::Float32: {
      FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
      masm.convertDoubleToFloat32(floatScratch0, scratchFloat32);
      masm.canonicalizeFloatIfDeterministic(scratchFloat32);
      masm.moveFloat32ToGPR(scratchFloat32, valScratch32());
      break;
    }
    case Scalar::Float64: {
      masm.canonicalizeDoubleIfDeterministic(floatScratch0);
      masm.moveDoubleToGPR64(floatScratch0, valScratch64());
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.loadBigInt64(*valBigInt, valScratch64());
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }

  // Swap the bytes in the loaded value.
  Label skip;
  if (pushedLittleEndian) {
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  Address(masm.getStackPointer(), 0), Imm32(0), &skip);
  } else {
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  littleEndian, Imm32(0), &skip);
  }
  switch (elementType) {
    case Scalar::Int16:
      masm.byteSwap16SignExtend(valScratch32());
      break;
    case Scalar::Uint16:
      masm.byteSwap16ZeroExtend(valScratch32());
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.byteSwap32(valScratch32());
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.byteSwap64(valScratch64());
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }
  masm.bind(&skip);
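
  // Illustrative note (added commentary, not in the original changeset): on a
  // little-endian host, |dv.setUint32(0, 0x12345678, false)| takes the
  // byteSwap32 path above so the bytes 0x12 0x34 0x56 0x78 are stored in that
  // order, while passing |true| branches straight to |skip|.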

  // Store the value.
  switch (elementType) {
    case Scalar::Int16:
    case Scalar::Uint16:
      masm.store16Unaligned(valScratch32(), dest);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.store32Unaligned(valScratch32(), dest);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.store64Unaligned(valScratch64(), dest);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

#ifdef JS_CODEGEN_X86
  // Restore registers.
  if (pushedLittleEndian) {
    masm.pop(littleEndian);
  }
#endif

  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}

bool CacheIRCompiler::emitStoreFixedSlotUndefinedResult(ObjOperandId objId,
                                                        uint32_t offsetOffset,
                                                        ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(offset, scratch);

  BaseIndex slot(obj, scratch, TimesOne);
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeValue(val, slot);
  emitPostBarrierSlot(obj, val, scratch);

  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadObjectResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);

  EmitStoreResult(masm, obj, JSVAL_TYPE_OBJECT, output);

  return true;
}

bool CacheIRCompiler::emitLoadStringResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);

  masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadSymbolResult(SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register sym = allocator.useRegister(masm, symId);

  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadInt32Result(Int32OperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, valId);

  masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadBigIntResult(BigIntOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, valId);

  masm.tagValue(JSVAL_TYPE_BIGINT, val, output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadDoubleResult(NumberOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand val = allocator.useValueRegister(masm, valId);

#ifdef DEBUG
  Label ok;
  masm.branchTestDouble(Assembler::Equal, val, &ok);
  masm.branchTestInt32(Assembler::Equal, val, &ok);
  masm.assumeUnreachable("input must be double or int32");
  masm.bind(&ok);
#endif

  masm.moveValue(val, output.valueReg());
  masm.convertInt32ValueToDouble(output.valueReg());

  return true;
}

bool CacheIRCompiler::emitLoadTypeOfObjectResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label slowCheck, isObject, isCallable, isUndefined, done;
  masm.typeOfObject(obj, scratch, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.moveValue(StringValue(cx_->names().function), output.valueReg());
  masm.jump(&done);

  masm.bind(&isUndefined);
  masm.moveValue(StringValue(cx_->names().undefined), output.valueReg());
  masm.jump(&done);

  masm.bind(&isObject);
  masm.moveValue(StringValue(cx_->names().object), output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&slowCheck);
    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = JSString* (*)(JSObject* obj, JSRuntime* rt);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.movePtr(ImmPtr(cx_->runtime()), scratch);
    masm.passABIArg(scratch);
    masm.callWithABI<Fn, TypeOfNameObject>();
    masm.storeCallPointerResult(scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadInt32TruthyResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand val = allocator.useValueRegister(masm, inputId);

  Label ifFalse, done;
  masm.branchTestInt32Truthy(false, val, &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadStringTruthyResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);

  Label ifFalse, done;
  masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
                Imm32(0), &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadDoubleTruthyResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  Label ifFalse, done;

  masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadObjectTruthyResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label emulatesUndefined, slowPath, done;
  masm.branchIfObjectEmulatesUndefined(obj, scratch, &slowPath,
                                       &emulatesUndefined);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&emulatesUndefined);
  masm.moveValue(BooleanValue(false), output.valueReg());
  masm.jump(&done);

  masm.bind(&slowPath);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject* obj);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, js::EmulatesUndefined>();
    masm.storeCallBoolResult(scratch);
    masm.xor32(Imm32(1), scratch);

    masm.PopRegsInMask(volatileRegs);

    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadBigIntTruthyResult(BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  Label ifFalse, done;
  masm.branch32(Assembler::Equal,
                Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(0),
                &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadValueTruthyResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  ValueOperand value = allocator.useValueRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchFloatRegister floatReg(this);

  Label ifFalse, ifTrue, done;
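
  // For reference (added commentary, not in the original changeset): the only
  // falsy values are undefined, null, false, 0, -0, NaN, "", and 0n; objects
  // are truthy unless their class emulates undefined, which the object case
  // below checks.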
  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestUndefined(Assembler::Equal, tag, &ifFalse);
    masm.branchTestNull(Assembler::Equal, tag, &ifFalse);

    Label notBoolean;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestBooleanTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notBoolean);

    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestInt32Truthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notInt32);

    Label notObject;
    masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
    {
      ScratchTagScopeRelease _(&tag);

      Register obj = masm.extractObject(value, scratch1);

      Label slowPath;
      masm.branchIfObjectEmulatesUndefined(obj, scratch2, &slowPath, &ifFalse);
      masm.jump(&ifTrue);

      masm.bind(&slowPath);
      {
        LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                     liveVolatileFloatRegs());
        volatileRegs.takeUnchecked(scratch1);
        volatileRegs.takeUnchecked(scratch2);
        volatileRegs.takeUnchecked(output);
        masm.PushRegsInMask(volatileRegs);

        using Fn = bool (*)(JSObject* obj);
        masm.setupUnalignedABICall(scratch2);
        masm.passABIArg(obj);
        masm.callWithABI<Fn, js::EmulatesUndefined>();
        masm.storeCallPointerResult(scratch2);

        masm.PopRegsInMask(volatileRegs);

        masm.branchIfTrueBool(scratch2, &ifFalse);
        masm.jump(&ifTrue);
      }
    }
    masm.bind(&notObject);

    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestStringTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notString);

    Label notBigInt;
    masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestBigIntTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notBigInt);

    masm.branchTestSymbol(Assembler::Equal, tag, &ifTrue);

#ifdef DEBUG
    Label isDouble;
    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.assumeUnreachable("Unexpected value type");
    masm.bind(&isDouble);
#endif

    {
      ScratchTagScopeRelease _(&tag);
      masm.unboxDouble(value, floatReg);
      masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
    }

    // Fall through to true case.
  }

  masm.bind(&ifTrue);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitComparePointerResultShared(JSOp op,
                                                     TypedOperandId lhsId,
                                                     TypedOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register left = allocator.useRegister(masm, lhsId);
  Register right = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label ifTrue, done;
  masm.branchPtr(JSOpToCondition(op, /* signed = */ true), left, right,
                 &ifTrue);

  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareObjectResult(JSOp op, ObjOperandId lhsId,
                                              ObjOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitComparePointerResultShared(op, lhsId, rhsId);
}

bool CacheIRCompiler::emitCompareSymbolResult(JSOp op, SymbolOperandId lhsId,
                                              SymbolOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitComparePointerResultShared(op, lhsId, rhsId);
}

bool CacheIRCompiler::emitCompareInt32Result(JSOp op, Int32OperandId lhsId,
                                             Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register left = allocator.useRegister(masm, lhsId);
  Register right = allocator.useRegister(masm, rhsId);

  Label ifTrue, done;
  masm.branch32(JSOpToCondition(op, /* signed = */ true), left, right, &ifTrue);

  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareDoubleResult(JSOp op, NumberOperandId lhsId,
                                              NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  Label done, ifTrue;
  masm.branchDouble(JSOpToDoubleCondition(op), floatScratch0, floatScratch1,
                    &ifTrue);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntResult(JSOp op, BigIntOperandId lhsId,
                                              BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(rhs);
    masm.passABIArg(lhs);
  } else {
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
  }
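
  // Worked example (added commentary, not in the original changeset): for
  // |1n <= 2n| the reversed branch above passes (rhs=2n, lhs=1n), so the
  // callee evaluates BigIntCompare<GreaterThanOrEqual>(2n, 1n), i.e.
  // |2n >= 1n|, which is the same boolean as |1n <= 2n|.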

  using Fn = bool (*)(BigInt*, BigInt*);
  Fn fn;
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    fn = jit::BigIntEqual<EqualityKind::Equal>;
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    fn = jit::BigIntEqual<EqualityKind::NotEqual>;
  } else if (op == JSOp::Lt || op == JSOp::Gt) {
    fn = jit::BigIntCompare<ComparisonKind::LessThan>;
  } else {
    MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
    fn = jit::BigIntCompare<ComparisonKind::GreaterThanOrEqual>;
  }

  masm.callWithABI(DynamicFunction<Fn>(fn));
  masm.storeCallBoolResult(scratch);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntInt32Result(JSOp op,
                                                   BigIntOperandId lhsId,
                                                   Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register bigInt = allocator.useRegister(masm, lhsId);
  Register int32 = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  Label ifTrue, ifFalse;
  masm.compareBigIntAndInt32(op, bigInt, int32, scratch1, scratch2, &ifTrue,
                             &ifFalse);

  Label done;
  masm.bind(&ifFalse);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntNumberResult(JSOp op,
                                                    BigIntOperandId lhsId,
                                                    NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  Register lhs = allocator.useRegister(masm, lhsId);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch0);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(floatScratch0, ABIType::Float64);
    masm.passABIArg(lhs);
  } else {
    masm.passABIArg(lhs);
    masm.passABIArg(floatScratch0, ABIType::Float64);
  }
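
  // Worked example (added commentary, not in the original changeset):
  // |2n > 1.5| passes the double first and calls
  // NumberBigIntCompare<LessThan>(1.5, 2n), i.e. |1.5 < 2n|; JSOp::Eq and
  // JSOp::Ne keep the operand order because equality is symmetric.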

  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(scratch);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
  return true;
}

bool CacheIRCompiler::emitCompareBigIntStringResult(JSOp op,
                                                    BigIntOperandId lhsId,
                                                    StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.Push(lhs);
    masm.Push(rhs);
  } else {
    masm.Push(rhs);
    masm.Push(lhs);
  }

  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);

  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callvm.call<FnBigIntString, BigIntStringEqual<Equal>>();
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callvm.call<FnBigIntString, BigIntStringEqual<NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callvm.call<FnBigIntString, BigIntStringCompare<LessThan>>();
      break;
    }
    case JSOp::Gt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callvm.call<FnStringBigInt, StringBigIntCompare<LessThan>>();
      break;
    }
    case JSOp::Le: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callvm.call<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callvm.call<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }
  return true;
}

bool CacheIRCompiler::emitCompareNullUndefinedResult(JSOp op, bool isUndefined,
                                                     ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  ValueOperand input = allocator.useValueRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  if (IsStrictEqualityOp(op)) {
    if (isUndefined) {
      masm.testUndefinedSet(JSOpToCondition(op, false), input, scratch);
    } else {
      masm.testNullSet(JSOpToCondition(op, false), input, scratch);
    }
    EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
    return true;
  }
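
  // Loose-equality reminder (added commentary, not in the original
  // changeset): |null == undefined| is true, and an object whose class
  // emulates undefined (e.g. document.all) also compares loosely equal to
  // null and undefined; every other value compares unequal to them.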

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  MOZ_ASSERT(IsLooseEqualityOp(op));

  Label nullOrLikeUndefined, notNullOrLikeUndefined, done;
  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    if (isUndefined) {
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
    } else {
      masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
    }
    masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);

    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxObject(input, scratch);
      masm.branchIfObjectEmulatesUndefined(scratch, scratch2, failure->label(),
                                           &nullOrLikeUndefined);
      masm.jump(&notNullOrLikeUndefined);
    }
  }

  masm.bind(&nullOrLikeUndefined);
  EmitStoreBoolean(masm, op == JSOp::Eq, output);
  masm.jump(&done);

  masm.bind(&notNullOrLikeUndefined);
  EmitStoreBoolean(masm, op == JSOp::Ne, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareDoubleSameValueResult(NumberOperandId lhsId,
                                                       NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
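
  // SameValue semantics (added commentary, not in the original changeset):
  // unlike |===|, SameValue(NaN, NaN) is true and SameValue(0, -0) is false,
  // which is what sameValueDouble implements below; compare Object.is in JS.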

  masm.sameValueDouble(floatScratch0, floatScratch1, floatScratch2, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitIndirectTruncateInt32Result(Int32OperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register val = allocator.useRegister(masm, valId);

  if (output.hasValue()) {
    masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
  } else {
    masm.mov(val, output.typedReg().gpr());
  }
  return true;
}

bool CacheIRCompiler::emitCallPrintString(const char* str) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  masm.printf(str);
  return true;
}

bool CacheIRCompiler::emitBreakpoint() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  masm.breakpoint();
  return true;
}

void CacheIRCompiler::emitPostBarrierShared(Register obj,
                                            const ConstantOrRegister& val,
                                            Register scratch,
                                            Register maybeIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (val.constant()) {
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    return;
  }

  Label skipBarrier;
  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::NotEqual, reg.valueReg(), scratch,
                                  &skipBarrier);
  } else {
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, reg.typedReg().gpr(),
                                 scratch, &skipBarrier);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);
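
  // Added commentary (not in the original changeset): if |obj| itself is
  // still in the nursery, no store-buffer entry is needed; a minor GC traces
  // nursery objects directly, so nursery-to-nursery edges are found without
  // the barrier.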

  // Check one element cache to avoid VM call.
  auto* lastCellAddr = cx_->runtime()->gc.addressOfLastBufferedWholeCell();
  masm.branchPtr(Assembler::Equal, AbsoluteAddress(lastCellAddr), obj,
                 &skipBarrier);

  // Call one of these, depending on maybeIndex:
  //
  //   void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
  //   void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj,
  //                                int32_t index);
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);
  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(cx_->runtime()), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  if (maybeIndex != InvalidReg) {
    masm.passABIArg(maybeIndex);
    using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
    masm.callWithABI<Fn, PostWriteElementBarrier>();
  } else {
    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }
  masm.PopRegsInMask(save);

  masm.bind(&skipBarrier);
}

bool CacheIRCompiler::emitWrapResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  // We only have to wrap objects, because we are in the same zone.
  masm.branchTestObject(Assembler::NotEqual, output.valueReg(), &done);

  Register obj = output.valueReg().scratchReg();
  masm.unboxObject(output.valueReg(), obj);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = JSObject* (*)(JSContext* cx, JSObject* obj);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, WrapObjectPure>();
  masm.storeCallPointerResult(obj);

  LiveRegisterSet ignore;
  ignore.add(obj);
  masm.PopRegsInMaskIgnore(save, ignore);

  // Bail out if we could not get a wrapper for this object.
  masm.branchTestPtr(Assembler::Zero, obj, obj, failure->label());

  // We clobbered the output register, so we have to retag.
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitMegamorphicLoadSlotByValueResult(ObjOperandId objId,
                                                           ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

#ifdef JS_CODEGEN_X86
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
#else
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifdef JS_CODEGEN_X86
  masm.xorPtr(scratch2, scratch2);
#else
  Label cacheHit;
  masm.emitMegamorphicCacheLookupByValue(
      idVal, obj, scratch1, scratch3, scratch2, output.valueReg(), &cacheHit);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(scratch2);
  masm.passABIArg(idVal.scratchReg());
  masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();

  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.Pop(idVal);

  Label ok;
  uint32_t framePushed = masm.framePushed();
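  // Added commentary (not in the original changeset): the failure path below
  // pops the reserved Value slot before jumping away, which changes masm's
  // frame-depth bookkeeping; record the depth here so the success path can
  // restore it with setFramePushed.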
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

#ifndef JS_CODEGEN_X86
  masm.bind(&cacheHit);
#endif
  return true;
}

bool CacheIRCompiler::emitMegamorphicHasPropResult(ObjOperandId objId,
                                                   ValOperandId idId,
                                                   bool hasOwn) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

#ifdef JS_CODEGEN_X86
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
#else
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifndef JS_CODEGEN_X86
  Label cacheHit, done;
  masm.emitMegamorphicCacheLookupExists(idVal, obj, scratch1, scratch3,
                                        scratch2, output.maybeReg(), &cacheHit,
                                        hasOwn);
#else
  masm.xorPtr(scratch2, scratch2);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(scratch2);
  masm.passABIArg(idVal.scratchReg());
  if (hasOwn) {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
  } else {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
  }
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.Pop(idVal);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

#ifndef JS_CODEGEN_X86
  masm.jump(&done);
  masm.bind(&cacheHit);
  if (output.hasValue()) {
    masm.tagValue(JSVAL_TYPE_BOOLEAN, output.valueReg().scratchReg(),
                  output.valueReg());
  }
  masm.bind(&done);
#endif
  return true;
}

bool CacheIRCompiler::emitSmallObjectVariableKeyHasOwnResult(
    StringOperandId idId, uint32_t propNamesOffset, uint32_t shapeOffset) {
  StubFieldOffset propNames(propNamesOffset, StubField::Type::JSObject);
  AutoOutputRegister output(*this);
  Register id = allocator.useRegister(masm, idId);
  AutoScratchRegisterMaybeOutput propNamesReg(allocator, masm, output);
  AutoScratchRegister endScratch(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  emitLoadStubField(propNames, propNamesReg);

  Label trueResult, falseResult, loop, done;

  masm.loadPtr(Address(propNamesReg, NativeObject::offsetOfElements()),
               propNamesReg);
  // Compute end pointer.
  Address lengthAddr(propNamesReg, ObjectElements::offsetOfInitializedLength());
  masm.load32(lengthAddr, endScratch);
  masm.branch32(Assembler::Equal, endScratch, Imm32(0), &falseResult);
  BaseObjectElementIndex endPtrAddr(propNamesReg, endScratch);
  masm.computeEffectiveAddress(endPtrAddr, endScratch);
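
  // Added commentary (not in the original changeset): the loop below scans
  // the dense elements in [propNamesReg, endScratch) for a string pointer
  // equal to |id|; the property names are expected to be interned atoms, so a
  // pointer comparison suffices.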

  masm.bind(&loop);

  Address atomAddr(propNamesReg.get(), 0);

  masm.unboxString(atomAddr, scratch);
  masm.branchPtr(Assembler::Equal, scratch, id, &trueResult);

  masm.addPtr(Imm32(sizeof(Value)), propNamesReg);
  masm.branchPtr(Assembler::Below, propNamesReg, endScratch, &loop);

  masm.bind(&falseResult);
  if (output.hasValue()) {
    masm.moveValue(BooleanValue(false), output.valueReg());
  } else {
    masm.move32(Imm32(0), output.typedReg().gpr());
  }
  masm.jump(&done);
  masm.bind(&trueResult);
  if (output.hasValue()) {
    masm.moveValue(BooleanValue(true), output.valueReg());
  } else {
    masm.move32(Imm32(1), output.typedReg().gpr());
  }
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCallObjectHasSparseElementResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(scratch2.get());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(index);
  masm.PushRegsInMask(volatileRegs);

  using Fn =
      bool (*)(JSContext* cx, NativeObject* obj, int32_t index, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(index);
  masm.passABIArg(scratch2);
  masm.callWithABI<Fn, HasNativeElementPure>();
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));
  return true;
}

/*
 * Move a constant value into register dest.
 */
void CacheIRCompiler::emitLoadStubFieldConstant(StubFieldOffset val,
                                                Register dest) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  MOZ_ASSERT(mode_ == Mode::Ion);
  switch (val.getStubFieldType()) {
    case StubField::Type::Shape:
      masm.movePtr(ImmGCPtr(shapeStubField(val.getOffset())), dest);
      break;
    case StubField::Type::WeakGetterSetter:
      masm.movePtr(ImmGCPtr(weakGetterSetterStubField(val.getOffset())), dest);
      break;
    case StubField::Type::String:
      masm.movePtr(ImmGCPtr(stringStubField(val.getOffset())), dest);
      break;
    case StubField::Type::JSObject:
      masm.movePtr(ImmGCPtr(objectStubField(val.getOffset())), dest);
      break;
    case StubField::Type::RawPointer:
      masm.movePtr(ImmPtr(pointerStubField(val.getOffset())), dest);
      break;
    case StubField::Type::RawInt32:
      masm.move32(Imm32(int32StubField(val.getOffset())), dest);
      break;
    case StubField::Type::Id:
      masm.movePropertyKey(idStubField(val.getOffset()), dest);
      break;
    default:
      MOZ_CRASH("Unhandled stub field constant type");
  }
}

/*
 * After this is done executing, dest contains the value; either through a
 * constant load or through the load from the stub data.
 *
 * The current policy is that Baseline will use loads from the stub data (to
 * allow IC sharing), whereas Ion doesn't share ICs, and so we can safely use
 * constants in the IC.
 */
void CacheIRCompiler::emitLoadStubField(StubFieldOffset val, Register dest) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    emitLoadStubFieldConstant(val, dest);
  } else {
    Address load(ICStubReg, stubDataOffset_ + val.getOffset());

    switch (val.getStubFieldType()) {
      case StubField::Type::RawPointer:
      case StubField::Type::Shape:
      case StubField::Type::WeakGetterSetter:
      case StubField::Type::JSObject:
      case StubField::Type::Symbol:
      case StubField::Type::String:
      case StubField::Type::Id:
        masm.loadPtr(load, dest);
        break;
      case StubField::Type::RawInt32:
        masm.load32(load, dest);
        break;
      default:
        MOZ_CRASH("Unhandled stub field constant type");
    }
  }
}

void CacheIRCompiler::emitLoadValueStubField(StubFieldOffset val,
                                             ValueOperand dest) {
  MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Value);

  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    MOZ_ASSERT(mode_ == Mode::Ion);
    masm.moveValue(valueStubField(val.getOffset()), dest);
  } else {
    Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
    masm.loadValue(addr, dest);
  }
}

void CacheIRCompiler::emitLoadDoubleValueStubField(StubFieldOffset val,
                                                   ValueOperand dest,
                                                   FloatRegister scratch) {
  MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Double);

  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    MOZ_ASSERT(mode_ == Mode::Ion);
    double d = doubleStubField(val.getOffset());
    masm.moveValue(DoubleValue(d), dest);
  } else {
    Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
    masm.loadDouble(addr, scratch);
    masm.boxDouble(scratch, dest, scratch);
  }
}

bool CacheIRCompiler::emitLoadInstanceOfObjectResult(ValOperandId lhsId,
                                                     ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
  Register proto = allocator.useRegister(masm, protoId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label returnFalse, returnTrue, done;
  masm.fallibleUnboxObject(lhs, scratch, &returnFalse);

  // LHS is an object. Load its proto.
  masm.loadObjProto(scratch, scratch);

  // Walk the proto chain until we either reach the target object,
  // nullptr or LazyProto.
  Label loop;
  masm.bind(&loop);
  {
    masm.branchPtr(Assembler::Equal, scratch, proto, &returnTrue);
    masm.branchTestPtr(Assembler::Zero, scratch, scratch, &returnFalse);

    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
    masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), failure->label());

    masm.loadObjProto(scratch, scratch);
    masm.jump(&loop);
  }
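
  // Illustrative example (added commentary, not in the original changeset):
  // for |a instanceof A| with |a = new A()|, the first iteration finds
  // A.prototype as a's immediate proto and branches to returnTrue; an
  // |Object.create(null)| LHS reaches the nullptr check and returns false.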

  masm.bind(&returnFalse);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&returnTrue);
  EmitStoreBoolean(masm, true, output);
  // fallthrough
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitMegamorphicLoadSlotResult(ObjOperandId objId,
                                                    uint32_t idOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset id(idOffset, StubField::Type::Id);

  AutoScratchRegisterMaybeOutput idReg(allocator, masm, output);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifdef JS_CODEGEN_X86
  masm.xorPtr(scratch3, scratch3);
#else
  Label cacheHit;
  emitLoadStubField(id, idReg);
  masm.emitMegamorphicCacheLookupByValue(idReg.get(), obj, scratch1, scratch2,
                                         scratch3, output.valueReg(),
                                         &cacheHit);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(idReg.get());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  volatileRegs.takeUnchecked(scratch3);
  volatileRegs.takeUnchecked(idReg);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(id, scratch2);
  masm.passABIArg(scratch2);
  masm.passABIArg(scratch3);
  masm.passABIArg(idReg);

#ifdef JS_CODEGEN_X86
  masm.callWithABI<Fn, GetNativeDataPropertyPureWithCacheLookup>();
#else
  masm.callWithABI<Fn, GetNativeDataPropertyPure>();
#endif

  masm.storeCallPointerResult(scratch2);
  masm.PopRegsInMask(volatileRegs);

  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

  masm.branchIfFalseBool(scratch2, failure->label());
#ifndef JS_CODEGEN_X86
  masm.bind(&cacheHit);
#endif

  return true;
}

bool CacheIRCompiler::emitMegamorphicStoreSlot(ObjOperandId objId,
                                               uint32_t idOffset,
                                               ValOperandId rhsId,
                                               bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  StubFieldOffset id(idOffset, StubField::Type::Id);
  AutoScratchRegister scratch(allocator, masm);

  callvm.prepare();

  masm.Push(Imm32(strict));
  masm.Push(val);
  emitLoadStubField(id, scratch);
  masm.Push(scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callvm.callNoResult<Fn, SetPropertyMegamorphic<false>>();
  return true;
}

bool CacheIRCompiler::emitGuardHasGetterSetter(ObjOperandId objId,
                                               uint32_t idOffset,
                                               uint32_t getterSetterOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  StubFieldOffset id(idOffset, StubField::Type::Id);
  StubFieldOffset getterSetter(getterSetterOffset,
                               StubField::Type::WeakGetterSetter);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
                      GetterSetter* getterSetter);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(id, scratch2);
  masm.passABIArg(scratch2);
  emitLoadStubField(getterSetter, scratch3);
  masm.passABIArg(scratch3);
  masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.branchIfFalseBool(scratch1, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardWasmArg(ValOperandId argId,
                                       wasm::ValType::Kind kind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // All values can be boxed as AnyRef.
  if (kind == wasm::ValType::Ref) {
    return true;
  }
  MOZ_ASSERT(kind != wasm::ValType::V128);

  ValueOperand arg = allocator.useValueRegister(masm, argId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Check that the argument can be converted to the Wasm type in Warp code
  // without bailing out.
  Label done;
  switch (kind) {
    case wasm::ValType::I32:
    case wasm::ValType::F32:
    case wasm::ValType::F64: {
      // Argument must be number, bool, or undefined.
      masm.branchTestNumber(Assembler::Equal, arg, &done);
      masm.branchTestBoolean(Assembler::Equal, arg, &done);
      masm.branchTestUndefined(Assembler::NotEqual, arg, failure->label());
      break;
    }
    case wasm::ValType::I64: {
      // Argument must be bigint, bool, or string.
      masm.branchTestBigInt(Assembler::Equal, arg, &done);
      masm.branchTestBoolean(Assembler::Equal, arg, &done);
      masm.branchTestString(Assembler::NotEqual, arg, failure->label());
      break;
    }
    default:
      MOZ_CRASH("Unexpected kind");
  }
  masm.bind(&done);

  return true;
}

bool CacheIRCompiler::emitGuardMultipleShapes(ObjOperandId objId,
                                              uint32_t shapesOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister shapes(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  Register spectreScratch = InvalidReg;
  Maybe<AutoScratchRegister> maybeSpectreScratch;
  if (needSpectreMitigations) {
    maybeSpectreScratch.emplace(allocator, masm);
    spectreScratch = *maybeSpectreScratch;
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The stub field contains a ListObject. Load its elements.
  StubFieldOffset shapeArray(shapesOffset, StubField::Type::JSObject);
  emitLoadStubField(shapeArray, shapes);
  masm.loadPtr(Address(shapes, NativeObject::offsetOfElements()), shapes);

  masm.branchTestObjShapeList(Assembler::NotEqual, obj, shapes, scratch,
                              scratch2, spectreScratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadObject(ObjOperandId resultId,
                                     uint32_t objOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset obj(objOffset, StubField::Type::JSObject);
  emitLoadStubField(obj, reg);
  return true;
}

bool CacheIRCompiler::emitLoadProtoObject(ObjOperandId resultId,
                                          uint32_t objOffset,
                                          ObjOperandId receiverObjId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset obj(objOffset, StubField::Type::JSObject);
  emitLoadStubField(obj, reg);
  return true;
}

bool CacheIRCompiler::emitLoadInt32Constant(uint32_t valOffset,
                                            Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset val(valOffset, StubField::Type::RawInt32);
  emitLoadStubField(val, reg);
  return true;
}

bool CacheIRCompiler::emitLoadBooleanConstant(bool val,
                                              BooleanOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.move32(Imm32(val), reg);
  return true;
}

bool CacheIRCompiler::emitLoadDoubleConstant(uint32_t valOffset,
                                             NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  StubFieldOffset val(valOffset, StubField::Type::Double);

  AutoScratchFloatRegister floatReg(this);

  emitLoadDoubleValueStubField(val, output, floatReg);
  return true;
}

bool CacheIRCompiler::emitLoadUndefined(ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand reg = allocator.defineValueRegister(masm, resultId);
  masm.moveValue(UndefinedValue(), reg);
  return true;
}

bool CacheIRCompiler::emitLoadConstantString(uint32_t strOffset,
                                             StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register reg = allocator.defineRegister(masm, resultId);
  StubFieldOffset str(strOffset, StubField::Type::String);
  emitLoadStubField(str, reg);
  return true;
}

bool CacheIRCompiler::emitCallInt32ToString(Int32OperandId inputId,
                                            StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register input = allocator.useRegister(masm, inputId);
  Register result = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(result);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSLinearString* (*)(JSContext* cx, int32_t i);
  masm.setupUnalignedABICall(result);
  masm.loadJSContext(result);
  masm.passABIArg(result);
  masm.passABIArg(input);
  masm.callWithABI<Fn, js::Int32ToStringPure>();

  masm.storeCallPointerResult(result);
  masm.PopRegsInMask(volatileRegs);

  masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
  return true;
}

bool CacheIRCompiler::emitCallNumberToString(NumberOperandId inputId,
                                             StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, floatScratch0);
  Register result = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(result);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSString* (*)(JSContext* cx, double d);
  masm.setupUnalignedABICall(result);
  masm.loadJSContext(result);
  masm.passABIArg(result);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.callWithABI<Fn, js::NumberToStringPure>();

  masm.storeCallPointerResult(result);
  masm.PopRegsInMask(volatileRegs);

  masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
  return true;
}
8893 bool CacheIRCompiler::emitInt32ToStringWithBaseResult(Int32OperandId inputId,
8894 Int32OperandId baseId) {
8895 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8897 AutoCallVM callvm(masm, this, allocator);
8898 Register input = allocator.useRegister(masm, inputId);
8899 Register base = allocator.useRegister(masm, baseId);
8901 FailurePath* failure;
8902 if (!addFailurePath(&failure)) {
8903 return false;
8904 }
8906 // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
8907 // we can't use both at the same time. This isn't an issue here, because Ion
8908 // doesn't support CallICs. If that ever changes, this code must be updated.
8909 MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
8911 masm.branch32(Assembler::LessThan, base, Imm32(2), failure->label());
8912 masm.branch32(Assembler::GreaterThan, base, Imm32(36), failure->label());
8914 // Use lower-case characters by default.
8915 constexpr bool lowerCase = true;
8917 callvm.prepare();
8919 masm.Push(Imm32(lowerCase));
8920 masm.Push(base);
8921 masm.Push(input);
8923 using Fn = JSString* (*)(JSContext*, int32_t, int32_t, bool);
8924 callvm.call<Fn, js::Int32ToStringWithBase>();
8925 return true;
8928 bool CacheIRCompiler::emitBooleanToString(BooleanOperandId inputId,
8929 StringOperandId resultId) {
8930 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8931 Register boolean = allocator.useRegister(masm, inputId);
8932 Register result = allocator.defineRegister(masm, resultId);
8933 const JSAtomState& names = cx_->names();
8934 Label true_, done;
8936 masm.branchTest32(Assembler::NonZero, boolean, boolean, &true_);
8938 // False case
8939 masm.movePtr(ImmGCPtr(names.false_), result);
8940 masm.jump(&done);
8942 // True case
8943 masm.bind(&true_);
8944 masm.movePtr(ImmGCPtr(names.true_), result);
8945 masm.bind(&done);
8947 return true;
8950 bool CacheIRCompiler::emitObjectToStringResult(ObjOperandId objId) {
8951 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8953 AutoOutputRegister output(*this);
8954 Register obj = allocator.useRegister(masm, objId);
8955 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
8957 FailurePath* failure;
8958 if (!addFailurePath(&failure)) {
8959 return false;
8960 }
8962 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
8963 liveVolatileFloatRegs());
8964 volatileRegs.takeUnchecked(output.valueReg());
8965 volatileRegs.takeUnchecked(scratch);
8966 masm.PushRegsInMask(volatileRegs);
8968 using Fn = JSString* (*)(JSContext*, JSObject*);
8969 masm.setupUnalignedABICall(scratch);
8970 masm.loadJSContext(scratch);
8971 masm.passABIArg(scratch);
8972 masm.passABIArg(obj);
8973 masm.callWithABI<Fn, js::ObjectClassToString>();
8974 masm.storeCallPointerResult(scratch);
8976 masm.PopRegsInMask(volatileRegs);
8978 masm.branchPtr(Assembler::Equal, scratch, ImmPtr(nullptr), failure->label());
8979 masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
8981 return true;
8984 bool CacheIRCompiler::emitCallStringConcatResult(StringOperandId lhsId,
8985 StringOperandId rhsId) {
8986 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8987 AutoCallVM callvm(masm, this, allocator);
8989 Register lhs = allocator.useRegister(masm, lhsId);
8990 Register rhs = allocator.useRegister(masm, rhsId);
8992 callvm.prepare();
8994 masm.Push(static_cast<js::jit::Imm32>(int32_t(js::gc::Heap::Default)));
8995 masm.Push(rhs);
8996 masm.Push(lhs);
8998 using Fn =
8999 JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
9000 callvm.call<Fn, ConcatStrings<CanGC>>();
9002 return true;
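// Illustrative note (an observation about the surrounding code, not new
// behavior): AutoCallVM pushes arguments in reverse order of the Fn
// signature, so the three pushes above correspond to a call shaped like
//
//   ConcatStrings<CanGC>(cx, lhs, rhs, js::gc::Heap::Default);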
9005 bool CacheIRCompiler::emitCallIsSuspendedGeneratorResult(ValOperandId valId) {
9006 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9007 AutoOutputRegister output(*this);
9008 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
9009 AutoScratchRegister scratch2(allocator, masm);
9010 ValueOperand input = allocator.useValueRegister(masm, valId);
9012 // Test if it's an object.
9013 Label returnFalse, done;
9014 masm.fallibleUnboxObject(input, scratch, &returnFalse);
9016 // Test if it's a GeneratorObject.
9017 masm.branchTestObjClass(Assembler::NotEqual, scratch,
9018 &GeneratorObject::class_, scratch2, scratch,
9019 &returnFalse);
9021 // If the resumeIndex slot holds an int32 value < RESUME_INDEX_RUNNING,
9022 // the generator is suspended.
9023 Address addr(scratch, AbstractGeneratorObject::offsetOfResumeIndexSlot());
9024 masm.fallibleUnboxInt32(addr, scratch, &returnFalse);
9025 masm.branch32(Assembler::AboveOrEqual, scratch,
9026 Imm32(AbstractGeneratorObject::RESUME_INDEX_RUNNING),
9027 &returnFalse);
9029 masm.moveValue(BooleanValue(true), output.valueReg());
9030 masm.jump(&done);
9032 masm.bind(&returnFalse);
9033 masm.moveValue(BooleanValue(false), output.valueReg());
9035 masm.bind(&done);
9036 return true;
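// Illustrative sketch (simplified, not compiled): the fast path emitted above
// is the moral equivalent of
//
//   bool IsSuspendedGenerator(const Value& v) {
//     if (!v.isObject() || !v.toObject().is<GeneratorObject>()) {
//       return false;
//     }
//     const Value& resumeIndex = /* resumeIndex slot */;
//     return resumeIndex.isInt32() &&
//            resumeIndex.toInt32() <
//                AbstractGeneratorObject::RESUME_INDEX_RUNNING;
//   }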
9039 // This op generates no code. It is consumed by the transpiler.
9040 bool CacheIRCompiler::emitMetaScriptedThisShape(uint32_t) { return true; }
9042 bool CacheIRCompiler::emitCallNativeGetElementResult(ObjOperandId objId,
9043 Int32OperandId indexId) {
9044 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9045 AutoCallVM callvm(masm, this, allocator);
9047 Register obj = allocator.useRegister(masm, objId);
9048 Register index = allocator.useRegister(masm, indexId);
9050 callvm.prepare();
9052 masm.Push(index);
9053 masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
9054 masm.Push(obj);
9056 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
9057 MutableHandleValue);
9058 callvm.call<Fn, NativeGetElement>();
9060 return true;
9063 bool CacheIRCompiler::emitCallNativeGetElementSuperResult(
9064 ObjOperandId objId, Int32OperandId indexId, ValOperandId receiverId) {
9065 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9066 AutoCallVM callvm(masm, this, allocator);
9068 Register obj = allocator.useRegister(masm, objId);
9069 Register index = allocator.useRegister(masm, indexId);
9070 ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
9072 callvm.prepare();
9074 masm.Push(index);
9075 masm.Push(receiver);
9076 masm.Push(obj);
9078 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
9079 MutableHandleValue);
9080 callvm.call<Fn, NativeGetElement>();
9082 return true;
9085 bool CacheIRCompiler::emitProxyHasPropResult(ObjOperandId objId,
9086 ValOperandId idId, bool hasOwn) {
9087 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9088 AutoCallVM callvm(masm, this, allocator);
9090 Register obj = allocator.useRegister(masm, objId);
9091 ValueOperand idVal = allocator.useValueRegister(masm, idId);
9093 callvm.prepare();
9095 masm.Push(idVal);
9096 masm.Push(obj);
9098 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
9099 if (hasOwn) {
9100 callvm.call<Fn, ProxyHasOwn>();
9101 } else {
9102 callvm.call<Fn, ProxyHas>();
9103 }
9104 return true;
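// Illustrative note: hasOwn selects between the two proxy traps, matching
// the JS-level operations
//
//   Object.hasOwn(proxy, key)   // hasOwn == true  -> ProxyHasOwn
//   key in proxy                // hasOwn == false -> ProxyHas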
9107 bool CacheIRCompiler::emitProxyGetByValueResult(ObjOperandId objId,
9108 ValOperandId idId) {
9109 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9110 AutoCallVM callvm(masm, this, allocator);
9112 Register obj = allocator.useRegister(masm, objId);
9113 ValueOperand idVal = allocator.useValueRegister(masm, idId);
9115 callvm.prepare();
9116 masm.Push(idVal);
9117 masm.Push(obj);
9119 using Fn =
9120 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
9121 callvm.call<Fn, ProxyGetPropertyByValue>();
9122 return true;
9125 bool CacheIRCompiler::emitCallGetSparseElementResult(ObjOperandId objId,
9126 Int32OperandId indexId) {
9127 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9129 AutoCallVM callvm(masm, this, allocator);
9131 Register obj = allocator.useRegister(masm, objId);
9132 Register id = allocator.useRegister(masm, indexId);
9134 callvm.prepare();
9135 masm.Push(id);
9136 masm.Push(obj);
9138 using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
9139 MutableHandleValue result);
9140 callvm.call<Fn, GetSparseElementHelper>();
9141 return true;
9144 bool CacheIRCompiler::emitRegExpSearcherLastLimitResult() {
9145 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9147 AutoOutputRegister output(*this);
9148 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
9149 AutoScratchRegister scratch2(allocator, masm);
9151 masm.loadAndClearRegExpSearcherLastLimit(scratch1, scratch2);
9153 masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
9154 return true;
9157 bool CacheIRCompiler::emitRegExpFlagResult(ObjOperandId regexpId,
9158 int32_t flagsMask) {
9159 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9161 AutoOutputRegister output(*this);
9162 Register regexp = allocator.useRegister(masm, regexpId);
9163 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
9165 Address flagsAddr(
9166 regexp, NativeObject::getFixedSlotOffset(RegExpObject::flagsSlot()));
9167 masm.unboxInt32(flagsAddr, scratch);
9169 Label ifFalse, done;
9170 masm.branchTest32(Assembler::Zero, scratch, Imm32(flagsMask), &ifFalse);
9171 masm.moveValue(BooleanValue(true), output.valueReg());
9172 masm.jump(&done);
9174 masm.bind(&ifFalse);
9175 masm.moveValue(BooleanValue(false), output.valueReg());
9177 masm.bind(&done);
9178 return true;
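// Illustrative note (assumption: flagsMask is a JS::RegExpFlags bit
// pattern): a flag getter such as RegExp.prototype.global compiles to
// roughly
//
//   return (regexp->flags() & mask) != 0;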
9181 bool CacheIRCompiler::emitCallSubstringKernelResult(StringOperandId strId,
9182 Int32OperandId beginId,
9183 Int32OperandId lengthId) {
9184 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9186 AutoCallVM callvm(masm, this, allocator);
9188 Register str = allocator.useRegister(masm, strId);
9189 Register begin = allocator.useRegister(masm, beginId);
9190 Register length = allocator.useRegister(masm, lengthId);
9192 callvm.prepare();
9193 masm.Push(length);
9194 masm.Push(begin);
9195 masm.Push(str);
9197 using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
9198 int32_t len);
9199 callvm.call<Fn, SubstringKernel>();
9200 return true;
9203 bool CacheIRCompiler::emitStringReplaceStringResult(
9204 StringOperandId strId, StringOperandId patternId,
9205 StringOperandId replacementId) {
9206 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9208 AutoCallVM callvm(masm, this, allocator);
9210 Register str = allocator.useRegister(masm, strId);
9211 Register pattern = allocator.useRegister(masm, patternId);
9212 Register replacement = allocator.useRegister(masm, replacementId);
9214 callvm.prepare();
9215 masm.Push(replacement);
9216 masm.Push(pattern);
9217 masm.Push(str);
9219 using Fn =
9220 JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
9221 callvm.call<Fn, jit::StringReplace>();
9222 return true;
9225 bool CacheIRCompiler::emitStringSplitStringResult(StringOperandId strId,
9226 StringOperandId separatorId) {
9227 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9229 AutoCallVM callvm(masm, this, allocator);
9231 Register str = allocator.useRegister(masm, strId);
9232 Register separator = allocator.useRegister(masm, separatorId);
9234 callvm.prepare();
9235 masm.Push(Imm32(INT32_MAX));
9236 masm.Push(separator);
9237 masm.Push(str);
9239 using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
9240 callvm.call<Fn, js::StringSplitString>();
9241 return true;
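// Illustrative note: pushing INT32_MAX as the limit makes this behave like
// String.prototype.split with the limit argument omitted, e.g.
//
//   "a,b,c".split(",")  ->  StringSplitString(cx, str, sep, INT32_MAX)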
9244 bool CacheIRCompiler::emitRegExpPrototypeOptimizableResult(
9245 ObjOperandId protoId) {
9246 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9248 AutoOutputRegister output(*this);
9249 Register proto = allocator.useRegister(masm, protoId);
9250 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
9252 Label slow, done;
9253 masm.branchIfNotRegExpPrototypeOptimizable(
9254 proto, scratch, /* maybeGlobal = */ nullptr, &slow);
9255 masm.moveValue(BooleanValue(true), output.valueReg());
9256 masm.jump(&done);
9259 masm.bind(&slow);
9261 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
9262 liveVolatileFloatRegs());
9263 volatileRegs.takeUnchecked(scratch);
9264 masm.PushRegsInMask(volatileRegs);
9266 using Fn = bool (*)(JSContext* cx, JSObject* proto);
9267 masm.setupUnalignedABICall(scratch);
9268 masm.loadJSContext(scratch);
9269 masm.passABIArg(scratch);
9270 masm.passABIArg(proto);
9271 masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
9272 masm.storeCallBoolResult(scratch);
9274 masm.PopRegsInMask(volatileRegs);
9275 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
9278 masm.bind(&done);
9279 return true;
9282 bool CacheIRCompiler::emitRegExpInstanceOptimizableResult(
9283 ObjOperandId regexpId, ObjOperandId protoId) {
9284 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9286 AutoOutputRegister output(*this);
9287 Register regexp = allocator.useRegister(masm, regexpId);
9288 Register proto = allocator.useRegister(masm, protoId);
9289 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
9291 Label slow, done;
9292 masm.branchIfNotRegExpInstanceOptimizable(regexp, scratch,
9293 /* maybeGlobal = */ nullptr, &slow);
9294 masm.moveValue(BooleanValue(true), output.valueReg());
9295 masm.jump(&done);
9298 masm.bind(&slow);
9300 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
9301 liveVolatileFloatRegs());
9302 volatileRegs.takeUnchecked(scratch);
9303 masm.PushRegsInMask(volatileRegs);
9305 using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
9306 masm.setupUnalignedABICall(scratch);
9307 masm.loadJSContext(scratch);
9308 masm.passABIArg(scratch);
9309 masm.passABIArg(regexp);
9310 masm.passABIArg(proto);
9311 masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
9312 masm.storeCallBoolResult(scratch);
9314 masm.PopRegsInMask(volatileRegs);
9315 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
9318 masm.bind(&done);
9319 return true;
9322 bool CacheIRCompiler::emitGetFirstDollarIndexResult(StringOperandId strId) {
9323 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9325 AutoCallVM callvm(masm, this, allocator);
9327 Register str = allocator.useRegister(masm, strId);
9329 callvm.prepare();
9330 masm.Push(str);
9332 using Fn = bool (*)(JSContext*, JSString*, int32_t*);
9333 callvm.call<Fn, GetFirstDollarIndexRaw>();
9334 return true;
9337 bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
9338 ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
9339 uint32_t replacementId, Scalar::Type elementType,
9340 ArrayBufferViewKind viewKind) {
9341 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9343 Maybe<AutoOutputRegister> output;
9344 Maybe<AutoCallVM> callvm;
9345 if (!Scalar::isBigIntType(elementType)) {
9346 output.emplace(*this);
9347 } else {
9348 callvm.emplace(masm, this, allocator);
9349 }
9350 #ifdef JS_CODEGEN_X86
9351 // Use a scratch register to avoid running out of registers.
9352 Register obj = output ? output->valueReg().typeReg()
9353 : callvm->outputValueReg().typeReg();
9354 allocator.copyToScratchRegister(masm, objId, obj);
9355 #else
9356 Register obj = allocator.useRegister(masm, objId);
9357 #endif
9358 Register index = allocator.useRegister(masm, indexId);
9359 Register expected;
9360 Register replacement;
9361 if (!Scalar::isBigIntType(elementType)) {
9362 expected = allocator.useRegister(masm, Int32OperandId(expectedId));
9363 replacement = allocator.useRegister(masm, Int32OperandId(replacementId));
9364 } else {
9365 expected = allocator.useRegister(masm, BigIntOperandId(expectedId));
9366 replacement = allocator.useRegister(masm, BigIntOperandId(replacementId));
9367 }
9369 Register scratch = output ? output->valueReg().scratchReg()
9370 : callvm->outputValueReg().scratchReg();
9371 MOZ_ASSERT(scratch != obj, "scratchReg must not be typeReg");
9373 Maybe<AutoScratchRegister> scratch2;
9374 if (viewKind == ArrayBufferViewKind::Resizable) {
9375 #ifdef JS_CODEGEN_X86
9376 // Not enough spare registers on x86.
9377 #else
9378 scratch2.emplace(allocator, masm);
9379 #endif
9380 }
9382 // Not enough registers on x86.
9383 constexpr auto spectreTemp = mozilla::Nothing{};
9385 FailurePath* failure;
9386 if (!addFailurePath(&failure)) {
9387 return false;
9388 }
9390 // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
9391 // we can't use both at the same time. This isn't an issue here, because Ion
9392 // doesn't support CallICs. If that ever changes, this code must be updated.
9393 MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
9395 // Bounds check.
9396 emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
9397 spectreTemp, failure->label());
9399 // Atomic operations are highly platform-dependent; for example, x86/x64 have
9400 // specific requirements on which registers are used, and MIPS needs multiple
9401 // additional temporaries. We therefore use either an ABI or VM call here
9402 // instead of handling each platform separately.
9404 if (Scalar::isBigIntType(elementType)) {
9405 callvm->prepare();
9407 masm.Push(replacement);
9408 masm.Push(expected);
9409 masm.Push(index);
9410 masm.Push(obj);
9412 using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t, const BigInt*,
9413 const BigInt*);
9414 callvm->call<Fn, jit::AtomicsCompareExchange64>();
9415 return true;
9416 }
9418 {
9419 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
9420 liveVolatileFloatRegs());
9421 volatileRegs.takeUnchecked(output->valueReg());
9422 volatileRegs.takeUnchecked(scratch);
9423 masm.PushRegsInMask(volatileRegs);
9425 masm.setupUnalignedABICall(scratch);
9426 masm.passABIArg(obj);
9427 masm.passABIArg(index);
9428 masm.passABIArg(expected);
9429 masm.passABIArg(replacement);
9430 masm.callWithABI(DynamicFunction<AtomicsCompareExchangeFn>(
9431 AtomicsCompareExchange(elementType)));
9432 masm.storeCallInt32Result(scratch);
9434 masm.PopRegsInMask(volatileRegs);
9435 }
9437 if (elementType != Scalar::Uint32) {
9438 masm.tagValue(JSVAL_TYPE_INT32, scratch, output->valueReg());
9439 } else {
9440 ScratchDoubleScope fpscratch(masm);
9441 masm.convertUInt32ToDouble(scratch, fpscratch);
9442 masm.boxDouble(fpscratch, output->valueReg(), fpscratch);
9443 }
9445 return true;
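// Illustrative note on the Uint32 case above: a uint32 result may exceed
// INT32_MAX and then no longer fits in an Int32Value, so it is boxed as a
// double instead. Roughly:
//
//   uint32_t old = AtomicsCompareExchange(...);
//   // For Uint32 elements the result is always boxed as a double, since
//   // values above INT32_MAX cannot be represented as an Int32Value:
//   result = DoubleValue(double(old));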
9448 bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
9449 ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
9450 Scalar::Type elementType, ArrayBufferViewKind viewKind,
9451 AtomicsReadWriteModifyFn fn) {
9452 AutoOutputRegister output(*this);
9453 Register obj = allocator.useRegister(masm, objId);
9454 Register index = allocator.useRegister(masm, indexId);
9455 Register value = allocator.useRegister(masm, Int32OperandId(valueId));
9456 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
9457 Maybe<AutoScratchRegisterMaybeOutputType> scratch2;
9458 if (viewKind == ArrayBufferViewKind::Resizable) {
9459 scratch2.emplace(allocator, masm, output);
9460 }
9462 // Not enough registers on x86.
9463 constexpr auto spectreTemp = mozilla::Nothing{};
9465 FailurePath* failure;
9466 if (!addFailurePath(&failure)) {
9467 return false;
9468 }
9470 // Bounds check.
9471 emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
9472 spectreTemp, failure->label());
9474 // See comment in emitAtomicsCompareExchange for why we use an ABI call.
9476 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
9477 liveVolatileFloatRegs());
9478 volatileRegs.takeUnchecked(output.valueReg());
9479 volatileRegs.takeUnchecked(scratch);
9480 masm.PushRegsInMask(volatileRegs);
9482 masm.setupUnalignedABICall(scratch);
9483 masm.passABIArg(obj);
9484 masm.passABIArg(index);
9485 masm.passABIArg(value);
9486 masm.callWithABI(DynamicFunction<AtomicsReadWriteModifyFn>(fn));
9487 masm.storeCallInt32Result(scratch);
9489 masm.PopRegsInMask(volatileRegs);
9492 if (elementType != Scalar::Uint32) {
9493 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
9494 } else {
9495 ScratchDoubleScope fpscratch(masm);
9496 masm.convertUInt32ToDouble(scratch, fpscratch);
9497 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
9498 }
9500 return true;
9503 template <CacheIRCompiler::AtomicsReadWriteModify64Fn fn>
9504 bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
9505 ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
9506 ArrayBufferViewKind viewKind) {
9507 AutoCallVM callvm(masm, this, allocator);
9508 Register obj = allocator.useRegister(masm, objId);
9509 Register index = allocator.useRegister(masm, indexId);
9510 Register value = allocator.useRegister(masm, BigIntOperandId(valueId));
9511 AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());
9512 Maybe<AutoScratchRegisterMaybeOutputType> scratch2;
9513 if (viewKind == ArrayBufferViewKind::Resizable) {
9514 scratch2.emplace(allocator, masm, callvm.output());
9515 }
9517 // Not enough registers on x86.
9518 constexpr auto spectreTemp = mozilla::Nothing{};
9520 FailurePath* failure;
9521 if (!addFailurePath(&failure)) {
9522 return false;
9523 }
9525 // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
9526 // we can't use both at the same time. This isn't an issue here, because Ion
9527 // doesn't support CallICs. If that ever changes, this code must be updated.
9528 MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
9530 // Bounds check.
9531 emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
9532 spectreTemp, failure->label());
9534 // See comment in emitAtomicsCompareExchange for why we use a VM call.
9536 callvm.prepare();
9538 masm.Push(value);
9539 masm.Push(index);
9540 masm.Push(obj);
9542 callvm.call<AtomicsReadWriteModify64Fn, fn>();
9543 return true;
9546 bool CacheIRCompiler::emitAtomicsExchangeResult(ObjOperandId objId,
9547 IntPtrOperandId indexId,
9548 uint32_t valueId,
9549 Scalar::Type elementType,
9550 ArrayBufferViewKind viewKind) {
9551 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9553 if (Scalar::isBigIntType(elementType)) {
9554 return emitAtomicsReadModifyWriteResult64<jit::AtomicsExchange64>(
9555 objId, indexId, valueId, viewKind);
9556 }
9557 return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
9558 viewKind,
9559 AtomicsExchange(elementType));
9562 bool CacheIRCompiler::emitAtomicsAddResult(
9563 ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
9564 Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
9565 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9567 if (Scalar::isBigIntType(elementType)) {
9568 return emitAtomicsReadModifyWriteResult64<jit::AtomicsAdd64>(
9569 objId, indexId, valueId, viewKind);
9570 }
9571 return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
9572 viewKind, AtomicsAdd(elementType));
9575 bool CacheIRCompiler::emitAtomicsSubResult(
9576 ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
9577 Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
9578 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9580 if (Scalar::isBigIntType(elementType)) {
9581 return emitAtomicsReadModifyWriteResult64<jit::AtomicsSub64>(
9582 objId, indexId, valueId, viewKind);
9583 }
9584 return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
9585 viewKind, AtomicsSub(elementType));
9588 bool CacheIRCompiler::emitAtomicsAndResult(
9589 ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
9590 Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
9591 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9593 if (Scalar::isBigIntType(elementType)) {
9594 return emitAtomicsReadModifyWriteResult64<jit::AtomicsAnd64>(
9595 objId, indexId, valueId, viewKind);
9596 }
9597 return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
9598 viewKind, AtomicsAnd(elementType));
9601 bool CacheIRCompiler::emitAtomicsOrResult(
9602 ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
9603 Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
9604 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9606 if (Scalar::isBigIntType(elementType)) {
9607 return emitAtomicsReadModifyWriteResult64<jit::AtomicsOr64>(
9608 objId, indexId, valueId, viewKind);
9609 }
9610 return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
9611 viewKind, AtomicsOr(elementType));
9614 bool CacheIRCompiler::emitAtomicsXorResult(
9615 ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
9616 Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
9617 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9619 if (Scalar::isBigIntType(elementType)) {
9620 return emitAtomicsReadModifyWriteResult64<jit::AtomicsXor64>(
9621 objId, indexId, valueId, viewKind);
9622 }
9623 return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
9624 viewKind, AtomicsXor(elementType));
9627 bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
9628 IntPtrOperandId indexId,
9629 Scalar::Type elementType,
9630 ArrayBufferViewKind viewKind) {
9631 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9633 Maybe<AutoOutputRegister> output;
9634 Maybe<AutoCallVM> callvm;
9635 if (!Scalar::isBigIntType(elementType)) {
9636 output.emplace(*this);
9637 } else {
9638 callvm.emplace(masm, this, allocator);
9639 }
9640 Register obj = allocator.useRegister(masm, objId);
9641 Register index = allocator.useRegister(masm, indexId);
9642 AutoScratchRegisterMaybeOutput scratch(allocator, masm,
9643 output ? *output : callvm->output());
9644 Maybe<AutoSpectreBoundsScratchRegister> spectreTemp;
9645 Maybe<AutoScratchRegister> scratch2;
9646 if (viewKind == ArrayBufferViewKind::FixedLength) {
9647 spectreTemp.emplace(allocator, masm);
9648 } else {
9649 scratch2.emplace(allocator, masm);
9650 }
9651 AutoAvailableFloatRegister floatReg(*this, FloatReg0);
9653 FailurePath* failure;
9654 if (!addFailurePath(&failure)) {
9655 return false;
9656 }
9658 // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
9659 // we can't use both at the same time. This isn't an issue here, because Ion
9660 // doesn't support CallICs. If that ever changes, this code must be updated.
9661 MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
9663 // Bounds check.
9664 emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
9665 spectreTemp, failure->label());
9667 // Atomic operations are highly platform-dependent; for example, x86/arm32 have
9668 // specific requirements on which registers are used. We therefore use a VM
9669 // call here instead of handling each platform separately.
9670 if (Scalar::isBigIntType(elementType)) {
9671 callvm->prepare();
9673 masm.Push(index);
9674 masm.Push(obj);
9676 using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t);
9677 callvm->call<Fn, jit::AtomicsLoad64>();
9678 return true;
9679 }
9681 // Load the elements vector.
9682 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);
9684 // Load the value.
9685 BaseIndex source(scratch, index, ScaleFromScalarType(elementType));
9687 // NOTE: the generated code must match the assembly code in gen_load in
9688 // GenerateAtomicOperations.py
9689 auto sync = Synchronization::Load();
9691 masm.memoryBarrierBefore(sync);
9693 Label* failUint32 = nullptr;
9694 MacroAssembler::Uint32Mode mode = MacroAssembler::Uint32Mode::ForceDouble;
9695 masm.loadFromTypedArray(elementType, source, output->valueReg(), mode,
9696 scratch, failUint32);
9697 masm.memoryBarrierAfter(sync);
9699 return true;
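// Illustrative sketch: with the barriers above, the inline path performs the
// equivalent of a sequentially consistent load, along the lines of
//
//   std::atomic_ref<T>(elements[index]).load(std::memory_order_seq_cst);
//
// which matches the ordering Atomics.load requires.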
9702 bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
9703 IntPtrOperandId indexId,
9704 uint32_t valueId,
9705 Scalar::Type elementType,
9706 ArrayBufferViewKind viewKind) {
9707 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9709 AutoOutputRegister output(*this);
9710 Register obj = allocator.useRegister(masm, objId);
9711 Register index = allocator.useRegister(masm, indexId);
9712 Maybe<Register> valueInt32;
9713 Maybe<Register> valueBigInt;
9714 if (!Scalar::isBigIntType(elementType)) {
9715 valueInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
9716 } else {
9717 valueBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
9718 }
9719 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
9720 Maybe<AutoScratchRegisterMaybeOutputType> scratch2;
9721 if (viewKind == ArrayBufferViewKind::Resizable) {
9722 scratch2.emplace(allocator, masm, output);
9723 }
9725 // Not enough registers on x86.
9726 constexpr auto spectreTemp = mozilla::Nothing{};
9728 FailurePath* failure;
9729 if (!addFailurePath(&failure)) {
9730 return false;
9731 }
9733 // Bounds check.
9734 emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
9735 spectreTemp, failure->label());
9737 if (!Scalar::isBigIntType(elementType)) {
9738 // Load the elements vector.
9739 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);
9741 // Store the value.
9742 BaseIndex dest(scratch, index, ScaleFromScalarType(elementType));
9744 // NOTE: the generated code must match the assembly code in gen_store in
9745 // GenerateAtomicOperations.py
9746 auto sync = Synchronization::Store();
9748 masm.memoryBarrierBefore(sync);
9749 masm.storeToTypedIntArray(elementType, *valueInt32, dest);
9750 masm.memoryBarrierAfter(sync);
9752 masm.tagValue(JSVAL_TYPE_INT32, *valueInt32, output.valueReg());
9753 } else {
9754 // See comment in emitAtomicsCompareExchange for why we use an ABI call.
9756 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
9757 liveVolatileFloatRegs());
9758 volatileRegs.takeUnchecked(output.valueReg());
9759 volatileRegs.takeUnchecked(scratch);
9760 masm.PushRegsInMask(volatileRegs);
9762 using Fn = void (*)(TypedArrayObject*, size_t, const BigInt*);
9763 masm.setupUnalignedABICall(scratch);
9764 masm.passABIArg(obj);
9765 masm.passABIArg(index);
9766 masm.passABIArg(*valueBigInt);
9767 masm.callWithABI<Fn, jit::AtomicsStore64>();
9769 masm.PopRegsInMask(volatileRegs);
9771 masm.tagValue(JSVAL_TYPE_BIGINT, *valueBigInt, output.valueReg());
9772 }
9774 return true;
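// Illustrative note: Atomics.store returns the value that was stored, which
// is why the input value is re-tagged as the IC result above, e.g.
//
//   let r = Atomics.store(i32, 0, 42);  // r == 42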
9777 bool CacheIRCompiler::emitAtomicsIsLockFreeResult(Int32OperandId valueId) {
9778 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9780 AutoOutputRegister output(*this);
9781 Register value = allocator.useRegister(masm, valueId);
9782 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
9784 masm.atomicIsLockFreeJS(value, scratch);
9785 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
9787 return true;
9790 bool CacheIRCompiler::emitBigIntAsIntNResult(Int32OperandId bitsId,
9791 BigIntOperandId bigIntId) {
9792 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9794 AutoCallVM callvm(masm, this, allocator);
9796 Register bits = allocator.useRegister(masm, bitsId);
9797 Register bigInt = allocator.useRegister(masm, bigIntId);
9799 callvm.prepare();
9800 masm.Push(bits);
9801 masm.Push(bigInt);
9803 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
9804 callvm.call<Fn, jit::BigIntAsIntN>();
9805 return true;
9808 bool CacheIRCompiler::emitBigIntAsUintNResult(Int32OperandId bitsId,
9809 BigIntOperandId bigIntId) {
9810 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9812 AutoCallVM callvm(masm, this, allocator);
9814 Register bits = allocator.useRegister(masm, bitsId);
9815 Register bigInt = allocator.useRegister(masm, bigIntId);
9817 callvm.prepare();
9818 masm.Push(bits);
9819 masm.Push(bigInt);
9821 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
9822 callvm.call<Fn, jit::BigIntAsUintN>();
9823 return true;
9826 bool CacheIRCompiler::emitSetHasResult(ObjOperandId setId, ValOperandId valId) {
9827 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9829 AutoCallVM callvm(masm, this, allocator);
9831 Register set = allocator.useRegister(masm, setId);
9832 ValueOperand val = allocator.useValueRegister(masm, valId);
9834 callvm.prepare();
9835 masm.Push(val);
9836 masm.Push(set);
9838 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
9839 callvm.call<Fn, jit::SetObjectHas>();
9840 return true;
9843 bool CacheIRCompiler::emitSetHasNonGCThingResult(ObjOperandId setId,
9844 ValOperandId valId) {
9845 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9847 AutoOutputRegister output(*this);
9848 Register set = allocator.useRegister(masm, setId);
9849 ValueOperand val = allocator.useValueRegister(masm, valId);
9851 AutoScratchRegister scratch1(allocator, masm);
9852 AutoScratchRegister scratch2(allocator, masm);
9853 AutoScratchRegister scratch3(allocator, masm);
9854 AutoScratchRegister scratch4(allocator, masm);
9855 AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
9857 masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
9858 masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);
9860 masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
9861 scratch3, scratch4);
9862 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9863 return true;
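// Illustrative note: toHashableNonGCThing canonicalizes the value so that
// hashing agrees with the SameValueZero equality used by Set and Map, e.g.
//
//   new Set([0]).has(-0)     // true: -0 must hash like +0
//   new Set([NaN]).has(NaN)  // true: every NaN must hash alike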
9866 bool CacheIRCompiler::emitSetHasSymbolResult(ObjOperandId setId,
9867 SymbolOperandId symId) {
9868 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9870 AutoOutputRegister output(*this);
9871 Register set = allocator.useRegister(masm, setId);
9872 Register sym = allocator.useRegister(masm, symId);
9874 AutoScratchRegister scratch1(allocator, masm);
9875 AutoScratchRegister scratch2(allocator, masm);
9876 AutoScratchRegister scratch3(allocator, masm);
9877 AutoScratchRegister scratch4(allocator, masm);
9879 masm.prepareHashSymbol(sym, scratch1);
9881 masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
9882 masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
9883 scratch3, scratch4);
9884 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9885 return true;
9888 bool CacheIRCompiler::emitSetHasBigIntResult(ObjOperandId setId,
9889 BigIntOperandId bigIntId) {
9890 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9892 AutoOutputRegister output(*this);
9893 Register set = allocator.useRegister(masm, setId);
9894 Register bigInt = allocator.useRegister(masm, bigIntId);
9896 AutoScratchRegister scratch1(allocator, masm);
9897 AutoScratchRegister scratch2(allocator, masm);
9898 AutoScratchRegister scratch3(allocator, masm);
9899 AutoScratchRegister scratch4(allocator, masm);
9900 AutoScratchRegister scratch5(allocator, masm);
9901 #ifndef JS_CODEGEN_ARM
9902 AutoScratchRegister scratch6(allocator, masm);
9903 #else
9904 // We don't have more registers available on ARM32.
9905 Register scratch6 = set;
9907 masm.push(set);
9908 #endif
9910 masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);
9912 masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
9913 masm.setObjectHasBigInt(set, output.valueReg(), scratch1, scratch2, scratch3,
9914 scratch4, scratch5, scratch6);
9915 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9917 #ifdef JS_CODEGEN_ARM
9918 masm.pop(set);
9919 #endif
9920 return true;
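// Illustrative note on the ARM32 fallback above: when no spare register is
// left, an input register is borrowed as a scratch and preserved around the
// computation:
//
//   masm.push(set);   // save the input
//   /* ... use 'set' as scratch6 ... */
//   masm.pop(set);    // restore it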
9923 bool CacheIRCompiler::emitSetHasObjectResult(ObjOperandId setId,
9924 ObjOperandId objId) {
9925 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9927 AutoOutputRegister output(*this);
9928 Register set = allocator.useRegister(masm, setId);
9929 Register obj = allocator.useRegister(masm, objId);
9931 AutoScratchRegister scratch1(allocator, masm);
9932 AutoScratchRegister scratch2(allocator, masm);
9933 AutoScratchRegister scratch3(allocator, masm);
9934 AutoScratchRegister scratch4(allocator, masm);
9935 AutoScratchRegister scratch5(allocator, masm);
9937 masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
9938 masm.prepareHashObject(set, output.valueReg(), scratch1, scratch2, scratch3,
9939 scratch4, scratch5);
9941 masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
9942 scratch3, scratch4);
9943 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9944 return true;
9947 bool CacheIRCompiler::emitSetSizeResult(ObjOperandId setId) {
9948 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9950 AutoOutputRegister output(*this);
9951 Register set = allocator.useRegister(masm, setId);
9952 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
9954 masm.loadSetObjectSize(set, scratch);
9955 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
9956 return true;
9959 bool CacheIRCompiler::emitMapHasResult(ObjOperandId mapId, ValOperandId valId) {
9960 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9962 AutoCallVM callvm(masm, this, allocator);
9964 Register map = allocator.useRegister(masm, mapId);
9965 ValueOperand val = allocator.useValueRegister(masm, valId);
9967 callvm.prepare();
9968 masm.Push(val);
9969 masm.Push(map);
9971 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
9972 callvm.call<Fn, jit::MapObjectHas>();
9973 return true;
9976 bool CacheIRCompiler::emitMapHasNonGCThingResult(ObjOperandId mapId,
9977 ValOperandId valId) {
9978 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
9980 AutoOutputRegister output(*this);
9981 Register map = allocator.useRegister(masm, mapId);
9982 ValueOperand val = allocator.useValueRegister(masm, valId);
9984 AutoScratchRegister scratch1(allocator, masm);
9985 AutoScratchRegister scratch2(allocator, masm);
9986 AutoScratchRegister scratch3(allocator, masm);
9987 AutoScratchRegister scratch4(allocator, masm);
9988 AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
9990 masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
9991 masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);
9993 masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
9994 scratch3, scratch4);
9995 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
9996 return true;
9999 bool CacheIRCompiler::emitMapHasSymbolResult(ObjOperandId mapId,
10000 SymbolOperandId symId) {
10001 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10003 AutoOutputRegister output(*this);
10004 Register map = allocator.useRegister(masm, mapId);
10005 Register sym = allocator.useRegister(masm, symId);
10007 AutoScratchRegister scratch1(allocator, masm);
10008 AutoScratchRegister scratch2(allocator, masm);
10009 AutoScratchRegister scratch3(allocator, masm);
10010 AutoScratchRegister scratch4(allocator, masm);
10012 masm.prepareHashSymbol(sym, scratch1);
10014 masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
10015 masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
10016 scratch3, scratch4);
10017 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
10018 return true;
10021 bool CacheIRCompiler::emitMapHasBigIntResult(ObjOperandId mapId,
10022 BigIntOperandId bigIntId) {
10023 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10025 AutoOutputRegister output(*this);
10026 Register map = allocator.useRegister(masm, mapId);
10027 Register bigInt = allocator.useRegister(masm, bigIntId);
10029 AutoScratchRegister scratch1(allocator, masm);
10030 AutoScratchRegister scratch2(allocator, masm);
10031 AutoScratchRegister scratch3(allocator, masm);
10032 AutoScratchRegister scratch4(allocator, masm);
10033 AutoScratchRegister scratch5(allocator, masm);
10034 #ifndef JS_CODEGEN_ARM
10035 AutoScratchRegister scratch6(allocator, masm);
10036 #else
10037 // We don't have more registers available on ARM32.
10038 Register scratch6 = map;
10040 masm.push(map);
10041 #endif
10043 masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);
10045 masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
10046 masm.mapObjectHasBigInt(map, output.valueReg(), scratch1, scratch2, scratch3,
10047 scratch4, scratch5, scratch6);
10048 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
10050 #ifdef JS_CODEGEN_ARM
10051 masm.pop(map);
10052 #endif
10053 return true;
10056 bool CacheIRCompiler::emitMapHasObjectResult(ObjOperandId mapId,
10057 ObjOperandId objId) {
10058 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10060 AutoOutputRegister output(*this);
10061 Register map = allocator.useRegister(masm, mapId);
10062 Register obj = allocator.useRegister(masm, objId);
10064 AutoScratchRegister scratch1(allocator, masm);
10065 AutoScratchRegister scratch2(allocator, masm);
10066 AutoScratchRegister scratch3(allocator, masm);
10067 AutoScratchRegister scratch4(allocator, masm);
10068 AutoScratchRegister scratch5(allocator, masm);
10070 masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
10071 masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
10072 scratch4, scratch5);
10074 masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
10075 scratch3, scratch4);
10076 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
10077 return true;
10080 bool CacheIRCompiler::emitMapGetResult(ObjOperandId mapId, ValOperandId valId) {
10081 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10083 AutoCallVM callvm(masm, this, allocator);
10085 Register map = allocator.useRegister(masm, mapId);
10086 ValueOperand val = allocator.useValueRegister(masm, valId);
10088 callvm.prepare();
10089 masm.Push(val);
10090 masm.Push(map);
10092 using Fn =
10093 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
10094 callvm.call<Fn, jit::MapObjectGet>();
10095 return true;
10098 bool CacheIRCompiler::emitMapGetNonGCThingResult(ObjOperandId mapId,
10099 ValOperandId valId) {
10100 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10102 AutoOutputRegister output(*this);
10103 Register map = allocator.useRegister(masm, mapId);
10104 ValueOperand val = allocator.useValueRegister(masm, valId);
10106 AutoScratchRegister scratch1(allocator, masm);
10107 AutoScratchRegister scratch2(allocator, masm);
10108 AutoScratchRegister scratch3(allocator, masm);
10109 AutoScratchRegister scratch4(allocator, masm);
10110 AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
10112 masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
10113 masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);
10115 masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
10116 output.valueReg(), scratch2, scratch3, scratch4);
10117 return true;
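// Illustrative note: output.valueReg() does double duty above: it first
// holds the hashable copy of the key and is then overwritten with the
// looked-up value, since it is passed to mapObjectGetNonBigInt as both the
// key and the result register.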
10120 bool CacheIRCompiler::emitMapGetSymbolResult(ObjOperandId mapId,
10121 SymbolOperandId symId) {
10122 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10124 AutoOutputRegister output(*this);
10125 Register map = allocator.useRegister(masm, mapId);
10126 Register sym = allocator.useRegister(masm, symId);
10128 AutoScratchRegister scratch1(allocator, masm);
10129 AutoScratchRegister scratch2(allocator, masm);
10130 AutoScratchRegister scratch3(allocator, masm);
10131 AutoScratchRegister scratch4(allocator, masm);
10133 masm.prepareHashSymbol(sym, scratch1);
10135 masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
10136 masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
10137 output.valueReg(), scratch2, scratch3, scratch4);
10138 return true;
10141 bool CacheIRCompiler::emitMapGetBigIntResult(ObjOperandId mapId,
10142 BigIntOperandId bigIntId) {
10143 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10145 AutoOutputRegister output(*this);
10146 Register map = allocator.useRegister(masm, mapId);
10147 Register bigInt = allocator.useRegister(masm, bigIntId);
10149 AutoScratchRegister scratch1(allocator, masm);
10150 AutoScratchRegister scratch2(allocator, masm);
10151 AutoScratchRegister scratch3(allocator, masm);
10152 AutoScratchRegister scratch4(allocator, masm);
10153 AutoScratchRegister scratch5(allocator, masm);
10154 #ifndef JS_CODEGEN_ARM
10155 AutoScratchRegister scratch6(allocator, masm);
10156 #else
10157 // We don't have more registers available on ARM32.
10158 Register scratch6 = map;
10160 masm.push(map);
10161 #endif
10163 masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);
10165 masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
10166 masm.mapObjectGetBigInt(map, output.valueReg(), scratch1, output.valueReg(),
10167 scratch2, scratch3, scratch4, scratch5, scratch6);
10169 #ifdef JS_CODEGEN_ARM
10170 masm.pop(map);
10171 #endif
10172 return true;
10175 bool CacheIRCompiler::emitMapGetObjectResult(ObjOperandId mapId,
10176 ObjOperandId objId) {
10177 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10179 AutoOutputRegister output(*this);
10180 Register map = allocator.useRegister(masm, mapId);
10181 Register obj = allocator.useRegister(masm, objId);
10183 AutoScratchRegister scratch1(allocator, masm);
10184 AutoScratchRegister scratch2(allocator, masm);
10185 AutoScratchRegister scratch3(allocator, masm);
10186 AutoScratchRegister scratch4(allocator, masm);
10187 AutoScratchRegister scratch5(allocator, masm);
10189 masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
10190 masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
10191 scratch4, scratch5);
10193 masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
10194 output.valueReg(), scratch2, scratch3, scratch4);
10195 return true;
10198 bool CacheIRCompiler::emitMapSizeResult(ObjOperandId mapId) {
10199 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10201 AutoOutputRegister output(*this);
10202 Register map = allocator.useRegister(masm, mapId);
10203 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
10205 masm.loadMapObjectSize(map, scratch);
10206 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
10207 return true;
10210 bool CacheIRCompiler::emitArrayFromArgumentsObjectResult(ObjOperandId objId,
10211 uint32_t shapeOffset) {
10212 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10214 AutoCallVM callvm(masm, this, allocator);
10216 Register obj = allocator.useRegister(masm, objId);
10218 callvm.prepare();
10219 masm.Push(obj);
10221 using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
10222 callvm.call<Fn, js::ArrayFromArgumentsObject>();
10223 return true;
10226 bool CacheIRCompiler::emitGuardGlobalGeneration(uint32_t expectedOffset,
10227 uint32_t generationAddrOffset) {
10228 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10230 AutoScratchRegister scratch(allocator, masm);
10231 AutoScratchRegister scratch2(allocator, masm);
10233 FailurePath* failure;
10234 if (!addFailurePath(&failure)) {
10235 return false;
10236 }
10238 StubFieldOffset expected(expectedOffset, StubField::Type::RawInt32);
10239 emitLoadStubField(expected, scratch);
10241 StubFieldOffset generationAddr(generationAddrOffset,
10242 StubField::Type::RawPointer);
10243 emitLoadStubField(generationAddr, scratch2);
10245 masm.branch32(Assembler::NotEqual, Address(scratch2, 0), scratch,
10246 failure->label());
10248 return true;
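// Illustrative sketch (not compiled): the guard above is equivalent to
//
//   uint32_t expected = /* stub field */;
//   const uint32_t* generationAddr = /* stub field */;
//   if (*generationAddr != expected) {
//     goto failure;  // the generation moved on; re-specialize via the
//                    // generic path
//   }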
10251 bool CacheIRCompiler::emitGuardFuse(RealmFuses::FuseIndex fuseIndex) {
10252 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10253 AutoScratchRegister scratch(allocator, masm);
10255 FailurePath* failure;
10256 if (!addFailurePath(&failure)) {
10257 return false;
10258 }
10260 masm.loadRealmFuse(fuseIndex, scratch);
10261 masm.branchPtr(Assembler::NotEqual, scratch, ImmPtr(nullptr),
10262 failure->label());
10263 return true;
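// Illustrative sketch: as the branch above shows, an intact fuse is
// represented by a null word, so the guard amounts to
//
//   if (realmFuses[fuseIndex] != nullptr) {
//     goto failure;  // the fuse was popped; the guarded invariant is gone
//   }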
10266 bool CacheIRCompiler::emitBailout() {
10267 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10269 // Generates no code.
10271 return true;
10274 bool CacheIRCompiler::emitAssertRecoveredOnBailoutResult(ValOperandId valId,
10275 bool mustBeRecovered) {
10276 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10278 AutoOutputRegister output(*this);
10280 // NOP when not in IonMonkey
10281 masm.moveValue(UndefinedValue(), output.valueReg());
10283 return true;
10286 bool CacheIRCompiler::emitAssertPropertyLookup(ObjOperandId objId,
10287 uint32_t idOffset,
10288 uint32_t slotOffset) {
10289 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10291 Register obj = allocator.useRegister(masm, objId);
10293 AutoScratchRegister id(allocator, masm);
10294 AutoScratchRegister slot(allocator, masm);
10296 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
10297 masm.PushRegsInMask(save);
10299 masm.setupUnalignedABICall(id);
10301 StubFieldOffset idField(idOffset, StubField::Type::Id);
10302 emitLoadStubField(idField, id);
10304 StubFieldOffset slotField(slotOffset, StubField::Type::RawInt32);
10305 emitLoadStubField(slotField, slot);
10307 masm.passABIArg(obj);
10308 masm.passABIArg(id);
10309 masm.passABIArg(slot);
10310 using Fn = void (*)(NativeObject*, PropertyKey, uint32_t);
10311 masm.callWithABI<Fn, js::jit::AssertPropertyLookup>();
10312 masm.PopRegsInMask(save);
10314 return true;
10317 #ifdef FUZZING_JS_FUZZILLI
10318 bool CacheIRCompiler::emitFuzzilliHashResult(ValOperandId valId) {
10319 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
10321 ValueOperand input = allocator.useValueRegister(masm, valId);
10322 AutoScratchRegister scratch(allocator, masm);
10323 AutoScratchRegister scratchJSContext(allocator, masm);
10324 AutoScratchFloatRegister floatReg(this);
10325 # ifdef JS_PUNBOX64
10326 AutoScratchRegister64 scratch64(allocator, masm);
10327 # else
10328 AutoScratchRegister scratch2(allocator, masm);
10329 # endif
10331 Label addFloat, updateHash, done;
10333 {
10334 ScratchTagScope tag(masm, input);
10335 masm.splitTagForTest(input, tag);
10337 Label notInt32;
10338 masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
10340 ScratchTagScopeRelease _(&tag);
10342 masm.unboxInt32(input, scratch);
10343 masm.convertInt32ToDouble(scratch, floatReg);
10344 masm.jump(&addFloat);
10346 masm.bind(&notInt32);
10348 Label notDouble;
10349 masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
10351 ScratchTagScopeRelease _(&tag);
10353 masm.unboxDouble(input, floatReg);
10354 masm.canonicalizeDouble(floatReg);
10355 masm.jump(&addFloat);
10357 masm.bind(&notDouble);
10359 Label notNull;
10360 masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
10362 ScratchTagScopeRelease _(&tag);
10364 masm.move32(Imm32(1), scratch);
10365 masm.convertInt32ToDouble(scratch, floatReg);
10366 masm.jump(&addFloat);
10368 masm.bind(&notNull);
10370 Label notUndefined;
10371 masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
10373 ScratchTagScopeRelease _(&tag);
10375 masm.move32(Imm32(2), scratch);
10376 masm.convertInt32ToDouble(scratch, floatReg);
10377 masm.jump(&addFloat);
10379 masm.bind(&notUndefined);
10381 Label notBoolean;
10382 masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
10384 ScratchTagScopeRelease _(&tag);
10386 masm.unboxBoolean(input, scratch);
10387 masm.add32(Imm32(3), scratch);
10388 masm.convertInt32ToDouble(scratch, floatReg);
10389 masm.jump(&addFloat);
10391 masm.bind(&notBoolean);
10393 Label notBigInt;
10394 masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
10396 ScratchTagScopeRelease _(&tag);
10398 masm.unboxBigInt(input, scratch);
10400 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
10401 liveVolatileFloatRegs());
10402 masm.PushRegsInMask(volatileRegs);
10403 // TODO: remove floatReg, scratch, scratchJS?
10405 using Fn = uint32_t (*)(BigInt* bigInt);
10406 masm.setupUnalignedABICall(scratchJSContext);
10407 masm.loadJSContext(scratchJSContext);
10408 masm.passABIArg(scratch);
10409 masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
10410 masm.storeCallInt32Result(scratch);
10412 LiveRegisterSet ignore;
10413 ignore.add(scratch);
10414 ignore.add(scratchJSContext);
10415 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
10416 masm.jump(&updateHash);
10418 masm.bind(&notBigInt);
10420 Label notObject;
10421 masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
10423 ScratchTagScopeRelease _(&tag);
10425 AutoCallVM callvm(masm, this, allocator);
10426 Register obj = allocator.allocateRegister(masm);
10427 masm.unboxObject(input, obj);
10429 callvm.prepare();
10430 masm.Push(obj);
10432 using Fn = void (*)(JSContext* cx, JSObject* o);
10433 callvm.callNoResult<Fn, js::FuzzilliHashObject>();
10434 allocator.releaseRegister(obj);
10436 masm.jump(&done);
10438 masm.bind(&notObject);
10439 {
10440 masm.move32(Imm32(0), scratch);
10441 masm.jump(&updateHash);
10442 }
10443 }
10445 {
10446 masm.bind(&addFloat);
10448 masm.loadJSContext(scratchJSContext);
10449 Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
10451 # ifdef JS_PUNBOX64
10452 masm.moveDoubleToGPR64(floatReg, scratch64);
10453 masm.move32(scratch64.get().reg, scratch);
10454 masm.rshift64(Imm32(32), scratch64);
10455 masm.add32(scratch64.get().reg, scratch);
10456 # else
10457 Register64 scratch64(scratch, scratch2);
10458 masm.moveDoubleToGPR64(floatReg, scratch64);
10459 masm.add32(scratch2, scratch);
10460 # endif
10461 }
10463 {
10464 masm.bind(&updateHash);
10466 masm.loadJSContext(scratchJSContext);
10467 Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
10468 masm.load32(addrExecHash, scratchJSContext);
10469 masm.add32(scratchJSContext, scratch);
10470 masm.rotateLeft(Imm32(1), scratch, scratch);
10471 masm.loadJSContext(scratchJSContext);
10472 masm.store32(scratch, addrExecHash);
10474 // stats
10475 Address addrExecHashInputs(scratchJSContext,
10476 offsetof(JSContext, executionHashInputs));
10477 masm.load32(addrExecHashInputs, scratch);
10478 masm.add32(Imm32(1), scratch);
10479 masm.store32(scratch, addrExecHashInputs);
10480 }
10482 masm.bind(&done);
10484 AutoOutputRegister output(*this);
10485 masm.moveValue(UndefinedValue(), output.valueReg());
10486 return true;
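// Illustrative sketch of the update sequence above (assumption: mirrors the
// interpreter's Fuzzilli hash bookkeeping):
//
//   cx->executionHash = RotateLeft32(cx->executionHash + input, 1);
//   cx->executionHashInputs += 1;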
10488 #endif
10490 template <typename Fn, Fn fn>
10491 void CacheIRCompiler::callVM(MacroAssembler& masm) {
10492 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
10493 callVMInternal(masm, id);
10494 }
10496 void CacheIRCompiler::callVMInternal(MacroAssembler& masm, VMFunctionId id) {
10497 MOZ_ASSERT(enteredStubFrame_);
10498 if (mode_ == Mode::Ion) {
10499 TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
10500 const VMFunctionData& fun = GetVMFunction(id);
10501 uint32_t frameSize = fun.explicitStackSlots() * sizeof(void*);
10502 masm.PushFrameDescriptor(FrameType::IonICCall);
10503 masm.callJit(code);
10505 // Pop rest of the exit frame and the arguments left on the stack.
10506 int framePop =
10507 sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
10508 masm.implicitPop(frameSize + framePop);
10510 masm.freeStack(asIon()->localTracingSlots() * sizeof(Value));
10512 // Pop IonICCallFrameLayout.
10513 masm.Pop(FramePointer);
10514 masm.freeStack(IonICCallFrameLayout::Size() - sizeof(void*));
10515 return;
10516 }
10518 MOZ_ASSERT(mode_ == Mode::Baseline);
10520 TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
10522 EmitBaselineCallVM(code, masm);
10523 }
10525 bool CacheIRCompiler::isBaseline() { return mode_ == Mode::Baseline; }
10527 bool CacheIRCompiler::isIon() { return mode_ == Mode::Ion; }
10529 BaselineCacheIRCompiler* CacheIRCompiler::asBaseline() {
10530 MOZ_ASSERT(this->isBaseline());
10531 return static_cast<BaselineCacheIRCompiler*>(this);
10532 }
10534 IonCacheIRCompiler* CacheIRCompiler::asIon() {
10535 MOZ_ASSERT(this->isIon());
10536 return static_cast<IonCacheIRCompiler*>(this);
10537 }
10539 #ifdef DEBUG
10540 void CacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
10541 if (isBaseline()) {
10542 // Baseline does not have any FloatRegisters live when calling an IC stub.
10543 return;
10544 }
10546 asIon()->assertFloatRegisterAvailable(reg);
10547 }
10548 #endif
10550 AutoCallVM::AutoCallVM(MacroAssembler& masm, CacheIRCompiler* compiler,
10551 CacheRegisterAllocator& allocator)
10552 : masm_(masm), compiler_(compiler), allocator_(allocator) {
10553 // Ion needs to `enterStubFrame` before it can callVM and it also needs to
10554 // initialize AutoSaveLiveRegisters.
10555 if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
10556 // A downcast is needed here as well, in order to pass the stub to
10557 // AutoSaveLiveRegisters.
10558 save_.emplace(*compiler_->asIon());
10559 }
10561 if (compiler->outputUnchecked_.isSome()) {
10562 output_.emplace(*compiler);
10563 }
10565 if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
10566 stubFrame_.emplace(*compiler_->asBaseline());
10567 if (output_.isSome()) {
10568 scratch_.emplace(allocator_, masm_, output_.ref());
10569 } else {
10570 scratch_.emplace(allocator_, masm_);
10571 }
10572 }
10573 }
10575 void AutoCallVM::prepare() {
10576 allocator_.discardStack(masm_);
10577 MOZ_ASSERT(compiler_ != nullptr);
10578 if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
10579 compiler_->asIon()->enterStubFrame(masm_, *save_.ptr());
10580 return;
10581 }
10582 MOZ_ASSERT(compiler_->mode_ == CacheIRCompiler::Mode::Baseline);
10583 stubFrame_->enter(masm_, scratch_.ref());
10584 }
10586 void AutoCallVM::storeResult(JSValueType returnType) {
10587 MOZ_ASSERT(returnType != JSVAL_TYPE_DOUBLE);
10589 if (returnType == JSVAL_TYPE_UNKNOWN) {
10590 masm_.storeCallResultValue(output_.ref());
10591 } else {
10592 if (output_->hasValue()) {
10593 masm_.tagValue(returnType, ReturnReg, output_->valueReg());
10594 } else {
10595 masm_.storeCallPointerResult(output_->typedReg().gpr());
10596 }
10597 }
10598 }
void AutoCallVM::leaveBaselineStubFrame() {
  if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
    stubFrame_->leave(masm_);
  }
}

template <typename...>
struct VMFunctionReturnType;

template <class R, typename... Args>
struct VMFunctionReturnType<R (*)(JSContext*, Args...)> {
  using LastArgument = typename LastArg<Args...>::Type;

  // By convention, VMFunctions returning `bool` use an output parameter.
  using ReturnType =
      std::conditional_t<std::is_same_v<R, bool>, LastArgument, R>;
};

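// For example (illustrative only): a fallible VMFunction such as
//
//   bool (*)(JSContext*, HandleString, int32_t*)
//
// reports its value through the trailing out-parameter, so ReturnType is
// deduced as int32_t*, whereas a pointer-returning signature such as
//
//   JSString* (*)(JSContext*, HandleValue)
//
// yields ReturnType == JSString* directly.
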
template <class>
struct ReturnTypeToJSValueType;

// Definitions for the currently used return types.
template <>
struct ReturnTypeToJSValueType<MutableHandleValue> {
  static constexpr JSValueType result = JSVAL_TYPE_UNKNOWN;
};
template <>
struct ReturnTypeToJSValueType<bool*> {
  static constexpr JSValueType result = JSVAL_TYPE_BOOLEAN;
};
template <>
struct ReturnTypeToJSValueType<int32_t*> {
  static constexpr JSValueType result = JSVAL_TYPE_INT32;
};
template <>
struct ReturnTypeToJSValueType<JSString*> {
  static constexpr JSValueType result = JSVAL_TYPE_STRING;
};
template <>
struct ReturnTypeToJSValueType<JSAtom*> {
  static constexpr JSValueType result = JSVAL_TYPE_STRING;
};
template <>
struct ReturnTypeToJSValueType<BigInt*> {
  static constexpr JSValueType result = JSVAL_TYPE_BIGINT;
};
template <>
struct ReturnTypeToJSValueType<JSObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<PropertyIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<ArrayIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<StringIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<RegExpStringIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<PlainObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<ArrayObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<TypedArrayObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};

template <typename Fn>
void AutoCallVM::storeResult() {
  using ReturnType = typename VMFunctionReturnType<Fn>::ReturnType;
  storeResult(ReturnTypeToJSValueType<ReturnType>::result);
}

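// Putting the two traits together (illustrative only): for
//
//   using Fn = JSString* (*)(JSContext*, HandleValue);
//
// VMFunctionReturnType<Fn>::ReturnType is JSString*, and
// ReturnTypeToJSValueType<JSString*>::result is JSVAL_TYPE_STRING, so
// storeResult<Fn>() tags the pointer returned in ReturnReg as a string Value
// in the output location.
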
AutoScratchFloatRegister::AutoScratchFloatRegister(CacheIRCompiler* compiler,
                                                   FailurePath* failure)
    : compiler_(compiler), failure_(failure) {
  // If we're compiling a Baseline IC, FloatReg0 is always available.
  if (!compiler_->isBaseline()) {
    MacroAssembler& masm = compiler_->masm;
    masm.push(FloatReg0);
    compiler->allocator.setHasAutoScratchFloatRegisterSpill(true);
  }

  if (failure_) {
    failure_->setHasAutoScratchFloatRegister();
  }
}

AutoScratchFloatRegister::~AutoScratchFloatRegister() {
  if (failure_) {
    failure_->clearHasAutoScratchFloatRegister();
  }

  if (!compiler_->isBaseline()) {
    MacroAssembler& masm = compiler_->masm;
    masm.pop(FloatReg0);
    compiler_->allocator.setHasAutoScratchFloatRegisterSpill(false);

    if (failure_) {
      Label done;
      masm.jump(&done);
      masm.bind(&failurePopReg_);
      masm.pop(FloatReg0);
      masm.jump(failure_->label());
      masm.bind(&done);
    }
  }
}

Label* AutoScratchFloatRegister::failure() {
  MOZ_ASSERT(failure_);

  if (!compiler_->isBaseline()) {
    return &failurePopReg_;
  }
  return failure_->labelUnchecked();
}

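// Illustrative usage sketch, not part of the original source: code that needs
// a temporary double register jumps to failure() instead of the raw failure
// path, so the Ion case can restore FloatReg0 before bailing out. The
// double-to-int32 conversion below is only an example of such a guard, and it
// assumes AutoScratchFloatRegister converts implicitly to FloatRegister:
//
//   AutoScratchFloatRegister floatScratch(this, failure);
//   masm.unboxDouble(val, floatScratch);
//   masm.convertDoubleToInt32(floatScratch, output, floatScratch.failure());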