Bug 1867190 - Add prefs for PHC probabilities r=glandium
[gecko.git] / js / src / jit / CacheIRCompiler.cpp
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/CacheIRCompiler.h"

#include "mozilla/ArrayUtils.h"
#include "mozilla/FunctionTypeTraits.h"
#include "mozilla/MaybeOneOf.h"
#include "mozilla/ScopeExit.h"

#include <type_traits>
#include <utility>

#include "jslibmath.h"
#include "jsmath.h"

#include "builtin/DataViewObject.h"
#include "builtin/MapObject.h"
#include "builtin/Object.h"
#include "gc/GCEnum.h"
#include "gc/SweepingAPI.h"  // js::gc::AutoLockStoreBuffer
#include "jit/BaselineCacheIRCompiler.h"
#include "jit/CacheIRGenerator.h"
#include "jit/IonCacheIRCompiler.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/VMFunctions.h"
#include "js/friend/DOMProxy.h"     // JS::ExpandoAndGeneration
#include "js/friend/XrayJitInfo.h"  // js::jit::GetXrayJitInfo
#include "js/ScalarType.h"          // js::Scalar::Type
#include "proxy/DOMProxy.h"
#include "proxy/Proxy.h"
#include "proxy/ScriptedProxyHandler.h"
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BigIntType.h"
#include "vm/FunctionFlags.h"  // js::FunctionFlags
#include "vm/GeneratorObject.h"
#include "vm/GetterSetter.h"
#include "vm/Interpreter.h"
#include "vm/Uint8Clamped.h"

#include "builtin/Boolean-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "jit/VMFunctionList-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::BitwiseCast;
using mozilla::Maybe;

using JS::ExpandoAndGeneration;

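// Load the given operand into a ValueOperand register, boxing typed payloads
// and popping stack values as needed, and mark the register as in use by the
// current instruction.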
ValueOperand CacheRegisterAllocator::useValueRegister(MacroAssembler& masm,
                                                      ValOperandId op) {
  OperandLocation& loc = operandLocations_[op.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      currentOpRegs_.add(loc.valueReg());
      return loc.valueReg();

    case OperandLocation::ValueStack: {
      ValueOperand reg = allocateValueRegister(masm);
      popValue(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      ValueOperand reg = allocateValueRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Constant: {
      ValueOperand reg = allocateValueRegister(masm);
      masm.moveValue(loc.constant(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadReg: {
      // Temporarily add the payload register to currentOpRegs_ so
      // allocateValueRegister will stay away from it.
      currentOpRegs_.add(loc.payloadReg());
      ValueOperand reg = allocateValueRegister(masm);
      masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
      currentOpRegs_.take(loc.payloadReg());
      availableRegs_.add(loc.payloadReg());
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      ValueOperand reg = allocateValueRegister(masm);
      popPayload(masm, &loc, reg.scratchReg());
      masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::DoubleReg: {
      ValueOperand reg = allocateValueRegister(masm);
      {
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(loc.doubleReg(), reg, fpscratch);
      }
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

// Load a value operand directly into a float register. Caller must have
// guarded isNumber on the provided val.
void CacheRegisterAllocator::ensureDoubleRegister(MacroAssembler& masm,
                                                  NumberOperandId op,
                                                  FloatRegister dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[op.id()];

  Label failure, done;
  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      masm.ensureDouble(loc.valueReg(), dest, &failure);
      break;
    }

    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::DoubleReg: {
      masm.moveDouble(loc.doubleReg(), dest);
      return;
    }

    case OperandLocation::Constant: {
      MOZ_ASSERT(loc.constant().isNumber(),
                 "Caller must ensure the operand is a number value");
      masm.loadConstantDouble(loc.constant().toNumber(), dest);
      return;
    }

    case OperandLocation::PayloadReg: {
      // Doubles can't be stored in payload registers, so this must be an int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      masm.convertInt32ToDouble(loc.payloadReg(), dest);
      return;
    }

    case OperandLocation::PayloadStack: {
      // Doubles can't be stored in payload registers, so this must be an int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.convertInt32ToDouble(addr, dest);
      return;
    }

    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand type in ensureDoubleRegister");
      return;
  }
  masm.jump(&done);

  masm.bind(&failure);
  masm.assumeUnreachable(
      "Missing guard allowed non-number to hit ensureDoubleRegister");
  masm.bind(&done);
}

void CacheRegisterAllocator::copyToScratchRegister(MacroAssembler& masm,
                                                   TypedOperandId typedId,
                                                   Register dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[typedId.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      masm.unboxNonDouble(loc.valueReg(), dest, typedId.type());
      break;
    }
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::PayloadReg: {
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      masm.mov(loc.payloadReg(), dest);
      return;
    }
    case OperandLocation::PayloadStack: {
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.loadPtr(addr, dest);
      return;
    }
    case OperandLocation::DoubleReg:
    case OperandLocation::Constant:
    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand location");
  }
}

void CacheRegisterAllocator::copyToScratchValueRegister(
    MacroAssembler& masm, ValOperandId valId, ValueOperand dest) const {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  const OperandLocation& loc = operandLocations_[valId.id()];
  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      masm.moveValue(loc.valueReg(), dest);
      break;
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::Constant:
      masm.moveValue(loc.constant(), dest);
      break;
    case OperandLocation::PayloadReg:
      masm.tagValue(loc.payloadType(), loc.payloadReg(), dest);
      break;
    case OperandLocation::PayloadStack: {
      Address addr = payloadAddress(masm, &loc);
      masm.loadPtr(addr, dest.scratchReg());
      masm.tagValue(loc.payloadType(), dest.scratchReg(), dest);
      break;
    }
    case OperandLocation::DoubleReg: {
      ScratchDoubleScope fpscratch(masm);
      masm.boxDouble(loc.doubleReg(), dest, fpscratch);
      break;
    }
    case OperandLocation::Uninitialized:
      MOZ_CRASH();
  }
}

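// Load the payload of a typed operand into a general-purpose register. Boxed
// values are unboxed the first time they are used this way.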
Register CacheRegisterAllocator::useRegister(MacroAssembler& masm,
                                             TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  switch (loc.kind()) {
    case OperandLocation::PayloadReg:
      currentOpRegs_.add(loc.payloadReg());
      return loc.payloadReg();

    case OperandLocation::ValueReg: {
      // It's possible the value is still boxed: as an optimization, we unbox
      // the first time we use a value as object.
      ValueOperand val = loc.valueReg();
      availableRegs_.add(val);
      Register reg = val.scratchReg();
      availableRegs_.take(reg);
      masm.unboxNonDouble(val, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      currentOpRegs_.add(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      Register reg = allocateRegister(masm);
      popPayload(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::ValueStack: {
      // The value is on the stack, but boxed. If it's on top of the stack we
      // unbox it and then remove it from the stack, else we just unbox.
      Register reg = allocateRegister(masm);
      if (loc.valueStack() == stackPushed_) {
        masm.unboxNonDouble(Address(masm.getStackPointer(), 0), reg,
                            typedId.type());
        masm.addToStackPtr(Imm32(sizeof(js::Value)));
        MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
        stackPushed_ -= sizeof(js::Value);
      } else {
        MOZ_ASSERT(loc.valueStack() < stackPushed_);
        masm.unboxNonDouble(
            Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
            reg, typedId.type());
      }
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      Register reg = allocateRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.unboxNonDouble(addr, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::Constant: {
      Value v = loc.constant();
      Register reg = allocateRegister(masm);
      if (v.isString()) {
        masm.movePtr(ImmGCPtr(v.toString()), reg);
      } else if (v.isSymbol()) {
        masm.movePtr(ImmGCPtr(v.toSymbol()), reg);
      } else if (v.isBigInt()) {
        masm.movePtr(ImmGCPtr(v.toBigInt()), reg);
      } else {
        MOZ_CRASH("Unexpected Value");
      }
      loc.setPayloadReg(reg, v.extractNonDoubleType());
      return reg;
    }

    case OperandLocation::DoubleReg:
    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

ConstantOrRegister CacheRegisterAllocator::useConstantOrRegister(
    MacroAssembler& masm, ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[val.id()];
  switch (loc.kind()) {
    case OperandLocation::Constant:
      return loc.constant();

    case OperandLocation::PayloadReg:
    case OperandLocation::PayloadStack: {
      JSValueType payloadType = loc.payloadType();
      Register reg = useRegister(masm, TypedOperandId(val, payloadType));
      return TypedOrValueRegister(MIRTypeFromValueType(payloadType),
                                  AnyRegister(reg));
    }

    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      return TypedOrValueRegister(useValueRegister(masm, val));

    case OperandLocation::DoubleReg:
      return TypedOrValueRegister(MIRType::Double,
                                  AnyRegister(loc.doubleReg()));

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

Register CacheRegisterAllocator::defineRegister(MacroAssembler& masm,
                                                TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

  Register reg = allocateRegister(masm);
  loc.setPayloadReg(reg, typedId.type());
  return reg;
}

ValueOperand CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm,
                                                         ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[val.id()];
  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

  ValueOperand reg = allocateValueRegister(masm);
  loc.setValueReg(reg);
  return reg;
}

void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler& masm) {
  // See if any operands are dead so we can reuse their registers. Note that
  // we skip the input operands, as those are also used by failure paths, and
  // we currently don't track those uses.
  for (size_t i = writer_.numInputOperands(); i < operandLocations_.length();
       i++) {
    if (!writer_.operandIsDead(i, currentInstruction_)) {
      continue;
    }

    OperandLocation& loc = operandLocations_[i];
    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        availableRegs_.add(loc.payloadReg());
        break;
      case OperandLocation::ValueReg:
        availableRegs_.add(loc.valueReg());
        break;
      case OperandLocation::PayloadStack:
        masm.propagateOOM(freePayloadSlots_.append(loc.payloadStack()));
        break;
      case OperandLocation::ValueStack:
        masm.propagateOOM(freeValueSlots_.append(loc.valueStack()));
        break;
      case OperandLocation::Uninitialized:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        break;
    }
    loc.setUninitialized();
  }
}

void CacheRegisterAllocator::discardStack(MacroAssembler& masm) {
  // This should only be called when we are no longer using the operands,
  // as we're discarding everything from the native stack. Set all operand
  // locations to Uninitialized to catch bugs.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    operandLocations_[i].setUninitialized();
  }

  if (stackPushed_ > 0) {
    masm.addToStackPtr(Imm32(stackPushed_));
    stackPushed_ = 0;
  }
  freePayloadSlots_.clear();
  freeValueSlots_.clear();
}

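// Allocate a general-purpose register. If none is free, first release dead
// operands and then spill live operands to the stack.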
Register CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  if (availableRegs_.empty()) {
    freeDeadOperandLocations(masm);
  }

  if (availableRegs_.empty()) {
    // Still no registers available, try to spill unused operands to
    // the stack.
    for (size_t i = 0; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.kind() == OperandLocation::PayloadReg) {
        Register reg = loc.payloadReg();
        if (currentOpRegs_.has(reg)) {
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // We got a register, so break out of the loop.
      }
      if (loc.kind() == OperandLocation::ValueReg) {
        ValueOperand reg = loc.valueReg();
        if (currentOpRegs_.aliases(reg)) {
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // Break out of the loop.
      }
    }
  }

  if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
    Register reg = availableRegsAfterSpill_.takeAny();
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));

    availableRegs_.add(reg);
  }

  // At this point, there must be a free register.
  MOZ_RELEASE_ASSERT(!availableRegs_.empty());

  Register reg = availableRegs_.takeAny();
  currentOpRegs_.add(reg);
  return reg;
}

void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm,
                                                   Register reg) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  // Fixed registers should be allocated first, to ensure they're
  // still available.
  MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");

  freeDeadOperandLocations(masm);

  if (availableRegs_.has(reg)) {
    availableRegs_.take(reg);
    currentOpRegs_.add(reg);
    return;
  }

  // Register may be available only after spilling contents.
  if (availableRegsAfterSpill_.has(reg)) {
    availableRegsAfterSpill_.take(reg);
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
    currentOpRegs_.add(reg);
    return;
  }

  // The register must be used by some operand. Spill it to the stack.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.kind() == OperandLocation::PayloadReg) {
      if (loc.payloadReg() != reg) {
        continue;
      }

      spillOperandToStackOrRegister(masm, &loc);
      currentOpRegs_.add(reg);
      return;
    }
    if (loc.kind() == OperandLocation::ValueReg) {
      if (!loc.valueReg().aliases(reg)) {
        continue;
      }

      ValueOperand valueReg = loc.valueReg();
      spillOperandToStackOrRegister(masm, &loc);

      availableRegs_.add(valueReg);
      availableRegs_.take(reg);
      currentOpRegs_.add(reg);
      return;
    }
  }

  MOZ_CRASH("Invalid register");
}

void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler& masm,
                                                        ValueOperand reg) {
#ifdef JS_NUNBOX32
  allocateFixedRegister(masm, reg.payloadReg());
  allocateFixedRegister(masm, reg.typeReg());
#else
  allocateFixedRegister(masm, reg.valueReg());
#endif
}

#ifdef JS_NUNBOX32
// Possible miscompilation in clang-12 (bug 1689641)
MOZ_NEVER_INLINE
#endif
ValueOperand CacheRegisterAllocator::allocateValueRegister(
    MacroAssembler& masm) {
#ifdef JS_NUNBOX32
  Register reg1 = allocateRegister(masm);
  Register reg2 = allocateRegister(masm);
  return ValueOperand(reg1, reg2);
#else
  Register reg = allocateRegister(masm);
  return ValueOperand(reg);
#endif
}

bool CacheRegisterAllocator::init() {
  if (!origInputLocations_.resize(writer_.numInputOperands())) {
    return false;
  }
  if (!operandLocations_.resize(writer_.numOperandIds())) {
    return false;
  }
  return true;
}

void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
  // Registers not in availableRegs_ and not used by input operands are
  // available after being spilled.
  availableRegsAfterSpill_.set() = GeneralRegisterSet::Intersect(
      GeneralRegisterSet::Not(availableRegs_.set()),
      GeneralRegisterSet::Not(inputRegisterSet()));
}

void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm) {
  // If IC inputs alias each other, make sure they are stored in different
  // locations so we don't have to deal with this complexity in the rest of
  // the allocator.
  //
  // Note that this can happen in IonMonkey with something like |o.foo = o|
  // or |o[i] = i|.

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(origInputLocations_.length() == numInputs);

  for (size_t i = 1; i < numInputs; i++) {
    OperandLocation& loc1 = operandLocations_[i];
    if (!loc1.isInRegister()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      OperandLocation& loc2 = operandLocations_[j];
      if (!loc1.aliasesReg(loc2)) {
        continue;
      }

      // loc1 and loc2 alias so we spill one of them. If one is a
      // ValueReg and the other is a PayloadReg, we have to spill the
      // PayloadReg: spilling the ValueReg instead would leave its type
      // register unallocated on 32-bit platforms.
      if (loc1.kind() == OperandLocation::ValueReg) {
        spillOperandToStack(masm, &loc2);
      } else {
        MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
        spillOperandToStack(masm, &loc1);
        break;  // Spilled loc1, so nothing else will alias it.
      }
    }
  }

#ifdef DEBUG
  assertValidState();
#endif
}

GeneralRegisterSet CacheRegisterAllocator::inputRegisterSet() const {
  MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());

  AllocatableGeneralRegisterSet result;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(loc == origInputLocations_[i]);

    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        result.addUnchecked(loc.payloadReg());
        continue;
      case OperandLocation::ValueReg:
        result.addUnchecked(loc.valueReg());
        continue;
      case OperandLocation::PayloadStack:
      case OperandLocation::ValueStack:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        continue;
      case OperandLocation::Uninitialized:
        break;
    }
    MOZ_CRASH("Invalid kind");
  }

  return result.set();
}

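// Return the statically known type of the operand, or JSVAL_TYPE_UNKNOWN if
// it is still a boxed Value.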
JSValueType CacheRegisterAllocator::knownType(ValOperandId val) const {
  const OperandLocation& loc = operandLocations_[val.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      return JSVAL_TYPE_UNKNOWN;

    case OperandLocation::PayloadStack:
    case OperandLocation::PayloadReg:
      return loc.payloadType();

    case OperandLocation::Constant:
      return loc.constant().isDouble() ? JSVAL_TYPE_DOUBLE
                                       : loc.constant().extractNonDoubleType();

    case OperandLocation::DoubleReg:
      return JSVAL_TYPE_DOUBLE;

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}

void CacheRegisterAllocator::initInputLocation(
    size_t i, const TypedOrValueRegister& reg) {
  if (reg.hasValue()) {
    initInputLocation(i, reg.valueReg());
  } else if (reg.typedReg().isFloat()) {
    MOZ_ASSERT(reg.type() == MIRType::Double);
    initInputLocation(i, reg.typedReg().fpu());
  } else {
    initInputLocation(i, reg.typedReg().gpr(),
                      ValueTypeFromMIRType(reg.type()));
  }
}

void CacheRegisterAllocator::initInputLocation(
    size_t i, const ConstantOrRegister& value) {
  if (value.constant()) {
    initInputLocation(i, value.value());
  } else {
    initInputLocation(i, value.reg());
  }
}

void CacheRegisterAllocator::spillOperandToStack(MacroAssembler& masm,
                                                 OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  if (loc->kind() == OperandLocation::ValueReg) {
    if (!freeValueSlots_.empty()) {
      uint32_t stackPos = freeValueSlots_.popCopy();
      MOZ_ASSERT(stackPos <= stackPushed_);
      masm.storeValue(loc->valueReg(),
                      Address(masm.getStackPointer(), stackPushed_ - stackPos));
      loc->setValueStack(stackPos);
      return;
    }
    stackPushed_ += sizeof(js::Value);
    masm.pushValue(loc->valueReg());
    loc->setValueStack(stackPushed_);
    return;
  }

  MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);

  if (!freePayloadSlots_.empty()) {
    uint32_t stackPos = freePayloadSlots_.popCopy();
    MOZ_ASSERT(stackPos <= stackPushed_);
    masm.storePtr(loc->payloadReg(),
                  Address(masm.getStackPointer(), stackPushed_ - stackPos));
    loc->setPayloadStack(stackPos, loc->payloadType());
    return;
  }
  stackPushed_ += sizeof(uintptr_t);
  masm.push(loc->payloadReg());
  loc->setPayloadStack(stackPushed_, loc->payloadType());
}

void CacheRegisterAllocator::spillOperandToStackOrRegister(
    MacroAssembler& masm, OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  // If enough registers are available, use them.
  if (loc->kind() == OperandLocation::ValueReg) {
    static const size_t BoxPieces = sizeof(Value) / sizeof(uintptr_t);
    if (availableRegs_.set().size() >= BoxPieces) {
      ValueOperand reg = availableRegs_.takeAnyValue();
      masm.moveValue(loc->valueReg(), reg);
      loc->setValueReg(reg);
      return;
    }
  } else {
    MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
    if (!availableRegs_.empty()) {
      Register reg = availableRegs_.takeAny();
      masm.movePtr(loc->payloadReg(), reg);
      loc->setPayloadReg(reg, loc->payloadType());
      return;
    }
  }

  // Not enough registers available, spill to the stack.
  spillOperandToStack(masm, loc);
}

void CacheRegisterAllocator::popPayload(MacroAssembler& masm,
                                        OperandLocation* loc, Register dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

  // The payload is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->payloadStack() == stackPushed_) {
    masm.pop(dest);
    stackPushed_ -= sizeof(uintptr_t);
  } else {
    MOZ_ASSERT(loc->payloadStack() < stackPushed_);
    masm.loadPtr(payloadAddress(masm, loc), dest);
    masm.propagateOOM(freePayloadSlots_.append(loc->payloadStack()));
  }

  loc->setPayloadReg(dest, loc->payloadType());
}

Address CacheRegisterAllocator::valueAddress(MacroAssembler& masm,
                                             const OperandLocation* loc) const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  return Address(masm.getStackPointer(), stackPushed_ - loc->valueStack());
}

Address CacheRegisterAllocator::payloadAddress(
    MacroAssembler& masm, const OperandLocation* loc) const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  return Address(masm.getStackPointer(), stackPushed_ - loc->payloadStack());
}

void CacheRegisterAllocator::popValue(MacroAssembler& masm,
                                      OperandLocation* loc, ValueOperand dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));

  // The Value is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->valueStack() == stackPushed_) {
    masm.popValue(dest);
    stackPushed_ -= sizeof(js::Value);
  } else {
    MOZ_ASSERT(loc->valueStack() < stackPushed_);
    masm.loadValue(
        Address(masm.getStackPointer(), stackPushed_ - loc->valueStack()),
        dest);
    masm.propagateOOM(freeValueSlots_.append(loc->valueStack()));
  }

  loc->setValueReg(dest);
}

#ifdef DEBUG
void CacheRegisterAllocator::assertValidState() const {
  // Assert different operands don't have aliasing storage. We depend on this
  // when spilling registers, for instance.

  if (!JitOptions.fullDebugChecks) {
    return;
  }

  for (size_t i = 0; i < operandLocations_.length(); i++) {
    const auto& loc1 = operandLocations_[i];
    if (loc1.isUninitialized()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      const auto& loc2 = operandLocations_[j];
      if (loc2.isUninitialized()) {
        continue;
      }
      MOZ_ASSERT(!loc1.aliasesReg(loc2));
    }
  }
}
#endif

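// Return true if this operand's register storage overlaps |other|'s.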
bool OperandLocation::aliasesReg(const OperandLocation& other) const {
  MOZ_ASSERT(&other != this);

  switch (other.kind_) {
    case PayloadReg:
      return aliasesReg(other.payloadReg());
    case ValueReg:
      return aliasesReg(other.valueReg());
    case PayloadStack:
    case ValueStack:
    case BaselineFrame:
    case Constant:
    case DoubleReg:
      return false;
    case Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}

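// Move all input operands back to their original locations before leaving the
// stub code, spilling to the stack to break cycles between source and
// destination registers.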
void CacheRegisterAllocator::restoreInputState(MacroAssembler& masm,
                                               bool shouldDiscardStack) {
  size_t numInputOperands = origInputLocations_.length();
  MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);

  for (size_t j = 0; j < numInputOperands; j++) {
    const OperandLocation& dest = origInputLocations_[j];
    OperandLocation& cur = operandLocations_[j];
    if (dest == cur) {
      continue;
    }

    auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });

    // We have a cycle if a destination register will be used later
    // as source register. If that happens, just push the current value
    // on the stack and later get it from there.
    for (size_t k = j + 1; k < numInputOperands; k++) {
      OperandLocation& laterSource = operandLocations_[k];
      if (dest.aliasesReg(laterSource)) {
        spillOperandToStack(masm, &laterSource);
      }
    }

    if (dest.kind() == OperandLocation::ValueReg) {
      // We have to restore a Value register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          masm.moveValue(cur.valueReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadReg:
          masm.tagValue(cur.payloadType(), cur.payloadReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadStack: {
          Register scratch = dest.valueReg().scratchReg();
          popPayload(masm, &cur, scratch);
          masm.tagValue(cur.payloadType(), scratch, dest.valueReg());
          continue;
        }
        case OperandLocation::ValueStack:
          popValue(masm, &cur, dest.valueReg());
          continue;
        case OperandLocation::DoubleReg:
          masm.boxDouble(cur.doubleReg(), dest.valueReg(), cur.doubleReg());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::PayloadReg) {
      // We have to restore a payload register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(cur.valueReg(), dest.payloadReg(),
                              dest.payloadType());
          continue;
        case OperandLocation::PayloadReg:
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          masm.mov(cur.payloadReg(), dest.payloadReg());
          continue;
        case OperandLocation::PayloadStack: {
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          popPayload(masm, &cur, dest.payloadReg());
          continue;
        }
        case OperandLocation::ValueStack:
          MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
          MOZ_ASSERT(cur.valueStack() <= stackPushed_);
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(
              Address(masm.getStackPointer(), stackPushed_ - cur.valueStack()),
              dest.payloadReg(), dest.payloadType());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::DoubleReg:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::Constant ||
               dest.kind() == OperandLocation::BaselineFrame ||
               dest.kind() == OperandLocation::DoubleReg) {
      // Nothing to do.
      continue;
    }

    MOZ_CRASH("Invalid kind");
  }

  for (const SpilledRegister& spill : spilledRegs_) {
    MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

    if (spill.stackPushed == stackPushed_) {
      masm.pop(spill.reg);
      stackPushed_ -= sizeof(uintptr_t);
    } else {
      MOZ_ASSERT(spill.stackPushed < stackPushed_);
      masm.loadPtr(
          Address(masm.getStackPointer(), stackPushed_ - spill.stackPushed),
          spill.reg);
    }
  }

  if (shouldDiscardStack) {
    discardStack(masm);
  }
}

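// Compute the size in bytes of this stub's data by summing the sizes of its
// field types up to the Limit terminator.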
size_t CacheIRStubInfo::stubDataSize() const {
  size_t field = 0;
  size_t size = 0;
  while (true) {
    StubField::Type type = fieldType(field++);
    if (type == StubField::Type::Limit) {
      return size;
    }
    size += StubField::sizeInBytes(type);
  }
}

template <typename T>
static GCPtr<T>* AsGCPtr(void* ptr) {
  return static_cast<GCPtr<T>*>(ptr);
}

void CacheIRStubInfo::replaceStubRawWord(uint8_t* stubData, uint32_t offset,
                                         uintptr_t oldWord,
                                         uintptr_t newWord) const {
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
  uintptr_t* addr = reinterpret_cast<uintptr_t*>(stubData + offset);
  MOZ_ASSERT(*addr == oldWord);
  *addr = newWord;
}

template <class Stub, StubField::Type type>
typename MapStubFieldToType<type>::WrappedType& CacheIRStubInfo::getStubField(
    Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  return *reinterpret_cast<WrappedType*>(stubData + offset);
}

#define INSTANTIATE_GET_STUB_FIELD(Type)                                   \
  template typename MapStubFieldToType<Type>::WrappedType&                 \
  CacheIRStubInfo::getStubField<ICCacheIRStub, Type>(ICCacheIRStub * stub, \
                                                     uint32_t offset) const;
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Shape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakShape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakGetterSetter)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::JSObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Symbol)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::String)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakBaseScript)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Value)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Id)
#undef INSTANTIATE_GET_STUB_FIELD

template <class Stub, class T>
T* CacheIRStubInfo::getPtrStubField(Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  return *reinterpret_cast<T**>(stubData + offset);
}

template gc::AllocSite* CacheIRStubInfo::getPtrStubField(ICCacheIRStub* stub,
                                                         uint32_t offset) const;

template <StubField::Type type, typename V>
static void InitWrappedPtr(void* ptr, V val) {
  using RawType = typename MapStubFieldToType<type>::RawType;
  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  auto* wrapped = static_cast<WrappedType*>(ptr);
  new (wrapped) WrappedType(mozilla::BitwiseCast<RawType>(val));
}

static void InitWordStubField(StubField::Type type, void* dest,
                              uintptr_t value) {
  MOZ_ASSERT(StubField::sizeIsWord(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uintptr_t)) == 0,
             "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
      *static_cast<uintptr_t*>(dest) = value;
      break;
    case StubField::Type::Shape:
      InitWrappedPtr<StubField::Type::Shape>(dest, value);
      break;
    case StubField::Type::WeakShape:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakShape>(dest, value);
      break;
    case StubField::Type::WeakGetterSetter:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakGetterSetter>(dest, value);
      break;
    case StubField::Type::JSObject:
      InitWrappedPtr<StubField::Type::JSObject>(dest, value);
      break;
    case StubField::Type::WeakObject:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakObject>(dest, value);
      break;
    case StubField::Type::Symbol:
      InitWrappedPtr<StubField::Type::Symbol>(dest, value);
      break;
    case StubField::Type::String:
      InitWrappedPtr<StubField::Type::String>(dest, value);
      break;
    case StubField::Type::WeakBaseScript:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakBaseScript>(dest, value);
      break;
    case StubField::Type::JitCode:
      InitWrappedPtr<StubField::Type::JitCode>(dest, value);
      break;
    case StubField::Type::Id:
      AsGCPtr<jsid>(dest)->init(jsid::fromRawBits(value));
      break;
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
    case StubField::Type::Value:
    case StubField::Type::Limit:
      MOZ_CRASH("Invalid type");
  }
}

static void InitInt64StubField(StubField::Type type, void* dest,
                               uint64_t value) {
  MOZ_ASSERT(StubField::sizeIsInt64(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uint64_t)) == 0, "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
      *static_cast<uint64_t*>(dest) = value;
      break;
    case StubField::Type::Value:
      AsGCPtr<Value>(dest)->init(Value::fromRawBits(value));
      break;
    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
    case StubField::Type::Shape:
    case StubField::Type::WeakShape:
    case StubField::Type::WeakGetterSetter:
    case StubField::Type::JSObject:
    case StubField::Type::WeakObject:
    case StubField::Type::Symbol:
    case StubField::Type::String:
    case StubField::Type::WeakBaseScript:
    case StubField::Type::JitCode:
    case StubField::Type::Id:
    case StubField::Type::Limit:
      MOZ_CRASH("Invalid type");
  }
}

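// Copy the stub fields collected by this writer into a stub's data area.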
void CacheIRWriter::copyStubData(uint8_t* dest) const {
  MOZ_ASSERT(!failed());

  for (const StubField& field : stubFields_) {
    if (field.sizeIsWord()) {
      InitWordStubField(field.type(), dest, field.asWord());
      dest += sizeof(uintptr_t);
    } else {
      InitInt64StubField(field.type(), dest, field.asInt64());
      dest += sizeof(uint64_t);
    }
  }
}

ICCacheIRStub* ICCacheIRStub::clone(JSRuntime* rt, ICStubSpace& newSpace) {
  const CacheIRStubInfo* info = stubInfo();
  MOZ_ASSERT(info->makesGCCalls());

  size_t bytesNeeded = info->stubDataOffset() + info->stubDataSize();

  AutoEnterOOMUnsafeRegion oomUnsafe;
  void* newStubMem = newSpace.alloc(bytesNeeded);
  if (!newStubMem) {
    oomUnsafe.crash("ICCacheIRStub::clone");
  }

  ICCacheIRStub* newStub = new (newStubMem) ICCacheIRStub(*this);

  const uint8_t* src = this->stubDataStart();
  uint8_t* dest = newStub->stubDataStart();

  // Because this can be called during sweeping when discarding JIT code, we
  // have to lock the store buffer.
  gc::AutoLockStoreBuffer lock(rt);

  uint32_t field = 0;
  while (true) {
    StubField::Type type = info->fieldType(field);
    if (type == StubField::Type::Limit) {
      break;  // Done.
    }

    if (StubField::sizeIsWord(type)) {
      const uintptr_t* srcField = reinterpret_cast<const uintptr_t*>(src);
      InitWordStubField(type, dest, *srcField);
      src += sizeof(uintptr_t);
      dest += sizeof(uintptr_t);
    } else {
      const uint64_t* srcField = reinterpret_cast<const uint64_t*>(src);
      InitInt64StubField(type, dest, *srcField);
      src += sizeof(uint64_t);
      dest += sizeof(uint64_t);
    }

    field++;
  }

  return newStub;
}

template <typename T>
static inline bool ShouldTraceWeakEdgeInStub(JSTracer* trc) {
  if constexpr (std::is_same_v<T, IonICStub>) {
    // 'Weak' edges are traced strongly in IonICs.
    return true;
  } else {
    static_assert(std::is_same_v<T, ICCacheIRStub>);
    return trc->traceWeakEdges();
  }
}

template <typename T>
void jit::TraceCacheIRStub(JSTracer* trc, T* stub,
                           const CacheIRStubInfo* stubInfo) {
  using Type = StubField::Type;

  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case Type::RawInt32:
      case Type::RawPointer:
      case Type::RawInt64:
      case Type::Double:
        break;
      case Type::Shape: {
        // For CCW IC stubs, we can store same-zone but cross-compartment
        // shapes. Use TraceSameZoneCrossCompartmentEdge to not assert in the
        // GC. Note: CacheIRWriter::writeShapeField asserts we never store
        // cross-zone shapes.
        GCPtr<Shape*>& shapeField =
            stubInfo->getStubField<T, Type::Shape>(stub, offset);
        TraceSameZoneCrossCompartmentEdge(trc, &shapeField, "cacheir-shape");
        break;
      }
      case Type::WeakShape:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          WeakHeapPtr<Shape*>& shapeField =
              stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
          if (shapeField) {
            TraceSameZoneCrossCompartmentEdge(trc, &shapeField,
                                              "cacheir-weak-shape");
          }
        }
        break;
      case Type::WeakGetterSetter:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc,
              &stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset),
              "cacheir-weak-getter-setter");
        }
        break;
      case Type::JSObject: {
        TraceEdge(trc, &stubInfo->getStubField<T, Type::JSObject>(stub, offset),
                  "cacheir-object");
        break;
      }
      case Type::WeakObject:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc, &stubInfo->getStubField<T, Type::WeakObject>(stub, offset),
              "cacheir-weak-object");
        }
        break;
      case Type::Symbol:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Symbol>(stub, offset),
                  "cacheir-symbol");
        break;
      case Type::String:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::String>(stub, offset),
                  "cacheir-string");
        break;
      case Type::WeakBaseScript:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc,
              &stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset),
              "cacheir-weak-script");
        }
        break;
      case Type::JitCode:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::JitCode>(stub, offset),
                  "cacheir-jitcode");
        break;
      case Type::Id:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Id>(stub, offset),
                  "cacheir-id");
        break;
      case Type::Value:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Value>(stub, offset),
                  "cacheir-value");
        break;
      case Type::AllocSite: {
        gc::AllocSite* site =
            stubInfo->getPtrStubField<T, gc::AllocSite>(stub, offset);
        site->trace(trc);
        break;
      }
      case Type::Limit:
        return;  // Done.
    }

    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template void jit::TraceCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                    const CacheIRStubInfo* stubInfo);

template void jit::TraceCacheIRStub(JSTracer* trc, IonICStub* stub,
                                    const CacheIRStubInfo* stubInfo);

template <typename T>
bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
                               const CacheIRStubInfo* stubInfo) {
  using Type = StubField::Type;

  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case Type::WeakShape: {
        WeakHeapPtr<Shape*>& shapeField =
            stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
        auto r = TraceWeakEdge(trc, &shapeField, "cacheir-weak-shape");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::WeakObject: {
        WeakHeapPtr<JSObject*>& objectField =
            stubInfo->getStubField<T, Type::WeakObject>(stub, offset);
        auto r = TraceWeakEdge(trc, &objectField, "cacheir-weak-object");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::WeakBaseScript: {
        WeakHeapPtr<BaseScript*>& scriptField =
            stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset);
        auto r = TraceWeakEdge(trc, &scriptField, "cacheir-weak-script");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::WeakGetterSetter: {
        WeakHeapPtr<GetterSetter*>& getterSetterField =
            stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset);
        auto r = TraceWeakEdge(trc, &getterSetterField,
                               "cacheir-weak-getter-setter");
        if (r.isDead()) {
          return false;
        }
        break;
      }
      case Type::Limit:
        return true;  // Done.
      case Type::RawInt32:
      case Type::RawPointer:
      case Type::Shape:
      case Type::JSObject:
      case Type::Symbol:
      case Type::String:
      case Type::JitCode:
      case Type::Id:
      case Type::AllocSite:
      case Type::RawInt64:
      case Type::Value:
      case Type::Double:
        break;  // Skip non-weak fields.
    }

    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template bool jit::TraceWeakCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                        const CacheIRStubInfo* stubInfo);

template bool jit::TraceWeakCacheIRStub(JSTracer* trc, IonICStub* stub,
                                        const CacheIRStubInfo* stubInfo);

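// Return true if the writer's stub fields match |stubData| bit for bit.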
bool CacheIRWriter::stubDataEquals(const uint8_t* stubData) const {
  MOZ_ASSERT(!failed());

  const uintptr_t* stubDataWords = reinterpret_cast<const uintptr_t*>(stubData);

  for (const StubField& field : stubFields_) {
    if (field.sizeIsWord()) {
      if (field.asWord() != *stubDataWords) {
        return false;
      }
      stubDataWords++;
      continue;
    }

    if (field.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords)) {
      return false;
    }
    stubDataWords += sizeof(uint64_t) / sizeof(uintptr_t);
  }

  return true;
}

bool CacheIRWriter::stubDataEqualsIgnoring(const uint8_t* stubData,
                                           uint32_t ignoreOffset) const {
  MOZ_ASSERT(!failed());

  uint32_t offset = 0;
  for (const StubField& field : stubFields_) {
    if (offset != ignoreOffset) {
      if (field.sizeIsWord()) {
        uintptr_t raw = *reinterpret_cast<const uintptr_t*>(stubData + offset);
        if (field.asWord() != raw) {
          return false;
        }
      } else {
        uint64_t raw = *reinterpret_cast<const uint64_t*>(stubData + offset);
        if (field.asInt64() != raw) {
          return false;
        }
      }
    }
    offset += StubField::sizeInBytes(field.type());
  }

  return true;
}

HashNumber CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l) {
  HashNumber hash = mozilla::HashBytes(l.code, l.length);
  hash = mozilla::AddToHash(hash, uint32_t(l.kind));
  hash = mozilla::AddToHash(hash, uint32_t(l.engine));
  return hash;
}

bool CacheIRStubKey::match(const CacheIRStubKey& entry,
                           const CacheIRStubKey::Lookup& l) {
  if (entry.stubInfo->kind() != l.kind) {
    return false;
  }

  if (entry.stubInfo->engine() != l.engine) {
    return false;
  }

  if (entry.stubInfo->codeLength() != l.length) {
    return false;
  }

  if (!mozilla::ArrayEqual(entry.stubInfo->code(), l.code, l.length)) {
    return false;
  }

  return true;
}

CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
    : CacheIRReader(stubInfo->code(),
                    stubInfo->code() + stubInfo->codeLength()) {}

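// Allocate a CacheIRStubInfo. The CacheIR code and the stub field types are
// stored inline, directly after the CacheIRStubInfo header.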
CacheIRStubInfo* CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine,
                                      bool makesGCCalls,
                                      uint32_t stubDataOffset,
                                      const CacheIRWriter& writer) {
  size_t numStubFields = writer.numStubFields();
  size_t bytesNeeded =
      sizeof(CacheIRStubInfo) + writer.codeLength() +
      (numStubFields + 1);  // +1 for the GCType::Limit terminator.
  uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
  if (!p) {
    return nullptr;
  }

  // Copy the CacheIR code.
  uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
  mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());

  static_assert(sizeof(StubField::Type) == sizeof(uint8_t),
                "StubField::Type must fit in uint8_t");

  // Copy the stub field types.
  uint8_t* fieldTypes = codeStart + writer.codeLength();
  for (size_t i = 0; i < numStubFields; i++) {
    fieldTypes[i] = uint8_t(writer.stubFieldType(i));
  }
  fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);

  return new (p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset,
                                 writer.codeLength());
}

bool OperandLocation::operator==(const OperandLocation& other) const {
  if (kind_ != other.kind_) {
    return false;
  }

  switch (kind()) {
    case Uninitialized:
      return true;
    case PayloadReg:
      return payloadReg() == other.payloadReg() &&
             payloadType() == other.payloadType();
    case ValueReg:
      return valueReg() == other.valueReg();
    case PayloadStack:
      return payloadStack() == other.payloadStack() &&
             payloadType() == other.payloadType();
    case ValueStack:
      return valueStack() == other.valueStack();
    case BaselineFrame:
      return baselineFrameSlot() == other.baselineFrameSlot();
    case Constant:
      return constant() == other.constant();
    case DoubleReg:
      return doubleReg() == other.doubleReg();
  }

  MOZ_CRASH("Invalid OperandLocation kind");
}

AutoOutputRegister::AutoOutputRegister(CacheIRCompiler& compiler)
    : output_(compiler.outputUnchecked_.ref()), alloc_(compiler.allocator) {
  if (output_.hasValue()) {
    alloc_.allocateFixedValueRegister(compiler.masm, output_.valueReg());
  } else if (!output_.typedReg().isFloat()) {
    alloc_.allocateFixedRegister(compiler.masm, output_.typedReg().gpr());
  }
}

AutoOutputRegister::~AutoOutputRegister() {
  if (output_.hasValue()) {
    alloc_.releaseValueRegister(output_.valueReg());
  } else if (!output_.typedReg().isFloat()) {
    alloc_.releaseRegister(output_.typedReg().gpr());
  }
}

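// Two failure paths can share code only if they have the same stack depth,
// spilled registers, and input operand locations.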
bool FailurePath::canShareFailurePath(const FailurePath& other) const {
  if (stackPushed_ != other.stackPushed_) {
    return false;
  }

  if (spilledRegs_.length() != other.spilledRegs_.length()) {
    return false;
  }

  for (size_t i = 0; i < spilledRegs_.length(); i++) {
    if (spilledRegs_[i] != other.spilledRegs_[i]) {
      return false;
    }
  }

  MOZ_ASSERT(inputs_.length() == other.inputs_.length());

  for (size_t i = 0; i < inputs_.length(); i++) {
    if (inputs_[i] != other.inputs_[i]) {
      return false;
    }
  }
  return true;
}

bool CacheIRCompiler::addFailurePath(FailurePath** failure) {
#ifdef DEBUG
  allocator.setAddedFailurePath();
#endif
  MOZ_ASSERT(!allocator.hasAutoScratchFloatRegisterSpill());

  FailurePath newFailure;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    if (!newFailure.appendInput(allocator.operandLocation(i))) {
      return false;
    }
  }
  if (!newFailure.setSpilledRegs(allocator.spilledRegs())) {
    return false;
  }
  newFailure.setStackPushed(allocator.stackPushed());

  // Reuse the previous failure path if the current one is the same, to
  // avoid emitting duplicate code.
  if (failurePaths.length() > 0 &&
      failurePaths.back().canShareFailurePath(newFailure)) {
    *failure = &failurePaths.back();
    return true;
  }

  if (!failurePaths.append(std::move(newFailure))) {
    return false;
  }

  *failure = &failurePaths.back();
  return true;
}

bool CacheIRCompiler::emitFailurePath(size_t index) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  FailurePath& failure = failurePaths[index];

  allocator.setStackPushed(failure.stackPushed());

  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    allocator.setOperandLocation(i, failure.input(i));
  }

  if (!allocator.setSpilledRegs(failure.spilledRegs())) {
    return false;
  }

  masm.bind(failure.label());
  allocator.restoreInputState(masm);
  return true;
}

1669 bool CacheIRCompiler::emitGuardIsNumber(ValOperandId inputId) {
1670 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1671 JSValueType knownType = allocator.knownType(inputId);
1673 // Doubles and ints are numbers!
1674 if (knownType == JSVAL_TYPE_DOUBLE || knownType == JSVAL_TYPE_INT32) {
1675 return true;
1678 ValueOperand input = allocator.useValueRegister(masm, inputId);
1679 FailurePath* failure;
1680 if (!addFailurePath(&failure)) {
1681 return false;
1684 masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
1685 return true;
1688 bool CacheIRCompiler::emitGuardToObject(ValOperandId inputId) {
1689 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1690 if (allocator.knownType(inputId) == JSVAL_TYPE_OBJECT) {
1691 return true;
1694 ValueOperand input = allocator.useValueRegister(masm, inputId);
1695 FailurePath* failure;
1696 if (!addFailurePath(&failure)) {
1697 return false;
1699 masm.branchTestObject(Assembler::NotEqual, input, failure->label());
1700 return true;
1703 bool CacheIRCompiler::emitGuardIsNullOrUndefined(ValOperandId inputId) {
1704 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1705 JSValueType knownType = allocator.knownType(inputId);
1706 if (knownType == JSVAL_TYPE_UNDEFINED || knownType == JSVAL_TYPE_NULL) {
1707 return true;
1710 ValueOperand input = allocator.useValueRegister(masm, inputId);
1711 FailurePath* failure;
1712 if (!addFailurePath(&failure)) {
1713 return false;
1716 Label success;
1717 masm.branchTestNull(Assembler::Equal, input, &success);
1718 masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
1720 masm.bind(&success);
1721 return true;
1724 bool CacheIRCompiler::emitGuardIsNull(ValOperandId inputId) {
1725 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1726 JSValueType knownType = allocator.knownType(inputId);
1727 if (knownType == JSVAL_TYPE_NULL) {
1728 return true;
1731 ValueOperand input = allocator.useValueRegister(masm, inputId);
1732 FailurePath* failure;
1733 if (!addFailurePath(&failure)) {
1734 return false;
1737 masm.branchTestNull(Assembler::NotEqual, input, failure->label());
1738 return true;
1741 bool CacheIRCompiler::emitGuardIsUndefined(ValOperandId inputId) {
1742 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1743 JSValueType knownType = allocator.knownType(inputId);
1744 if (knownType == JSVAL_TYPE_UNDEFINED) {
1745 return true;
1748 ValueOperand input = allocator.useValueRegister(masm, inputId);
1749 FailurePath* failure;
1750 if (!addFailurePath(&failure)) {
1751 return false;
1754 masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
1755 return true;
1758 bool CacheIRCompiler::emitGuardIsNotUninitializedLexical(ValOperandId valId) {
1759 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1761 ValueOperand val = allocator.useValueRegister(masm, valId);
1763 FailurePath* failure;
1764 if (!addFailurePath(&failure)) {
1765 return false;
1768 masm.branchTestMagicValue(Assembler::Equal, val, JS_UNINITIALIZED_LEXICAL,
1769 failure->label());
1770 return true;
1773 bool CacheIRCompiler::emitGuardBooleanToInt32(ValOperandId inputId,
1774 Int32OperandId resultId) {
1775 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1776 Register output = allocator.defineRegister(masm, resultId);
1778 if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
1779 Register input =
1780 allocator.useRegister(masm, BooleanOperandId(inputId.id()));
1781 masm.move32(input, output);
1782 return true;
1784 ValueOperand input = allocator.useValueRegister(masm, inputId);
1786 FailurePath* failure;
1787 if (!addFailurePath(&failure)) {
1788 return false;
1791 masm.fallibleUnboxBoolean(input, output, failure->label());
1792 return true;
1795 bool CacheIRCompiler::emitGuardToString(ValOperandId inputId) {
1796 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1797 if (allocator.knownType(inputId) == JSVAL_TYPE_STRING) {
1798 return true;
1801 ValueOperand input = allocator.useValueRegister(masm, inputId);
1802 FailurePath* failure;
1803 if (!addFailurePath(&failure)) {
1804 return false;
1806 masm.branchTestString(Assembler::NotEqual, input, failure->label());
1807 return true;
1810 bool CacheIRCompiler::emitGuardToSymbol(ValOperandId inputId) {
1811 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1812 if (allocator.knownType(inputId) == JSVAL_TYPE_SYMBOL) {
1813 return true;
1816 ValueOperand input = allocator.useValueRegister(masm, inputId);
1817 FailurePath* failure;
1818 if (!addFailurePath(&failure)) {
1819 return false;
1821 masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
1822 return true;
1825 bool CacheIRCompiler::emitGuardToBigInt(ValOperandId inputId) {
1826 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1827 if (allocator.knownType(inputId) == JSVAL_TYPE_BIGINT) {
1828 return true;
1831 ValueOperand input = allocator.useValueRegister(masm, inputId);
1832 FailurePath* failure;
1833 if (!addFailurePath(&failure)) {
1834 return false;
1836 masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
1837 return true;
1840 bool CacheIRCompiler::emitGuardToBoolean(ValOperandId inputId) {
1841 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1843 if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
1844 return true;
1847 ValueOperand input = allocator.useValueRegister(masm, inputId);
1848 FailurePath* failure;
1849 if (!addFailurePath(&failure)) {
1850 return false;
1852 masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
1853 return true;
1856 bool CacheIRCompiler::emitGuardToInt32(ValOperandId inputId) {
1857 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1859 if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
1860 return true;
1863 ValueOperand input = allocator.useValueRegister(masm, inputId);
1865 FailurePath* failure;
1866 if (!addFailurePath(&failure)) {
1867 return false;
1870 masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
1871 return true;
1874 bool CacheIRCompiler::emitGuardToNonGCThing(ValOperandId inputId) {
1875 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1877 ValueOperand input = allocator.useValueRegister(masm, inputId);
1879 FailurePath* failure;
1880 if (!addFailurePath(&failure)) {
1881 return false;
1884 masm.branchTestGCThing(Assembler::Equal, input, failure->label());
1885 return true;
1888 // Infallible |emitDouble| emitters can use this implementation to avoid
1889 // generating extra clean-up instructions to restore the scratch float register.
1890 // To select this overload, simply omit the |Label* fail| parameter from the
1891 // emitter lambda function.
1892 template <typename EmitDouble>
1893 static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 1,
1894 void>
1895 EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
1896 ValueOperand input, FailurePath* failure,
1897 EmitDouble emitDouble) {
1898 AutoScratchFloatRegister floatReg(compiler);
1900 masm.unboxDouble(input, floatReg);
1901 emitDouble(floatReg.get());
1904 template <typename EmitDouble>
1905 static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 2,
1906 void>
1907 EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
1908 ValueOperand input, FailurePath* failure,
1909 EmitDouble emitDouble) {
1910 AutoScratchFloatRegister floatReg(compiler, failure);
1912 masm.unboxDouble(input, floatReg);
1913 emitDouble(floatReg.get(), floatReg.failure());
1916 template <typename EmitInt32, typename EmitDouble>
1917 static void EmitGuardInt32OrDouble(CacheIRCompiler* compiler,
1918 MacroAssembler& masm, ValueOperand input,
1919 Register output, FailurePath* failure,
1920 EmitInt32 emitInt32, EmitDouble emitDouble) {
1921 Label done;
1923 {
1924 ScratchTagScope tag(masm, input);
1925 masm.splitTagForTest(input, tag);
1927 Label notInt32;
1928 masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
1929 {
1930 ScratchTagScopeRelease _(&tag);
1932 masm.unboxInt32(input, output);
1933 emitInt32();
1935 masm.jump(&done);
1936 }
1937 masm.bind(&notInt32);
1939 masm.branchTestDouble(Assembler::NotEqual, tag, failure->label());
1940 {
1941 ScratchTagScopeRelease _(&tag);
1943 EmitGuardDouble(compiler, masm, input, failure, emitDouble);
1944 }
1945 }
1947 masm.bind(&done);
1948 }
1950 bool CacheIRCompiler::emitGuardToInt32Index(ValOperandId inputId,
1951 Int32OperandId resultId) {
1952 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1953 Register output = allocator.defineRegister(masm, resultId);
1955 if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
1956 Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
1957 masm.move32(input, output);
1958 return true;
1961 ValueOperand input = allocator.useValueRegister(masm, inputId);
1963 FailurePath* failure;
1964 if (!addFailurePath(&failure)) {
1965 return false;
1968 EmitGuardInt32OrDouble(
1969 this, masm, input, output, failure,
1970 []() {
1971 // No-op if the value is already an int32.
1973 [&](FloatRegister floatReg, Label* fail) {
1974 // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
1975 masm.convertDoubleToInt32(floatReg, output, fail, false);
1978 return true;
1981 bool CacheIRCompiler::emitInt32ToIntPtr(Int32OperandId inputId,
1982 IntPtrOperandId resultId) {
1983 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1985 Register input = allocator.useRegister(masm, inputId);
1986 Register output = allocator.defineRegister(masm, resultId);
1988 masm.move32SignExtendToPtr(input, output);
1989 return true;
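// Note for the guard below: when |supportOOB| is true there is no failure
// path. A non-integer index is instead replaced with -1, which is out of
// bounds for any object, so the subsequent bounds check rejects it.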
1992 bool CacheIRCompiler::emitGuardNumberToIntPtrIndex(NumberOperandId inputId,
1993 bool supportOOB,
1994 IntPtrOperandId resultId) {
1995 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1997 Register output = allocator.defineRegister(masm, resultId);
1999 FailurePath* failure = nullptr;
2000 if (!supportOOB) {
2001 if (!addFailurePath(&failure)) {
2002 return false;
2006 AutoScratchFloatRegister floatReg(this, failure);
2007 allocator.ensureDoubleRegister(masm, inputId, floatReg);
2009 // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
2010 if (supportOOB) {
2011 Label done, fail;
2012 masm.convertDoubleToPtr(floatReg, output, &fail, false);
2013 masm.jump(&done);
2015 // Substitute the invalid index with an arbitrary out-of-bounds index.
2016 masm.bind(&fail);
2017 masm.movePtr(ImmWord(-1), output);
2019 masm.bind(&done);
2020 } else {
2021 masm.convertDoubleToPtr(floatReg, output, floatReg.failure(), false);
2024 return true;
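// The next guard converts with ToInt32/ToUint32-style modular semantics:
// out-of-range doubles wrap modulo 2^32 instead of failing (e.g. 4294967296.0
// maps to 0). branchTruncateDoubleMaybeModUint32 may still bail out to the
// failure path for inputs it cannot truncate inline.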
2027 bool CacheIRCompiler::emitGuardToInt32ModUint32(ValOperandId inputId,
2028 Int32OperandId resultId) {
2029 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2030 Register output = allocator.defineRegister(masm, resultId);
2032 if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
2033 ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
2034 if (input.constant()) {
2035 masm.move32(Imm32(input.value().toInt32()), output);
2036 } else {
2037 MOZ_ASSERT(input.reg().type() == MIRType::Int32);
2038 masm.move32(input.reg().typedReg().gpr(), output);
2040 return true;
2043 ValueOperand input = allocator.useValueRegister(masm, inputId);
2045 FailurePath* failure;
2046 if (!addFailurePath(&failure)) {
2047 return false;
2050 EmitGuardInt32OrDouble(
2051 this, masm, input, output, failure,
2052 []() {
2053 // No-op if the value is already an int32.
2055 [&](FloatRegister floatReg, Label* fail) {
2056 masm.branchTruncateDoubleMaybeModUint32(floatReg, output, fail);
2059 return true;
2062 bool CacheIRCompiler::emitGuardToUint8Clamped(ValOperandId inputId,
2063 Int32OperandId resultId) {
2064 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2065 Register output = allocator.defineRegister(masm, resultId);
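// Uint8Clamped clamps rather than wraps: negatives become 0, values above
// 255 become 255, and doubles round to the nearest integer (ties to even)
// before clamping.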
2067 if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
2068 ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
2069 if (input.constant()) {
2070 masm.move32(Imm32(ClampDoubleToUint8(input.value().toInt32())), output);
2071 } else {
2072 MOZ_ASSERT(input.reg().type() == MIRType::Int32);
2073 masm.move32(input.reg().typedReg().gpr(), output);
2074 masm.clampIntToUint8(output);
2076 return true;
2079 ValueOperand input = allocator.useValueRegister(masm, inputId);
2081 FailurePath* failure;
2082 if (!addFailurePath(&failure)) {
2083 return false;
2086 EmitGuardInt32OrDouble(
2087 this, masm, input, output, failure,
2088 [&]() {
2089 // |output| holds the unboxed int32 value.
2090 masm.clampIntToUint8(output);
2092 [&](FloatRegister floatReg) {
2093 masm.clampDoubleToUint8(floatReg, output);
2096 return true;
2099 bool CacheIRCompiler::emitGuardNonDoubleType(ValOperandId inputId,
2100 ValueType type) {
2101 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2103 if (allocator.knownType(inputId) == JSValueType(type)) {
2104 return true;
2107 ValueOperand input = allocator.useValueRegister(masm, inputId);
2109 FailurePath* failure;
2110 if (!addFailurePath(&failure)) {
2111 return false;
2114 switch (type) {
2115 case ValueType::String:
2116 masm.branchTestString(Assembler::NotEqual, input, failure->label());
2117 break;
2118 case ValueType::Symbol:
2119 masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
2120 break;
2121 case ValueType::BigInt:
2122 masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
2123 break;
2124 case ValueType::Int32:
2125 masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
2126 break;
2127 case ValueType::Boolean:
2128 masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
2129 break;
2130 case ValueType::Undefined:
2131 masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
2132 break;
2133 case ValueType::Null:
2134 masm.branchTestNull(Assembler::NotEqual, input, failure->label());
2135 break;
2136 case ValueType::Double:
2137 case ValueType::Magic:
2138 case ValueType::PrivateGCThing:
2139 case ValueType::Object:
2140 #ifdef ENABLE_RECORD_TUPLE
2141 case ValueType::ExtendedPrimitive:
2142 #endif
2143 MOZ_CRASH("unexpected type");
2146 return true;
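// GuardClass: JSFunction is handled separately below because functions do
// not all share a single JSClass, so the guard tests 'is a function' directly
// instead of comparing a class pointer.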
2149 bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
2150 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2151 Register obj = allocator.useRegister(masm, objId);
2152 AutoScratchRegister scratch(allocator, masm);
2154 FailurePath* failure;
2155 if (!addFailurePath(&failure)) {
2156 return false;
2159 if (kind == GuardClassKind::JSFunction) {
2160 if (objectGuardNeedsSpectreMitigations(objId)) {
2161 masm.branchTestObjIsFunction(Assembler::NotEqual, obj, scratch, obj,
2162 failure->label());
2163 } else {
2164 masm.branchTestObjIsFunctionNoSpectreMitigations(
2165 Assembler::NotEqual, obj, scratch, failure->label());
2167 return true;
2170 const JSClass* clasp = nullptr;
2171 switch (kind) {
2172 case GuardClassKind::Array:
2173 clasp = &ArrayObject::class_;
2174 break;
2175 case GuardClassKind::PlainObject:
2176 clasp = &PlainObject::class_;
2177 break;
2178 case GuardClassKind::FixedLengthArrayBuffer:
2179 clasp = &FixedLengthArrayBufferObject::class_;
2180 break;
2181 case GuardClassKind::FixedLengthSharedArrayBuffer:
2182 clasp = &FixedLengthSharedArrayBufferObject::class_;
2183 break;
2184 case GuardClassKind::FixedLengthDataView:
2185 clasp = &FixedLengthDataViewObject::class_;
2186 break;
2187 case GuardClassKind::MappedArguments:
2188 clasp = &MappedArgumentsObject::class_;
2189 break;
2190 case GuardClassKind::UnmappedArguments:
2191 clasp = &UnmappedArgumentsObject::class_;
2192 break;
2193 case GuardClassKind::WindowProxy:
2194 clasp = cx_->runtime()->maybeWindowProxyClass();
2195 break;
2196 case GuardClassKind::Set:
2197 clasp = &SetObject::class_;
2198 break;
2199 case GuardClassKind::Map:
2200 clasp = &MapObject::class_;
2201 break;
2202 case GuardClassKind::BoundFunction:
2203 clasp = &BoundFunctionObject::class_;
2204 break;
2205 case GuardClassKind::JSFunction:
2206 MOZ_CRASH("JSFunction handled before switch");
2208 MOZ_ASSERT(clasp);
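// With Spectre mitigations the class check also zeroes |obj| when the guard
// fails, so speculatively executed code after the branch sees a null object
// rather than one of the wrong class.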
2210 if (objectGuardNeedsSpectreMitigations(objId)) {
2211 masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
2212 failure->label());
2213 } else {
2214 masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
2215 scratch, failure->label());
2218 return true;
2221 bool CacheIRCompiler::emitGuardNullProto(ObjOperandId objId) {
2222 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2223 Register obj = allocator.useRegister(masm, objId);
2224 AutoScratchRegister scratch(allocator, masm);
2226 FailurePath* failure;
2227 if (!addFailurePath(&failure)) {
2228 return false;
2231 masm.loadObjProto(obj, scratch);
2232 masm.branchTestPtr(Assembler::NonZero, scratch, scratch, failure->label());
2233 return true;
2236 bool CacheIRCompiler::emitGuardIsExtensible(ObjOperandId objId) {
2237 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2238 Register obj = allocator.useRegister(masm, objId);
2239 AutoScratchRegister scratch(allocator, masm);
2241 FailurePath* failure;
2242 if (!addFailurePath(&failure)) {
2243 return false;
2246 masm.branchIfObjectNotExtensible(obj, scratch, failure->label());
2247 return true;
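// The slot guards below load both the slot offset and the expected value
// from stub fields rather than baking them into the code, so one compiled
// stub body can be shared across stubs that differ only in those fields.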
2250 bool CacheIRCompiler::emitGuardDynamicSlotIsSpecificObject(
2251 ObjOperandId objId, ObjOperandId expectedId, uint32_t slotOffset) {
2252 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2253 Register obj = allocator.useRegister(masm, objId);
2254 Register expectedObject = allocator.useRegister(masm, expectedId);
2256 // Allocate registers before the failure path to make sure they're registered
2257 // by addFailurePath.
2258 AutoScratchRegister scratch1(allocator, masm);
2259 AutoScratchRegister scratch2(allocator, masm);
2261 FailurePath* failure;
2262 if (!addFailurePath(&failure)) {
2263 return false;
2266 // Guard on the expected object.
2267 StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
2268 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
2269 emitLoadStubField(slot, scratch2);
2270 BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
2271 masm.fallibleUnboxObject(expectedSlot, scratch1, failure->label());
2272 masm.branchPtr(Assembler::NotEqual, expectedObject, scratch1,
2273 failure->label());
2275 return true;
2278 bool CacheIRCompiler::emitGuardDynamicSlotIsNotObject(ObjOperandId objId,
2279 uint32_t slotOffset) {
2280 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2281 Register obj = allocator.useRegister(masm, objId);
2283 AutoScratchRegister scratch1(allocator, masm);
2284 AutoScratchRegister scratch2(allocator, masm);
2286 FailurePath* failure;
2287 if (!addFailurePath(&failure)) {
2288 return false;
2291 // Guard that the slot isn't an object.
2292 StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
2293 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
2294 emitLoadStubField(slot, scratch2);
2295 BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
2296 masm.branchTestObject(Assembler::Equal, expectedSlot, failure->label());
2298 return true;
2301 bool CacheIRCompiler::emitGuardFixedSlotValue(ObjOperandId objId,
2302 uint32_t offsetOffset,
2303 uint32_t valOffset) {
2304 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2306 Register obj = allocator.useRegister(masm, objId);
2308 AutoScratchRegister scratch(allocator, masm);
2309 AutoScratchValueRegister scratchVal(allocator, masm);
2311 FailurePath* failure;
2312 if (!addFailurePath(&failure)) {
2313 return false;
2316 StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
2317 emitLoadStubField(offset, scratch);
2319 StubFieldOffset val(valOffset, StubField::Type::Value);
2320 emitLoadValueStubField(val, scratchVal);
2322 BaseIndex slotVal(obj, scratch, TimesOne);
2323 masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
2324 failure->label());
2325 return true;
2328 bool CacheIRCompiler::emitGuardDynamicSlotValue(ObjOperandId objId,
2329 uint32_t offsetOffset,
2330 uint32_t valOffset) {
2331 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2333 Register obj = allocator.useRegister(masm, objId);
2335 AutoScratchRegister scratch1(allocator, masm);
2336 AutoScratchRegister scratch2(allocator, masm);
2337 AutoScratchValueRegister scratchVal(allocator, masm);
2339 FailurePath* failure;
2340 if (!addFailurePath(&failure)) {
2341 return false;
2344 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
2346 StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
2347 emitLoadStubField(offset, scratch2);
2349 StubFieldOffset val(valOffset, StubField::Type::Value);
2350 emitLoadValueStubField(val, scratchVal);
2352 BaseIndex slotVal(scratch1, scratch2, TimesOne);
2353 masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
2354 failure->label());
2355 return true;
2358 bool CacheIRCompiler::emitLoadScriptedProxyHandler(ValOperandId resultId,
2359 ObjOperandId objId) {
2360 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2362 Register obj = allocator.useRegister(masm, objId);
2363 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2365 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
2366 output.scratchReg());
2367 masm.loadValue(
2368 Address(output.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
2369 ScriptedProxyHandler::HANDLER_EXTRA)),
2370 output);
2371 return true;
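// IdToStringOrSymbol normalizes a property key: strings and symbols pass
// through unchanged, int32 indexes are converted to strings (via the static
// strings cache when possible, otherwise a pure VM helper), and any other
// type fails the IC.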
2374 bool CacheIRCompiler::emitIdToStringOrSymbol(ValOperandId resultId,
2375 ValOperandId idId) {
2376 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2378 ValueOperand id = allocator.useValueRegister(masm, idId);
2379 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2380 AutoScratchRegister scratch(allocator, masm);
2382 FailurePath* failure;
2383 if (!addFailurePath(&failure)) {
2384 return false;
2387 masm.moveValue(id, output);
2389 Label done, intDone, callVM;
2390 {
2391 ScratchTagScope tag(masm, output);
2392 masm.splitTagForTest(output, tag);
2393 masm.branchTestString(Assembler::Equal, tag, &done);
2394 masm.branchTestSymbol(Assembler::Equal, tag, &done);
2395 masm.branchTestInt32(Assembler::NotEqual, tag, failure->label());
2396 }
2398 Register intReg = output.scratchReg();
2399 masm.unboxInt32(output, intReg);
2401 // Fast path for small integers.
2402 masm.lookupStaticIntString(intReg, intReg, scratch, cx_->staticStrings(),
2403 &callVM);
2404 masm.jump(&intDone);
2406 masm.bind(&callVM);
2407 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
2408 liveVolatileFloatRegs());
2409 masm.PushRegsInMask(volatileRegs);
2411 using Fn = JSLinearString* (*)(JSContext* cx, int32_t i);
2412 masm.setupUnalignedABICall(scratch);
2413 masm.loadJSContext(scratch);
2414 masm.passABIArg(scratch);
2415 masm.passABIArg(intReg);
2416 masm.callWithABI<Fn, js::Int32ToStringPure>();
2418 masm.storeCallPointerResult(intReg);
2420 LiveRegisterSet ignore;
2421 ignore.add(intReg);
2422 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
2424 masm.branchPtr(Assembler::Equal, intReg, ImmPtr(nullptr), failure->label());
2426 masm.bind(&intDone);
2427 masm.tagValue(JSVAL_TYPE_STRING, intReg, output);
2428 masm.bind(&done);
2430 return true;
2433 bool CacheIRCompiler::emitLoadFixedSlot(ValOperandId resultId,
2434 ObjOperandId objId,
2435 uint32_t offsetOffset) {
2436 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2438 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2439 Register obj = allocator.useRegister(masm, objId);
2440 AutoScratchRegister scratch(allocator, masm);
2442 StubFieldOffset slotIndex(offsetOffset, StubField::Type::RawInt32);
2443 emitLoadStubField(slotIndex, scratch);
2445 masm.loadValue(BaseIndex(obj, scratch, TimesOne), output);
2446 return true;
2449 bool CacheIRCompiler::emitLoadDynamicSlot(ValOperandId resultId,
2450 ObjOperandId objId,
2451 uint32_t slotOffset) {
2452 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2454 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2455 Register obj = allocator.useRegister(masm, objId);
2456 AutoScratchRegister scratch1(allocator, masm);
2457 Register scratch2 = output.scratchReg();
2459 StubFieldOffset slotIndex(slotOffset, StubField::Type::RawInt32);
2460 emitLoadStubField(slotIndex, scratch2);
2462 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
2463 masm.loadValue(BaseObjectSlotIndex(scratch1, scratch2), output);
2464 return true;
2467 bool CacheIRCompiler::emitGuardIsNativeObject(ObjOperandId objId) {
2468 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2470 Register obj = allocator.useRegister(masm, objId);
2471 AutoScratchRegister scratch(allocator, masm);
2473 FailurePath* failure;
2474 if (!addFailurePath(&failure)) {
2475 return false;
2478 masm.branchIfNonNativeObj(obj, scratch, failure->label());
2479 return true;
2482 bool CacheIRCompiler::emitGuardIsProxy(ObjOperandId objId) {
2483 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2485 Register obj = allocator.useRegister(masm, objId);
2486 AutoScratchRegister scratch(allocator, masm);
2488 FailurePath* failure;
2489 if (!addFailurePath(&failure)) {
2490 return false;
2493 masm.branchTestObjectIsProxy(false, obj, scratch, failure->label());
2494 return true;
2497 bool CacheIRCompiler::emitGuardIsNotProxy(ObjOperandId objId) {
2498 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2500 Register obj = allocator.useRegister(masm, objId);
2501 AutoScratchRegister scratch(allocator, masm);
2503 FailurePath* failure;
2504 if (!addFailurePath(&failure)) {
2505 return false;
2508 masm.branchTestObjectIsProxy(true, obj, scratch, failure->label());
2509 return true;
2512 bool CacheIRCompiler::emitGuardIsNotArrayBufferMaybeShared(ObjOperandId objId) {
2513 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2515 Register obj = allocator.useRegister(masm, objId);
2516 AutoScratchRegister scratch(allocator, masm);
2518 FailurePath* failure;
2519 if (!addFailurePath(&failure)) {
2520 return false;
2523 masm.loadObjClassUnsafe(obj, scratch);
2524 masm.branchPtr(Assembler::Equal, scratch,
2525 ImmPtr(&FixedLengthArrayBufferObject::class_),
2526 failure->label());
2527 masm.branchPtr(Assembler::Equal, scratch,
2528 ImmPtr(&FixedLengthSharedArrayBufferObject::class_),
2529 failure->label());
2530 masm.branchPtr(Assembler::Equal, scratch,
2531 ImmPtr(&ResizableArrayBufferObject::class_), failure->label());
2532 masm.branchPtr(Assembler::Equal, scratch,
2533 ImmPtr(&GrowableSharedArrayBufferObject::class_),
2534 failure->label());
2535 return true;
2538 bool CacheIRCompiler::emitGuardIsTypedArray(ObjOperandId objId) {
2539 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2541 Register obj = allocator.useRegister(masm, objId);
2542 AutoScratchRegister scratch(allocator, masm);
2544 FailurePath* failure;
2545 if (!addFailurePath(&failure)) {
2546 return false;
2549 masm.loadObjClassUnsafe(obj, scratch);
2550 masm.branchIfClassIsNotTypedArray(scratch, failure->label());
2551 return true;
2554 bool CacheIRCompiler::emitGuardIsFixedLengthTypedArray(ObjOperandId objId) {
2555 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2557 Register obj = allocator.useRegister(masm, objId);
2558 AutoScratchRegister scratch(allocator, masm);
2560 FailurePath* failure;
2561 if (!addFailurePath(&failure)) {
2562 return false;
2565 masm.loadObjClassUnsafe(obj, scratch);
2566 masm.branchIfClassIsNotFixedLengthTypedArray(scratch, failure->label());
2567 return true;
2570 bool CacheIRCompiler::emitGuardIsNotDOMProxy(ObjOperandId objId) {
2571 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2572 Register obj = allocator.useRegister(masm, objId);
2573 AutoScratchRegister scratch(allocator, masm);
2575 FailurePath* failure;
2576 if (!addFailurePath(&failure)) {
2577 return false;
2580 masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, scratch,
2581 GetDOMProxyHandlerFamily(),
2582 failure->label());
2583 return true;
2586 bool CacheIRCompiler::emitGuardNoDenseElements(ObjOperandId objId) {
2587 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2588 Register obj = allocator.useRegister(masm, objId);
2589 AutoScratchRegister scratch(allocator, masm);
2591 FailurePath* failure;
2592 if (!addFailurePath(&failure)) {
2593 return false;
2596 // Load obj->elements.
2597 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
2599 // Make sure there are no dense elements.
2600 Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
2601 masm.branch32(Assembler::NotEqual, initLength, Imm32(0), failure->label());
2602 return true;
2605 bool CacheIRCompiler::emitGuardSpecificInt32(Int32OperandId numId,
2606 int32_t expected) {
2607 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2608 Register num = allocator.useRegister(masm, numId);
2610 FailurePath* failure;
2611 if (!addFailurePath(&failure)) {
2612 return false;
2615 masm.branch32(Assembler::NotEqual, num, Imm32(expected), failure->label());
2616 return true;
2619 bool CacheIRCompiler::emitGuardStringToInt32(StringOperandId strId,
2620 Int32OperandId resultId) {
2621 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2622 Register str = allocator.useRegister(masm, strId);
2623 Register output = allocator.defineRegister(masm, resultId);
2624 AutoScratchRegister scratch(allocator, masm);
2626 FailurePath* failure;
2627 if (!addFailurePath(&failure)) {
2628 return false;
2631 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
2632 liveVolatileFloatRegs());
2633 masm.guardStringToInt32(str, output, scratch, volatileRegs, failure->label());
2634 return true;
2637 bool CacheIRCompiler::emitGuardStringToNumber(StringOperandId strId,
2638 NumberOperandId resultId) {
2639 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2640 Register str = allocator.useRegister(masm, strId);
2641 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2642 AutoScratchRegister scratch(allocator, masm);
2644 FailurePath* failure;
2645 if (!addFailurePath(&failure)) {
2646 return false;
2649 Label vmCall, done;
2650 // Use indexed value as fast path if possible.
2651 masm.loadStringIndexValue(str, scratch, &vmCall);
2652 masm.tagValue(JSVAL_TYPE_INT32, scratch, output);
2653 masm.jump(&done);
2655 masm.bind(&vmCall);
2657 // Reserve stack for holding the result value of the call.
2658 masm.reserveStack(sizeof(double));
2659 masm.moveStackPtrTo(output.payloadOrValueReg());
2661 // We cannot use callVM here: callVM expects to be able to clobber all
2662 // operands. However, since this op is not the last one in the generated IC,
2663 // we still need to reference other live values after the call.
2664 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
2665 liveVolatileFloatRegs());
2666 masm.PushRegsInMask(volatileRegs);
2668 using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
2669 masm.setupUnalignedABICall(scratch);
2670 masm.loadJSContext(scratch);
2671 masm.passABIArg(scratch);
2672 masm.passABIArg(str);
2673 masm.passABIArg(output.payloadOrValueReg());
2674 masm.callWithABI<Fn, js::StringToNumberPure>();
2675 masm.storeCallPointerResult(scratch);
2677 LiveRegisterSet ignore;
2678 ignore.add(scratch);
2679 masm.PopRegsInMaskIgnore(volatileRegs, ignore);
2681 Label ok;
2682 masm.branchIfTrueBool(scratch, &ok);
2683 {
2684 // OOM path, recovered by StringToNumberPure.
2686 // Use addToStackPtr instead of freeStack: freeStack tracks the stack height
2687 // flow-insensitively, so calling it both here and on the success path below
2688 // would double-count the adjustment.
2689 masm.addToStackPtr(Imm32(sizeof(double)));
2690 masm.jump(failure->label());
2691 }
2692 masm.bind(&ok);
2694 {
2695 ScratchDoubleScope fpscratch(masm);
2696 masm.loadDouble(Address(output.payloadOrValueReg(), 0), fpscratch);
2697 masm.boxDouble(fpscratch, output, fpscratch);
2698 }
2699 masm.freeStack(sizeof(double));
2701 masm.bind(&done);
2702 return true;
2705 bool CacheIRCompiler::emitNumberParseIntResult(StringOperandId strId,
2706 Int32OperandId radixId) {
2707 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2709 AutoCallVM callvm(masm, this, allocator);
2711 Register str = allocator.useRegister(masm, strId);
2712 Register radix = allocator.useRegister(masm, radixId);
2713 AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());
2715 #ifdef DEBUG
2716 Label ok;
2717 masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
2718 masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
2719 masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
2720 masm.bind(&ok);
2721 #endif
2723 // Discard the stack to ensure it's balanced when we skip the vm-call.
2724 allocator.discardStack(masm);
2726 // Use indexed value as fast path if possible.
2727 Label vmCall, done;
2728 masm.loadStringIndexValue(str, scratch, &vmCall);
2729 masm.tagValue(JSVAL_TYPE_INT32, scratch, callvm.outputValueReg());
2730 masm.jump(&done);
2732 masm.bind(&vmCall);
2734 callvm.prepare();
2735 masm.Push(radix);
2736 masm.Push(str);
2738 using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
2739 callvm.call<Fn, js::NumberParseInt>();
2741 masm.bind(&done);
2742 return true;
2745 bool CacheIRCompiler::emitDoubleParseIntResult(NumberOperandId numId) {
2746 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2748 AutoOutputRegister output(*this);
2749 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2750 AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
2751 AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
2753 FailurePath* failure;
2754 if (!addFailurePath(&failure)) {
2755 return false;
2758 allocator.ensureDoubleRegister(masm, numId, floatScratch1);
2760 masm.branchDouble(Assembler::DoubleUnordered, floatScratch1, floatScratch1,
2761 failure->label());
2762 masm.branchTruncateDoubleToInt32(floatScratch1, scratch, failure->label());
2764 Label ok;
2765 masm.branch32(Assembler::NotEqual, scratch, Imm32(0), &ok);
2767 // Accept both +0 and -0 and return 0.
2768 masm.loadConstantDouble(0.0, floatScratch2);
2769 masm.branchDouble(Assembler::DoubleEqual, floatScratch1, floatScratch2,
2770 &ok);
2772 // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
2773 masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, floatScratch2);
2774 masm.branchDouble(Assembler::DoubleLessThan, floatScratch1, floatScratch2,
2775 failure->label());
2777 masm.bind(&ok);
2779 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2780 return true;
2783 bool CacheIRCompiler::emitBooleanToNumber(BooleanOperandId booleanId,
2784 NumberOperandId resultId) {
2785 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2786 Register boolean = allocator.useRegister(masm, booleanId);
2787 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2788 masm.tagValue(JSVAL_TYPE_INT32, boolean, output);
2789 return true;
2792 bool CacheIRCompiler::emitGuardStringToIndex(StringOperandId strId,
2793 Int32OperandId resultId) {
2794 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2795 Register str = allocator.useRegister(masm, strId);
2796 Register output = allocator.defineRegister(masm, resultId);
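// A string that is a canonical integer index can cache that index in its
// header flags; loadStringIndexValue reads the cached index and jumps to the
// slow path when none is available.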
2798 FailurePath* failure;
2799 if (!addFailurePath(&failure)) {
2800 return false;
2803 Label vmCall, done;
2804 masm.loadStringIndexValue(str, output, &vmCall);
2805 masm.jump(&done);
2808 masm.bind(&vmCall);
2809 LiveRegisterSet save(GeneralRegisterSet::Volatile(),
2810 liveVolatileFloatRegs());
2811 masm.PushRegsInMask(save);
2813 using Fn = int32_t (*)(JSString* str);
2814 masm.setupUnalignedABICall(output);
2815 masm.passABIArg(str);
2816 masm.callWithABI<Fn, GetIndexFromString>();
2817 masm.storeCallInt32Result(output);
2819 LiveRegisterSet ignore;
2820 ignore.add(output);
2821 masm.PopRegsInMaskIgnore(save, ignore);
2823 // GetIndexFromString returns a negative value on failure.
2824 masm.branchTest32(Assembler::Signed, output, output, failure->label());
2827 masm.bind(&done);
2828 return true;
2831 bool CacheIRCompiler::emitLoadProto(ObjOperandId objId, ObjOperandId resultId) {
2832 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2833 Register obj = allocator.useRegister(masm, objId);
2834 Register reg = allocator.defineRegister(masm, resultId);
2835 masm.loadObjProto(obj, reg);
2837 #ifdef DEBUG
2838 // We shouldn't encounter a null or lazy proto.
2839 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
2841 Label done;
2842 masm.branchPtr(Assembler::Above, reg, ImmWord(1), &done);
2843 masm.assumeUnreachable("Unexpected null or lazy proto in CacheIR LoadProto");
2844 masm.bind(&done);
2845 #endif
2846 return true;
2849 bool CacheIRCompiler::emitLoadEnclosingEnvironment(ObjOperandId objId,
2850 ObjOperandId resultId) {
2851 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2852 Register obj = allocator.useRegister(masm, objId);
2853 Register reg = allocator.defineRegister(masm, resultId);
2854 masm.unboxObject(
2855 Address(obj, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
2856 return true;
2859 bool CacheIRCompiler::emitLoadWrapperTarget(ObjOperandId objId,
2860 ObjOperandId resultId) {
2861 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2862 Register obj = allocator.useRegister(masm, objId);
2863 Register reg = allocator.defineRegister(masm, resultId);
2865 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);
2866 masm.unboxObject(
2867 Address(reg, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()), reg);
2868 return true;
2871 bool CacheIRCompiler::emitLoadValueTag(ValOperandId valId,
2872 ValueTagOperandId resultId) {
2873 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2874 ValueOperand val = allocator.useValueRegister(masm, valId);
2875 Register res = allocator.defineRegister(masm, resultId);
2877 Register tag = masm.extractTag(val, res);
2878 if (tag != res) {
2879 masm.mov(tag, res);
2881 return true;
2884 bool CacheIRCompiler::emitLoadDOMExpandoValue(ObjOperandId objId,
2885 ValOperandId resultId) {
2886 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2887 Register obj = allocator.useRegister(masm, objId);
2888 ValueOperand val = allocator.defineValueRegister(masm, resultId);
2890 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
2891 val.scratchReg());
2892 masm.loadValue(Address(val.scratchReg(),
2893 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
2894 val);
2895 return true;
2898 bool CacheIRCompiler::emitLoadDOMExpandoValueIgnoreGeneration(
2899 ObjOperandId objId, ValOperandId resultId) {
2900 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2901 Register obj = allocator.useRegister(masm, objId);
2902 ValueOperand output = allocator.defineValueRegister(masm, resultId);
2904 // Determine the expando's Address.
2905 Register scratch = output.scratchReg();
2906 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
2907 Address expandoAddr(scratch,
2908 js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
2910 #ifdef DEBUG
2911 // Private values are stored as doubles, so assert we have a double.
2912 Label ok;
2913 masm.branchTestDouble(Assembler::Equal, expandoAddr, &ok);
2914 masm.assumeUnreachable("DOM expando is not a PrivateValue!");
2915 masm.bind(&ok);
2916 #endif
2918 // Load the ExpandoAndGeneration* from the PrivateValue.
2919 masm.loadPrivate(expandoAddr, scratch);
2921 // Load expandoAndGeneration->expando into the output Value register.
2922 masm.loadValue(Address(scratch, ExpandoAndGeneration::offsetOfExpando()),
2923 output);
2924 return true;
2927 bool CacheIRCompiler::emitLoadUndefinedResult() {
2928 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2929 AutoOutputRegister output(*this);
2930 masm.moveValue(UndefinedValue(), output.valueReg());
2931 return true;
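// Result-storing helpers: the IC output may be a boxed ValueOperand or a
// typed register, and these helpers store in whichever representation the
// output uses.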
2934 static void EmitStoreBoolean(MacroAssembler& masm, bool b,
2935 const AutoOutputRegister& output) {
2936 if (output.hasValue()) {
2937 Value val = BooleanValue(b);
2938 masm.moveValue(val, output.valueReg());
2939 } else {
2940 MOZ_ASSERT(output.type() == JSVAL_TYPE_BOOLEAN);
2941 masm.movePtr(ImmWord(b), output.typedReg().gpr());
2945 bool CacheIRCompiler::emitLoadBooleanResult(bool val) {
2946 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2947 AutoOutputRegister output(*this);
2948 EmitStoreBoolean(masm, val, output);
2949 return true;
2952 bool CacheIRCompiler::emitLoadOperandResult(ValOperandId inputId) {
2953 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2954 AutoOutputRegister output(*this);
2955 ValueOperand input = allocator.useValueRegister(masm, inputId);
2956 masm.moveValue(input, output.valueReg());
2957 return true;
2960 static void EmitStoreResult(MacroAssembler& masm, Register reg,
2961 JSValueType type,
2962 const AutoOutputRegister& output) {
2963 if (output.hasValue()) {
2964 masm.tagValue(type, reg, output.valueReg());
2965 return;
2967 if (type == JSVAL_TYPE_INT32 && output.typedReg().isFloat()) {
2968 masm.convertInt32ToDouble(reg, output.typedReg().fpu());
2969 return;
2971 if (type == output.type()) {
2972 masm.mov(reg, output.typedReg().gpr());
2973 return;
2975 masm.assumeUnreachable("Should have monitored result");
2978 bool CacheIRCompiler::emitLoadInt32ArrayLengthResult(ObjOperandId objId) {
2979 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2980 AutoOutputRegister output(*this);
2981 Register obj = allocator.useRegister(masm, objId);
2982 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2984 FailurePath* failure;
2985 if (!addFailurePath(&failure)) {
2986 return false;
2989 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
2990 masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);
2992 // Guard that the length fits in an int32.
2993 masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
2994 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2995 return true;
2998 bool CacheIRCompiler::emitLoadInt32ArrayLength(ObjOperandId objId,
2999 Int32OperandId resultId) {
3000 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3001 Register obj = allocator.useRegister(masm, objId);
3002 Register res = allocator.defineRegister(masm, resultId);
3004 FailurePath* failure;
3005 if (!addFailurePath(&failure)) {
3006 return false;
3009 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), res);
3010 masm.load32(Address(res, ObjectElements::offsetOfLength()), res);
3012 // Guard that the length fits in an int32.
3013 masm.branchTest32(Assembler::Signed, res, res, failure->label());
3014 return true;
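// Binary double ops share one shape: ensureDoubleRegister loads each operand
// into a float scratch register (unboxing a double or converting an int32),
// the operation runs in place, and the result is boxed into the output.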
3017 bool CacheIRCompiler::emitDoubleAddResult(NumberOperandId lhsId,
3018 NumberOperandId rhsId) {
3019 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3020 AutoOutputRegister output(*this);
3022 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
3023 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
3025 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
3026 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
3028 masm.addDouble(floatScratch1, floatScratch0);
3029 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
3031 return true;
3033 bool CacheIRCompiler::emitDoubleSubResult(NumberOperandId lhsId,
3034 NumberOperandId rhsId) {
3035 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3036 AutoOutputRegister output(*this);
3038 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
3039 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
3041 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
3042 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
3044 masm.subDouble(floatScratch1, floatScratch0);
3045 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
3047 return true;
3049 bool CacheIRCompiler::emitDoubleMulResult(NumberOperandId lhsId,
3050 NumberOperandId rhsId) {
3051 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3052 AutoOutputRegister output(*this);
3054 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
3055 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
3057 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
3058 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
3060 masm.mulDouble(floatScratch1, floatScratch0);
3061 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
3063 return true;
3065 bool CacheIRCompiler::emitDoubleDivResult(NumberOperandId lhsId,
3066 NumberOperandId rhsId) {
3067 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3068 AutoOutputRegister output(*this);
3070 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
3071 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
3073 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
3074 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
3076 masm.divDouble(floatScratch1, floatScratch0);
3077 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
3079 return true;
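// Double modulo and pow have no inline instruction: both call into C++
// (js::NumberMod / js::ecmaPow) through the ABI, saving volatile registers
// around the call.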
3081 bool CacheIRCompiler::emitDoubleModResult(NumberOperandId lhsId,
3082 NumberOperandId rhsId) {
3083 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3084 AutoOutputRegister output(*this);
3085 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3087 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
3088 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
3090 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
3091 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
3093 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
3094 masm.PushRegsInMask(save);
3096 using Fn = double (*)(double a, double b);
3097 masm.setupUnalignedABICall(scratch);
3098 masm.passABIArg(floatScratch0, ABIType::Float64);
3099 masm.passABIArg(floatScratch1, ABIType::Float64);
3100 masm.callWithABI<Fn, js::NumberMod>(ABIType::Float64);
3101 masm.storeCallFloatResult(floatScratch0);
3103 LiveRegisterSet ignore;
3104 ignore.add(floatScratch0);
3105 masm.PopRegsInMaskIgnore(save, ignore);
3107 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
3109 return true;
3111 bool CacheIRCompiler::emitDoublePowResult(NumberOperandId lhsId,
3112 NumberOperandId rhsId) {
3113 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3114 AutoOutputRegister output(*this);
3115 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3117 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
3118 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
3120 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
3121 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
3123 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
3124 masm.PushRegsInMask(save);
3126 using Fn = double (*)(double x, double y);
3127 masm.setupUnalignedABICall(scratch);
3128 masm.passABIArg(floatScratch0, ABIType::Float64);
3129 masm.passABIArg(floatScratch1, ABIType::Float64);
3130 masm.callWithABI<Fn, js::ecmaPow>(ABIType::Float64);
3131 masm.storeCallFloatResult(floatScratch0);
3133 LiveRegisterSet ignore;
3134 ignore.add(floatScratch0);
3135 masm.PopRegsInMaskIgnore(save, ignore);
3137 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
3139 return true;
3142 bool CacheIRCompiler::emitInt32AddResult(Int32OperandId lhsId,
3143 Int32OperandId rhsId) {
3144 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3145 AutoOutputRegister output(*this);
3146 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3148 Register lhs = allocator.useRegister(masm, lhsId);
3149 Register rhs = allocator.useRegister(masm, rhsId);
3151 FailurePath* failure;
3152 if (!addFailurePath(&failure)) {
3153 return false;
3156 masm.mov(rhs, scratch);
3157 masm.branchAdd32(Assembler::Overflow, lhs, scratch, failure->label());
3158 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3160 return true;
3162 bool CacheIRCompiler::emitInt32SubResult(Int32OperandId lhsId,
3163 Int32OperandId rhsId) {
3164 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3165 AutoOutputRegister output(*this);
3166 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3167 Register lhs = allocator.useRegister(masm, lhsId);
3168 Register rhs = allocator.useRegister(masm, rhsId);
3170 FailurePath* failure;
3171 if (!addFailurePath(&failure)) {
3172 return false;
3175 masm.mov(lhs, scratch);
3176 masm.branchSub32(Assembler::Overflow, rhs, scratch, failure->label());
3177 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3179 return true;
3182 bool CacheIRCompiler::emitInt32MulResult(Int32OperandId lhsId,
3183 Int32OperandId rhsId) {
3184 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3185 AutoOutputRegister output(*this);
3186 Register lhs = allocator.useRegister(masm, lhsId);
3187 Register rhs = allocator.useRegister(masm, rhsId);
3188 AutoScratchRegister scratch(allocator, masm);
3189 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
3191 FailurePath* failure;
3192 if (!addFailurePath(&failure)) {
3193 return false;
3196 Label maybeNegZero, done;
3197 masm.mov(lhs, scratch);
3198 masm.branchMul32(Assembler::Overflow, rhs, scratch, failure->label());
3199 masm.branchTest32(Assembler::Zero, scratch, scratch, &maybeNegZero);
3200 masm.jump(&done);
3202 masm.bind(&maybeNegZero);
3203 masm.mov(lhs, scratch2);
3204 // Result is -0 if exactly one of lhs or rhs is negative.
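// (For example, -3 * 0 is -0 in JS, which cannot be represented as an
// int32.)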
3205 masm.or32(rhs, scratch2);
3206 masm.branchTest32(Assembler::Signed, scratch2, scratch2, failure->label());
3208 masm.bind(&done);
3209 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3210 return true;
3213 bool CacheIRCompiler::emitInt32DivResult(Int32OperandId lhsId,
3214 Int32OperandId rhsId) {
3215 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3216 AutoOutputRegister output(*this);
3217 Register lhs = allocator.useRegister(masm, lhsId);
3218 Register rhs = allocator.useRegister(masm, rhsId);
3219 AutoScratchRegister rem(allocator, masm);
3220 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3222 FailurePath* failure;
3223 if (!addFailurePath(&failure)) {
3224 return false;
3227 // Prevent division by 0.
3228 masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());
3230 // Prevent -2147483648 / -1.
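// The quotient 2147483648 does not fit in int32, and the division
// instruction traps on this input on x86.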
3231 Label notOverflow;
3232 masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
3233 masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
3234 masm.bind(&notOverflow);
3236 // Prevent negative 0.
3237 Label notZero;
3238 masm.branchTest32(Assembler::NonZero, lhs, lhs, &notZero);
3239 masm.branchTest32(Assembler::Signed, rhs, rhs, failure->label());
3240 masm.bind(&notZero);
3242 masm.mov(lhs, scratch);
3243 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
3244 liveVolatileFloatRegs());
3245 masm.flexibleDivMod32(rhs, scratch, rem, false, volatileRegs);
3247 // A non-zero remainder means the quotient is not an integer, so the result must be a double; fail.
3248 masm.branchTest32(Assembler::NonZero, rem, rem, failure->label());
3249 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3250 return true;
3253 bool CacheIRCompiler::emitInt32ModResult(Int32OperandId lhsId,
3254 Int32OperandId rhsId) {
3255 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3256 AutoOutputRegister output(*this);
3257 Register lhs = allocator.useRegister(masm, lhsId);
3258 Register rhs = allocator.useRegister(masm, rhsId);
3259 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3261 FailurePath* failure;
3262 if (!addFailurePath(&failure)) {
3263 return false;
3266 // x % 0 results in NaN, which is not an int32; fail.
3267 masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());
3269 // Prevent -2147483648 % -1.
3271 // Traps on x86 and has undefined behavior on ARM32 (when __aeabi_idivmod is
3272 // called).
3273 Label notOverflow;
3274 masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
3275 masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
3276 masm.bind(&notOverflow);
3278 masm.mov(lhs, scratch);
3279 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
3280 liveVolatileFloatRegs());
3281 masm.flexibleRemainder32(rhs, scratch, false, volatileRegs);
3283 // The result takes the sign of the dividend, so a zero result with a negative dividend is -0, which we can't represent as an int32.
3284 Label notZero;
3285 masm.branchTest32(Assembler::NonZero, scratch, scratch, &notZero);
3286 masm.branchTest32(Assembler::Signed, lhs, lhs, failure->label());
3287 masm.bind(&notZero);
3289 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3291 return true;
3294 bool CacheIRCompiler::emitInt32PowResult(Int32OperandId lhsId,
3295 Int32OperandId rhsId) {
3296 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3297 AutoOutputRegister output(*this);
3298 Register base = allocator.useRegister(masm, lhsId);
3299 Register power = allocator.useRegister(masm, rhsId);
3300 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
3301 AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
3302 AutoScratchRegister scratch3(allocator, masm);
3304 FailurePath* failure;
3305 if (!addFailurePath(&failure)) {
3306 return false;
3309 masm.pow32(base, power, scratch1, scratch2, scratch3, failure->label());
3311 masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
3312 return true;
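// The bitwise ops below cannot overflow: |, ^ and & of two int32 values
// always fit in an int32, so they need no failure path.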
3315 bool CacheIRCompiler::emitInt32BitOrResult(Int32OperandId lhsId,
3316 Int32OperandId rhsId) {
3317 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3318 AutoOutputRegister output(*this);
3319 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3321 Register lhs = allocator.useRegister(masm, lhsId);
3322 Register rhs = allocator.useRegister(masm, rhsId);
3324 masm.mov(rhs, scratch);
3325 masm.or32(lhs, scratch);
3326 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3328 return true;
3330 bool CacheIRCompiler::emitInt32BitXorResult(Int32OperandId lhsId,
3331 Int32OperandId rhsId) {
3332 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3333 AutoOutputRegister output(*this);
3334 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3336 Register lhs = allocator.useRegister(masm, lhsId);
3337 Register rhs = allocator.useRegister(masm, rhsId);
3339 masm.mov(rhs, scratch);
3340 masm.xor32(lhs, scratch);
3341 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3343 return true;
3345 bool CacheIRCompiler::emitInt32BitAndResult(Int32OperandId lhsId,
3346 Int32OperandId rhsId) {
3347 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3348 AutoOutputRegister output(*this);
3349 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3351 Register lhs = allocator.useRegister(masm, lhsId);
3352 Register rhs = allocator.useRegister(masm, rhsId);
3354 masm.mov(rhs, scratch);
3355 masm.and32(lhs, scratch);
3356 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3358 return true;
3360 bool CacheIRCompiler::emitInt32LeftShiftResult(Int32OperandId lhsId,
3361 Int32OperandId rhsId) {
3362 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3363 AutoOutputRegister output(*this);
3364 Register lhs = allocator.useRegister(masm, lhsId);
3365 Register rhs = allocator.useRegister(masm, rhsId);
3366 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3368 masm.mov(lhs, scratch);
3369 masm.flexibleLshift32(rhs, scratch);
3370 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3372 return true;
3375 bool CacheIRCompiler::emitInt32RightShiftResult(Int32OperandId lhsId,
3376 Int32OperandId rhsId) {
3377 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3378 AutoOutputRegister output(*this);
3379 Register lhs = allocator.useRegister(masm, lhsId);
3380 Register rhs = allocator.useRegister(masm, rhsId);
3381 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3383 masm.mov(lhs, scratch);
3384 masm.flexibleRshift32Arithmetic(rhs, scratch);
3385 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3387 return true;
3390 bool CacheIRCompiler::emitInt32URightShiftResult(Int32OperandId lhsId,
3391 Int32OperandId rhsId,
3392 bool forceDouble) {
3393 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
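// x >>> y produces a uint32; a result with the sign bit set does not fit in
// an int32. With |forceDouble| the result is always boxed as a double,
// otherwise such results take the failure path.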
3394 AutoOutputRegister output(*this);
3396 Register lhs = allocator.useRegister(masm, lhsId);
3397 Register rhs = allocator.useRegister(masm, rhsId);
3398 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3400 FailurePath* failure;
3401 if (!addFailurePath(&failure)) {
3402 return false;
3405 masm.mov(lhs, scratch);
3406 masm.flexibleRshift32(rhs, scratch);
3407 if (forceDouble) {
3408 ScratchDoubleScope fpscratch(masm);
3409 masm.convertUInt32ToDouble(scratch, fpscratch);
3410 masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
3411 } else {
3412 masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
3413 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3415 return true;
3418 bool CacheIRCompiler::emitInt32NegationResult(Int32OperandId inputId) {
3419 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3420 AutoOutputRegister output(*this);
3421 Register val = allocator.useRegister(masm, inputId);
3422 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3424 FailurePath* failure;
3425 if (!addFailurePath(&failure)) {
3426 return false;
3429 // Guard against 0 and INT32_MIN by checking whether the low 31 bits are all zero.
3430 // Negating either one yields a value (-0 or 2147483648) that requires a double.
3431 masm.branchTest32(Assembler::Zero, val, Imm32(0x7fffffff), failure->label());
3432 masm.mov(val, scratch);
3433 masm.neg32(scratch);
3434 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3435 return true;
3438 bool CacheIRCompiler::emitInt32IncResult(Int32OperandId inputId) {
3439 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3440 AutoOutputRegister output(*this);
3441 Register input = allocator.useRegister(masm, inputId);
3442 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3444 FailurePath* failure;
3445 if (!addFailurePath(&failure)) {
3446 return false;
3449 masm.mov(input, scratch);
3450 masm.branchAdd32(Assembler::Overflow, Imm32(1), scratch, failure->label());
3451 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3453 return true;
3456 bool CacheIRCompiler::emitInt32DecResult(Int32OperandId inputId) {
3457 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3458 AutoOutputRegister output(*this);
3459 Register input = allocator.useRegister(masm, inputId);
3460 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3462 FailurePath* failure;
3463 if (!addFailurePath(&failure)) {
3464 return false;
3467 masm.mov(input, scratch);
3468 masm.branchSub32(Assembler::Overflow, Imm32(1), scratch, failure->label());
3469 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3471 return true;
3474 bool CacheIRCompiler::emitInt32NotResult(Int32OperandId inputId) {
3475 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3476 AutoOutputRegister output(*this);
3477 Register val = allocator.useRegister(masm, inputId);
3478 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3480 masm.mov(val, scratch);
3481 masm.not32(scratch);
3482 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3483 return true;
3486 bool CacheIRCompiler::emitDoubleNegationResult(NumberOperandId inputId) {
3487 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3488 AutoOutputRegister output(*this);
3490 AutoScratchFloatRegister floatReg(this);
3492 allocator.ensureDoubleRegister(masm, inputId, floatReg);
3494 masm.negateDouble(floatReg);
3495 masm.boxDouble(floatReg, output.valueReg(), floatReg);
3497 return true;
3500 bool CacheIRCompiler::emitDoubleIncDecResult(bool isInc,
3501 NumberOperandId inputId) {
3502 AutoOutputRegister output(*this);
3504 AutoScratchFloatRegister floatReg(this);
3506 allocator.ensureDoubleRegister(masm, inputId, floatReg);
3508 {
3509 ScratchDoubleScope fpscratch(masm);
3510 masm.loadConstantDouble(1.0, fpscratch);
3511 if (isInc) {
3512 masm.addDouble(fpscratch, floatReg);
3513 } else {
3514 masm.subDouble(fpscratch, floatReg);
3515 }
3516 }
3517 masm.boxDouble(floatReg, output.valueReg(), floatReg);
3519 return true;
3522 bool CacheIRCompiler::emitDoubleIncResult(NumberOperandId inputId) {
3523 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3524 return emitDoubleIncDecResult(true, inputId);
3527 bool CacheIRCompiler::emitDoubleDecResult(NumberOperandId inputId) {
3528 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3529 return emitDoubleIncDecResult(false, inputId);
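// BigInt results are GC-allocated, so the BigInt ops are implemented as VM
// calls: AutoCallVM pushes the operands and dispatches to the runtime
// function selected by the template parameters.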
template <typename Fn, Fn fn>
bool CacheIRCompiler::emitBigIntBinaryOperationShared(BigIntOperandId lhsId,
                                                      BigIntOperandId rhsId) {
  AutoCallVM callvm(masm, this, allocator);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  masm.Push(rhs);
  masm.Push(lhs);

  callvm.call<Fn, fn>();
  return true;
}

bool CacheIRCompiler::emitBigIntAddResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::add>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntSubResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::sub>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntMulResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::mul>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntDivResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::div>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntModResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::mod>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntPowResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::pow>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntBitAndResult(BigIntOperandId lhsId,
                                             BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::bitAnd>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntBitOrResult(BigIntOperandId lhsId,
                                            BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::bitOr>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntBitXorResult(BigIntOperandId lhsId,
                                             BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::bitXor>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntLeftShiftResult(BigIntOperandId lhsId,
                                                BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::lsh>(lhsId, rhsId);
}

bool CacheIRCompiler::emitBigIntRightShiftResult(BigIntOperandId lhsId,
                                                 BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::rsh>(lhsId, rhsId);
}

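// Unary BigInt operators use the same VM-call pattern, with a single operand.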
template <typename Fn, Fn fn>
bool CacheIRCompiler::emitBigIntUnaryOperationShared(BigIntOperandId inputId) {
  AutoCallVM callvm(masm, this, allocator);
  Register val = allocator.useRegister(masm, inputId);

  callvm.prepare();

  masm.Push(val);

  callvm.call<Fn, fn>();
  return true;
}

bool CacheIRCompiler::emitBigIntNotResult(BigIntOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  return emitBigIntUnaryOperationShared<Fn, BigInt::bitNot>(inputId);
}

bool CacheIRCompiler::emitBigIntNegationResult(BigIntOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  return emitBigIntUnaryOperationShared<Fn, BigInt::neg>(inputId);
}

bool CacheIRCompiler::emitBigIntIncResult(BigIntOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  return emitBigIntUnaryOperationShared<Fn, BigInt::inc>(inputId);
}

bool CacheIRCompiler::emitBigIntDecResult(BigIntOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  return emitBigIntUnaryOperationShared<Fn, BigInt::dec>(inputId);
}

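// Try to truncate inline with branchTruncateDoubleMaybeModUint32; when that
// bails out, save the volatile registers and fall back to an ABI call to
// JS::ToInt32.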
bool CacheIRCompiler::emitTruncateDoubleToUInt32(NumberOperandId inputId,
                                                 Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register res = allocator.defineRegister(masm, resultId);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  Label done, truncateABICall;

  masm.branchTruncateDoubleMaybeModUint32(floatReg, res, &truncateABICall);
  masm.jump(&done);

  masm.bind(&truncateABICall);
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(floatReg);
  // Bug 1451976
  save.takeUnchecked(floatReg.get().asSingle());
  masm.PushRegsInMask(save);

  using Fn = int32_t (*)(double);
  masm.setupUnalignedABICall(res);
  masm.passABIArg(floatReg, ABIType::Float64);
  masm.callWithABI<Fn, JS::ToInt32>(ABIType::General,
                                    CheckUnsafeCallWithABI::DontCheckOther);
  masm.storeCallInt32Result(res);

  LiveRegisterSet ignore;
  ignore.add(res);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitDoubleToUint8Clamped(NumberOperandId inputId,
                                               Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register res = allocator.defineRegister(masm, resultId);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  masm.clampDoubleToUint8(floatReg, res);
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectLengthResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectLength(obj, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectLength(ObjOperandId objId,
                                                    Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register res = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectLength(obj, res, failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadArrayBufferByteLengthInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadArrayBufferByteLengthDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ScratchDoubleScope fpscratch(masm);
  masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
  masm.convertIntPtrToDouble(scratch, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}

bool CacheIRCompiler::emitLoadArrayBufferViewLengthInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadArrayBufferViewLengthDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ScratchDoubleScope fpscratch(masm);
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.convertIntPtrToDouble(scratch, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}

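// The flags slot of a BoundFunctionObject also stores the number of bound
// arguments in its upper bits, so unbox the int32 and shift the flag bits
// away.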
bool CacheIRCompiler::emitLoadBoundFunctionNumArgs(ObjOperandId objId,
                                                   Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register output = allocator.defineRegister(masm, resultId);

  masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
                  output);
  masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
  return true;
}

bool CacheIRCompiler::emitLoadBoundFunctionTarget(ObjOperandId objId,
                                                  ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register output = allocator.defineRegister(masm, resultId);

  masm.unboxObject(Address(obj, BoundFunctionObject::offsetOfTargetSlot()),
                   output);
  return true;
}

bool CacheIRCompiler::emitGuardBoundFunctionIsConstructor(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(BoundFunctionObject::IsConstructorFlag),
                    failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardObjectIdentity(ObjOperandId obj1Id,
                                              ObjOperandId obj2Id) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj1 = allocator.useRegister(masm, obj1Id);
  Register obj2 = allocator.useRegister(masm, obj2Id);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchPtr(Assembler::NotEqual, obj1, obj2, failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadFunctionLengthResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Get the JSFunction flags and arg count.
  masm.load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), scratch);

  // Functions with a SelfHostedLazyScript must be compiled with the slow-path
  // before the function length is known. If the length was previously
  // resolved, the length property may be shadowed.
  masm.branchTest32(
      Assembler::NonZero, scratch,
      Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
      failure->label());

  masm.loadFunctionLength(obj, scratch, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadFunctionNameResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadFunctionName(obj, scratch, ImmGCPtr(cx_->names().empty_),
                        failure->label());

  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  return true;
}

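// String char access needs a linear string. The linearize emitters below
// take the fast path when the char can already be loaded directly and
// otherwise call LinearizeForCharAccessPure, jumping to the failure path if
// that call returns nullptr.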
bool CacheIRCompiler::emitLinearizeForCharAccess(StringOperandId strId,
                                                 Int32OperandId indexId,
                                                 StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  Register result = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.movePtr(str, result);

  // We can omit the bounds check, because we only compare the index against
  // the string length. In the worst case we unnecessarily linearize the
  // string when the index is out-of-bounds.

  masm.branchIfCanLoadStringChar(str, index, scratch, &done);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSString*);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(str);
    masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
    masm.storeCallPointerResult(result);

    LiveRegisterSet ignore;
    ignore.add(result);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);

    masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLinearizeForCodePointAccess(
    StringOperandId strId, Int32OperandId indexId, StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  Register result = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.movePtr(str, result);

  // We can omit the bounds check, because we only compare the index against
  // the string length. In the worst case we unnecessarily linearize the
  // string when the index is out-of-bounds.

  masm.branchIfCanLoadStringCodePoint(str, index, scratch1, scratch2, &done);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSString*);
    masm.setupUnalignedABICall(scratch1);
    masm.passABIArg(str);
    masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
    masm.storeCallPointerResult(result);

    LiveRegisterSet ignore;
    ignore.add(result);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);

    masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
  }

  masm.bind(&done);
  return true;
}

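// A relative string index maps negative values to length + index: select
// either 0 or the string length branchlessly, then add the index.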
bool CacheIRCompiler::emitToRelativeStringIndex(Int32OperandId indexId,
                                                StringOperandId strId,
                                                Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register index = allocator.useRegister(masm, indexId);
  Register str = allocator.useRegister(masm, strId);
  Register result = allocator.defineRegister(masm, resultId);

  // If |index| is non-negative, it's an index relative to the start of the
  // string. Otherwise it's an index relative to the end of the string.
  masm.move32(Imm32(0), result);
  masm.cmp32Load32(Assembler::LessThan, index, Imm32(0),
                   Address(str, JSString::offsetOfLength()), result);
  masm.add32(index, result);
  return true;
}

bool CacheIRCompiler::emitLoadStringLengthResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadStringLength(str, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

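// For the char code and code point loads below, |handleOOB| selects between
// bailing out on out-of-bounds indices and returning NaN (or undefined for
// code points) instead.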
bool CacheIRCompiler::emitLoadStringCharCodeResult(StringOperandId strId,
                                                   Int32OperandId indexId,
                                                   bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  // Bounds check, load string char.
  Label done;
  if (!handleOOB) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, failure->label());
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
                        failure->label());
  } else {
    // Return NaN for out-of-bounds access.
    masm.moveValue(JS::NaNValue(), output.valueReg());

    // The bounds check mustn't use a scratch register which aliases the
    // output.
    MOZ_ASSERT(!output.valueReg().aliases(scratch3));

    // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
    // guaranteed to see no nested ropes.
    Label loadFailed;
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch3, &done);
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);

    Label loadedChar;
    masm.jump(&loadedChar);
    masm.bind(&loadFailed);
    masm.assumeUnreachable("loadStringChar can't fail for linear strings");
    masm.bind(&loadedChar);
  }

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadStringCodePointResult(StringOperandId strId,
                                                    Int32OperandId indexId,
                                                    bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  // Bounds check, load string char.
  Label done;
  if (!handleOOB) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, failure->label());
    masm.loadStringCodePoint(str, index, scratch1, scratch2, scratch3,
                             failure->label());
  } else {
    // Return undefined for out-of-bounds access.
    masm.moveValue(JS::UndefinedValue(), output.valueReg());

    // The bounds check mustn't use a scratch register which aliases the
    // output.
    MOZ_ASSERT(!output.valueReg().aliases(scratch3));

    // This CacheIR op is always preceded by |LinearizeForCodePointAccess|, so
    // we're guaranteed to see no nested ropes or split surrogates.
    Label loadFailed;
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch3, &done);
    masm.loadStringCodePoint(str, index, scratch1, scratch2, scratch3,
                             &loadFailed);

    Label loadedChar;
    masm.jump(&loadedChar);
    masm.bind(&loadFailed);
    masm.assumeUnreachable("loadStringCodePoint can't fail for linear strings");
    masm.bind(&loadedChar);
  }

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitNewStringObjectResult(uint32_t templateObjectOffset,
                                                StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSObject* (*)(JSContext*, HandleString);
  callvm.call<Fn, NewStringObject>();
  return true;
}

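// The string searches and case/trim transforms below are implemented as
// plain VM calls: each emitter just pushes its operands and forwards to the
// corresponding js::String* function.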
bool CacheIRCompiler::emitStringIncludesResult(StringOperandId strId,
                                               StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callvm.call<Fn, js::StringIncludes>();
  return true;
}

bool CacheIRCompiler::emitStringIndexOfResult(StringOperandId strId,
                                              StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
  callvm.call<Fn, js::StringIndexOf>();
  return true;
}

bool CacheIRCompiler::emitStringLastIndexOfResult(StringOperandId strId,
                                                  StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
  callvm.call<Fn, js::StringLastIndexOf>();
  return true;
}

bool CacheIRCompiler::emitStringStartsWithResult(StringOperandId strId,
                                                 StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callvm.call<Fn, js::StringStartsWith>();
  return true;
}

bool CacheIRCompiler::emitStringEndsWithResult(StringOperandId strId,
                                               StringOperandId searchStrId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callvm.call<Fn, js::StringEndsWith>();
  return true;
}

bool CacheIRCompiler::emitStringToLowerCaseResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringToLowerCase>();
  return true;
}

bool CacheIRCompiler::emitStringToUpperCaseResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringToUpperCase>();
  return true;
}

bool CacheIRCompiler::emitStringTrimResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringTrim>();
  return true;
}

bool CacheIRCompiler::emitStringTrimStartResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringTrimStart>();
  return true;
}

bool CacheIRCompiler::emitStringTrimEndResult(StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringTrimEnd>();
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectArgResult(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElement(obj, index, output.valueReg(), scratch,
                                  failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectArgHoleResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElementHole(obj, index, output.valueReg(), scratch,
                                      failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectArgExistsResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElementExists(obj, index, scratch2, scratch1,
                                        failure->label());
  EmitStoreResult(masm, scratch2, JSVAL_TYPE_BOOLEAN, output);
  return true;
}

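// Dense element loads guard against reading past the initialized length and
// treat the magic hole value as a miss, so holes never escape the IC.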
bool CacheIRCompiler::emitLoadDenseElementResult(ObjOperandId objId,
                                                 Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);

  // Bounds check.
  Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, scratch2, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch1, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());
  masm.loadTypedOrValue(element, output);
  return true;
}

bool CacheIRCompiler::emitGuardInt32IsNonNegative(Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register index = allocator.useRegister(masm, indexId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIndexIsNotDenseElement(ObjOperandId objId,
                                                      Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);
  AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Ensure index >= initLength or the element is a hole.
  Label notDense;
  Address capacity(scratch, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, capacity, spectreScratch, &notDense);

  BaseValueIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, &notDense);

  masm.jump(failure->label());

  masm.bind(&notDense);
  return true;
}

bool CacheIRCompiler::emitGuardIndexIsValidUpdateOrAdd(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);
  AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  Label success;

  // If length is writable, branch to &success. All indices are writable.
  Address flags(scratch, ObjectElements::offsetOfFlags());
  masm.branchTest32(Assembler::Zero, flags,
                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
                    &success);

  // Otherwise, ensure index is in bounds.
  Address length(scratch, ObjectElements::offsetOfLength());
  masm.spectreBoundsCheck32(index, length, spectreScratch,
                            /* failure = */ failure->label());
  masm.bind(&success);
  return true;
}

bool CacheIRCompiler::emitGuardTagNotEqual(ValueTagOperandId lhsId,
                                           ValueTagOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.branch32(Assembler::Equal, lhs, rhs, failure->label());

  // If both lhs and rhs are numbers, we can't use the tag comparison to
  // determine inequality.
  masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
  masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
  masm.jump(failure->label());

  masm.bind(&done);
  return true;
}

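// Xray expando guards: the holder object lives in the proxy's reserved
// slots and the expando hangs off the holder (see js::jit::GetXrayJitInfo).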
bool CacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto(
    ObjOperandId objId, uint32_t shapeWrapperOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset shapeWrapper(shapeWrapperOffset, StubField::Type::JSObject);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address holderAddress(scratch,
                        sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
  Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
                                      GetXrayJitInfo()->holderExpandoSlot));

  masm.fallibleUnboxObject(holderAddress, scratch, failure->label());
  masm.fallibleUnboxObject(expandoAddress, scratch, failure->label());

  // Unwrap the expando before checking its shape.
  masm.loadPtr(Address(scratch, ProxyObject::offsetOfReservedSlots()), scratch);
  masm.unboxObject(
      Address(scratch, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
      scratch);

  emitLoadStubField(shapeWrapper, scratch2);
  LoadShapeWrapperContents(masm, scratch2, scratch2, failure->label());
  masm.branchTestObjShape(Assembler::NotEqual, scratch, scratch2, scratch3,
                          scratch, failure->label());

  // The reserved slots on the expando should all be in fixed slots.
  Address protoAddress(scratch, NativeObject::getFixedSlotOffset(
                                    GetXrayJitInfo()->expandoProtoSlot));
  masm.branchTestUndefined(Assembler::NotEqual, protoAddress, failure->label());

  return true;
}

bool CacheIRCompiler::emitGuardXrayNoExpando(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address holderAddress(scratch,
                        sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
  Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
                                      GetXrayJitInfo()->holderExpandoSlot));

  Label done;
  masm.fallibleUnboxObject(holderAddress, scratch, &done);
  masm.branchTestObject(Assembler::Equal, expandoAddress, failure->label());
  masm.bind(&done);

  return true;
}

bool CacheIRCompiler::emitGuardNoAllocationMetadataBuilder(
    uint32_t builderAddrOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  StubFieldOffset builderField(builderAddrOffset, StubField::Type::RawPointer);
  emitLoadStubField(builderField, scratch);
  masm.branchPtr(Assembler::NotEqual, Address(scratch, 0), ImmWord(0),
                 failure->label());

  return true;
}

bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId,
                                                   bool constructing) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register fun = allocator.useRegister(masm, funId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfFunctionHasNoJitEntry(fun, constructing, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfFunctionHasJitEntry(obj, /*isConstructing =*/false,
                                   failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardFunctionIsNonBuiltinCtor(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfNotFunctionIsNonBuiltinCtor(fun, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardFunctionIsConstructor(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register funcReg = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Ensure obj is a constructor
  masm.branchTestFunctionFlags(funcReg, FunctionFlags::CONSTRUCTOR,
                               Assembler::Zero, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardNotClassConstructor(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
                          fun, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardArrayIsPacked(ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchArrayIsNotPacked(array, scratch, scratch2, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardArgumentsObjectFlags(ObjOperandId objId,
                                                    uint8_t flags) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestArgumentsObjectFlags(obj, scratch, flags, Assembler::NonZero,
                                      failure->label());
  return true;
}

bool CacheIRCompiler::emitLoadDenseElementHoleResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Make sure the index is nonnegative.
  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);

  // Guard on the initialized length.
  Label hole;
  Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, scratch2, &hole);

  // Load the value.
  Label done;
  masm.loadValue(BaseObjectElementIndex(scratch1, index), output.valueReg());
  masm.branchTestMagic(Assembler::NotEqual, output.valueReg(), &done);

  // Load undefined for the hole.
  masm.bind(&hole);
  masm.moveValue(UndefinedValue(), output.valueReg());

  masm.bind(&done);
  return true;
}

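// The typed array |exists| check only needs a length comparison; the
// unsigned compare also sends negative indices to the out-of-bounds path.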
bool CacheIRCompiler::emitLoadTypedArrayElementExistsResult(
    ObjOperandId objId, IntPtrOperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label outOfBounds, done;

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.branchPtr(Assembler::BelowOrEqual, scratch, index, &outOfBounds);
  EmitStoreBoolean(masm, true, output);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  EmitStoreBoolean(masm, false, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadDenseElementExistsResult(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Bounds check. Unsigned compare sends negative indices to next IC.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());

  EmitStoreBoolean(masm, true, output);
  return true;
}

bool CacheIRCompiler::emitLoadDenseElementHoleExistsResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Make sure the index is nonnegative.
  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Guard on the initialized length.
  Label hole;
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::BelowOrEqual, initLength, index, &hole);

  // Load value and replace with true.
  Label done;
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, &hole);
  EmitStoreBoolean(masm, true, output);
  masm.jump(&done);

  // Load false for the hole.
  masm.bind(&hole);
  EmitStoreBoolean(masm, false, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitPackedArrayPopResult(ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.packedArrayPop(array, output.valueReg(), scratch1, scratch2,
                      failure->label());
  return true;
}

bool CacheIRCompiler::emitPackedArrayShiftResult(ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.packedArrayShift(array, output.valueReg(), scratch1, scratch2,
                        volatileRegs, failure->label());
  return true;
}

bool CacheIRCompiler::emitIsObjectResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ValueOperand val = allocator.useValueRegister(masm, inputId);

  masm.testObjectSet(Assembler::Equal, val, scratch);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitIsPackedArrayResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  Register outputScratch = output.valueReg().scratchReg();
  masm.setIsPackedArray(obj, outputScratch, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitIsCallableResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  ValueOperand val = allocator.useValueRegister(masm, inputId);

  Label isObject, done;
  masm.branchTestObject(Assembler::Equal, val, &isObject);
  // Primitives are never callable.
  masm.move32(Imm32(0), scratch2);
  masm.jump(&done);

  masm.bind(&isObject);
  masm.unboxObject(val, scratch1);

  Label isProxy;
  masm.isCallable(scratch1, scratch2, &isProxy);
  masm.jump(&done);

  masm.bind(&isProxy);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject* obj);
    masm.setupUnalignedABICall(scratch2);
    masm.passABIArg(scratch1);
    masm.callWithABI<Fn, ObjectIsCallable>();
    masm.storeCallBoolResult(scratch2);

    LiveRegisterSet ignore;
    ignore.add(scratch2);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitIsConstructorResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register obj = allocator.useRegister(masm, objId);

  Label isProxy, done;
  masm.isConstructor(obj, scratch, &isProxy);
  masm.jump(&done);

  masm.bind(&isProxy);
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject* obj);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, ObjectIsConstructor>();
    masm.storeCallBoolResult(scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitIsCrossRealmArrayConstructorResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  masm.setIsCrossRealmArrayConstructor(obj, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitArrayBufferViewByteOffsetInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitArrayBufferViewByteOffsetDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ScratchDoubleScope fpscratch(masm);
  masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
  masm.convertIntPtrToDouble(scratch, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}

bool CacheIRCompiler::emitTypedArrayByteLengthInt32Result(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
  masm.typedArrayElementSize(obj, scratch2);

  masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
                   failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitTypedArrayByteLengthDoubleResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  masm.typedArrayElementSize(obj, scratch2);
  masm.mulPtr(scratch2, scratch1);

  ScratchDoubleScope fpscratch(masm);
  masm.convertIntPtrToDouble(scratch1, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}

bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  masm.typedArrayElementSize(obj, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfHasDetachedArrayBuffer(obj, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitIsTypedArrayConstructorResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  masm.setIsDefinitelyTypedArrayConstructor(obj, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitGetNextMapSetEntryForIteratorResult(
    ObjOperandId iterId, ObjOperandId resultArrId, bool isMap) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register iter = allocator.useRegister(masm, iterId);
  Register resultArr = allocator.useRegister(masm, resultArrId);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(output.valueReg());
  save.takeUnchecked(scratch);
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(iter);
  masm.passABIArg(resultArr);
  if (isMap) {
    using Fn = bool (*)(MapIteratorObject* iter, ArrayObject* resultPairObj);
    masm.callWithABI<Fn, MapIteratorObject::next>();
  } else {
    using Fn = bool (*)(SetIteratorObject* iter, ArrayObject* resultObj);
    masm.callWithABI<Fn, SetIteratorObject::next>();
  }
  masm.storeCallBoolResult(scratch);

  masm.PopRegsInMask(save);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

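// Activating an iterator: record the object being iterated, set the Active
// flag, emit the post-write barrier for the new edge, and link the native
// iterator into the enumerators list loaded from the stub field.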
5113 void CacheIRCompiler::emitActivateIterator(Register objBeingIterated,
5114 Register iterObject,
5115 Register nativeIter,
5116 Register scratch, Register scratch2,
5117 uint32_t enumeratorsAddrOffset) {
5118 // 'objectBeingIterated_' must be nullptr, so we don't need a pre-barrier.
5119 Address iterObjAddr(nativeIter,
5120 NativeIterator::offsetOfObjectBeingIterated());
5121 #ifdef DEBUG
5122 Label ok;
5123 masm.branchPtr(Assembler::Equal, iterObjAddr, ImmPtr(nullptr), &ok);
5124 masm.assumeUnreachable("iterator with non-null object");
5125 masm.bind(&ok);
5126 #endif
5128 // Mark iterator as active.
5129 Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
5130 masm.storePtr(objBeingIterated, iterObjAddr);
5131 masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
5133 // Post-write barrier for stores to 'objectBeingIterated_'.
5134 emitPostBarrierSlot(
5135 iterObject,
5136 TypedOrValueRegister(MIRType::Object, AnyRegister(objBeingIterated)),
5137 scratch);
5139 // Chain onto the active iterator stack.
5140 StubFieldOffset enumeratorsAddr(enumeratorsAddrOffset,
5141 StubField::Type::RawPointer);
5142 emitLoadStubField(enumeratorsAddr, scratch);
5143 masm.registerIterator(scratch, nativeIter, scratch2);
5146 bool CacheIRCompiler::emitObjectToIteratorResult(
5147 ObjOperandId objId, uint32_t enumeratorsAddrOffset) {
5148 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5150 AutoCallVM callvm(masm, this, allocator);
5151 Register obj = allocator.useRegister(masm, objId);
5153 AutoScratchRegister iterObj(allocator, masm);
5154 AutoScratchRegister scratch(allocator, masm);
5155 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, callvm.output());
5156 AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, callvm.output());
5158 Label callVM, done;
5159 masm.maybeLoadIteratorFromShape(obj, iterObj, scratch, scratch2, scratch3,
5160 &callVM);
5162 masm.loadPrivate(
5163 Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
5164 scratch);
5166 emitActivateIterator(obj, iterObj, scratch, scratch2, scratch3,
5167 enumeratorsAddrOffset);
5168 masm.jump(&done);
5170 masm.bind(&callVM);
5171 callvm.prepare();
5172 masm.Push(obj);
5173 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
5174 callvm.call<Fn, GetIterator>();
5175 masm.storeCallPointerResult(iterObj);
5177 masm.bind(&done);
5178 EmitStoreResult(masm, iterObj, JSVAL_TYPE_OBJECT, callvm.output());
5179 return true;
5182 bool CacheIRCompiler::emitValueToIteratorResult(ValOperandId valId) {
5183 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5185 AutoCallVM callvm(masm, this, allocator);
5187 ValueOperand val = allocator.useValueRegister(masm, valId);
5189 callvm.prepare();
5191 masm.Push(val);
5193 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
5194 callvm.call<Fn, ValueToIterator>();
5195 return true;
5198 bool CacheIRCompiler::emitNewArrayIteratorResult(
5199 uint32_t templateObjectOffset) {
5200 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5202 AutoCallVM callvm(masm, this, allocator);
5204 callvm.prepare();
5206 using Fn = ArrayIteratorObject* (*)(JSContext*);
5207 callvm.call<Fn, NewArrayIterator>();
5208 return true;
5211 bool CacheIRCompiler::emitNewStringIteratorResult(
5212 uint32_t templateObjectOffset) {
5213 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5215 AutoCallVM callvm(masm, this, allocator);
5217 callvm.prepare();
5219 using Fn = StringIteratorObject* (*)(JSContext*);
5220 callvm.call<Fn, NewStringIterator>();
5221 return true;
5224 bool CacheIRCompiler::emitNewRegExpStringIteratorResult(
5225 uint32_t templateObjectOffset) {
5226 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5228 AutoCallVM callvm(masm, this, allocator);
5230 callvm.prepare();
5232 using Fn = RegExpStringIteratorObject* (*)(JSContext*);
5233 callvm.call<Fn, NewRegExpStringIterator>();
5234 return true;
5237 bool CacheIRCompiler::emitObjectCreateResult(uint32_t templateObjectOffset) {
5238 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5240 AutoCallVM callvm(masm, this, allocator);
5241 AutoScratchRegister scratch(allocator, masm);
5243 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5244 emitLoadStubField(objectField, scratch);
5246 callvm.prepare();
5247 masm.Push(scratch);
5249 using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
5250 callvm.call<Fn, ObjectCreateWithTemplate>();
5251 return true;
5254 bool CacheIRCompiler::emitObjectKeysResult(ObjOperandId objId) {
5255 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5257 AutoCallVM callvm(masm, this, allocator);
5258 Register obj = allocator.useRegister(masm, objId);
5260 // Our goal is only to record calls to Object.keys, to elide it when
5261 // partially used, not to provide an alternative implementation.
5263 callvm.prepare();
5264 masm.Push(obj);
5266 using Fn = JSObject* (*)(JSContext*, HandleObject);
5267 callvm.call<Fn, jit::ObjectKeys>();
5270 return true;
5273 bool CacheIRCompiler::emitNewArrayFromLengthResult(
5274 uint32_t templateObjectOffset, Int32OperandId lengthId) {
5275 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5277 AutoCallVM callvm(masm, this, allocator);
5278 AutoScratchRegister scratch(allocator, masm);
5279 Register length = allocator.useRegister(masm, lengthId);
5281 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5282 emitLoadStubField(objectField, scratch);
5284 callvm.prepare();
5285 masm.Push(length);
5286 masm.Push(scratch);
5288 using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
5289 callvm.call<Fn, ArrayConstructorOneArg>();
5290 return true;
5293 bool CacheIRCompiler::emitNewTypedArrayFromLengthResult(
5294 uint32_t templateObjectOffset, Int32OperandId lengthId) {
5295 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5297 AutoCallVM callvm(masm, this, allocator);
5298 AutoScratchRegister scratch(allocator, masm);
5299 Register length = allocator.useRegister(masm, lengthId);
5301 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5302 emitLoadStubField(objectField, scratch);
5304 callvm.prepare();
5305 masm.Push(length);
5306 masm.Push(scratch);
5308 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
5309 callvm.call<Fn, NewTypedArrayWithTemplateAndLength>();
5310 return true;
5313 bool CacheIRCompiler::emitNewTypedArrayFromArrayBufferResult(
5314 uint32_t templateObjectOffset, ObjOperandId bufferId,
5315 ValOperandId byteOffsetId, ValOperandId lengthId) {
5316 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5318 #ifdef JS_CODEGEN_X86
5319 MOZ_CRASH("Instruction not supported on 32-bit x86, not enough registers");
5320 #endif
5322 AutoCallVM callvm(masm, this, allocator);
5323 AutoScratchRegister scratch(allocator, masm);
5324 Register buffer = allocator.useRegister(masm, bufferId);
5325 ValueOperand byteOffset = allocator.useValueRegister(masm, byteOffsetId);
5326 ValueOperand length = allocator.useValueRegister(masm, lengthId);
5328 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5329 emitLoadStubField(objectField, scratch);
5331 callvm.prepare();
5332 masm.Push(length);
5333 masm.Push(byteOffset);
5334 masm.Push(buffer);
5335 masm.Push(scratch);
5337 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
5338 HandleValue, HandleValue);
5339 callvm.call<Fn, NewTypedArrayWithTemplateAndBuffer>();
5340 return true;
5341 }
5343 bool CacheIRCompiler::emitNewTypedArrayFromArrayResult(
5344 uint32_t templateObjectOffset, ObjOperandId arrayId) {
5345 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5347 AutoCallVM callvm(masm, this, allocator);
5348 AutoScratchRegister scratch(allocator, masm);
5349 Register array = allocator.useRegister(masm, arrayId);
5351 StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
5352 emitLoadStubField(objectField, scratch);
5354 callvm.prepare();
5355 masm.Push(array);
5356 masm.Push(scratch);
5358 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
5359 callvm.call<Fn, NewTypedArrayWithTemplateAndArray>();
5360 return true;
5361 }
5363 bool CacheIRCompiler::emitAddSlotAndCallAddPropHook(ObjOperandId objId,
5364 ValOperandId rhsId,
5365 uint32_t newShapeOffset) {
5366 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5368 AutoCallVM callvm(masm, this, allocator);
5370 AutoScratchRegister scratch(allocator, masm);
5371 Register obj = allocator.useRegister(masm, objId);
5372 ValueOperand rhs = allocator.useValueRegister(masm, rhsId);
5374 StubFieldOffset shapeField(newShapeOffset, StubField::Type::Shape);
5375 emitLoadStubField(shapeField, scratch);
5377 callvm.prepare();
5379 masm.Push(scratch);
5380 masm.Push(rhs);
5381 masm.Push(obj);
5383 using Fn =
5384 bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
5385 callvm.callNoResult<Fn, AddSlotAndCallAddPropHook>();
5386 return true;
5387 }
5389 bool CacheIRCompiler::emitMathAbsInt32Result(Int32OperandId inputId) {
5390 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5392 AutoOutputRegister output(*this);
5393 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5395 Register input = allocator.useRegister(masm, inputId);
5397 FailurePath* failure;
5398 if (!addFailurePath(&failure)) {
5399 return false;
5400 }
5402 masm.mov(input, scratch);
5403 // Don't negate already positive values.
5404 Label positive;
5405 masm.branchTest32(Assembler::NotSigned, scratch, scratch, &positive);
5406 // neg32 might overflow for INT_MIN.
5407 masm.branchNeg32(Assembler::Overflow, scratch, failure->label());
5408 masm.bind(&positive);
5410 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5411 return true;
5412 }
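// Reference semantics for the int32 fast path above: -INT32_MIN is not
// representable as an int32, so that single input must take the failure path
// (and ultimately the double path). A scalar sketch, assuming INT32_MIN from
// <stdint.h> is available here:
[[maybe_unused]] static bool MathAbsInt32Ref(int32_t in, int32_t* out) {
  if (in == INT32_MIN) {
    return false;  // negating would overflow, mirroring branchNeg32 above
  }
  *out = in < 0 ? -in : in;
  return true;
}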
5414 bool CacheIRCompiler::emitMathAbsNumberResult(NumberOperandId inputId) {
5415 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5417 AutoOutputRegister output(*this);
5418 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5420 allocator.ensureDoubleRegister(masm, inputId, scratch);
5422 masm.absDouble(scratch, scratch);
5423 masm.boxDouble(scratch, output.valueReg(), scratch);
5424 return true;
5425 }
5427 bool CacheIRCompiler::emitMathClz32Result(Int32OperandId inputId) {
5428 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5430 AutoOutputRegister output(*this);
5431 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5432 Register input = allocator.useRegister(masm, inputId);
5434 masm.clz32(input, scratch, /* knownNotZero = */ false);
5435 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5436 return true;
5437 }
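// Reference semantics for Math.clz32 above. knownNotZero is false because the
// input may be zero, for which clz32 must return 32 (C++20's std::countl_zero
// on uint32_t behaves the same way). Illustrative sketch:
[[maybe_unused]] static int32_t MathClz32Ref(uint32_t x) {
  int32_t leading = 0;
  while (leading < 32 && (x & 0x80000000u) == 0) {
    x <<= 1;
    leading++;
  }
  return leading;
}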
5439 bool CacheIRCompiler::emitMathSignInt32Result(Int32OperandId inputId) {
5440 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5442 AutoOutputRegister output(*this);
5443 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5444 Register input = allocator.useRegister(masm, inputId);
5446 masm.signInt32(input, scratch);
5447 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5448 return true;
5449 }
5451 bool CacheIRCompiler::emitMathSignNumberResult(NumberOperandId inputId) {
5452 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5454 AutoOutputRegister output(*this);
5455 AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
5456 AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
5458 allocator.ensureDoubleRegister(masm, inputId, floatScratch1);
5460 masm.signDouble(floatScratch1, floatScratch2);
5461 masm.boxDouble(floatScratch2, output.valueReg(), floatScratch2);
5462 return true;
5463 }
5465 bool CacheIRCompiler::emitMathSignNumberToInt32Result(NumberOperandId inputId) {
5466 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5468 AutoOutputRegister output(*this);
5469 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5470 AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
5471 AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
5473 FailurePath* failure;
5474 if (!addFailurePath(&failure)) {
5475 return false;
5476 }
5478 allocator.ensureDoubleRegister(masm, inputId, floatScratch1);
5480 masm.signDoubleToInt32(floatScratch1, scratch, floatScratch2,
5481 failure->label());
5482 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5483 return true;
5484 }
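// Reference semantics for the Math.sign emitters above (illustrative sketch,
// assuming std::isnan is available): NaN and both zeros are returned
// unchanged, which is also why the ToInt32 variant must bail out for NaN and
// -0 -- neither is representable as an int32 Value.
[[maybe_unused]] static double MathSignRef(double x) {
  if (std::isnan(x) || x == 0) {
    return x;  // NaN, +0 and -0 flow through unchanged
  }
  return x < 0 ? -1.0 : 1.0;
}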
5486 bool CacheIRCompiler::emitMathImulResult(Int32OperandId lhsId,
5487 Int32OperandId rhsId) {
5488 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5490 AutoOutputRegister output(*this);
5491 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5492 Register lhs = allocator.useRegister(masm, lhsId);
5493 Register rhs = allocator.useRegister(masm, rhsId);
5495 masm.mov(lhs, scratch);
5496 masm.mul32(rhs, scratch);
5497 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5498 return true;
5499 }
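// Reference semantics for Math.imul above: mul32 performs a plain 32-bit
// multiply whose low 32 bits wrap around. In C++ the wraparound has to be
// done on unsigned types to avoid signed-overflow UB. Illustrative sketch:
[[maybe_unused]] static int32_t MathImulRef(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) *
                              static_cast<uint32_t>(b));
}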
5501 bool CacheIRCompiler::emitMathSqrtNumberResult(NumberOperandId inputId) {
5502 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5504 AutoOutputRegister output(*this);
5505 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5507 allocator.ensureDoubleRegister(masm, inputId, scratch);
5509 masm.sqrtDouble(scratch, scratch);
5510 masm.boxDouble(scratch, output.valueReg(), scratch);
5511 return true;
5512 }
5514 bool CacheIRCompiler::emitMathFloorNumberResult(NumberOperandId inputId) {
5515 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5517 AutoOutputRegister output(*this);
5518 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5520 allocator.ensureDoubleRegister(masm, inputId, scratch);
5522 if (Assembler::HasRoundInstruction(RoundingMode::Down)) {
5523 masm.nearbyIntDouble(RoundingMode::Down, scratch, scratch);
5524 masm.boxDouble(scratch, output.valueReg(), scratch);
5525 return true;
5526 }
5528 return emitMathFunctionNumberResultShared(UnaryMathFunction::Floor, scratch,
5529 output.valueReg());
5530 }
5532 bool CacheIRCompiler::emitMathCeilNumberResult(NumberOperandId inputId) {
5533 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5535 AutoOutputRegister output(*this);
5536 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5538 allocator.ensureDoubleRegister(masm, inputId, scratch);
5540 if (Assembler::HasRoundInstruction(RoundingMode::Up)) {
5541 masm.nearbyIntDouble(RoundingMode::Up, scratch, scratch);
5542 masm.boxDouble(scratch, output.valueReg(), scratch);
5543 return true;
5544 }
5546 return emitMathFunctionNumberResultShared(UnaryMathFunction::Ceil, scratch,
5547 output.valueReg());
5548 }
5550 bool CacheIRCompiler::emitMathTruncNumberResult(NumberOperandId inputId) {
5551 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5553 AutoOutputRegister output(*this);
5554 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5556 allocator.ensureDoubleRegister(masm, inputId, scratch);
5558 if (Assembler::HasRoundInstruction(RoundingMode::TowardsZero)) {
5559 masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
5560 masm.boxDouble(scratch, output.valueReg(), scratch);
5561 return true;
5562 }
5564 return emitMathFunctionNumberResultShared(UnaryMathFunction::Trunc, scratch,
5565 output.valueReg());
5566 }
5568 bool CacheIRCompiler::emitMathFRoundNumberResult(NumberOperandId inputId) {
5569 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5571 AutoOutputRegister output(*this);
5572 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5573 FloatRegister scratchFloat32 = scratch.get().asSingle();
5575 allocator.ensureDoubleRegister(masm, inputId, scratch);
5577 masm.convertDoubleToFloat32(scratch, scratchFloat32);
5578 masm.convertFloat32ToDouble(scratchFloat32, scratch);
5580 masm.boxDouble(scratch, output.valueReg(), scratch);
5581 return true;
5582 }
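// Math.fround is exactly a round trip through float32 precision, which is
// what the two conversions above implement. Illustrative sketch:
[[maybe_unused]] static double MathFRoundRef(double x) {
  return static_cast<double>(static_cast<float>(x));
}
// For example, MathFRoundRef(0.1) yields 0.10000000149011612, the nearest
// representable float32.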
5584 bool CacheIRCompiler::emitMathHypot2NumberResult(NumberOperandId first,
5585 NumberOperandId second) {
5586 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5587 AutoOutputRegister output(*this);
5588 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5590 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
5591 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
5593 allocator.ensureDoubleRegister(masm, first, floatScratch0);
5594 allocator.ensureDoubleRegister(masm, second, floatScratch1);
5596 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5597 masm.PushRegsInMask(save);
5599 using Fn = double (*)(double x, double y);
5600 masm.setupUnalignedABICall(scratch);
5601 masm.passABIArg(floatScratch0, ABIType::Float64);
5602 masm.passABIArg(floatScratch1, ABIType::Float64);
5604 masm.callWithABI<Fn, ecmaHypot>(ABIType::Float64);
5605 masm.storeCallFloatResult(floatScratch0);
5607 LiveRegisterSet ignore;
5608 ignore.add(floatScratch0);
5609 masm.PopRegsInMaskIgnore(save, ignore);
5611 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
5612 return true;
5613 }
5615 bool CacheIRCompiler::emitMathHypot3NumberResult(NumberOperandId first,
5616 NumberOperandId second,
5617 NumberOperandId third) {
5618 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5619 AutoOutputRegister output(*this);
5620 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5622 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
5623 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
5624 AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
5626 allocator.ensureDoubleRegister(masm, first, floatScratch0);
5627 allocator.ensureDoubleRegister(masm, second, floatScratch1);
5628 allocator.ensureDoubleRegister(masm, third, floatScratch2);
5630 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5631 masm.PushRegsInMask(save);
5633 using Fn = double (*)(double x, double y, double z);
5634 masm.setupUnalignedABICall(scratch);
5635 masm.passABIArg(floatScratch0, ABIType::Float64);
5636 masm.passABIArg(floatScratch1, ABIType::Float64);
5637 masm.passABIArg(floatScratch2, ABIType::Float64);
5639 masm.callWithABI<Fn, hypot3>(ABIType::Float64);
5640 masm.storeCallFloatResult(floatScratch0);
5642 LiveRegisterSet ignore;
5643 ignore.add(floatScratch0);
5644 masm.PopRegsInMaskIgnore(save, ignore);
5646 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
5647 return true;
5648 }
5650 bool CacheIRCompiler::emitMathHypot4NumberResult(NumberOperandId first,
5651 NumberOperandId second,
5652 NumberOperandId third,
5653 NumberOperandId fourth) {
5654 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5655 AutoOutputRegister output(*this);
5656 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5658 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
5659 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
5660 AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
5661 AutoAvailableFloatRegister floatScratch3(*this, FloatReg3);
5663 allocator.ensureDoubleRegister(masm, first, floatScratch0);
5664 allocator.ensureDoubleRegister(masm, second, floatScratch1);
5665 allocator.ensureDoubleRegister(masm, third, floatScratch2);
5666 allocator.ensureDoubleRegister(masm, fourth, floatScratch3);
5668 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5669 masm.PushRegsInMask(save);
5671 using Fn = double (*)(double x, double y, double z, double w);
5672 masm.setupUnalignedABICall(scratch);
5673 masm.passABIArg(floatScratch0, ABIType::Float64);
5674 masm.passABIArg(floatScratch1, ABIType::Float64);
5675 masm.passABIArg(floatScratch2, ABIType::Float64);
5676 masm.passABIArg(floatScratch3, ABIType::Float64);
5678 masm.callWithABI<Fn, hypot4>(ABIType::Float64);
5679 masm.storeCallFloatResult(floatScratch0);
5681 LiveRegisterSet ignore;
5682 ignore.add(floatScratch0);
5683 masm.PopRegsInMaskIgnore(save, ignore);
5685 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
5686 return true;
5687 }
5689 bool CacheIRCompiler::emitMathAtan2NumberResult(NumberOperandId yId,
5690 NumberOperandId xId) {
5691 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5692 AutoOutputRegister output(*this);
5693 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5695 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
5696 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
5698 allocator.ensureDoubleRegister(masm, yId, floatScratch0);
5699 allocator.ensureDoubleRegister(masm, xId, floatScratch1);
5701 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5702 masm.PushRegsInMask(save);
5704 using Fn = double (*)(double x, double y);
5705 masm.setupUnalignedABICall(scratch);
5706 masm.passABIArg(floatScratch0, ABIType::Float64);
5707 masm.passABIArg(floatScratch1, ABIType::Float64);
5708 masm.callWithABI<Fn, js::ecmaAtan2>(ABIType::Float64);
5709 masm.storeCallFloatResult(floatScratch0);
5711 LiveRegisterSet ignore;
5712 ignore.add(floatScratch0);
5713 masm.PopRegsInMaskIgnore(save, ignore);
5715 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
5717 return true;
5718 }
5720 bool CacheIRCompiler::emitMathFloorToInt32Result(NumberOperandId inputId) {
5721 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5723 AutoOutputRegister output(*this);
5724 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5726 AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
5728 FailurePath* failure;
5729 if (!addFailurePath(&failure)) {
5730 return false;
5731 }
5733 allocator.ensureDoubleRegister(masm, inputId, scratchFloat);
5735 masm.floorDoubleToInt32(scratchFloat, scratch, failure->label());
5737 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5738 return true;
5739 }
5741 bool CacheIRCompiler::emitMathCeilToInt32Result(NumberOperandId inputId) {
5742 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5744 AutoOutputRegister output(*this);
5745 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5747 AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
5749 FailurePath* failure;
5750 if (!addFailurePath(&failure)) {
5751 return false;
5752 }
5754 allocator.ensureDoubleRegister(masm, inputId, scratchFloat);
5756 masm.ceilDoubleToInt32(scratchFloat, scratch, failure->label());
5758 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5759 return true;
5760 }
5762 bool CacheIRCompiler::emitMathTruncToInt32Result(NumberOperandId inputId) {
5763 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5765 AutoOutputRegister output(*this);
5766 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5768 AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
5770 FailurePath* failure;
5771 if (!addFailurePath(&failure)) {
5772 return false;
5773 }
5775 allocator.ensureDoubleRegister(masm, inputId, scratchFloat);
5777 masm.truncDoubleToInt32(scratchFloat, scratch, failure->label());
5779 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5780 return true;
5781 }
5783 bool CacheIRCompiler::emitMathRoundToInt32Result(NumberOperandId inputId) {
5784 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5786 AutoOutputRegister output(*this);
5787 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
5789 AutoAvailableFloatRegister scratchFloat0(*this, FloatReg0);
5790 AutoAvailableFloatRegister scratchFloat1(*this, FloatReg1);
5792 FailurePath* failure;
5793 if (!addFailurePath(&failure)) {
5794 return false;
5795 }
5797 allocator.ensureDoubleRegister(masm, inputId, scratchFloat0);
5799 masm.roundDoubleToInt32(scratchFloat0, scratch, scratchFloat1,
5800 failure->label());
5802 masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
5803 return true;
5804 }
5806 bool CacheIRCompiler::emitInt32MinMax(bool isMax, Int32OperandId firstId,
5807 Int32OperandId secondId,
5808 Int32OperandId resultId) {
5809 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5811 Register first = allocator.useRegister(masm, firstId);
5812 Register second = allocator.useRegister(masm, secondId);
5813 Register result = allocator.defineRegister(masm, resultId);
5815 Assembler::Condition cond =
5816 isMax ? Assembler::GreaterThan : Assembler::LessThan;
5817 masm.move32(first, result);
5818 masm.cmp32Move32(cond, second, first, second, result);
5819 return true;
5820 }
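// Scalar model of the branchless sequence above: |result| starts as |first|
// and cmp32Move32 conditionally overwrites it with |second|. Illustrative
// sketch:
[[maybe_unused]] static int32_t Int32MinMaxRef(bool isMax, int32_t first,
                                               int32_t second) {
  int32_t result = first;
  if (isMax ? second > first : second < first) {
    result = second;
  }
  return result;
}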
5822 bool CacheIRCompiler::emitNumberMinMax(bool isMax, NumberOperandId firstId,
5823 NumberOperandId secondId,
5824 NumberOperandId resultId) {
5825 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5827 ValueOperand output = allocator.defineValueRegister(masm, resultId);
5829 AutoAvailableFloatRegister scratch1(*this, FloatReg0);
5830 AutoAvailableFloatRegister scratch2(*this, FloatReg1);
5832 allocator.ensureDoubleRegister(masm, firstId, scratch1);
5833 allocator.ensureDoubleRegister(masm, secondId, scratch2);
5835 if (isMax) {
5836 masm.maxDouble(scratch2, scratch1, /* handleNaN = */ true);
5837 } else {
5838 masm.minDouble(scratch2, scratch1, /* handleNaN = */ true);
5839 }
5841 masm.boxDouble(scratch1, output, scratch1);
5842 return true;
5843 }
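// handleNaN above selects JS semantics instead of a raw IEEE compare: any NaN
// operand yields NaN, and -0 orders below +0. Illustrative sketch, assuming
// std::isnan/std::signbit and <limits> are available:
[[maybe_unused]] static double NumberMaxRef(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<double>::quiet_NaN();
  }
  if (a == 0 && b == 0) {
    return std::signbit(a) ? b : a;  // Math.max(-0, +0) must be +0
  }
  return a > b ? a : b;
}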
5845 bool CacheIRCompiler::emitInt32MinMaxArrayResult(ObjOperandId arrayId,
5846 bool isMax) {
5847 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5849 AutoOutputRegister output(*this);
5850 Register array = allocator.useRegister(masm, arrayId);
5852 AutoScratchRegister scratch(allocator, masm);
5853 AutoScratchRegister scratch2(allocator, masm);
5854 AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);
5855 AutoScratchRegisterMaybeOutput result(allocator, masm, output);
5857 FailurePath* failure;
5858 if (!addFailurePath(&failure)) {
5859 return false;
5860 }
5862 masm.minMaxArrayInt32(array, result, scratch, scratch2, scratch3, isMax,
5863 failure->label());
5864 masm.tagValue(JSVAL_TYPE_INT32, result, output.valueReg());
5865 return true;
5866 }
5868 bool CacheIRCompiler::emitNumberMinMaxArrayResult(ObjOperandId arrayId,
5869 bool isMax) {
5870 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5872 AutoOutputRegister output(*this);
5873 Register array = allocator.useRegister(masm, arrayId);
5875 AutoAvailableFloatRegister result(*this, FloatReg0);
5876 AutoAvailableFloatRegister floatScratch(*this, FloatReg1);
5878 AutoScratchRegister scratch1(allocator, masm);
5879 AutoScratchRegister scratch2(allocator, masm);
5881 FailurePath* failure;
5882 if (!addFailurePath(&failure)) {
5883 return false;
5884 }
5886 masm.minMaxArrayNumber(array, result, floatScratch, scratch1, scratch2, isMax,
5887 failure->label());
5888 masm.boxDouble(result, output.valueReg(), result);
5889 return true;
5890 }
5892 bool CacheIRCompiler::emitMathFunctionNumberResultShared(
5893 UnaryMathFunction fun, FloatRegister inputScratch, ValueOperand output) {
5894 UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);
5896 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5897 save.takeUnchecked(inputScratch);
5898 masm.PushRegsInMask(save);
5900 masm.setupUnalignedABICall(output.scratchReg());
5901 masm.passABIArg(inputScratch, ABIType::Float64);
5902 masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
5903 ABIType::Float64);
5904 masm.storeCallFloatResult(inputScratch);
5906 masm.PopRegsInMask(save);
5908 masm.boxDouble(inputScratch, output, inputScratch);
5909 return true;
5910 }
5912 bool CacheIRCompiler::emitMathFunctionNumberResult(NumberOperandId inputId,
5913 UnaryMathFunction fun) {
5914 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5916 AutoOutputRegister output(*this);
5917 AutoAvailableFloatRegister scratch(*this, FloatReg0);
5919 allocator.ensureDoubleRegister(masm, inputId, scratch);
5921 return emitMathFunctionNumberResultShared(fun, scratch, output.valueReg());
5922 }
5924 static void EmitStoreDenseElement(MacroAssembler& masm,
5925 const ConstantOrRegister& value,
5926 BaseObjectElementIndex target) {
5927 if (value.constant()) {
5928 Value v = value.value();
5929 masm.storeValue(v, target);
5930 return;
5931 }
5933 TypedOrValueRegister reg = value.reg();
5934 masm.storeTypedOrValue(reg, target);
5935 }
5937 bool CacheIRCompiler::emitStoreDenseElement(ObjOperandId objId,
5938 Int32OperandId indexId,
5939 ValOperandId rhsId) {
5940 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5942 Register obj = allocator.useRegister(masm, objId);
5943 Register index = allocator.useRegister(masm, indexId);
5944 ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
5946 AutoScratchRegister scratch(allocator, masm);
5948 FailurePath* failure;
5949 if (!addFailurePath(&failure)) {
5950 return false;
5951 }
5953 // Load obj->elements in scratch.
5954 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
5956 // Bounds check. Unfortunately we don't have enough spare registers on
5957 // x86, so use InvalidReg there and emit slightly slower code.
5958 Register spectreTemp = InvalidReg;
5959 Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
5960 masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());
5962 // Hole check.
5963 BaseObjectElementIndex element(scratch, index);
5964 masm.branchTestMagic(Assembler::Equal, element, failure->label());
5966 // Perform the store.
5967 EmitPreBarrier(masm, element, MIRType::Value);
5968 EmitStoreDenseElement(masm, val, element);
5970 emitPostBarrierElement(obj, val, scratch, index);
5971 return true;
5972 }
5974 static void EmitAssertExtensibleElements(MacroAssembler& masm,
5975 Register elementsReg) {
5976 #ifdef DEBUG
5977 // Preceding shape guards ensure the object elements are extensible.
5978 Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
5979 Label ok;
5980 masm.branchTest32(Assembler::Zero, elementsFlags,
5981 Imm32(ObjectElements::Flags::NOT_EXTENSIBLE), &ok);
5982 masm.assumeUnreachable("Unexpected non-extensible elements");
5983 masm.bind(&ok);
5984 #endif
5985 }
5987 static void EmitAssertWritableArrayLengthElements(MacroAssembler& masm,
5988 Register elementsReg) {
5989 #ifdef DEBUG
5990 // Preceding shape guards ensure the array length is writable.
5991 Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
5992 Label ok;
5993 masm.branchTest32(Assembler::Zero, elementsFlags,
5994 Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
5995 &ok);
5996 masm.assumeUnreachable("Unexpected non-writable array length elements");
5997 masm.bind(&ok);
5998 #endif
5999 }
6001 bool CacheIRCompiler::emitStoreDenseElementHole(ObjOperandId objId,
6002 Int32OperandId indexId,
6003 ValOperandId rhsId,
6004 bool handleAdd) {
6005 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6007 Register obj = allocator.useRegister(masm, objId);
6008 Register index = allocator.useRegister(masm, indexId);
6009 ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
6011 AutoScratchRegister scratch(allocator, masm);
6013 FailurePath* failure;
6014 if (!addFailurePath(&failure)) {
6015 return false;
6016 }
6018 // Load obj->elements in scratch.
6019 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
6021 EmitAssertExtensibleElements(masm, scratch);
6022 if (handleAdd) {
6023 EmitAssertWritableArrayLengthElements(masm, scratch);
6024 }
6026 BaseObjectElementIndex element(scratch, index);
6027 Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
6028 Address elementsFlags(scratch, ObjectElements::offsetOfFlags());
6030 // We don't have enough registers on x86, so use InvalidReg there. This
6031 // emits slightly less efficient code.
6032 Register spectreTemp = InvalidReg;
6034 Label storeSkipPreBarrier;
6035 if (handleAdd) {
6036 // Bounds check.
6037 Label inBounds, outOfBounds;
6038 masm.spectreBoundsCheck32(index, initLength, spectreTemp, &outOfBounds);
6039 masm.jump(&inBounds);
6041 // If we're out-of-bounds, only handle the index == initLength case.
6042 masm.bind(&outOfBounds);
6043 masm.branch32(Assembler::NotEqual, initLength, index, failure->label());
6045 // If index < capacity, we can add a dense element inline. If not we
6046 // need to allocate more elements.
6047 Label allocElement, addNewElement;
6048 Address capacity(scratch, ObjectElements::offsetOfCapacity());
6049 masm.spectreBoundsCheck32(index, capacity, spectreTemp, &allocElement);
6050 masm.jump(&addNewElement);
6052 masm.bind(&allocElement);
6053 {
6054 LiveRegisterSet save(GeneralRegisterSet::Volatile(),
6055 liveVolatileFloatRegs());
6056 save.takeUnchecked(scratch);
6057 masm.PushRegsInMask(save);
6059 using Fn = bool (*)(JSContext* cx, NativeObject* obj);
6060 masm.setupUnalignedABICall(scratch);
6061 masm.loadJSContext(scratch);
6062 masm.passABIArg(scratch);
6063 masm.passABIArg(obj);
6064 masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
6065 masm.storeCallPointerResult(scratch);
6067 masm.PopRegsInMask(save);
6068 masm.branchIfFalseBool(scratch, failure->label());
6070 // Load the reallocated elements pointer.
6071 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
6072 }
6073 masm.bind(&addNewElement);
6075 // Increment initLength.
6076 masm.add32(Imm32(1), initLength);
6078 // If length is now <= index, increment length too.
6079 Label skipIncrementLength;
6080 Address length(scratch, ObjectElements::offsetOfLength());
6081 masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
6082 masm.add32(Imm32(1), length);
6083 masm.bind(&skipIncrementLength);
6085 // Skip EmitPreBarrier as the memory is uninitialized.
6086 masm.jump(&storeSkipPreBarrier);
6088 masm.bind(&inBounds);
6089 } else {
6090 // Fail if index >= initLength.
6091 masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());
6092 }
6094 EmitPreBarrier(masm, element, MIRType::Value);
6096 masm.bind(&storeSkipPreBarrier);
6097 EmitStoreDenseElement(masm, val, element);
6099 emitPostBarrierElement(obj, val, scratch, index);
6100 return true;
6101 }
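// A toy model of the fast path above (illustrative only; assumes <vector> and
// uses resize() as a stand-in for addDenseElementPure and for the GC
// barriers, which have no scalar equivalent):
struct ToyElements {
  std::vector<double> data;  // "capacity" is data.size()
  uint32_t initLength = 0;
  uint32_t length = 0;
};

[[maybe_unused]] static bool ToyStoreDenseElementHole(ToyElements& e,
                                                      uint32_t index, double v,
                                                      bool handleAdd) {
  if (index < e.initLength) {
    e.data[index] = v;  // in-bounds: plain store
    return true;
  }
  if (!handleAdd || index != e.initLength) {
    return false;  // holes and sparse writes bail to the VM
  }
  if (index >= e.data.size()) {
    e.data.resize(index + 1);  // out of capacity: grow, like allocElement
  }
  e.initLength++;
  if (e.length <= index) {
    e.length = index + 1;
  }
  e.data[index] = v;
  return true;
}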
6103 bool CacheIRCompiler::emitArrayPush(ObjOperandId objId, ValOperandId rhsId) {
6104 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6106 AutoOutputRegister output(*this);
6107 Register obj = allocator.useRegister(masm, objId);
6108 ValueOperand val = allocator.useValueRegister(masm, rhsId);
6110 AutoScratchRegisterMaybeOutput scratchLength(allocator, masm, output);
6111 AutoScratchRegisterMaybeOutputType scratch(allocator, masm, output);
6113 FailurePath* failure;
6114 if (!addFailurePath(&failure)) {
6115 return false;
6116 }
6118 // Load obj->elements in scratch.
6119 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
6121 EmitAssertExtensibleElements(masm, scratch);
6122 EmitAssertWritableArrayLengthElements(masm, scratch);
6124 Address elementsInitLength(scratch,
6125 ObjectElements::offsetOfInitializedLength());
6126 Address elementsLength(scratch, ObjectElements::offsetOfLength());
6127 Address capacity(scratch, ObjectElements::offsetOfCapacity());
6129 // Fail if length != initLength.
6130 masm.load32(elementsInitLength, scratchLength);
6131 masm.branch32(Assembler::NotEqual, elementsLength, scratchLength,
6132 failure->label());
6134 // If scratchLength < capacity, we can add a dense element inline. If not we
6135 // need to allocate more elements.
6136 Label allocElement, addNewElement;
6137 masm.spectreBoundsCheck32(scratchLength, capacity, InvalidReg, &allocElement);
6138 masm.jump(&addNewElement);
6140 masm.bind(&allocElement);
6141 {
6142 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
6143 save.takeUnchecked(scratch);
6144 masm.PushRegsInMask(save);
6146 using Fn = bool (*)(JSContext* cx, NativeObject* obj);
6147 masm.setupUnalignedABICall(scratch);
6148 masm.loadJSContext(scratch);
6149 masm.passABIArg(scratch);
6150 masm.passABIArg(obj);
6151 masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
6152 masm.storeCallPointerResult(scratch);
6154 masm.PopRegsInMask(save);
6155 masm.branchIfFalseBool(scratch, failure->label());
6157 // Load the reallocated elements pointer.
6158 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
6159 }
6160 masm.bind(&addNewElement);
6162 // Increment initLength and length.
6163 masm.add32(Imm32(1), elementsInitLength);
6164 masm.add32(Imm32(1), elementsLength);
6166 // Store the value.
6167 BaseObjectElementIndex element(scratch, scratchLength);
6168 masm.storeValue(val, element);
6169 emitPostBarrierElement(obj, val, scratch, scratchLength);
6171 // Return value is new length.
6172 masm.add32(Imm32(1), scratchLength);
6173 masm.tagValue(JSVAL_TYPE_INT32, scratchLength, output.valueReg());
6175 return true;
6176 }
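// The same toy model for the push fast path above (illustrative; reuses the
// ToyElements sketch after emitStoreDenseElementHole). The stub only takes
// the inline path when length == initLength, and its boxed int32 result is
// the new length:
[[maybe_unused]] static bool ToyArrayPush(ToyElements& e, double v,
                                          uint32_t* newLength) {
  if (e.length != e.initLength) {
    return false;  // mismatched lengths bail to the VM
  }
  if (e.length >= e.data.size()) {
    e.data.resize(e.length + 1);  // stand-in for addDenseElementPure
  }
  e.data[e.length] = v;
  e.initLength++;
  e.length++;
  *newLength = e.length;
  return true;
}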
6178 bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
6179 Scalar::Type elementType,
6180 IntPtrOperandId indexId,
6181 uint32_t rhsId,
6182 bool handleOOB) {
6183 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6184 Register obj = allocator.useRegister(masm, objId);
6185 Register index = allocator.useRegister(masm, indexId);
6187 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
6189 Maybe<Register> valInt32;
6190 Maybe<Register> valBigInt;
6191 switch (elementType) {
6192 case Scalar::Int8:
6193 case Scalar::Uint8:
6194 case Scalar::Int16:
6195 case Scalar::Uint16:
6196 case Scalar::Int32:
6197 case Scalar::Uint32:
6198 case Scalar::Uint8Clamped:
6199 valInt32.emplace(allocator.useRegister(masm, Int32OperandId(rhsId)));
6200 break;
6202 case Scalar::Float32:
6203 case Scalar::Float64:
6204 allocator.ensureDoubleRegister(masm, NumberOperandId(rhsId),
6205 floatScratch0);
6206 break;
6208 case Scalar::BigInt64:
6209 case Scalar::BigUint64:
6210 valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(rhsId)));
6211 break;
6213 case Scalar::MaxTypedArrayViewType:
6214 case Scalar::Int64:
6215 case Scalar::Simd128:
6216 MOZ_CRASH("Unsupported TypedArray type");
6219 AutoScratchRegister scratch1(allocator, masm);
6220 Maybe<AutoScratchRegister> scratch2;
6221 Maybe<AutoSpectreBoundsScratchRegister> spectreScratch;
6222 if (Scalar::isBigIntType(elementType)) {
6223 scratch2.emplace(allocator, masm);
6224 } else {
6225 spectreScratch.emplace(allocator, masm);
6226 }
6228 FailurePath* failure = nullptr;
6229 if (!handleOOB) {
6230 if (!addFailurePath(&failure)) {
6231 return false;
6232 }
6233 }
6235 // Bounds check.
6236 Label done;
6237 Register spectreTemp = scratch2 ? scratch2->get() : spectreScratch->get();
6238 masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
6239 masm.spectreBoundsCheckPtr(index, scratch1, spectreTemp,
6240 handleOOB ? &done : failure->label());
6242 // Load the elements vector.
6243 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);
6245 BaseIndex dest(scratch1, index, ScaleFromScalarType(elementType));
6247 if (Scalar::isBigIntType(elementType)) {
6248 #ifdef JS_PUNBOX64
6249 Register64 temp(scratch2->get());
6250 #else
6251 // We don't have more registers available on x86, so spill |obj|.
6252 masm.push(obj);
6253 Register64 temp(scratch2->get(), obj);
6254 #endif
6256 masm.loadBigInt64(*valBigInt, temp);
6257 masm.storeToTypedBigIntArray(elementType, temp, dest);
6259 #ifndef JS_PUNBOX64
6260 masm.pop(obj);
6261 #endif
6262 } else if (elementType == Scalar::Float32) {
6263 ScratchFloat32Scope fpscratch(masm);
6264 masm.convertDoubleToFloat32(floatScratch0, fpscratch);
6265 masm.storeToTypedFloatArray(elementType, fpscratch, dest);
6266 } else if (elementType == Scalar::Float64) {
6267 masm.storeToTypedFloatArray(elementType, floatScratch0, dest);
6268 } else {
6269 masm.storeToTypedIntArray(elementType, *valInt32, dest);
6270 }
6272 masm.bind(&done);
6273 return true;
6274 }
6276 static gc::Heap InitialBigIntHeap(JSContext* cx) {
6277 JS::Zone* zone = cx->zone();
6278 return zone->allocNurseryBigInts() ? gc::Heap::Default : gc::Heap::Tenured;
6279 }
6281 static void EmitAllocateBigInt(MacroAssembler& masm, Register result,
6282 Register temp, const LiveRegisterSet& liveSet,
6283 gc::Heap initialHeap, Label* fail) {
6284 Label fallback, done;
6285 masm.newGCBigInt(result, temp, initialHeap, &fallback);
6286 masm.jump(&done);
6288 masm.bind(&fallback);
6290 // Request a minor collection at a later time if nursery allocation failed.
6291 bool requestMinorGC = initialHeap == gc::Heap::Default;
6293 masm.PushRegsInMask(liveSet);
6294 using Fn = void* (*)(JSContext* cx, bool requestMinorGC);
6295 masm.setupUnalignedABICall(temp);
6296 masm.loadJSContext(temp);
6297 masm.passABIArg(temp);
6298 masm.move32(Imm32(requestMinorGC), result);
6299 masm.passABIArg(result);
6300 masm.callWithABI<Fn, jit::AllocateBigIntNoGC>();
6301 masm.storeCallPointerResult(result);
6303 masm.PopRegsInMask(liveSet);
6304 masm.branchPtr(Assembler::Equal, result, ImmWord(0), fail);
6306 masm.bind(&done);
6307 }
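// Shape of the fallback above, in pseudo-C++ (illustrative only): try the
// inline nursery/tenured bump allocation first, and only call into the VM
// when it fails, requesting a minor GC for nursery-heap allocations so that
// later stub invocations can allocate inline again:
//
//   BigInt* result = tryInlineAllocate(initialHeap);  // newGCBigInt
//   if (!result) {
//     result = AllocateBigIntNoGC(cx, /* requestMinorGC = */
//                                 initialHeap == gc::Heap::Default);
//     if (!result) goto fail;  // the caller's failure path
//   }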
6309 bool CacheIRCompiler::emitLoadTypedArrayElementResult(
6310 ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
6311 bool handleOOB, bool forceDoubleForUint32) {
6312 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6313 AutoOutputRegister output(*this);
6314 Register obj = allocator.useRegister(masm, objId);
6315 Register index = allocator.useRegister(masm, indexId);
6317 AutoScratchRegister scratch1(allocator, masm);
6318 #ifdef JS_PUNBOX64
6319 AutoScratchRegister scratch2(allocator, masm);
6320 #else
6321 // There are too few registers available on x86, so we may need to reuse the
6322 // output's scratch register.
6323 AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
6324 #endif
6326 FailurePath* failure;
6327 if (!addFailurePath(&failure)) {
6328 return false;
6329 }
6331 // Bounds check.
6332 Label outOfBounds;
6333 masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
6334 masm.spectreBoundsCheckPtr(index, scratch1, scratch2,
6335 handleOOB ? &outOfBounds : failure->label());
6337 // Allocate BigInt if needed. The code after this should be infallible.
6338 Maybe<Register> bigInt;
6339 if (Scalar::isBigIntType(elementType)) {
6340 bigInt.emplace(output.valueReg().scratchReg());
6342 LiveRegisterSet save(GeneralRegisterSet::Volatile(),
6343 liveVolatileFloatRegs());
6344 save.takeUnchecked(scratch1);
6345 save.takeUnchecked(scratch2);
6346 save.takeUnchecked(output);
6348 gc::Heap initialHeap = InitialBigIntHeap(cx_);
6349 EmitAllocateBigInt(masm, *bigInt, scratch1, save, initialHeap,
6350 failure->label());
6351 }
6353 // Load the elements vector.
6354 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);
6356 // Load the value.
6357 BaseIndex source(scratch1, index, ScaleFromScalarType(elementType));
6359 if (Scalar::isBigIntType(elementType)) {
6360 #ifdef JS_PUNBOX64
6361 Register64 temp(scratch2);
6362 #else
6363 // We don't have more registers available on x86, so spill |obj| and
6364 // additionally use the output's type register.
6365 MOZ_ASSERT(output.valueReg().scratchReg() != output.valueReg().typeReg());
6366 masm.push(obj);
6367 Register64 temp(output.valueReg().typeReg(), obj);
6368 #endif
6370 masm.loadFromTypedBigIntArray(elementType, source, *bigInt, temp);
6372 #ifndef JS_PUNBOX64
6373 masm.pop(obj);
6374 #endif
6376 masm.tagValue(JSVAL_TYPE_BIGINT, *bigInt, output.valueReg());
6377 } else {
6378 MacroAssembler::Uint32Mode uint32Mode =
6379 forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
6380 : MacroAssembler::Uint32Mode::FailOnDouble;
6381 masm.loadFromTypedArray(elementType, source, output.valueReg(), uint32Mode,
6382 scratch1, failure->label());
6383 }
6385 if (handleOOB) {
6386 Label done;
6387 masm.jump(&done);
6389 masm.bind(&outOfBounds);
6390 masm.moveValue(UndefinedValue(), output.valueReg());
6392 masm.bind(&done);
6393 }
6395 return true;
6396 }
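// Why Uint32Mode matters above: an int32 Value cannot represent uint32 values
// greater than INT32_MAX. ForceDouble boxes every such load as a double;
// FailOnDouble boxes small values as int32 and bails on large ones.
// Illustrative sketch:
[[maybe_unused]] static bool BoxUint32Ref(uint32_t u, bool forceDouble,
                                          bool* outIsInt32, double* outNumber) {
  if (forceDouble) {
    *outIsInt32 = false;
    *outNumber = double(u);  // always boxed as a double Value
    return true;
  }
  if (u > uint32_t(INT32_MAX)) {
    return false;  // FailOnDouble: jump to the failure path instead
  }
  *outIsInt32 = true;
  *outNumber = double(int32_t(u));
  return true;
}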
6398 static void EmitDataViewBoundsCheck(MacroAssembler& masm, size_t byteSize,
6399 Register obj, Register offset,
6400 Register scratch, Label* fail) {
6401 // Ensure both offset < length and offset + (byteSize - 1) < length.
6402 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
6403 if (byteSize == 1) {
6404 masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
6405 } else {
6406 // temp := length - (byteSize - 1)
6407 // if temp < 0: fail
6408 // if offset >= temp: fail
6409 masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), scratch, fail);
6410 masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
6411 }
6412 }
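// Scalar equivalent of the check above (illustrative, assuming size_t
// offsets): testing offset + byteSize <= length directly could overflow, so
// the subtraction happens on the length side and is checked for underflow
// first.
[[maybe_unused]] static bool DataViewInBoundsRef(size_t offset,
                                                 size_t byteSize,
                                                 size_t length) {
  if (byteSize - 1 > length) {
    return false;  // length - (byteSize - 1) would underflow
  }
  return offset < length - (byteSize - 1);
}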
6414 bool CacheIRCompiler::emitLoadDataViewValueResult(
6415 ObjOperandId objId, IntPtrOperandId offsetId,
6416 BooleanOperandId littleEndianId, Scalar::Type elementType,
6417 bool forceDoubleForUint32) {
6418 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6420 AutoOutputRegister output(*this);
6421 Register obj = allocator.useRegister(masm, objId);
6422 Register offset = allocator.useRegister(masm, offsetId);
6423 Register littleEndian = allocator.useRegister(masm, littleEndianId);
6425 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
6427 Register64 outputReg64 = output.valueReg().toRegister64();
6428 Register outputScratch = outputReg64.scratchReg();
6430 FailurePath* failure;
6431 if (!addFailurePath(&failure)) {
6432 return false;
6433 }
6435 const size_t byteSize = Scalar::byteSize(elementType);
6437 EmitDataViewBoundsCheck(masm, byteSize, obj, offset, outputScratch,
6438 failure->label());
6440 masm.loadPtr(Address(obj, DataViewObject::dataOffset()), outputScratch);
6442 // Load the value.
6443 BaseIndex source(outputScratch, offset, TimesOne);
6444 switch (elementType) {
6445 case Scalar::Int8:
6446 masm.load8SignExtend(source, outputScratch);
6447 break;
6448 case Scalar::Uint8:
6449 masm.load8ZeroExtend(source, outputScratch);
6450 break;
6451 case Scalar::Int16:
6452 masm.load16UnalignedSignExtend(source, outputScratch);
6453 break;
6454 case Scalar::Uint16:
6455 masm.load16UnalignedZeroExtend(source, outputScratch);
6456 break;
6457 case Scalar::Int32:
6458 case Scalar::Uint32:
6459 case Scalar::Float32:
6460 masm.load32Unaligned(source, outputScratch);
6461 break;
6462 case Scalar::Float64:
6463 case Scalar::BigInt64:
6464 case Scalar::BigUint64:
6465 masm.load64Unaligned(source, outputReg64);
6466 break;
6467 case Scalar::Uint8Clamped:
6468 default:
6469 MOZ_CRASH("Invalid typed array type");
6472 // Swap the bytes in the loaded value.
6473 if (byteSize > 1) {
6474 Label skip;
6475 masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
6476 littleEndian, Imm32(0), &skip);
6478 switch (elementType) {
6479 case Scalar::Int16:
6480 masm.byteSwap16SignExtend(outputScratch);
6481 break;
6482 case Scalar::Uint16:
6483 masm.byteSwap16ZeroExtend(outputScratch);
6484 break;
6485 case Scalar::Int32:
6486 case Scalar::Uint32:
6487 case Scalar::Float32:
6488 masm.byteSwap32(outputScratch);
6489 break;
6490 case Scalar::Float64:
6491 case Scalar::BigInt64:
6492 case Scalar::BigUint64:
6493 masm.byteSwap64(outputReg64);
6494 break;
6495 case Scalar::Int8:
6496 case Scalar::Uint8:
6497 case Scalar::Uint8Clamped:
6498 default:
6499 MOZ_CRASH("Invalid type");
6502 masm.bind(&skip);
6505 // Move the value into the output register.
6506 switch (elementType) {
6507 case Scalar::Int8:
6508 case Scalar::Uint8:
6509 case Scalar::Int16:
6510 case Scalar::Uint16:
6511 case Scalar::Int32:
6512 masm.tagValue(JSVAL_TYPE_INT32, outputScratch, output.valueReg());
6513 break;
6514 case Scalar::Uint32: {
6515 MacroAssembler::Uint32Mode uint32Mode =
6516 forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
6517 : MacroAssembler::Uint32Mode::FailOnDouble;
6518 masm.boxUint32(outputScratch, output.valueReg(), uint32Mode,
6519 failure->label());
6520 break;
6521 }
6522 case Scalar::Float32: {
6523 FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
6524 masm.moveGPRToFloat32(outputScratch, scratchFloat32);
6525 masm.canonicalizeFloat(scratchFloat32);
6526 masm.convertFloat32ToDouble(scratchFloat32, floatScratch0);
6527 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
6528 break;
6529 }
6530 case Scalar::Float64:
6531 masm.moveGPR64ToDouble(outputReg64, floatScratch0);
6532 masm.canonicalizeDouble(floatScratch0);
6533 masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
6534 break;
6535 case Scalar::BigInt64:
6536 case Scalar::BigUint64: {
6537 // We need two extra registers. Reuse the obj/littleEndian registers.
6538 Register bigInt = obj;
6539 Register bigIntScratch = littleEndian;
6540 masm.push(bigInt);
6541 masm.push(bigIntScratch);
6542 Label fail, done;
6543 LiveRegisterSet save(GeneralRegisterSet::Volatile(),
6544 liveVolatileFloatRegs());
6545 save.takeUnchecked(bigInt);
6546 save.takeUnchecked(bigIntScratch);
6547 gc::Heap initialHeap = InitialBigIntHeap(cx_);
6548 EmitAllocateBigInt(masm, bigInt, bigIntScratch, save, initialHeap, &fail);
6549 masm.jump(&done);
6551 masm.bind(&fail);
6552 masm.pop(bigIntScratch);
6553 masm.pop(bigInt);
6554 masm.jump(failure->label());
6556 masm.bind(&done);
6557 masm.initializeBigInt64(elementType, bigInt, outputReg64);
6558 masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
6559 masm.pop(bigIntScratch);
6560 masm.pop(bigInt);
6561 break;
6562 }
6563 case Scalar::Uint8Clamped:
6564 default:
6565 MOZ_CRASH("Invalid typed array type");
6566 }
6568 return true;
6569 }
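// Scalar model of the load above (illustrative; assumes std::memcpy via
// <cstring>): do an unaligned-safe read, then byte-swap iff the requested
// endianness differs from the host's, exactly as the MOZ_LITTLE_ENDIAN()
// branch decides.
[[maybe_unused]] static uint32_t DataViewRead32Ref(const uint8_t* data,
                                                   size_t offset,
                                                   bool littleEndian) {
  uint32_t raw;
  std::memcpy(&raw, data + offset, sizeof(raw));
  bool hostIsLittle = MOZ_LITTLE_ENDIAN();
  if (littleEndian != hostIsLittle) {
    raw = (raw >> 24) | ((raw >> 8) & 0xff00u) | ((raw << 8) & 0xff0000u) |
          (raw << 24);
  }
  return raw;
}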
6571 bool CacheIRCompiler::emitStoreDataViewValueResult(
6572 ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
6573 BooleanOperandId littleEndianId, Scalar::Type elementType) {
6574 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6576 AutoOutputRegister output(*this);
6577 #ifdef JS_CODEGEN_X86
6578 // Use a scratch register to avoid running out of registers.
6579 Register obj = output.valueReg().typeReg();
6580 allocator.copyToScratchRegister(masm, objId, obj);
6581 #else
6582 Register obj = allocator.useRegister(masm, objId);
6583 #endif
6584 Register offset = allocator.useRegister(masm, offsetId);
6585 Register littleEndian = allocator.useRegister(masm, littleEndianId);
6587 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
6588 Maybe<Register> valInt32;
6589 Maybe<Register> valBigInt;
6590 switch (elementType) {
6591 case Scalar::Int8:
6592 case Scalar::Uint8:
6593 case Scalar::Int16:
6594 case Scalar::Uint16:
6595 case Scalar::Int32:
6596 case Scalar::Uint32:
6597 case Scalar::Uint8Clamped:
6598 valInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
6599 break;
6601 case Scalar::Float32:
6602 case Scalar::Float64:
6603 allocator.ensureDoubleRegister(masm, NumberOperandId(valueId),
6604 floatScratch0);
6605 break;
6607 case Scalar::BigInt64:
6608 case Scalar::BigUint64:
6609 valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
6610 break;
6612 case Scalar::MaxTypedArrayViewType:
6613 case Scalar::Int64:
6614 case Scalar::Simd128:
6615 MOZ_CRASH("Unsupported type");
6618 Register scratch1 = output.valueReg().scratchReg();
6619 MOZ_ASSERT(scratch1 != obj, "scratchReg must not be typeReg");
6621 // On platforms with enough registers, |scratch2| is an extra scratch register
6622 // (pair) used for byte-swapping the value.
6623 #ifndef JS_CODEGEN_X86
6624 mozilla::MaybeOneOf<AutoScratchRegister, AutoScratchRegister64> scratch2;
6625 switch (elementType) {
6626 case Scalar::Int8:
6627 case Scalar::Uint8:
6628 break;
6629 case Scalar::Int16:
6630 case Scalar::Uint16:
6631 case Scalar::Int32:
6632 case Scalar::Uint32:
6633 case Scalar::Float32:
6634 scratch2.construct<AutoScratchRegister>(allocator, masm);
6635 break;
6636 case Scalar::Float64:
6637 case Scalar::BigInt64:
6638 case Scalar::BigUint64:
6639 scratch2.construct<AutoScratchRegister64>(allocator, masm);
6640 break;
6641 case Scalar::Uint8Clamped:
6642 default:
6643 MOZ_CRASH("Invalid type");
6645 #endif
6647 FailurePath* failure;
6648 if (!addFailurePath(&failure)) {
6649 return false;
6650 }
6652 const size_t byteSize = Scalar::byteSize(elementType);
6654 EmitDataViewBoundsCheck(masm, byteSize, obj, offset, scratch1,
6655 failure->label());
6657 masm.loadPtr(Address(obj, DataViewObject::dataOffset()), scratch1);
6658 BaseIndex dest(scratch1, offset, TimesOne);
6660 if (byteSize == 1) {
6661 // Byte swapping has no effect, so just do the byte store.
6662 masm.store8(*valInt32, dest);
6663 masm.moveValue(UndefinedValue(), output.valueReg());
6664 return true;
6665 }
6667 // On 32-bit x86, |obj| is already a scratch register so use that. If we need
6668 // a Register64 we also use the littleEndian register and use the stack
6669 // location for the check below.
6670 bool pushedLittleEndian = false;
6671 #ifdef JS_CODEGEN_X86
6672 if (byteSize == 8) {
6673 masm.push(littleEndian);
6674 pushedLittleEndian = true;
6675 }
6676 auto valScratch32 = [&]() -> Register { return obj; };
6677 auto valScratch64 = [&]() -> Register64 {
6678 return Register64(obj, littleEndian);
6679 };
6680 #else
6681 auto valScratch32 = [&]() -> Register {
6682 return scratch2.ref<AutoScratchRegister>();
6683 };
6684 auto valScratch64 = [&]() -> Register64 {
6685 return scratch2.ref<AutoScratchRegister64>();
6686 };
6687 #endif
6689 // Load the value into a gpr register.
6690 switch (elementType) {
6691 case Scalar::Int16:
6692 case Scalar::Uint16:
6693 case Scalar::Int32:
6694 case Scalar::Uint32:
6695 masm.move32(*valInt32, valScratch32());
6696 break;
6697 case Scalar::Float32: {
6698 FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
6699 masm.convertDoubleToFloat32(floatScratch0, scratchFloat32);
6700 masm.canonicalizeFloatIfDeterministic(scratchFloat32);
6701 masm.moveFloat32ToGPR(scratchFloat32, valScratch32());
6702 break;
6703 }
6704 case Scalar::Float64: {
6705 masm.canonicalizeDoubleIfDeterministic(floatScratch0);
6706 masm.moveDoubleToGPR64(floatScratch0, valScratch64());
6707 break;
6708 }
6709 case Scalar::BigInt64:
6710 case Scalar::BigUint64:
6711 masm.loadBigInt64(*valBigInt, valScratch64());
6712 break;
6713 case Scalar::Int8:
6714 case Scalar::Uint8:
6715 case Scalar::Uint8Clamped:
6716 default:
6717 MOZ_CRASH("Invalid type");
6720 // Swap the bytes in the loaded value.
6721 Label skip;
6722 if (pushedLittleEndian) {
6723 masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
6724 Address(masm.getStackPointer(), 0), Imm32(0), &skip);
6725 } else {
6726 masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
6727 littleEndian, Imm32(0), &skip);
6728 }
6729 switch (elementType) {
6730 case Scalar::Int16:
6731 masm.byteSwap16SignExtend(valScratch32());
6732 break;
6733 case Scalar::Uint16:
6734 masm.byteSwap16ZeroExtend(valScratch32());
6735 break;
6736 case Scalar::Int32:
6737 case Scalar::Uint32:
6738 case Scalar::Float32:
6739 masm.byteSwap32(valScratch32());
6740 break;
6741 case Scalar::Float64:
6742 case Scalar::BigInt64:
6743 case Scalar::BigUint64:
6744 masm.byteSwap64(valScratch64());
6745 break;
6746 case Scalar::Int8:
6747 case Scalar::Uint8:
6748 case Scalar::Uint8Clamped:
6749 default:
6750 MOZ_CRASH("Invalid type");
6752 masm.bind(&skip);
6754 // Store the value.
6755 switch (elementType) {
6756 case Scalar::Int16:
6757 case Scalar::Uint16:
6758 masm.store16Unaligned(valScratch32(), dest);
6759 break;
6760 case Scalar::Int32:
6761 case Scalar::Uint32:
6762 case Scalar::Float32:
6763 masm.store32Unaligned(valScratch32(), dest);
6764 break;
6765 case Scalar::Float64:
6766 case Scalar::BigInt64:
6767 case Scalar::BigUint64:
6768 masm.store64Unaligned(valScratch64(), dest);
6769 break;
6770 case Scalar::Int8:
6771 case Scalar::Uint8:
6772 case Scalar::Uint8Clamped:
6773 default:
6774 MOZ_CRASH("Invalid typed array type");
6777 #ifdef JS_CODEGEN_X86
6778 // Restore registers.
6779 if (pushedLittleEndian) {
6780 masm.pop(littleEndian);
6781 }
6782 #endif
6784 masm.moveValue(UndefinedValue(), output.valueReg());
6785 return true;
6786 }
6788 bool CacheIRCompiler::emitStoreFixedSlotUndefinedResult(ObjOperandId objId,
6789 uint32_t offsetOffset,
6790 ValOperandId rhsId) {
6791 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6793 AutoOutputRegister output(*this);
6794 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
6795 Register obj = allocator.useRegister(masm, objId);
6796 ValueOperand val = allocator.useValueRegister(masm, rhsId);
6798 StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
6799 emitLoadStubField(offset, scratch);
6801 BaseIndex slot(obj, scratch, TimesOne);
6802 EmitPreBarrier(masm, slot, MIRType::Value);
6803 masm.storeValue(val, slot);
6804 emitPostBarrierSlot(obj, val, scratch);
6806 masm.moveValue(UndefinedValue(), output.valueReg());
6807 return true;
6808 }
6810 bool CacheIRCompiler::emitLoadObjectResult(ObjOperandId objId) {
6811 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6812 AutoOutputRegister output(*this);
6813 Register obj = allocator.useRegister(masm, objId);
6815 EmitStoreResult(masm, obj, JSVAL_TYPE_OBJECT, output);
6817 return true;
6818 }
6820 bool CacheIRCompiler::emitLoadStringResult(StringOperandId strId) {
6821 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6822 AutoOutputRegister output(*this);
6823 Register str = allocator.useRegister(masm, strId);
6825 masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
6827 return true;
6828 }
6830 bool CacheIRCompiler::emitLoadSymbolResult(SymbolOperandId symId) {
6831 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6832 AutoOutputRegister output(*this);
6833 Register sym = allocator.useRegister(masm, symId);
6835 masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
6837 return true;
6838 }
6840 bool CacheIRCompiler::emitLoadInt32Result(Int32OperandId valId) {
6841 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6842 AutoOutputRegister output(*this);
6843 Register val = allocator.useRegister(masm, valId);
6845 masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
6847 return true;
6848 }
6850 bool CacheIRCompiler::emitLoadBigIntResult(BigIntOperandId valId) {
6851 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6852 AutoOutputRegister output(*this);
6853 Register val = allocator.useRegister(masm, valId);
6855 masm.tagValue(JSVAL_TYPE_BIGINT, val, output.valueReg());
6857 return true;
6858 }
6860 bool CacheIRCompiler::emitLoadDoubleResult(NumberOperandId valId) {
6861 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6862 AutoOutputRegister output(*this);
6863 ValueOperand val = allocator.useValueRegister(masm, valId);
6865 #ifdef DEBUG
6866 Label ok;
6867 masm.branchTestDouble(Assembler::Equal, val, &ok);
6868 masm.branchTestInt32(Assembler::Equal, val, &ok);
6869 masm.assumeUnreachable("input must be double or int32");
6870 masm.bind(&ok);
6871 #endif
6873 masm.moveValue(val, output.valueReg());
6874 masm.convertInt32ValueToDouble(output.valueReg());
6876 return true;
6877 }
6879 bool CacheIRCompiler::emitLoadTypeOfObjectResult(ObjOperandId objId) {
6880 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6881 AutoOutputRegister output(*this);
6882 Register obj = allocator.useRegister(masm, objId);
6883 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
6885 Label slowCheck, isObject, isCallable, isUndefined, done;
6886 masm.typeOfObject(obj, scratch, &slowCheck, &isObject, &isCallable,
6887 &isUndefined);
6889 masm.bind(&isCallable);
6890 masm.moveValue(StringValue(cx_->names().function), output.valueReg());
6891 masm.jump(&done);
6893 masm.bind(&isUndefined);
6894 masm.moveValue(StringValue(cx_->names().undefined), output.valueReg());
6895 masm.jump(&done);
6897 masm.bind(&isObject);
6898 masm.moveValue(StringValue(cx_->names().object), output.valueReg());
6899 masm.jump(&done);
6901 {
6902 masm.bind(&slowCheck);
6903 LiveRegisterSet save(GeneralRegisterSet::Volatile(),
6904 liveVolatileFloatRegs());
6905 masm.PushRegsInMask(save);
6907 using Fn = JSString* (*)(JSObject* obj, JSRuntime* rt);
6908 masm.setupUnalignedABICall(scratch);
6909 masm.passABIArg(obj);
6910 masm.movePtr(ImmPtr(cx_->runtime()), scratch);
6911 masm.passABIArg(scratch);
6912 masm.callWithABI<Fn, TypeOfNameObject>();
6913 masm.storeCallPointerResult(scratch);
6915 LiveRegisterSet ignore;
6916 ignore.add(scratch);
6917 masm.PopRegsInMaskIgnore(save, ignore);
6919 masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
6920 }
6922 masm.bind(&done);
6923 return true;
6924 }
6926 bool CacheIRCompiler::emitLoadInt32TruthyResult(ValOperandId inputId) {
6927 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6928 AutoOutputRegister output(*this);
6929 ValueOperand val = allocator.useValueRegister(masm, inputId);
6931 Label ifFalse, done;
6932 masm.branchTestInt32Truthy(false, val, &ifFalse);
6933 masm.moveValue(BooleanValue(true), output.valueReg());
6934 masm.jump(&done);
6936 masm.bind(&ifFalse);
6937 masm.moveValue(BooleanValue(false), output.valueReg());
6939 masm.bind(&done);
6940 return true;
6941 }
6943 bool CacheIRCompiler::emitLoadStringTruthyResult(StringOperandId strId) {
6944 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6945 AutoOutputRegister output(*this);
6946 Register str = allocator.useRegister(masm, strId);
6948 Label ifFalse, done;
6949 masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
6950 Imm32(0), &ifFalse);
6951 masm.moveValue(BooleanValue(true), output.valueReg());
6952 masm.jump(&done);
6954 masm.bind(&ifFalse);
6955 masm.moveValue(BooleanValue(false), output.valueReg());
6957 masm.bind(&done);
6958 return true;
6959 }
6961 bool CacheIRCompiler::emitLoadDoubleTruthyResult(NumberOperandId inputId) {
6962 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6963 AutoOutputRegister output(*this);
6965 AutoScratchFloatRegister floatReg(this);
6967 allocator.ensureDoubleRegister(masm, inputId, floatReg);
6969 Label ifFalse, done;
6971 masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
6972 masm.moveValue(BooleanValue(true), output.valueReg());
6973 masm.jump(&done);
6975 masm.bind(&ifFalse);
6976 masm.moveValue(BooleanValue(false), output.valueReg());
6978 masm.bind(&done);
6979 return true;
6980 }
6982 bool CacheIRCompiler::emitLoadObjectTruthyResult(ObjOperandId objId) {
6983 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6984 AutoOutputRegister output(*this);
6985 Register obj = allocator.useRegister(masm, objId);
6986 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
6988 Label emulatesUndefined, slowPath, done;
6989 masm.branchIfObjectEmulatesUndefined(obj, scratch, &slowPath,
6990 &emulatesUndefined);
6991 masm.moveValue(BooleanValue(true), output.valueReg());
6992 masm.jump(&done);
6994 masm.bind(&emulatesUndefined);
6995 masm.moveValue(BooleanValue(false), output.valueReg());
6996 masm.jump(&done);
6998 masm.bind(&slowPath);
6999 {
7000 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
7001 liveVolatileFloatRegs());
7002 volatileRegs.takeUnchecked(scratch);
7003 volatileRegs.takeUnchecked(output);
7004 masm.PushRegsInMask(volatileRegs);
7006 using Fn = bool (*)(JSObject* obj);
7007 masm.setupUnalignedABICall(scratch);
7008 masm.passABIArg(obj);
7009 masm.callWithABI<Fn, js::EmulatesUndefined>();
7010 masm.storeCallBoolResult(scratch);
7011 masm.xor32(Imm32(1), scratch);
7013 masm.PopRegsInMask(volatileRegs);
7015 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
7016 }
7018 masm.bind(&done);
7019 return true;
7020 }
7022 bool CacheIRCompiler::emitLoadBigIntTruthyResult(BigIntOperandId bigIntId) {
7023 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7024 AutoOutputRegister output(*this);
7025 Register bigInt = allocator.useRegister(masm, bigIntId);
7027 Label ifFalse, done;
7028 masm.branch32(Assembler::Equal,
7029 Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(0),
7030 &ifFalse);
7031 masm.moveValue(BooleanValue(true), output.valueReg());
7032 masm.jump(&done);
7034 masm.bind(&ifFalse);
7035 masm.moveValue(BooleanValue(false), output.valueReg());
7037 masm.bind(&done);
7038 return true;
7039 }
7041 bool CacheIRCompiler::emitLoadValueTruthyResult(ValOperandId inputId) {
7042 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7044 AutoOutputRegister output(*this);
7045 ValueOperand value = allocator.useValueRegister(masm, inputId);
7046 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
7047 AutoScratchRegister scratch2(allocator, masm);
7048 AutoScratchFloatRegister floatReg(this);
7050 Label ifFalse, ifTrue, done;
7052 {
7053 ScratchTagScope tag(masm, value);
7054 masm.splitTagForTest(value, tag);
7056 masm.branchTestUndefined(Assembler::Equal, tag, &ifFalse);
7057 masm.branchTestNull(Assembler::Equal, tag, &ifFalse);
7059 Label notBoolean;
7060 masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
7061 {
7062 ScratchTagScopeRelease _(&tag);
7063 masm.branchTestBooleanTruthy(false, value, &ifFalse);
7064 masm.jump(&ifTrue);
7065 }
7066 masm.bind(&notBoolean);
7068 Label notInt32;
7069 masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
7070 {
7071 ScratchTagScopeRelease _(&tag);
7072 masm.branchTestInt32Truthy(false, value, &ifFalse);
7073 masm.jump(&ifTrue);
7074 }
7075 masm.bind(&notInt32);
7077 Label notObject;
7078 masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
7079 {
7080 ScratchTagScopeRelease _(&tag);
7082 Register obj = masm.extractObject(value, scratch1);
7084 Label slowPath;
7085 masm.branchIfObjectEmulatesUndefined(obj, scratch2, &slowPath, &ifFalse);
7086 masm.jump(&ifTrue);
7088 masm.bind(&slowPath);
7089 {
7090 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
7091 liveVolatileFloatRegs());
7092 volatileRegs.takeUnchecked(scratch1);
7093 volatileRegs.takeUnchecked(scratch2);
7094 volatileRegs.takeUnchecked(output);
7095 masm.PushRegsInMask(volatileRegs);
7097 using Fn = bool (*)(JSObject* obj);
7098 masm.setupUnalignedABICall(scratch2);
7099 masm.passABIArg(obj);
7100 masm.callWithABI<Fn, js::EmulatesUndefined>();
7101 masm.storeCallPointerResult(scratch2);
7103 masm.PopRegsInMask(volatileRegs);
7105 masm.branchIfTrueBool(scratch2, &ifFalse);
7106 masm.jump(&ifTrue);
7107       }
7108     }
7109 masm.bind(&notObject);
7111 Label notString;
7112 masm.branchTestString(Assembler::NotEqual, tag, &notString);
7113     {
7114 ScratchTagScopeRelease _(&tag);
7115 masm.branchTestStringTruthy(false, value, &ifFalse);
7116 masm.jump(&ifTrue);
7117     }
7118 masm.bind(&notString);
7120 Label notBigInt;
7121 masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
7122     {
7123 ScratchTagScopeRelease _(&tag);
7124 masm.branchTestBigIntTruthy(false, value, &ifFalse);
7125 masm.jump(&ifTrue);
7126     }
7127 masm.bind(&notBigInt);
7129 masm.branchTestSymbol(Assembler::Equal, tag, &ifTrue);
7131 #ifdef DEBUG
7132 Label isDouble;
7133 masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
7134 masm.assumeUnreachable("Unexpected value type");
7135 masm.bind(&isDouble);
7136 #endif
7138     {
7139 ScratchTagScopeRelease _(&tag);
7140 masm.unboxDouble(value, floatReg);
7141 masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
7142     }
7144 // Fall through to true case.
7145   }
7147 masm.bind(&ifTrue);
7148 masm.moveValue(BooleanValue(true), output.valueReg());
7149 masm.jump(&done);
7151 masm.bind(&ifFalse);
7152 masm.moveValue(BooleanValue(false), output.valueReg());
7154 masm.bind(&done);
7155 return true;
7156 }
7158 bool CacheIRCompiler::emitComparePointerResultShared(JSOp op,
7159 TypedOperandId lhsId,
7160 TypedOperandId rhsId) {
7161 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7162 AutoOutputRegister output(*this);
7164 Register left = allocator.useRegister(masm, lhsId);
7165 Register right = allocator.useRegister(masm, rhsId);
7167 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
7169 Label ifTrue, done;
7170 masm.branchPtr(JSOpToCondition(op, /* signed = */ true), left, right,
7171 &ifTrue);
7173 EmitStoreBoolean(masm, false, output);
7174 masm.jump(&done);
7176 masm.bind(&ifTrue);
7177 EmitStoreBoolean(masm, true, output);
7178 masm.bind(&done);
7179 return true;
7180 }
7182 bool CacheIRCompiler::emitCompareObjectResult(JSOp op, ObjOperandId lhsId,
7183 ObjOperandId rhsId) {
7184 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7185 return emitComparePointerResultShared(op, lhsId, rhsId);
7186 }
7188 bool CacheIRCompiler::emitCompareSymbolResult(JSOp op, SymbolOperandId lhsId,
7189 SymbolOperandId rhsId) {
7190 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7191 return emitComparePointerResultShared(op, lhsId, rhsId);
7192 }
7194 bool CacheIRCompiler::emitCompareInt32Result(JSOp op, Int32OperandId lhsId,
7195 Int32OperandId rhsId) {
7196 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7197 AutoOutputRegister output(*this);
7198 Register left = allocator.useRegister(masm, lhsId);
7199 Register right = allocator.useRegister(masm, rhsId);
7201 Label ifTrue, done;
7202 masm.branch32(JSOpToCondition(op, /* signed = */ true), left, right, &ifTrue);
7204 EmitStoreBoolean(masm, false, output);
7205 masm.jump(&done);
7207 masm.bind(&ifTrue);
7208 EmitStoreBoolean(masm, true, output);
7209 masm.bind(&done);
7210 return true;
7211 }
7213 bool CacheIRCompiler::emitCompareDoubleResult(JSOp op, NumberOperandId lhsId,
7214 NumberOperandId rhsId) {
7215 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7216 AutoOutputRegister output(*this);
7218 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
7219 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
7221 FailurePath* failure;
7222 if (!addFailurePath(&failure)) {
7223 return false;
7224   }
7226 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
7227 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
7229 Label done, ifTrue;
7230 masm.branchDouble(JSOpToDoubleCondition(op), floatScratch0, floatScratch1,
7231 &ifTrue);
7232 EmitStoreBoolean(masm, false, output);
7233 masm.jump(&done);
7235 masm.bind(&ifTrue);
7236 EmitStoreBoolean(masm, true, output);
7237 masm.bind(&done);
7238 return true;
7239 }
7241 bool CacheIRCompiler::emitCompareBigIntResult(JSOp op, BigIntOperandId lhsId,
7242 BigIntOperandId rhsId) {
7243 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7244 AutoOutputRegister output(*this);
7246 Register lhs = allocator.useRegister(masm, lhsId);
7247 Register rhs = allocator.useRegister(masm, rhsId);
7249 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
7251 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
7252 masm.PushRegsInMask(save);
7254 masm.setupUnalignedABICall(scratch);
7256   // Pass the operands in reverse order for JSOp::Le and JSOp::Gt:
7257 // - |left <= right| is implemented as |right >= left|.
7258 // - |left > right| is implemented as |right < left|.
7259 if (op == JSOp::Le || op == JSOp::Gt) {
7260 masm.passABIArg(rhs);
7261 masm.passABIArg(lhs);
7262 } else {
7263 masm.passABIArg(lhs);
7264 masm.passABIArg(rhs);
7265   }
7267 using Fn = bool (*)(BigInt*, BigInt*);
7268 Fn fn;
7269 if (op == JSOp::Eq || op == JSOp::StrictEq) {
7270 fn = jit::BigIntEqual<EqualityKind::Equal>;
7271 } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
7272 fn = jit::BigIntEqual<EqualityKind::NotEqual>;
7273 } else if (op == JSOp::Lt || op == JSOp::Gt) {
7274 fn = jit::BigIntCompare<ComparisonKind::LessThan>;
7275 } else {
7276 MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
7277 fn = jit::BigIntCompare<ComparisonKind::GreaterThanOrEqual>;
7278   }
7280 masm.callWithABI(DynamicFunction<Fn>(fn));
7281 masm.storeCallBoolResult(scratch);
7283 LiveRegisterSet ignore;
7284 ignore.add(scratch);
7285 masm.PopRegsInMaskIgnore(save, ignore);
7287 EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
7288 return true;
7289 }
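// Added note: the operand swap above means only two relational kernels are
// needed, because |x <= y| <=> |y >= x| and |x > y| <=> |y < x|. A sketch of
// the same dispatch in plain C++ (relational ops only; illustrative, not the
// compiler's actual helper):
#if 0
static bool CompareBigIntsSketch(JSOp op, BigInt* lhs, BigInt* rhs) {
  if (op == JSOp::Le || op == JSOp::Gt) {
    std::swap(lhs, rhs);  // x <= y  ==>  y >= x;  x > y  ==>  y < x
  }
  if (op == JSOp::Lt || op == JSOp::Gt) {
    return jit::BigIntCompare<ComparisonKind::LessThan>(lhs, rhs);
  }
  MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
  return jit::BigIntCompare<ComparisonKind::GreaterThanOrEqual>(lhs, rhs);
}
#endif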
7291 bool CacheIRCompiler::emitCompareBigIntInt32Result(JSOp op,
7292 BigIntOperandId lhsId,
7293 Int32OperandId rhsId) {
7294 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7295 AutoOutputRegister output(*this);
7296 Register bigInt = allocator.useRegister(masm, lhsId);
7297 Register int32 = allocator.useRegister(masm, rhsId);
7299 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
7300 AutoScratchRegister scratch2(allocator, masm);
7302 Label ifTrue, ifFalse;
7303 masm.compareBigIntAndInt32(op, bigInt, int32, scratch1, scratch2, &ifTrue,
7304 &ifFalse);
7306 Label done;
7307 masm.bind(&ifFalse);
7308 EmitStoreBoolean(masm, false, output);
7309 masm.jump(&done);
7311 masm.bind(&ifTrue);
7312 EmitStoreBoolean(masm, true, output);
7314 masm.bind(&done);
7315 return true;
7316 }
7318 bool CacheIRCompiler::emitCompareBigIntNumberResult(JSOp op,
7319 BigIntOperandId lhsId,
7320 NumberOperandId rhsId) {
7321 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7322 AutoOutputRegister output(*this);
7324 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
7326 Register lhs = allocator.useRegister(masm, lhsId);
7327 allocator.ensureDoubleRegister(masm, rhsId, floatScratch0);
7329 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
7331 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
7332 masm.PushRegsInMask(save);
7334 masm.setupUnalignedABICall(scratch);
7336   // Pass the operands in reverse order for JSOp::Le and JSOp::Gt:
7337 // - |left <= right| is implemented as |right >= left|.
7338 // - |left > right| is implemented as |right < left|.
7339 if (op == JSOp::Le || op == JSOp::Gt) {
7340 masm.passABIArg(floatScratch0, ABIType::Float64);
7341 masm.passABIArg(lhs);
7342 } else {
7343 masm.passABIArg(lhs);
7344 masm.passABIArg(floatScratch0, ABIType::Float64);
7345   }
7347 using FnBigIntNumber = bool (*)(BigInt*, double);
7348 using FnNumberBigInt = bool (*)(double, BigInt*);
7349 switch (op) {
7350 case JSOp::Eq: {
7351 masm.callWithABI<FnBigIntNumber,
7352 jit::BigIntNumberEqual<EqualityKind::Equal>>();
7353 break;
7354     }
7355 case JSOp::Ne: {
7356 masm.callWithABI<FnBigIntNumber,
7357 jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
7358 break;
7359     }
7360 case JSOp::Lt: {
7361 masm.callWithABI<FnBigIntNumber,
7362 jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
7363 break;
7364     }
7365 case JSOp::Gt: {
7366 masm.callWithABI<FnNumberBigInt,
7367 jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
7368 break;
7369     }
7370 case JSOp::Le: {
7371 masm.callWithABI<
7372 FnNumberBigInt,
7373 jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
7374 break;
7375     }
7376 case JSOp::Ge: {
7377 masm.callWithABI<
7378 FnBigIntNumber,
7379 jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
7380 break;
7381     }
7382 default:
7383 MOZ_CRASH("unhandled op");
7384   }
7386 masm.storeCallBoolResult(scratch);
7388 LiveRegisterSet ignore;
7389 ignore.add(scratch);
7390 masm.PopRegsInMaskIgnore(save, ignore);
7392 EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
7393 return true;
7394 }
7396 bool CacheIRCompiler::emitCompareBigIntStringResult(JSOp op,
7397 BigIntOperandId lhsId,
7398 StringOperandId rhsId) {
7399 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7400 AutoCallVM callvm(masm, this, allocator);
7402 Register lhs = allocator.useRegister(masm, lhsId);
7403 Register rhs = allocator.useRegister(masm, rhsId);
7405 callvm.prepare();
7407 // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
7408 // - |left <= right| is implemented as |right >= left|.
7409 // - |left > right| is implemented as |right < left|.
7410 if (op == JSOp::Le || op == JSOp::Gt) {
7411 masm.Push(lhs);
7412 masm.Push(rhs);
7413 } else {
7414 masm.Push(rhs);
7415 masm.Push(lhs);
7416   }
7418 using FnBigIntString =
7419 bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
7420 using FnStringBigInt =
7421 bool (*)(JSContext*, HandleString, HandleBigInt, bool*);
7423 switch (op) {
7424 case JSOp::Eq: {
7425 constexpr auto Equal = EqualityKind::Equal;
7426 callvm.call<FnBigIntString, BigIntStringEqual<Equal>>();
7427 break;
7428     }
7429 case JSOp::Ne: {
7430 constexpr auto NotEqual = EqualityKind::NotEqual;
7431 callvm.call<FnBigIntString, BigIntStringEqual<NotEqual>>();
7432 break;
7433     }
7434 case JSOp::Lt: {
7435 constexpr auto LessThan = ComparisonKind::LessThan;
7436 callvm.call<FnBigIntString, BigIntStringCompare<LessThan>>();
7437 break;
7438     }
7439 case JSOp::Gt: {
7440 constexpr auto LessThan = ComparisonKind::LessThan;
7441 callvm.call<FnStringBigInt, StringBigIntCompare<LessThan>>();
7442 break;
7443     }
7444 case JSOp::Le: {
7445 constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
7446 callvm.call<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>();
7447 break;
7448     }
7449 case JSOp::Ge: {
7450 constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
7451 callvm.call<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>();
7452 break;
7453     }
7454 default:
7455 MOZ_CRASH("unhandled op");
7456   }
7457 return true;
7458 }
7460 bool CacheIRCompiler::emitCompareNullUndefinedResult(JSOp op, bool isUndefined,
7461 ValOperandId inputId) {
7462 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7464 AutoOutputRegister output(*this);
7465 ValueOperand input = allocator.useValueRegister(masm, inputId);
7466 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
7467 AutoScratchRegister scratch2(allocator, masm);
7469 if (IsStrictEqualityOp(op)) {
7470 if (isUndefined) {
7471 masm.testUndefinedSet(JSOpToCondition(op, false), input, scratch);
7472 } else {
7473 masm.testNullSet(JSOpToCondition(op, false), input, scratch);
7474     }
7475 EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
7476 return true;
7477   }
7479 FailurePath* failure;
7480 if (!addFailurePath(&failure)) {
7481 return false;
7482   }
7484 MOZ_ASSERT(IsLooseEqualityOp(op));
7486 Label nullOrLikeUndefined, notNullOrLikeUndefined, done;
7487   {
7488 ScratchTagScope tag(masm, input);
7489 masm.splitTagForTest(input, tag);
7491 if (isUndefined) {
7492 masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
7493 masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
7494 } else {
7495 masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
7496 masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
7497     }
7498 masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);
7500     {
7501 ScratchTagScopeRelease _(&tag);
7503 masm.unboxObject(input, scratch);
7504 masm.branchIfObjectEmulatesUndefined(scratch, scratch2, failure->label(),
7505 &nullOrLikeUndefined);
7506 masm.jump(&notNullOrLikeUndefined);
7507     }
7508   }
7510 masm.bind(&nullOrLikeUndefined);
7511 EmitStoreBoolean(masm, op == JSOp::Eq, output);
7512 masm.jump(&done);
7514 masm.bind(&notNullOrLikeUndefined);
7515 EmitStoreBoolean(masm, op == JSOp::Ne, output);
7517 masm.bind(&done);
7518 return true;
7519 }
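// Added note: for the loose-equality path above, the JS semantics are that
// null and undefined (and only objects which emulate undefined) compare
// loosely equal to null/undefined. For example, in JS:
//   null == undefined          // true
//   ({}) == null               // false
//   document.all == undefined  // true: document.all emulates undefined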
7521 bool CacheIRCompiler::emitCompareDoubleSameValueResult(NumberOperandId lhsId,
7522 NumberOperandId rhsId) {
7523 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7525 AutoOutputRegister output(*this);
7526 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
7527 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
7528 AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
7529 AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
7531 allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
7532 allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
7534 masm.sameValueDouble(floatScratch0, floatScratch1, floatScratch2, scratch);
7535 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
7536 return true;
7537 }
7539 bool CacheIRCompiler::emitIndirectTruncateInt32Result(Int32OperandId valId) {
7540 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7541 AutoOutputRegister output(*this);
7542 Register val = allocator.useRegister(masm, valId);
7544 if (output.hasValue()) {
7545 masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
7546 } else {
7547 masm.mov(val, output.typedReg().gpr());
7548   }
7549 return true;
7550 }
7552 bool CacheIRCompiler::emitCallPrintString(const char* str) {
7553 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7554 masm.printf(str);
7555 return true;
7556 }
7558 bool CacheIRCompiler::emitBreakpoint() {
7559 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7560 masm.breakpoint();
7561 return true;
7562 }
7564 void CacheIRCompiler::emitPostBarrierShared(Register obj,
7565 const ConstantOrRegister& val,
7566 Register scratch,
7567 Register maybeIndex) {
7568 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7570 if (val.constant()) {
7571 MOZ_ASSERT_IF(val.value().isGCThing(),
7572 !IsInsideNursery(val.value().toGCThing()));
7573 return;
7574   }
7576 TypedOrValueRegister reg = val.reg();
7577 if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
7578 return;
7579   }
7581 Label skipBarrier;
7582 if (reg.hasValue()) {
7583 masm.branchValueIsNurseryCell(Assembler::NotEqual, reg.valueReg(), scratch,
7584 &skipBarrier);
7585 } else {
7586 masm.branchPtrInNurseryChunk(Assembler::NotEqual, reg.typedReg().gpr(),
7587 scratch, &skipBarrier);
7588   }
7589 masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);
7591   // Check the one-element cache to avoid a VM call.
7592 auto* lastCellAddr = cx_->runtime()->gc.addressOfLastBufferedWholeCell();
7593 masm.branchPtr(Assembler::Equal, AbsoluteAddress(lastCellAddr), obj,
7594 &skipBarrier);
7596 // Call one of these, depending on maybeIndex:
7598 // void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
7599 // void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj,
7600 // int32_t index);
7601 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
7602 masm.PushRegsInMask(save);
7603 masm.setupUnalignedABICall(scratch);
7604 masm.movePtr(ImmPtr(cx_->runtime()), scratch);
7605 masm.passABIArg(scratch);
7606 masm.passABIArg(obj);
7607 if (maybeIndex != InvalidReg) {
7608 masm.passABIArg(maybeIndex);
7609 using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
7610 masm.callWithABI<Fn, PostWriteElementBarrier>();
7611 } else {
7612 using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
7613 masm.callWithABI<Fn, PostWriteBarrier>();
7614   }
7615 masm.PopRegsInMask(save);
7617 masm.bind(&skipBarrier);
7618 }
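// Added note: this is the generational-GC post-write barrier. Storing a
// nursery cell into a tenured object creates a tenured->nursery edge that the
// next minor GC must know about, so the object is recorded in the store
// buffer; the branches above skip the call when the value isn't in the
// nursery, when the object itself is in the nursery, or when the object was
// the last whole cell buffered. A sketch of the slow path's effect (helper
// names are illustrative, not the exact VM entry points):
#if 0
static void PostWriteBarrierSketch(JSRuntime* rt, JSObject* obj) {
  // Remember |obj| so the next minor GC traces its nursery references.
  rt->gc.storeBuffer().putWholeCell(obj);
}
#endif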
7620 bool CacheIRCompiler::emitWrapResult() {
7621 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7622 AutoOutputRegister output(*this);
7623 AutoScratchRegister scratch(allocator, masm);
7625 FailurePath* failure;
7626 if (!addFailurePath(&failure)) {
7627 return false;
7628   }
7630 Label done;
7631 // We only have to wrap objects, because we are in the same zone.
7632 masm.branchTestObject(Assembler::NotEqual, output.valueReg(), &done);
7634 Register obj = output.valueReg().scratchReg();
7635 masm.unboxObject(output.valueReg(), obj);
7637 LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
7638 masm.PushRegsInMask(save);
7640 using Fn = JSObject* (*)(JSContext* cx, JSObject* obj);
7641 masm.setupUnalignedABICall(scratch);
7642 masm.loadJSContext(scratch);
7643 masm.passABIArg(scratch);
7644 masm.passABIArg(obj);
7645 masm.callWithABI<Fn, WrapObjectPure>();
7646 masm.storeCallPointerResult(obj);
7648 LiveRegisterSet ignore;
7649 ignore.add(obj);
7650 masm.PopRegsInMaskIgnore(save, ignore);
7652 // We could not get a wrapper for this object.
7653 masm.branchTestPtr(Assembler::Zero, obj, obj, failure->label());
7655 // We clobbered the output register, so we have to retag.
7656 masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
7658 masm.bind(&done);
7659 return true;
7660 }
7662 bool CacheIRCompiler::emitMegamorphicLoadSlotByValueResult(ObjOperandId objId,
7663 ValOperandId idId) {
7664 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7665 AutoOutputRegister output(*this);
7667 Register obj = allocator.useRegister(masm, objId);
7668 ValueOperand idVal = allocator.useValueRegister(masm, idId);
7670 #ifdef JS_CODEGEN_X86
7671 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
7672 AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
7673 #else
7674 AutoScratchRegister scratch1(allocator, masm);
7675 AutoScratchRegister scratch2(allocator, masm);
7676 AutoScratchRegister scratch3(allocator, masm);
7677 #endif
7679 FailurePath* failure;
7680 if (!addFailurePath(&failure)) {
7681 return false;
7682   }
7684 #ifdef JS_CODEGEN_X86
7685 masm.xorPtr(scratch2, scratch2);
7686 #else
7687 Label cacheHit;
7688 masm.emitMegamorphicCacheLookupByValue(
7689 idVal, obj, scratch1, scratch3, scratch2, output.valueReg(), &cacheHit);
7690 #endif
7692 masm.branchIfNonNativeObj(obj, scratch1, failure->label());
7694 // idVal will be in vp[0], result will be stored in vp[1].
7695 masm.reserveStack(sizeof(Value));
7696 masm.Push(idVal);
7697 masm.moveStackPtrTo(idVal.scratchReg());
7699 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
7700 liveVolatileFloatRegs());
7701 volatileRegs.takeUnchecked(scratch1);
7702 volatileRegs.takeUnchecked(idVal);
7703 masm.PushRegsInMask(volatileRegs);
7705 using Fn = bool (*)(JSContext* cx, JSObject* obj,
7706 MegamorphicCache::Entry* cacheEntry, Value* vp);
7707 masm.setupUnalignedABICall(scratch1);
7708 masm.loadJSContext(scratch1);
7709 masm.passABIArg(scratch1);
7710 masm.passABIArg(obj);
7711 masm.passABIArg(scratch2);
7712 masm.passABIArg(idVal.scratchReg());
7713 masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();
7715 masm.storeCallPointerResult(scratch1);
7716 masm.PopRegsInMask(volatileRegs);
7718 masm.Pop(idVal);
7720 Label ok;
7721 uint32_t framePushed = masm.framePushed();
7722 masm.branchIfTrueBool(scratch1, &ok);
7723 masm.adjustStack(sizeof(Value));
7724 masm.jump(failure->label());
7726 masm.bind(&ok);
7727 masm.setFramePushed(framePushed);
7728 masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
7729 masm.adjustStack(sizeof(Value));
7731 #ifndef JS_CODEGEN_X86
7732 masm.bind(&cacheHit);
7733 #endif
7734 return true;
7735 }
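// Added note: the stack protocol used above is: reserve one Value slot, push
// idVal on top of it, and pass the stack pointer as |vp|; the pure helper
// reads the id from vp[0], writes its result into vp[1], and returns false to
// request a bailout to the failure path. Callee-side sketch (the lookup body
// is hypothetical):
#if 0
static bool GetNativeDataPropertyByValueSketch(
    JSContext* cx, JSObject* obj, MegamorphicCache::Entry* cacheEntry,
    Value* vp) {
  Value id = vp[0];  // input: the property key
  Value result;
  if (!PureLookupThatCannotGC(cx, obj, id, &result)) {
    return false;    // the IC falls back to its failure path
  }
  vp[1] = result;    // output slot read by loadTypedOrValue afterwards
  return true;
}
#endif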
7737 bool CacheIRCompiler::emitMegamorphicHasPropResult(ObjOperandId objId,
7738 ValOperandId idId,
7739 bool hasOwn) {
7740 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7741 AutoOutputRegister output(*this);
7743 Register obj = allocator.useRegister(masm, objId);
7744 ValueOperand idVal = allocator.useValueRegister(masm, idId);
7746 #ifdef JS_CODEGEN_X86
7747 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
7748 AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
7749 #else
7750 AutoScratchRegister scratch1(allocator, masm);
7751 AutoScratchRegister scratch2(allocator, masm);
7752 AutoScratchRegister scratch3(allocator, masm);
7753 #endif
7755 FailurePath* failure;
7756 if (!addFailurePath(&failure)) {
7757 return false;
7758   }
7760 #ifndef JS_CODEGEN_X86
7761 Label cacheHit, done;
7762 masm.emitMegamorphicCacheLookupExists(idVal, obj, scratch1, scratch3,
7763 scratch2, output.maybeReg(), &cacheHit,
7764 hasOwn);
7765 #else
7766 masm.xorPtr(scratch2, scratch2);
7767 #endif
7769 masm.branchIfNonNativeObj(obj, scratch1, failure->label());
7771 // idVal will be in vp[0], result will be stored in vp[1].
7772 masm.reserveStack(sizeof(Value));
7773 masm.Push(idVal);
7774 masm.moveStackPtrTo(idVal.scratchReg());
7776 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
7777 liveVolatileFloatRegs());
7778 volatileRegs.takeUnchecked(scratch1);
7779 volatileRegs.takeUnchecked(idVal);
7780 masm.PushRegsInMask(volatileRegs);
7782 using Fn = bool (*)(JSContext* cx, JSObject* obj,
7783 MegamorphicCache::Entry* cacheEntry, Value* vp);
7784 masm.setupUnalignedABICall(scratch1);
7785 masm.loadJSContext(scratch1);
7786 masm.passABIArg(scratch1);
7787 masm.passABIArg(obj);
7788 masm.passABIArg(scratch2);
7789 masm.passABIArg(idVal.scratchReg());
7790 if (hasOwn) {
7791 masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
7792 } else {
7793 masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
7794   }
7795 masm.storeCallPointerResult(scratch1);
7796 masm.PopRegsInMask(volatileRegs);
7798 masm.Pop(idVal);
7800 Label ok;
7801 uint32_t framePushed = masm.framePushed();
7802 masm.branchIfTrueBool(scratch1, &ok);
7803 masm.adjustStack(sizeof(Value));
7804 masm.jump(failure->label());
7806 masm.bind(&ok);
7807 masm.setFramePushed(framePushed);
7808 masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
7809 masm.adjustStack(sizeof(Value));
7811 #ifndef JS_CODEGEN_X86
7812 masm.jump(&done);
7813 masm.bind(&cacheHit);
7814 if (output.hasValue()) {
7815 masm.tagValue(JSVAL_TYPE_BOOLEAN, output.valueReg().scratchReg(),
7816 output.valueReg());
7817   }
7818 masm.bind(&done);
7819 #endif
7820 return true;
7821 }
7823 bool CacheIRCompiler::emitCallObjectHasSparseElementResult(
7824 ObjOperandId objId, Int32OperandId indexId) {
7825 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7826 AutoOutputRegister output(*this);
7828 Register obj = allocator.useRegister(masm, objId);
7829 Register index = allocator.useRegister(masm, indexId);
7831 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
7832 AutoScratchRegister scratch2(allocator, masm);
7834 FailurePath* failure;
7835 if (!addFailurePath(&failure)) {
7836 return false;
7837   }
7839 masm.reserveStack(sizeof(Value));
7840 masm.moveStackPtrTo(scratch2.get());
7842 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
7843 liveVolatileFloatRegs());
7844 volatileRegs.takeUnchecked(scratch1);
7845 volatileRegs.takeUnchecked(index);
7846 masm.PushRegsInMask(volatileRegs);
7848 using Fn =
7849 bool (*)(JSContext* cx, NativeObject* obj, int32_t index, Value* vp);
7850 masm.setupUnalignedABICall(scratch1);
7851 masm.loadJSContext(scratch1);
7852 masm.passABIArg(scratch1);
7853 masm.passABIArg(obj);
7854 masm.passABIArg(index);
7855 masm.passABIArg(scratch2);
7856 masm.callWithABI<Fn, HasNativeElementPure>();
7857 masm.storeCallPointerResult(scratch1);
7858 masm.PopRegsInMask(volatileRegs);
7860 Label ok;
7861 uint32_t framePushed = masm.framePushed();
7862 masm.branchIfTrueBool(scratch1, &ok);
7863 masm.adjustStack(sizeof(Value));
7864 masm.jump(failure->label());
7866 masm.bind(&ok);
7867 masm.setFramePushed(framePushed);
7868 masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
7869 masm.adjustStack(sizeof(Value));
7870 return true;
7871 }
7873 /*
7874  * Move a constant value into register dest.
7875  */
7876 void CacheIRCompiler::emitLoadStubFieldConstant(StubFieldOffset val,
7877 Register dest) {
7878 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7879 MOZ_ASSERT(mode_ == Mode::Ion);
7880 switch (val.getStubFieldType()) {
7881 case StubField::Type::Shape:
7882 masm.movePtr(ImmGCPtr(shapeStubField(val.getOffset())), dest);
7883 break;
7884 case StubField::Type::WeakGetterSetter:
7885 masm.movePtr(ImmGCPtr(weakGetterSetterStubField(val.getOffset())), dest);
7886 break;
7887 case StubField::Type::String:
7888 masm.movePtr(ImmGCPtr(stringStubField(val.getOffset())), dest);
7889 break;
7890 case StubField::Type::JSObject:
7891 masm.movePtr(ImmGCPtr(objectStubField(val.getOffset())), dest);
7892 break;
7893 case StubField::Type::RawPointer:
7894 masm.movePtr(ImmPtr(pointerStubField(val.getOffset())), dest);
7895 break;
7896 case StubField::Type::RawInt32:
7897 masm.move32(Imm32(int32StubField(val.getOffset())), dest);
7898 break;
7899 case StubField::Type::Id:
7900 masm.movePropertyKey(idStubField(val.getOffset()), dest);
7901 break;
7902 default:
7903 MOZ_CRASH("Unhandled stub field constant type");
7904   }
7905 }
7907 /*
7908  * After this is done executing, dest contains the value, either through a
7909  * constant load or through the load from the stub data.
7910  *
7911  * The current policy is that Baseline will use loads from the stub data (to
7912  * allow IC sharing), whereas Ion doesn't share ICs, and so we can safely use
7913  * constants in the IC.
7914  */
7915 void CacheIRCompiler::emitLoadStubField(StubFieldOffset val, Register dest) {
7916 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7917 if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
7918 emitLoadStubFieldConstant(val, dest);
7919 } else {
7920 Address load(ICStubReg, stubDataOffset_ + val.getOffset());
7922 switch (val.getStubFieldType()) {
7923 case StubField::Type::RawPointer:
7924 case StubField::Type::Shape:
7925 case StubField::Type::WeakGetterSetter:
7926 case StubField::Type::JSObject:
7927 case StubField::Type::Symbol:
7928 case StubField::Type::String:
7929 case StubField::Type::Id:
7930 masm.loadPtr(load, dest);
7931 break;
7932 case StubField::Type::RawInt32:
7933 masm.load32(load, dest);
7934 break;
7935 default:
7936 MOZ_CRASH("Unhandled stub field constant type");
7937     }
7938   }
7939 }
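// Added note: a concrete example of the policy described above, for a Shape
// field (sketch; it simply mirrors the two branches of emitLoadStubField):
#if 0
// Ion (StubFieldPolicy::Constant): the Shape pointer is baked into the code.
masm.movePtr(ImmGCPtr(shapeStubField(offset)), dest);
// Baseline (StubFieldPolicy::Address): the pointer is read from the stub
// data, so one compiled stub can be shared by many ICs holding different
// Shapes at the same offset.
masm.loadPtr(Address(ICStubReg, stubDataOffset_ + offset), dest);
#endif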
7941 void CacheIRCompiler::emitLoadValueStubField(StubFieldOffset val,
7942 ValueOperand dest) {
7943 MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Value);
7945 if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
7946 MOZ_ASSERT(mode_ == Mode::Ion);
7947 masm.moveValue(valueStubField(val.getOffset()), dest);
7948 } else {
7949 Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
7950 masm.loadValue(addr, dest);
7951   }
7952 }
7954 void CacheIRCompiler::emitLoadDoubleValueStubField(StubFieldOffset val,
7955 ValueOperand dest,
7956 FloatRegister scratch) {
7957 MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Double);
7959 if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
7960 MOZ_ASSERT(mode_ == Mode::Ion);
7961 double d = doubleStubField(val.getOffset());
7962 masm.moveValue(DoubleValue(d), dest);
7963 } else {
7964 Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
7965 masm.loadDouble(addr, scratch);
7966 masm.boxDouble(scratch, dest, scratch);
7967   }
7968 }
7970 bool CacheIRCompiler::emitLoadInstanceOfObjectResult(ValOperandId lhsId,
7971 ObjOperandId protoId) {
7972 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7973 AutoOutputRegister output(*this);
7974 ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
7975 Register proto = allocator.useRegister(masm, protoId);
7977 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
7979 FailurePath* failure;
7980 if (!addFailurePath(&failure)) {
7981 return false;
7982   }
7984 Label returnFalse, returnTrue, done;
7985 masm.fallibleUnboxObject(lhs, scratch, &returnFalse);
7987 // LHS is an object. Load its proto.
7988 masm.loadObjProto(scratch, scratch);
7990 // Walk the proto chain until we either reach the target object,
7991 // nullptr or LazyProto.
7992 Label loop;
7993 masm.bind(&loop);
7995 masm.branchPtr(Assembler::Equal, scratch, proto, &returnTrue);
7996 masm.branchTestPtr(Assembler::Zero, scratch, scratch, &returnFalse);
7998 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
7999 masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), failure->label());
8001 masm.loadObjProto(scratch, scratch);
8002 masm.jump(&loop);
8005 masm.bind(&returnFalse);
8006 EmitStoreBoolean(masm, false, output);
8007 masm.jump(&done);
8009 masm.bind(&returnTrue);
8010 EmitStoreBoolean(masm, true, output);
8011 // fallthrough
8012 masm.bind(&done);
8013 return true;
8014 }
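// Added note: this is the OrdinaryHasInstance proto-chain walk done inline,
// with |proto| being the cached C.prototype. Roughly equivalent C++ (sketch;
// the emitted code additionally bails out of the IC when it meets a lazy
// proto, which TaggedProto encodes as the sentinel pointer value 1):
#if 0
static bool HasProtoOnChainSketch(JSObject* obj, JSObject* proto) {
  for (JSObject* p = obj->staticPrototype(); p; p = p->staticPrototype()) {
    if (p == proto) {
      return true;
    }
  }
  return false;
}
#endif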
8016 bool CacheIRCompiler::emitMegamorphicLoadSlotResult(ObjOperandId objId,
8017 uint32_t idOffset) {
8018 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8019 AutoOutputRegister output(*this);
8021 Register obj = allocator.useRegister(masm, objId);
8022 StubFieldOffset id(idOffset, StubField::Type::Id);
8024 AutoScratchRegisterMaybeOutput idReg(allocator, masm, output);
8025 AutoScratchRegister scratch1(allocator, masm);
8026 AutoScratchRegister scratch2(allocator, masm);
8027 AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);
8029 FailurePath* failure;
8030 if (!addFailurePath(&failure)) {
8031 return false;
8032   }
8034 #ifdef JS_CODEGEN_X86
8035 masm.xorPtr(scratch3, scratch3);
8036 #else
8037 Label cacheHit;
8038 emitLoadStubField(id, idReg);
8039 masm.emitMegamorphicCacheLookupByValue(idReg.get(), obj, scratch1, scratch2,
8040 scratch3, output.valueReg(),
8041 &cacheHit);
8042 #endif
8044 masm.branchIfNonNativeObj(obj, scratch1, failure->label());
8046 masm.Push(UndefinedValue());
8047 masm.moveStackPtrTo(idReg.get());
8049 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
8050 liveVolatileFloatRegs());
8051 volatileRegs.takeUnchecked(scratch1);
8052 volatileRegs.takeUnchecked(scratch2);
8053 volatileRegs.takeUnchecked(scratch3);
8054 volatileRegs.takeUnchecked(idReg);
8055 masm.PushRegsInMask(volatileRegs);
8057 using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
8058 MegamorphicCache::Entry* cacheEntry, Value* vp);
8059 masm.setupUnalignedABICall(scratch1);
8060 masm.loadJSContext(scratch1);
8061 masm.passABIArg(scratch1);
8062 masm.passABIArg(obj);
8063 emitLoadStubField(id, scratch2);
8064 masm.passABIArg(scratch2);
8065 masm.passABIArg(scratch3);
8066 masm.passABIArg(idReg);
8068 #ifdef JS_CODEGEN_X86
8069 masm.callWithABI<Fn, GetNativeDataPropertyPureWithCacheLookup>();
8070 #else
8071 masm.callWithABI<Fn, GetNativeDataPropertyPure>();
8072 #endif
8074 masm.storeCallPointerResult(scratch2);
8075 masm.PopRegsInMask(volatileRegs);
8077 masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
8078 masm.adjustStack(sizeof(Value));
8080 masm.branchIfFalseBool(scratch2, failure->label());
8081 #ifndef JS_CODEGEN_X86
8082 masm.bind(&cacheHit);
8083 #endif
8085 return true;
8086 }
8088 bool CacheIRCompiler::emitMegamorphicStoreSlot(ObjOperandId objId,
8089 uint32_t idOffset,
8090 ValOperandId rhsId,
8091 bool strict) {
8092 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8094 AutoCallVM callvm(masm, this, allocator);
8096 Register obj = allocator.useRegister(masm, objId);
8097 ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
8098 StubFieldOffset id(idOffset, StubField::Type::Id);
8099 AutoScratchRegister scratch(allocator, masm);
8101 callvm.prepare();
8103 masm.Push(Imm32(strict));
8104 masm.Push(val);
8105 emitLoadStubField(id, scratch);
8106 masm.Push(scratch);
8107 masm.Push(obj);
8109 using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
8110 callvm.callNoResult<Fn, SetPropertyMegamorphic<false>>();
8111 return true;
8112 }
8114 bool CacheIRCompiler::emitGuardHasGetterSetter(ObjOperandId objId,
8115 uint32_t idOffset,
8116 uint32_t getterSetterOffset) {
8117 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8119 Register obj = allocator.useRegister(masm, objId);
8121 StubFieldOffset id(idOffset, StubField::Type::Id);
8122 StubFieldOffset getterSetter(getterSetterOffset,
8123 StubField::Type::WeakGetterSetter);
8125 AutoScratchRegister scratch1(allocator, masm);
8126 AutoScratchRegister scratch2(allocator, masm);
8127 AutoScratchRegister scratch3(allocator, masm);
8129 FailurePath* failure;
8130 if (!addFailurePath(&failure)) {
8131 return false;
8132   }
8134 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
8135 liveVolatileFloatRegs());
8136 volatileRegs.takeUnchecked(scratch1);
8137 volatileRegs.takeUnchecked(scratch2);
8138 masm.PushRegsInMask(volatileRegs);
8140 using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
8141 GetterSetter* getterSetter);
8142 masm.setupUnalignedABICall(scratch1);
8143 masm.loadJSContext(scratch1);
8144 masm.passABIArg(scratch1);
8145 masm.passABIArg(obj);
8146 emitLoadStubField(id, scratch2);
8147 masm.passABIArg(scratch2);
8148 emitLoadStubField(getterSetter, scratch3);
8149 masm.passABIArg(scratch3);
8150 masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
8151 masm.storeCallPointerResult(scratch1);
8152 masm.PopRegsInMask(volatileRegs);
8154 masm.branchIfFalseBool(scratch1, failure->label());
8155 return true;
8156 }
8158 bool CacheIRCompiler::emitGuardWasmArg(ValOperandId argId,
8159 wasm::ValType::Kind kind) {
8160 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8162 // All values can be boxed as AnyRef.
8163 if (kind == wasm::ValType::Ref) {
8164 return true;
8165   }
8166 MOZ_ASSERT(kind != wasm::ValType::V128);
8168 ValueOperand arg = allocator.useValueRegister(masm, argId);
8170 FailurePath* failure;
8171 if (!addFailurePath(&failure)) {
8172 return false;
8173   }
8175 // Check that the argument can be converted to the Wasm type in Warp code
8176 // without bailing out.
8177 Label done;
8178 switch (kind) {
8179 case wasm::ValType::I32:
8180 case wasm::ValType::F32:
8181 case wasm::ValType::F64: {
8182 // Argument must be number, bool, or undefined.
8183 masm.branchTestNumber(Assembler::Equal, arg, &done);
8184 masm.branchTestBoolean(Assembler::Equal, arg, &done);
8185 masm.branchTestUndefined(Assembler::NotEqual, arg, failure->label());
8186 break;
8187     }
8188 case wasm::ValType::I64: {
8189 // Argument must be bigint, bool, or string.
8190 masm.branchTestBigInt(Assembler::Equal, arg, &done);
8191 masm.branchTestBoolean(Assembler::Equal, arg, &done);
8192 masm.branchTestString(Assembler::NotEqual, arg, failure->label());
8193 break;
8194     }
8195 default:
8196 MOZ_CRASH("Unexpected kind");
8197   }
8198 masm.bind(&done);
8200 return true;
8201 }
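// Added note: the cases above encode which JS values Warp can convert to a
// wasm argument without bailing out: i32/f32/f64 accept number, boolean, or
// undefined; i64 accepts bigint, boolean, or string; Ref accepts any value
// (boxed as AnyRef); v128 never crosses the JS boundary. So, for example,
// passing "42" to an i64 parameter stays on the fast path (string -> BigInt
// conversion), while passing "42" to an i32 parameter hits the failure label.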
8203 bool CacheIRCompiler::emitGuardMultipleShapes(ObjOperandId objId,
8204 uint32_t shapesOffset) {
8205 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8206 Register obj = allocator.useRegister(masm, objId);
8207 AutoScratchRegister shapes(allocator, masm);
8208 AutoScratchRegister scratch(allocator, masm);
8209 AutoScratchRegister scratch2(allocator, masm);
8211 bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
8213 Register spectreScratch = InvalidReg;
8214 Maybe<AutoScratchRegister> maybeSpectreScratch;
8215 if (needSpectreMitigations) {
8216 maybeSpectreScratch.emplace(allocator, masm);
8217 spectreScratch = *maybeSpectreScratch;
8218   }
8220 FailurePath* failure;
8221 if (!addFailurePath(&failure)) {
8222 return false;
8223   }
8225 // The stub field contains a ListObject. Load its elements.
8226 StubFieldOffset shapeArray(shapesOffset, StubField::Type::JSObject);
8227 emitLoadStubField(shapeArray, shapes);
8228 masm.loadPtr(Address(shapes, NativeObject::offsetOfElements()), shapes);
8230 masm.branchTestObjShapeList(Assembler::NotEqual, obj, shapes, scratch,
8231 scratch2, spectreScratch, failure->label());
8232 return true;
8233 }
8235 bool CacheIRCompiler::emitLoadObject(ObjOperandId resultId,
8236 uint32_t objOffset) {
8237 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8238 Register reg = allocator.defineRegister(masm, resultId);
8239 StubFieldOffset obj(objOffset, StubField::Type::JSObject);
8240 emitLoadStubField(obj, reg);
8241 return true;
8242 }
8244 bool CacheIRCompiler::emitLoadProtoObject(ObjOperandId resultId,
8245 uint32_t objOffset,
8246 ObjOperandId receiverObjId) {
8247 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8248 Register reg = allocator.defineRegister(masm, resultId);
8249 StubFieldOffset obj(objOffset, StubField::Type::JSObject);
8250 emitLoadStubField(obj, reg);
8251 return true;
8252 }
8254 bool CacheIRCompiler::emitLoadInt32Constant(uint32_t valOffset,
8255 Int32OperandId resultId) {
8256 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8257 Register reg = allocator.defineRegister(masm, resultId);
8258 StubFieldOffset val(valOffset, StubField::Type::RawInt32);
8259 emitLoadStubField(val, reg);
8260 return true;
8261 }
8263 bool CacheIRCompiler::emitLoadBooleanConstant(bool val,
8264 BooleanOperandId resultId) {
8265 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8266 Register reg = allocator.defineRegister(masm, resultId);
8267 masm.move32(Imm32(val), reg);
8268 return true;
8269 }
8271 bool CacheIRCompiler::emitLoadDoubleConstant(uint32_t valOffset,
8272 NumberOperandId resultId) {
8273 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8275 ValueOperand output = allocator.defineValueRegister(masm, resultId);
8276 StubFieldOffset val(valOffset, StubField::Type::Double);
8278 AutoScratchFloatRegister floatReg(this);
8280 emitLoadDoubleValueStubField(val, output, floatReg);
8281 return true;
8282 }
8284 bool CacheIRCompiler::emitLoadUndefined(ValOperandId resultId) {
8285 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8287 ValueOperand reg = allocator.defineValueRegister(masm, resultId);
8288 masm.moveValue(UndefinedValue(), reg);
8289 return true;
8290 }
8292 bool CacheIRCompiler::emitLoadConstantString(uint32_t strOffset,
8293 StringOperandId resultId) {
8294 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8295 Register reg = allocator.defineRegister(masm, resultId);
8296 StubFieldOffset str(strOffset, StubField::Type::String);
8297 emitLoadStubField(str, reg);
8298 return true;
8299 }
8301 bool CacheIRCompiler::emitCallInt32ToString(Int32OperandId inputId,
8302 StringOperandId resultId) {
8303 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8304 Register input = allocator.useRegister(masm, inputId);
8305 Register result = allocator.defineRegister(masm, resultId);
8307 FailurePath* failure;
8308 if (!addFailurePath(&failure)) {
8309 return false;
8310   }
8312 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
8313 liveVolatileFloatRegs());
8314 volatileRegs.takeUnchecked(result);
8315 masm.PushRegsInMask(volatileRegs);
8317 using Fn = JSLinearString* (*)(JSContext* cx, int32_t i);
8318 masm.setupUnalignedABICall(result);
8319 masm.loadJSContext(result);
8320 masm.passABIArg(result);
8321 masm.passABIArg(input);
8322 masm.callWithABI<Fn, js::Int32ToStringPure>();
8324 masm.storeCallPointerResult(result);
8325 masm.PopRegsInMask(volatileRegs);
8327 masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
8328 return true;
8329 }
8331 bool CacheIRCompiler::emitCallNumberToString(NumberOperandId inputId,
8332 StringOperandId resultId) {
8333 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8335 AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
8337 allocator.ensureDoubleRegister(masm, inputId, floatScratch0);
8338 Register result = allocator.defineRegister(masm, resultId);
8340 FailurePath* failure;
8341 if (!addFailurePath(&failure)) {
8342 return false;
8343   }
8345 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
8346 liveVolatileFloatRegs());
8347 volatileRegs.takeUnchecked(result);
8348 masm.PushRegsInMask(volatileRegs);
8350 using Fn = JSString* (*)(JSContext* cx, double d);
8351 masm.setupUnalignedABICall(result);
8352 masm.loadJSContext(result);
8353 masm.passABIArg(result);
8354 masm.passABIArg(floatScratch0, ABIType::Float64);
8355 masm.callWithABI<Fn, js::NumberToStringPure>();
8357 masm.storeCallPointerResult(result);
8358 masm.PopRegsInMask(volatileRegs);
8360 masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
8361 return true;
8362 }
8364 bool CacheIRCompiler::emitInt32ToStringWithBaseResult(Int32OperandId inputId,
8365 Int32OperandId baseId) {
8366 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8368 AutoCallVM callvm(masm, this, allocator);
8369 Register input = allocator.useRegister(masm, inputId);
8370 Register base = allocator.useRegister(masm, baseId);
8372 FailurePath* failure;
8373 if (!addFailurePath(&failure)) {
8374 return false;
8375   }
8377 // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
8378 // we can't use both at the same time. This isn't an issue here, because Ion
8379 // doesn't support CallICs. If that ever changes, this code must be updated.
8380 MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
8382 masm.branch32(Assembler::LessThan, base, Imm32(2), failure->label());
8383 masm.branch32(Assembler::GreaterThan, base, Imm32(36), failure->label());
8385 // Use lower-case characters by default.
8386 constexpr bool lowerCase = true;
8388 callvm.prepare();
8390 masm.Push(Imm32(lowerCase));
8391 masm.Push(base);
8392 masm.Push(input);
8394 using Fn = JSString* (*)(JSContext*, int32_t, int32_t, bool);
8395 callvm.call<Fn, js::Int32ToStringWithBase>();
8396 return true;
8397 }
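// Added note: the two branch32 guards above restrict |base| to the inclusive
// range [2, 36] that Number.prototype.toString accepts; anything outside
// bails to the generic path, which is where the RangeError actually gets
// thrown. For example (JS):
//   (255).toString(16)  // "ff" -- lowerCase == true above
//   (255).toString(37)  // RangeError; never reaches the VM call here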
8399 bool CacheIRCompiler::emitBooleanToString(BooleanOperandId inputId,
8400 StringOperandId resultId) {
8401 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8402 Register boolean = allocator.useRegister(masm, inputId);
8403 Register result = allocator.defineRegister(masm, resultId);
8404 const JSAtomState& names = cx_->names();
8405 Label true_, done;
8407 masm.branchTest32(Assembler::NonZero, boolean, boolean, &true_);
8409 // False case
8410 masm.movePtr(ImmGCPtr(names.false_), result);
8411 masm.jump(&done);
8413 // True case
8414 masm.bind(&true_);
8415 masm.movePtr(ImmGCPtr(names.true_), result);
8416 masm.bind(&done);
8418 return true;
8419 }
8421 bool CacheIRCompiler::emitObjectToStringResult(ObjOperandId objId) {
8422 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8424 AutoOutputRegister output(*this);
8425 Register obj = allocator.useRegister(masm, objId);
8426 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
8428 FailurePath* failure;
8429 if (!addFailurePath(&failure)) {
8430 return false;
8431   }
8433 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
8434 liveVolatileFloatRegs());
8435 volatileRegs.takeUnchecked(output.valueReg());
8436 volatileRegs.takeUnchecked(scratch);
8437 masm.PushRegsInMask(volatileRegs);
8439 using Fn = JSString* (*)(JSContext*, JSObject*);
8440 masm.setupUnalignedABICall(scratch);
8441 masm.loadJSContext(scratch);
8442 masm.passABIArg(scratch);
8443 masm.passABIArg(obj);
8444 masm.callWithABI<Fn, js::ObjectClassToString>();
8445 masm.storeCallPointerResult(scratch);
8447 masm.PopRegsInMask(volatileRegs);
8449 masm.branchPtr(Assembler::Equal, scratch, ImmPtr(nullptr), failure->label());
8450 masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
8452 return true;
8453 }
8455 bool CacheIRCompiler::emitCallStringConcatResult(StringOperandId lhsId,
8456 StringOperandId rhsId) {
8457 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8458 AutoCallVM callvm(masm, this, allocator);
8460 Register lhs = allocator.useRegister(masm, lhsId);
8461 Register rhs = allocator.useRegister(masm, rhsId);
8463 callvm.prepare();
8465 masm.Push(static_cast<js::jit::Imm32>(int32_t(js::gc::Heap::Default)));
8466 masm.Push(rhs);
8467 masm.Push(lhs);
8469 using Fn =
8470 JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
8471 callvm.call<Fn, ConcatStrings<CanGC>>();
8473 return true;
8474 }
8476 bool CacheIRCompiler::emitCallIsSuspendedGeneratorResult(ValOperandId valId) {
8477 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8478 AutoOutputRegister output(*this);
8479 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
8480 AutoScratchRegister scratch2(allocator, masm);
8481 ValueOperand input = allocator.useValueRegister(masm, valId);
8483 // Test if it's an object.
8484 Label returnFalse, done;
8485 masm.fallibleUnboxObject(input, scratch, &returnFalse);
8487 // Test if it's a GeneratorObject.
8488 masm.branchTestObjClass(Assembler::NotEqual, scratch,
8489 &GeneratorObject::class_, scratch2, scratch,
8490 &returnFalse);
8492 // If the resumeIndex slot holds an int32 value < RESUME_INDEX_RUNNING,
8493 // the generator is suspended.
8494 Address addr(scratch, AbstractGeneratorObject::offsetOfResumeIndexSlot());
8495 masm.fallibleUnboxInt32(addr, scratch, &returnFalse);
8496 masm.branch32(Assembler::AboveOrEqual, scratch,
8497 Imm32(AbstractGeneratorObject::RESUME_INDEX_RUNNING),
8498 &returnFalse);
8500 masm.moveValue(BooleanValue(true), output.valueReg());
8501 masm.jump(&done);
8503 masm.bind(&returnFalse);
8504 masm.moveValue(BooleanValue(false), output.valueReg());
8506 masm.bind(&done);
8507 return true;
8508 }
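// Added note: the slot test above relies on the resumeIndex encoding: a
// suspended generator stores its int32 resume index there, always below
// RESUME_INDEX_RUNNING; running/closing generators store sentinels at or
// above it; and a closed generator no longer stores an int32 in that slot at
// all, which is why fallibleUnboxInt32 routes it to |returnFalse|.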
8510 // This op generates no code. It is consumed by the transpiler.
8511 bool CacheIRCompiler::emitMetaScriptedThisShape(uint32_t) { return true; }
8513 bool CacheIRCompiler::emitCallNativeGetElementResult(ObjOperandId objId,
8514 Int32OperandId indexId) {
8515 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8516 AutoCallVM callvm(masm, this, allocator);
8518 Register obj = allocator.useRegister(masm, objId);
8519 Register index = allocator.useRegister(masm, indexId);
8521 callvm.prepare();
8523 masm.Push(index);
8524 masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
8525 masm.Push(obj);
8527 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
8528 MutableHandleValue);
8529 callvm.call<Fn, NativeGetElement>();
8531 return true;
8532 }
8534 bool CacheIRCompiler::emitCallNativeGetElementSuperResult(
8535 ObjOperandId objId, Int32OperandId indexId, ValOperandId receiverId) {
8536 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8537 AutoCallVM callvm(masm, this, allocator);
8539 Register obj = allocator.useRegister(masm, objId);
8540 Register index = allocator.useRegister(masm, indexId);
8541 ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
8543 callvm.prepare();
8545 masm.Push(index);
8546 masm.Push(receiver);
8547 masm.Push(obj);
8549 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
8550 MutableHandleValue);
8551 callvm.call<Fn, NativeGetElement>();
8553 return true;
8554 }
8556 bool CacheIRCompiler::emitProxyHasPropResult(ObjOperandId objId,
8557 ValOperandId idId, bool hasOwn) {
8558 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8559 AutoCallVM callvm(masm, this, allocator);
8561 Register obj = allocator.useRegister(masm, objId);
8562 ValueOperand idVal = allocator.useValueRegister(masm, idId);
8564 callvm.prepare();
8566 masm.Push(idVal);
8567 masm.Push(obj);
8569 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
8570 if (hasOwn) {
8571 callvm.call<Fn, ProxyHasOwn>();
8572 } else {
8573 callvm.call<Fn, ProxyHas>();
8574   }
8575 return true;
8576 }
8578 bool CacheIRCompiler::emitProxyGetByValueResult(ObjOperandId objId,
8579 ValOperandId idId) {
8580 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8581 AutoCallVM callvm(masm, this, allocator);
8583 Register obj = allocator.useRegister(masm, objId);
8584 ValueOperand idVal = allocator.useValueRegister(masm, idId);
8586 callvm.prepare();
8587 masm.Push(idVal);
8588 masm.Push(obj);
8590 using Fn =
8591 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
8592 callvm.call<Fn, ProxyGetPropertyByValue>();
8593 return true;
8594 }
8596 bool CacheIRCompiler::emitCallGetSparseElementResult(ObjOperandId objId,
8597 Int32OperandId indexId) {
8598 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8600 AutoCallVM callvm(masm, this, allocator);
8602 Register obj = allocator.useRegister(masm, objId);
8603 Register id = allocator.useRegister(masm, indexId);
8605 callvm.prepare();
8606 masm.Push(id);
8607 masm.Push(obj);
8609 using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
8610 MutableHandleValue result);
8611 callvm.call<Fn, GetSparseElementHelper>();
8612 return true;
8613 }
8615 bool CacheIRCompiler::emitRegExpSearcherLastLimitResult() {
8616 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8618 AutoOutputRegister output(*this);
8619 AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
8620 AutoScratchRegister scratch2(allocator, masm);
8622 masm.loadAndClearRegExpSearcherLastLimit(scratch1, scratch2);
8624 masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
8625 return true;
8626 }
8628 bool CacheIRCompiler::emitRegExpFlagResult(ObjOperandId regexpId,
8629 int32_t flagsMask) {
8630 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8632 AutoOutputRegister output(*this);
8633 Register regexp = allocator.useRegister(masm, regexpId);
8634 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
8636 Address flagsAddr(
8637 regexp, NativeObject::getFixedSlotOffset(RegExpObject::flagsSlot()));
8638 masm.unboxInt32(flagsAddr, scratch);
8640 Label ifFalse, done;
8641 masm.branchTest32(Assembler::Zero, scratch, Imm32(flagsMask), &ifFalse);
8642 masm.moveValue(BooleanValue(true), output.valueReg());
8643 masm.jump(&done);
8645 masm.bind(&ifFalse);
8646 masm.moveValue(BooleanValue(false), output.valueReg());
8648 masm.bind(&done);
8649 return true;
8650 }
8652 bool CacheIRCompiler::emitCallSubstringKernelResult(StringOperandId strId,
8653 Int32OperandId beginId,
8654 Int32OperandId lengthId) {
8655 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8657 AutoCallVM callvm(masm, this, allocator);
8659 Register str = allocator.useRegister(masm, strId);
8660 Register begin = allocator.useRegister(masm, beginId);
8661 Register length = allocator.useRegister(masm, lengthId);
8663 callvm.prepare();
8664 masm.Push(length);
8665 masm.Push(begin);
8666 masm.Push(str);
8668 using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
8669 int32_t len);
8670 callvm.call<Fn, SubstringKernel>();
8671 return true;
8672 }
8674 bool CacheIRCompiler::emitStringReplaceStringResult(
8675 StringOperandId strId, StringOperandId patternId,
8676 StringOperandId replacementId) {
8677 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8679 AutoCallVM callvm(masm, this, allocator);
8681 Register str = allocator.useRegister(masm, strId);
8682 Register pattern = allocator.useRegister(masm, patternId);
8683 Register replacement = allocator.useRegister(masm, replacementId);
8685 callvm.prepare();
8686 masm.Push(replacement);
8687 masm.Push(pattern);
8688 masm.Push(str);
8690 using Fn =
8691 JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
8692 callvm.call<Fn, jit::StringReplace>();
8693 return true;
8694 }
8696 bool CacheIRCompiler::emitStringSplitStringResult(StringOperandId strId,
8697 StringOperandId separatorId) {
8698 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8700 AutoCallVM callvm(masm, this, allocator);
8702 Register str = allocator.useRegister(masm, strId);
8703 Register separator = allocator.useRegister(masm, separatorId);
8705 callvm.prepare();
8706 masm.Push(Imm32(INT32_MAX));
8707 masm.Push(separator);
8708 masm.Push(str);
8710 using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
8711 callvm.call<Fn, js::StringSplitString>();
8712 return true;
8713 }
8715 bool CacheIRCompiler::emitRegExpPrototypeOptimizableResult(
8716 ObjOperandId protoId) {
8717 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8719 AutoOutputRegister output(*this);
8720 Register proto = allocator.useRegister(masm, protoId);
8721 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
8723 Label slow, done;
8724 masm.branchIfNotRegExpPrototypeOptimizable(
8725 proto, scratch, /* maybeGlobal = */ nullptr, &slow);
8726 masm.moveValue(BooleanValue(true), output.valueReg());
8727 masm.jump(&done);
8730 masm.bind(&slow);
8731   {
8732 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
8733 liveVolatileFloatRegs());
8734 volatileRegs.takeUnchecked(scratch);
8735 masm.PushRegsInMask(volatileRegs);
8737 using Fn = bool (*)(JSContext* cx, JSObject* proto);
8738 masm.setupUnalignedABICall(scratch);
8739 masm.loadJSContext(scratch);
8740 masm.passABIArg(scratch);
8741 masm.passABIArg(proto);
8742 masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
8743 masm.storeCallBoolResult(scratch);
8745 masm.PopRegsInMask(volatileRegs);
8746 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
8747   }
8749 masm.bind(&done);
8750 return true;
8751 }
8753 bool CacheIRCompiler::emitRegExpInstanceOptimizableResult(
8754 ObjOperandId regexpId, ObjOperandId protoId) {
8755 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8757 AutoOutputRegister output(*this);
8758 Register regexp = allocator.useRegister(masm, regexpId);
8759 Register proto = allocator.useRegister(masm, protoId);
8760 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
8762 Label slow, done;
8763 masm.branchIfNotRegExpInstanceOptimizable(regexp, scratch,
8764 /* maybeGlobal = */ nullptr, &slow);
8765 masm.moveValue(BooleanValue(true), output.valueReg());
8766 masm.jump(&done);
8769 masm.bind(&slow);
8770   {
8771 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
8772 liveVolatileFloatRegs());
8773 volatileRegs.takeUnchecked(scratch);
8774 masm.PushRegsInMask(volatileRegs);
8776 using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
8777 masm.setupUnalignedABICall(scratch);
8778 masm.loadJSContext(scratch);
8779 masm.passABIArg(scratch);
8780 masm.passABIArg(regexp);
8781 masm.passABIArg(proto);
8782 masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
8783 masm.storeCallBoolResult(scratch);
8785 masm.PopRegsInMask(volatileRegs);
8786 masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
8787   }
8789 masm.bind(&done);
8790 return true;
8791 }
8793 bool CacheIRCompiler::emitGetFirstDollarIndexResult(StringOperandId strId) {
8794 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8796 AutoCallVM callvm(masm, this, allocator);
8798 Register str = allocator.useRegister(masm, strId);
8800 callvm.prepare();
8801 masm.Push(str);
8803 using Fn = bool (*)(JSContext*, JSString*, int32_t*);
8804 callvm.call<Fn, GetFirstDollarIndexRaw>();
8805 return true;
8806 }
8808 bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
8809 ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
8810 uint32_t replacementId, Scalar::Type elementType) {
8811 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8813 Maybe<AutoOutputRegister> output;
8814 Maybe<AutoCallVM> callvm;
8815 if (!Scalar::isBigIntType(elementType)) {
8816 output.emplace(*this);
8817 } else {
8818 callvm.emplace(masm, this, allocator);
8819   }
8820 #ifdef JS_CODEGEN_X86
8821 // Use a scratch register to avoid running out of registers.
8822 Register obj = output ? output->valueReg().typeReg()
8823 : callvm->outputValueReg().typeReg();
8824 allocator.copyToScratchRegister(masm, objId, obj);
8825 #else
8826 Register obj = allocator.useRegister(masm, objId);
8827 #endif
8828 Register index = allocator.useRegister(masm, indexId);
8829 Register expected;
8830 Register replacement;
8831 if (!Scalar::isBigIntType(elementType)) {
8832 expected = allocator.useRegister(masm, Int32OperandId(expectedId));
8833 replacement = allocator.useRegister(masm, Int32OperandId(replacementId));
8834 } else {
8835 expected = allocator.useRegister(masm, BigIntOperandId(expectedId));
8836 replacement = allocator.useRegister(masm, BigIntOperandId(replacementId));
8837   }
8839 Register scratch = output ? output->valueReg().scratchReg()
8840 : callvm->outputValueReg().scratchReg();
8841 MOZ_ASSERT(scratch != obj, "scratchReg must not be typeReg");
8843 // Not enough registers on X86.
8844 Register spectreTemp = Register::Invalid();
8846 FailurePath* failure;
8847 if (!addFailurePath(&failure)) {
8848 return false;
8849   }
8851 // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
8852 // we can't use both at the same time. This isn't an issue here, because Ion
8853 // doesn't support CallICs. If that ever changes, this code must be updated.
8854 MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
8856 // Bounds check.
8857 masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
8858 masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
8860 // Atomic operations are highly platform-dependent, for example x86/x64 has
8861 // specific requirements on which registers are used; MIPS needs multiple
8862 // additional temporaries. Therefore we're using either an ABI or VM call here
8863 // instead of handling each platform separately.
8865 if (Scalar::isBigIntType(elementType)) {
8866 callvm->prepare();
8868 masm.Push(replacement);
8869 masm.Push(expected);
8870 masm.Push(index);
8871 masm.Push(obj);
8873 using Fn = BigInt* (*)(JSContext*, FixedLengthTypedArrayObject*, size_t,
8874 const BigInt*, const BigInt*);
8875 callvm->call<Fn, jit::AtomicsCompareExchange64>();
8876 return true;
8877   }
8879   {
8880 LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
8881 liveVolatileFloatRegs());
8882 volatileRegs.takeUnchecked(output->valueReg());
8883 volatileRegs.takeUnchecked(scratch);
8884 masm.PushRegsInMask(volatileRegs);
8886 masm.setupUnalignedABICall(scratch);
8887 masm.passABIArg(obj);
8888 masm.passABIArg(index);
8889 masm.passABIArg(expected);
8890 masm.passABIArg(replacement);
8891 masm.callWithABI(DynamicFunction<AtomicsCompareExchangeFn>(
8892 AtomicsCompareExchange(elementType)));
8893 masm.storeCallInt32Result(scratch);
8895 masm.PopRegsInMask(volatileRegs);
8898 if (elementType != Scalar::Uint32) {
8899 masm.tagValue(JSVAL_TYPE_INT32, scratch, output->valueReg());
8900 } else {
8901 ScratchDoubleScope fpscratch(masm);
8902 masm.convertUInt32ToDouble(scratch, fpscratch);
8903 masm.boxDouble(fpscratch, output->valueReg(), fpscratch);
8906 return true;
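// Shared emitter for the 32-bit Atomics read-modify-write operations
// (exchange/add/sub/and/or/xor); `fn` is the per-element-type helper that is
// invoked through the ABI, and its int32 result is boxed afterwards.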
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
    Scalar::Type elementType, AtomicsReadWriteModifyFn fn) {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  Register value = allocator.useRegister(masm, Int32OperandId(valueId));
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // See comment in emitAtomicsCompareExchange for why we use an ABI call.
  {
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(output.valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(value);
    masm.callWithABI(DynamicFunction<AtomicsReadWriteModifyFn>(fn));
    masm.storeCallInt32Result(scratch);

    masm.PopRegsInMask(volatileRegs);
  }

  if (elementType != Scalar::Uint32) {
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  } else {
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  }

  return true;
}

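// BigInt64/BigUint64 variant of the read-modify-write emitter: the operation
// happens in a VM call that returns the old element value as a BigInt.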
template <CacheIRCompiler::AtomicsReadWriteModify64Fn fn>
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId) {
  AutoCallVM callvm(masm, this, allocator);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  Register value = allocator.useRegister(masm, BigIntOperandId(valueId));
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // See comment in emitAtomicsCompareExchange for why we use a VM call.

  callvm.prepare();

  masm.Push(value);
  masm.Push(index);
  masm.Push(obj);

  callvm.call<AtomicsReadWriteModify64Fn, fn>();
  return true;
}

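// The emitters below only dispatch on the element type: BigInt64/BigUint64
// use the 64-bit VM-call path, all other element types the 32-bit ABI path.
// Note that the forEffect flag is not consulted in this shared code.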
bool CacheIRCompiler::emitAtomicsExchangeResult(ObjOperandId objId,
                                                IntPtrOperandId indexId,
                                                uint32_t valueId,
                                                Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsExchange64>(
        objId, indexId, valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsExchange(elementType));
}

bool CacheIRCompiler::emitAtomicsAddResult(ObjOperandId objId,
                                           IntPtrOperandId indexId,
                                           uint32_t valueId,
                                           Scalar::Type elementType,
                                           bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsAdd64>(objId, indexId,
                                                                 valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsAdd(elementType));
}

bool CacheIRCompiler::emitAtomicsSubResult(ObjOperandId objId,
                                           IntPtrOperandId indexId,
                                           uint32_t valueId,
                                           Scalar::Type elementType,
                                           bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsSub64>(objId, indexId,
                                                                 valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsSub(elementType));
}

bool CacheIRCompiler::emitAtomicsAndResult(ObjOperandId objId,
                                           IntPtrOperandId indexId,
                                           uint32_t valueId,
                                           Scalar::Type elementType,
                                           bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsAnd64>(objId, indexId,
                                                                 valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsAnd(elementType));
}

bool CacheIRCompiler::emitAtomicsOrResult(ObjOperandId objId,
                                          IntPtrOperandId indexId,
                                          uint32_t valueId,
                                          Scalar::Type elementType,
                                          bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsOr64>(objId, indexId,
                                                                valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsOr(elementType));
}

bool CacheIRCompiler::emitAtomicsXorResult(ObjOperandId objId,
                                           IntPtrOperandId indexId,
                                           uint32_t valueId,
                                           Scalar::Type elementType,
                                           bool forEffect) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (Scalar::isBigIntType(elementType)) {
    return emitAtomicsReadModifyWriteResult64<jit::AtomicsXor64>(objId, indexId,
                                                                 valueId);
  }
  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
                                          AtomicsXor(elementType));
}

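// IC for Atomics.load. Non-BigInt element types are loaded inline, bracketed
// by memory barriers; BigInt elements take a VM call.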
bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
                                            IntPtrOperandId indexId,
                                            Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Maybe<AutoOutputRegister> output;
  Maybe<AutoCallVM> callvm;
  if (!Scalar::isBigIntType(elementType)) {
    output.emplace(*this);
  } else {
    callvm.emplace(masm, this, allocator);
  }
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm,
                                         output ? *output : callvm->output());
  AutoSpectreBoundsScratchRegister spectreTemp(allocator, masm);
  AutoAvailableFloatRegister floatReg(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // Atomic operations are highly platform-dependent, for example x86/arm32 has
  // specific requirements on which registers are used. Therefore we're using a
  // VM call here instead of handling each platform separately.
  if (Scalar::isBigIntType(elementType)) {
    callvm->prepare();

    masm.Push(index);
    masm.Push(obj);

    using Fn = BigInt* (*)(JSContext*, FixedLengthTypedArrayObject*, size_t);
    callvm->call<Fn, jit::AtomicsLoad64>();
    return true;
  }

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);

  // Load the value.
  BaseIndex source(scratch, index, ScaleFromScalarType(elementType));

  // NOTE: the generated code must match the assembly code in gen_load in
  // GenerateAtomicOperations.py
  auto sync = Synchronization::Load();

  masm.memoryBarrierBefore(sync);

  Label* failUint32 = nullptr;
  MacroAssembler::Uint32Mode mode = MacroAssembler::Uint32Mode::ForceDouble;
  masm.loadFromTypedArray(elementType, source, output->valueReg(), mode,
                          scratch, failUint32);
  masm.memoryBarrierAfter(sync);

  return true;
}

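// IC for Atomics.store. Atomics.store returns the value that was stored, so
// the result below is re-tagged from the input rather than reloaded.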
bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
                                             IntPtrOperandId indexId,
                                             uint32_t valueId,
                                             Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  Maybe<Register> valueInt32;
  Maybe<Register> valueBigInt;
  if (!Scalar::isBigIntType(elementType)) {
    valueInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
  } else {
    valueBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
  }
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  if (!Scalar::isBigIntType(elementType)) {
    // Load the elements vector.
    masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);

    // Store the value.
    BaseIndex dest(scratch, index, ScaleFromScalarType(elementType));

    // NOTE: the generated code must match the assembly code in gen_store in
    // GenerateAtomicOperations.py
    auto sync = Synchronization::Store();

    masm.memoryBarrierBefore(sync);
    masm.storeToTypedIntArray(elementType, *valueInt32, dest);
    masm.memoryBarrierAfter(sync);

    masm.tagValue(JSVAL_TYPE_INT32, *valueInt32, output.valueReg());
  } else {
    // See comment in emitAtomicsCompareExchange for why we use an ABI call.

    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(output.valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = void (*)(FixedLengthTypedArrayObject*, size_t, const BigInt*);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(*valueBigInt);
    masm.callWithABI<Fn, jit::AtomicsStore64>();

    masm.PopRegsInMask(volatileRegs);

    masm.tagValue(JSVAL_TYPE_BIGINT, *valueBigInt, output.valueReg());
  }

  return true;
}

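// IC for Atomics.isLockFree: fully inlined, the boolean answer is computed
// from the requested byte size by atomicIsLockFreeJS.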
bool CacheIRCompiler::emitAtomicsIsLockFreeResult(Int32OperandId valueId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register value = allocator.useRegister(masm, valueId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.atomicIsLockFreeJS(value, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());

  return true;
}

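// ICs for BigInt.asIntN/asUintN. Both use a VM call, presumably because the
// truncated result is in general a freshly allocated BigInt.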
bool CacheIRCompiler::emitBigIntAsIntNResult(Int32OperandId bitsId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register bits = allocator.useRegister(masm, bitsId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  callvm.prepare();
  masm.Push(bits);
  masm.Push(bigInt);

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  callvm.call<Fn, jit::BigIntAsIntN>();
  return true;
}

bool CacheIRCompiler::emitBigIntAsUintNResult(Int32OperandId bitsId,
                                              BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register bits = allocator.useRegister(masm, bitsId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  callvm.prepare();
  masm.Push(bits);
  masm.Push(bigInt);

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  callvm.call<Fn, jit::BigIntAsUintN>();
  return true;
}

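// Set and Map ICs. The generic emitSetHasResult/emitMapHasResult fall back
// to a VM call; the typed variants inline the hash computation
// (prepareHash*) and the table lookup (setObjectHas*/mapObject*), first
// normalizing the key where necessary (toHashableNonGCThing).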
bool CacheIRCompiler::emitSetHasResult(ObjOperandId setId, ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register set = allocator.useRegister(masm, setId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  callvm.prepare();
  masm.Push(val);
  masm.Push(set);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  callvm.call<Fn, jit::SetObjectHas>();
  return true;
}

bool CacheIRCompiler::emitSetHasNonGCThingResult(ObjOperandId setId,
                                                 ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
  masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);

  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitSetHasSymbolResult(ObjOperandId setId,
                                             SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register sym = allocator.useRegister(masm, symId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  masm.prepareHashSymbol(sym, scratch1);

  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitSetHasBigIntResult(ObjOperandId setId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);
#ifndef JS_CODEGEN_ARM
  AutoScratchRegister scratch6(allocator, masm);
#else
  // We don't have more registers available on ARM32.
  Register scratch6 = set;

  masm.push(set);
#endif

  masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
  masm.setObjectHasBigInt(set, output.valueReg(), scratch1, scratch2, scratch3,
                          scratch4, scratch5, scratch6);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());

#ifdef JS_CODEGEN_ARM
  masm.pop(set);
#endif
  return true;
}

bool CacheIRCompiler::emitSetHasObjectResult(ObjOperandId setId,
                                             ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);

  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  masm.prepareHashObject(set, output.valueReg(), scratch1, scratch2, scratch3,
                         scratch4, scratch5);

  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitSetSizeResult(ObjOperandId setId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadSetObjectSize(set, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

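// The Map ICs mirror the Set ICs above; only the lookup primitives differ
// (mapObjectHas*/mapObjectGet* instead of setObjectHas*).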
bool CacheIRCompiler::emitMapHasResult(ObjOperandId mapId, ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  callvm.prepare();
  masm.Push(val);
  masm.Push(map);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  callvm.call<Fn, jit::MapObjectHas>();
  return true;
}

bool CacheIRCompiler::emitMapHasNonGCThingResult(ObjOperandId mapId,
                                                 ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
  masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);

  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMapHasSymbolResult(ObjOperandId mapId,
                                             SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register sym = allocator.useRegister(masm, symId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  masm.prepareHashSymbol(sym, scratch1);

  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitMapHasBigIntResult(ObjOperandId mapId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);
#ifndef JS_CODEGEN_ARM
  AutoScratchRegister scratch6(allocator, masm);
#else
  // We don't have more registers available on ARM32.
  Register scratch6 = map;

  masm.push(map);
#endif

  masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
  masm.mapObjectHasBigInt(map, output.valueReg(), scratch1, scratch2, scratch3,
                          scratch4, scratch5, scratch6);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());

#ifdef JS_CODEGEN_ARM
  masm.pop(map);
#endif
  return true;
}

bool CacheIRCompiler::emitMapHasObjectResult(ObjOperandId mapId,
                                             ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);

  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
                         scratch4, scratch5);

  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}

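// The MapGet* ICs have the same structure as MapHas*, but the lookup writes
// the matching value (undefined on a miss, per Map.get semantics) straight
// into the output register.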
bool CacheIRCompiler::emitMapGetResult(ObjOperandId mapId, ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  callvm.prepare();
  masm.Push(val);
  masm.Push(map);

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  callvm.call<Fn, jit::MapObjectGet>();
  return true;
}

bool CacheIRCompiler::emitMapGetNonGCThingResult(ObjOperandId mapId,
                                                 ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
  masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);

  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}

bool CacheIRCompiler::emitMapGetSymbolResult(ObjOperandId mapId,
                                             SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register sym = allocator.useRegister(masm, symId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  masm.prepareHashSymbol(sym, scratch1);

  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}

bool CacheIRCompiler::emitMapGetBigIntResult(ObjOperandId mapId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);
#ifndef JS_CODEGEN_ARM
  AutoScratchRegister scratch6(allocator, masm);
#else
  // We don't have more registers available on ARM32.
  Register scratch6 = map;

  masm.push(map);
#endif

  masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
  masm.mapObjectGetBigInt(map, output.valueReg(), scratch1, output.valueReg(),
                          scratch2, scratch3, scratch4, scratch5, scratch6);

#ifdef JS_CODEGEN_ARM
  masm.pop(map);
#endif
  return true;
}

bool CacheIRCompiler::emitMapGetObjectResult(ObjOperandId mapId,
                                             ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);

  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
                         scratch4, scratch5);

  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}

bool CacheIRCompiler::emitMapSizeResult(ObjOperandId mapId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadMapObjectSize(map, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}

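// Materializes an array from an arguments object via a VM call. The
// shapeOffset stub field is not used by this emitter.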
bool CacheIRCompiler::emitArrayFromArgumentsObjectResult(ObjOperandId objId,
                                                         uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);

  callvm.prepare();
  masm.Push(obj);

  using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
  callvm.call<Fn, js::ArrayFromArgumentsObject>();
  return true;
}

bool CacheIRCompiler::emitGuardGlobalGeneration(uint32_t expectedOffset,
                                                uint32_t generationAddrOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  StubFieldOffset expected(expectedOffset, StubField::Type::RawInt32);
  emitLoadStubField(expected, scratch);

  StubFieldOffset generationAddr(generationAddrOffset,
                                 StubField::Type::RawPointer);
  emitLoadStubField(generationAddr, scratch2);

  masm.branch32(Assembler::NotEqual, Address(scratch2, 0), scratch,
                failure->label());

  return true;
}

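// Guards that a realm fuse is still intact: the stub fails once the loaded
// fuse word becomes non-null, i.e. when the fuse has been popped.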
bool CacheIRCompiler::emitGuardFuse(RealmFuses::FuseIndex fuseIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadRealmFuse(fuseIndex, scratch);
  masm.branchPtr(Assembler::NotEqual, scratch, ImmPtr(nullptr),
                 failure->label());
  return true;
}

bool CacheIRCompiler::emitBailout() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Generates no code.

  return true;
}

bool CacheIRCompiler::emitAssertRecoveredOnBailoutResult(ValOperandId valId,
                                                         bool mustBeRecovered) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);

  // NOP when not in IonMonkey
  masm.moveValue(UndefinedValue(), output.valueReg());

  return true;
}

bool CacheIRCompiler::emitAssertPropertyLookup(ObjOperandId objId,
                                               uint32_t idOffset,
                                               uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister id(allocator, masm);
  AutoScratchRegister slot(allocator, masm);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(id);

  StubFieldOffset idField(idOffset, StubField::Type::Id);
  emitLoadStubField(idField, id);

  StubFieldOffset slotField(slotOffset, StubField::Type::RawInt32);
  emitLoadStubField(slotField, slot);

  masm.passABIArg(obj);
  masm.passABIArg(id);
  masm.passABIArg(slot);
  using Fn = void (*)(NativeObject*, PropertyKey, uint32_t);
  masm.callWithABI<Fn, js::jit::AssertPropertyLookup>();
  masm.PopRegsInMask(save);

  return true;
}

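// Fuzzilli instrumentation: folds a hash of the observed value into
// JSContext::executionHash so the fuzzer can detect behavioral differences.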
#ifdef FUZZING_JS_FUZZILLI
bool CacheIRCompiler::emitFuzzilliHashResult(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand input = allocator.useValueRegister(masm, valId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratchJSContext(allocator, masm);
  AutoScratchFloatRegister floatReg(this);
#  ifdef JS_PUNBOX64
  AutoScratchRegister64 scratch64(allocator, masm);
#  else
  AutoScratchRegister scratch2(allocator, masm);
#  endif

  Label addFloat, updateHash, done;

  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxInt32(input, scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notInt32);

    Label notDouble;
    masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxDouble(input, floatReg);
      masm.canonicalizeDouble(floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notDouble);

    Label notNull;
    masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
    {
      ScratchTagScopeRelease _(&tag);

      masm.move32(Imm32(1), scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notNull);

    Label notUndefined;
    masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
    {
      ScratchTagScopeRelease _(&tag);

      masm.move32(Imm32(2), scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notUndefined);

    Label notBoolean;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxBoolean(input, scratch);
      masm.add32(Imm32(3), scratch);
      masm.convertInt32ToDouble(scratch, floatReg);
      masm.jump(&addFloat);
    }
    masm.bind(&notBoolean);

    Label notBigInt;
    masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
    {
      ScratchTagScopeRelease _(&tag);

      masm.unboxBigInt(input, scratch);

      LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                   liveVolatileFloatRegs());
      masm.PushRegsInMask(volatileRegs);
      // TODO: remove floatReg, scratch, scratchJS?

      using Fn = uint32_t (*)(BigInt* bigInt);
      masm.setupUnalignedABICall(scratchJSContext);
      masm.loadJSContext(scratchJSContext);
      masm.passABIArg(scratch);
      masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
      masm.storeCallInt32Result(scratch);

      LiveRegisterSet ignore;
      ignore.add(scratch);
      ignore.add(scratchJSContext);
      masm.PopRegsInMaskIgnore(volatileRegs, ignore);
      masm.jump(&updateHash);
    }
    masm.bind(&notBigInt);

    Label notObject;
    masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
    {
      ScratchTagScopeRelease _(&tag);

      AutoCallVM callvm(masm, this, allocator);
      Register obj = allocator.allocateRegister(masm);
      masm.unboxObject(input, obj);

      callvm.prepare();
      masm.Push(obj);

      using Fn = void (*)(JSContext* cx, JSObject* o);
      callvm.callNoResult<Fn, js::FuzzilliHashObject>();
      allocator.releaseRegister(obj);

      masm.jump(&done);
    }
    masm.bind(&notObject);
    {
      masm.move32(Imm32(0), scratch);
      masm.jump(&updateHash);
    }
  }

  {
    masm.bind(&addFloat);

    masm.loadJSContext(scratchJSContext);
    Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));

#  ifdef JS_PUNBOX64
    masm.moveDoubleToGPR64(floatReg, scratch64);
    masm.move32(scratch64.get().reg, scratch);
    masm.rshift64(Imm32(32), scratch64);
    masm.add32(scratch64.get().reg, scratch);
#  else
    Register64 scratch64(scratch, scratch2);
    masm.moveDoubleToGPR64(floatReg, scratch64);
    masm.add32(scratch2, scratch);
#  endif
  }

  {
    masm.bind(&updateHash);

    masm.loadJSContext(scratchJSContext);
    Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
    masm.load32(addrExecHash, scratchJSContext);
    masm.add32(scratchJSContext, scratch);
    masm.rotateLeft(Imm32(1), scratch, scratch);
    masm.loadJSContext(scratchJSContext);
    masm.store32(scratch, addrExecHash);

    // stats
    Address addrExecHashInputs(scratchJSContext,
                               offsetof(JSContext, executionHashInputs));
    masm.load32(addrExecHashInputs, scratch);
    masm.add32(Imm32(1), scratch);
    masm.store32(scratch, addrExecHashInputs);
  }

  masm.bind(&done);

  AutoOutputRegister output(*this);
  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}
#endif

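// Shared VM-call plumbing. In Ion the VM wrapper is invoked through an
// IonICCall frame that is torn down manually below; Baseline defers to the
// shared EmitBaselineCallVM helper.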
template <typename Fn, Fn fn>
void CacheIRCompiler::callVM(MacroAssembler& masm) {
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  callVMInternal(masm, id);
}

void CacheIRCompiler::callVMInternal(MacroAssembler& masm, VMFunctionId id) {
  MOZ_ASSERT(enteredStubFrame_);
  if (mode_ == Mode::Ion) {
    TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
    const VMFunctionData& fun = GetVMFunction(id);
    uint32_t frameSize = fun.explicitStackSlots() * sizeof(void*);
    masm.PushFrameDescriptor(FrameType::IonICCall);
    masm.callJit(code);

    // Pop rest of the exit frame and the arguments left on the stack.
    int framePop =
        sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
    masm.implicitPop(frameSize + framePop);

    masm.freeStack(asIon()->localTracingSlots() * sizeof(Value));

    // Pop IonICCallFrameLayout.
    masm.Pop(FramePointer);
    masm.freeStack(IonICCallFrameLayout::Size() - sizeof(void*));
    return;
  }

  MOZ_ASSERT(mode_ == Mode::Baseline);

  TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);

  EmitBaselineCallVM(code, masm);
}

bool CacheIRCompiler::isBaseline() { return mode_ == Mode::Baseline; }

bool CacheIRCompiler::isIon() { return mode_ == Mode::Ion; }

BaselineCacheIRCompiler* CacheIRCompiler::asBaseline() {
  MOZ_ASSERT(this->isBaseline());
  return static_cast<BaselineCacheIRCompiler*>(this);
}

IonCacheIRCompiler* CacheIRCompiler::asIon() {
  MOZ_ASSERT(this->isIon());
  return static_cast<IonCacheIRCompiler*>(this);
}

#ifdef DEBUG
void CacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
  if (isBaseline()) {
    // Baseline does not have any FloatRegisters live when calling an IC stub.
    return;
  }

  asIon()->assertFloatRegisterAvailable(reg);
}
#endif

AutoCallVM::AutoCallVM(MacroAssembler& masm, CacheIRCompiler* compiler,
                       CacheRegisterAllocator& allocator)
    : masm_(masm), compiler_(compiler), allocator_(allocator) {
  // Ion needs to `enterStubFrame` before it can callVM and it also needs to
  // initialize AutoSaveLiveRegisters.
  if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
    // Will need to use a downcast here as well, in order to pass the
    // stub to AutoSaveLiveRegisters
    save_.emplace(*compiler_->asIon());
  }

  if (compiler->outputUnchecked_.isSome()) {
    output_.emplace(*compiler);
  }

  if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
    stubFrame_.emplace(*compiler_->asBaseline());
    if (output_.isSome()) {
      scratch_.emplace(allocator_, masm_, output_.ref());
    } else {
      scratch_.emplace(allocator_, masm_);
    }
  }
}

void AutoCallVM::prepare() {
  allocator_.discardStack(masm_);
  MOZ_ASSERT(compiler_ != nullptr);
  if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
    compiler_->asIon()->enterStubFrame(masm_, *save_.ptr());
    return;
  }
  MOZ_ASSERT(compiler_->mode_ == CacheIRCompiler::Mode::Baseline);
  stubFrame_->enter(masm_, scratch_.ref());
}

void AutoCallVM::storeResult(JSValueType returnType) {
  MOZ_ASSERT(returnType != JSVAL_TYPE_DOUBLE);

  if (returnType == JSVAL_TYPE_UNKNOWN) {
    masm_.storeCallResultValue(output_.ref());
  } else {
    if (output_->hasValue()) {
      masm_.tagValue(returnType, ReturnReg, output_->valueReg());
    } else {
      masm_.storeCallPointerResult(output_->typedReg().gpr());
    }
  }
}

void AutoCallVM::leaveBaselineStubFrame() {
  if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
    stubFrame_->leave(masm_);
  }
}

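// Type-level machinery mapping a VMFunction signature to the JSValueType of
// its result. By convention a bool-returning VMFunction reports its result
// through the last (out) parameter; for example
//   Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*)
// gives ReturnType = bool* and therefore JSVAL_TYPE_BOOLEAN.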
template <typename...>
struct VMFunctionReturnType;

template <class R, typename... Args>
struct VMFunctionReturnType<R (*)(JSContext*, Args...)> {
  using LastArgument = typename LastArg<Args...>::Type;

  // By convention VMFunctions returning `bool` use an output parameter.
  using ReturnType =
      std::conditional_t<std::is_same_v<R, bool>, LastArgument, R>;
};

template <class>
struct ReturnTypeToJSValueType;

// Definitions for the currently used return types.
template <>
struct ReturnTypeToJSValueType<MutableHandleValue> {
  static constexpr JSValueType result = JSVAL_TYPE_UNKNOWN;
};
template <>
struct ReturnTypeToJSValueType<bool*> {
  static constexpr JSValueType result = JSVAL_TYPE_BOOLEAN;
};
template <>
struct ReturnTypeToJSValueType<int32_t*> {
  static constexpr JSValueType result = JSVAL_TYPE_INT32;
};
template <>
struct ReturnTypeToJSValueType<JSString*> {
  static constexpr JSValueType result = JSVAL_TYPE_STRING;
};
template <>
struct ReturnTypeToJSValueType<BigInt*> {
  static constexpr JSValueType result = JSVAL_TYPE_BIGINT;
};
template <>
struct ReturnTypeToJSValueType<JSObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<PropertyIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<ArrayIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<StringIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<RegExpStringIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<PlainObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<ArrayObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<TypedArrayObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};

template <typename Fn>
void AutoCallVM::storeResult() {
  using ReturnType = typename VMFunctionReturnType<Fn>::ReturnType;
  storeResult(ReturnTypeToJSValueType<ReturnType>::result);
}

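// Scratch float register for IC code. Baseline can simply use FloatReg0;
// Ion has to spill it around the use. When a failure path is attached, the
// failure() label returned below first routes through a pop of FloatReg0.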
AutoScratchFloatRegister::AutoScratchFloatRegister(CacheIRCompiler* compiler,
                                                   FailurePath* failure)
    : compiler_(compiler), failure_(failure) {
  // If we're compiling a Baseline IC, FloatReg0 is always available.
  if (!compiler_->isBaseline()) {
    MacroAssembler& masm = compiler_->masm;
    masm.push(FloatReg0);
    compiler->allocator.setHasAutoScratchFloatRegisterSpill(true);
  }

  if (failure_) {
    failure_->setHasAutoScratchFloatRegister();
  }
}

AutoScratchFloatRegister::~AutoScratchFloatRegister() {
  if (failure_) {
    failure_->clearHasAutoScratchFloatRegister();
  }

  if (!compiler_->isBaseline()) {
    MacroAssembler& masm = compiler_->masm;
    masm.pop(FloatReg0);
    compiler_->allocator.setHasAutoScratchFloatRegisterSpill(false);

    if (failure_) {
      Label done;
      masm.jump(&done);
      masm.bind(&failurePopReg_);
      masm.pop(FloatReg0);
      masm.jump(failure_->label());
      masm.bind(&done);
    }
  }
}

Label* AutoScratchFloatRegister::failure() {
  MOZ_ASSERT(failure_);

  if (!compiler_->isBaseline()) {
    return &failurePopReg_;
  }
  return failure_->labelUnchecked();
}