/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/IonCacheIRCompiler.h"
#include "mozilla/Maybe.h"

#include <algorithm>

#include "jit/CacheIRCompiler.h"
#include "jit/CacheIRWriter.h"
#include "jit/IonIC.h"
#include "jit/JitcodeMap.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/JSJitFrameIter.h"
#include "jit/Linker.h"
#include "jit/SharedICHelpers.h"
#include "jit/VMFunctions.h"
#include "proxy/DeadObjectProxy.h"
#include "proxy/Proxy.h"
#include "util/Memory.h"
#include "vm/StaticStrings.h"

#include "jit/JSJitFrameIter-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/VMFunctionList-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::Maybe;

namespace JS {
struct ExpandoAndGeneration;
}  // namespace JS

using JS::ExpandoAndGeneration;

namespace js {
namespace jit {

// IonCacheIRCompiler compiles CacheIR to IonIC native code.
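// The generated code is attached to the IC as an IonICStub. A stub's failure
// paths jump through a patchable pointer to the next stub in the chain (or to
// the fallback path), so new stubs can be attached without repatching the
// stubs that already exist (see compile() and attachCacheIRStub() below).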
IonCacheIRCompiler::IonCacheIRCompiler(JSContext* cx, TempAllocator& alloc,
                                       const CacheIRWriter& writer, IonIC* ic,
                                       IonScript* ionScript,
                                       uint32_t stubDataOffset)
    : CacheIRCompiler(cx, alloc, writer, stubDataOffset, Mode::Ion,
                      StubFieldPolicy::Constant),
      writer_(writer),
      ic_(ic),
      ionScript_(ionScript),
      savedLiveRegs_(false) {
  MOZ_ASSERT(ic_);
  MOZ_ASSERT(ionScript_);
}

template <typename T>
T IonCacheIRCompiler::rawPointerStubField(uint32_t offset) {
  static_assert(sizeof(T) == sizeof(uintptr_t), "T must have pointer size");
  return (T)readStubWord(offset, StubField::Type::RawPointer);
}

template <typename T>
T IonCacheIRCompiler::rawInt64StubField(uint32_t offset) {
  static_assert(sizeof(T) == sizeof(int64_t), "T must have int64 size");
  return (T)readStubInt64(offset, StubField::Type::RawInt64);
}

template <typename Fn, Fn fn>
void IonCacheIRCompiler::callVM(MacroAssembler& masm) {
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  callVMInternal(masm, id);
}

void IonCacheIRCompiler::pushStubCodePointer() {
  stubJitCodeOffset_.emplace(masm.PushWithPatch(ImmPtr((void*)-1)));
}

// AutoSaveLiveRegisters must be used when we make a call that can GC. The
// constructor ensures all live registers are stored on the stack (where the GC
// expects them) and the destructor restores these registers.
AutoSaveLiveRegisters::AutoSaveLiveRegisters(IonCacheIRCompiler& compiler)
    : compiler_(compiler) {
  MOZ_ASSERT(compiler_.liveRegs_.isSome());
  MOZ_ASSERT(compiler_.ic_);
  compiler_.allocator.saveIonLiveRegisters(
      compiler_.masm, compiler_.liveRegs_.ref(),
      compiler_.ic_->scratchRegisterForEntryJump(), compiler_.ionScript_);
  compiler_.savedLiveRegs_ = true;
}
AutoSaveLiveRegisters::~AutoSaveLiveRegisters() {
  MOZ_ASSERT(compiler_.stubJitCodeOffset_.isSome(),
             "Must have pushed JitCode* pointer");
  compiler_.allocator.restoreIonLiveRegisters(compiler_.masm,
                                              compiler_.liveRegs_.ref());
  MOZ_ASSERT(compiler_.masm.framePushed() == compiler_.ionScript_->frameSize());
}

}  // namespace jit
}  // namespace js

void CacheRegisterAllocator::saveIonLiveRegisters(MacroAssembler& masm,
                                                  LiveRegisterSet liveRegs,
                                                  Register scratch,
                                                  IonScript* ionScript) {
  // We have to push all registers in liveRegs on the stack. It's possible we
  // stored other values in our live registers and stored operands on the
  // stack (where our live registers should go), so this requires some careful
  // work. Try to keep it simple by taking one small step at a time.
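  //
  // Illustrative layout once this function is done (stack grows down; exact
  // sizes depend on the IC):
  //
  //   | Ion frame (ionScript->frameSize())  |
  //   | saved live registers                |  <- sizeOfLiveRegsInBytes
  //   | spilled non-input operands, if any  |  <- stackPushed_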

  // Step 1. Discard any dead operands so we can reuse their registers.
  freeDeadOperandLocations(masm);

  // Step 2. Figure out the size of our live regs. This is consistent with
  // the fact that we're using storeRegsInMask to generate the save code and
  // PopRegsInMask to generate the restore code.
  size_t sizeOfLiveRegsInBytes = masm.PushRegsInMaskSizeInBytes(liveRegs);

  MOZ_ASSERT(sizeOfLiveRegsInBytes > 0);

  // Step 3. Ensure all non-input operands are on the stack.
  size_t numInputs = writer_.numInputOperands();
  for (size_t i = numInputs; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.isInRegister()) {
      spillOperandToStack(masm, &loc);
    }
  }

  // Step 4. Restore the register state, but don't discard the stack as
  // non-input operands are stored there.
  restoreInputState(masm, /* shouldDiscardStack = */ false);

  // We just restored the input state, so no input operands should be stored
  // on the stack.
#ifdef DEBUG
  for (size_t i = 0; i < numInputs; i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(!loc.isOnStack());
  }
#endif

  // Step 5. At this point our register state is correct. Stack values,
  // however, may cover the space where we have to store the live registers.
  // Move them out of the way.

  bool hasOperandOnStack = false;
  for (size_t i = numInputs; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (!loc.isOnStack()) {
      continue;
    }

    hasOperandOnStack = true;

    size_t operandSize = loc.stackSizeInBytes();
    size_t operandStackPushed = loc.stackPushed();
    MOZ_ASSERT(operandSize > 0);
    MOZ_ASSERT(stackPushed_ >= operandStackPushed);
    MOZ_ASSERT(operandStackPushed >= operandSize);

    // If this operand doesn't cover the live register space, there's
    // nothing to do.
    if (operandStackPushed - operandSize >= sizeOfLiveRegsInBytes) {
      MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
      continue;
    }

    // Reserve stack space for the live registers if needed.
    if (sizeOfLiveRegsInBytes > stackPushed_) {
      size_t extraBytes = sizeOfLiveRegsInBytes - stackPushed_;
      MOZ_ASSERT((extraBytes % sizeof(uintptr_t)) == 0);
      masm.subFromStackPtr(Imm32(extraBytes));
      stackPushed_ += extraBytes;
    }

    // Push the operand below the live register space.
    if (loc.kind() == OperandLocation::PayloadStack) {
      masm.push(
          Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
      stackPushed_ += operandSize;
      loc.setPayloadStack(stackPushed_, loc.payloadType());
      continue;
    }
    MOZ_ASSERT(loc.kind() == OperandLocation::ValueStack);
    masm.pushValue(
        Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
    stackPushed_ += operandSize;
    loc.setValueStack(stackPushed_);
  }

  // Step 6. If we have any operands on the stack, adjust their stackPushed
  // values to not include sizeOfLiveRegsInBytes (this simplifies code down
  // the line). Then push/store the live registers.
  if (hasOperandOnStack) {
    MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
    stackPushed_ -= sizeOfLiveRegsInBytes;

    for (size_t i = numInputs; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.isOnStack()) {
        loc.adjustStackPushed(-int32_t(sizeOfLiveRegsInBytes));
      }
    }

    size_t stackBottom = stackPushed_ + sizeOfLiveRegsInBytes;
    masm.storeRegsInMask(liveRegs, Address(masm.getStackPointer(), stackBottom),
                         scratch);
    masm.setFramePushed(masm.framePushed() + sizeOfLiveRegsInBytes);
  } else {
    // If no operands are on the stack, discard the unused stack space.
    if (stackPushed_ > 0) {
      masm.addToStackPtr(Imm32(stackPushed_));
      stackPushed_ = 0;
    }
    masm.PushRegsInMask(liveRegs);
  }
  freePayloadSlots_.clear();
  freeValueSlots_.clear();

  MOZ_ASSERT(masm.framePushed() ==
             ionScript->frameSize() + sizeOfLiveRegsInBytes);

  // Step 7. All live registers and non-input operands are stored on the stack
  // now, so at this point all registers except for the input registers are
  // available.
  availableRegs_.set() = GeneralRegisterSet::Not(inputRegisterSet());
  availableRegsAfterSpill_.set() = GeneralRegisterSet();

  // Step 8. We restored our input state, so we have to fix up aliased input
  // registers again.
  fixupAliasedInputs(masm);
}

void CacheRegisterAllocator::restoreIonLiveRegisters(MacroAssembler& masm,
                                                     LiveRegisterSet liveRegs) {
  masm.PopRegsInMask(liveRegs);

  availableRegs_.set() = GeneralRegisterSet();
  availableRegsAfterSpill_.set() = GeneralRegisterSet::All();
}

static void* GetReturnAddressToIonCode(JSContext* cx) {
  JSJitFrameIter frame(cx->activation()->asJit());
  MOZ_ASSERT(frame.type() == FrameType::Exit,
             "An exit frame is expected as update functions are called with a "
             "VMFunction.");

  void* returnAddr = frame.returnAddress();
#ifdef DEBUG
  ++frame;
  MOZ_ASSERT(frame.isIonJS());
#endif
  return returnAddr;
}

// The AutoSaveLiveRegisters parameter is used to ensure registers were saved
// before entering the stub frame.
void IonCacheIRCompiler::enterStubFrame(MacroAssembler& masm,
                                        const AutoSaveLiveRegisters&) {
  MOZ_ASSERT(!enteredStubFrame_);
  pushStubCodePointer();
  masm.PushFrameDescriptor(FrameType::IonJS);
  masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));

  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  enteredStubFrame_ = true;
}

bool IonCacheIRCompiler::init() {
  if (!allocator.init()) {
    return false;
  }

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(numInputs == NumInputsForCacheKind(ic_->kind()));

  AllocatableGeneralRegisterSet available;

  switch (ic_->kind()) {
    case CacheKind::GetProp:
    case CacheKind::GetElem: {
      IonGetPropertyIC* ic = ic_->asGetPropertyIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 1 || numInputs == 2);

      allocator.initInputLocation(0, ic->value());
      if (numInputs > 1) {
        allocator.initInputLocation(1, ic->id());
      }
      break;
    }
    case CacheKind::GetPropSuper:
    case CacheKind::GetElemSuper: {
      IonGetPropSuperIC* ic = ic_->asGetPropSuperIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 2 || numInputs == 3);

      allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);

      if (ic->kind() == CacheKind::GetPropSuper) {
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(1, ic->receiver());
      } else {
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(1, ic->id());
        allocator.initInputLocation(2, ic->receiver());
      }
      break;
    }
    case CacheKind::SetProp:
    case CacheKind::SetElem: {
      IonSetPropertyIC* ic = ic_->asSetPropertyIC();

      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());

      allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);

      if (ic->kind() == CacheKind::SetProp) {
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(1, ic->rhs());
      } else {
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(1, ic->id());
        allocator.initInputLocation(2, ic->rhs());
      }
      break;
    }
    case CacheKind::GetName: {
      IonGetNameIC* ic = ic_->asGetNameIC();
      ValueOperand output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::BindName: {
      IonBindNameIC* ic = ic_->asBindNameIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Object, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::GetIterator: {
      IonGetIteratorIC* ic = ic_->asGetIteratorIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp1());
      available.add(ic->temp2());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Object, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->value());
      break;
    }
    case CacheKind::OptimizeSpreadCall: {
      auto* ic = ic_->asOptimizeSpreadCallIC();
      ValueOperand output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->value());
      break;
    }
    case CacheKind::In: {
      IonInIC* ic = ic_->asInIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->key());
      allocator.initInputLocation(
          1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->object())));
      break;
    }
    case CacheKind::HasOwn: {
      IonHasOwnIC* ic = ic_->asHasOwnIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->id());
      allocator.initInputLocation(1, ic->value());
      break;
    }
    case CacheKind::CheckPrivateField: {
      IonCheckPrivateFieldIC* ic = ic_->asCheckPrivateFieldIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->value());
      allocator.initInputLocation(1, ic->id());
      break;
    }
    case CacheKind::InstanceOf: {
      IonInstanceOfIC* ic = ic_->asInstanceOfIC();
      Register output = ic->output();
      available.add(output);
      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(
          1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->rhs())));
      break;
    }
    case CacheKind::ToPropertyKey: {
      IonToPropertyKeyIC* ic = ic_->asToPropertyKeyIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->input());
      break;
    }
    case CacheKind::UnaryArith: {
      IonUnaryArithIC* ic = ic_->asUnaryArithIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->input());
      break;
    }
    case CacheKind::BinaryArith: {
      IonBinaryArithIC* ic = ic_->asBinaryArithIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(1, ic->rhs());
      break;
    }
    case CacheKind::Compare: {
      IonCompareIC* ic = ic_->asCompareIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(1, ic->rhs());
      break;
    }
    case CacheKind::CloseIter: {
      IonCloseIterIC* ic = ic_->asCloseIterIC();

      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      allocator.initInputLocation(0, ic->iter(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::Call:
    case CacheKind::TypeOf:
    case CacheKind::ToBool:
    case CacheKind::GetIntrinsic:
    case CacheKind::NewArray:
    case CacheKind::NewObject:
      MOZ_CRASH("Unsupported IC");
  }

  liveFloatRegs_ = LiveFloatRegisterSet(liveRegs_->fpus());

  allocator.initAvailableRegs(available);
  allocator.initAvailableRegsAfterSpill();
  return true;
}

JitCode* IonCacheIRCompiler::compile(IonICStub* stub) {
  AutoCreatedBy acb(masm, "IonCacheIRCompiler::compile");

  masm.setFramePushed(ionScript_->frameSize());
  if (cx_->runtime()->geckoProfiler().enabled()) {
    masm.enableProfilingInstrumentation();
  }

  allocator.fixupAliasedInputs(masm);

  CacheIRReader reader(writer_);
  do {
    CacheOp op = reader.readOp();
    perfSpewer_.recordInstruction(masm, op);
    switch (op) {
#define DEFINE_OP(op, ...)                 \
  case CacheOp::op:                        \
    if (!emit##op(reader)) return nullptr; \
    break;
      CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

      default:
        MOZ_CRASH("Invalid op");
    }
    allocator.nextOp();
  } while (reader.more());
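
  // CacheIR for an Ion IC always ends with a ReturnFromIC op that jumps back
  // to the rejoin point in Ion code, so execution must never fall through to
  // this point.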
  masm.assumeUnreachable("Should have returned from IC");

  // Done emitting the main IC code. Now emit the failure paths.
  for (size_t i = 0; i < failurePaths.length(); i++) {
    if (!emitFailurePath(i)) {
      return nullptr;
    }
    // The word patched below is the address of a slot holding the next stub's
    // code pointer; jumping through Address(scratch, 0) loads the current
    // value, so attaching a new stub only needs to update that slot.
    Register scratch = ic_->scratchRegisterForEntryJump();
    CodeOffset offset = masm.movWithPatch(ImmWord(-1), scratch);
    masm.jump(Address(scratch, 0));
    if (!nextCodeOffsets_.append(offset)) {
      return nullptr;
    }
  }

  Linker linker(masm);
  Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Ion));
  if (!newStubCode) {
    cx_->recoverFromOutOfMemory();
    return nullptr;
  }

  for (CodeOffset offset : nextCodeOffsets_) {
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(newStubCode, offset),
                                       ImmPtr(stub->nextCodeRawPtr()),
                                       ImmPtr((void*)-1));
  }

  if (stubJitCodeOffset_) {
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(newStubCode, *stubJitCodeOffset_),
        ImmPtr(newStubCode.get()), ImmPtr((void*)-1));
  }

  return newStubCode;
}

#ifdef DEBUG
void IonCacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
  switch (ic_->kind()) {
    case CacheKind::GetProp:
    case CacheKind::GetElem:
    case CacheKind::GetPropSuper:
    case CacheKind::GetElemSuper:
    case CacheKind::GetName:
    case CacheKind::BindName:
    case CacheKind::GetIterator:
    case CacheKind::In:
    case CacheKind::HasOwn:
    case CacheKind::CheckPrivateField:
    case CacheKind::InstanceOf:
    case CacheKind::UnaryArith:
    case CacheKind::ToPropertyKey:
    case CacheKind::OptimizeSpreadCall:
    case CacheKind::CloseIter:
      MOZ_CRASH("No float registers available");
    case CacheKind::SetProp:
    case CacheKind::SetElem:
      // FloatReg0 is available per LIRGenerator::visitSetPropertyCache.
      MOZ_ASSERT(reg == FloatReg0);
      break;
    case CacheKind::BinaryArith:
    case CacheKind::Compare:
      // FloatReg0 and FloatReg1 are available per
      // LIRGenerator::visitBinaryCache.
      MOZ_ASSERT(reg == FloatReg0 || reg == FloatReg1);
      break;
    case CacheKind::Call:
    case CacheKind::TypeOf:
    case CacheKind::ToBool:
    case CacheKind::GetIntrinsic:
    case CacheKind::NewArray:
    case CacheKind::NewObject:
      MOZ_CRASH("Unsupported IC");
  }
}
#endif
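
// A shape guard compares the object's shape pointer against the Shape baked
// into the stub data. When the object might be attacker-controlled under
// speculative execution, the Spectre-hardened branch also clobbers the object
// register on the mispredicted path, which is why a scratch register is only
// allocated in that case.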
bool IonCacheIRCompiler::emitGuardShape(ObjOperandId objId,
                                        uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Shape* shape = weakShapeStubField(shapeOffset);

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  Maybe<AutoScratchRegister> maybeScratch;
  if (needSpectreMitigations) {
    maybeScratch.emplace(allocator, masm);
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (needSpectreMitigations) {
    masm.branchTestObjShape(Assembler::NotEqual, obj, shape, *maybeScratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj, shape,
                                                failure->label());
  }

  return true;
}

bool IonCacheIRCompiler::emitGuardProto(ObjOperandId objId,
                                        uint32_t protoOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  JSObject* proto = objectStubField(protoOffset);

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjProto(obj, scratch);
  masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(proto),
                 failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardCompartment(ObjOperandId objId,
                                              uint32_t globalOffset,
                                              uint32_t compartmentOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  JSObject* globalWrapper = objectStubField(globalOffset);
  JS::Compartment* compartment = compartmentStubField(compartmentOffset);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Verify that the global wrapper is still valid, as it is a prerequisite
  // for doing the compartment check.
  masm.movePtr(ImmGCPtr(globalWrapper), scratch);
  Address handlerAddr(scratch, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::Equal, handlerAddr,
                 ImmPtr(&DeadObjectProxy::singleton), failure->label());

  masm.branchTestObjCompartment(Assembler::NotEqual, obj, compartment, scratch,
                                failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardAnyClass(ObjOperandId objId,
                                           uint32_t claspOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  const JSClass* clasp = classStubField(claspOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (objectGuardNeedsSpectreMitigations(objId)) {
    masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                                scratch, failure->label());
  }

  return true;
}

bool IonCacheIRCompiler::emitGuardHasProxyHandler(ObjOperandId objId,
                                                  uint32_t handlerOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  const void* handler = proxyHandlerStubField(handlerOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address handlerAddr(obj, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::NotEqual, handlerAddr, ImmPtr(handler),
                 failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardSpecificObject(ObjOperandId objId,
                                                 uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  JSObject* expected = objectStubField(expectedOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchPtr(Assembler::NotEqual, obj, ImmGCPtr(expected),
                 failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardSpecificFunction(
    ObjOperandId objId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
  return emitGuardSpecificObject(objId, expectedOffset);
}

bool IonCacheIRCompiler::emitGuardSpecificAtom(StringOperandId strId,
                                               uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  AutoScratchRegister scratch(allocator, masm);

  JSAtom* atom = &stringStubField(expectedOffset)->asAtom();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch);

  masm.guardSpecificAtom(str, atom, scratch, volatileRegs, failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardSpecificSymbol(SymbolOperandId symId,
                                                 uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register sym = allocator.useRegister(masm, symId);
  JS::Symbol* expected = symbolStubField(expectedOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchPtr(Assembler::NotEqual, sym, ImmGCPtr(expected),
                 failure->label());
  return true;
}

bool IonCacheIRCompiler::emitLoadValueResult(uint32_t valOffset) {
  MOZ_CRASH("Baseline-specific op");
}

bool IonCacheIRCompiler::emitLoadFixedSlotResult(ObjOperandId objId,
                                                 uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  masm.loadTypedOrValue(Address(obj, offset), output);
  return true;
}

bool IonCacheIRCompiler::emitLoadFixedSlotTypedResult(ObjOperandId objId,
                                                      uint32_t offsetOffset,
                                                      ValueType) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitLoadDynamicSlotResult(ObjOperandId objId,
                                                   uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
  masm.loadTypedOrValue(Address(scratch, offset), output);
  return true;
}

bool IonCacheIRCompiler::emitCallScriptedGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
    uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);

  JSFunction* target = &objectStubField(getterOffset)->as<JSFunction>();
  AutoScratchRegister scratch(allocator, masm);

  MOZ_ASSERT(sameRealm == (cx_->realm() == target->realm()));

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  enterStubFrame(masm, save);

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push the
  // |this| + argument Values.
  uint32_t argSize = (target->nargs() + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
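  // For example, with JitStackAlignment == 16, framePushed() == 24, and a
  // getter with nargs == 1 (so argSize == 16 on 64-bit), ComputeByteAlignment
  // returns 8: the padding needed to make 24 + 16 + 8 = 48 a multiple of 16.
  // (Illustrative numbers only.)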
  masm.reserveStack(padding);

  for (size_t i = 0; i < target->nargs(); i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(receiver);

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  masm.movePtr(ImmGCPtr(target), scratch);

  masm.Push(scratch);
  masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 0);

  // Check stack alignment. Add 2 * sizeof(uintptr_t) for the return address and
  // frame pointer pushed by the call/callee.
  MOZ_ASSERT(
      ((masm.framePushed() + 2 * sizeof(uintptr_t)) % JitStackAlignment) == 0);

  MOZ_ASSERT(target->hasJitEntry());
  masm.loadJitCodeRaw(scratch, scratch);
  masm.callJit(scratch);

  if (!sameRealm) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "ReturnReg available as scratch after scripted calls");
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  masm.storeCallResultValue(output);

  // Restore the frame pointer and stack pointer.
  masm.loadPtr(Address(FramePointer, 0), FramePointer);
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}

bool IonCacheIRCompiler::emitCallInlinedGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, uint32_t icScriptOffset,
    bool sameRealm, uint32_t nargsAndFlagsOffset) {
  MOZ_CRASH("Trial inlining not supported in Ion");
}

bool IonCacheIRCompiler::emitCallNativeGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
    uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);

  JSFunction* target = &objectStubField(getterOffset)->as<JSFunction>();
  MOZ_ASSERT(target->isNativeFun());

  AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType argUintN(allocator, masm, output);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Construct vp array:
  // Push receiver value for |this|
  masm.Push(receiver);
  // Push callee/outparam.
  masm.Push(ObjectValue(*target));
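  // The stack grows down, so after these two pushes the callee Value sits at
  // the lowest address: it becomes vp[0] (doubling as the outparam slot) and
  // the receiver becomes vp[1] once the stack pointer is captured below.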

  // Preload arguments into registers.
  masm.loadJSContext(argJSContext);
  masm.move32(Imm32(0), argUintN);
  masm.moveStackPtrTo(argVp.get());

  // Push marking data for later use.
  masm.Push(argUintN);
  pushStubCodePointer();

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  // Construct and execute call.
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argUintN);
  masm.passABIArg(argVp);
  masm.callWithABI(DynamicFunction<JSNative>(target->native()), MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  if (!sameRealm) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  Address outparam(masm.getStackPointer(),
                   IonOOLNativeExitFrameLayout::offsetOfResult());
  masm.loadValue(outparam, output.valueReg());

  if (JitOptions.spectreJitToCxxCalls) {
    masm.speculationBarrier();
  }

  masm.adjustStack(IonOOLNativeExitFrameLayout::Size(0));
  return true;
}

bool IonCacheIRCompiler::emitCallDOMGetterResult(ObjOperandId objId,
                                                 uint32_t jitInfoOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);

  const JSJitInfo* info = rawPointerStubField<const JSJitInfo*>(jitInfoOffset);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(obj);
  masm.Push(ImmPtr(info));

  using Fn =
      bool (*)(JSContext*, const JSJitInfo*, HandleObject, MutableHandleValue);
  callVM<Fn, jit::CallDOMGetter>(masm);

  masm.storeCallResultValue(output);
  return true;
}

bool IonCacheIRCompiler::emitCallDOMSetter(ObjOperandId objId,
                                           uint32_t jitInfoOffset,
                                           ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  const JSJitInfo* info = rawPointerStubField<const JSJitInfo*>(jitInfoOffset);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(val);
  masm.Push(obj);
  masm.Push(ImmPtr(info));

  using Fn = bool (*)(JSContext*, const JSJitInfo*, HandleObject, HandleValue);
  callVM<Fn, jit::CallDOMSetter>(masm);
  return true;
}

bool IonCacheIRCompiler::emitProxyGetResult(ObjOperandId objId,
                                            uint32_t idOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  jsid id = idStubField(idOffset);

  // ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id,
  //                  MutableHandleValue vp)
  AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType argProxy(allocator, masm, output);
  AutoScratchRegister argId(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  // Push stubCode for marking.
  pushStubCodePointer();

  // Push args on stack first so we can take pointers to make handles.
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(argVp.get());

  masm.Push(id, scratch);
  masm.moveStackPtrTo(argId.get());

  // Push the proxy. Also used as receiver.
  masm.Push(obj);
  masm.moveStackPtrTo(argProxy.get());

  masm.loadJSContext(argJSContext);

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLProxy);

  // Make the call.
  using Fn = bool (*)(JSContext* cx, HandleObject proxy, HandleId id,
                      MutableHandleValue vp);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argProxy);
  masm.passABIArg(argId);
  masm.passABIArg(argVp);
  masm.callWithABI<Fn, ProxyGetProperty>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Load the outparam vp[0] into output register(s).
  Address outparam(masm.getStackPointer(),
                   IonOOLProxyExitFrameLayout::offsetOfResult());
  masm.loadValue(outparam, output.valueReg());

  // Spectre mitigation in case of speculative execution within C++ code.
  if (JitOptions.spectreJitToCxxCalls) {
    masm.speculationBarrier();
  }

  // masm.leaveExitFrame & pop locals
  masm.adjustStack(IonOOLProxyExitFrameLayout::Size());
  return true;
}

bool IonCacheIRCompiler::emitFrameIsConstructingResult() {
  MOZ_CRASH("Baseline-specific op");
}

bool IonCacheIRCompiler::emitLoadConstantStringResult(uint32_t strOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  MOZ_CRASH("not used in ion");
}

bool IonCacheIRCompiler::emitCompareStringResult(JSOp op, StringOperandId lhsId,
                                                 StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register left = allocator.useRegister(masm, lhsId);
  Register right = allocator.useRegister(masm, rhsId);

  allocator.discardStack(masm);

  Label slow, done;
  MOZ_ASSERT(!output.hasValue());
  masm.compareStrings(op, left, right, output.typedReg().gpr(), &slow);

  masm.jump(&done);
  masm.bind(&slow);

  enterStubFrame(masm, save);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.Push(left);
    masm.Push(right);
  } else {
    masm.Push(right);
    masm.Push(left);
  }

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    callVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(masm);
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    callVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(masm);
  } else if (op == JSOp::Lt || op == JSOp::Gt) {
    callVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(masm);
  } else {
    MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
    callVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(masm);
  }

  masm.storeCallBoolResult(output.typedReg().gpr());
  masm.bind(&done);
  return true;
}

bool IonCacheIRCompiler::emitStoreFixedSlot(ObjOperandId objId,
                                            uint32_t offsetOffset,
                                            ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);

  Address slot(obj, offset);
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeConstantOrRegister(val, slot);
  emitPostBarrierSlot(obj, val, scratch);
  return true;
}

bool IonCacheIRCompiler::emitStoreDynamicSlot(ObjOperandId objId,
                                              uint32_t offsetOffset,
                                              ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);

  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
  Address slot(scratch, offset);
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeConstantOrRegister(val, slot);
  emitPostBarrierSlot(obj, val, scratch);
  return true;
}

bool IonCacheIRCompiler::emitAddAndStoreSlotShared(
    CacheOp op, ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
    uint32_t newShapeOffset, Maybe<uint32_t> numNewSlotsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch1(allocator, masm);

  Maybe<AutoScratchRegister> scratch2;
  if (op == CacheOp::AllocateAndStoreDynamicSlot) {
    scratch2.emplace(allocator, masm);
  }

  Shape* newShape = shapeStubField(newShapeOffset);

  if (op == CacheOp::AllocateAndStoreDynamicSlot) {
    // We have to (re)allocate dynamic slots. Do this first, as it's the
    // only fallible operation here. Note that growSlotsPure is
    // fallible but does not GC.

    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    int32_t numNewSlots = int32StubField(*numNewSlotsOffset);
    MOZ_ASSERT(numNewSlots > 0);
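    // growSlotsPure never GCs, but as a C++ call it can clobber all volatile
    // (caller-saved) registers, so they are saved and restored around the
    // ABI call below.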

    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
    masm.setupUnalignedABICall(scratch1);
    masm.loadJSContext(scratch1);
    masm.passABIArg(scratch1);
    masm.passABIArg(obj);
    masm.move32(Imm32(numNewSlots), scratch2.ref());
    masm.passABIArg(scratch2.ref());
    masm.callWithABI<Fn, NativeObject::growSlotsPure>();
    masm.storeCallPointerResult(scratch1);

    LiveRegisterSet ignore;
    ignore.add(scratch1);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.branchIfFalseBool(scratch1, failure->label());
  }

  // Update the object's shape.
  masm.storeObjShape(newShape, obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  if (op == CacheOp::AddAndStoreFixedSlot) {
    Address slot(obj, offset);
    masm.storeConstantOrRegister(val, slot);
  } else {
    MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
               op == CacheOp::AllocateAndStoreDynamicSlot);
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
    Address slot(scratch1, offset);
    masm.storeConstantOrRegister(val, slot);
  }

  emitPostBarrierSlot(obj, val, scratch1);

  return true;
}

bool IonCacheIRCompiler::emitAddAndStoreFixedSlot(ObjOperandId objId,
                                                  uint32_t offsetOffset,
                                                  ValOperandId rhsId,
                                                  uint32_t newShapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
  return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot, objId,
                                   offsetOffset, rhsId, newShapeOffset,
                                   numNewSlotsOffset);
}

bool IonCacheIRCompiler::emitAddAndStoreDynamicSlot(ObjOperandId objId,
                                                    uint32_t offsetOffset,
                                                    ValOperandId rhsId,
                                                    uint32_t newShapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
  return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot, objId,
                                   offsetOffset, rhsId, newShapeOffset,
                                   numNewSlotsOffset);
}

bool IonCacheIRCompiler::emitAllocateAndStoreDynamicSlot(
    ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
    uint32_t newShapeOffset, uint32_t numNewSlotsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot, objId,
                                   offsetOffset, rhsId, newShapeOffset,
                                   mozilla::Some(numNewSlotsOffset));
}

bool IonCacheIRCompiler::emitLoadStringCharResult(StringOperandId strId,
                                                  Int32OperandId indexId,
                                                  bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check, load string char.
  Label done;
  Label loadFailed;
  if (!handleOOB) {
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, failure->label());
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
                        failure->label());
  } else {
    // Return the empty string for out-of-bounds access.
    masm.movePtr(ImmGCPtr(cx_->runtime()->emptyString), scratch2);

    // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
    // guaranteed to see no nested ropes.
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, &done);
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);
  }

  // Load StaticString for this char. For larger code units perform a VM call.
  Label vmCall;
  masm.boundsCheck32PowerOfTwo(scratch1, StaticStrings::UNIT_STATIC_LIMIT,
                               &vmCall);
  masm.movePtr(ImmPtr(&cx_->staticStrings().unitStaticTable), scratch2);
  masm.loadPtr(BaseIndex(scratch2, scratch1, ScalePointer), scratch2);

  masm.jump(&done);

  if (handleOOB) {
    masm.bind(&loadFailed);
    masm.assumeUnreachable("loadStringChar can't fail for linear strings");
  }

  {
    masm.bind(&vmCall);

    // FailurePath and AutoSaveLiveRegisters don't get along very well. Both are
    // modifying the stack and expect that no other stack manipulations are
    // made. Therefore we need to use an ABI call instead of a VM call here.
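    // jit::StringFromCharCodeNoGC cannot GC: on failure it returns nullptr
    // rather than triggering a collection, and that nullptr is checked and
    // routed to the failure path below.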

    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch1);
    volatileRegs.takeUnchecked(scratch2);
    volatileRegs.takeUnchecked(scratch3);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSContext* cx, int32_t code);
    masm.setupUnalignedABICall(scratch2);
    masm.loadJSContext(scratch2);
    masm.passABIArg(scratch2);
    masm.passABIArg(scratch1);
    masm.callWithABI<Fn, jit::StringFromCharCodeNoGC>();
    masm.storeCallPointerResult(scratch2);

    masm.PopRegsInMask(volatileRegs);

    masm.branchPtr(Assembler::Equal, scratch2, ImmWord(0), failure->label());
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_STRING, scratch2, output.valueReg());
  return true;
}

bool IonCacheIRCompiler::emitCallNativeSetter(ObjOperandId receiverId,
                                              uint32_t setterOffset,
                                              ValOperandId rhsId,
                                              bool sameRealm,
                                              uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register receiver = allocator.useRegister(masm, receiverId);
  JSFunction* target = &objectStubField(setterOffset)->as<JSFunction>();
  MOZ_ASSERT(target->isNativeFun());
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister argJSContext(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister argUintN(allocator, masm);
#ifndef JS_CODEGEN_X86
  AutoScratchRegister scratch(allocator, masm);
#else
  // Not enough registers on x86.
  Register scratch = argUintN;
#endif

  allocator.discardStack(masm);

  // Set up the call:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // vp[0] is callee/outparam
  // vp[1] is |this|
  // vp[2] is the value

  // Build vp and move the base into argVpReg.
  masm.Push(val);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));
  masm.Push(ObjectValue(*target));
  masm.moveStackPtrTo(argVp.get());

  // Preload other regs.
  masm.loadJSContext(argJSContext);
  masm.move32(Imm32(1), argUintN);

  // Push marking data for later use.
  masm.Push(argUintN);
  pushStubCodePointer();

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  // Make the call.
  masm.setupUnalignedABICall(scratch);
#ifdef JS_CODEGEN_X86
  // Reload argUintN because it was clobbered.
  masm.move32(Imm32(1), argUintN);
#endif
  masm.passABIArg(argJSContext);
  masm.passABIArg(argUintN);
  masm.passABIArg(argVp);
  masm.callWithABI(DynamicFunction<JSNative>(target->native()), MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  if (!sameRealm) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  masm.adjustStack(IonOOLNativeExitFrameLayout::Size(1));
  return true;
}

bool IonCacheIRCompiler::emitCallScriptedSetter(ObjOperandId receiverId,
                                                uint32_t setterOffset,
                                                ValOperandId rhsId,
                                                bool sameRealm,
                                                uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register receiver = allocator.useRegister(masm, receiverId);
  JSFunction* target = &objectStubField(setterOffset)->as<JSFunction>();
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  MOZ_ASSERT(sameRealm == (cx_->realm() == target->realm()));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  enterStubFrame(masm, save);

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push the
  // |this| + argument Values.
  size_t numArgs = std::max<size_t>(1, target->nargs());
  uint32_t argSize = (numArgs + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  for (size_t i = 1; i < target->nargs(); i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(val);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  masm.movePtr(ImmGCPtr(target), scratch);

  masm.Push(scratch);
  masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 1);

  // Check stack alignment. Add 2 * sizeof(uintptr_t) for the return address and
  // frame pointer pushed by the call/callee.
  MOZ_ASSERT(
      ((masm.framePushed() + 2 * sizeof(uintptr_t)) % JitStackAlignment) == 0);

  MOZ_ASSERT(target->hasJitEntry());
  masm.loadJitCodeRaw(scratch, scratch);
  masm.callJit(scratch);

  if (!sameRealm) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  // Restore the frame pointer and stack pointer.
  masm.loadPtr(Address(FramePointer, 0), FramePointer);
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}

bool IonCacheIRCompiler::emitCallInlinedSetter(
    ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
    uint32_t icScriptOffset, bool sameRealm, uint32_t nargsAndFlagsOffset) {
  MOZ_CRASH("Trial inlining not supported in Ion");
}

bool IonCacheIRCompiler::emitCallSetArrayLength(ObjOperandId objId, bool strict,
                                                ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
  callVM<Fn, jit::SetArrayLength>(masm);
  return true;
}

bool IonCacheIRCompiler::emitProxySet(ObjOperandId objId, uint32_t idOffset,
                                      ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  jsid id = idStubField(idOffset);

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(id, scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, ProxySetProperty>(masm);
  return true;
}

bool IonCacheIRCompiler::emitProxySetByValue(ObjOperandId objId,
                                             ValOperandId idId,
                                             ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister idVal = allocator.useConstantOrRegister(masm, idId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, ProxySetPropertyByValue>(masm);
  return true;
}

bool IonCacheIRCompiler::emitCallAddOrUpdateSparseElementHelper(
    ObjOperandId objId, Int32OperandId idId, ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  Register id = allocator.useRegister(masm, idId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(id);
  masm.Push(obj);

  using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
                      HandleValue v, bool strict);
  callVM<Fn, AddOrUpdateSparseElementHelper>(masm);
  return true;
}

bool IonCacheIRCompiler::emitMegamorphicSetElement(ObjOperandId objId,
                                                   ValOperandId idId,
                                                   ValOperandId rhsId,
                                                   bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister idVal = allocator.useConstantOrRegister(masm, idId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, SetElementMegamorphic<false>>(masm);
  return true;
}

bool IonCacheIRCompiler::emitReturnFromIC() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (!savedLiveRegs_) {
    allocator.restoreInputState(masm);
  }

  uint8_t* rejoinAddr = ic_->rejoinAddr(ionScript_);
  masm.jump(ImmPtr(rejoinAddr));
  return true;
}

bool IonCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape(
    ValOperandId expandoId, uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand val = allocator.useValueRegister(masm, expandoId);
  Shape* shape = shapeStubField(shapeOffset);

  AutoScratchRegister objScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.branchTestUndefined(Assembler::Equal, val, &done);

  masm.debugAssertIsObject(val);
  masm.unboxObject(val, objScratch);
  // The expando object is not used in this case, so we don't need Spectre
  // mitigations.
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch,
                                              shape, failure->label());

  masm.bind(&done);
  return true;
}

bool IonCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration(
    ObjOperandId objId, uint32_t expandoAndGenerationOffset,
    uint32_t generationOffset, ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ExpandoAndGeneration* expandoAndGeneration =
      rawPointerStubField<ExpandoAndGeneration*>(expandoAndGenerationOffset);
  uint64_t generation = rawInt64StubField<uint64_t>(generationOffset);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadDOMExpandoValueGuardGeneration(obj, output, expandoAndGeneration,
                                          generation, failure->label());
  return true;
}

void IonIC::attachCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
                              CacheKind kind, IonScript* ionScript,
                              bool* attached) {
  // We shouldn't GC or report OOM (or any other exception) here.
  AutoAssertNoPendingException aanpe(cx);
  JS::AutoCheckCannotGC nogc;

  MOZ_ASSERT(!*attached);

  // Do nothing if the IR generator failed or triggered a GC that invalidated
  // the script.
  if (writer.failed() || ionScript->invalidated()) {
    return;
  }

  JitZone* jitZone = cx->zone()->jitZone();

  constexpr uint32_t stubDataOffset = sizeof(IonICStub);
  static_assert(stubDataOffset % sizeof(uint64_t) == 0,
                "Stub fields must be aligned");

  // Try to reuse a previously-allocated CacheIRStubInfo.
  CacheIRStubKey::Lookup lookup(kind, ICStubEngine::IonIC, writer.codeStart(),
                                writer.codeLength());
  CacheIRStubInfo* stubInfo = jitZone->getIonCacheIRStubInfo(lookup);
  if (!stubInfo) {
    // Allocate the shared CacheIRStubInfo. Note that the
    // putIonCacheIRStubInfo call below will transfer ownership to
    // the stub info HashSet, so we don't have to worry about freeing
    // it below.
    //
    // For Ion ICs, we don't track/use the makesGCCalls flag, so just pass true.
    bool makesGCCalls = true;
    stubInfo = CacheIRStubInfo::New(kind, ICStubEngine::IonIC, makesGCCalls,
                                    stubDataOffset, writer);
    if (!stubInfo) {
      return;
    }

    CacheIRStubKey key(stubInfo);
    if (!jitZone->putIonCacheIRStubInfo(lookup, key)) {
      return;
    }
  }

  MOZ_ASSERT(stubInfo);

  // Ensure we don't attach duplicate stubs. This can happen if a stub failed
  // for some reason and the IR generator doesn't check for exactly the same
  // conditions.
  for (IonICStub* stub = firstStub_; stub; stub = stub->next()) {
    if (stub->stubInfo() != stubInfo) {
      continue;
    }
    if (!writer.stubDataEquals(stub->stubDataStart())) {
      continue;
    }
    return;
  }

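  // The stub data is stored inline, immediately after the IonICStub header.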
  size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();

  // Allocate the IonICStub in the optimized stub space. Ion stubs and
  // CacheIRStubInfo instances for Ion stubs can be purged on GC. That's okay
  // because the stub code is rooted separately when we make a VM call, and
  // stub code should never access the IonICStub after making a VM call. The
  // IonICStub::poison method poisons the stub to catch bugs in this area.
  ICStubSpace* stubSpace = cx->zone()->jitZone()->optimizedStubSpace();
  void* newStubMem = stubSpace->alloc(bytesNeeded);
  if (!newStubMem) {
    return;
  }

  IonICStub* newStub =
      new (newStubMem) IonICStub(fallbackAddr(ionScript), stubInfo);
  writer.copyStubData(newStub->stubDataStart());

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jctx(cx);
  IonCacheIRCompiler compiler(cx, temp, writer, this, ionScript,
                              stubDataOffset);
  if (!compiler.init()) {
    return;
  }

  JitCode* code = compiler.compile(newStub);
  if (!code) {
    return;
  }

  // Record the stub code if perf spewer is enabled.
  CacheKind stubKind = newStub->stubInfo()->kind();
  compiler.perfSpewer().saveProfile(cx, script(), code,
                                    CacheKindNames[uint8_t(stubKind)]);

  // Add an entry to the profiler's code table, so that the profiler can
  // identify this as Ion code.
  if (ionScript->hasProfilingInstrumentation()) {
    uint8_t* addr = rejoinAddr(ionScript);
    auto entry = MakeJitcodeGlobalEntry<IonICEntry>(cx, code, code->raw(),
                                                    code->rawEnd(), addr);
    if (!entry) {
      cx->recoverFromOutOfMemory();
      return;
    }

    auto* globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    if (!globalTable->addEntry(std::move(entry))) {
      return;
    }
  }

  attachStub(newStub, code);
  *attached = true;
}

bool IonCacheIRCompiler::emitCallStringObjectConcatResult(ValOperandId lhsId,
                                                          ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
  ValueOperand rhs = allocator.useValueRegister(masm, rhsId);

  allocator.discardStack(masm);

  enterStubFrame(masm, save);
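  // VM call arguments are pushed in reverse order (last argument first).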
  masm.Push(rhs);
  masm.Push(lhs);

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, MutableHandleValue);
  callVM<Fn, DoConcatStringObject>(masm);

  masm.storeCallResultValue(output);
  return true;
}

bool IonCacheIRCompiler::emitCloseIterScriptedResult(ObjOperandId iterId,
                                                     ObjOperandId calleeId,
                                                     CompletionKind kind,
                                                     uint32_t calleeNargs) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register iter = allocator.useRegister(masm, iterId);
  Register callee = allocator.useRegister(masm, calleeId);

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  // Construct IonICCallFrameLayout.
  enterStubFrame(masm, save);

  uint32_t stubFramePushed = masm.framePushed();

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push |this|
  // and |calleeNargs| undefined arguments.
  uint32_t argSize = (calleeNargs + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
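  // For example (illustrative values, assuming 8-byte Values and a 16-byte
  // JitStackAlignment): with framePushed == 8 and calleeNargs == 1,
  // argSize == 2 * 8 == 16, so ComputeByteAlignment(8 + 16, 16) == 8 and we
  // reserve 8 bytes of padding.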
  masm.reserveStack(padding);

  for (uint32_t i = 0; i < calleeNargs; i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(iter)));

  masm.Push(callee);
  masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 0);

  masm.loadJitCodeRaw(callee, callee);
  masm.callJit(callee);

  if (kind != CompletionKind::Throw) {
    // Verify that the return value is an object.
    Label success;
    masm.branchTestObject(Assembler::Equal, JSReturnOperand, &success);

    // We can reuse the same stub frame, but we first have to pop the arguments
    // from the previous call.
    uint32_t framePushedAfterCall = masm.framePushed();
    masm.freeStack(masm.framePushed() - stubFramePushed);

    masm.push(Imm32(int32_t(CheckIsObjectKind::IteratorReturn)));
    using Fn = bool (*)(JSContext*, CheckIsObjectKind);
    callVM<Fn, ThrowCheckIsObject>(masm);

    masm.bind(&success);
    masm.setFramePushed(framePushedAfterCall);
  }

  // Restore the frame pointer and stack pointer.
  masm.loadPtr(Address(FramePointer, 0), FramePointer);
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}

bool IonCacheIRCompiler::emitGuardFunctionScript(ObjOperandId funId,
                                                 uint32_t expectedOffset,
                                                 uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);
  BaseScript* expected = baseScriptStubField(expectedOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

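  // Load the function's BaseScript and guard that it matches the expected
  // script.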
  masm.loadPrivate(Address(fun, JSFunction::offsetOfJitInfoOrScript()),
                   scratch);
  masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(expected),
                 failure->label());
  return true;
}

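// The CacheIR ops below are only generated for Baseline ICs (calls and object
// allocation), so their Ion emitters are unreachable and crash if ever hit.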
bool IonCacheIRCompiler::emitCallScriptedFunction(ObjOperandId calleeId,
                                                  Int32OperandId argcId,
                                                  CallFlags flags,
                                                  uint32_t argcFixed) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallBoundScriptedFunction(ObjOperandId calleeId,
                                                       ObjOperandId targetId,
                                                       Int32OperandId argcId,
                                                       CallFlags flags,
                                                       uint32_t numBoundArgs) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallWasmFunction(
    ObjOperandId calleeId, Int32OperandId argcId, CallFlags flags,
    uint32_t argcFixed, uint32_t funcExportOffset, uint32_t instanceOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

#ifdef JS_SIMULATOR
bool IonCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
                                                Int32OperandId argcId,
                                                CallFlags flags,
                                                uint32_t argcFixed,
                                                uint32_t targetOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallDOMFunction(
    ObjOperandId calleeId, Int32OperandId argcId, ObjOperandId thisObjId,
    CallFlags flags, uint32_t argcFixed, uint32_t targetOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}
#else
bool IonCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
                                                Int32OperandId argcId,
                                                CallFlags flags,
                                                uint32_t argcFixed,
                                                bool ignoresReturnValue) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallDOMFunction(ObjOperandId calleeId,
                                             Int32OperandId argcId,
                                             ObjOperandId thisObjId,
                                             CallFlags flags,
                                             uint32_t argcFixed) {
  MOZ_CRASH("Call ICs not used in ion");
}
#endif

bool IonCacheIRCompiler::emitCallClassHook(ObjOperandId calleeId,
                                           Int32OperandId argcId,
                                           CallFlags flags, uint32_t argcFixed,
                                           uint32_t targetOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallInlinedFunction(ObjOperandId calleeId,
                                                 Int32OperandId argcId,
                                                 uint32_t icScriptOffset,
                                                 CallFlags flags,
                                                 uint32_t argcFixed) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitBindFunctionResult(ObjOperandId targetId,
                                                uint32_t argc,
                                                uint32_t templateObjectOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitSpecializedBindFunctionResult(
    ObjOperandId targetId, uint32_t argc, uint32_t templateObjectOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitLoadArgumentFixedSlot(ValOperandId resultId,
                                                   uint8_t slotIndex) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitLoadArgumentDynamicSlot(ValOperandId resultId,
                                                     Int32OperandId argcId,
                                                     uint8_t slotIndex) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitArrayJoinResult(ObjOperandId objId,
                                             StringOperandId sepId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitPackedArraySliceResult(
    uint32_t templateObjectOffset, ObjOperandId arrayId, Int32OperandId beginId,
    Int32OperandId endId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitArgumentsSliceResult(uint32_t templateObjectOffset,
                                                  ObjOperandId argsId,
                                                  Int32OperandId beginId,
                                                  Int32OperandId endId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitIsArrayResult(ValOperandId inputId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitIsTypedArrayResult(ObjOperandId objId,
                                                bool isPossiblyWrapped) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitStringFromCharCodeResult(Int32OperandId codeId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitStringFromCodePointResult(Int32OperandId codeId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitMathRandomResult(uint32_t rngOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitReflectGetPrototypeOfResult(ObjOperandId objId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitHasClassResult(ObjOperandId objId,
                                            uint32_t claspOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitSameValueResult(ValOperandId lhs,
                                             ValOperandId rhs) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitSetHasStringResult(ObjOperandId setId,
                                                StringOperandId strId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitMapHasStringResult(ObjOperandId mapId,
                                                StringOperandId strId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitMapGetStringResult(ObjOperandId mapId,
                                                StringOperandId strId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitNewArrayObjectResult(uint32_t arrayLength,
                                                  uint32_t shapeOffset,
                                                  uint32_t siteOffset) {
  MOZ_CRASH("NewArray ICs not used in ion");
}

bool IonCacheIRCompiler::emitNewPlainObjectResult(uint32_t numFixedSlots,
                                                  uint32_t numDynamicSlots,
                                                  gc::AllocKind allocKind,
                                                  uint32_t shapeOffset,
                                                  uint32_t siteOffset) {
  MOZ_CRASH("NewObject ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallRegExpMatcherResult(ObjOperandId regexpId,
                                                     StringOperandId inputId,
                                                     Int32OperandId lastIndexId,
                                                     uint32_t stubOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallRegExpSearcherResult(
    ObjOperandId regexpId, StringOperandId inputId, Int32OperandId lastIndexId,
    uint32_t stubOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitRegExpBuiltinExecMatchResult(
    ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitRegExpBuiltinExecTestResult(
    ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}