/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/IonCacheIRCompiler.h"
#include "mozilla/Maybe.h"

#include <algorithm>

#include "jit/CacheIRCompiler.h"
#include "jit/CacheIRWriter.h"
#include "jit/IonIC.h"
#include "jit/JitcodeMap.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/JSJitFrameIter.h"
#include "jit/Linker.h"
#include "jit/SharedICHelpers.h"
#include "jit/VMFunctions.h"
#include "proxy/DeadObjectProxy.h"
#include "proxy/Proxy.h"
#include "util/Memory.h"
#include "vm/StaticStrings.h"

#include "jit/JSJitFrameIter-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/VMFunctionList-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::Maybe;

namespace JS {
struct ExpandoAndGeneration;
}  // namespace JS

using JS::ExpandoAndGeneration;

namespace js {
namespace jit {

// IonCacheIRCompiler compiles CacheIR to IonIC native code.
IonCacheIRCompiler::IonCacheIRCompiler(JSContext* cx, TempAllocator& alloc,
                                       const CacheIRWriter& writer, IonIC* ic,
                                       IonScript* ionScript,
                                       uint32_t stubDataOffset)
    : CacheIRCompiler(cx, alloc, writer, stubDataOffset, Mode::Ion,
                      StubFieldPolicy::Constant),
      writer_(writer),
      ic_(ic),
      ionScript_(ionScript),
      savedLiveRegs_(false),
      localTracingSlots_(0),
      perfSpewer_(ic->pc()) {
  MOZ_ASSERT(ic_);
  MOZ_ASSERT(ionScript_);
}
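
// Note: with StubFieldPolicy::Constant, stub fields (shapes, objects,
// offsets, ...) are read at compile time and baked directly into the
// generated code as immediates rather than loaded from stub data at
// runtime, because each Ion IC stub is compiled for a specific IonScript.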

template <typename T>
T IonCacheIRCompiler::rawPointerStubField(uint32_t offset) {
  static_assert(sizeof(T) == sizeof(uintptr_t), "T must have pointer size");
  return (T)readStubWord(offset, StubField::Type::RawPointer);
}

template <typename T>
T IonCacheIRCompiler::rawInt64StubField(uint32_t offset) {
  static_assert(sizeof(T) == sizeof(int64_t), "T must have int64 size");
  return (T)readStubInt64(offset, StubField::Type::RawInt64);
}
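
// These raw accessors are used where the stub field is not a GC thing,
// e.g. rawPointerStubField<const JSJitInfo*> in the DOM getter/setter
// emitters below.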

template <typename Fn, Fn fn>
void IonCacheIRCompiler::callVM(MacroAssembler& masm) {
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  callVMInternal(masm, id);
}

void IonCacheIRCompiler::pushStubCodePointer() {
  stubJitCodeOffset_.emplace(masm.PushWithPatch(ImmPtr((void*)-1)));
}

// AutoSaveLiveRegisters must be used when we make a call that can GC. The
// constructor ensures all live registers are stored on the stack (where the GC
// expects them) and the destructor restores these registers.
AutoSaveLiveRegisters::AutoSaveLiveRegisters(IonCacheIRCompiler& compiler)
    : compiler_(compiler) {
  MOZ_ASSERT(compiler_.liveRegs_.isSome());
  MOZ_ASSERT(compiler_.ic_);
  compiler_.allocator.saveIonLiveRegisters(
      compiler_.masm, compiler_.liveRegs_.ref(),
      compiler_.ic_->scratchRegisterForEntryJump(), compiler_.ionScript_);
  compiler_.savedLiveRegs_ = true;
}

AutoSaveLiveRegisters::~AutoSaveLiveRegisters() {
  MOZ_ASSERT(compiler_.stubJitCodeOffset_.isSome(),
             "Must have pushed JitCode* pointer");
  compiler_.allocator.restoreIonLiveRegisters(compiler_.masm,
                                              compiler_.liveRegs_.ref());
  MOZ_ASSERT(compiler_.masm.framePushed() == compiler_.ionScript_->frameSize());
}
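
// Typical use in the call emitters below (sketch):
//
//   AutoSaveLiveRegisters save(*this);
//   AutoOutputRegister output(*this);
//   allocator.discardStack(masm);
//   enterStubFrame(masm, save);
//   ...push arguments...
//   callVM<Fn, SomeVMFunction>(masm);
//   masm.storeCallResultValue(output);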

}  // namespace jit
}  // namespace js

void CacheRegisterAllocator::saveIonLiveRegisters(MacroAssembler& masm,
                                                  LiveRegisterSet liveRegs,
                                                  Register scratch,
                                                  IonScript* ionScript) {
  // We have to push all registers in liveRegs on the stack. It's possible we
  // stored other values in our live registers and stored operands on the
  // stack (where our live registers should go), so this requires some careful
  // work. Try to keep it simple by taking one small step at a time.

  // Step 1. Discard any dead operands so we can reuse their registers.
  freeDeadOperandLocations(masm);

  // Step 2. Figure out the size of our live regs. This is consistent with
  // the fact that we're using storeRegsInMask to generate the save code and
  // PopRegsInMask to generate the restore code.
  size_t sizeOfLiveRegsInBytes =
      MacroAssembler::PushRegsInMaskSizeInBytes(liveRegs);

  MOZ_ASSERT(sizeOfLiveRegsInBytes > 0);

  // Step 3. Ensure all non-input operands are on the stack.
  size_t numInputs = writer_.numInputOperands();
  for (size_t i = numInputs; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.isInRegister()) {
      spillOperandToStack(masm, &loc);
    }
  }

  // Step 4. Restore the register state, but don't discard the stack as
  // non-input operands are stored there.
  restoreInputState(masm, /* shouldDiscardStack = */ false);

  // We just restored the input state, so no input operands should be stored
  // on the stack.
#ifdef DEBUG
  for (size_t i = 0; i < numInputs; i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(!loc.isOnStack());
  }
#endif

  // Step 5. At this point our register state is correct. Stack values,
  // however, may cover the space where we have to store the live registers.
  // Move them out of the way.

  bool hasOperandOnStack = false;
  for (size_t i = numInputs; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (!loc.isOnStack()) {
      continue;
    }

    hasOperandOnStack = true;

    size_t operandSize = loc.stackSizeInBytes();
    size_t operandStackPushed = loc.stackPushed();
    MOZ_ASSERT(operandSize > 0);
    MOZ_ASSERT(stackPushed_ >= operandStackPushed);
    MOZ_ASSERT(operandStackPushed >= operandSize);

    // If this operand doesn't cover the live register space, there's
    // nothing to do.
    if (operandStackPushed - operandSize >= sizeOfLiveRegsInBytes) {
      MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
      continue;
    }

    // Reserve stack space for the live registers if needed.
    if (sizeOfLiveRegsInBytes > stackPushed_) {
      size_t extraBytes = sizeOfLiveRegsInBytes - stackPushed_;
      MOZ_ASSERT((extraBytes % sizeof(uintptr_t)) == 0);
      masm.subFromStackPtr(Imm32(extraBytes));
      stackPushed_ += extraBytes;
    }

    // Push the operand below the live register space.
    if (loc.kind() == OperandLocation::PayloadStack) {
      masm.push(
          Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
      stackPushed_ += operandSize;
      loc.setPayloadStack(stackPushed_, loc.payloadType());
      continue;
    }
    MOZ_ASSERT(loc.kind() == OperandLocation::ValueStack);
    masm.pushValue(
        Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
    stackPushed_ += operandSize;
    loc.setValueStack(stackPushed_);
  }

  // Step 6. If we have any operands on the stack, adjust their stackPushed
  // values to not include sizeOfLiveRegsInBytes (this simplifies code down
  // the line). Then push/store the live registers.
  if (hasOperandOnStack) {
    MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
    stackPushed_ -= sizeOfLiveRegsInBytes;

    for (size_t i = numInputs; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.isOnStack()) {
        loc.adjustStackPushed(-int32_t(sizeOfLiveRegsInBytes));
      }
    }

    size_t stackBottom = stackPushed_ + sizeOfLiveRegsInBytes;
    masm.storeRegsInMask(liveRegs, Address(masm.getStackPointer(), stackBottom),
                         scratch);
    masm.setFramePushed(masm.framePushed() + sizeOfLiveRegsInBytes);
  } else {
    // If no operands are on the stack, discard the unused stack space.
    if (stackPushed_ > 0) {
      masm.addToStackPtr(Imm32(stackPushed_));
      stackPushed_ = 0;
    }
    masm.PushRegsInMask(liveRegs);
  }
  freePayloadSlots_.clear();
  freeValueSlots_.clear();

  MOZ_ASSERT(masm.framePushed() ==
             ionScript->frameSize() + sizeOfLiveRegsInBytes);

  // Step 7. All live registers and non-input operands are stored on the stack
  // now, so at this point all registers except for the input registers are
  // available.
  availableRegs_.set() = GeneralRegisterSet::Not(inputRegisterSet());
  availableRegsAfterSpill_.set() = GeneralRegisterSet();

  // Step 8. We restored our input state, so we have to fix up aliased input
  // registers again.
  fixupAliasedInputs(masm);
}
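
// Resulting stack layout after saveIonLiveRegisters (stack grows down):
//
//   Ion frame slots (ionScript->frameSize())
//   saved live registers (sizeOfLiveRegsInBytes)
//   relocated non-input operands, if any
//   <- stack pointer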

void CacheRegisterAllocator::restoreIonLiveRegisters(MacroAssembler& masm,
                                                     LiveRegisterSet liveRegs) {
  masm.PopRegsInMask(liveRegs);

  availableRegs_.set() = GeneralRegisterSet();
  availableRegsAfterSpill_.set() = GeneralRegisterSet::All();
}

static void* GetReturnAddressToIonCode(JSContext* cx) {
  JSJitFrameIter frame(cx->activation()->asJit());
  MOZ_ASSERT(frame.type() == FrameType::Exit,
             "An exit frame is expected as update functions are called with a "
             "VMFunction.");

  void* returnAddr = frame.returnAddress();
#ifdef DEBUG
  ++frame;
  MOZ_ASSERT(frame.isIonJS());
#endif
  return returnAddr;
}

// The AutoSaveLiveRegisters parameter is used to ensure registers were saved.
void IonCacheIRCompiler::enterStubFrame(MacroAssembler& masm,
                                        const AutoSaveLiveRegisters&) {
  MOZ_ASSERT(!enteredStubFrame_);
  pushStubCodePointer();
  masm.PushFrameDescriptor(FrameType::IonJS);
  masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));

  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  enteredStubFrame_ = true;
}
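
// enterStubFrame leaves the stack in the IonICCallFrameLayout shape:
//   stub JitCode* (from pushStubCodePointer)
//   frame descriptor (FrameType::IonJS)
//   return address into the Ion code
//   saved FramePointer  <- new FramePointer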

void IonCacheIRCompiler::storeTracedValue(MacroAssembler& masm,
                                          ValueOperand value) {
  MOZ_ASSERT(localTracingSlots_ < 255);
  masm.Push(value);
  localTracingSlots_++;
}

void IonCacheIRCompiler::loadTracedValue(MacroAssembler& masm,
                                         uint8_t slotIndex,
                                         ValueOperand value) {
  MOZ_ASSERT(slotIndex <= localTracingSlots_);
  int32_t offset = IonICCallFrameLayout::LocallyTracedValueOffset +
                   slotIndex * sizeof(Value);
  masm.loadValue(Address(FramePointer, -offset), value);
}
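
// Values stored with storeTracedValue live right below the stub frame's
// FramePointer, so the GC can trace them while a call from the IC is on the
// stack; the scripted-proxy-get emitters below reload them with
// loadTracedValue after the call.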

bool IonCacheIRCompiler::init() {
  if (!allocator.init()) {
    return false;
  }

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(numInputs == NumInputsForCacheKind(ic_->kind()));

  AllocatableGeneralRegisterSet available;

  switch (ic_->kind()) {
    case CacheKind::GetProp:
    case CacheKind::GetElem: {
      IonGetPropertyIC* ic = ic_->asGetPropertyIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 1 || numInputs == 2);

      allocator.initInputLocation(0, ic->value());
      if (numInputs > 1) {
        allocator.initInputLocation(1, ic->id());
      }
      break;
    }
    case CacheKind::GetPropSuper:
    case CacheKind::GetElemSuper: {
      IonGetPropSuperIC* ic = ic_->asGetPropSuperIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 2 || numInputs == 3);

      allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);

      if (ic->kind() == CacheKind::GetPropSuper) {
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(1, ic->receiver());
      } else {
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(1, ic->id());
        allocator.initInputLocation(2, ic->receiver());
      }
      break;
    }
    case CacheKind::SetProp:
    case CacheKind::SetElem: {
      IonSetPropertyIC* ic = ic_->asSetPropertyIC();

      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());

      allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);

      if (ic->kind() == CacheKind::SetProp) {
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(1, ic->rhs());
      } else {
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(1, ic->id());
        allocator.initInputLocation(2, ic->rhs());
      }
      break;
    }
    case CacheKind::GetName: {
      IonGetNameIC* ic = ic_->asGetNameIC();
      ValueOperand output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::BindName: {
      IonBindNameIC* ic = ic_->asBindNameIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Object, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::GetIterator: {
      IonGetIteratorIC* ic = ic_->asGetIteratorIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp1());
      available.add(ic->temp2());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Object, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->value());
      break;
    }
    case CacheKind::OptimizeSpreadCall: {
      auto* ic = ic_->asOptimizeSpreadCallIC();
      ValueOperand output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->value());
      break;
    }
    case CacheKind::In: {
      IonInIC* ic = ic_->asInIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->key());
      allocator.initInputLocation(
          1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->object())));
      break;
    }
    case CacheKind::HasOwn: {
      IonHasOwnIC* ic = ic_->asHasOwnIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->id());
      allocator.initInputLocation(1, ic->value());
      break;
    }
    case CacheKind::CheckPrivateField: {
      IonCheckPrivateFieldIC* ic = ic_->asCheckPrivateFieldIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->value());
      allocator.initInputLocation(1, ic->id());
      break;
    }
    case CacheKind::InstanceOf: {
      IonInstanceOfIC* ic = ic_->asInstanceOfIC();
      Register output = ic->output();
      available.add(output);
      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(
          1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->rhs())));
      break;
    }
    case CacheKind::ToPropertyKey: {
      IonToPropertyKeyIC* ic = ic_->asToPropertyKeyIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->input());
      break;
    }
    case CacheKind::UnaryArith: {
      IonUnaryArithIC* ic = ic_->asUnaryArithIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->input());
      break;
    }
    case CacheKind::BinaryArith: {
      IonBinaryArithIC* ic = ic_->asBinaryArithIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(1, ic->rhs());
      break;
    }
    case CacheKind::Compare: {
      IonCompareIC* ic = ic_->asCompareIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(1, ic->rhs());
      break;
    }
    case CacheKind::CloseIter: {
      IonCloseIterIC* ic = ic_->asCloseIterIC();

      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      allocator.initInputLocation(0, ic->iter(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::OptimizeGetIterator: {
      auto* ic = ic_->asOptimizeGetIteratorIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->value());
      break;
    }
    case CacheKind::Call:
    case CacheKind::TypeOf:
    case CacheKind::TypeOfEq:
    case CacheKind::ToBool:
    case CacheKind::GetIntrinsic:
    case CacheKind::NewArray:
    case CacheKind::NewObject:
      MOZ_CRASH("Unsupported IC");
  }

  liveFloatRegs_ = LiveFloatRegisterSet(liveRegs_->fpus());

  allocator.initAvailableRegs(available);
  allocator.initAvailableRegsAfterSpill();
  return true;
}
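
// After init(), only the IC's output and temp registers are generally
// available; the inputs stay in the locations assigned by Ion's register
// allocator until the CacheIR allocator frees or spills them.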

JitCode* IonCacheIRCompiler::compile(IonICStub* stub) {
  AutoCreatedBy acb(masm, "IonCacheIRCompiler::compile");

  masm.setFramePushed(ionScript_->frameSize());
  if (cx_->runtime()->geckoProfiler().enabled()) {
    masm.enableProfilingInstrumentation();
  }

  allocator.fixupAliasedInputs(masm);

  CacheIRReader reader(writer_);
  do {
    CacheOp op = reader.readOp();
    perfSpewer_.recordInstruction(masm, op);
    switch (op) {
#define DEFINE_OP(op, ...)                 \
  case CacheOp::op:                        \
    if (!emit##op(reader)) return nullptr; \
    break;
      CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

      default:
        MOZ_CRASH("Invalid op");
    }
    allocator.nextOp();
  } while (reader.more());

  masm.assumeUnreachable("Should have returned from IC");

  // Done emitting the main IC code. Now emit the failure paths.
  for (size_t i = 0; i < failurePaths.length(); i++) {
    if (!emitFailurePath(i)) {
      return nullptr;
    }
    Register scratch = ic_->scratchRegisterForEntryJump();
    CodeOffset offset = masm.movWithPatch(ImmWord(-1), scratch);
    masm.jump(Address(scratch, 0));
    if (!nextCodeOffsets_.append(offset)) {
      return nullptr;
    }
  }

  Linker linker(masm);
  Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Ion));
  if (!newStubCode) {
    cx_->recoverFromOutOfMemory();
    return nullptr;
  }

  newStubCode->setLocalTracingSlots(localTracingSlots_);

  for (CodeOffset offset : nextCodeOffsets_) {
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(newStubCode, offset),
                                       ImmPtr(stub->nextCodeRawPtr()),
                                       ImmPtr((void*)-1));
  }

  if (stubJitCodeOffset_) {
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(newStubCode, *stubJitCodeOffset_),
        ImmPtr(newStubCode.get()), ImmPtr((void*)-1));
  }

  return newStubCode;
}

#ifdef DEBUG
void IonCacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
  switch (ic_->kind()) {
    case CacheKind::GetProp:
    case CacheKind::GetElem:
    case CacheKind::GetPropSuper:
    case CacheKind::GetElemSuper:
    case CacheKind::GetName:
    case CacheKind::BindName:
    case CacheKind::GetIterator:
    case CacheKind::In:
    case CacheKind::HasOwn:
    case CacheKind::CheckPrivateField:
    case CacheKind::InstanceOf:
    case CacheKind::UnaryArith:
    case CacheKind::ToPropertyKey:
    case CacheKind::OptimizeSpreadCall:
    case CacheKind::CloseIter:
    case CacheKind::OptimizeGetIterator:
      MOZ_CRASH("No float registers available");
    case CacheKind::SetProp:
    case CacheKind::SetElem:
      // FloatReg0 is available per LIRGenerator::visitSetPropertyCache.
      MOZ_ASSERT(reg == FloatReg0);
      break;
    case CacheKind::BinaryArith:
    case CacheKind::Compare:
      // FloatReg0 and FloatReg1 are available per
      // LIRGenerator::visitBinaryCache.
      MOZ_ASSERT(reg == FloatReg0 || reg == FloatReg1);
      break;
    case CacheKind::Call:
    case CacheKind::TypeOf:
    case CacheKind::TypeOfEq:
    case CacheKind::ToBool:
    case CacheKind::GetIntrinsic:
    case CacheKind::NewArray:
    case CacheKind::NewObject:
      MOZ_CRASH("Unsupported IC");
  }
}
#endif

bool IonCacheIRCompiler::emitGuardShape(ObjOperandId objId,
                                        uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Shape* shape = weakShapeStubField(shapeOffset);

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  Maybe<AutoScratchRegister> maybeScratch;
  if (needSpectreMitigations) {
    maybeScratch.emplace(allocator, masm);
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (needSpectreMitigations) {
    masm.branchTestObjShape(Assembler::NotEqual, obj, shape, *maybeScratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj, shape,
                                                failure->label());
  }

  return true;
}
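
// The Spectre variant needs a scratch register because, in addition to the
// shape test, it zeroes |obj| on a failed guard so a speculatively-executed
// type-confused load cannot use it; the NoSpectreMitigations variant skips
// both the scratch and the poisoning.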

bool IonCacheIRCompiler::emitGuardProto(ObjOperandId objId,
                                        uint32_t protoOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  JSObject* proto = weakObjectStubField(protoOffset);

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjProto(obj, scratch);
  masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(proto),
                 failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardCompartment(ObjOperandId objId,
                                              uint32_t globalOffset,
                                              uint32_t compartmentOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  JSObject* globalWrapper = objectStubField(globalOffset);
  JS::Compartment* compartment = compartmentStubField(compartmentOffset);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Verify that the global wrapper is still valid, as it is a prerequisite
  // for doing the compartment check.
  masm.movePtr(ImmGCPtr(globalWrapper), scratch);
  Address handlerAddr(scratch, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::Equal, handlerAddr,
                 ImmPtr(&DeadObjectProxy::singleton), failure->label());

  masm.branchTestObjCompartment(Assembler::NotEqual, obj, compartment, scratch,
                                failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardAnyClass(ObjOperandId objId,
                                           uint32_t claspOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  const JSClass* clasp = classStubField(claspOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (objectGuardNeedsSpectreMitigations(objId)) {
    masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                                scratch, failure->label());
  }

  return true;
}

bool IonCacheIRCompiler::emitGuardHasProxyHandler(ObjOperandId objId,
                                                  uint32_t handlerOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  const void* handler = proxyHandlerStubField(handlerOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address handlerAddr(obj, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::NotEqual, handlerAddr, ImmPtr(handler),
                 failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardSpecificObject(ObjOperandId objId,
                                                 uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  JSObject* expected = weakObjectStubField(expectedOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchPtr(Assembler::NotEqual, obj, ImmGCPtr(expected),
                 failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardSpecificFunction(
    ObjOperandId objId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
  return emitGuardSpecificObject(objId, expectedOffset);
}

bool IonCacheIRCompiler::emitGuardSpecificAtom(StringOperandId strId,
                                               uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  AutoScratchRegister scratch(allocator, masm);

  JSAtom* atom = &stringStubField(expectedOffset)->asAtom();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs = liveVolatileRegs();
  volatileRegs.takeUnchecked(scratch);

  masm.guardSpecificAtom(str, atom, scratch, volatileRegs, failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardSpecificSymbol(SymbolOperandId symId,
                                                 uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register sym = allocator.useRegister(masm, symId);
  JS::Symbol* expected = symbolStubField(expectedOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchPtr(Assembler::NotEqual, sym, ImmGCPtr(expected),
                 failure->label());
  return true;
}

bool IonCacheIRCompiler::emitLoadValueResult(uint32_t valOffset) {
  MOZ_CRASH("Baseline-specific op");
}

bool IonCacheIRCompiler::emitLoadFixedSlotResult(ObjOperandId objId,
                                                 uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  masm.loadTypedOrValue(Address(obj, offset), output);
  return true;
}

bool IonCacheIRCompiler::emitLoadFixedSlotTypedResult(ObjOperandId objId,
                                                      uint32_t offsetOffset,
                                                      ValueType) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitLoadDynamicSlotResult(ObjOperandId objId,
                                                   uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
  masm.loadTypedOrValue(Address(scratch, offset), output);
  return true;
}

bool IonCacheIRCompiler::emitCallScriptedGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
    uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);

  JSFunction* target = &objectStubField(getterOffset)->as<JSFunction>();
  AutoScratchRegister scratch(allocator, masm);

  MOZ_ASSERT(sameRealm == (cx_->realm() == target->realm()));

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  enterStubFrame(masm, save);
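
  // What follows builds the JitFrameLayout for an argc == 0 call: alignment
  // padding, |undefined| for each formal argument, |this| (the receiver),
  // the callee JSFunction*, and the frame descriptor.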

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push the
  // |this| + argument Values.
  uint32_t argSize = (target->nargs() + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  for (size_t i = 0; i < target->nargs(); i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(receiver);

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  masm.movePtr(ImmGCPtr(target), scratch);

  masm.Push(scratch);
  masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 0);

  // Check stack alignment. Add 2 * sizeof(uintptr_t) for the return address
  // and frame pointer pushed by the call/callee.
  MOZ_ASSERT(
      ((masm.framePushed() + 2 * sizeof(uintptr_t)) % JitStackAlignment) == 0);

  MOZ_ASSERT(target->hasJitEntry());
  masm.loadJitCodeRaw(scratch, scratch);
  masm.callJit(scratch);

  if (!sameRealm) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "ReturnReg available as scratch after scripted calls");
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  masm.storeCallResultValue(output);

  // Restore the frame pointer and stack pointer.
  masm.loadPtr(Address(FramePointer, 0), FramePointer);
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}

#ifdef JS_PUNBOX64
template <typename IdType>
bool IonCacheIRCompiler::emitCallScriptedProxyGetShared(
    ValOperandId targetId, ObjOperandId receiverId, ObjOperandId handlerId,
    ObjOperandId trapId, IdType id, uint32_t nargsAndFlags) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand target = allocator.useValueRegister(masm, targetId);
  Register receiver = allocator.useRegister(masm, receiverId);
  Register handler = allocator.useRegister(masm, handlerId);
  Register callee = allocator.useRegister(masm, trapId);
  ValueOperand idVal;
  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    idVal = allocator.useValueRegister(masm, id);
  }
  size_t nargs = nargsAndFlags >> JSFunction::ArgCountShift;

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  ValueOperand scratchVal(scratch);
  ValueOperand scratchVal2(scratch2);

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  enterStubFrame(masm, save);

  // We need to keep the target around to potentially validate the proxy
  // result.
  storeTracedValue(masm, target);
  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    // Same for the id, assuming it's not baked in.
    storeTracedValue(masm, idVal);
  }
  uint32_t framePushedBeforeArgs = masm.framePushed();

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push the
  // |this| + argument Values.
  uint32_t argSize = (std::max(nargs, (size_t)3) + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  for (size_t i = 3; i < nargs; i++) {
    masm.Push(UndefinedValue());
  }

  masm.tagValue(JSVAL_TYPE_OBJECT, receiver, scratchVal);
  masm.Push(scratchVal);

  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    masm.Push(idVal);
  } else {
    masm.movePropertyKey(idStubField(id), scratch);
    masm.tagValue(JSVAL_TYPE_STRING, scratch, scratchVal);
    masm.Push(scratchVal);
  }

  masm.Push(target);

  masm.tagValue(JSVAL_TYPE_OBJECT, handler, scratchVal);
  masm.Push(scratchVal);

  masm.Push(callee);
  masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 3);

  // Check stack alignment. Add 2 * sizeof(uintptr_t) for the return address
  // and frame pointer pushed by the call/callee.
  MOZ_ASSERT(
      ((masm.framePushed() + 2 * sizeof(uintptr_t)) % JitStackAlignment) == 0);

  masm.loadJitCodeRaw(callee, scratch);
  masm.callJit(scratch);

  masm.storeCallResultValue(output);

  Label success, end;
  loadTracedValue(masm, 0, scratchVal);
  masm.unboxObject(scratchVal, scratch);
  masm.branchTestObjectNeedsProxyResultValidation(Assembler::Zero, scratch,
                                                  scratch2, &success);

  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    loadTracedValue(masm, 1, scratchVal2);
  } else {
    masm.moveValue(StringValue(idStubField(id).toString()), scratchVal2);
  }

  uint32_t framePushedAfterCall = masm.framePushed();
  masm.freeStack(masm.framePushed() - framePushedBeforeArgs);

  masm.Push(output.valueReg());
  masm.Push(scratchVal2);
  masm.Push(scratch);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      MutableHandleValue);
  callVM<Fn, CheckProxyGetByValueResult>(masm);

  masm.storeCallResultValue(output);

  masm.jump(&end);
  masm.bind(&success);
  masm.setFramePushed(framePushedAfterCall);

  // Restore the frame pointer and stack pointer.
  masm.loadPtr(Address(FramePointer, 0), FramePointer);
  masm.freeStack(masm.framePushed() - framePushedBefore);
  masm.bind(&end);

  return true;
}
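
// The trap's |target| (and a by-value |id|) are kept alive in local tracing
// slots across the scripted call above: if the trap's result needs
// validation, they are reloaded afterwards and passed to
// CheckProxyGetByValueResult.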

bool IonCacheIRCompiler::emitCallScriptedProxyGetResult(
    ValOperandId targetId, ObjOperandId receiverId, ObjOperandId handlerId,
    ObjOperandId trapId, uint32_t id, uint32_t nargsAndFlags) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitCallScriptedProxyGetShared(targetId, receiverId, handlerId, trapId,
                                        id, nargsAndFlags);
}

bool IonCacheIRCompiler::emitCallScriptedProxyGetByValueResult(
    ValOperandId targetId, ObjOperandId receiverId, ObjOperandId handlerId,
    ValOperandId idId, ObjOperandId trapId, uint32_t nargsAndFlags) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitCallScriptedProxyGetShared(targetId, receiverId, handlerId, trapId,
                                        idId, nargsAndFlags);
}
#endif

bool IonCacheIRCompiler::emitCallInlinedGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, uint32_t icScriptOffset,
    bool sameRealm, uint32_t nargsAndFlagsOffset) {
  MOZ_CRASH("Trial inlining not supported in Ion");
}

bool IonCacheIRCompiler::emitCallNativeGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
    uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);

  JSFunction* target = &objectStubField(getterOffset)->as<JSFunction>();
  MOZ_ASSERT(target->isNativeFun());

  AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType argUintN(allocator, masm, output);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Construct vp array:
  // Push receiver value for |this|.
  masm.Push(receiver);
  // Push callee/outparam.
  masm.Push(ObjectValue(*target));

  // Preload arguments into registers.
  masm.loadJSContext(argJSContext);
  masm.move32(Imm32(0), argUintN);
  masm.moveStackPtrTo(argVp.get());

  // Push marking data for later use.
  masm.Push(argUintN);
  pushStubCodePointer();

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  // Construct and execute call.
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argUintN);
  masm.passABIArg(argVp);
  masm.callWithABI(DynamicFunction<JSNative>(target->native()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  if (!sameRealm) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  Address outparam(masm.getStackPointer(),
                   IonOOLNativeExitFrameLayout::offsetOfResult());
  masm.loadValue(outparam, output.valueReg());

  if (JitOptions.spectreJitToCxxCalls) {
    masm.speculationBarrier();
  }

  masm.adjustStack(IonOOLNativeExitFrameLayout::Size(0));
  return true;
}

bool IonCacheIRCompiler::emitCallDOMGetterResult(ObjOperandId objId,
                                                 uint32_t jitInfoOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);

  const JSJitInfo* info = rawPointerStubField<const JSJitInfo*>(jitInfoOffset);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(obj);
  masm.Push(ImmPtr(info));

  using Fn =
      bool (*)(JSContext*, const JSJitInfo*, HandleObject, MutableHandleValue);
  callVM<Fn, jit::CallDOMGetter>(masm);

  masm.storeCallResultValue(output);
  return true;
}

bool IonCacheIRCompiler::emitCallDOMSetter(ObjOperandId objId,
                                           uint32_t jitInfoOffset,
                                           ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  const JSJitInfo* info = rawPointerStubField<const JSJitInfo*>(jitInfoOffset);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(val);
  masm.Push(obj);
  masm.Push(ImmPtr(info));

  using Fn = bool (*)(JSContext*, const JSJitInfo*, HandleObject, HandleValue);
  callVM<Fn, jit::CallDOMSetter>(masm);
  return true;
}

bool IonCacheIRCompiler::emitProxyGetResult(ObjOperandId objId,
                                            uint32_t idOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  jsid id = idStubField(idOffset);

  // ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id,
  //                  MutableHandleValue vp)
  AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType argProxy(allocator, masm, output);
  AutoScratchRegister argId(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  // Push stubCode for marking.
  pushStubCodePointer();

  // Push args on stack first so we can take pointers to make handles.
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(argVp.get());

  masm.Push(id, scratch);
  masm.moveStackPtrTo(argId.get());

  // Push the proxy. Also used as receiver.
  masm.Push(obj);
  masm.moveStackPtrTo(argProxy.get());

  masm.loadJSContext(argJSContext);

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLProxy);

  // Make the call.
  using Fn = bool (*)(JSContext* cx, HandleObject proxy, HandleId id,
                      MutableHandleValue vp);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argProxy);
  masm.passABIArg(argId);
  masm.passABIArg(argVp);
  masm.callWithABI<Fn, ProxyGetProperty>(
      ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Load the outparam vp[0] into output register(s).
  Address outparam(masm.getStackPointer(),
                   IonOOLProxyExitFrameLayout::offsetOfResult());
  masm.loadValue(outparam, output.valueReg());

  // Spectre mitigation in case of speculative execution within C++ code.
  if (JitOptions.spectreJitToCxxCalls) {
    masm.speculationBarrier();
  }

  // masm.leaveExitFrame & pop locals
  masm.adjustStack(IonOOLProxyExitFrameLayout::Size());
  return true;
}

bool IonCacheIRCompiler::emitFrameIsConstructingResult() {
  MOZ_CRASH("Baseline-specific op");
}

bool IonCacheIRCompiler::emitLoadConstantStringResult(uint32_t strOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  MOZ_CRASH("not used in ion");
}

bool IonCacheIRCompiler::emitCompareStringResult(JSOp op, StringOperandId lhsId,
                                                 StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register left = allocator.useRegister(masm, lhsId);
  Register right = allocator.useRegister(masm, rhsId);

  allocator.discardStack(masm);

  Label slow, done;
  MOZ_ASSERT(!output.hasValue());
  masm.compareStrings(op, left, right, output.typedReg().gpr(), &slow);

  masm.jump(&done);
  masm.bind(&slow);

  enterStubFrame(masm, save);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.Push(left);
    masm.Push(right);
  } else {
    masm.Push(right);
    masm.Push(left);
  }

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    callVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(masm);
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    callVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(masm);
  } else if (op == JSOp::Lt || op == JSOp::Gt) {
    callVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(masm);
  } else {
    MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
    callVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(masm);
  }

  masm.storeCallBoolResult(output.typedReg().gpr());
  masm.bind(&done);
  return true;
}

bool IonCacheIRCompiler::emitStoreFixedSlot(ObjOperandId objId,
                                            uint32_t offsetOffset,
                                            ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);

  Address slot(obj, offset);
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeConstantOrRegister(val, slot);
  emitPostBarrierSlot(obj, val, scratch);
  return true;
}
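
// Note the barrier pairing above: the pre-barrier keeps incremental marking
// correct for the value being overwritten, while the post-barrier records
// |obj| in the store buffer in case a nursery-allocated value was stored
// into a tenured object.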

bool IonCacheIRCompiler::emitStoreDynamicSlot(ObjOperandId objId,
                                              uint32_t offsetOffset,
                                              ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);

  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
  Address slot(scratch, offset);
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeConstantOrRegister(val, slot);
  emitPostBarrierSlot(obj, val, scratch);
  return true;
}

bool IonCacheIRCompiler::emitAddAndStoreSlotShared(
    CacheOp op, ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
    uint32_t newShapeOffset, Maybe<uint32_t> numNewSlotsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch1(allocator, masm);

  Maybe<AutoScratchRegister> scratch2;
  if (op == CacheOp::AllocateAndStoreDynamicSlot) {
    scratch2.emplace(allocator, masm);
  }

  Shape* newShape = shapeStubField(newShapeOffset);

  if (op == CacheOp::AllocateAndStoreDynamicSlot) {
    // We have to (re)allocate dynamic slots. Do this first, as it's the
    // only fallible operation here. Note that growSlotsPure is
    // fallible but does not GC.

    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    int32_t numNewSlots = int32StubField(*numNewSlotsOffset);
    MOZ_ASSERT(numNewSlots > 0);

    LiveRegisterSet save = liveVolatileRegs();
    masm.PushRegsInMask(save);

    using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
    masm.setupUnalignedABICall(scratch1);
    masm.loadJSContext(scratch1);
    masm.passABIArg(scratch1);
    masm.passABIArg(obj);
    masm.move32(Imm32(numNewSlots), scratch2.ref());
    masm.passABIArg(scratch2.ref());
    masm.callWithABI<Fn, NativeObject::growSlotsPure>();
    masm.storeCallPointerResult(scratch1);

    LiveRegisterSet ignore;
    ignore.add(scratch1);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.branchIfFalseBool(scratch1, failure->label());
  }

  // Update the object's shape.
  masm.storeObjShape(newShape, obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  if (op == CacheOp::AddAndStoreFixedSlot) {
    Address slot(obj, offset);
    masm.storeConstantOrRegister(val, slot);
  } else {
    MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
               op == CacheOp::AllocateAndStoreDynamicSlot);
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
    Address slot(scratch1, offset);
    masm.storeConstantOrRegister(val, slot);
  }

  emitPostBarrierSlot(obj, val, scratch1);

  return true;
}

bool IonCacheIRCompiler::emitAddAndStoreFixedSlot(ObjOperandId objId,
                                                  uint32_t offsetOffset,
                                                  ValOperandId rhsId,
                                                  uint32_t newShapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
  return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot, objId,
                                   offsetOffset, rhsId, newShapeOffset,
                                   numNewSlotsOffset);
}

bool IonCacheIRCompiler::emitAddAndStoreDynamicSlot(ObjOperandId objId,
                                                    uint32_t offsetOffset,
                                                    ValOperandId rhsId,
                                                    uint32_t newShapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
  return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot, objId,
                                   offsetOffset, rhsId, newShapeOffset,
                                   numNewSlotsOffset);
}

bool IonCacheIRCompiler::emitAllocateAndStoreDynamicSlot(
    ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
    uint32_t newShapeOffset, uint32_t numNewSlotsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot, objId,
                                   offsetOffset, rhsId, newShapeOffset,
                                   mozilla::Some(numNewSlotsOffset));
}

bool IonCacheIRCompiler::emitLoadStringCharResult(StringOperandId strId,
                                                  Int32OperandId indexId,
                                                  bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check, load string char.
  Label done;
  Label loadFailed;
  if (!handleOOB) {
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, failure->label());
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
                        failure->label());
  } else {
    // Return the empty string for out-of-bounds access.
    masm.movePtr(ImmGCPtr(cx_->runtime()->emptyString), scratch2);

    // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
    // guaranteed to see no nested ropes.
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, &done);
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);
  }

  // Load StaticString for this char. For larger code units perform a VM call.
  Label vmCall;
  masm.lookupStaticString(scratch1, scratch2, cx_->staticStrings(), &vmCall);
  masm.jump(&done);

  if (handleOOB) {
    masm.bind(&loadFailed);
    masm.assumeUnreachable("loadStringChar can't fail for linear strings");
  }

  {
    masm.bind(&vmCall);

    // FailurePath and AutoSaveLiveRegisters don't get along very well. Both
    // are modifying the stack and expect that no other stack manipulations
    // are made. Therefore we need to use an ABI call instead of a VM call
    // here.

    LiveRegisterSet volatileRegs = liveVolatileRegs();
    volatileRegs.takeUnchecked(scratch1);
    volatileRegs.takeUnchecked(scratch2);
    volatileRegs.takeUnchecked(scratch3);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSContext* cx, int32_t code);
    masm.setupUnalignedABICall(scratch2);
    masm.loadJSContext(scratch2);
    masm.passABIArg(scratch2);
    masm.passABIArg(scratch1);
    masm.callWithABI<Fn, jit::StringFromCharCodeNoGC>();
    masm.storeCallPointerResult(scratch2);

    masm.PopRegsInMask(volatileRegs);

    masm.branchPtr(Assembler::Equal, scratch2, ImmWord(0), failure->label());
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_STRING, scratch2, output.valueReg());
  return true;
}

bool IonCacheIRCompiler::emitCallNativeSetter(ObjOperandId receiverId,
                                              uint32_t setterOffset,
                                              ValOperandId rhsId,
                                              bool sameRealm,
                                              uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register receiver = allocator.useRegister(masm, receiverId);
  JSFunction* target = &objectStubField(setterOffset)->as<JSFunction>();
  MOZ_ASSERT(target->isNativeFun());
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister argJSContext(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister argUintN(allocator, masm);
#ifndef JS_CODEGEN_X86
  AutoScratchRegister scratch(allocator, masm);
#else
  // Not enough registers on x86.
  Register scratch = argUintN;
#endif

  allocator.discardStack(masm);

  // Set up the call:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // vp[0] is callee/outparam
  // vp[1] is |this|
  // vp[2] is the value

  // Build vp and move the base into argVpReg.
  masm.Push(val);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));
  masm.Push(ObjectValue(*target));
  masm.moveStackPtrTo(argVp.get());

  // Preload other regs.
  masm.loadJSContext(argJSContext);
  masm.move32(Imm32(1), argUintN);

  // Push marking data for later use.
  masm.Push(argUintN);
  pushStubCodePointer();

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  // Make the call.
  masm.setupUnalignedABICall(scratch);
#ifdef JS_CODEGEN_X86
  // Reload argUintN because it was clobbered.
  masm.move32(Imm32(1), argUintN);
#endif
  masm.passABIArg(argJSContext);
  masm.passABIArg(argUintN);
  masm.passABIArg(argVp);
  masm.callWithABI(DynamicFunction<JSNative>(target->native()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  if (!sameRealm) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  masm.adjustStack(IonOOLNativeExitFrameLayout::Size(1));
  return true;
}

bool IonCacheIRCompiler::emitCallScriptedSetter(ObjOperandId receiverId,
                                                uint32_t setterOffset,
                                                ValOperandId rhsId,
                                                bool sameRealm,
                                                uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register receiver = allocator.useRegister(masm, receiverId);
  JSFunction* target = &objectStubField(setterOffset)->as<JSFunction>();
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  MOZ_ASSERT(sameRealm == (cx_->realm() == target->realm()));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  enterStubFrame(masm, save);

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push the
  // |this| + argument Values.
  size_t numArgs = std::max<size_t>(1, target->nargs());
  uint32_t argSize = (numArgs + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  for (size_t i = 1; i < target->nargs(); i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(val);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  masm.movePtr(ImmGCPtr(target), scratch);

  masm.Push(scratch);
  masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 1);

  // Check stack alignment. Add 2 * sizeof(uintptr_t) for the return address
  // and frame pointer pushed by the call/callee.
  MOZ_ASSERT(
      ((masm.framePushed() + 2 * sizeof(uintptr_t)) % JitStackAlignment) == 0);

  MOZ_ASSERT(target->hasJitEntry());
  masm.loadJitCodeRaw(scratch, scratch);
  masm.callJit(scratch);

  if (!sameRealm) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  // Restore the frame pointer and stack pointer.
  masm.loadPtr(Address(FramePointer, 0), FramePointer);
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}

bool IonCacheIRCompiler::emitCallInlinedSetter(
    ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
    uint32_t icScriptOffset, bool sameRealm, uint32_t nargsAndFlagsOffset) {
  MOZ_CRASH("Trial inlining not supported in Ion");
}

bool IonCacheIRCompiler::emitCallSetArrayLength(ObjOperandId objId, bool strict,
                                                ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
  callVM<Fn, jit::SetArrayLength>(masm);
  return true;
}

bool IonCacheIRCompiler::emitProxySet(ObjOperandId objId, uint32_t idOffset,
                                      ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  jsid id = idStubField(idOffset);

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(id, scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, ProxySetProperty>(masm);
  return true;
}

bool IonCacheIRCompiler::emitProxySetByValue(ObjOperandId objId,
                                             ValOperandId idId,
                                             ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister idVal = allocator.useConstantOrRegister(masm, idId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, ProxySetPropertyByValue>(masm);
  return true;
}

bool IonCacheIRCompiler::emitCallAddOrUpdateSparseElementHelper(
    ObjOperandId objId, Int32OperandId idId, ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  Register id = allocator.useRegister(masm, idId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(id);
  masm.Push(obj);

  using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
                      HandleValue v, bool strict);
  callVM<Fn, AddOrUpdateSparseElementHelper>(masm);
  return true;
}

bool IonCacheIRCompiler::emitMegamorphicSetElement(ObjOperandId objId,
                                                   ValOperandId idId,
                                                   ValOperandId rhsId,
                                                   bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister idVal = allocator.useConstantOrRegister(masm, idId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, SetElementMegamorphic<false>>(masm);
  return true;
}

bool IonCacheIRCompiler::emitReturnFromIC() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (!savedLiveRegs_) {
    allocator.restoreInputState(masm);
  }
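  // Jump back to the rejoin point in the Ion script; at this point the IC's
  // output register(s) already hold the result, if any.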
  uint8_t* rejoinAddr = ic_->rejoinAddr(ionScript_);
  masm.jump(ImmPtr(rejoinAddr));
  return true;
}

bool IonCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape(
    ValOperandId expandoId, uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand val = allocator.useValueRegister(masm, expandoId);
  Shape* shape = shapeStubField(shapeOffset);

  AutoScratchRegister objScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
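  // A missing expando is represented as |undefined|; in that case there is
  // no expando shape to guard.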
  masm.branchTestUndefined(Assembler::Equal, val, &done);

  masm.debugAssertIsObject(val);
  masm.unboxObject(val, objScratch);
  // The expando object is not used in this case, so we don't need Spectre
  // mitigations.
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch,
                                              shape, failure->label());

  masm.bind(&done);
  return true;
}

bool IonCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration(
    ObjOperandId objId, uint32_t expandoAndGenerationOffset,
    uint32_t generationOffset, ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ExpandoAndGeneration* expandoAndGeneration =
      rawPointerStubField<ExpandoAndGeneration*>(expandoAndGenerationOffset);
  uint64_t generation = rawInt64StubField<uint64_t>(generationOffset);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
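  // Load the expando Value from the proxy while checking, in one shot, that
  // the ExpandoAndGeneration* and its generation still match the values
  // baked into the stub; on mismatch we jump to |failure|.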
  masm.loadDOMExpandoValueGuardGeneration(obj, output, expandoAndGeneration,
                                          generation, failure->label());
  return true;
}

void IonIC::attachCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
                              CacheKind kind, IonScript* ionScript,
                              bool* attached) {
  // We shouldn't GC or report OOM (or any other exception) here.
  AutoAssertNoPendingException aanpe(cx);
  JS::AutoCheckCannotGC nogc;

  MOZ_ASSERT(!*attached);

  // Do nothing if the IR generator failed or triggered a GC that invalidated
  // the script.
  if (writer.failed() || ionScript->invalidated()) {
    return;
  }

  JitZone* jitZone = cx->zone()->jitZone();

  constexpr uint32_t stubDataOffset = sizeof(IonICStub);
  static_assert(stubDataOffset % sizeof(uint64_t) == 0,
                "Stub fields must be aligned");
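  // A stub is laid out as [IonICStub header][stub data], so the stub data
  // starts sizeof(IonICStub) bytes in; the 8-byte alignment asserted above
  // keeps Value-sized stub fields naturally aligned.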
  // Try to reuse a previously-allocated CacheIRStubInfo.
  CacheIRStubKey::Lookup lookup(kind, ICStubEngine::IonIC, writer.codeStart(),
                                writer.codeLength());
  CacheIRStubInfo* stubInfo = jitZone->getIonCacheIRStubInfo(lookup);
  if (!stubInfo) {
    // Allocate the shared CacheIRStubInfo. Note that the
    // putIonCacheIRStubInfo call below will transfer ownership to
    // the stub info HashSet, so we don't have to worry about freeing
    // it below.

    // For Ion ICs, we don't track/use the makesGCCalls flag, so just pass true.
    bool makesGCCalls = true;
    stubInfo = CacheIRStubInfo::New(kind, ICStubEngine::IonIC, makesGCCalls,
                                    stubDataOffset, writer);
    if (!stubInfo) {
      return;
    }

    CacheIRStubKey key(stubInfo);
    if (!jitZone->putIonCacheIRStubInfo(lookup, key)) {
      return;
    }
  }

  MOZ_ASSERT(stubInfo);

  // Ensure we don't attach duplicate stubs. This can happen if a stub failed
  // for some reason and the IR generator doesn't check for exactly the same
  // conditions.
  for (IonICStub* stub = firstStub_; stub; stub = stub->next()) {
    if (stub->stubInfo() != stubInfo) {
      continue;
    }
    if (!writer.stubDataEquals(stub->stubDataStart())) {
      continue;
    }
    return;
  }

  size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();

  // Allocate the IonICStub in the JitZone's stub space. Ion stubs and
  // CacheIRStubInfo instances for Ion stubs can be purged on GC. That's okay
  // because the stub code is rooted separately when we make a VM call, and
  // stub code should never access the IonICStub after making a VM call. The
  // IonICStub::poison method poisons the stub to catch bugs in this area.
  ICStubSpace* stubSpace = cx->zone()->jitZone()->stubSpace();
  void* newStubMem = stubSpace->alloc(bytesNeeded);
  if (!newStubMem) {
    return;
  }

  IonICStub* newStub =
      new (newStubMem) IonICStub(fallbackAddr(ionScript), stubInfo);
  writer.copyStubData(newStub->stubDataStart());
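  // Copy the stub fields recorded by the writer (shapes, objects, ids, raw
  // words) into the stub data area that trails the header.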

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jctx(cx);
  IonCacheIRCompiler compiler(cx, temp, writer, this, ionScript,
                              stubDataOffset);
  if (!compiler.init()) {
    return;
  }

  JitCode* code = compiler.compile(newStub);
  if (!code) {
    return;
  }

  // Record the stub code if perf spewer is enabled.
  CacheKind stubKind = newStub->stubInfo()->kind();
  compiler.perfSpewer().saveProfile(cx, script(), code,
                                    CacheKindNames[uint8_t(stubKind)]);

  // Add an entry to the profiler's code table, so that the profiler can
  // identify this as Ion code.
  if (ionScript->hasProfilingInstrumentation()) {
    uint8_t* addr = rejoinAddr(ionScript);
    auto entry = MakeJitcodeGlobalEntry<IonICEntry>(cx, code, code->raw(),
                                                    code->rawEnd(), addr);
    if (!entry) {
      cx->recoverFromOutOfMemory();
      return;
    }

    auto* globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    if (!globalTable->addEntry(std::move(entry))) {
      return;
    }
  }

  attachStub(newStub, code);
  *attached = true;
}

bool IonCacheIRCompiler::emitCallStringObjectConcatResult(ValOperandId lhsId,
                                                          ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
  ValueOperand rhs = allocator.useValueRegister(masm, rhsId);

  allocator.discardStack(masm);

  enterStubFrame(masm, save);
  masm.Push(rhs);
  masm.Push(lhs);

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, MutableHandleValue);
  callVM<Fn, DoConcatStringObject>(masm);

  masm.storeCallResultValue(output);
  return true;
}

bool IonCacheIRCompiler::emitCloseIterScriptedResult(ObjOperandId iterId,
                                                     ObjOperandId calleeId,
                                                     CompletionKind kind,
                                                     uint32_t calleeNargs) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register iter = allocator.useRegister(masm, iterId);
  Register callee = allocator.useRegister(masm, calleeId);

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  // Construct IonICCallFrameLayout.
  enterStubFrame(masm, save);

  uint32_t stubFramePushed = masm.framePushed();

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push |this|
  // and |calleeNargs| undefined arguments.
  uint32_t argSize = (calleeNargs + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  for (uint32_t i = 0; i < calleeNargs; i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(iter)));

  masm.Push(callee);
  masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 0);
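  // The iterator's |return| method is invoked with no explicit arguments
  // (argc == 0); the undefined values pushed above only pad out the callee's
  // formal parameters.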
  masm.loadJitCodeRaw(callee, callee);
  masm.callJit(callee);

  if (kind != CompletionKind::Throw) {
    // Verify that the return value is an object.
    Label success;
    masm.branchTestObject(Assembler::Equal, JSReturnOperand, &success);

    // We can reuse the same stub frame, but we first have to pop the arguments
    // from the previous call.
    uint32_t framePushedAfterCall = masm.framePushed();
    masm.freeStack(masm.framePushed() - stubFramePushed);

    masm.push(Imm32(int32_t(CheckIsObjectKind::IteratorReturn)));
    using Fn = bool (*)(JSContext*, CheckIsObjectKind);
    callVM<Fn, ThrowCheckIsObject>(masm);
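    // ThrowCheckIsObject always throws, so execution never falls through to
    // this point; setFramePushed below re-synchronizes the assembler's
    // bookkeeping with the branch-taken path that lands on |success|.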
    masm.bind(&success);
    masm.setFramePushed(framePushedAfterCall);
  }

  // Restore the frame pointer and stack pointer.
  masm.loadPtr(Address(FramePointer, 0), FramePointer);
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}

bool IonCacheIRCompiler::emitGuardFunctionScript(ObjOperandId funId,
                                                 uint32_t expectedOffset,
                                                 uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);
  BaseScript* expected = weakBaseScriptStubField(expectedOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
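  // A JSFunction stores its BaseScript as a private value in the
  // jitInfoOrScript slot; load it and guard that it matches the expected
  // script.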
  masm.loadPrivate(Address(fun, JSFunction::offsetOfJitInfoOrScript()),
                   scratch);
  masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(expected),
                 failure->label());
  return true;
}
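
// The remaining ops below are only generated for call ICs (and
// object-allocation ICs), which Ion does not use, so they must be
// unreachable in Ion IC stubs.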

bool IonCacheIRCompiler::emitCallScriptedFunction(ObjOperandId calleeId,
                                                  Int32OperandId argcId,
                                                  CallFlags flags,
                                                  uint32_t argcFixed) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallBoundScriptedFunction(ObjOperandId calleeId,
                                                       ObjOperandId targetId,
                                                       Int32OperandId argcId,
                                                       CallFlags flags,
                                                       uint32_t numBoundArgs) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallWasmFunction(
    ObjOperandId calleeId, Int32OperandId argcId, CallFlags flags,
    uint32_t argcFixed, uint32_t funcExportOffset, uint32_t instanceOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

#ifdef JS_SIMULATOR
bool IonCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
                                                Int32OperandId argcId,
                                                CallFlags flags,
                                                uint32_t argcFixed,
                                                uint32_t targetOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallDOMFunction(
    ObjOperandId calleeId, Int32OperandId argcId, ObjOperandId thisObjId,
    CallFlags flags, uint32_t argcFixed, uint32_t targetOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}
#else
bool IonCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
                                                Int32OperandId argcId,
                                                CallFlags flags,
                                                uint32_t argcFixed,
                                                bool ignoresReturnValue) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallDOMFunction(ObjOperandId calleeId,
                                             Int32OperandId argcId,
                                             ObjOperandId thisObjId,
                                             CallFlags flags,
                                             uint32_t argcFixed) {
  MOZ_CRASH("Call ICs not used in ion");
}
#endif

bool IonCacheIRCompiler::emitCallClassHook(ObjOperandId calleeId,
                                           Int32OperandId argcId,
                                           CallFlags flags, uint32_t argcFixed,
                                           uint32_t targetOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallInlinedFunction(ObjOperandId calleeId,
                                                 Int32OperandId argcId,
                                                 uint32_t icScriptOffset,
                                                 CallFlags flags,
                                                 uint32_t argcFixed) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitBindFunctionResult(ObjOperandId targetId,
                                                uint32_t argc,
                                                uint32_t templateObjectOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitSpecializedBindFunctionResult(
    ObjOperandId targetId, uint32_t argc, uint32_t templateObjectOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitLoadArgumentFixedSlot(ValOperandId resultId,
                                                   uint8_t slotIndex) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitLoadArgumentDynamicSlot(ValOperandId resultId,
                                                     Int32OperandId argcId,
                                                     uint8_t slotIndex) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitArrayJoinResult(ObjOperandId objId,
                                             StringOperandId sepId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitPackedArraySliceResult(
    uint32_t templateObjectOffset, ObjOperandId arrayId, Int32OperandId beginId,
    Int32OperandId endId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitArgumentsSliceResult(uint32_t templateObjectOffset,
                                                  ObjOperandId argsId,
                                                  Int32OperandId beginId,
                                                  Int32OperandId endId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitIsArrayResult(ValOperandId inputId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitIsTypedArrayResult(ObjOperandId objId,
                                                bool isPossiblyWrapped) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitStringFromCharCodeResult(Int32OperandId codeId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitStringFromCodePointResult(Int32OperandId codeId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitMathRandomResult(uint32_t rngOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitReflectGetPrototypeOfResult(ObjOperandId objId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitHasClassResult(ObjOperandId objId,
                                            uint32_t claspOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitSameValueResult(ValOperandId lhs,
                                             ValOperandId rhs) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitSetHasStringResult(ObjOperandId setId,
                                                StringOperandId strId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitMapHasStringResult(ObjOperandId mapId,
                                                StringOperandId strId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitMapGetStringResult(ObjOperandId mapId,
                                                StringOperandId strId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitNewArrayObjectResult(uint32_t arrayLength,
                                                  uint32_t shapeOffset,
                                                  uint32_t siteOffset) {
  MOZ_CRASH("NewArray ICs not used in ion");
}

bool IonCacheIRCompiler::emitNewPlainObjectResult(uint32_t numFixedSlots,
                                                  uint32_t numDynamicSlots,
                                                  gc::AllocKind allocKind,
                                                  uint32_t shapeOffset,
                                                  uint32_t siteOffset) {
  MOZ_CRASH("NewObject ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallRegExpMatcherResult(ObjOperandId regexpId,
                                                     StringOperandId inputId,
                                                     Int32OperandId lastIndexId,
                                                     uint32_t stubOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallRegExpSearcherResult(
    ObjOperandId regexpId, StringOperandId inputId, Int32OperandId lastIndexId,
    uint32_t stubOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitRegExpBuiltinExecMatchResult(
    ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitRegExpBuiltinExecTestResult(
    ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitRegExpHasCaptureGroupsResult(
    ObjOperandId regexpId, StringOperandId inputId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitLoadStringAtResult(StringOperandId strId,
                                                Int32OperandId indexId,
                                                bool handleOOB) {
  MOZ_CRASH("Call ICs not used in ion");
}