/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/IonCacheIRCompiler.h"

#include "mozilla/Maybe.h"

#include "jit/CacheIRCompiler.h"
#include "jit/CacheIRWriter.h"
#include "jit/IonIC.h"
#include "jit/JitcodeMap.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/JSJitFrameIter.h"
#include "jit/Linker.h"
#include "jit/SharedICHelpers.h"
#include "jit/VMFunctions.h"
#include "proxy/DeadObjectProxy.h"
#include "proxy/Proxy.h"
#include "util/Memory.h"
#include "vm/StaticStrings.h"

#include "jit/JSJitFrameIter-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/VMFunctionList-inl.h"
using namespace js::jit;

namespace JS {
struct ExpandoAndGeneration;
}  // namespace JS

using JS::ExpandoAndGeneration;
// IonCacheIRCompiler compiles CacheIR to IonIC native code.
IonCacheIRCompiler::IonCacheIRCompiler(JSContext* cx, TempAllocator& alloc,
                                       const CacheIRWriter& writer, IonIC* ic,
                                       IonScript* ionScript,
                                       uint32_t stubDataOffset)
    : CacheIRCompiler(cx, alloc, writer, stubDataOffset, Mode::Ion,
                      StubFieldPolicy::Constant),
      writer_(writer),
      ic_(ic),
      ionScript_(ionScript),
      savedLiveRegs_(false),
      localTracingSlots_(0),
      perfSpewer_(ic->pc()) {
  MOZ_ASSERT(ionScript_);
}
template <typename T>
T IonCacheIRCompiler::rawPointerStubField(uint32_t offset) {
  static_assert(sizeof(T) == sizeof(uintptr_t), "T must have pointer size");
  return (T)readStubWord(offset, StubField::Type::RawPointer);
}

template <typename T>
T IonCacheIRCompiler::rawInt64StubField(uint32_t offset) {
  static_assert(sizeof(T) == sizeof(int64_t), "T must have int64 size");
  return (T)readStubInt64(offset, StubField::Type::RawInt64);
}
template <typename Fn, Fn fn>
void IonCacheIRCompiler::callVM(MacroAssembler& masm) {
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  callVMInternal(masm, id);
}
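
// NOTE: callVM resolves the VM function statically: VMFunctionToId<Fn, fn>
// maps the function pointer to its VMFunctionId at compile time, and
// callVMInternal then emits the call through the shared wrapper for that id.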
void IonCacheIRCompiler::pushStubCodePointer() {
  stubJitCodeOffset_.emplace(masm.PushWithPatch(ImmPtr((void*)-1)));
}
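
// NOTE: pushStubCodePointer pushes a (void*)-1 placeholder; compile() later
// patches it with the new stub's JitCode* (see the stubJitCodeOffset_ patch at
// the end of compile()) so the stub code can be traced from the frame.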
// AutoSaveLiveRegisters must be used when we make a call that can GC. The
// constructor ensures all live registers are stored on the stack (where the GC
// expects them) and the destructor restores these registers.
AutoSaveLiveRegisters::AutoSaveLiveRegisters(IonCacheIRCompiler& compiler)
    : compiler_(compiler) {
  MOZ_ASSERT(compiler_.liveRegs_.isSome());
  MOZ_ASSERT(compiler_.ic_);
  compiler_.allocator.saveIonLiveRegisters(
      compiler_.masm, compiler_.liveRegs_.ref(),
      compiler_.ic_->scratchRegisterForEntryJump(), compiler_.ionScript_);
  compiler_.savedLiveRegs_ = true;
}

AutoSaveLiveRegisters::~AutoSaveLiveRegisters() {
  MOZ_ASSERT(compiler_.stubJitCodeOffset_.isSome(),
             "Must have pushed JitCode* pointer");
  compiler_.allocator.restoreIonLiveRegisters(compiler_.masm,
                                              compiler_.liveRegs_.ref());
  MOZ_ASSERT(compiler_.masm.framePushed() == compiler_.ionScript_->frameSize());
}
void CacheRegisterAllocator::saveIonLiveRegisters(MacroAssembler& masm,
                                                  LiveRegisterSet liveRegs,
                                                  Register scratch,
                                                  IonScript* ionScript) {
  // We have to push all registers in liveRegs on the stack. It's possible we
  // stored other values in our live registers and stored operands on the
  // stack (where our live registers should go), so this requires some careful
  // work. Try to keep it simple by taking one small step at a time.

  // Step 1. Discard any dead operands so we can reuse their registers.
  freeDeadOperandLocations(masm);

  // Step 2. Figure out the size of our live regs. This is consistent with
  // the fact that we're using storeRegsInMask to generate the save code and
  // PopRegsInMask to generate the restore code.
  size_t sizeOfLiveRegsInBytes =
      MacroAssembler::PushRegsInMaskSizeInBytes(liveRegs);

  MOZ_ASSERT(sizeOfLiveRegsInBytes > 0);

  // Step 3. Ensure all non-input operands are on the stack.
  size_t numInputs = writer_.numInputOperands();
  for (size_t i = numInputs; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.isInRegister()) {
      spillOperandToStack(masm, &loc);
    }
  }

  // Step 4. Restore the register state, but don't discard the stack as
  // non-input operands are stored there.
  restoreInputState(masm, /* shouldDiscardStack = */ false);

  // We just restored the input state, so no input operands should be stored
  // on the stack.
#ifdef DEBUG
  for (size_t i = 0; i < numInputs; i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(!loc.isOnStack());
  }
#endif

  // Step 5. At this point our register state is correct. Stack values,
  // however, may cover the space where we have to store the live registers.
  // Move them out of the way.

  bool hasOperandOnStack = false;
  for (size_t i = numInputs; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (!loc.isOnStack()) {
      continue;
    }

    hasOperandOnStack = true;

    size_t operandSize = loc.stackSizeInBytes();
    size_t operandStackPushed = loc.stackPushed();
    MOZ_ASSERT(operandSize > 0);
    MOZ_ASSERT(stackPushed_ >= operandStackPushed);
    MOZ_ASSERT(operandStackPushed >= operandSize);

    // If this operand doesn't cover the live register space, there's
    // nothing to do.
    if (operandStackPushed - operandSize >= sizeOfLiveRegsInBytes) {
      MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
      continue;
    }

    // Reserve stack space for the live registers if needed.
    if (sizeOfLiveRegsInBytes > stackPushed_) {
      size_t extraBytes = sizeOfLiveRegsInBytes - stackPushed_;
      MOZ_ASSERT((extraBytes % sizeof(uintptr_t)) == 0);
      masm.subFromStackPtr(Imm32(extraBytes));
      stackPushed_ += extraBytes;
    }

    // Push the operand below the live register space.
    if (loc.kind() == OperandLocation::PayloadStack) {
      masm.push(
          Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
      stackPushed_ += operandSize;
      loc.setPayloadStack(stackPushed_, loc.payloadType());
      continue;
    }
    MOZ_ASSERT(loc.kind() == OperandLocation::ValueStack);
    masm.pushValue(
        Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
    stackPushed_ += operandSize;
    loc.setValueStack(stackPushed_);
  }

  // Step 6. If we have any operands on the stack, adjust their stackPushed
  // values to not include sizeOfLiveRegsInBytes (this simplifies code down
  // the line). Then push/store the live registers.
  if (hasOperandOnStack) {
    MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
    stackPushed_ -= sizeOfLiveRegsInBytes;

    for (size_t i = numInputs; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.isOnStack()) {
        loc.adjustStackPushed(-int32_t(sizeOfLiveRegsInBytes));
      }
    }

    size_t stackBottom = stackPushed_ + sizeOfLiveRegsInBytes;
    masm.storeRegsInMask(liveRegs, Address(masm.getStackPointer(), stackBottom),
                         scratch);
    masm.setFramePushed(masm.framePushed() + sizeOfLiveRegsInBytes);
  } else {
    // If no operands are on the stack, discard the unused stack space.
    if (stackPushed_ > 0) {
      masm.addToStackPtr(Imm32(stackPushed_));
      stackPushed_ = 0;
    }
    masm.PushRegsInMask(liveRegs);
  }
  freePayloadSlots_.clear();
  freeValueSlots_.clear();

  MOZ_ASSERT(masm.framePushed() ==
             ionScript->frameSize() + sizeOfLiveRegsInBytes);

  // Step 7. All live registers and non-input operands are stored on the stack
  // now, so at this point all registers except for the input registers are
  // available.
  availableRegs_.set() = GeneralRegisterSet::Not(inputRegisterSet());
  availableRegsAfterSpill_.set() = GeneralRegisterSet();

  // Step 8. We restored our input state, so we have to fix up aliased input
  // registers.
  fixupAliasedInputs(masm);
}
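
// NOTE: On return from saveIonLiveRegisters, the saved live registers sit
// directly below the Ion frame and any spilled non-input operands sit below
// them; stackPushed_ tracks only the operand space, which is why the assertion
// above compares framePushed() against frameSize() + sizeOfLiveRegsInBytes.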
void CacheRegisterAllocator::restoreIonLiveRegisters(MacroAssembler& masm,
                                                     LiveRegisterSet liveRegs) {
  masm.PopRegsInMask(liveRegs);

  availableRegs_.set() = GeneralRegisterSet();
  availableRegsAfterSpill_.set() = GeneralRegisterSet::All();
}
static void* GetReturnAddressToIonCode(JSContext* cx) {
  JSJitFrameIter frame(cx->activation()->asJit());
  MOZ_ASSERT(frame.type() == FrameType::Exit,
             "An exit frame is expected as update functions are called with a "
             "VMFunction.");

  void* returnAddr = frame.returnAddress();
#ifdef DEBUG
  ++frame;
  MOZ_ASSERT(frame.isIonJS());
#endif

  return returnAddr;
}
// The AutoSaveLiveRegisters parameter is used to ensure registers were saved
void IonCacheIRCompiler::enterStubFrame(MacroAssembler& masm,
                                        const AutoSaveLiveRegisters&) {
  MOZ_ASSERT(!enteredStubFrame_);
  pushStubCodePointer();
  masm.PushFrameDescriptor(FrameType::IonJS);
  masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));

  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  enteredStubFrame_ = true;
}
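
// NOTE: enterStubFrame builds the frame that IonICCallFrameLayout describes:
// the stub JitCode* (patched later), a descriptor for the calling IonJS
// frame, the return address into Ion code, and the saved frame pointer, which
// then becomes the frame pointer for the stub frame.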
void IonCacheIRCompiler::storeTracedValue(MacroAssembler& masm,
                                          ValueOperand value) {
  MOZ_ASSERT(localTracingSlots_ < 255);
  masm.Push(value);
  localTracingSlots_++;
}

void IonCacheIRCompiler::loadTracedValue(MacroAssembler& masm,
                                         uint8_t slotIndex,
                                         ValueOperand value) {
  MOZ_ASSERT(slotIndex <= localTracingSlots_);
  int32_t offset = IonICCallFrameLayout::LocallyTracedValueOffset +
                   slotIndex * sizeof(Value);
  masm.loadValue(Address(FramePointer, -offset), value);
}
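
// NOTE: Traced values are pushed right after the stub frame is entered, so
// slot i lives at a fixed negative offset from the frame pointer
// (LocallyTracedValueOffset + i * sizeof(Value)); setLocalTracingSlots() in
// compile() records how many such slots the GC must trace.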
bool IonCacheIRCompiler::init() {
  if (!allocator.init()) {
    return false;
  }

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(numInputs == NumInputsForCacheKind(ic_->kind()));

  AllocatableGeneralRegisterSet available;

  switch (ic_->kind()) {
    case CacheKind::GetProp:
    case CacheKind::GetElem: {
      IonGetPropertyIC* ic = ic_->asGetPropertyIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 1 || numInputs == 2);

      allocator.initInputLocation(0, ic->value());
      if (numInputs > 1) {
        allocator.initInputLocation(1, ic->id());
      }
      break;
    }
    case CacheKind::GetPropSuper:
    case CacheKind::GetElemSuper: {
      IonGetPropSuperIC* ic = ic_->asGetPropSuperIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 2 || numInputs == 3);

      allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);

      if (ic->kind() == CacheKind::GetPropSuper) {
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(1, ic->receiver());
      } else {
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(1, ic->id());
        allocator.initInputLocation(2, ic->receiver());
      }
      break;
    }
    case CacheKind::SetProp:
    case CacheKind::SetElem: {
      IonSetPropertyIC* ic = ic_->asSetPropertyIC();

      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());

      allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);

      if (ic->kind() == CacheKind::SetProp) {
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(1, ic->rhs());
      } else {
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(1, ic->id());
        allocator.initInputLocation(2, ic->rhs());
      }
      break;
    }
    case CacheKind::GetName: {
      IonGetNameIC* ic = ic_->asGetNameIC();
      ValueOperand output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::BindName: {
      IonBindNameIC* ic = ic_->asBindNameIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Object, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::GetIterator: {
      IonGetIteratorIC* ic = ic_->asGetIteratorIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp1());
      available.add(ic->temp2());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Object, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->value());
      break;
    }
    case CacheKind::OptimizeSpreadCall: {
      auto* ic = ic_->asOptimizeSpreadCallIC();
      ValueOperand output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->value());
      break;
    }
    case CacheKind::In: {
      IonInIC* ic = ic_->asInIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->key());
      allocator.initInputLocation(
          1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->object())));
      break;
    }
    case CacheKind::HasOwn: {
      IonHasOwnIC* ic = ic_->asHasOwnIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->id());
      allocator.initInputLocation(1, ic->value());
      break;
    }
    case CacheKind::CheckPrivateField: {
      IonCheckPrivateFieldIC* ic = ic_->asCheckPrivateFieldIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->value());
      allocator.initInputLocation(1, ic->id());
      break;
    }
    case CacheKind::InstanceOf: {
      IonInstanceOfIC* ic = ic_->asInstanceOfIC();
      Register output = ic->output();
      available.add(output);
      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(
          1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->rhs())));
      break;
    }
    case CacheKind::ToPropertyKey: {
      IonToPropertyKeyIC* ic = ic_->asToPropertyKeyIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->input());
      break;
    }
    case CacheKind::UnaryArith: {
      IonUnaryArithIC* ic = ic_->asUnaryArithIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->input());
      break;
    }
    case CacheKind::BinaryArith: {
      IonBinaryArithIC* ic = ic_->asBinaryArithIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(1, ic->rhs());
      break;
    }
    case CacheKind::Compare: {
      IonCompareIC* ic = ic_->asCompareIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(1, ic->rhs());
      break;
    }
    case CacheKind::CloseIter: {
      IonCloseIterIC* ic = ic_->asCloseIterIC();

      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      allocator.initInputLocation(0, ic->iter(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::OptimizeGetIterator: {
      auto* ic = ic_->asOptimizeGetIteratorIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->value());
      break;
    }
    case CacheKind::Call:
    case CacheKind::TypeOf:
    case CacheKind::ToBool:
    case CacheKind::GetIntrinsic:
    case CacheKind::NewArray:
    case CacheKind::NewObject:
      MOZ_CRASH("Unsupported IC");
  }

  liveFloatRegs_ = LiveFloatRegisterSet(liveRegs_->fpus());

  allocator.initAvailableRegs(available);
  allocator.initAvailableRegsAfterSpill();
  return true;
}
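
// NOTE: init() seeds the allocator with the IC's output and temp registers as
// the only immediately-available registers; everything else is either an
// input or must be spilled first (see initAvailableRegsAfterSpill above).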
JitCode* IonCacheIRCompiler::compile(IonICStub* stub) {
  AutoCreatedBy acb(masm, "IonCacheIRCompiler::compile");

  masm.setFramePushed(ionScript_->frameSize());
  if (cx_->runtime()->geckoProfiler().enabled()) {
    masm.enableProfilingInstrumentation();
  }

  allocator.fixupAliasedInputs(masm);

  CacheIRReader reader(writer_);
  do {
    CacheOp op = reader.readOp();
    perfSpewer_.recordInstruction(masm, op);
    switch (op) {
#define DEFINE_OP(op, ...)                 \
  case CacheOp::op:                        \
    if (!emit##op(reader)) return nullptr; \
    break;
      CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

      default:
        MOZ_CRASH("Invalid op");
    }
    allocator.nextOp();
  } while (reader.more());

  masm.assumeUnreachable("Should have returned from IC");

  // Done emitting the main IC code. Now emit the failure paths.
  for (size_t i = 0; i < failurePaths.length(); i++) {
    if (!emitFailurePath(i)) {
      return nullptr;
    }
    Register scratch = ic_->scratchRegisterForEntryJump();
    CodeOffset offset = masm.movWithPatch(ImmWord(-1), scratch);
    masm.jump(Address(scratch, 0));
    if (!nextCodeOffsets_.append(offset)) {
      return nullptr;
    }
  }

  Linker linker(masm);
  Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Ion));
  if (!newStubCode) {
    cx_->recoverFromOutOfMemory();
    return nullptr;
  }

  newStubCode->setLocalTracingSlots(localTracingSlots_);

  for (CodeOffset offset : nextCodeOffsets_) {
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(newStubCode, offset),
                                       ImmPtr(stub->nextCodeRawPtr()),
                                       ImmPtr((void*)-1));
  }
  if (stubJitCodeOffset_) {
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(newStubCode, *stubJitCodeOffset_),
        ImmPtr(newStubCode.get()), ImmPtr((void*)-1));
  }

  return newStubCode;
}
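
// NOTE: compile() links stubs into a chain: each failure path ends with a
// movWithPatch(ImmWord(-1)) + jump that is later patched to point at
// stub->nextCodeRawPtr(), i.e. the code to try next when this stub's guards
// fail.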
#ifdef DEBUG
void IonCacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
  switch (ic_->kind()) {
    case CacheKind::GetProp:
    case CacheKind::GetElem:
    case CacheKind::GetPropSuper:
    case CacheKind::GetElemSuper:
    case CacheKind::GetName:
    case CacheKind::BindName:
    case CacheKind::GetIterator:
    case CacheKind::In:
    case CacheKind::HasOwn:
    case CacheKind::CheckPrivateField:
    case CacheKind::InstanceOf:
    case CacheKind::UnaryArith:
    case CacheKind::ToPropertyKey:
    case CacheKind::OptimizeSpreadCall:
    case CacheKind::CloseIter:
    case CacheKind::OptimizeGetIterator:
      MOZ_CRASH("No float registers available");
    case CacheKind::SetProp:
    case CacheKind::SetElem:
      // FloatReg0 is available per LIRGenerator::visitSetPropertyCache.
      MOZ_ASSERT(reg == FloatReg0);
      break;
    case CacheKind::BinaryArith:
    case CacheKind::Compare:
      // FloatReg0 and FloatReg1 are available per
      // LIRGenerator::visitBinaryCache.
      MOZ_ASSERT(reg == FloatReg0 || reg == FloatReg1);
      break;
    case CacheKind::Call:
    case CacheKind::TypeOf:
    case CacheKind::ToBool:
    case CacheKind::GetIntrinsic:
    case CacheKind::NewArray:
    case CacheKind::NewObject:
      MOZ_CRASH("Unsupported IC");
  }
}
#endif
bool IonCacheIRCompiler::emitGuardShape(ObjOperandId objId,
                                        uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Shape* shape = weakShapeStubField(shapeOffset);

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  Maybe<AutoScratchRegister> maybeScratch;
  if (needSpectreMitigations) {
    maybeScratch.emplace(allocator, masm);
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (needSpectreMitigations) {
    masm.branchTestObjShape(Assembler::NotEqual, obj, shape, *maybeScratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj, shape,
                                                failure->label());
  }

  return true;
}
bool IonCacheIRCompiler::emitGuardProto(ObjOperandId objId,
                                        uint32_t protoOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  JSObject* proto = weakObjectStubField(protoOffset);

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjProto(obj, scratch);
  masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(proto),
                 failure->label());
  return true;
}
bool IonCacheIRCompiler::emitGuardCompartment(ObjOperandId objId,
                                              uint32_t globalOffset,
                                              uint32_t compartmentOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  JSObject* globalWrapper = objectStubField(globalOffset);
  JS::Compartment* compartment = compartmentStubField(compartmentOffset);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Verify that the global wrapper is still valid, as
  // it is pre-requisite for doing the compartment check.
  masm.movePtr(ImmGCPtr(globalWrapper), scratch);
  Address handlerAddr(scratch, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::Equal, handlerAddr,
                 ImmPtr(&DeadObjectProxy::singleton), failure->label());

  masm.branchTestObjCompartment(Assembler::NotEqual, obj, compartment, scratch,
                                failure->label());
  return true;
}
bool IonCacheIRCompiler::emitGuardAnyClass(ObjOperandId objId,
                                           uint32_t claspOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  const JSClass* clasp = classStubField(claspOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (objectGuardNeedsSpectreMitigations(objId)) {
    masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                                scratch, failure->label());
  }

  return true;
}
bool IonCacheIRCompiler::emitGuardHasProxyHandler(ObjOperandId objId,
                                                  uint32_t handlerOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  const void* handler = proxyHandlerStubField(handlerOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address handlerAddr(obj, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::NotEqual, handlerAddr, ImmPtr(handler),
                 failure->label());
  return true;
}
bool IonCacheIRCompiler::emitGuardSpecificObject(ObjOperandId objId,
                                                 uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  JSObject* expected = weakObjectStubField(expectedOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchPtr(Assembler::NotEqual, obj, ImmGCPtr(expected),
                 failure->label());
  return true;
}
bool IonCacheIRCompiler::emitGuardSpecificFunction(
    ObjOperandId objId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
  return emitGuardSpecificObject(objId, expectedOffset);
}
bool IonCacheIRCompiler::emitGuardSpecificAtom(StringOperandId strId,
                                               uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  AutoScratchRegister scratch(allocator, masm);

  JSAtom* atom = &stringStubField(expectedOffset)->asAtom();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch);

  masm.guardSpecificAtom(str, atom, scratch, volatileRegs, failure->label());
  return true;
}
bool IonCacheIRCompiler::emitGuardSpecificSymbol(SymbolOperandId symId,
                                                 uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register sym = allocator.useRegister(masm, symId);
  JS::Symbol* expected = symbolStubField(expectedOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchPtr(Assembler::NotEqual, sym, ImmGCPtr(expected),
                 failure->label());
  return true;
}
bool IonCacheIRCompiler::emitLoadValueResult(uint32_t valOffset) {
  MOZ_CRASH("Baseline-specific op");
}
bool IonCacheIRCompiler::emitLoadFixedSlotResult(ObjOperandId objId,
                                                 uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  masm.loadTypedOrValue(Address(obj, offset), output);
  return true;
}
bool IonCacheIRCompiler::emitLoadFixedSlotTypedResult(ObjOperandId objId,
                                                      uint32_t offsetOffset,
                                                      ValueType) {
  MOZ_CRASH("Call ICs not used in ion");
}
bool IonCacheIRCompiler::emitLoadDynamicSlotResult(ObjOperandId objId,
                                                   uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
  masm.loadTypedOrValue(Address(scratch, offset), output);
  return true;
}
bool IonCacheIRCompiler::emitCallScriptedGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
    uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);

  JSFunction* target = &objectStubField(getterOffset)->as<JSFunction>();
  AutoScratchRegister scratch(allocator, masm);

  MOZ_ASSERT(sameRealm == (cx_->realm() == target->realm()));

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  enterStubFrame(masm, save);

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push the
  // |this| + argument Values.
  uint32_t argSize = (target->nargs() + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  for (size_t i = 0; i < target->nargs(); i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(receiver);

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  masm.movePtr(ImmGCPtr(target), scratch);

  masm.Push(scratch);
  masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 0);

  // Check stack alignment. Add 2 * sizeof(uintptr_t) for the return address and
  // frame pointer pushed by the call/callee.
  MOZ_ASSERT(
      ((masm.framePushed() + 2 * sizeof(uintptr_t)) % JitStackAlignment) == 0);

  MOZ_ASSERT(target->hasJitEntry());
  masm.loadJitCodeRaw(scratch, scratch);
  masm.callJit(scratch);

  if (!sameRealm) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "ReturnReg available as scratch after scripted calls");
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  masm.storeCallResultValue(output);

  // Restore the frame pointer and stack pointer.
  masm.loadPtr(Address(FramePointer, 0), FramePointer);
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}
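
// A worked example of the alignment computation above (assuming a platform
// where JitStackAlignment == 16): if masm.framePushed() + argSize == 40,
// ComputeByteAlignment returns 8, so the padded total of 48 is a multiple of
// 16 before the JitFrameLayout itself is pushed.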
template <typename IdType>
bool IonCacheIRCompiler::emitCallScriptedProxyGetShared(ValOperandId targetId,
                                                        ObjOperandId receiverId,
                                                        ObjOperandId handlerId,
                                                        uint32_t trapOffset,
                                                        IdType id) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand target = allocator.useValueRegister(masm, targetId);
  Register receiver = allocator.useRegister(masm, receiverId);
  Register handler = allocator.useRegister(masm, handlerId);
  ValueOperand idVal;
  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    idVal = allocator.useValueRegister(masm, id);
  }

  JSFunction* trap = &objectStubField(trapOffset)->as<JSFunction>();
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  ValueOperand scratchVal(scratch);
  ValueOperand scratchVal2(scratch2);

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  enterStubFrame(masm, save);

  // We need to keep the target around to potentially validate the proxy result
  storeTracedValue(masm, target);
  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    // Same for the id, assuming it's not baked in
    storeTracedValue(masm, idVal);
  }
  uint32_t framePushedBeforeArgs = masm.framePushed();

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push the
  // |this| + argument Values.
  uint32_t argSize = (std::max(trap->nargs(), (size_t)3) + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  for (size_t i = 3; i < trap->nargs(); i++) {
    masm.Push(UndefinedValue());
  }

  masm.tagValue(JSVAL_TYPE_OBJECT, receiver, scratchVal);
  masm.Push(scratchVal);

  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    masm.Push(idVal);
  } else {
    masm.movePropertyKey(idStubField(id), scratch);
    masm.tagValue(JSVAL_TYPE_STRING, scratch, scratchVal);
    masm.Push(scratchVal);
  }

  masm.Push(target);

  masm.tagValue(JSVAL_TYPE_OBJECT, handler, scratchVal);
  masm.Push(scratchVal);

  masm.movePtr(ImmGCPtr(trap), scratch);

  masm.Push(scratch);
  masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 3);

  // Check stack alignment. Add 2 * sizeof(uintptr_t) for the return address and
  // frame pointer pushed by the call/callee.
  MOZ_ASSERT(
      ((masm.framePushed() + 2 * sizeof(uintptr_t)) % JitStackAlignment) == 0);

  MOZ_ASSERT(trap->hasJitEntry());
  masm.loadJitCodeRaw(scratch, scratch);
  masm.callJit(scratch);

  masm.storeCallResultValue(output);

  Label success;
  loadTracedValue(masm, 0, scratchVal);
  masm.unboxObject(scratchVal, scratch);
  masm.branchTestObjectNeedsProxyResultValidation(Assembler::Zero, scratch,
                                                  scratch2, &success);

  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    loadTracedValue(masm, 1, scratchVal2);
  } else {
    masm.moveValue(StringValue(idStubField(id).toString()), scratchVal2);
  }

  uint32_t framePushedAfterCall = masm.framePushed();
  masm.freeStack(masm.framePushed() - framePushedBeforeArgs);

  masm.Push(output.valueReg());
  masm.Push(scratchVal2);
  masm.Push(scratch);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      MutableHandleValue);
  callVM<Fn, CheckProxyGetByValueResult>(masm);

  masm.storeCallResultValue(output);

  masm.bind(&success);
  masm.setFramePushed(framePushedAfterCall);

  // Restore the frame pointer and stack pointer.
  masm.loadPtr(Address(FramePointer, 0), FramePointer);
  masm.freeStack(masm.framePushed() - framePushedBefore);

  return true;
}
bool IonCacheIRCompiler::emitCallScriptedProxyGetResult(
    ValOperandId targetId, ObjOperandId receiverId, ObjOperandId handlerId,
    uint32_t trapOffset, uint32_t id, uint32_t nargsAndFlags) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitCallScriptedProxyGetShared(targetId, receiverId, handlerId,
                                        trapOffset, id);
}

bool IonCacheIRCompiler::emitCallScriptedProxyGetByValueResult(
    ValOperandId targetId, ObjOperandId receiverId, ObjOperandId handlerId,
    ValOperandId idId, uint32_t trapOffset, uint32_t nargsAndFlags) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitCallScriptedProxyGetShared(targetId, receiverId, handlerId,
                                        trapOffset, idId);
}
bool IonCacheIRCompiler::emitCallInlinedGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, uint32_t icScriptOffset,
    bool sameRealm, uint32_t nargsAndFlagsOffset) {
  MOZ_CRASH("Trial inlining not supported in Ion");
}
bool IonCacheIRCompiler::emitCallNativeGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
    uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);

  JSFunction* target = &objectStubField(getterOffset)->as<JSFunction>();
  MOZ_ASSERT(target->isNativeFun());

  AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType argUintN(allocator, masm, output);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Construct vp array:
  // Push receiver value for |this|
  masm.Push(receiver);
  // Push callee/outparam.
  masm.Push(ObjectValue(*target));

  // Preload arguments into registers.
  masm.loadJSContext(argJSContext);
  masm.move32(Imm32(0), argUintN);
  masm.moveStackPtrTo(argVp.get());

  // Push marking data for later use.
  masm.Push(argUintN);
  pushStubCodePointer();

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  // Construct and execute call.
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argUintN);
  masm.passABIArg(argVp);
  masm.callWithABI(DynamicFunction<JSNative>(target->native()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  if (!sameRealm) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  Address outparam(masm.getStackPointer(),
                   IonOOLNativeExitFrameLayout::offsetOfResult());
  masm.loadValue(outparam, output.valueReg());

  if (JitOptions.spectreJitToCxxCalls) {
    masm.speculationBarrier();
  }

  masm.adjustStack(IonOOLNativeExitFrameLayout::Size(0));
  return true;
}
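
// NOTE: IonOOLNativeExitFrameLayout::Size(0) matches the vp array built
// above: the getter call passes no explicit arguments, only the |this| Value
// and the callee/outparam Value (compare emitCallNativeSetter below, which
// passes one argument and therefore pops with Size(1)).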
bool IonCacheIRCompiler::emitCallDOMGetterResult(ObjOperandId objId,
                                                 uint32_t jitInfoOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);

  const JSJitInfo* info = rawPointerStubField<const JSJitInfo*>(jitInfoOffset);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(obj);
  masm.Push(ImmPtr(info));

  using Fn =
      bool (*)(JSContext*, const JSJitInfo*, HandleObject, MutableHandleValue);
  callVM<Fn, jit::CallDOMGetter>(masm);

  masm.storeCallResultValue(output);
  return true;
}
bool IonCacheIRCompiler::emitCallDOMSetter(ObjOperandId objId,
                                           uint32_t jitInfoOffset,
                                           ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  const JSJitInfo* info = rawPointerStubField<const JSJitInfo*>(jitInfoOffset);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(val);
  masm.Push(obj);
  masm.Push(ImmPtr(info));

  using Fn = bool (*)(JSContext*, const JSJitInfo*, HandleObject, HandleValue);
  callVM<Fn, jit::CallDOMSetter>(masm);
  return true;
}
bool IonCacheIRCompiler::emitProxyGetResult(ObjOperandId objId,
                                            uint32_t idOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  jsid id = idStubField(idOffset);

  // ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id,
  //                  MutableHandleValue vp)
  AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType argProxy(allocator, masm, output);
  AutoScratchRegister argId(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  // Push stubCode for marking.
  pushStubCodePointer();

  // Push args on stack first so we can take pointers to make handles.
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(argVp.get());

  masm.Push(id, scratch);
  masm.moveStackPtrTo(argId.get());

  // Push the proxy. Also used as receiver.
  masm.Push(obj);
  masm.moveStackPtrTo(argProxy.get());

  masm.loadJSContext(argJSContext);

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLProxy);

  // Make the call.
  using Fn = bool (*)(JSContext* cx, HandleObject proxy, HandleId id,
                      MutableHandleValue vp);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argProxy);
  masm.passABIArg(argId);
  masm.passABIArg(argVp);
  masm.callWithABI<Fn, ProxyGetProperty>(
      ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Load the outparam vp[0] into output register(s).
  Address outparam(masm.getStackPointer(),
                   IonOOLProxyExitFrameLayout::offsetOfResult());
  masm.loadValue(outparam, output.valueReg());

  // Spectre mitigation in case of speculative execution within C++ code.
  if (JitOptions.spectreJitToCxxCalls) {
    masm.speculationBarrier();
  }

  // masm.leaveExitFrame & pop locals
  masm.adjustStack(IonOOLProxyExitFrameLayout::Size());
  return true;
}
bool IonCacheIRCompiler::emitFrameIsConstructingResult() {
  MOZ_CRASH("Baseline-specific op");
}
bool IonCacheIRCompiler::emitLoadConstantStringResult(uint32_t strOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  MOZ_CRASH("not used in ion");
}
bool IonCacheIRCompiler::emitCompareStringResult(JSOp op, StringOperandId lhsId,
                                                 StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register left = allocator.useRegister(masm, lhsId);
  Register right = allocator.useRegister(masm, rhsId);

  allocator.discardStack(masm);

  Label slow, done;
  MOZ_ASSERT(!output.hasValue());
  masm.compareStrings(op, left, right, output.typedReg().gpr(), &slow);

  masm.jump(&done);
  masm.bind(&slow);

  enterStubFrame(masm, save);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.Push(left);
    masm.Push(right);
  } else {
    masm.Push(right);
    masm.Push(left);
  }

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    callVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(masm);
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    callVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(masm);
  } else if (op == JSOp::Lt || op == JSOp::Gt) {
    callVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(masm);
  } else {
    MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
    callVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(masm);
  }

  masm.storeCallBoolResult(output.typedReg().gpr());

  masm.bind(&done);
  return true;
}
bool IonCacheIRCompiler::emitStoreFixedSlot(ObjOperandId objId,
                                            uint32_t offsetOffset,
                                            ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);

  Address slot(obj, offset);
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeConstantOrRegister(val, slot);
  emitPostBarrierSlot(obj, val, scratch);
  return true;
}
bool IonCacheIRCompiler::emitStoreDynamicSlot(ObjOperandId objId,
                                              uint32_t offsetOffset,
                                              ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);

  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
  Address slot(scratch, offset);
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeConstantOrRegister(val, slot);
  emitPostBarrierSlot(obj, val, scratch);
  return true;
}
bool IonCacheIRCompiler::emitAddAndStoreSlotShared(
    CacheOp op, ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
    uint32_t newShapeOffset, Maybe<uint32_t> numNewSlotsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch1(allocator, masm);

  Maybe<AutoScratchRegister> scratch2;
  if (op == CacheOp::AllocateAndStoreDynamicSlot) {
    scratch2.emplace(allocator, masm);
  }

  Shape* newShape = shapeStubField(newShapeOffset);

  if (op == CacheOp::AllocateAndStoreDynamicSlot) {
    // We have to (re)allocate dynamic slots. Do this first, as it's the
    // only fallible operation here. Note that growSlotsPure is
    // fallible but does not GC.

    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    int32_t numNewSlots = int32StubField(*numNewSlotsOffset);
    MOZ_ASSERT(numNewSlots > 0);

    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
    masm.setupUnalignedABICall(scratch1);
    masm.loadJSContext(scratch1);
    masm.passABIArg(scratch1);
    masm.passABIArg(obj);
    masm.move32(Imm32(numNewSlots), scratch2.ref());
    masm.passABIArg(scratch2.ref());
    masm.callWithABI<Fn, NativeObject::growSlotsPure>();
    masm.storeCallPointerResult(scratch1);

    LiveRegisterSet ignore;
    ignore.add(scratch1);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.branchIfFalseBool(scratch1, failure->label());
  }

  // Update the object's shape.
  masm.storeObjShape(newShape, obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  if (op == CacheOp::AddAndStoreFixedSlot) {
    Address slot(obj, offset);
    masm.storeConstantOrRegister(val, slot);
  } else {
    MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
               op == CacheOp::AllocateAndStoreDynamicSlot);
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
    Address slot(scratch1, offset);
    masm.storeConstantOrRegister(val, slot);
  }

  emitPostBarrierSlot(obj, val, scratch1);
  return true;
}
bool IonCacheIRCompiler::emitAddAndStoreFixedSlot(ObjOperandId objId,
                                                  uint32_t offsetOffset,
                                                  ValOperandId rhsId,
                                                  uint32_t newShapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
  return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot, objId,
                                   offsetOffset, rhsId, newShapeOffset,
                                   numNewSlotsOffset);
}

bool IonCacheIRCompiler::emitAddAndStoreDynamicSlot(ObjOperandId objId,
                                                    uint32_t offsetOffset,
                                                    ValOperandId rhsId,
                                                    uint32_t newShapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
  return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot, objId,
                                   offsetOffset, rhsId, newShapeOffset,
                                   numNewSlotsOffset);
}

bool IonCacheIRCompiler::emitAllocateAndStoreDynamicSlot(
    ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
    uint32_t newShapeOffset, uint32_t numNewSlotsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot, objId,
                                   offsetOffset, rhsId, newShapeOffset,
                                   mozilla::Some(numNewSlotsOffset));
}
bool IonCacheIRCompiler::emitLoadStringCharResult(StringOperandId strId,
                                                  Int32OperandId indexId,
                                                  bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check, load string char.
  Label done;
  Label loadFailed;
  if (!handleOOB) {
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, failure->label());
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
                        failure->label());
  } else {
    // Return the empty string for out-of-bounds access.
    masm.movePtr(ImmGCPtr(cx_->runtime()->emptyString), scratch2);

    // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
    // guaranteed to see no nested ropes.
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, &done);
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);
  }

  // Load StaticString for this char. For larger code units perform a VM call.
  Label vmCall;
  masm.lookupStaticString(scratch1, scratch2, cx_->staticStrings(), &vmCall);
  masm.jump(&done);

  if (handleOOB) {
    masm.bind(&loadFailed);
    masm.assumeUnreachable("loadStringChar can't fail for linear strings");
  }

  {
    masm.bind(&vmCall);

    // FailurePath and AutoSaveLiveRegisters don't get along very well. Both are
    // modifying the stack and expect that no other stack manipulations are
    // made. Therefore we need to use an ABI call instead of a VM call here.

    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch1);
    volatileRegs.takeUnchecked(scratch2);
    volatileRegs.takeUnchecked(scratch3);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSContext* cx, int32_t code);
    masm.setupUnalignedABICall(scratch2);
    masm.loadJSContext(scratch2);
    masm.passABIArg(scratch2);
    masm.passABIArg(scratch1);
    masm.callWithABI<Fn, jit::StringFromCharCodeNoGC>();
    masm.storeCallPointerResult(scratch2);

    masm.PopRegsInMask(volatileRegs);

    masm.branchPtr(Assembler::Equal, scratch2, ImmWord(0), failure->label());
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_STRING, scratch2, output.valueReg());
  return true;
}
bool IonCacheIRCompiler::emitCallNativeSetter(ObjOperandId receiverId,
                                              uint32_t setterOffset,
                                              ValOperandId rhsId,
                                              bool sameRealm,
                                              uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register receiver = allocator.useRegister(masm, receiverId);
  JSFunction* target = &objectStubField(setterOffset)->as<JSFunction>();
  MOZ_ASSERT(target->isNativeFun());
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister argJSContext(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister argUintN(allocator, masm);
#ifndef JS_CODEGEN_X86
  AutoScratchRegister scratch(allocator, masm);
#else
  // Not enough registers on x86.
  Register scratch = argUintN;
#endif

  allocator.discardStack(masm);

  // Set up the call:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // vp[0] is callee/outparam
  // vp[1] is |this|
  // vp[2] is the value

  // Build vp and move the base into argVpReg.
  masm.Push(val);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));
  masm.Push(ObjectValue(*target));
  masm.moveStackPtrTo(argVp.get());

  // Preload other regs.
  masm.loadJSContext(argJSContext);
  masm.move32(Imm32(1), argUintN);

  // Push marking data for later use.
  masm.Push(argUintN);
  pushStubCodePointer();

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  // Make the call.
  masm.setupUnalignedABICall(scratch);
#ifdef JS_CODEGEN_X86
  // Reload argUintN because it was clobbered.
  masm.move32(Imm32(1), argUintN);
#endif
  masm.passABIArg(argJSContext);
  masm.passABIArg(argUintN);
  masm.passABIArg(argVp);
  masm.callWithABI(DynamicFunction<JSNative>(target->native()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  if (!sameRealm) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  masm.adjustStack(IonOOLNativeExitFrameLayout::Size(1));
  return true;
}
bool IonCacheIRCompiler::emitCallScriptedSetter(ObjOperandId receiverId,
                                                uint32_t setterOffset,
                                                ValOperandId rhsId,
                                                bool sameRealm,
                                                uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register receiver = allocator.useRegister(masm, receiverId);
  JSFunction* target = &objectStubField(setterOffset)->as<JSFunction>();
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  MOZ_ASSERT(sameRealm == (cx_->realm() == target->realm()));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  enterStubFrame(masm, save);

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push the
  // |this| + argument Values.
  size_t numArgs = std::max<size_t>(1, target->nargs());
  uint32_t argSize = (numArgs + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  for (size_t i = 1; i < target->nargs(); i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(val);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  masm.movePtr(ImmGCPtr(target), scratch);

  masm.Push(scratch);
  masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 1);

  // Check stack alignment. Add 2 * sizeof(uintptr_t) for the return address and
  // frame pointer pushed by the call/callee.
  MOZ_ASSERT(
      ((masm.framePushed() + 2 * sizeof(uintptr_t)) % JitStackAlignment) == 0);

  MOZ_ASSERT(target->hasJitEntry());
  masm.loadJitCodeRaw(scratch, scratch);
  masm.callJit(scratch);

  if (!sameRealm) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  // Restore the frame pointer and stack pointer.
  masm.loadPtr(Address(FramePointer, 0), FramePointer);
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}
bool IonCacheIRCompiler::emitCallInlinedSetter(
    ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
    uint32_t icScriptOffset, bool sameRealm, uint32_t nargsAndFlagsOffset) {
  MOZ_CRASH("Trial inlining not supported in Ion");
}
bool IonCacheIRCompiler::emitCallSetArrayLength(ObjOperandId objId, bool strict,
                                                ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
  callVM<Fn, jit::SetArrayLength>(masm);
  return true;
}
bool IonCacheIRCompiler::emitProxySet(ObjOperandId objId, uint32_t idOffset,
                                      ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  jsid id = idStubField(idOffset);

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(id, scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, ProxySetProperty>(masm);
  return true;
}
bool IonCacheIRCompiler::emitProxySetByValue(ObjOperandId objId,
                                             ValOperandId idId,
                                             ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister idVal = allocator.useConstantOrRegister(masm, idId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, ProxySetPropertyByValue>(masm);
  return true;
}
bool IonCacheIRCompiler::emitCallAddOrUpdateSparseElementHelper(
    ObjOperandId objId, Int32OperandId idId, ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  Register id = allocator.useRegister(masm, idId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(id);
  masm.Push(obj);

  using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
                      HandleValue v, bool strict);
  callVM<Fn, AddOrUpdateSparseElementHelper>(masm);
  return true;
}
bool IonCacheIRCompiler::emitMegamorphicSetElement(ObjOperandId objId,
                                                   ValOperandId idId,
                                                   ValOperandId rhsId,
                                                   bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister idVal = allocator.useConstantOrRegister(masm, idId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  allocator.discardStack(masm);
  enterStubFrame(masm, save);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, SetElementMegamorphic<false>>(masm);
  return true;
}
bool IonCacheIRCompiler::emitReturnFromIC() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (!savedLiveRegs_) {
    allocator.restoreInputState(masm);
  }

  uint8_t* rejoinAddr = ic_->rejoinAddr(ionScript_);
  masm.jump(ImmPtr(rejoinAddr));
  return true;
}
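
// NOTE: emitReturnFromIC jumps directly back to the rejoin point in the Ion
// script rather than returning, so the register state must already have been
// restored (either via AutoSaveLiveRegisters or restoreInputState above).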
bool IonCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape(
    ValOperandId expandoId, uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand val = allocator.useValueRegister(masm, expandoId);
  Shape* shape = shapeStubField(shapeOffset);

  AutoScratchRegister objScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.branchTestUndefined(Assembler::Equal, val, &done);

  masm.debugAssertIsObject(val);
  masm.unboxObject(val, objScratch);
  // The expando object is not used in this case, so we don't need Spectre
  // mitigations.
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch,
                                              shape, failure->label());

  masm.bind(&done);
  return true;
}

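// Load the expando value of a DOM proxy whose expando slot holds an
// ExpandoAndGeneration*, guarding that the generation counter still matches
// the value recorded when the stub was attached.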
bool IonCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration(
    ObjOperandId objId, uint32_t expandoAndGenerationOffset,
    uint32_t generationOffset, ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ExpandoAndGeneration* expandoAndGeneration =
      rawPointerStubField<ExpandoAndGeneration*>(expandoAndGenerationOffset);
  uint64_t generation = rawInt64StubField<uint64_t>(generationOffset);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadDOMExpandoValueGuardGeneration(obj, output, expandoAndGeneration,
                                          generation, failure->label());
  return true;
}

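// Compile the CacheIR emitted by an IR generator to native code and attach it
// to this IC: look up (or create) the shared CacheIRStubInfo, reject
// duplicate stubs, allocate the IonICStub in the zone's stub space, compile
// the stub code, and link the new stub into the IC's stub chain.
//
// Typical call site (a sketch; see the IonIC::update methods in IonIC.cpp):
//
//   bool attached = false;
//   GetPropIRGenerator gen(...);
//   if (gen.tryAttachStub() == AttachDecision::Attach) {
//     ic->attachCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), ionScript,
//                           &attached);
//   }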
void IonIC::attachCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
                              CacheKind kind, IonScript* ionScript,
                              bool* attached) {
  // We shouldn't GC or report OOM (or any other exception) here.
  AutoAssertNoPendingException aanpe(cx);
  JS::AutoCheckCannotGC nogc;

  MOZ_ASSERT(!*attached);

  // Do nothing if the IR generator failed or triggered a GC that invalidated
  // the script.
  if (writer.failed() || ionScript->invalidated()) {
    return;
  }

  JitZone* jitZone = cx->zone()->jitZone();

  constexpr uint32_t stubDataOffset = sizeof(IonICStub);
  static_assert(stubDataOffset % sizeof(uint64_t) == 0,
                "Stub fields must be aligned");

  // Try to reuse a previously-allocated CacheIRStubInfo.
  CacheIRStubKey::Lookup lookup(kind, ICStubEngine::IonIC, writer.codeStart(),
                                writer.codeLength());
  CacheIRStubInfo* stubInfo = jitZone->getIonCacheIRStubInfo(lookup);
  if (!stubInfo) {
    // Allocate the shared CacheIRStubInfo. Note that the
    // putIonCacheIRStubInfo call below will transfer ownership to
    // the stub info HashSet, so we don't have to worry about freeing
    // it below.

    // For Ion ICs, we don't track/use the makesGCCalls flag, so just pass
    // true.
    bool makesGCCalls = true;
    stubInfo = CacheIRStubInfo::New(kind, ICStubEngine::IonIC, makesGCCalls,
                                    stubDataOffset, writer);
    if (!stubInfo) {
      return;
    }

    CacheIRStubKey key(stubInfo);
    if (!jitZone->putIonCacheIRStubInfo(lookup, key)) {
      return;
    }
  }
  MOZ_ASSERT(stubInfo);

  // Ensure we don't attach duplicate stubs. This can happen if a stub failed
  // for some reason and the IR generator doesn't check for exactly the same
  // conditions.
  for (IonICStub* stub = firstStub_; stub; stub = stub->next()) {
    if (stub->stubInfo() != stubInfo) {
      continue;
    }
    if (!writer.stubDataEquals(stub->stubDataStart())) {
      continue;
    }
    return;
  }

  size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();

  // Allocate the IonICStub in the JitZone's stub space. Ion stubs and
  // CacheIRStubInfo instances for Ion stubs can be purged on GC. That's okay
  // because the stub code is rooted separately when we make a VM call, and
  // stub code should never access the IonICStub after making a VM call. The
  // IonICStub::poison method poisons the stub to catch bugs in this area.
  ICStubSpace* stubSpace = cx->zone()->jitZone()->stubSpace();
  void* newStubMem = stubSpace->alloc(bytesNeeded);
  if (!newStubMem) {
    return;
  }

  IonICStub* newStub =
      new (newStubMem) IonICStub(fallbackAddr(ionScript), stubInfo);
  writer.copyStubData(newStub->stubDataStart());

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jctx(cx);
  IonCacheIRCompiler compiler(cx, temp, writer, this, ionScript,
                              stubDataOffset);
  if (!compiler.init()) {
    return;
  }

  JitCode* code = compiler.compile(newStub);
  if (!code) {
    return;
  }

  // Record the stub code if perf spewer is enabled.
  CacheKind stubKind = newStub->stubInfo()->kind();
  compiler.perfSpewer().saveProfile(cx, script(), code,
                                    CacheKindNames[uint8_t(stubKind)]);

  // Add an entry to the profiler's code table, so that the profiler can
  // identify this as Ion code.
  if (ionScript->hasProfilingInstrumentation()) {
    uint8_t* addr = rejoinAddr(ionScript);
    auto entry = MakeJitcodeGlobalEntry<IonICEntry>(cx, code, code->raw(),
                                                    code->rawEnd(), addr);
    if (!entry) {
      cx->recoverFromOutOfMemory();
      return;
    }

    auto* globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    if (!globalTable->addEntry(std::move(entry))) {
      return;
    }
  }

  attachStub(newStub, code);
  *attached = true;
}

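// Concatenation involving a String object is not inlined; call
// DoConcatStringObject in the VM and store the boxed result in the IC's
// output register.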
bool IonCacheIRCompiler::emitCallStringObjectConcatResult(ValOperandId lhsId,
                                                          ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
  ValueOperand rhs = allocator.useValueRegister(masm, rhsId);

  allocator.discardStack(masm);

  enterStubFrame(masm, save);
  masm.Push(rhs);
  masm.Push(lhs);

  using Fn =
      bool (*)(JSContext*, HandleValue, HandleValue, MutableHandleValue);
  callVM<Fn, DoConcatStringObject>(masm);

  masm.storeCallResultValue(output);
  return true;
}

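// Call the iterator's scripted return() method when closing it. The method is
// invoked with |this| set to the iterator and |calleeNargs| undefined
// arguments. Unless the completion kind is Throw, the result must be an
// object, so a failed object check calls ThrowCheckIsObject to report the
// error.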
bool IonCacheIRCompiler::emitCloseIterScriptedResult(ObjOperandId iterId,
                                                     ObjOperandId calleeId,
                                                     CompletionKind kind,
                                                     uint32_t calleeNargs) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register iter = allocator.useRegister(masm, iterId);
  Register callee = allocator.useRegister(masm, calleeId);

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  // Construct IonICCallFrameLayout.
  enterStubFrame(masm, save);

  uint32_t stubFramePushed = masm.framePushed();

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push |this|
  // and |calleeNargs| undefined arguments.
  uint32_t argSize = (calleeNargs + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  for (uint32_t i = 0; i < calleeNargs; i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(iter)));

  masm.Push(callee);
  masm.PushFrameDescriptorForJitCall(FrameType::IonICCall, /* argc = */ 0);

  masm.loadJitCodeRaw(callee, callee);
  masm.callJit(callee);

  if (kind != CompletionKind::Throw) {
    // Verify that the return value is an object.
    Label success;
    masm.branchTestObject(Assembler::Equal, JSReturnOperand, &success);

    // We can reuse the same stub frame, but we first have to pop the arguments
    // from the previous call.
    uint32_t framePushedAfterCall = masm.framePushed();
    masm.freeStack(masm.framePushed() - stubFramePushed);

    masm.push(Imm32(int32_t(CheckIsObjectKind::IteratorReturn)));
    using Fn = bool (*)(JSContext*, CheckIsObjectKind);
    callVM<Fn, ThrowCheckIsObject>(masm);

    masm.bind(&success);
    masm.setFramePushed(framePushedAfterCall);
  }

  // Restore the frame pointer and stack pointer.
  masm.loadPtr(Address(FramePointer, 0), FramePointer);
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}

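// Guard that the function's BaseScript matches the script recorded in the
// stub data. The BaseScript is stored in the function's jitinfo-or-script
// slot, so it is loaded with loadPrivate before the comparison.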
bool IonCacheIRCompiler::emitGuardFunctionScript(ObjOperandId funId,
                                                 uint32_t expectedOffset,
                                                 uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);
  BaseScript* expected = weakBaseScriptStubField(expectedOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPrivate(Address(fun, JSFunction::offsetOfJitInfoOrScript()),
                   scratch);
  masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(expected),
                 failure->label());
  return true;
}

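// The emitters below correspond to CacheIR ops that Ion ICs never generate
// (calls and object allocation are handled by Baseline ICs only), so they are
// unreachable here and simply crash.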
bool IonCacheIRCompiler::emitCallScriptedFunction(ObjOperandId calleeId,
                                                  Int32OperandId argcId,
                                                  CallFlags flags,
                                                  uint32_t argcFixed) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallBoundScriptedFunction(ObjOperandId calleeId,
                                                       ObjOperandId targetId,
                                                       Int32OperandId argcId,
                                                       CallFlags flags,
                                                       uint32_t numBoundArgs) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallWasmFunction(
    ObjOperandId calleeId, Int32OperandId argcId, CallFlags flags,
    uint32_t argcFixed, uint32_t funcExportOffset, uint32_t instanceOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

#ifdef JS_SIMULATOR
bool IonCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
                                                Int32OperandId argcId,
                                                CallFlags flags,
                                                uint32_t argcFixed,
                                                uint32_t targetOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallDOMFunction(
    ObjOperandId calleeId, Int32OperandId argcId, ObjOperandId thisObjId,
    CallFlags flags, uint32_t argcFixed, uint32_t targetOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}
#else
bool IonCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
                                                Int32OperandId argcId,
                                                CallFlags flags,
                                                uint32_t argcFixed,
                                                bool ignoresReturnValue) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallDOMFunction(ObjOperandId calleeId,
                                             Int32OperandId argcId,
                                             ObjOperandId thisObjId,
                                             CallFlags flags,
                                             uint32_t argcFixed) {
  MOZ_CRASH("Call ICs not used in ion");
}
#endif

bool IonCacheIRCompiler::emitCallClassHook(ObjOperandId calleeId,
                                           Int32OperandId argcId,
                                           CallFlags flags, uint32_t argcFixed,
                                           uint32_t targetOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallInlinedFunction(ObjOperandId calleeId,
                                                 Int32OperandId argcId,
                                                 uint32_t icScriptOffset,
                                                 CallFlags flags,
                                                 uint32_t argcFixed) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitBindFunctionResult(ObjOperandId targetId,
                                                uint32_t argc,
                                                uint32_t templateObjectOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitSpecializedBindFunctionResult(
    ObjOperandId targetId, uint32_t argc, uint32_t templateObjectOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitLoadArgumentFixedSlot(ValOperandId resultId,
                                                   uint8_t slotIndex) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitLoadArgumentDynamicSlot(ValOperandId resultId,
                                                     Int32OperandId argcId,
                                                     uint8_t slotIndex) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitArrayJoinResult(ObjOperandId objId,
                                             StringOperandId sepId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitPackedArraySliceResult(
    uint32_t templateObjectOffset, ObjOperandId arrayId, Int32OperandId beginId,
    Int32OperandId endId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitArgumentsSliceResult(uint32_t templateObjectOffset,
                                                  ObjOperandId argsId,
                                                  Int32OperandId beginId,
                                                  Int32OperandId endId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitIsArrayResult(ValOperandId inputId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitIsTypedArrayResult(ObjOperandId objId,
                                                bool isPossiblyWrapped) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitStringFromCharCodeResult(Int32OperandId codeId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitStringFromCodePointResult(Int32OperandId codeId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitMathRandomResult(uint32_t rngOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitReflectGetPrototypeOfResult(ObjOperandId objId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitHasClassResult(ObjOperandId objId,
                                            uint32_t claspOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitSameValueResult(ValOperandId lhs,
                                             ValOperandId rhs) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitSetHasStringResult(ObjOperandId setId,
                                                StringOperandId strId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitMapHasStringResult(ObjOperandId mapId,
                                                StringOperandId strId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitMapGetStringResult(ObjOperandId mapId,
                                                StringOperandId strId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitNewArrayObjectResult(uint32_t arrayLength,
                                                  uint32_t shapeOffset,
                                                  uint32_t siteOffset) {
  MOZ_CRASH("NewArray ICs not used in ion");
}

bool IonCacheIRCompiler::emitNewPlainObjectResult(uint32_t numFixedSlots,
                                                  uint32_t numDynamicSlots,
                                                  gc::AllocKind allocKind,
                                                  uint32_t shapeOffset,
                                                  uint32_t siteOffset) {
  MOZ_CRASH("NewObject ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallRegExpMatcherResult(ObjOperandId regexpId,
                                                     StringOperandId inputId,
                                                     Int32OperandId lastIndexId,
                                                     uint32_t stubOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallRegExpSearcherResult(
    ObjOperandId regexpId, StringOperandId inputId, Int32OperandId lastIndexId,
    uint32_t stubOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitRegExpBuiltinExecMatchResult(
    ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitRegExpBuiltinExecTestResult(
    ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitRegExpHasCaptureGroupsResult(
    ObjOperandId regexpId, StringOperandId inputId) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitLoadStringAtResult(StringOperandId strId,
                                                Int32OperandId indexId,
                                                bool handleOOB) {
  MOZ_CRASH("Call ICs not used in ion");
}