/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/BaselineCacheIRCompiler.h"

#include "jit/CacheIR.h"
#include "jit/CacheIRCloner.h"
#include "jit/CacheIRWriter.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/Linker.h"
#include "jit/MoveEmitter.h"
#include "jit/RegExpStubConstants.h"
#include "jit/SharedICHelpers.h"
#include "jit/VMFunctions.h"
#include "js/experimental/JitInfo.h"  // JSJitInfo
#include "js/friend/DOMProxy.h"       // JS::ExpandoAndGeneration
#include "proxy/DeadObjectProxy.h"
#include "proxy/Proxy.h"
#include "util/Unicode.h"
#include "vm/StaticStrings.h"

#include "jit/JitScript-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "jit/VMFunctionList-inl.h"
#include "vm/List-inl.h"
using namespace js::jit;

using JS::ExpandoAndGeneration;
static uint32_t GetICStackValueOffset() {
  uint32_t offset = ICStackValueOffset;
  if (JitOptions.enableICFramePointers) {
#ifdef JS_USE_LINK_REGISTER
    // The frame pointer and return address are also on the stack.
    offset += 2 * sizeof(uintptr_t);
#else
    // The frame pointer is also on the stack.
    offset += sizeof(uintptr_t);
#endif
  }
  return offset;
}
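
// Illustrative arithmetic (added note, not in the original source): on a
// platform without a link register, with IC frame pointers enabled, the IC's
// stack values start at
//
//   ICStackValueOffset + sizeof(uintptr_t)  // one word for the pushed FP
//
// from the stack pointer; on link-register platforms the pushed return
// address adds a second uintptr_t-sized word.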
static void PushICFrameRegs(MacroAssembler& masm) {
  MOZ_ASSERT(JitOptions.enableICFramePointers);
#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
}
static void PopICFrameRegs(MacroAssembler& masm) {
  MOZ_ASSERT(JitOptions.enableICFramePointers);
  masm.pop(FramePointer);
#ifdef JS_USE_LINK_REGISTER
  masm.popReturnAddress();
#endif
}
Address CacheRegisterAllocator::addressOf(MacroAssembler& masm,
                                          BaselineFrameSlot slot) const {
  uint32_t offset =
      stackPushed_ + GetICStackValueOffset() + slot.slot() * sizeof(JS::Value);
  return Address(masm.getStackPointer(), offset);
}

BaseValueIndex CacheRegisterAllocator::addressOf(MacroAssembler& masm,
                                                 Register argcReg,
                                                 BaselineFrameSlot slot) const {
  uint32_t offset =
      stackPushed_ + GetICStackValueOffset() + slot.slot() * sizeof(JS::Value);
  return BaseValueIndex(masm.getStackPointer(), argcReg, offset);
}
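
// Worked example (added, assumes stackPushed_ == 0 and IC frame pointers
// disabled): BaselineFrameSlot 2 resolves to
//
//   Address(sp, ICStackValueOffset + 2 * sizeof(JS::Value))
//
// The argc-relative overload computes the same constant offset but lets
// BaseValueIndex add argcReg scaled by sizeof(JS::Value) at runtime, which is
// how stack arguments below the fixed slots are reached.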
// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
BaselineCacheIRCompiler::BaselineCacheIRCompiler(JSContext* cx,
                                                 TempAllocator& alloc,
                                                 const CacheIRWriter& writer,
                                                 uint32_t stubDataOffset)
    : CacheIRCompiler(cx, alloc, writer, stubDataOffset, Mode::Baseline,
                      StubFieldPolicy::Address),
      makesGCCalls_(false) {}
// AutoStubFrame methods
AutoStubFrame::AutoStubFrame(BaselineCacheIRCompiler& compiler)
    : compiler(compiler), framePushedAtEnterStubFrame_(0) {}
void AutoStubFrame::enter(MacroAssembler& masm, Register scratch) {
  MOZ_ASSERT(compiler.allocator.stackPushed() == 0);

  if (JitOptions.enableICFramePointers) {
    // If we have already pushed the frame pointer, pop it
    // before creating the stub frame.
    PopICFrameRegs(masm);
  }
  EmitBaselineEnterStubFrame(masm, scratch);

  framePushedAtEnterStubFrame_ = masm.framePushed();

  MOZ_ASSERT(!compiler.enteredStubFrame_);
  compiler.enteredStubFrame_ = true;

  // All current uses of this are to call VM functions that can GC.
  compiler.makesGCCalls_ = true;
}
void AutoStubFrame::leave(MacroAssembler& masm) {
  MOZ_ASSERT(compiler.enteredStubFrame_);
  compiler.enteredStubFrame_ = false;

  masm.setFramePushed(framePushedAtEnterStubFrame_);

  EmitBaselineLeaveStubFrame(masm);
  if (JitOptions.enableICFramePointers) {
    // We will pop the frame pointer when we return,
    // so we have to push it again now.
    PushICFrameRegs(masm);
  }
}
void AutoStubFrame::storeTracedValue(MacroAssembler& masm, ValueOperand value) {
  MOZ_ASSERT(compiler.localTracingSlots_ < 255);
  MOZ_ASSERT(masm.framePushed() - framePushedAtEnterStubFrame_ ==
             compiler.localTracingSlots_ * sizeof(Value));
  masm.Push(value);
  compiler.localTracingSlots_++;
}
void AutoStubFrame::loadTracedValue(MacroAssembler& masm, uint8_t slotIndex,
                                    ValueOperand value) {
  MOZ_ASSERT(slotIndex <= compiler.localTracingSlots_);
  int32_t offset = BaselineStubFrameLayout::LocallyTracedValueOffset +
                   slotIndex * sizeof(Value);
  masm.loadValue(Address(FramePointer, -offset), value);
}

AutoStubFrame::~AutoStubFrame() { MOZ_ASSERT(!compiler.enteredStubFrame_); }
bool BaselineCacheIRCompiler::makesGCCalls() const { return makesGCCalls_; }

Address BaselineCacheIRCompiler::stubAddress(uint32_t offset) const {
  return Address(ICStubReg, stubDataOffset_ + offset);
}
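
// Layout note (added, assuming the usual stub layout): stub fields appear to
// be laid out directly after the ICCacheIRStub header, so stubDataOffset_ is
// the header size and |offset| is a field's position within the stub data. A
// shape pointer stored as the first field would then be read from
// Address(ICStubReg, stubDataOffset_ + 0).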
template <typename Fn, Fn fn>
void BaselineCacheIRCompiler::callVM(MacroAssembler& masm) {
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  callVMInternal(masm, id);
}
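
// Usage sketch (added): a typical call site pairs a function type with a VM
// function, e.g.
//
//   using Fn = bool (*)(JSContext*, HandleObject, HandleId,
//                       MutableHandleValue);
//   callVM<Fn, ProxyGetProperty>(masm);
//
// VMFunctionToId resolves the (type, function) pair to a VMFunctionId at
// compile time, so callVMInternal can emit a call through the matching VM
// wrapper trampoline.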
JitCode* BaselineCacheIRCompiler::compile() {
  AutoCreatedBy acb(masm, "BaselineCacheIRCompiler::compile");

#ifndef JS_USE_LINK_REGISTER
  masm.adjustFrame(sizeof(intptr_t));
#endif
#ifdef JS_CODEGEN_ARM
  AutoNonDefaultSecondScratchRegister andssr(masm, BaselineSecondScratchReg);
#endif
  if (JitOptions.enableICFramePointers) {
    /* [SMDOC] Baseline IC Frame Pointers
     *
     * In general, ICs don't have frame pointers until just before
     * doing a VM call, at which point we retroactively create a stub
     * frame. However, for the sake of external profilers, we
     * optionally support full-IC frame pointers in baseline ICs, with
     * the following approach:
     *   1. We push a frame pointer when we enter an IC.
     *   2. We pop the frame pointer when we return from an IC, or
     *      when we jump to the next IC.
     *   3. Entering a stub frame for a VM call already pushes a
     *      frame pointer, so we pop our existing frame pointer
     *      just before entering a stub frame and push it again
     *      just after leaving a stub frame.
     * Some ops take advantage of the fact that the frame pointer is
     * not updated until we enter a stub frame to read values from
     * the caller's frame. To support this, we allocate a separate
     * baselineFrame register when IC frame pointers are enabled.
     */
    PushICFrameRegs(masm);
    masm.moveStackPtrTo(FramePointer);

    MOZ_ASSERT(baselineFrameReg() != FramePointer);
    masm.loadPtr(Address(FramePointer, 0), baselineFrameReg());
  }

  // Count stub entries: we count entries rather than successes as it is much
  // easier to ensure ICStubReg is valid at entry than at exit.
  Address enteredCount(ICStubReg, ICCacheIRStub::offsetOfEnteredCount());
  masm.add32(Imm32(1), enteredCount);

  CacheIRReader reader(writer_);
  do {
    CacheOp op = reader.readOp();
    perfSpewer_.recordInstruction(masm, op);
    switch (op) {
#define DEFINE_OP(op, ...)                 \
  case CacheOp::op:                        \
    if (!emit##op(reader)) return nullptr; \
    break;
      CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

      default:
        MOZ_CRASH("Invalid op");
    }
    allocator.nextOp();
  } while (reader.more());

  MOZ_ASSERT(!enteredStubFrame_);
  masm.assumeUnreachable("Should have returned from IC");

  // Done emitting the main IC code. Now emit the failure paths.
  for (size_t i = 0; i < failurePaths.length(); i++) {
    if (!emitFailurePath(i)) {
      return nullptr;
    }
    if (JitOptions.enableICFramePointers) {
      PopICFrameRegs(masm);
    }
    EmitStubGuardFailure(masm);
  }

  Linker linker(masm);
  Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Baseline));
  if (!newStubCode) {
    cx_->recoverFromOutOfMemory();
    return nullptr;
  }

  newStubCode->setLocalTracingSlots(localTracingSlots_);

  return newStubCode;
}
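
// Expansion sketch (added): for a single op, DEFINE_OP above produces a
// switch case of the form
//
//   case CacheOp::GuardShape:
//     if (!emitGuardShape(reader)) return nullptr;
//     break;
//
// where emitGuardShape(reader) is the generated wrapper that decodes the
// op's operands from the CacheIRReader and calls the typed emitter below.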
bool BaselineCacheIRCompiler::emitGuardShape(ObjOperandId objId,
                                             uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch1(allocator, masm);

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  Maybe<AutoScratchRegister> maybeScratch2;
  if (needSpectreMitigations) {
    maybeScratch2.emplace(allocator, masm);
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address addr(stubAddress(shapeOffset));
  masm.loadPtr(addr, scratch1);
  if (needSpectreMitigations) {
    masm.branchTestObjShape(Assembler::NotEqual, obj, scratch1, *maybeScratch2,
                            obj, failure->label());
  } else {
    masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj,
                                                scratch1, failure->label());
  }

  return true;
}
bool BaselineCacheIRCompiler::emitGuardProto(ObjOperandId objId,
                                             uint32_t protoOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address addr(stubAddress(protoOffset));
  masm.loadObjProto(obj, scratch);
  masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
  return true;
}
bool BaselineCacheIRCompiler::emitGuardCompartment(ObjOperandId objId,
                                                   uint32_t globalOffset,
                                                   uint32_t compartmentOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Verify that the global wrapper is still valid, as
  // it is a prerequisite for doing the compartment check.
  Address globalWrapper(stubAddress(globalOffset));
  masm.loadPtr(globalWrapper, scratch);
  Address handlerAddr(scratch, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::Equal, handlerAddr,
                 ImmPtr(&DeadObjectProxy::singleton), failure->label());

  Address addr(stubAddress(compartmentOffset));
  masm.branchTestObjCompartment(Assembler::NotEqual, obj, addr, scratch,
                                failure->label());
  return true;
}
bool BaselineCacheIRCompiler::emitGuardAnyClass(ObjOperandId objId,
                                                uint32_t claspOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address testAddr(stubAddress(claspOffset));
  if (objectGuardNeedsSpectreMitigations(objId)) {
    masm.branchTestObjClass(Assembler::NotEqual, obj, testAddr, scratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(
        Assembler::NotEqual, obj, testAddr, scratch, failure->label());
  }

  return true;
}
bool BaselineCacheIRCompiler::emitGuardHasProxyHandler(ObjOperandId objId,
                                                       uint32_t handlerOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address testAddr(stubAddress(handlerOffset));
  masm.loadPtr(testAddr, scratch);

  Address handlerAddr(obj, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::NotEqual, handlerAddr, scratch, failure->label());
  return true;
}
bool BaselineCacheIRCompiler::emitGuardSpecificObject(ObjOperandId objId,
                                                      uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address addr(stubAddress(expectedOffset));
  masm.branchPtr(Assembler::NotEqual, addr, obj, failure->label());
  return true;
}
bool BaselineCacheIRCompiler::emitGuardSpecificFunction(
    ObjOperandId objId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
  return emitGuardSpecificObject(objId, expectedOffset);
}
bool BaselineCacheIRCompiler::emitGuardFunctionScript(
    ObjOperandId funId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address addr(stubAddress(expectedOffset));
  masm.loadPrivate(Address(fun, JSFunction::offsetOfJitInfoOrScript()),
                   scratch);
  masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
  return true;
}
bool BaselineCacheIRCompiler::emitGuardSpecificAtom(StringOperandId strId,
                                                    uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address atomAddr(stubAddress(expectedOffset));

  Label done;
  masm.branchPtr(Assembler::Equal, atomAddr, str, &done);

  // The pointers are not equal, so if the input string is also an atom it
  // must be a different string.
  masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), failure->label());

  // Check the length.
  masm.loadPtr(atomAddr, scratch);
  masm.loadStringLength(scratch, scratch);
  masm.branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
                scratch, failure->label());

  // We have a non-atomized string with the same length. Call a helper
  // function to do the comparison.
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSString* str1, JSString* str2);
  masm.setupUnalignedABICall(scratch);
  masm.loadPtr(atomAddr, scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(str);
  masm.callWithABI<Fn, EqualStringsHelperPure>();
  masm.storeCallPointerResult(scratch);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  masm.branchIfFalseBool(scratch, failure->label());

  masm.bind(&done);
  return true;
}
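
// Note on the guard cascade above (added): pointer equality handles the
// common same-atom case, the ATOM_BIT test rejects distinct atoms (two
// different atoms can never be equal strings), and the length check filters
// cheaply before EqualStringsHelperPure does the character-wise comparison;
// "pure" here means the helper cannot GC, so no stub frame is needed.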
bool BaselineCacheIRCompiler::emitGuardSpecificSymbol(SymbolOperandId symId,
                                                      uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register sym = allocator.useRegister(masm, symId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address addr(stubAddress(expectedOffset));
  masm.branchPtr(Assembler::NotEqual, addr, sym, failure->label());
  return true;
}
bool BaselineCacheIRCompiler::emitLoadValueResult(uint32_t valOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  masm.loadValue(stubAddress(valOffset), output.valueReg());
  return true;
}
bool BaselineCacheIRCompiler::emitLoadFixedSlotResult(ObjOperandId objId,
                                                      uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.load32(stubAddress(offsetOffset), scratch);
  masm.loadValue(BaseIndex(obj, scratch, TimesOne), output.valueReg());
  return true;
}
bool BaselineCacheIRCompiler::emitLoadFixedSlotTypedResult(
    ObjOperandId objId, uint32_t offsetOffset, ValueType) {
  // The type is only used by Warp.
  return emitLoadFixedSlotResult(objId, offsetOffset);
}
bool BaselineCacheIRCompiler::emitLoadDynamicSlotResult(ObjOperandId objId,
                                                        uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  masm.load32(stubAddress(offsetOffset), scratch);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
  masm.loadValue(BaseIndex(scratch2, scratch, TimesOne), output.valueReg());
  return true;
}
bool BaselineCacheIRCompiler::emitCallScriptedGetterShared(
    ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
    uint32_t nargsAndFlagsOffset, Maybe<uint32_t> icScriptOffset) {
  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
  Address getterAddr(stubAddress(getterOffset));

  AutoScratchRegister code(allocator, masm);
  AutoScratchRegister callee(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  bool isInlined = icScriptOffset.isSome();

  // First, retrieve raw jitcode for getter.
  masm.loadPtr(getterAddr, callee);
  if (isInlined) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }
    masm.loadBaselineJitCodeRaw(callee, code, failure->label());
  } else {
    masm.loadJitCodeRaw(callee, code);
  }

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  if (!sameRealm) {
    masm.switchToObjectRealm(callee, scratch);
  }

  // Align the stack such that the JitFrameLayout is aligned on
  // JitStackAlignment.
  masm.alignJitStackBasedOnNArgs(0, /*countIncludesThis = */ false);

  // Getter is called with 0 arguments, just |receiver| as thisv.
  // Note that we use Push, not push, so that callJit will align the stack
  // properly on ARM.
  masm.Push(receiver);

  if (isInlined) {
    // Store icScript in the context.
    Address icScriptAddr(stubAddress(*icScriptOffset));
    masm.loadPtr(icScriptAddr, scratch);
    masm.storeICScriptInJSContext(scratch);
  }

  masm.Push(callee);
  masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, /* argc = */ 0);

  // Handle arguments underflow.
  Label noUnderflow;
  masm.loadFunctionArgCount(callee, callee);
  masm.branch32(Assembler::Equal, callee, Imm32(0), &noUnderflow);

  // Call the arguments rectifier.
  ArgumentsRectifierKind kind = isInlined
                                    ? ArgumentsRectifierKind::TrialInlining
                                    : ArgumentsRectifierKind::Normal;
  TrampolinePtr argumentsRectifier =
      cx_->runtime()->jitRuntime()->getArgumentsRectifier(kind);
  masm.movePtr(argumentsRectifier, code);

  masm.bind(&noUnderflow);
  masm.callJit(code);

  stubFrame.leave(masm);

  if (!sameRealm) {
    masm.switchToBaselineFrameRealm(R1.scratchReg());
  }

  return true;
}
bool BaselineCacheIRCompiler::emitCallScriptedGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
    uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<uint32_t> icScriptOffset = mozilla::Nothing();
  return emitCallScriptedGetterShared(receiverId, getterOffset, sameRealm,
                                      nargsAndFlagsOffset, icScriptOffset);
}
bool BaselineCacheIRCompiler::emitCallInlinedGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, uint32_t icScriptOffset,
    bool sameRealm, uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitCallScriptedGetterShared(receiverId, getterOffset, sameRealm,
                                      nargsAndFlagsOffset,
                                      mozilla::Some(icScriptOffset));
}
bool BaselineCacheIRCompiler::emitCallNativeGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
    uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
  Address getterAddr(stubAddress(getterOffset));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Load the callee in the scratch register.
  masm.loadPtr(getterAddr, scratch);

  masm.Push(receiver);
  masm.Push(scratch);

  using Fn =
      bool (*)(JSContext*, HandleFunction, HandleValue, MutableHandleValue);
  callVM<Fn, CallNativeGetter>(masm);

  stubFrame.leave(masm);
  return true;
}
bool BaselineCacheIRCompiler::emitCallDOMGetterResult(ObjOperandId objId,
                                                      uint32_t jitInfoOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Address jitInfoAddr(stubAddress(jitInfoOffset));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Load the JSJitInfo in the scratch register.
  masm.loadPtr(jitInfoAddr, scratch);

  masm.Push(obj);
  masm.Push(scratch);

  using Fn =
      bool (*)(JSContext*, const JSJitInfo*, HandleObject, MutableHandleValue);
  callVM<Fn, CallDOMGetter>(masm);

  stubFrame.leave(masm);
  return true;
}
bool BaselineCacheIRCompiler::emitProxyGetResult(ObjOperandId objId,
                                                 uint32_t idOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Address idAddr(stubAddress(idOffset));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Load the jsid in the scratch register.
  masm.loadPtr(idAddr, scratch);

  masm.Push(scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
  callVM<Fn, ProxyGetProperty>(masm);

  stubFrame.leave(masm);
  return true;
}
bool BaselineCacheIRCompiler::emitFrameIsConstructingResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register outputScratch = output.valueReg().scratchReg();

  // Load the CalleeToken.
  Address tokenAddr(baselineFrameReg(), JitFrameLayout::offsetOfCalleeToken());
  masm.loadPtr(tokenAddr, outputScratch);

  // The low bit indicates whether this call is constructing, just clear the
  // other bits.
  static_assert(CalleeToken_Function == 0x0);
  static_assert(CalleeToken_FunctionConstructing == 0x1);
  masm.andPtr(Imm32(0x1), outputScratch);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
  return true;
}
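
// Worked example (added): CalleeToken_Function tags the callee JSFunction*
// with low bits 0x0 and CalleeToken_FunctionConstructing with 0x1, so after
// the andPtr the register holds exactly 0 or 1, which tagValue then boxes
// directly as the boolean result.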
bool BaselineCacheIRCompiler::emitLoadConstantStringResult(uint32_t strOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadPtr(stubAddress(strOffset), scratch);
  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  return true;
}
bool BaselineCacheIRCompiler::emitCompareStringResult(JSOp op,
                                                      StringOperandId lhsId,
                                                      StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register left = allocator.useRegister(masm, lhsId);
  Register right = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  allocator.discardStack(masm);

  Label slow, done;
  masm.compareStrings(op, left, right, scratch, &slow);
  masm.jump(&done);

  masm.bind(&slow);
  {
    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
    // - |left <= right| is implemented as |right >= left|.
    // - |left > right| is implemented as |right < left|.
    if (op == JSOp::Le || op == JSOp::Gt) {
      masm.Push(left);
      masm.Push(right);
    } else {
      masm.Push(right);
      masm.Push(left);
    }

    using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
    if (op == JSOp::Eq || op == JSOp::StrictEq) {
      callVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(masm);
    } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
      callVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(masm);
    } else if (op == JSOp::Lt || op == JSOp::Gt) {
      callVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(masm);
    } else {
      MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
      callVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(masm);
    }

    stubFrame.leave(masm);
    masm.storeCallPointerResult(scratch);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
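
// Note (added): only equality and less-than style VM helpers exist, so the
// remaining operators are derived by swapping operands: |left <= right| is
// evaluated as |right >= left| and |left > right| as |right < left|, which
// is why the Push order above is reversed for JSOp::Le and JSOp::Gt.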
bool BaselineCacheIRCompiler::emitSameValueResult(ValOperandId lhsId,
                                                  ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister scratch(allocator, masm);
  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
#ifdef JS_CODEGEN_X86
  // Use the output to avoid running out of registers.
  allocator.copyToScratchValueRegister(masm, rhsId, output.valueReg());
  ValueOperand rhs = output.valueReg();
#else
  ValueOperand rhs = allocator.useValueRegister(masm, rhsId);
#endif

  allocator.discardStack(masm);

  Label done;
  Label call;

#ifdef JS_PUNBOX64
  // Check to see if the values have identical bits.
  // This is correct for SameValue because SameValue(NaN,NaN) is true,
  // and SameValue(0,-0) is false.
  masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
                &call);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&call);
#endif

  {
    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(rhs);
    masm.Push(lhs);

    using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
    callVM<Fn, SameValue>(masm);

    stubFrame.leave(masm);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
  }

  masm.bind(&done);
  return true;
}
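
// Worked example (added): the raw-bits fast path is sound because two boxed
// values with identical bits are always SameValue-equal. A NaN compared
// against the same NaN bit pattern takes the fast path (true), while +0 and
// -0 differ in the sign bit and fall through to the SameValue VM call, which
// returns false for that pair.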
bool BaselineCacheIRCompiler::emitStoreSlotShared(bool isFixed,
                                                  ObjOperandId objId,
                                                  uint32_t offsetOffset,
                                                  ValOperandId rhsId) {
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegister scratch1(allocator, masm);
  Maybe<AutoScratchRegister> scratch2;
  if (!isFixed) {
    scratch2.emplace(allocator, masm);
  }

  Address offsetAddr = stubAddress(offsetOffset);
  masm.load32(offsetAddr, scratch1);

  if (isFixed) {
    BaseIndex slot(obj, scratch1, TimesOne);
    EmitPreBarrier(masm, slot, MIRType::Value);
    masm.storeValue(val, slot);
  } else {
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2.ref());
    BaseIndex slot(scratch2.ref(), scratch1, TimesOne);
    EmitPreBarrier(masm, slot, MIRType::Value);
    masm.storeValue(val, slot);
  }

  emitPostBarrierSlot(obj, val, scratch1);
  return true;
}
bool BaselineCacheIRCompiler::emitStoreFixedSlot(ObjOperandId objId,
                                                 uint32_t offsetOffset,
                                                 ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitStoreSlotShared(true, objId, offsetOffset, rhsId);
}

bool BaselineCacheIRCompiler::emitStoreDynamicSlot(ObjOperandId objId,
                                                   uint32_t offsetOffset,
                                                   ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitStoreSlotShared(false, objId, offsetOffset, rhsId);
}
bool BaselineCacheIRCompiler::emitAddAndStoreSlotShared(
    CacheOp op, ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
    uint32_t newShapeOffset, Maybe<uint32_t> numNewSlotsOffset) {
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  Address newShapeAddr = stubAddress(newShapeOffset);
  Address offsetAddr = stubAddress(offsetOffset);

  if (op == CacheOp::AllocateAndStoreDynamicSlot) {
    // We have to (re)allocate dynamic slots. Do this first, as it's the
    // only fallible operation here. Note that growSlotsPure is fallible but
    // does not GC.
    Address numNewSlotsAddr = stubAddress(*numNewSlotsOffset);

    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
    masm.setupUnalignedABICall(scratch1);
    masm.loadJSContext(scratch1);
    masm.passABIArg(scratch1);
    masm.passABIArg(obj);
    masm.load32(numNewSlotsAddr, scratch2);
    masm.passABIArg(scratch2);
    masm.callWithABI<Fn, NativeObject::growSlotsPure>();
    masm.storeCallPointerResult(scratch1);

    LiveRegisterSet ignore;
    ignore.add(scratch1);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.branchIfFalseBool(scratch1, failure->label());
  }

  // Update the object's shape.
  masm.loadPtr(newShapeAddr, scratch1);
  masm.storeObjShape(scratch1, obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  masm.load32(offsetAddr, scratch1);
  if (op == CacheOp::AddAndStoreFixedSlot) {
    BaseIndex slot(obj, scratch1, TimesOne);
    masm.storeValue(val, slot);
  } else {
    MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
               op == CacheOp::AllocateAndStoreDynamicSlot);
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
    BaseIndex slot(scratch2, scratch1, TimesOne);
    masm.storeValue(val, slot);
  }

  emitPostBarrierSlot(obj, val, scratch1);
  return true;
}
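
// Note (added): the stub data stores the target slot offset in bytes, so for
// fixed slots the store address is simply obj + offset, while for dynamic
// slots the base is the separately-allocated slots array loaded via
// NativeObject::offsetOfSlots(); BaseIndex with TimesOne expresses both as
// base + byte offset.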
bool BaselineCacheIRCompiler::emitAddAndStoreFixedSlot(
    ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
    uint32_t newShapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
  return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot, objId,
                                   offsetOffset, rhsId, newShapeOffset,
                                   numNewSlotsOffset);
}

bool BaselineCacheIRCompiler::emitAddAndStoreDynamicSlot(
    ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
    uint32_t newShapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
  return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot, objId,
                                   offsetOffset, rhsId, newShapeOffset,
                                   numNewSlotsOffset);
}

bool BaselineCacheIRCompiler::emitAllocateAndStoreDynamicSlot(
    ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
    uint32_t newShapeOffset, uint32_t numNewSlotsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot, objId,
                                   offsetOffset, rhsId, newShapeOffset,
                                   mozilla::Some(numNewSlotsOffset));
}
bool BaselineCacheIRCompiler::emitArrayJoinResult(ObjOperandId objId,
                                                  StringOperandId sepId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register sep = allocator.useRegister(masm, sepId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  allocator.discardStack(masm);

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
  Address lengthAddr(scratch, ObjectElements::offsetOfLength());

  // If array length is 0, return empty string.
  Label finished;

  {
    Label arrayNotEmpty;
    masm.branch32(Assembler::NotEqual, lengthAddr, Imm32(0), &arrayNotEmpty);
    masm.movePtr(ImmGCPtr(cx_->names().empty_), scratch);
    masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
    masm.jump(&finished);
    masm.bind(&arrayNotEmpty);
  }

  Label vmCall;

  // Otherwise, handle array length 1 case.
  masm.branch32(Assembler::NotEqual, lengthAddr, Imm32(1), &vmCall);

  // But only if initializedLength is also 1.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::NotEqual, initLength, Imm32(1), &vmCall);

  // And only if elem0 is a string.
  Address elementAddr(scratch, 0);
  masm.branchTestString(Assembler::NotEqual, elementAddr, &vmCall);

  // Store the value.
  masm.loadValue(elementAddr, output.valueReg());
  masm.jump(&finished);

  // Otherwise call into the VM.
  {
    masm.bind(&vmCall);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(sep);
    masm.Push(obj);

    using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
    callVM<Fn, jit::ArrayJoin>(masm);

    stubFrame.leave(masm);

    masm.tagValue(JSVAL_TYPE_STRING, ReturnReg, output.valueReg());
  }

  masm.bind(&finished);

  return true;
}
bool BaselineCacheIRCompiler::emitPackedArraySliceResult(
    uint32_t templateObjectOffset, ObjOperandId arrayId, Int32OperandId beginId,
    Int32OperandId endId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);

  Register array = allocator.useRegister(masm, arrayId);
  Register begin = allocator.useRegister(masm, beginId);
  Register end = allocator.useRegister(masm, endId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchArrayIsNotPacked(array, scratch1, scratch2, failure->label());

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch1);

  // Don't attempt to pre-allocate the object, instead always use the slow
  // path.
  ImmPtr result(nullptr);

  masm.Push(result);
  masm.Push(end);
  masm.Push(begin);
  masm.Push(array);

  using Fn =
      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
  callVM<Fn, ArraySliceDense>(masm);

  stubFrame.leave(masm);

  masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, output.valueReg());
  return true;
}
bool BaselineCacheIRCompiler::emitArgumentsSliceResult(
    uint32_t templateObjectOffset, ObjOperandId argsId, Int32OperandId beginId,
    Int32OperandId endId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register args = allocator.useRegister(masm, argsId);
  Register begin = allocator.useRegister(masm, beginId);
  Register end = allocator.useRegister(masm, endId);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Don't attempt to pre-allocate the object, instead always use the slow path.
  ImmPtr result(nullptr);

  masm.Push(result);
  masm.Push(end);
  masm.Push(begin);
  masm.Push(args);

  using Fn =
      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
  callVM<Fn, ArgumentsSliceDense>(masm);

  stubFrame.leave(masm);

  masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, output.valueReg());
  return true;
}
bool BaselineCacheIRCompiler::emitIsArrayResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  ValueOperand val = allocator.useValueRegister(masm, inputId);

  allocator.discardStack(masm);

  Label isNotArray;
  // Primitives are never Arrays.
  masm.fallibleUnboxObject(val, scratch1, &isNotArray);

  Label isArray;
  masm.branchTestObjClass(Assembler::Equal, scratch1, &ArrayObject::class_,
                          scratch2, scratch1, &isArray);

  // isArray can also return true for Proxy wrapped Arrays.
  masm.branchTestObjectIsProxy(false, scratch1, scratch2, &isNotArray);

  Label done;
  {
    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch2);

    masm.Push(scratch1);

    using Fn = bool (*)(JSContext*, HandleObject, bool*);
    callVM<Fn, js::IsArrayFromJit>(masm);

    stubFrame.leave(masm);

    masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
    masm.jump(&done);
  }

  masm.bind(&isNotArray);
  masm.moveValue(BooleanValue(false), output.valueReg());
  masm.jump(&done);

  masm.bind(&isArray);
  masm.moveValue(BooleanValue(true), output.valueReg());

  masm.bind(&done);
  return true;
}
bool BaselineCacheIRCompiler::emitIsTypedArrayResult(ObjOperandId objId,
                                                     bool isPossiblyWrapped) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  allocator.discardStack(masm);

  Label notTypedArray, isProxy, done;
  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchIfClassIsNotTypedArray(scratch, &notTypedArray);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&notTypedArray);
  if (isPossiblyWrapped) {
    masm.branchTestClassIsProxy(true, scratch, &isProxy);
  }
  masm.moveValue(BooleanValue(false), output.valueReg());

  if (isPossiblyWrapped) {
    masm.jump(&done);

    masm.bind(&isProxy);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(obj);

    using Fn = bool (*)(JSContext*, JSObject*, bool*);
    callVM<Fn, jit::IsPossiblyWrappedTypedArray>(masm);

    stubFrame.leave(masm);

    masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
  }

  masm.bind(&done);
  return true;
}
bool BaselineCacheIRCompiler::emitLoadStringCharResult(
    StringOperandId strId, Int32OperandId indexId,
    StringCharOutOfBounds outOfBounds) {
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  // Bounds check, load string char.
  Label done;
  Label loadFailed;
  if (outOfBounds == StringCharOutOfBounds::Failure) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch3, failure->label());
    masm.loadStringChar(str, index, scratch2, scratch1, scratch3,
                        failure->label());

    allocator.discardStack(masm);
  } else {
    // Discard the stack before jumping to |done|.
    allocator.discardStack(masm);

    if (outOfBounds == StringCharOutOfBounds::EmptyString) {
      // Return the empty string for out-of-bounds access.
      masm.movePtr(ImmGCPtr(cx_->names().empty_), scratch1);
    } else {
      // Return |undefined| for out-of-bounds access.
      masm.moveValue(UndefinedValue(), output.valueReg());
    }

    // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
    // guaranteed to see no nested ropes.
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch3, &done);
    masm.loadStringChar(str, index, scratch2, scratch1, scratch3, &loadFailed);
  }

  // Load StaticString for this char. For larger code units perform a VM call.
  Label vmCall, tagResult;
  masm.lookupStaticString(scratch2, scratch1, cx_->staticStrings(), &vmCall);
  masm.jump(&tagResult);

  if (outOfBounds != StringCharOutOfBounds::Failure) {
    masm.bind(&loadFailed);
    masm.assumeUnreachable("loadStringChar can't fail for linear strings");
  }

  {
    masm.bind(&vmCall);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch3);

    masm.Push(scratch2);

    using Fn = JSLinearString* (*)(JSContext*, int32_t);
    callVM<Fn, js::StringFromCharCode>(masm);

    stubFrame.leave(masm);

    masm.storeCallPointerResult(scratch1);
  }

  if (outOfBounds != StringCharOutOfBounds::UndefinedValue) {
    masm.bind(&tagResult);
    masm.bind(&done);
    masm.tagValue(JSVAL_TYPE_STRING, scratch1, output.valueReg());
  } else {
    masm.bind(&tagResult);
    masm.tagValue(JSVAL_TYPE_STRING, scratch1, output.valueReg());
    masm.bind(&done);
  }
  return true;
}
bool BaselineCacheIRCompiler::emitLoadStringCharResult(StringOperandId strId,
                                                       Int32OperandId indexId,
                                                       bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  auto outOfBounds = handleOOB ? StringCharOutOfBounds::EmptyString
                               : StringCharOutOfBounds::Failure;
  return emitLoadStringCharResult(strId, indexId, outOfBounds);
}

bool BaselineCacheIRCompiler::emitLoadStringAtResult(StringOperandId strId,
                                                     Int32OperandId indexId,
                                                     bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  auto outOfBounds = handleOOB ? StringCharOutOfBounds::UndefinedValue
                               : StringCharOutOfBounds::Failure;
  return emitLoadStringCharResult(strId, indexId, outOfBounds);
}
bool BaselineCacheIRCompiler::emitStringFromCodeResult(Int32OperandId codeId,
                                                       StringCode stringCode) {
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register code = allocator.useRegister(masm, codeId);

  FailurePath* failure = nullptr;
  if (stringCode == StringCode::CodePoint) {
    if (!addFailurePath(&failure)) {
      return false;
    }
  }

  if (stringCode == StringCode::CodePoint) {
    // Note: This condition must match tryAttachStringFromCodePoint to prevent
    // failure loops.
    masm.branch32(Assembler::Above, code, Imm32(unicode::NonBMPMax),
                  failure->label());
  }

  allocator.discardStack(masm);

  // We pre-allocate atoms for the first UNIT_STATIC_LIMIT characters.
  // For code units larger than that, we must do a VM call.
  Label vmCall;
  masm.lookupStaticString(code, scratch, cx_->staticStrings(), &vmCall);

  Label done;
  masm.jump(&done);

  {
    masm.bind(&vmCall);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(code);

    if (stringCode == StringCode::CodeUnit) {
      using Fn = JSLinearString* (*)(JSContext*, int32_t);
      callVM<Fn, js::StringFromCharCode>(masm);
    } else {
      using Fn = JSLinearString* (*)(JSContext*, char32_t);
      callVM<Fn, js::StringFromCodePoint>(masm);
    }

    stubFrame.leave(masm);
    masm.storeCallPointerResult(scratch);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  return true;
}
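
// Note (added): lookupStaticString succeeds for characters with
// pre-allocated static atoms (the first UNIT_STATIC_LIMIT code units
// mentioned above); anything larger takes the stub frame and calls
// StringFromCharCode or StringFromCodePoint, both of which may GC and so
// need the VM-call machinery.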
bool BaselineCacheIRCompiler::emitStringFromCharCodeResult(
    Int32OperandId codeId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  return emitStringFromCodeResult(codeId, StringCode::CodeUnit);
}

bool BaselineCacheIRCompiler::emitStringFromCodePointResult(
    Int32OperandId codeId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  return emitStringFromCodeResult(codeId, StringCode::CodePoint);
}
bool BaselineCacheIRCompiler::emitMathRandomResult(uint32_t rngOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister64 scratch2(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  Address rngAddr(stubAddress(rngOffset));
  masm.loadPtr(rngAddr, scratch1);

  masm.randomDouble(scratch1, scratchFloat, scratch2,
                    output.valueReg().toRegister64());

  if (js::SupportDifferentialTesting()) {
    masm.loadConstantDouble(0.0, scratchFloat);
  }

  masm.boxDouble(scratchFloat, output.valueReg(), scratchFloat);
  return true;
}
bool BaselineCacheIRCompiler::emitReflectGetPrototypeOfResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register obj = allocator.useRegister(masm, objId);

  allocator.discardStack(masm);

  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  masm.loadObjProto(obj, scratch);

  Label hasProto;
  masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);

  // Call into the VM for lazy prototypes.
  Label slow, done;
  masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), &slow);

  masm.moveValue(NullValue(), output.valueReg());
  masm.jump(&done);

  masm.bind(&hasProto);
  masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&slow);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(obj);

    using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
    callVM<Fn, jit::GetPrototypeOf>(masm);

    stubFrame.leave(masm);
  }

  masm.bind(&done);
  return true;
}
bool BaselineCacheIRCompiler::emitHasClassResult(ObjOperandId objId,
                                                 uint32_t claspOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Address claspAddr(stubAddress(claspOffset));
  masm.loadObjClassUnsafe(obj, scratch);
  masm.cmpPtrSet(Assembler::Equal, claspAddr, scratch.get(), scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
void BaselineCacheIRCompiler::emitAtomizeString(Register str, Register temp,
                                                Label* failure) {
  Label isAtom;
  masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), &isAtom);
  {
    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = JSAtom* (*)(JSContext* cx, JSString* str);
    masm.setupUnalignedABICall(temp);
    masm.loadJSContext(temp);
    masm.passABIArg(temp);
    masm.passABIArg(str);
    masm.callWithABI<Fn, jit::AtomizeStringNoGC>();
    masm.storeCallPointerResult(temp);

    LiveRegisterSet ignore;
    ignore.add(temp);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.branchPtr(Assembler::Equal, temp, ImmWord(0), failure);
    masm.mov(temp, str);
  }
  masm.bind(&isAtom);
}
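
// Note (added): the Set/Map stubs below atomize their string key first so
// that equal strings collapse to one atom pointer before hashing;
// AtomizeStringNoGC cannot GC, so a null result (allocation pressure) simply
// diverts to the stub's failure path and the fallback handles it.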
bool BaselineCacheIRCompiler::emitSetHasStringResult(ObjOperandId setId,
                                                     StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register str = allocator.useRegister(masm, strId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  emitAtomizeString(str, scratch1, failure->label());
  masm.prepareHashString(str, scratch1, scratch2);

  masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
bool BaselineCacheIRCompiler::emitMapHasStringResult(ObjOperandId mapId,
                                                     StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register str = allocator.useRegister(masm, strId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  emitAtomizeString(str, scratch1, failure->label());
  masm.prepareHashString(str, scratch1, scratch2);

  masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
bool BaselineCacheIRCompiler::emitMapGetStringResult(ObjOperandId mapId,
                                                     StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register str = allocator.useRegister(masm, strId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  emitAtomizeString(str, scratch1, failure->label());
  masm.prepareHashString(str, scratch1, scratch2);

  masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}
bool BaselineCacheIRCompiler::emitCallNativeSetter(
    ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
    bool sameRealm, uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register receiver = allocator.useRegister(masm, receiverId);
  Address setterAddr(stubAddress(setterOffset));
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Load the callee in the scratch register.
  masm.loadPtr(setterAddr, scratch);

  masm.Push(val);
  masm.Push(receiver);
  masm.Push(scratch);

  using Fn = bool (*)(JSContext*, HandleFunction, HandleObject, HandleValue);
  callVM<Fn, CallNativeSetter>(masm);

  stubFrame.leave(masm);
  return true;
}
bool BaselineCacheIRCompiler::emitCallScriptedSetterShared(
    ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
    bool sameRealm, uint32_t nargsAndFlagsOffset,
    Maybe<uint32_t> icScriptOffset) {
  AutoScratchRegister callee(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
#if defined(JS_CODEGEN_X86)
  Register code = scratch;
#else
  AutoScratchRegister code(allocator, masm);
#endif

  Register receiver = allocator.useRegister(masm, receiverId);
  Address setterAddr(stubAddress(setterOffset));
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  bool isInlined = icScriptOffset.isSome();

  // First, load the callee.
  masm.loadPtr(setterAddr, callee);

  if (isInlined) {
    // If we are calling a trial-inlined setter, guard that the
    // target has a BaselineScript.
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }
    masm.loadBaselineJitCodeRaw(callee, code, failure->label());
  }

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  if (!sameRealm) {
    masm.switchToObjectRealm(callee, scratch);
  }

  // Align the stack such that the JitFrameLayout is aligned on
  // JitStackAlignment.
  masm.alignJitStackBasedOnNArgs(1, /*countIncludesThis = */ false);

  // Setter is called with 1 argument, and |receiver| as thisv. Note that we use
  // Push, not push, so that callJit will align the stack properly on ARM.
  masm.Push(val);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));

  // Push callee.
  masm.Push(callee);

  // Push frame descriptor.
  masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, /* argc = */ 1);

  if (isInlined) {
    // Store icScript in the context.
    Address icScriptAddr(stubAddress(*icScriptOffset));
    masm.loadPtr(icScriptAddr, scratch);
    masm.storeICScriptInJSContext(scratch);
  }

  // Load the jitcode pointer.
  if (isInlined) {
    // On non-x86 platforms, this pointer is still in a register
    // after guarding on it above. On x86, we don't have enough
    // registers and have to reload it here.
#ifdef JS_CODEGEN_X86
    masm.loadBaselineJitCodeRaw(callee, code);
#endif
  } else {
    masm.loadJitCodeRaw(callee, code);
  }

  // Handle arguments underflow. The rhs value is no longer needed and
  // can be used as scratch.
  Label noUnderflow;
  Register scratch2 = val.scratchReg();
  masm.loadFunctionArgCount(callee, scratch2);
  masm.branch32(Assembler::BelowOrEqual, scratch2, Imm32(1), &noUnderflow);

  // Call the arguments rectifier.
  ArgumentsRectifierKind kind = isInlined
                                    ? ArgumentsRectifierKind::TrialInlining
                                    : ArgumentsRectifierKind::Normal;
  TrampolinePtr argumentsRectifier =
      cx_->runtime()->jitRuntime()->getArgumentsRectifier(kind);
  masm.movePtr(argumentsRectifier, code);

  masm.bind(&noUnderflow);
  masm.callJit(code);

  stubFrame.leave(masm);

  if (!sameRealm) {
    masm.switchToBaselineFrameRealm(R1.scratchReg());
  }

  return true;
}
bool BaselineCacheIRCompiler::emitCallScriptedSetter(
    ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
    bool sameRealm, uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<uint32_t> icScriptOffset = mozilla::Nothing();
  return emitCallScriptedSetterShared(receiverId, setterOffset, rhsId,
                                      sameRealm, nargsAndFlagsOffset,
                                      icScriptOffset);
}

bool BaselineCacheIRCompiler::emitCallInlinedSetter(
    ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
    uint32_t icScriptOffset, bool sameRealm, uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitCallScriptedSetterShared(receiverId, setterOffset, rhsId,
                                      sameRealm, nargsAndFlagsOffset,
                                      mozilla::Some(icScriptOffset));
}
bool BaselineCacheIRCompiler::emitCallDOMSetter(ObjOperandId objId,
                                                uint32_t jitInfoOffset,
                                                ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);
  Address jitInfoAddr(stubAddress(jitInfoOffset));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Load the JSJitInfo in the scratch register.
  masm.loadPtr(jitInfoAddr, scratch);

  masm.Push(val);
  masm.Push(obj);
  masm.Push(scratch);

  using Fn = bool (*)(JSContext*, const JSJitInfo*, HandleObject, HandleValue);
  callVM<Fn, CallDOMSetter>(masm);

  stubFrame.leave(masm);
  return true;
}
bool BaselineCacheIRCompiler::emitCallSetArrayLength(ObjOperandId objId,
                                                     bool strict,
                                                     ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
  callVM<Fn, jit::SetArrayLength>(masm);

  stubFrame.leave(masm);
  return true;
}
bool BaselineCacheIRCompiler::emitProxySet(ObjOperandId objId,
                                           uint32_t idOffset,
                                           ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);
  Address idAddr(stubAddress(idOffset));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Load the jsid in the scratch register.
  masm.loadPtr(idAddr, scratch);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, ProxySetProperty>(masm);

  stubFrame.leave(masm);
  return true;
}
bool BaselineCacheIRCompiler::emitProxySetByValue(ObjOperandId objId,
                                                  ValOperandId idId,
                                                  ValOperandId rhsId,
                                                  bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  allocator.discardStack(masm);

  // We need a scratch register but we don't have any registers available on
  // x86, so temporarily store |obj| in the frame's scratch slot.
  int scratchOffset = BaselineFrame::reverseOffsetOfScratchValue();
  masm.storePtr(obj, Address(baselineFrameReg(), scratchOffset));

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, obj);

  // Restore |obj|. Because we entered a stub frame we first have to load
  // the original frame pointer.
  masm.loadPtr(Address(FramePointer, 0), obj);
  masm.loadPtr(Address(obj, scratchOffset), obj);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, ProxySetPropertyByValue>(masm);

  stubFrame.leave(masm);
  return true;
}
bool BaselineCacheIRCompiler::emitCallAddOrUpdateSparseElementHelper(
    ObjOperandId objId, Int32OperandId idId, ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register id = allocator.useRegister(masm, idId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(id);
  masm.Push(obj);

  using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
                      HandleValue v, bool strict);
  callVM<Fn, AddOrUpdateSparseElementHelper>(masm);

  stubFrame.leave(masm);
  return true;
}
bool BaselineCacheIRCompiler::emitMegamorphicSetElement(ObjOperandId objId,
                                                        ValOperandId idId,
                                                        ValOperandId rhsId,
                                                        bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

#ifdef JS_CODEGEN_X86
  allocator.discardStack(masm);
  // We need a scratch register but we don't have any registers available on
  // x86, so temporarily store |obj| in the frame's scratch slot.
  int scratchOffset = BaselineFrame::reverseOffsetOfScratchValue();
  masm.storePtr(obj, Address(baselineFrameReg_, scratchOffset));

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, obj);

  // Restore |obj|. Because we entered a stub frame we first have to load
  // the original frame pointer.
  masm.loadPtr(Address(FramePointer, 0), obj);
  masm.loadPtr(Address(obj, scratchOffset), obj);
#else
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);
  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);
#endif

  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, SetElementMegamorphic<false>>(masm);

  stubFrame.leave(masm);
  return true;
}

bool BaselineCacheIRCompiler::emitReturnFromIC() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  allocator.discardStack(masm);
  if (JitOptions.enableICFramePointers) {
    PopICFrameRegs(masm);
  }
  EmitReturnFromIC(masm);
  return true;
}

bool BaselineCacheIRCompiler::emitLoadArgumentFixedSlot(ValOperandId resultId,
                                                        uint8_t slotIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand resultReg = allocator.defineValueRegister(masm, resultId);
  Address addr = allocator.addressOf(masm, BaselineFrameSlot(slotIndex));
  masm.loadValue(addr, resultReg);
  return true;
}

bool BaselineCacheIRCompiler::emitLoadArgumentDynamicSlot(
    ValOperandId resultId, Int32OperandId argcId, uint8_t slotIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand resultReg = allocator.defineValueRegister(masm, resultId);
  Register argcReg = allocator.useRegister(masm, argcId);
  BaseValueIndex addr =
      allocator.addressOf(masm, argcReg, BaselineFrameSlot(slotIndex));
  masm.loadValue(addr, resultReg);
  return true;
}
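
// A worked example of the dynamic-slot addressing above, assuming the usual
// BaseValueIndex scaling (base + index * sizeof(Value) + constant offset):
// with |slotIndex| == 0 the loaded address lies |argc| Values above stack
// slot 0, so the same CacheIR op can reach a fixed position (for example
// the callee of a call IC) no matter how many arguments sit on top of it.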

bool BaselineCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape(
    ValOperandId expandoId, uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand val = allocator.useValueRegister(masm, expandoId);
  AutoScratchRegister shapeScratch(allocator, masm);
  AutoScratchRegister objScratch(allocator, masm);
  Address shapeAddr(stubAddress(shapeOffset));

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.branchTestUndefined(Assembler::Equal, val, &done);

  masm.debugAssertIsObject(val);
  masm.loadPtr(shapeAddr, shapeScratch);
  masm.unboxObject(val, objScratch);
  // The expando object is not used in this case, so we don't need Spectre
  // mitigations.
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch,
                                              shapeScratch, failure->label());

  masm.bind(&done);
  return true;
}

bool BaselineCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration(
    ObjOperandId objId, uint32_t expandoAndGenerationOffset,
    uint32_t generationOffset, ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Address expandoAndGenerationAddr(stubAddress(expandoAndGenerationOffset));
  Address generationAddr(stubAddress(generationOffset));

  AutoScratchRegister scratch(allocator, masm);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address expandoAddr(scratch,
                      js::detail::ProxyReservedSlots::offsetOfPrivateSlot());

  // Load the ExpandoAndGeneration* in the output scratch register and guard
  // it matches the proxy's ExpandoAndGeneration.
  masm.loadPtr(expandoAndGenerationAddr, output.scratchReg());
  masm.branchPrivatePtr(Assembler::NotEqual, expandoAddr, output.scratchReg(),
                        failure->label());

  // Guard expandoAndGeneration->generation matches the expected generation.
  masm.branch64(
      Assembler::NotEqual,
      Address(output.scratchReg(), ExpandoAndGeneration::offsetOfGeneration()),
      generationAddr, scratch, failure->label());

  // Load expandoAndGeneration->expando into the output Value register.
  masm.loadValue(
      Address(output.scratchReg(), ExpandoAndGeneration::offsetOfExpando()),
      output);
  return true;
}

bool BaselineCacheIRCompiler::init(CacheKind kind) {
  if (!allocator.init()) {
    return false;
  }

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(numInputs == NumInputsForCacheKind(kind));

  // Baseline passes the first 2 inputs in R0/R1, other Values are stored on
  // the stack.
  size_t numInputsInRegs = std::min(numInputs, size_t(2));
  AllocatableGeneralRegisterSet available =
      BaselineICAvailableGeneralRegs(numInputsInRegs);

  switch (kind) {
    case CacheKind::NewArray:
    case CacheKind::NewObject:
    case CacheKind::GetIntrinsic:
      MOZ_ASSERT(numInputs == 0);
      outputUnchecked_.emplace(R0);
      break;
    case CacheKind::GetProp:
    case CacheKind::TypeOf:
    case CacheKind::ToPropertyKey:
    case CacheKind::GetIterator:
    case CacheKind::OptimizeSpreadCall:
    case CacheKind::OptimizeGetIterator:
    case CacheKind::ToBool:
    case CacheKind::UnaryArith:
      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, R0);
      outputUnchecked_.emplace(R0);
      break;
    case CacheKind::Compare:
    case CacheKind::GetElem:
    case CacheKind::GetPropSuper:
    case CacheKind::In:
    case CacheKind::HasOwn:
    case CacheKind::CheckPrivateField:
    case CacheKind::InstanceOf:
    case CacheKind::BinaryArith:
      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, R0);
      allocator.initInputLocation(1, R1);
      outputUnchecked_.emplace(R0);
      break;
    case CacheKind::SetProp:
      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, R0);
      allocator.initInputLocation(1, R1);
      break;
    case CacheKind::GetElemSuper:
      MOZ_ASSERT(numInputs == 3);
      allocator.initInputLocation(0, BaselineFrameSlot(0));
      allocator.initInputLocation(1, R1);
      allocator.initInputLocation(2, R0);
      outputUnchecked_.emplace(R0);
      break;
    case CacheKind::SetElem:
      MOZ_ASSERT(numInputs == 3);
      allocator.initInputLocation(0, R0);
      allocator.initInputLocation(1, R1);
      allocator.initInputLocation(2, BaselineFrameSlot(0));
      break;
    case CacheKind::GetName:
    case CacheKind::BindName:
      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_OBJECT);
#if defined(JS_NUNBOX32)
      // availableGeneralRegs can't know that GetName/BindName is only using
      // the payloadReg and not typeReg on x86.
      available.add(R0.typeReg());
#endif
      outputUnchecked_.emplace(R0);
      break;
    case CacheKind::Call:
      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_INT32);
#if defined(JS_NUNBOX32)
      // availableGeneralRegs can't know that Call is only using
      // the payloadReg and not typeReg on x86.
      available.add(R0.typeReg());
#endif
      outputUnchecked_.emplace(R0);
      break;
    case CacheKind::CloseIter:
      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_OBJECT);
#if defined(JS_NUNBOX32)
      // availableGeneralRegs can't know that CloseIter is only using
      // the payloadReg and not typeReg on x86.
      available.add(R0.typeReg());
#endif
      break;
  }

  // Baseline doesn't allocate float registers so none of them are live.
  liveFloatRegs_ = LiveFloatRegisterSet(FloatRegisterSet());

  if (JitOptions.enableICFramePointers) {
    baselineFrameReg_ = available.takeAny();
  }

  allocator.initAvailableRegs(available);
  return true;
}
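
// For example, a GetElem IC has two Value inputs (object and key): they
// arrive in R0 and R1 and the result Value is written back into R0. A
// SetElem IC has three inputs, so the third (the value being stored) stays
// in baseline frame slot 0, as set up in the switch above.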

static void ResetEnteredCounts(const ICEntry* icEntry) {
  ICStub* stub = icEntry->firstStub();
  while (true) {
    stub->resetEnteredCount();
    if (stub->isFallback()) {
      return;
    }
    stub = stub->toCacheIRStub()->next();
  }
}

static const uint32_t MaxFoldedShapes = 16;

const JSClass ShapeListObject::class_ = {"JIT ShapeList", 0, &classOps_};

const JSClassOps ShapeListObject::classOps_ = {
    nullptr,                 // addProperty
    nullptr,                 // delProperty
    nullptr,                 // enumerate
    nullptr,                 // newEnumerate
    nullptr,                 // resolve
    nullptr,                 // mayResolve
    nullptr,                 // finalize
    nullptr,                 // call
    nullptr,                 // construct
    ShapeListObject::trace,  // trace
};

/* static */ ShapeListObject* ShapeListObject::create(JSContext* cx) {
  NativeObject* obj = NewTenuredObjectWithGivenProto(cx, &class_, nullptr);
  if (!obj) {
    return nullptr;
  }

  // Register this object so the GC can sweep its weak pointers.
  if (!cx->zone()->registerObjectWithWeakPointers(obj)) {
    ReportOutOfMemory(cx);
    return nullptr;
  }

  return &obj->as<ShapeListObject>();
}

Shape* ShapeListObject::get(uint32_t index) {
  Value value = ListObject::get(index);
  return static_cast<Shape*>(value.toPrivate());
}

void ShapeListObject::trace(JSTracer* trc, JSObject* obj) {
  if (trc->traceWeakEdges()) {
    obj->as<ShapeListObject>().traceWeak(trc);
  }
}

bool ShapeListObject::traceWeak(JSTracer* trc) {
  uint32_t length = getDenseInitializedLength();
  if (length == 0) {
    return false;  // Object may be uninitialized.
  }

  const HeapSlot* src = elements_;
  const HeapSlot* end = src + length;
  HeapSlot* dst = elements_;
  while (src != end) {
    Shape* shape = static_cast<Shape*>(src->toPrivate());
    MOZ_ASSERT(shape->is<Shape>());
    if (TraceManuallyBarrieredWeakEdge(trc, &shape, "ShapeListObject shape")) {
      dst->unbarrieredSet(PrivateValue(shape));
      dst++;
    }
    src++;
  }

  MOZ_ASSERT(dst <= end);
  length = dst - elements_;
  setDenseInitializedLength(length);

  return length != 0;
}
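
// The loop above is a two-pointer compaction: |src| visits every entry
// while |dst| only advances for entries whose Shape survived the weak
// trace. For example, if the list holds shapes [A, B, C] and B is swept,
// A and C are written back in place and the initialized length drops to 2;
// the function then reports whether any entries remain.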

bool js::jit::TryFoldingStubs(JSContext* cx, ICFallbackStub* fallback,
                              JSScript* script, ICScript* icScript) {
  ICEntry* icEntry = icScript->icEntryForStub(fallback);
  ICStub* entryStub = icEntry->firstStub();

  // Don't fold unless there are at least two stubs.
  if (entryStub == fallback) {
    return true;
  }
  ICCacheIRStub* firstStub = entryStub->toCacheIRStub();
  if (firstStub->next()->isFallback()) {
    return true;
  }

  const uint8_t* firstStubData = firstStub->stubDataStart();
  const CacheIRStubInfo* stubInfo = firstStub->stubInfo();

  // Check to see if:
  //   a) all of the stubs in this chain have the exact same code.
  //   b) all of the stubs have the same stub field data, except
  //      for a single GuardShape where they differ.
  //   c) at least one stub after the first has a non-zero entry count.
  //   d) All shapes in the GuardShape have the same realm.
  //
  // If all of these conditions hold, then we generate a single stub
  // that covers all the existing cases by replacing GuardShape with
  // GuardMultipleShapes.

  uint32_t numActive = 0;
  Maybe<uint32_t> foldableFieldOffset;
  RootedValue shape(cx);
  RootedValueVector shapeList(cx);

  // Try to add a shape to the list. Can fail on OOM or for cross-realm shapes.
  // Returns true if the shape was successfully added to the list, and false
  // (with no pending exception) otherwise.
  auto addShape = [&shapeList, cx](uintptr_t rawShape) -> bool {
    Shape* shape = reinterpret_cast<Shape*>(rawShape);

    // Only add same realm shapes.
    if (shape->realm() != cx->realm()) {
      return false;
    }
    gc::ReadBarrier(shape);

    if (!shapeList.append(PrivateValue(shape))) {
      cx->recoverFromOutOfMemory();
      return false;
    }
    return true;
  };

  for (ICCacheIRStub* other = firstStub->nextCacheIR(); other;
       other = other->nextCacheIR()) {
    // Verify that the stubs share the same code.
    if (other->stubInfo() != stubInfo) {
      return true;
    }
    const uint8_t* otherStubData = other->stubDataStart();

    if (other->enteredCount() > 0) {
      numActive++;
    }

    uint32_t fieldIndex = 0;
    size_t offset = 0;
    while (stubInfo->fieldType(fieldIndex) != StubField::Type::Limit) {
      StubField::Type fieldType = stubInfo->fieldType(fieldIndex);

      if (StubField::sizeIsWord(fieldType)) {
        uintptr_t firstRaw = stubInfo->getStubRawWord(firstStubData, offset);
        uintptr_t otherRaw = stubInfo->getStubRawWord(otherStubData, offset);

        if (firstRaw != otherRaw) {
          if (fieldType != StubField::Type::WeakShape) {
            // Case 1: a field differs that is not a Shape. We only support
            // folding GuardShape to GuardMultipleShapes.
            return true;
          }
          if (foldableFieldOffset.isNothing()) {
            // Case 2: this is the first field where the stub data differs.
            foldableFieldOffset.emplace(offset);
            if (!addShape(firstRaw) || !addShape(otherRaw)) {
              return true;
            }
          } else if (*foldableFieldOffset == offset) {
            // Case 3: this is the corresponding offset in a different stub.
            if (!addShape(otherRaw)) {
              return true;
            }
          } else {
            // Case 4: we have found more than one field that differs.
            return true;
          }
        }
      } else {
        MOZ_ASSERT(StubField::sizeIsInt64(fieldType));

        // We do not support folding any ops with int64-sized fields.
        if (stubInfo->getStubRawInt64(firstStubData, offset) !=
            stubInfo->getStubRawInt64(otherStubData, offset)) {
          return true;
        }
      }

      offset += StubField::sizeInBytes(fieldType);
      fieldIndex++;
    }
  }

  // We should never attach two completely identical stubs.
  MOZ_ASSERT(foldableFieldOffset.isSome());

  if (numActive == 0) {
    return true;
  }

  // Clone the CacheIR, replacing GuardShape with GuardMultipleShapes.
  CacheIRWriter writer(cx);
  CacheIRReader reader(stubInfo);
  CacheIRCloner cloner(firstStub);

  // Initialize the operands.
  CacheKind cacheKind = stubInfo->kind();
  for (uint32_t i = 0; i < NumInputsForCacheKind(cacheKind); i++) {
    writer.setInputOperandId(i);
  }

  bool success = false;
  while (reader.more()) {
    CacheOp op = reader.readOp();
    switch (op) {
      case CacheOp::GuardShape: {
        ObjOperandId objId = reader.objOperandId();
        uint32_t shapeOffset = reader.stubOffset();
        if (shapeOffset == *foldableFieldOffset) {
          // Ensure that the allocation of the ShapeListObject doesn't trigger
          // a GC and free the stubInfo we're currently reading. Note that
          // AutoKeepJitScripts isn't sufficient, because optimized stubs can
          // be discarded even if the JitScript is preserved.
          gc::AutoSuppressGC suppressGC(cx);

          Rooted<ShapeListObject*> shapeObj(cx, ShapeListObject::create(cx));
          if (!shapeObj) {
            return false;
          }
          for (uint32_t i = 0; i < shapeList.length(); i++) {
            if (!shapeObj->append(cx, shapeList[i])) {
              return false;
            }
            MOZ_ASSERT(static_cast<Shape*>(shapeList[i].toPrivate())->realm() ==
                       shapeObj->realm());
          }

          writer.guardMultipleShapes(objId, shapeObj);
          success = true;
        } else {
          WeakHeapPtr<Shape*>& ptr =
              stubInfo->getStubField<StubField::Type::WeakShape>(firstStub,
                                                                 shapeOffset);
          writer.guardShape(objId, ptr.unbarrieredGet());
        }
        break;
      }
      default:
        cloner.cloneOp(op, reader, writer);
        break;
    }
  }
  if (!success) {
    // If the shape field that differed was not part of a GuardShape,
    // we can't fold these stubs together.
    return true;
  }

  // Replace the existing stubs with the new folded stub.
  fallback->discardStubs(cx->zone(), icEntry);

  ICAttachResult result = AttachBaselineCacheIRStub(
      cx, writer, cacheKind, script, icScript, fallback, "StubFold");
  if (result == ICAttachResult::OOM) {
    ReportOutOfMemory(cx);
    return false;
  }
  MOZ_ASSERT(result == ICAttachResult::Attached);

  fallback->setMayHaveFoldedStub();
  return true;
}
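
// A concrete example of the folding above: a property access such as |o.x|
// that has seen several objects differing only in their Shape attaches one
// GuardShape stub per shape. Once the chain qualifies, it is rewritten into
// a single stub that guards on a ShapeListObject via GuardMultipleShapes,
// so further shapes can be appended to the list (see AddToFoldedStub below)
// instead of growing the stub chain.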

static bool AddToFoldedStub(JSContext* cx, const CacheIRWriter& writer,
                            ICScript* icScript, ICFallbackStub* fallback) {
  ICEntry* icEntry = icScript->icEntryForStub(fallback);
  ICStub* entryStub = icEntry->firstStub();

  // We only update folded stubs if they're the only stub in the IC.
  if (entryStub == fallback) {
    return false;
  }
  ICCacheIRStub* stub = entryStub->toCacheIRStub();
  if (!stub->next()->isFallback()) {
    return false;
  }

  const CacheIRStubInfo* stubInfo = stub->stubInfo();
  const uint8_t* stubData = stub->stubDataStart();

  Maybe<uint32_t> shapeFieldOffset;
  RootedValue newShape(cx);
  Rooted<ShapeListObject*> foldedShapes(cx);

  CacheIRReader stubReader(stubInfo);
  CacheIRReader newReader(writer);
  while (newReader.more() && stubReader.more()) {
    CacheOp newOp = newReader.readOp();
    CacheOp stubOp = stubReader.readOp();
    switch (stubOp) {
      case CacheOp::GuardMultipleShapes: {
        // Check that the new stub has a corresponding GuardShape.
        if (newOp != CacheOp::GuardShape) {
          return false;
        }

        // Check that the object being guarded is the same.
        if (newReader.objOperandId() != stubReader.objOperandId()) {
          return false;
        }

        // Check that the field offset is the same.
        uint32_t newShapeOffset = newReader.stubOffset();
        uint32_t stubShapesOffset = stubReader.stubOffset();
        if (newShapeOffset != stubShapesOffset) {
          return false;
        }
        MOZ_ASSERT(shapeFieldOffset.isNothing());
        shapeFieldOffset.emplace(newShapeOffset);

        // Get the shape from the new stub.
        StubField shapeField =
            writer.readStubField(newShapeOffset, StubField::Type::WeakShape);
        Shape* shape = reinterpret_cast<Shape*>(shapeField.asWord());
        newShape = PrivateValue(shape);

        // Get the shape array from the old stub.
        JSObject* shapeList = stubInfo->getStubField<StubField::Type::JSObject>(
            stub, stubShapesOffset);
        foldedShapes = &shapeList->as<ShapeListObject>();
        MOZ_ASSERT(foldedShapes->compartment() == shape->compartment());

        // Don't add a shape if it's from a different realm than the first
        // shape.
        //
        // Since the list was created in the realm which guarded all the shapes
        // added to it, we can use its realm to check and ensure we're not
        // adding a cross-realm shape.
        //
        // The assert verifies this property by checking the first element has
        // the same realm (and since everything in the list has the same realm,
        // checking the first element suffices).
        MOZ_ASSERT_IF(!foldedShapes->isEmpty(),
                      foldedShapes->get(0)->realm() == foldedShapes->realm());
        if (foldedShapes->realm() != shape->realm()) {
          return false;
        }
        break;
      }
      default: {
        // Check that the op is the same.
        if (newOp != stubOp) {
          return false;
        }

        // Check that the arguments are the same.
        uint32_t argLength = CacheIROpInfos[size_t(newOp)].argLength;
        for (uint32_t i = 0; i < argLength; i++) {
          if (newReader.readByte() != stubReader.readByte()) {
            return false;
          }
        }
        break;
      }
    }
  }

  if (shapeFieldOffset.isNothing()) {
    // The stub did not contain the GuardMultipleShapes op. This can happen if
    // a folded stub has been discarded by GC sweeping.
    return false;
  }

  // Check to verify that all the other stub fields are the same.
  if (!writer.stubDataEqualsIgnoring(stubData, *shapeFieldOffset)) {
    return false;
  }

  // Limit the maximum number of shapes we will add before giving up.
  if (foldedShapes->length() == MaxFoldedShapes) {
    return false;
  }

  if (!foldedShapes->append(cx, newShape)) {
    cx->recoverFromOutOfMemory();
    return false;
  }

  return true;
}

ICAttachResult js::jit::AttachBaselineCacheIRStub(
    JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
    JSScript* outerScript, ICScript* icScript, ICFallbackStub* stub,
    const char* name) {
  // We shouldn't GC or report OOM (or any other exception) here.
  AutoAssertNoPendingException aanpe(cx);
  JS::AutoCheckCannotGC nogc;

  if (writer.tooLarge()) {
    return ICAttachResult::TooLarge;
  }
  if (writer.oom()) {
    return ICAttachResult::OOM;
  }
  MOZ_ASSERT(!writer.failed());

  // Just a sanity check: the caller should ensure we don't attach an
  // unlimited number of stubs.
#ifdef DEBUG
  static const size_t MaxOptimizedCacheIRStubs = 16;
  MOZ_ASSERT(stub->numOptimizedStubs() < MaxOptimizedCacheIRStubs);
#endif

  constexpr uint32_t stubDataOffset = sizeof(ICCacheIRStub);
  static_assert(stubDataOffset % sizeof(uint64_t) == 0,
                "Stub fields must be aligned");

  JitZone* jitZone = cx->zone()->jitZone();

  // Check if we already have JitCode for this stub.
  CacheIRStubInfo* stubInfo;
  CacheIRStubKey::Lookup lookup(kind, ICStubEngine::Baseline,
                                writer.codeStart(), writer.codeLength());

  JitCode* code = jitZone->getBaselineCacheIRStubCode(lookup, &stubInfo);
  if (!code && !IsPortableBaselineInterpreterEnabled()) {
    // We have to generate stub code.
    TempAllocator temp(&cx->tempLifoAlloc());
    JitContext jctx(cx);
    BaselineCacheIRCompiler comp(cx, temp, writer, stubDataOffset);
    if (!comp.init(kind)) {
      return ICAttachResult::OOM;
    }

    code = comp.compile();
    if (!code) {
      return ICAttachResult::OOM;
    }

    comp.perfSpewer().saveProfile(code, name);

    // Allocate the shared CacheIRStubInfo. Note that the
    // putBaselineCacheIRStubCode call below will transfer ownership
    // to the stub code HashMap, so we don't have to worry about freeing
    // it below.
    MOZ_ASSERT(!stubInfo);
    stubInfo =
        CacheIRStubInfo::New(kind, ICStubEngine::Baseline, comp.makesGCCalls(),
                             stubDataOffset, writer);
    if (!stubInfo) {
      return ICAttachResult::OOM;
    }

    CacheIRStubKey key(stubInfo);
    if (!jitZone->putBaselineCacheIRStubCode(lookup, key, code)) {
      return ICAttachResult::OOM;
    }
  } else if (!stubInfo) {
    MOZ_ASSERT(IsPortableBaselineInterpreterEnabled());

    // Portable baseline interpreter case. We want to generate the
    // CacheIR bytecode but not compile it to native code.
    //
    // We lie that all stubs make GC calls; this is simpler than
    // iterating over ops to determine if it is actually the case, and
    // we don't invoke the BaselineCacheIRCompiler so we otherwise
    // don't know for sure.
    stubInfo = CacheIRStubInfo::New(kind, ICStubEngine::Baseline,
                                    /* makes GC calls = */ true, stubDataOffset,
                                    writer);
    if (!stubInfo) {
      return ICAttachResult::OOM;
    }

    CacheIRStubKey key(stubInfo);
    if (!jitZone->putBaselineCacheIRStubCode(lookup, key,
                                             /* stubCode = */ nullptr)) {
      return ICAttachResult::OOM;
    }
  }
  MOZ_ASSERT_IF(IsBaselineInterpreterEnabled(), code);
  MOZ_ASSERT(stubInfo);
  MOZ_ASSERT(stubInfo->stubDataSize() == writer.stubDataSize());

  ICEntry* icEntry = icScript->icEntryForStub(stub);

  // Ensure we don't attach duplicate stubs. This can happen if a stub failed
  // for some reason and the IR generator doesn't check for exactly the same
  // conditions.
  for (ICStub* iter = icEntry->firstStub(); iter != stub;
       iter = iter->toCacheIRStub()->next()) {
    auto otherStub = iter->toCacheIRStub();
    if (otherStub->stubInfo() != stubInfo) {
      continue;
    }
    if (!writer.stubDataEquals(otherStub->stubDataStart())) {
      continue;
    }

    // We found a stub that's exactly the same as the stub we're about to
    // attach. Just return nullptr, the caller should do nothing in this
    // case.
    JitSpew(JitSpew_BaselineICFallback,
            "Tried attaching identical stub for (%s:%u:%u)",
            outerScript->filename(), outerScript->lineno(),
            outerScript->column().oneOriginValue());
    return ICAttachResult::DuplicateStub;
  }

  // Try including this case in an existing folded stub.
  if (stub->mayHaveFoldedStub() &&
      AddToFoldedStub(cx, writer, icScript, stub)) {
    // Instead of adding a new stub, we have added a new case to an existing
    // folded stub. We do not have to invalidate Warp, because the
    // ShapeListObject that stores the cases is shared between baseline and
    // Warp. Reset the entered count for the fallback stub so that we can still
    // transpile, and reset the bailout counter if we have already been
    // transpiled.
    stub->resetEnteredCount();
    JSScript* owningScript = nullptr;
    if (cx->zone()->jitZone()->hasStubFoldingBailoutData(outerScript)) {
      owningScript = cx->zone()->jitZone()->stubFoldingBailoutParent();
    } else {
      owningScript = icScript->isInlined()
                         ? icScript->inliningRoot()->owningScript()
                         : outerScript;
    }
    cx->zone()->jitZone()->clearStubFoldingBailoutData();
    if (stub->usedByTranspiler() && owningScript->hasIonScript()) {
      owningScript->ionScript()->resetNumFixableBailouts();
    } else {
      // Update the last IC counter if this is not a bailout from Ion.
      owningScript->updateLastICStubCounter();
    }
    return ICAttachResult::Attached;
  }

  // Time to allocate and attach a new stub.

  size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();

  void* newStubMem = jitZone->stubSpace()->alloc(bytesNeeded);
  if (!newStubMem) {
    return ICAttachResult::OOM;
  }

  // Resetting the entered counts on the IC chain makes subsequent reasoning
  // about the chain much easier.
  ResetEnteredCounts(icEntry);

  switch (stub->trialInliningState()) {
    case TrialInliningState::Initial:
    case TrialInliningState::Candidate:
      stub->setTrialInliningState(writer.trialInliningState());
      break;
    case TrialInliningState::MonomorphicInlined:
      stub->setTrialInliningState(TrialInliningState::Failure);
      break;
    case TrialInliningState::Inlined:
      stub->setTrialInliningState(TrialInliningState::Failure);
      icScript->removeInlinedChild(stub->pcOffset());
      break;
    case TrialInliningState::Failure:
      break;
  }

  auto newStub = new (newStubMem) ICCacheIRStub(code, stubInfo);
  writer.copyStubData(newStub->stubDataStart());
  newStub->setTypeData(writer.typeData());
  stub->addNewStub(icEntry, newStub);

  JSScript* owningScript = icScript->isInlined()
                               ? icScript->inliningRoot()->owningScript()
                               : outerScript;
  owningScript->updateLastICStubCounter();
  return ICAttachResult::Attached;
}
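
// Note that the CacheIRStubKey lookup above makes the generated JitCode and
// CacheIRStubInfo shared per zone: two ICs whose CacheIR bytecode is
// byte-for-byte identical reuse one compiled stub and differ only in the
// per-stub data copied by copyStubData.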

uint8_t* ICCacheIRStub::stubDataStart() {
  return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
}

bool BaselineCacheIRCompiler::emitCallStringObjectConcatResult(
    ValOperandId lhsId, ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
  ValueOperand rhs = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  masm.pushValue(rhs);
  masm.pushValue(lhs);

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, MutableHandleValue);
  callVM<Fn, DoConcatStringObject>(masm);

  stubFrame.leave(masm);
  return true;
}

// The value of argc entering the call IC is not always the value of
// argc entering the callee. (For example, argc for a spread call IC
// is always 1, but argc for the callee is the length of the array.)
// In these cases, we update argc as part of the call op itself, to
// avoid modifying input operands while it is still possible to fail a
// guard. We also limit callee argc to a reasonable value to avoid
// blowing the stack limit.
bool BaselineCacheIRCompiler::updateArgc(CallFlags flags, Register argcReg,
                                         Register scratch) {
  CallFlags::ArgFormat format = flags.getArgFormat();
  switch (format) {
    case CallFlags::Standard:
      // Standard calls have no extra guards, and argc is already correct.
      return true;
    case CallFlags::FunCall:
      // fun_call has no extra guards, and argc will be corrected in
      // pushFunCallArguments.
      return true;
    case CallFlags::FunApplyNullUndefined:
      // argc must be 0 if null or undefined is passed as second argument to
      // |apply|.
      masm.move32(Imm32(0), argcReg);
      return true;
    default:
      break;
  }

  // We need to guard the length of the arguments.
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load callee argc into scratch.
  switch (flags.getArgFormat()) {
    case CallFlags::Spread:
    case CallFlags::FunApplyArray: {
      // Load the length of the elements.
      BaselineFrameSlot slot(flags.isConstructing());
      masm.unboxObject(allocator.addressOf(masm, slot), scratch);
      masm.loadPtr(Address(scratch, NativeObject::offsetOfElements()), scratch);
      masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);
      break;
    }
    case CallFlags::FunApplyArgsObj: {
      // Load the arguments object length.
      BaselineFrameSlot slot(0);
      masm.unboxObject(allocator.addressOf(masm, slot), scratch);
      masm.loadArgumentsObjectLength(scratch, scratch, failure->label());
      break;
    }
    default:
      MOZ_CRASH("Unknown arg format");
  }

  // Ensure that callee argc does not exceed the limit.
  masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX),
                failure->label());

  // We're past the final guard. Update argc with the new value.
  masm.move32(scratch, argcReg);

  return true;
}
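
// For example, for a spread call |f(...arr)| the IC itself always sees
// argc == 1 (the array), while the callee must see |arr.length| arguments;
// for |f.apply(null, args)| the IC sees argc == 2 but the callee sees
// |args.length|. updateArgc rewrites argcReg to the callee's view only
// after every guard that could still fail has passed.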

void BaselineCacheIRCompiler::pushArguments(Register argcReg,
                                            Register calleeReg,
                                            Register scratch, Register scratch2,
                                            CallFlags flags, uint32_t argcFixed,
                                            bool isJitCall) {
  switch (flags.getArgFormat()) {
    case CallFlags::Standard:
      pushStandardArguments(argcReg, scratch, scratch2, argcFixed, isJitCall,
                            flags.isConstructing());
      break;
    case CallFlags::Spread:
      pushArrayArguments(argcReg, scratch, scratch2, isJitCall,
                         flags.isConstructing());
      break;
    case CallFlags::FunCall:
      pushFunCallArguments(argcReg, calleeReg, scratch, scratch2, argcFixed,
                           isJitCall);
      break;
    case CallFlags::FunApplyArgsObj:
      pushFunApplyArgsObj(argcReg, calleeReg, scratch, scratch2, isJitCall);
      break;
    case CallFlags::FunApplyArray:
      pushArrayArguments(argcReg, scratch, scratch2, isJitCall,
                         /*isConstructing =*/false);
      break;
    case CallFlags::FunApplyNullUndefined:
      pushFunApplyNullUndefinedArguments(calleeReg, isJitCall);
      break;
    default:
      MOZ_CRASH("Invalid arg format");
  }
}

void BaselineCacheIRCompiler::pushStandardArguments(
    Register argcReg, Register scratch, Register scratch2, uint32_t argcFixed,
    bool isJitCall, bool isConstructing) {
  MOZ_ASSERT(enteredStubFrame_);

  // The arguments to the call IC are pushed on the stack left-to-right.
  // Our calling conventions want them right-to-left in the callee, so
  // we duplicate them on the stack in reverse order.

  int additionalArgc = 1 + !isJitCall + isConstructing;
  if (argcFixed < MaxUnrolledArgCopy) {
#ifdef DEBUG
    Label ok;
    masm.branch32(Assembler::Equal, argcReg, Imm32(argcFixed), &ok);
    masm.assumeUnreachable("Invalid argcFixed value");
    masm.bind(&ok);
#endif

    size_t realArgc = argcFixed + additionalArgc;

    if (isJitCall) {
      masm.alignJitStackBasedOnNArgs(realArgc, /*countIncludesThis = */ true);
    }

    for (size_t i = 0; i < realArgc; ++i) {
      masm.pushValue(Address(
          FramePointer, BaselineStubFrameLayout::Size() + i * sizeof(Value)));
    }
  } else {
    MOZ_ASSERT(argcFixed == MaxUnrolledArgCopy);

    // argPtr initially points to the last argument. Skip the stub frame.
    Register argPtr = scratch2;
    Address argAddress(FramePointer, BaselineStubFrameLayout::Size());
    masm.computeEffectiveAddress(argAddress, argPtr);

    // countReg contains the total number of arguments to copy.
    // In addition to the actual arguments, we have to copy hidden arguments.
    // We always have to copy |this|.
    // If we are constructing, we have to copy |newTarget|.
    // If we are not a jit call, we have to copy |callee|.
    // We use a scratch register to avoid clobbering argc, which is an input
    // reg.
    Register countReg = scratch;
    masm.move32(argcReg, countReg);
    masm.add32(Imm32(additionalArgc), countReg);

    // Align the stack such that the JitFrameLayout is aligned on the
    // JitStackAlignment.
    if (isJitCall) {
      masm.alignJitStackBasedOnNArgs(countReg, /*countIncludesThis = */ true);
    }

    // Push all values, starting at the last one.
    Label loop, done;
    masm.branchTest32(Assembler::Zero, countReg, countReg, &done);
    masm.bind(&loop);
    {
      masm.pushValue(Address(argPtr, 0));
      masm.addPtr(Imm32(sizeof(Value)), argPtr);

      masm.branchSub32(Assembler::NonZero, Imm32(1), countReg, &loop);
    }
    masm.bind(&done);
  }
}
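
// Worked example for the copy count above: a non-constructing call to a
// native with argc == 2 duplicates argc + additionalArgc == 2 + (1 + 1 + 0)
// == 4 Values (callee, |this|, arg0, arg1). The same call targeting jit
// code copies only 3, because for jit calls the callee is passed separately
// as the callee token rather than being duplicated here.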

void BaselineCacheIRCompiler::pushArrayArguments(Register argcReg,
                                                 Register scratch,
                                                 Register scratch2,
                                                 bool isJitCall,
                                                 bool isConstructing) {
  MOZ_ASSERT(enteredStubFrame_);

  // Pull the array off the stack before aligning.
  Register startReg = scratch;
  size_t arrayOffset =
      (isConstructing * sizeof(Value)) + BaselineStubFrameLayout::Size();
  masm.unboxObject(Address(FramePointer, arrayOffset), startReg);
  masm.loadPtr(Address(startReg, NativeObject::offsetOfElements()), startReg);

  // Align the stack such that the JitFrameLayout is aligned on the
  // JitStackAlignment.
  if (isJitCall) {
    Register alignReg = argcReg;
    if (isConstructing) {
      // If we are constructing, we must take newTarget into account.
      alignReg = scratch2;
      masm.computeEffectiveAddress(Address(argcReg, 1), alignReg);
    }
    masm.alignJitStackBasedOnNArgs(alignReg, /*countIncludesThis =*/false);
  }

  // Push newTarget, if necessary
  if (isConstructing) {
    masm.pushValue(Address(FramePointer, BaselineStubFrameLayout::Size()));
  }

  // Push arguments: set up endReg to point to &array[argc]
  Register endReg = scratch2;
  BaseValueIndex endAddr(startReg, argcReg);
  masm.computeEffectiveAddress(endAddr, endReg);

  // Copying pre-decrements endReg by 8 until startReg is reached
  Label copyDone;
  Label copyStart;
  masm.bind(&copyStart);
  masm.branchPtr(Assembler::Equal, endReg, startReg, &copyDone);
  masm.subPtr(Imm32(sizeof(Value)), endReg);
  masm.pushValue(Address(endReg, 0));
  masm.jump(&copyStart);
  masm.bind(&copyDone);

  // Push |this|.
  size_t thisvOffset =
      BaselineStubFrameLayout::Size() + (1 + isConstructing) * sizeof(Value);
  masm.pushValue(Address(FramePointer, thisvOffset));

  // Push |callee| if needed.
  if (!isJitCall) {
    size_t calleeOffset =
        BaselineStubFrameLayout::Size() + (2 + isConstructing) * sizeof(Value);
    masm.pushValue(Address(FramePointer, calleeOffset));
  }
}

void BaselineCacheIRCompiler::pushFunApplyNullUndefinedArguments(
    Register calleeReg, bool isJitCall) {
  // argc is already set to 0, so we just have to push |this| and (for native
  // calls) the callee.

  MOZ_ASSERT(enteredStubFrame_);

  // Align the stack such that the JitFrameLayout is aligned on the
  // JitStackAlignment.
  if (isJitCall) {
    masm.alignJitStackBasedOnNArgs(0, /*countIncludesThis =*/false);
  }

  // Push |this|.
  size_t thisvOffset = BaselineStubFrameLayout::Size() + 1 * sizeof(Value);
  masm.pushValue(Address(FramePointer, thisvOffset));

  // Push |callee| if needed.
  if (!isJitCall) {
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
  }
}

void BaselineCacheIRCompiler::pushFunCallArguments(
    Register argcReg, Register calleeReg, Register scratch, Register scratch2,
    uint32_t argcFixed, bool isJitCall) {
  if (argcFixed == 0) {
    if (isJitCall) {
      // Align the stack to 0 args.
      masm.alignJitStackBasedOnNArgs(0, /*countIncludesThis = */ false);
    }

    // Store the new |this|.
    masm.pushValue(UndefinedValue());

    // Store |callee| if needed.
    if (!isJitCall) {
      masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
    }
  } else if (argcFixed < MaxUnrolledArgCopy) {
    // See below for why we subtract 1 from argcFixed.
    argcFixed -= 1;
    masm.sub32(Imm32(1), argcReg);
    pushStandardArguments(argcReg, scratch, scratch2, argcFixed, isJitCall,
                          /*isConstructing =*/false);
  } else {
    Label zeroArgs, done;
    masm.branchTest32(Assembler::Zero, argcReg, argcReg, &zeroArgs);

    // When we call fun_call, the stack looks like the left column (note
    // that newTarget will not be present, because fun_call cannot be a
    // constructor call):
    //
    //      ***Arguments to fun_call***
    //      callee (fun_call)               ***Arguments to target***
    //      this (target function)   -----> callee
    //      arg0 (this of target)    -----> this
    //      arg1 (arg0 of target)    -----> arg0
    //      argN (argN-1 of target)  -----> arg1
    //
    // As demonstrated in the right column, this is exactly what we need
    // the stack to look like when calling pushStandardArguments for target,
    // except with one more argument. If we subtract 1 from argc,
    // everything works out correctly.
    masm.sub32(Imm32(1), argcReg);

    pushStandardArguments(argcReg, scratch, scratch2, argcFixed, isJitCall,
                          /*isConstructing =*/false);

    masm.jump(&done);
    masm.bind(&zeroArgs);

    // The exception is the case where argc == 0:
    //
    //      ***Arguments to fun_call***
    //      callee (fun_call)               ***Arguments to target***
    //      this (target function)   -----> callee
    //      <nothing>                -----> this
    //
    // In this case, we push |undefined| for |this|.

    if (isJitCall) {
      // Align the stack to 0 args.
      masm.alignJitStackBasedOnNArgs(0, /*countIncludesThis = */ false);
    }

    // Store the new |this|.
    masm.pushValue(UndefinedValue());

    // Store |callee| if needed.
    if (!isJitCall) {
      masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
    }

    masm.bind(&done);
  }
}

void BaselineCacheIRCompiler::pushFunApplyArgsObj(Register argcReg,
                                                  Register calleeReg,
                                                  Register scratch,
                                                  Register scratch2,
                                                  bool isJitCall) {
  MOZ_ASSERT(enteredStubFrame_);

  // Load the arguments object off the stack before aligning.
  Register argsReg = scratch;
  masm.unboxObject(Address(FramePointer, BaselineStubFrameLayout::Size()),
                   argsReg);

  // Align the stack such that the JitFrameLayout is aligned on the
  // JitStackAlignment.
  if (isJitCall) {
    masm.alignJitStackBasedOnNArgs(argcReg, /*countIncludesThis =*/false);
  }

  // Load ArgumentsData.
  masm.loadPrivate(Address(argsReg, ArgumentsObject::getDataSlotOffset()),
                   argsReg);

  // We push the arguments onto the stack last-to-first.
  // Compute the bounds of the arguments array.
  Register currReg = scratch2;
  Address argsStartAddr(argsReg, ArgumentsData::offsetOfArgs());
  masm.computeEffectiveAddress(argsStartAddr, argsReg);
  BaseValueIndex argsEndAddr(argsReg, argcReg);
  masm.computeEffectiveAddress(argsEndAddr, currReg);

  // Loop until all arguments have been pushed.
  Label done, loop;
  masm.bind(&loop);
  masm.branchPtr(Assembler::Equal, currReg, argsReg, &done);
  masm.subPtr(Imm32(sizeof(Value)), currReg);

  Address currArgAddr(currReg, 0);
#ifdef DEBUG
  // Arguments are forwarded to the call object if they are closed over.
  // In this case, OVERRIDDEN_ELEMENTS_BIT should be set.
  Label notForwarded;
  masm.branchTestMagic(Assembler::NotEqual, currArgAddr, &notForwarded);
  masm.assumeUnreachable("Should have checked for overridden elements");
  masm.bind(&notForwarded);
#endif
  masm.pushValue(currArgAddr);

  masm.jump(&loop);
  masm.bind(&done);

  // Push arg0 as |this| for call
  masm.pushValue(
      Address(FramePointer, BaselineStubFrameLayout::Size() + sizeof(Value)));

  // Push |callee| if needed.
  if (!isJitCall) {
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
  }
}

void BaselineCacheIRCompiler::pushBoundFunctionArguments(
    Register argcReg, Register calleeReg, Register scratch, Register scratch2,
    CallFlags flags, uint32_t numBoundArgs, bool isJitCall) {
  bool isConstructing = flags.isConstructing();
  uint32_t additionalArgc = 1 + isConstructing;  // |this| and newTarget

  // Calculate total number of Values to push.
  Register countReg = scratch;
  masm.computeEffectiveAddress(Address(argcReg, numBoundArgs + additionalArgc),
                               countReg);

  // Align the stack such that the JitFrameLayout is aligned on the
  // JitStackAlignment.
  if (isJitCall) {
    masm.alignJitStackBasedOnNArgs(countReg, /*countIncludesThis = */ true);
  }

  if (isConstructing) {
    // Push the bound function's target as newTarget.
    Address boundTarget(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
    masm.pushValue(boundTarget);
  }

  // Ensure argPtr initially points to the last argument. Skip the stub frame.
  Register argPtr = scratch2;
  Address argAddress(FramePointer, BaselineStubFrameLayout::Size());
  if (isConstructing) {
    // Skip newTarget.
    argAddress.offset += sizeof(Value);
  }
  masm.computeEffectiveAddress(argAddress, argPtr);

  // Push all supplied arguments, starting at the last one.
  Label loop, done;
  masm.branchTest32(Assembler::Zero, argcReg, argcReg, &done);
  masm.move32(argcReg, countReg);
  masm.bind(&loop);
  {
    masm.pushValue(Address(argPtr, 0));
    masm.addPtr(Imm32(sizeof(Value)), argPtr);

    masm.branchSub32(Assembler::NonZero, Imm32(1), countReg, &loop);
  }
  masm.bind(&done);

  // Push the bound arguments, starting at the last one.
  constexpr size_t inlineArgsOffset =
      BoundFunctionObject::offsetOfFirstInlineBoundArg();
  if (numBoundArgs <= BoundFunctionObject::MaxInlineBoundArgs) {
    for (size_t i = 0; i < numBoundArgs; i++) {
      size_t argIndex = numBoundArgs - i - 1;
      Address argAddr(calleeReg, inlineArgsOffset + argIndex * sizeof(Value));
      masm.pushValue(argAddr);
    }
  } else {
    masm.unboxObject(Address(calleeReg, inlineArgsOffset), scratch);
    masm.loadPtr(Address(scratch, NativeObject::offsetOfElements()), scratch);
    for (size_t i = 0; i < numBoundArgs; i++) {
      size_t argIndex = numBoundArgs - i - 1;
      Address argAddr(scratch, argIndex * sizeof(Value));
      masm.pushValue(argAddr);
    }
  }

  if (isConstructing) {
    // Push the |this| Value. This is either the object we allocated or the
    // JS_UNINITIALIZED_LEXICAL magic value. It's stored in the BaselineFrame,
    // so skip past the stub frame, (unbound) arguments and newTarget.
    BaseValueIndex thisAddress(FramePointer, argcReg,
                               BaselineStubFrameLayout::Size() + sizeof(Value));
    masm.pushValue(thisAddress, scratch);
  } else {
    // Push the bound |this|.
    Address boundThis(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
    masm.pushValue(boundThis);
  }
}
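
// Layout example (a sketch; names are illustrative): for
//   const b = f.bind(null, x, y); new b(z);
// the code above pushes, in order, f (the bound target, as newTarget), the
// supplied argument z, the bound arguments y then x, and finally the |this|
// slot, so the target is entered with argc == 1 + numBoundArgs == 3 and
// arguments (x, y, z).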

bool BaselineCacheIRCompiler::emitCallNativeShared(
    NativeCallType callType, ObjOperandId calleeId, Int32OperandId argcId,
    CallFlags flags, uint32_t argcFixed, Maybe<bool> ignoresReturnValue,
    Maybe<uint32_t> targetOffset) {
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  Register calleeReg = allocator.useRegister(masm, calleeId);
  Register argcReg = allocator.useRegister(masm, argcId);

  bool isConstructing = flags.isConstructing();
  bool isSameRealm = flags.isSameRealm();

  if (!updateArgc(flags, argcReg, scratch)) {
    return false;
  }

  allocator.discardStack(masm);

  // Push a stub frame so that we can perform a non-tail call.
  // Note that this leaves the return address in TailCallReg.
  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  if (!isSameRealm) {
    masm.switchToObjectRealm(calleeReg, scratch);
  }

  pushArguments(argcReg, calleeReg, scratch, scratch2, flags, argcFixed,
                /*isJitCall =*/false);

  // Native functions have the signature:
  //
  //    bool (*)(JSContext*, unsigned, Value* vp)
  //
  // Where vp[0] is space for callee/return value, vp[1] is |this|, and vp[2]
  // onward are the function arguments.

  // Initialize vp.
  masm.moveStackPtrTo(scratch2.get());

  // Construct a native exit frame.
  masm.push(argcReg);

  masm.pushFrameDescriptor(FrameType::BaselineStub);
  masm.push(ICTailCallReg);
  masm.push(FramePointer);
  masm.loadJSContext(scratch);
  masm.enterFakeExitFrameForNative(scratch, scratch, isConstructing);

  // Execute the call.
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(argcReg);
  masm.passABIArg(scratch2);

  switch (callType) {
    case NativeCallType::Native: {
#ifdef JS_SIMULATOR
      // The simulator requires VM calls to be redirected to a special
      // swi instruction to handle them, so we store the redirected
      // pointer in the stub and use that instead of the original one.
      // (See CacheIRWriter::callNativeFunction.)
      Address redirectedAddr(stubAddress(*targetOffset));
      masm.callWithABI(redirectedAddr);
#else
      if (*ignoresReturnValue) {
        masm.loadPrivate(
            Address(calleeReg, JSFunction::offsetOfJitInfoOrScript()),
            calleeReg);
        masm.callWithABI(
            Address(calleeReg, JSJitInfo::offsetOfIgnoresReturnValueNative()));
      } else {
        // This depends on the native function pointer being stored unchanged
        // as a PrivateValue.
        masm.callWithABI(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()));
      }
#endif
      break;
    }
    case NativeCallType::ClassHook: {
      Address nativeAddr(stubAddress(*targetOffset));
      masm.callWithABI(nativeAddr);
      break;
    }
  }

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Load the return value.
  masm.loadValue(
      Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
      output.valueReg());

  stubFrame.leave(masm);

  if (!isSameRealm) {
    masm.switchToBaselineFrameRealm(scratch2);
  }

  return true;
}
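
// vp layout example for a native call with argc == 2: after pushArguments
// and the argc push above, scratch2 points at vp, where vp[0] holds the
// callee Value (doubling as the return-value slot), vp[1] holds |this|,
// and vp[2]..vp[3] hold the two arguments, matching the
// bool (*)(JSContext*, unsigned, Value* vp) signature described above.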

#ifdef JS_SIMULATOR
bool BaselineCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
                                                     Int32OperandId argcId,
                                                     CallFlags flags,
                                                     uint32_t argcFixed,
                                                     uint32_t targetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<bool> ignoresReturnValue;
  Maybe<uint32_t> targetOffset_ = mozilla::Some(targetOffset);
  return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
                              argcFixed, ignoresReturnValue, targetOffset_);
}

bool BaselineCacheIRCompiler::emitCallDOMFunction(
    ObjOperandId calleeId, Int32OperandId argcId, ObjOperandId thisObjId,
    CallFlags flags, uint32_t argcFixed, uint32_t targetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<bool> ignoresReturnValue;
  Maybe<uint32_t> targetOffset_ = mozilla::Some(targetOffset);
  return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
                              argcFixed, ignoresReturnValue, targetOffset_);
}
#else
bool BaselineCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
                                                     Int32OperandId argcId,
                                                     CallFlags flags,
                                                     uint32_t argcFixed,
                                                     bool ignoresReturnValue) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<bool> ignoresReturnValue_ = mozilla::Some(ignoresReturnValue);
  Maybe<uint32_t> targetOffset;
  return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
                              argcFixed, ignoresReturnValue_, targetOffset);
}

bool BaselineCacheIRCompiler::emitCallDOMFunction(ObjOperandId calleeId,
                                                  Int32OperandId argcId,
                                                  ObjOperandId thisObjId,
                                                  CallFlags flags,
                                                  uint32_t argcFixed) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<bool> ignoresReturnValue = mozilla::Some(false);
  Maybe<uint32_t> targetOffset;
  return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
                              argcFixed, ignoresReturnValue, targetOffset);
}
#endif

bool BaselineCacheIRCompiler::emitCallClassHook(ObjOperandId calleeId,
                                                Int32OperandId argcId,
                                                CallFlags flags,
                                                uint32_t argcFixed,
                                                uint32_t targetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Maybe<bool> ignoresReturnValue;
  Maybe<uint32_t> targetOffset_ = mozilla::Some(targetOffset);
  return emitCallNativeShared(NativeCallType::ClassHook, calleeId, argcId,
                              flags, argcFixed, ignoresReturnValue,
                              targetOffset_);
}

// Helper function for loading call arguments from the stack. Loads
// and unboxes an object from a specific slot.
void BaselineCacheIRCompiler::loadStackObject(ArgumentKind kind,
                                              CallFlags flags,
                                              Register argcReg,
                                              Register dest) {
  MOZ_ASSERT(enteredStubFrame_);

  bool addArgc = false;
  int32_t slotIndex = GetIndexOfArgument(kind, flags, &addArgc);

  if (addArgc) {
    int32_t slotOffset =
        slotIndex * sizeof(JS::Value) + BaselineStubFrameLayout::Size();
    BaseValueIndex slotAddr(FramePointer, argcReg, slotOffset);
    masm.unboxObject(slotAddr, dest);
  } else {
    int32_t slotOffset =
        slotIndex * sizeof(JS::Value) + BaselineStubFrameLayout::Size();
    Address slotAddr(FramePointer, slotOffset);
    masm.unboxObject(slotAddr, dest);
  }
}

template <typename T>
void BaselineCacheIRCompiler::storeThis(const T& newThis, Register argcReg,
                                        CallFlags flags) {
  switch (flags.getArgFormat()) {
    case CallFlags::Standard: {
      BaseValueIndex thisAddress(
          FramePointer,
          argcReg,                               // Arguments
          1 * sizeof(Value) +                    // NewTarget
              BaselineStubFrameLayout::Size());  // Stub frame
      masm.storeValue(newThis, thisAddress);
      break;
    }
    case CallFlags::Spread: {
      Address thisAddress(FramePointer,
                          2 * sizeof(Value) +  // Arg array, NewTarget
                              BaselineStubFrameLayout::Size());  // Stub frame
      masm.storeValue(newThis, thisAddress);
      break;
    }
    default:
      MOZ_CRASH("Invalid arg format for scripted constructor");
  }
}

/*
 * Scripted constructors require a |this| object to be created prior to the
 * call. When this function is called, the stack looks like (bottom->top):
 *
 * [..., Callee, ThisV, Arg0V, ..., ArgNV, NewTarget, StubFrameHeader]
 *
 * At this point, |ThisV| is JSWhyMagic::JS_IS_CONSTRUCTING.
 *
 * This function calls CreateThis to generate a new |this| object, then
 * overwrites the magic ThisV on the stack.
 */
void BaselineCacheIRCompiler::createThis(Register argcReg, Register calleeReg,
                                         Register scratch, CallFlags flags,
                                         bool isBoundFunction) {
  MOZ_ASSERT(flags.isConstructing());

  if (flags.needsUninitializedThis()) {
    storeThis(MagicValue(JS_UNINITIALIZED_LEXICAL), argcReg, flags);
    return;
  }

  // Save live registers that don't have to be traced.
  LiveGeneralRegisterSet liveNonGCRegs;
  liveNonGCRegs.add(argcReg);
  masm.PushRegsInMask(liveNonGCRegs);

  // CreateThis takes two arguments: callee, and newTarget.
  if (isBoundFunction) {
    // Push the bound function's target as callee and newTarget.
    Address boundTarget(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
    masm.unboxObject(boundTarget, scratch);
    masm.push(scratch);
    masm.push(scratch);
  } else {
    // Push newTarget:
    loadStackObject(ArgumentKind::NewTarget, flags, argcReg, scratch);
    masm.push(scratch);

    // Push callee:
    loadStackObject(ArgumentKind::Callee, flags, argcReg, scratch);
    masm.push(scratch);
  }

  // Call CreateThisFromIC.
  using Fn =
      bool (*)(JSContext*, HandleObject, HandleObject, MutableHandleValue);
  callVM<Fn, CreateThisFromIC>(masm);

#ifdef DEBUG
  Label createdThisOK;
  masm.branchTestObject(Assembler::Equal, JSReturnOperand, &createdThisOK);
  masm.branchTestMagic(Assembler::Equal, JSReturnOperand, &createdThisOK);
  masm.assumeUnreachable(
      "The return of CreateThis must be an object or uninitialized.");
  masm.bind(&createdThisOK);
#endif

  // Restore saved registers.
  masm.PopRegsInMask(liveNonGCRegs);

  // Restore ICStubReg. The stub might have been moved if CreateThisFromIC
  // discarded JIT code.
  Address stubAddr(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP);
  masm.loadPtr(stubAddr, ICStubReg);

  // Save |this| value back into pushed arguments on stack.
  MOZ_ASSERT(!liveNonGCRegs.aliases(JSReturnOperand));
  storeThis(JSReturnOperand, argcReg, flags);

  // Restore calleeReg. CreateThisFromIC may trigger a GC, so we reload the
  // callee from the stub frame (which is traced) instead of spilling it to
  // the stack.
  loadStackObject(ArgumentKind::Callee, flags, argcReg, calleeReg);
}

void BaselineCacheIRCompiler::updateReturnValue() {
  Label skipThisReplace;
  masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);

  // If a constructor does not explicitly return an object, the return value
  // of the constructor is |this|. We load it out of the baseline stub frame.
  //
  // At this point, the stack looks like this:
  //  newTarget
  //  ArgN
  //  ...
  //  Arg0
  //  ThisVal            <---- We want this value.
  //  Callee token         | Skip two stack slots.
  //  Frame descriptor     v
  size_t thisvOffset =
      JitFrameLayout::offsetOfThis() - JitFrameLayout::bytesPoppedAfterCall();
  Address thisAddress(masm.getStackPointer(), thisvOffset);
  masm.loadValue(thisAddress, JSReturnOperand);

#ifdef DEBUG
  masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
  masm.assumeUnreachable("Return of constructing call should be an object.");
#endif
  masm.bind(&skipThisReplace);
}
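
// JS-level example of the replacement above:
//   function C() { this.x = 1; return 42; }
//   let o = new C();
// The callee returns the primitive 42, so the IC discards it and |o| is the
// freshly created |this| object; had C returned an object, that object
// would have been the result instead.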

bool BaselineCacheIRCompiler::emitCallScriptedFunction(ObjOperandId calleeId,
                                                       Int32OperandId argcId,
                                                       CallFlags flags,
                                                       uint32_t argcFixed) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  Register calleeReg = allocator.useRegister(masm, calleeId);
  Register argcReg = allocator.useRegister(masm, argcId);

  bool isConstructing = flags.isConstructing();
  bool isSameRealm = flags.isSameRealm();

  if (!updateArgc(flags, argcReg, scratch)) {
    return false;
  }

  allocator.discardStack(masm);

  // Push a stub frame so that we can perform a non-tail call.
  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  if (!isSameRealm) {
    masm.switchToObjectRealm(calleeReg, scratch);
  }

  if (isConstructing) {
    createThis(argcReg, calleeReg, scratch, flags,
               /* isBoundFunction = */ false);
  }

  pushArguments(argcReg, calleeReg, scratch, scratch2, flags, argcFixed,
                /*isJitCall =*/true);

  // Load the start of the target JitCode.
  Register code = scratch2;
  masm.loadJitCodeRaw(calleeReg, code);

  // Note that we use Push, not push, so that callJit will align the stack
  // properly on ARM.
  masm.PushCalleeToken(calleeReg, isConstructing);
  masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, argcReg, scratch);

  // Handle arguments underflow.
  Label noUnderflow;
  masm.loadFunctionArgCount(calleeReg, calleeReg);
  masm.branch32(Assembler::AboveOrEqual, argcReg, calleeReg, &noUnderflow);
  {
    // Call the arguments rectifier.
    TrampolinePtr argumentsRectifier =
        cx_->runtime()->jitRuntime()->getArgumentsRectifier();
    masm.movePtr(argumentsRectifier, code);
  }

  masm.bind(&noUnderflow);
  masm.callJit(code);

  // If this is a constructing call, and the callee returns a non-object,
  // replace it with the |this| object passed in.
  if (isConstructing) {
    updateReturnValue();
  }

  stubFrame.leave(masm);

  if (!isSameRealm) {
    masm.switchToBaselineFrameRealm(scratch2);
  }

  return true;
}

bool BaselineCacheIRCompiler::emitCallWasmFunction(
    ObjOperandId calleeId, Int32OperandId argcId, CallFlags flags,
    uint32_t argcFixed, uint32_t funcExportOffset, uint32_t instanceOffset) {
  return emitCallScriptedFunction(calleeId, argcId, flags, argcFixed);
}

bool BaselineCacheIRCompiler::emitCallInlinedFunction(ObjOperandId calleeId,
                                                      Int32OperandId argcId,
                                                      uint32_t icScriptOffset,
                                                      CallFlags flags,
                                                      uint32_t argcFixed) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister codeReg(allocator, masm);

  Register calleeReg = allocator.useRegister(masm, calleeId);
  Register argcReg = allocator.useRegister(masm, argcId);

  bool isConstructing = flags.isConstructing();
  bool isSameRealm = flags.isSameRealm();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadBaselineJitCodeRaw(calleeReg, codeReg, failure->label());

  if (!updateArgc(flags, argcReg, scratch)) {
    return false;
  }

  allocator.discardStack(masm);

  // Push a stub frame so that we can perform a non-tail call.
  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  if (!isSameRealm) {
    masm.switchToObjectRealm(calleeReg, scratch);
  }

  Label baselineScriptDiscarded;
  if (isConstructing) {
    createThis(argcReg, calleeReg, scratch, flags,
               /* isBoundFunction = */ false);

    // CreateThisFromIC may trigger a GC and discard the BaselineScript.
    // We have already called discardStack, so we can't use a FailurePath.
    // Instead, we skip storing the ICScript in the JSContext and use a
    // normal non-inlined call.
    masm.loadBaselineJitCodeRaw(calleeReg, codeReg, &baselineScriptDiscarded);
  }

  // Store icScript in the context.
  Address icScriptAddr(stubAddress(icScriptOffset));
  masm.loadPtr(icScriptAddr, scratch);
  masm.storeICScriptInJSContext(scratch);

  if (isConstructing) {
    Label skip;
    masm.jump(&skip);
    masm.bind(&baselineScriptDiscarded);
    masm.loadJitCodeRaw(calleeReg, codeReg);
    masm.bind(&skip);
  }

  pushArguments(argcReg, calleeReg, scratch, scratch2, flags, argcFixed,
                /*isJitCall =*/true);

  // Note that we use Push, not push, so that callJit will align the stack
  // properly on ARM.
  masm.PushCalleeToken(calleeReg, isConstructing);
  masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, argcReg, scratch);

  // Handle arguments underflow.
  Label noUnderflow;
  masm.loadFunctionArgCount(calleeReg, calleeReg);
  masm.branch32(Assembler::AboveOrEqual, argcReg, calleeReg, &noUnderflow);
  {
    // Call the trial-inlining arguments rectifier.
    ArgumentsRectifierKind kind = ArgumentsRectifierKind::TrialInlining;
    TrampolinePtr argumentsRectifier =
        cx_->runtime()->jitRuntime()->getArgumentsRectifier(kind);
    masm.movePtr(argumentsRectifier, codeReg);
  }

  masm.bind(&noUnderflow);
  masm.callJit(codeReg);

  // If this is a constructing call, and the callee returns a non-object,
  // replace it with the |this| object passed in.
  if (isConstructing) {
    updateReturnValue();
  }

  stubFrame.leave(masm);

  if (!isSameRealm) {
    masm.switchToBaselineFrameRealm(codeReg);
  }

  return true;
}
template <typename IdType>
bool BaselineCacheIRCompiler::emitCallScriptedProxyGetShared(
    ValOperandId targetId, ObjOperandId receiverId, ObjOperandId handlerId,
    uint32_t trapOffset, IdType id, uint32_t nargsAndFlags) {
  Address trapAddr(stubAddress(trapOffset));
  Register handler = allocator.useRegister(masm, handlerId);
  ValueOperand target = allocator.useValueRegister(masm, targetId);
  Register receiver = allocator.useRegister(masm, receiverId);
  ValueOperand idVal;
  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    idVal = allocator.useValueRegister(masm, id);
  }

  AutoScratchRegister code(allocator, masm);
  AutoScratchRegister callee(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  ValueOperand scratchVal(scratch);

  masm.loadPtr(trapAddr, callee);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // We need to keep the target around to potentially validate the proxy result
  stubFrame.storeTracedValue(masm, target);
  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    stubFrame.storeTracedValue(masm, idVal);
  } else {
    // We need to either trace the id here or grab the ICStubReg back from
    // FramePointer + sizeof(void*) after the call in order to load it again.
    // We elect to do this because it unifies the code path after the call.
    Address idAddr(stubAddress(id));
    masm.loadPtr(idAddr, scratch);
    masm.tagValue(JSVAL_TYPE_STRING, scratch, scratchVal);
    stubFrame.storeTracedValue(masm, scratchVal);
  }

  uint16_t nargs = nargsAndFlags >> JSFunction::ArgCountShift;
  masm.alignJitStackBasedOnNArgs(std::max(uint16_t(3), nargs),
                                 /*countIncludesThis = */ false);
  for (size_t i = 3; i < nargs; i++) {
    masm.Push(UndefinedValue());
  }

  masm.tagValue(JSVAL_TYPE_OBJECT, receiver, scratchVal);
  masm.Push(scratchVal);

  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    masm.Push(idVal);
  } else {
    stubFrame.loadTracedValue(masm, 1, scratchVal);
    masm.Push(scratchVal);
  }

  masm.Push(target);

  masm.tagValue(JSVAL_TYPE_OBJECT, handler, scratchVal);
  masm.Push(scratchVal);

  masm.loadJitCodeRaw(callee, code);

  masm.Push(callee);
  masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, 3);

  masm.callJit(code);

  Label success;
  Register scratch2 = code;

  stubFrame.loadTracedValue(masm, 0, scratchVal);
  masm.unboxObject(scratchVal, scratch);
  masm.branchTestObjectNeedsProxyResultValidation(Assembler::Zero, scratch,
                                                  scratch2, &success);
  ValueOperand scratchVal2(scratch2);
  stubFrame.loadTracedValue(masm, 1, scratchVal2);
  masm.Push(JSReturnOperand);
  masm.Push(scratchVal2);
  masm.Push(scratch);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      MutableHandleValue);
  callVM<Fn, CheckProxyGetByValueResult>(masm);

  masm.bind(&success);

  stubFrame.leave(masm);

  return true;
}

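// For reference (a sketch of the semantics, not authoritative): the JIT call
// assembled above performs the scripted [[Get]] trap invocation
//
//   trap.call(handler, target, id, receiver)
//
// with |handler| as the |this| value and exactly three meaningful arguments;
// any extra formal parameters of the trap were padded with |undefined| above.
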
bool BaselineCacheIRCompiler::emitCallScriptedProxyGetResult(
    ValOperandId targetId, ObjOperandId receiverId, ObjOperandId handlerId,
    uint32_t trapOffset, uint32_t idOffset, uint32_t nargsAndFlags) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitCallScriptedProxyGetShared(targetId, receiverId, handlerId,
                                        trapOffset, idOffset, nargsAndFlags);
}

bool BaselineCacheIRCompiler::emitCallScriptedProxyGetByValueResult(
    ValOperandId targetId, ObjOperandId receiverId, ObjOperandId handlerId,
    ValOperandId idId, uint32_t trapOffset, uint32_t nargsAndFlags) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  return emitCallScriptedProxyGetShared(targetId, receiverId, handlerId,
                                        trapOffset, idId, nargsAndFlags);
}

bool BaselineCacheIRCompiler::emitCallBoundScriptedFunction(
    ObjOperandId calleeId, ObjOperandId targetId, Int32OperandId argcId,
    CallFlags flags, uint32_t numBoundArgs) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  Register calleeReg = allocator.useRegister(masm, calleeId);
  Register argcReg = allocator.useRegister(masm, argcId);

  bool isConstructing = flags.isConstructing();
  bool isSameRealm = flags.isSameRealm();

  allocator.discardStack(masm);

  // Push a stub frame so that we can perform a non-tail call.
  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  Address boundTarget(calleeReg, BoundFunctionObject::offsetOfTargetSlot());

  // If we're constructing, switch to the target's realm and create |this|. If
  // we're not constructing, we switch to the target's realm after pushing the
  // arguments and loading the target.
  if (isConstructing) {
    if (!isSameRealm) {
      masm.unboxObject(boundTarget, scratch);
      masm.switchToObjectRealm(scratch, scratch);
    }
    createThis(argcReg, calleeReg, scratch, flags,
               /* isBoundFunction = */ true);
  }

  // Push all arguments, including |this|.
  pushBoundFunctionArguments(argcReg, calleeReg, scratch, scratch2, flags,
                             numBoundArgs, /* isJitCall = */ true);

  // Load the target JSFunction.
  masm.unboxObject(boundTarget, calleeReg);

  if (!isConstructing && !isSameRealm) {
    masm.switchToObjectRealm(calleeReg, scratch);
  }

  // Update argc.
  masm.add32(Imm32(numBoundArgs), argcReg);

  // Load the start of the target JitCode.
  Register code = scratch2;
  masm.loadJitCodeRaw(calleeReg, code);

  // Note that we use Push, not push, so that callJit will align the stack
  // properly on ARM.
  masm.PushCalleeToken(calleeReg, isConstructing);
  masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, argcReg, scratch);

  // Handle arguments underflow.
  Label noUnderflow;
  masm.loadFunctionArgCount(calleeReg, calleeReg);
  masm.branch32(Assembler::AboveOrEqual, argcReg, calleeReg, &noUnderflow);
  {
    // Call the arguments rectifier.
    TrampolinePtr argumentsRectifier =
        cx_->runtime()->jitRuntime()->getArgumentsRectifier();
    masm.movePtr(argumentsRectifier, code);
  }
  masm.bind(&noUnderflow);
  masm.callJit(code);

  if (isConstructing) {
    updateReturnValue();
  }

  stubFrame.leave(masm);

  if (!isSameRealm) {
    masm.switchToBaselineFrameRealm(scratch2);
  }

  return true;
}

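// Worked example for the argc update above (illustrative): for
// |f.bind(thisv, 1, 2)(3)| the stub enters with argcReg == 1 and
// numBoundArgs == 2, pushBoundFunctionArguments pushes the bound args 1, 2
// ahead of the call arg 3 (plus |this|), and the add32 makes the target |f|
// see argc == 3.
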
bool BaselineCacheIRCompiler::emitNewArrayObjectResult(uint32_t arrayLength,
                                                       uint32_t shapeOffset,
                                                       uint32_t siteOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
  MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
  allocKind = ForegroundToBackgroundAllocKind(allocKind);

  uint32_t slotCount = GetGCKindSlots(allocKind);
  MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
  uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;

  AutoOutputRegister output(*this);
  AutoScratchRegister result(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister site(allocator, masm);
  AutoScratchRegisterMaybeOutput shape(allocator, masm, output);

  Address shapeAddr(stubAddress(shapeOffset));
  masm.loadPtr(shapeAddr, shape);

  Address siteAddr(stubAddress(siteOffset));
  masm.loadPtr(siteAddr, site);

  allocator.discardStack(masm);

  Label done;
  Label fail;

  masm.createArrayWithFixedElements(
      result, shape, scratch, InvalidReg, arrayLength, arrayCapacity, 0, 0,
      allocKind, gc::Heap::Default, &fail, AllocSiteInput(site));
  masm.jump(&done);

  {
    masm.bind(&fail);

    // We get here if the nursery is full (unlikely) but also for tenured
    // allocations if the current arena is full and we need to allocate a new
    // one (fairly common).

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(site);
    masm.Push(Imm32(int32_t(allocKind)));
    masm.Push(Imm32(arrayLength));

    using Fn =
        ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, gc::AllocSite*);
    callVM<Fn, NewArrayObjectBaselineFallback>(masm);

    stubFrame.leave(masm);
    masm.storeCallPointerResult(result);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_OBJECT, result, output.valueReg());
  return true;
}

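// Worked example for the capacity math above: ObjectElements reserves
// ObjectElements::VALUES_PER_HEADER (2) Values for its header, so an alloc
// kind whose GetGCKindSlots() is 8 leaves arrayCapacity == 6 inline elements
// for the new array.
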
bool BaselineCacheIRCompiler::emitNewPlainObjectResult(uint32_t numFixedSlots,
                                                       uint32_t numDynamicSlots,
                                                       gc::AllocKind allocKind,
                                                       uint32_t shapeOffset,
                                                       uint32_t siteOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister obj(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister site(allocator, masm);
  AutoScratchRegisterMaybeOutput shape(allocator, masm, output);

  Address shapeAddr(stubAddress(shapeOffset));
  masm.loadPtr(shapeAddr, shape);

  Address siteAddr(stubAddress(siteOffset));
  masm.loadPtr(siteAddr, site);

  allocator.discardStack(masm);

  Label done;
  Label fail;

  masm.createPlainGCObject(obj, shape, scratch, shape, numFixedSlots,
                           numDynamicSlots, allocKind, gc::Heap::Default, &fail,
                           AllocSiteInput(site));
  masm.jump(&done);

  {
    masm.bind(&fail);

    // We get here if the nursery is full (unlikely) but also for tenured
    // allocations if the current arena is full and we need to allocate a new
    // one (fairly common).

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(site);
    masm.Push(Imm32(int32_t(allocKind)));
    masm.loadPtr(shapeAddr, shape);  // This might have been overwritten.
    masm.Push(shape);

    using Fn = JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind,
                             gc::AllocSite*);
    callVM<Fn, NewPlainObjectBaselineFallback>(masm);

    stubFrame.leave(masm);
    masm.storeCallPointerResult(obj);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  return true;
}

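// Reminder on the callVM convention used in the fallback paths above (a
// sketch; VMFunctionList-inl.h has the real dispatch): the arguments after
// the implicit |cx| are pushed in reverse order, so for
//   Fn == JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind,
//                       gc::AllocSite*)
// the pushes are site first, then allocKind, then shape.
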
bool BaselineCacheIRCompiler::emitBindFunctionResult(
    ObjOperandId targetId, uint32_t argc, uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister scratch(allocator, masm);

  Register target = allocator.useRegister(masm, targetId);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Push the arguments in reverse order.
  for (uint32_t i = 0; i < argc; i++) {
    Address argAddress(FramePointer,
                       BaselineStubFrameLayout::Size() + i * sizeof(Value));
    masm.pushValue(argAddress);
  }
  masm.moveStackPtrTo(scratch.get());

  masm.Push(ImmWord(0));  // nullptr for maybeBound
  masm.Push(Imm32(argc));
  masm.Push(scratch);
  masm.Push(target);

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
                                      uint32_t, Handle<BoundFunctionObject*>);
  callVM<Fn, BoundFunctionObject::functionBindImpl>(masm);

  stubFrame.leave(masm);
  masm.storeCallPointerResult(scratch);

  masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
  return true;
}

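// Address arithmetic for the argument loop above (illustrative): the IC's
// arguments still live in the caller's frame, so on a 64-bit target the i-th
// argument is at FramePointer + BaselineStubFrameLayout::Size() + i * 8,
// i.e. just past the stub frame header pushed by stubFrame.enter().
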
bool BaselineCacheIRCompiler::emitSpecializedBindFunctionResult(
    ObjOperandId targetId, uint32_t argc, uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  Register target = allocator.useRegister(masm, targetId);

  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch2);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch1);

  // Push the arguments in reverse order.
  for (uint32_t i = 0; i < argc; i++) {
    Address argAddress(FramePointer,
                       BaselineStubFrameLayout::Size() + i * sizeof(Value));
    masm.pushValue(argAddress);
  }
  masm.moveStackPtrTo(scratch1.get());

  masm.Push(scratch2);
  masm.Push(Imm32(argc));
  masm.Push(scratch1);
  masm.Push(target);

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
                                      uint32_t, Handle<BoundFunctionObject*>);
  callVM<Fn, BoundFunctionObject::functionBindSpecializedBaseline>(masm);

  stubFrame.leave(masm);
  masm.storeCallPointerResult(scratch1);

  masm.tagValue(JSVAL_TYPE_OBJECT, scratch1, output.valueReg());
  return true;
}

bool BaselineCacheIRCompiler::emitCloseIterScriptedResult(
    ObjOperandId iterId, ObjOperandId calleeId, CompletionKind kind,
    uint32_t calleeNargs) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register iter = allocator.useRegister(masm, iterId);
  Register callee = allocator.useRegister(masm, calleeId);

  AutoScratchRegister code(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  masm.loadJitCodeRaw(callee, code);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Call the return method.
  masm.alignJitStackBasedOnNArgs(calleeNargs, /*countIncludesThis = */ false);
  for (uint32_t i = 0; i < calleeNargs; i++) {
    masm.pushValue(UndefinedValue());
  }
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(iter)));
  masm.Push(callee);
  masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, /* argc = */ 0);

  masm.callJit(code);

  if (kind != CompletionKind::Throw) {
    // Verify that the return value is an object.
    Label success;
    masm.branchTestObject(Assembler::Equal, JSReturnOperand, &success);

    masm.Push(Imm32(int32_t(CheckIsObjectKind::IteratorReturn)));
    using Fn = bool (*)(JSContext*, CheckIsObjectKind);
    callVM<Fn, ThrowCheckIsObject>(masm);

    masm.bind(&success);
  }

  stubFrame.leave(masm);
  return true;
}

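// The object check above mirrors the iterator-close protocol (sketch): unless
// we are closing due to a throw completion, something like
//   var res = iter.return();
//   if (!IsObject(res)) throw TypeError(...);  // ThrowCheckIsObject
// must run, which is exactly the branch plus VM call emitted above.
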
static void CallRegExpStub(MacroAssembler& masm, size_t jitZoneStubOffset,
                           Register temp, Label* vmCall) {
  // Call cx->zone()->jitZone()->regExpStub. We store a pointer to the RegExp
  // stub in the IC stub to keep it alive, but we shouldn't use it if the stub
  // has been discarded in the meantime (because we might have changed GC string
  // pretenuring heuristics that affect behavior of the stub). This is uncommon
  // but can happen if we discarded all JIT code but had some active (Baseline)
  // scripts on the stack.
  masm.loadJSContext(temp);
  masm.loadPtr(Address(temp, JSContext::offsetOfZone()), temp);
  masm.loadPtr(Address(temp, Zone::offsetOfJitZone()), temp);
  masm.loadPtr(Address(temp, jitZoneStubOffset), temp);
  masm.branchTestPtr(Assembler::Zero, temp, temp, vmCall);
  masm.call(Address(temp, JitCode::offsetOfCode()));
}

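// Roughly the C++ these loads correspond to (a sketch under the assumption
// that |jitZoneStubOffset| selects one of the matcher/searcher/execMatch/
// execTest stub fields on JitZone):
//
//   JitZone* jitZone = cx->zone()->jitZone();
//   JitCode* stub = *reinterpret_cast<JitCode**>(
//       reinterpret_cast<char*>(jitZone) + jitZoneStubOffset);
//   if (!stub) goto vmCall;
//   // otherwise call the code at stub->raw()
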
// Used to move inputs to the registers expected by the RegExp stub.
static void SetRegExpStubInputRegisters(MacroAssembler& masm,
                                        Register* regexpSrc,
                                        Register regexpDest, Register* inputSrc,
                                        Register inputDest,
                                        Register* lastIndexSrc,
                                        Register lastIndexDest) {
  MoveResolver& moves = masm.moveResolver();
  if (*regexpSrc != regexpDest) {
    masm.propagateOOM(moves.addMove(MoveOperand(*regexpSrc),
                                    MoveOperand(regexpDest), MoveOp::GENERAL));
    *regexpSrc = regexpDest;
  }
  if (*inputSrc != inputDest) {
    masm.propagateOOM(moves.addMove(MoveOperand(*inputSrc),
                                    MoveOperand(inputDest), MoveOp::GENERAL));
    *inputSrc = inputDest;
  }
  if (lastIndexSrc && *lastIndexSrc != lastIndexDest) {
    masm.propagateOOM(moves.addMove(MoveOperand(*lastIndexSrc),
                                    MoveOperand(lastIndexDest), MoveOp::INT32));
    *lastIndexSrc = lastIndexDest;
  }

  masm.propagateOOM(moves.resolve());

  MoveEmitter emitter(masm);
  emitter.emit(moves);
  emitter.finish();
}

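// Why a MoveResolver instead of three plain moves (sketch): the sources may
// already sit in each other's destinations. For example, if on entry
// *regexpSrc == RegExpMatcherStringReg and *inputSrc == RegExpMatcherRegExpReg,
// two naive moves would clobber one of the inputs; the resolver detects such
// cycles and has the MoveEmitter materialize a safe sequence instead.
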
bool BaselineCacheIRCompiler::emitCallRegExpMatcherResult(
    ObjOperandId regexpId, StringOperandId inputId, Int32OperandId lastIndexId,
    uint32_t stubOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register regexp = allocator.useRegister(masm, regexpId);
  Register input = allocator.useRegister(masm, inputId);
  Register lastIndex = allocator.useRegister(masm, lastIndexId);
  Register scratch = output.valueReg().scratchReg();

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  SetRegExpStubInputRegisters(masm, &regexp, RegExpMatcherRegExpReg, &input,
                              RegExpMatcherStringReg, &lastIndex,
                              RegExpMatcherLastIndexReg);

  masm.reserveStack(RegExpReservedStack);

  Label done, vmCall, vmCallNoMatches;
  CallRegExpStub(masm, JitZone::offsetOfRegExpMatcherStub(), scratch,
                 &vmCallNoMatches);
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, &vmCall);

  masm.jump(&done);

  {
    Label pushedMatches;
    masm.bind(&vmCallNoMatches);
    masm.push(ImmWord(0));
    masm.jump(&pushedMatches);

    masm.bind(&vmCall);
    masm.computeEffectiveAddress(
        Address(masm.getStackPointer(), InputOutputDataSize), scratch);
    masm.push(scratch);

    masm.bind(&pushedMatches);
    masm.Push(lastIndex);
    masm.Push(input);
    masm.Push(regexp);

    using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
                        int32_t lastIndex, MatchPairs* pairs,
                        MutableHandleValue output);
    callVM<Fn, RegExpMatcherRaw>(masm);
  }

  masm.bind(&done);

  static_assert(R0 == JSReturnOperand);

  stubFrame.leave(masm);
  return true;
}

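// Note on the two fallback entries above (a sketch of the control flow):
// |vmCallNoMatches| is taken when the JitZone has no matcher stub, so nullptr
// is pushed for MatchPairs*; |vmCall| is taken when the stub ran but returned
// |undefined| to request the VM path, in which case the pushed MatchPairs*
// points into the InputOutputData area reserved on the stack earlier.
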
bool BaselineCacheIRCompiler::emitCallRegExpSearcherResult(
    ObjOperandId regexpId, StringOperandId inputId, Int32OperandId lastIndexId,
    uint32_t stubOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register regexp = allocator.useRegister(masm, regexpId);
  Register input = allocator.useRegister(masm, inputId);
  Register lastIndex = allocator.useRegister(masm, lastIndexId);
  Register scratch = output.valueReg().scratchReg();

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  SetRegExpStubInputRegisters(masm, &regexp, RegExpSearcherRegExpReg, &input,
                              RegExpSearcherStringReg, &lastIndex,
                              RegExpSearcherLastIndexReg);
  // Ensure `scratch` doesn't conflict with the stub's input registers.
  scratch = ReturnReg;

  masm.reserveStack(RegExpReservedStack);

  Label done, vmCall, vmCallNoMatches;
  CallRegExpStub(masm, JitZone::offsetOfRegExpSearcherStub(), scratch,
                 &vmCallNoMatches);
  masm.branch32(Assembler::Equal, scratch, Imm32(RegExpSearcherResultFailed),
                &vmCall);

  masm.jump(&done);

  {
    Label pushedMatches;
    masm.bind(&vmCallNoMatches);
    masm.push(ImmWord(0));
    masm.jump(&pushedMatches);

    masm.bind(&vmCall);
    masm.computeEffectiveAddress(
        Address(masm.getStackPointer(), InputOutputDataSize), scratch);
    masm.push(scratch);

    masm.bind(&pushedMatches);
    masm.Push(lastIndex);
    masm.Push(input);
    masm.Push(regexp);

    using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
                        int32_t lastIndex, MatchPairs* pairs, int32_t* result);
    callVM<Fn, RegExpSearcherRaw>(masm);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_INT32, ReturnReg, output.valueReg());

  stubFrame.leave(masm);
  return true;
}

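// As in the matcher case, RegExpSearcherResultFailed means the stub could not
// finish and the VM fallback must run. On both paths the final int32 result
// ends up in ReturnReg, which is why the tagValue above reads ReturnReg.
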
bool BaselineCacheIRCompiler::emitRegExpBuiltinExecMatchResult(
    ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register regexp = allocator.useRegister(masm, regexpId);
  Register input = allocator.useRegister(masm, inputId);
  Register scratch = output.valueReg().scratchReg();

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  SetRegExpStubInputRegisters(masm, &regexp, RegExpMatcherRegExpReg, &input,
                              RegExpMatcherStringReg, nullptr, InvalidReg);

  masm.reserveStack(RegExpReservedStack);

  Label done, vmCall, vmCallNoMatches;
  CallRegExpStub(masm, JitZone::offsetOfRegExpExecMatchStub(), scratch,
                 &vmCallNoMatches);
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, &vmCall);

  masm.jump(&done);

  {
    Label pushedMatches;
    masm.bind(&vmCallNoMatches);
    masm.push(ImmWord(0));
    masm.jump(&pushedMatches);

    masm.bind(&vmCall);
    masm.computeEffectiveAddress(
        Address(masm.getStackPointer(), InputOutputDataSize), scratch);
    masm.push(scratch);

    masm.bind(&pushedMatches);
    masm.Push(input);
    masm.Push(regexp);

    using Fn =
        bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
                 MatchPairs* pairs, MutableHandleValue output);
    callVM<Fn, RegExpBuiltinExecMatchFromJit>(masm);
  }

  masm.bind(&done);

  static_assert(R0 == JSReturnOperand);

  stubFrame.leave(masm);
  return true;
}

bool BaselineCacheIRCompiler::emitRegExpBuiltinExecTestResult(
    ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register regexp = allocator.useRegister(masm, regexpId);
  Register input = allocator.useRegister(masm, inputId);
  Register scratch = output.valueReg().scratchReg();

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  SetRegExpStubInputRegisters(masm, &regexp, RegExpExecTestRegExpReg, &input,
                              RegExpExecTestStringReg, nullptr, InvalidReg);
  // Ensure `scratch` doesn't conflict with the stub's input registers.
  scratch = ReturnReg;

  Label done, vmCall;
  CallRegExpStub(masm, JitZone::offsetOfRegExpExecTestStub(), scratch, &vmCall);
  masm.branch32(Assembler::Equal, scratch, Imm32(RegExpExecTestResultFailed),
                &vmCall);

  masm.jump(&done);

  {
    masm.bind(&vmCall);

    masm.Push(input);
    masm.Push(regexp);

    using Fn = bool (*)(JSContext*, Handle<RegExpObject*> regexp,
                        HandleString input, bool* result);
    callVM<Fn, RegExpBuiltinExecTestFromJit>(masm);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());

  stubFrame.leave(masm);
  return true;
}

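// On the stub path the boolean result is produced in ReturnReg, and the VM
// fallback's |bool* result| out-param is surfaced in ReturnReg as well (the
// usual callVM out-param convention, stated here as a sketch), so both paths
// meet at the tagValue above.
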
bool BaselineCacheIRCompiler::emitRegExpHasCaptureGroupsResult(
    ObjOperandId regexpId, StringOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register regexp = allocator.useRegister(masm, regexpId);
  Register input = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  allocator.discardStack(masm);

  // Load RegExpShared in |scratch|.
  Label vmCall;
  masm.loadParsedRegExpShared(regexp, scratch, &vmCall);

  // Return true iff pairCount > 1.
  Label returnTrue, done;
  masm.branch32(Assembler::Above,
                Address(scratch, RegExpShared::offsetOfPairCount()), Imm32(1),
                &returnTrue);
  masm.moveValue(BooleanValue(false), output.valueReg());
  masm.jump(&done);

  masm.bind(&returnTrue);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&vmCall);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(input);
    masm.Push(regexp);

    using Fn =
        bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
    callVM<Fn, RegExpHasCaptureGroups>(masm);

    stubFrame.leave(masm);
    masm.storeCallBoolResult(scratch);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}

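// Worked example for the pairCount test above: /a(b)c/ has pairCount == 2
// (the implicit pair for the whole match plus one capture group), so the IC
// returns true; /abc/ has pairCount == 1 and returns false.
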