/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #ifndef jit_CacheIRCompiler_h
8 #define jit_CacheIRCompiler_h
10 #include "mozilla/Maybe.h"
12 #include "jit/CacheIR.h"
13 #include "jit/JitOptions.h"
14 #include "jit/SharedICRegisters.h"
15 #include "js/ScalarType.h" // js::Scalar::Type
// Forward declarations: these classes are referenced by pointer/reference
// below, so full definitions are not needed in this header.
class TypedArrayObject;

class BaselineCacheIRCompiler;
class IonCacheIRCompiler;
26 // [SMDOC] CacheIR Value Representation and Tracking
28 // While compiling an IC stub the CacheIR compiler needs to keep track of the
29 // physical location for each logical piece of data we care about, as well as
30 // ensure that in the case of a stub failing, we are able to restore the input
31 // state so that a subsequent stub can attempt to provide a value.
33 // OperandIds are created in the CacheIR front-end to keep track of values that
34 // are passed between CacheIR ops during the execution of a given CacheIR stub.
35 // In the CacheRegisterAllocator these OperandIds are given OperandLocations,
36 // that represent the physical location of the OperandId at a given point in
37 // time during CacheRegister allocation.
39 // In the CacheRegisterAllocator physical locations include the stack, and
40 // registers, as well as whether or not the value has been unboxed or not.
41 // Constants are also represented separately to provide for on-demand
44 // Intra-op Register allocation:
46 // During the emission of a CacheIR op, code can ask the CacheRegisterAllocator
47 // for access to a particular OperandId, and the register allocator will
48 // generate the required code to fill that request.
50 // Input OperandIds should be considered as immutable, and should not be mutated
51 // during the execution of a stub.
53 // There are also a number of RAII classes that interact with the register
54 // allocator, in order to provide access to more registers than just those
55 // provided for by the OperandIds.
57 // - AutoOutputReg: The register which will hold the output value of the stub.
58 // - AutoScratchReg: By default, an arbitrary scratch register, however a
59 // specific register can be requested.
60 // - AutoScratchRegMaybeOutput: Any arbitrary scratch register, but the output
61 // register may be used as well.
63 // These RAII classes take ownership of a register for the duration of their
64 // lifetime so they can be used for computation or output. The register
65 // allocator can spill values with OperandLocations in order to try to ensure
66 // that a register is made available for use.
68 // If a specific register is required (via AutoScratchRegister), it should be
// the first register acquired, as the register allocator will be unable to
70 // allocate the fixed register if the current op is using it for something else.
72 // If no register can be provided after attempting to spill, a
73 // MOZ_RELEASE_ASSERT ensures the browser will crash. The register allocator is
74 // not provided enough information in its current design to insert spills and
75 // fills at arbitrary locations, and so it can fail to find an allocation
76 // solution. However, this will only happen within the implementation of an
77 // operand emitter, and because the cache register allocator is mostly
// deterministic, so long as the operand id emitter is tested, this won't
79 // suddenly crop up in an arbitrary webpage. It's worth noting the most
80 // difficult platform to support is x86-32, because it has the least number of
81 // registers available.
83 // FailurePaths checkpoint the state of the register allocator so that the input
84 // state can be recomputed from the current state before jumping to the next
85 // stub in the IC chain. An important invariant is that the FailurePath must be
86 // allocated for each op after all the manipulation of OperandLocations has
87 // happened, so that its recording is correct.
89 // Inter-op Register Allocation:
91 // The RAII register management classes are RAII because all register state
92 // outside the OperandLocations is reset before the compilation of each
93 // individual CacheIR op. This means that you cannot rely on a value surviving
94 // between ops, even if you use the ability of AutoScratchRegister to name a
95 // specific register. Values that need to be preserved between ops must be given
// Represents a Value on the Baseline frame's expression stack. Slot 0 is the
// value on top of the stack (the most recently pushed value), slot 1 is the
// value pushed before that, etc.
class BaselineFrameSlot {
  uint32_t slot_;

 public:
  explicit BaselineFrameSlot(uint32_t slot) : slot_(slot) {}

  // Returns the expression-stack depth of this slot (0 == top of stack).
  uint32_t slot() const { return slot_; }

  bool operator==(const BaselineFrameSlot& other) const {
    return slot_ == other.slot_;
  }
  bool operator!=(const BaselineFrameSlot& other) const {
    return slot_ != other.slot_;
  }
};
116 // OperandLocation represents the location of an OperandId. The operand is
117 // either in a register or on the stack, and is either boxed or unboxed.
118 class OperandLocation
{
139 FloatRegister doubleReg
;
140 ValueOperand valueReg
;
142 uint32_t stackPushed
;
145 uint32_t valueStackPushed
;
146 BaselineFrameSlot baselineFrameSlot
;
149 Data() : valueStackPushed(0) {}
154 OperandLocation() : kind_(Uninitialized
) {}
156 Kind
kind() const { return kind_
; }
158 void setUninitialized() { kind_
= Uninitialized
; }
160 ValueOperand
valueReg() const {
161 MOZ_ASSERT(kind_
== ValueReg
);
162 return data_
.valueReg
;
164 Register
payloadReg() const {
165 MOZ_ASSERT(kind_
== PayloadReg
);
166 return data_
.payloadReg
.reg
;
168 FloatRegister
doubleReg() const {
169 MOZ_ASSERT(kind_
== DoubleReg
);
170 return data_
.doubleReg
;
172 uint32_t payloadStack() const {
173 MOZ_ASSERT(kind_
== PayloadStack
);
174 return data_
.payloadStack
.stackPushed
;
176 uint32_t valueStack() const {
177 MOZ_ASSERT(kind_
== ValueStack
);
178 return data_
.valueStackPushed
;
180 JSValueType
payloadType() const {
181 if (kind_
== PayloadReg
) {
182 return data_
.payloadReg
.type
;
184 MOZ_ASSERT(kind_
== PayloadStack
);
185 return data_
.payloadStack
.type
;
187 Value
constant() const {
188 MOZ_ASSERT(kind_
== Constant
);
189 return data_
.constant
;
191 BaselineFrameSlot
baselineFrameSlot() const {
192 MOZ_ASSERT(kind_
== BaselineFrame
);
193 return data_
.baselineFrameSlot
;
196 void setPayloadReg(Register reg
, JSValueType type
) {
198 data_
.payloadReg
.reg
= reg
;
199 data_
.payloadReg
.type
= type
;
201 void setDoubleReg(FloatRegister reg
) {
203 data_
.doubleReg
= reg
;
205 void setValueReg(ValueOperand reg
) {
207 data_
.valueReg
= reg
;
209 void setPayloadStack(uint32_t stackPushed
, JSValueType type
) {
210 kind_
= PayloadStack
;
211 data_
.payloadStack
.stackPushed
= stackPushed
;
212 data_
.payloadStack
.type
= type
;
214 void setValueStack(uint32_t stackPushed
) {
216 data_
.valueStackPushed
= stackPushed
;
218 void setConstant(const Value
& v
) {
222 void setBaselineFrame(BaselineFrameSlot slot
) {
223 kind_
= BaselineFrame
;
224 data_
.baselineFrameSlot
= slot
;
227 bool isUninitialized() const { return kind_
== Uninitialized
; }
228 bool isInRegister() const { return kind_
== PayloadReg
|| kind_
== ValueReg
; }
229 bool isOnStack() const {
230 return kind_
== PayloadStack
|| kind_
== ValueStack
;
233 size_t stackPushed() const {
234 if (kind_
== PayloadStack
) {
235 return data_
.payloadStack
.stackPushed
;
237 MOZ_ASSERT(kind_
== ValueStack
);
238 return data_
.valueStackPushed
;
240 size_t stackSizeInBytes() const {
241 if (kind_
== PayloadStack
) {
242 return sizeof(uintptr_t);
244 MOZ_ASSERT(kind_
== ValueStack
);
245 return sizeof(js::Value
);
247 void adjustStackPushed(int32_t diff
) {
248 if (kind_
== PayloadStack
) {
249 data_
.payloadStack
.stackPushed
+= diff
;
252 MOZ_ASSERT(kind_
== ValueStack
);
253 data_
.valueStackPushed
+= diff
;
256 bool aliasesReg(Register reg
) const {
257 if (kind_
== PayloadReg
) {
258 return payloadReg() == reg
;
260 if (kind_
== ValueReg
) {
261 return valueReg().aliases(reg
);
265 bool aliasesReg(ValueOperand reg
) const {
266 #if defined(JS_NUNBOX32)
267 return aliasesReg(reg
.typeReg()) || aliasesReg(reg
.payloadReg());
269 return aliasesReg(reg
.valueReg());
273 bool aliasesReg(const OperandLocation
& other
) const;
275 bool operator==(const OperandLocation
& other
) const;
276 bool operator!=(const OperandLocation
& other
) const {
277 return !operator==(other
);
281 struct SpilledRegister
{
283 uint32_t stackPushed
;
285 SpilledRegister(Register reg
, uint32_t stackPushed
)
286 : reg(reg
), stackPushed(stackPushed
) {}
287 bool operator==(const SpilledRegister
& other
) const {
288 return reg
== other
.reg
&& stackPushed
== other
.stackPushed
;
290 bool operator!=(const SpilledRegister
& other
) const {
291 return !(*this == other
);
295 using SpilledRegisterVector
= Vector
<SpilledRegister
, 2, SystemAllocPolicy
>;
297 // Class to track and allocate registers while emitting IC code.
298 class MOZ_RAII CacheRegisterAllocator
{
299 // The original location of the inputs to the cache.
300 Vector
<OperandLocation
, 4, SystemAllocPolicy
> origInputLocations_
;
302 // The current location of each operand.
303 Vector
<OperandLocation
, 8, SystemAllocPolicy
> operandLocations_
;
305 // Free lists for value- and payload-slots on stack
306 Vector
<uint32_t, 2, SystemAllocPolicy
> freeValueSlots_
;
307 Vector
<uint32_t, 2, SystemAllocPolicy
> freePayloadSlots_
;
309 // The registers allocated while emitting the current CacheIR op.
310 // This prevents us from allocating a register and then immediately
311 // clobbering it for something else, while we're still holding on to it.
312 LiveGeneralRegisterSet currentOpRegs_
;
314 const AllocatableGeneralRegisterSet allocatableRegs_
;
316 // Registers that are currently unused and available.
317 AllocatableGeneralRegisterSet availableRegs_
;
319 // Registers that are available, but before use they must be saved and
320 // then restored when returning from the stub.
321 AllocatableGeneralRegisterSet availableRegsAfterSpill_
;
323 // Registers we took from availableRegsAfterSpill_ and spilled to the stack.
324 SpilledRegisterVector spilledRegs_
;
326 // The number of bytes pushed on the native stack.
327 uint32_t stackPushed_
;
330 // Flag used to assert individual CacheIR instructions don't allocate
331 // registers after calling addFailurePath.
332 bool addedFailurePath_
;
335 // The index of the CacheIR instruction we're currently emitting.
336 uint32_t currentInstruction_
;
338 // Whether the stack contains a double spilled by AutoScratchFloatRegister.
339 bool hasAutoScratchFloatRegisterSpill_
= false;
341 const CacheIRWriter
& writer_
;
343 CacheRegisterAllocator(const CacheRegisterAllocator
&) = delete;
344 CacheRegisterAllocator
& operator=(const CacheRegisterAllocator
&) = delete;
346 void freeDeadOperandLocations(MacroAssembler
& masm
);
348 void spillOperandToStack(MacroAssembler
& masm
, OperandLocation
* loc
);
349 void spillOperandToStackOrRegister(MacroAssembler
& masm
,
350 OperandLocation
* loc
);
352 void popPayload(MacroAssembler
& masm
, OperandLocation
* loc
, Register dest
);
353 void popValue(MacroAssembler
& masm
, OperandLocation
* loc
, ValueOperand dest
);
354 Address
valueAddress(MacroAssembler
& masm
, const OperandLocation
* loc
) const;
357 void assertValidState() const;
361 friend class AutoScratchRegister
;
362 friend class AutoScratchRegisterExcluding
;
364 explicit CacheRegisterAllocator(const CacheIRWriter
& writer
)
365 : allocatableRegs_(GeneralRegisterSet::All()),
368 addedFailurePath_(false),
370 currentInstruction_(0),
374 MOZ_MUST_USE
bool init();
376 void initAvailableRegs(const AllocatableGeneralRegisterSet
& available
) {
377 availableRegs_
= available
;
379 void initAvailableRegsAfterSpill();
381 void fixupAliasedInputs(MacroAssembler
& masm
);
383 OperandLocation
operandLocation(size_t i
) const {
384 return operandLocations_
[i
];
386 void setOperandLocation(size_t i
, const OperandLocation
& loc
) {
387 operandLocations_
[i
] = loc
;
390 OperandLocation
origInputLocation(size_t i
) const {
391 return origInputLocations_
[i
];
393 void initInputLocation(size_t i
, ValueOperand reg
) {
394 origInputLocations_
[i
].setValueReg(reg
);
395 operandLocations_
[i
].setValueReg(reg
);
397 void initInputLocation(size_t i
, Register reg
, JSValueType type
) {
398 origInputLocations_
[i
].setPayloadReg(reg
, type
);
399 operandLocations_
[i
].setPayloadReg(reg
, type
);
401 void initInputLocation(size_t i
, FloatRegister reg
) {
402 origInputLocations_
[i
].setDoubleReg(reg
);
403 operandLocations_
[i
].setDoubleReg(reg
);
405 void initInputLocation(size_t i
, const Value
& v
) {
406 origInputLocations_
[i
].setConstant(v
);
407 operandLocations_
[i
].setConstant(v
);
409 void initInputLocation(size_t i
, BaselineFrameSlot slot
) {
410 origInputLocations_
[i
].setBaselineFrame(slot
);
411 operandLocations_
[i
].setBaselineFrame(slot
);
414 void initInputLocation(size_t i
, const TypedOrValueRegister
& reg
);
415 void initInputLocation(size_t i
, const ConstantOrRegister
& value
);
417 const SpilledRegisterVector
& spilledRegs() const { return spilledRegs_
; }
419 MOZ_MUST_USE
bool setSpilledRegs(const SpilledRegisterVector
& regs
) {
420 spilledRegs_
.clear();
421 return spilledRegs_
.appendAll(regs
);
424 bool hasAutoScratchFloatRegisterSpill() const {
425 return hasAutoScratchFloatRegisterSpill_
;
427 void setHasAutoScratchFloatRegisterSpill(bool b
) {
428 MOZ_ASSERT(hasAutoScratchFloatRegisterSpill_
!= b
);
429 hasAutoScratchFloatRegisterSpill_
= b
;
435 addedFailurePath_
= false;
437 currentOpRegs_
.clear();
438 currentInstruction_
++;
442 void setAddedFailurePath() {
443 MOZ_ASSERT(!addedFailurePath_
, "multiple failure paths for instruction");
444 addedFailurePath_
= true;
448 bool isDeadAfterInstruction(OperandId opId
) const {
449 return writer_
.operandIsDead(opId
.id(), currentInstruction_
+ 1);
452 uint32_t stackPushed() const { return stackPushed_
; }
453 void setStackPushed(uint32_t pushed
) { stackPushed_
= pushed
; }
455 bool isAllocatable(Register reg
) const { return allocatableRegs_
.has(reg
); }
457 // Allocates a new register.
458 Register
allocateRegister(MacroAssembler
& masm
);
459 ValueOperand
allocateValueRegister(MacroAssembler
& masm
);
461 void allocateFixedRegister(MacroAssembler
& masm
, Register reg
);
462 void allocateFixedValueRegister(MacroAssembler
& masm
, ValueOperand reg
);
464 // Releases a register so it can be reused later.
465 void releaseRegister(Register reg
) {
466 MOZ_ASSERT(currentOpRegs_
.has(reg
));
467 availableRegs_
.add(reg
);
468 currentOpRegs_
.take(reg
);
470 void releaseValueRegister(ValueOperand reg
) {
472 releaseRegister(reg
.payloadReg());
473 releaseRegister(reg
.typeReg());
475 releaseRegister(reg
.valueReg());
479 // Removes spilled values from the native stack. This should only be
480 // called after all registers have been allocated.
481 void discardStack(MacroAssembler
& masm
);
483 Address
addressOf(MacroAssembler
& masm
, BaselineFrameSlot slot
) const;
484 BaseValueIndex
addressOf(MacroAssembler
& masm
, Register argcReg
,
485 BaselineFrameSlot slot
) const;
487 // Returns the register for the given operand. If the operand is currently
488 // not in a register, it will load it into one.
489 ValueOperand
useValueRegister(MacroAssembler
& masm
, ValOperandId val
);
490 ValueOperand
useFixedValueRegister(MacroAssembler
& masm
, ValOperandId valId
,
492 Register
useRegister(MacroAssembler
& masm
, TypedOperandId typedId
);
494 ConstantOrRegister
useConstantOrRegister(MacroAssembler
& masm
,
497 // Allocates an output register for the given operand.
498 Register
defineRegister(MacroAssembler
& masm
, TypedOperandId typedId
);
499 ValueOperand
defineValueRegister(MacroAssembler
& masm
, ValOperandId val
);
501 // Loads (potentially coercing) and unboxes a value into a float register
502 // This is infallible, as there should have been a previous guard
503 // to ensure the value is already a number.
504 // Does not change the allocator's state.
505 void ensureDoubleRegister(MacroAssembler
& masm
, NumberOperandId op
,
506 FloatRegister dest
) const;
508 // Loads an unboxed value into a scratch register. This can be useful
509 // especially on 32-bit x86 when there are not enough registers for
511 // Does not change the allocator's state.
512 void copyToScratchRegister(MacroAssembler
& masm
, TypedOperandId typedId
,
513 Register dest
) const;
515 // Returns |val|'s JSValueType or JSVAL_TYPE_UNKNOWN.
516 JSValueType
knownType(ValOperandId val
) const;
518 // Emits code to restore registers and stack to the state at the start of
520 void restoreInputState(MacroAssembler
& masm
, bool discardStack
= true);
522 // Returns the set of registers storing the IC input operands.
523 GeneralRegisterSet
inputRegisterSet() const;
525 void saveIonLiveRegisters(MacroAssembler
& masm
, LiveRegisterSet liveRegs
,
526 Register scratch
, IonScript
* ionScript
);
527 void restoreIonLiveRegisters(MacroAssembler
& masm
, LiveRegisterSet liveRegs
);
530 // RAII class to allocate a scratch register and release it when we're done
532 class MOZ_RAII AutoScratchRegister
{
533 CacheRegisterAllocator
& alloc_
;
536 AutoScratchRegister(const AutoScratchRegister
&) = delete;
537 void operator=(const AutoScratchRegister
&) = delete;
540 AutoScratchRegister(CacheRegisterAllocator
& alloc
, MacroAssembler
& masm
,
541 Register reg
= InvalidReg
)
543 if (reg
!= InvalidReg
) {
544 alloc
.allocateFixedRegister(masm
, reg
);
547 reg_
= alloc
.allocateRegister(masm
);
549 MOZ_ASSERT(alloc_
.currentOpRegs_
.has(reg_
));
551 ~AutoScratchRegister() { alloc_
.releaseRegister(reg_
); }
553 Register
get() const { return reg_
; }
554 operator Register() const { return reg_
; }
557 // On x86, spectreBoundsCheck32 can emit better code if it has a scratch
558 // register and index masking is enabled.
559 class MOZ_RAII AutoSpectreBoundsScratchRegister
{
560 mozilla::Maybe
<AutoScratchRegister
> scratch_
;
561 Register reg_
= InvalidReg
;
563 AutoSpectreBoundsScratchRegister(const AutoSpectreBoundsScratchRegister
&) =
565 void operator=(const AutoSpectreBoundsScratchRegister
&) = delete;
568 AutoSpectreBoundsScratchRegister(CacheRegisterAllocator
& alloc
,
569 MacroAssembler
& masm
) {
570 #ifdef JS_CODEGEN_X86
571 if (JitOptions
.spectreIndexMasking
) {
572 scratch_
.emplace(alloc
, masm
);
573 reg_
= scratch_
->get();
578 Register
get() const { return reg_
; }
579 operator Register() const { return reg_
; }
582 // Scratch Register64. Implemented with a single AutoScratchRegister on 64-bit
583 // platforms and two AutoScratchRegisters on 32-bit platforms.
584 class MOZ_RAII AutoScratchRegister64
{
585 AutoScratchRegister reg1_
;
586 #if JS_BITS_PER_WORD == 32
587 AutoScratchRegister reg2_
;
591 AutoScratchRegister64(const AutoScratchRegister64
&) = delete;
592 void operator=(const AutoScratchRegister64
&) = delete;
594 #if JS_BITS_PER_WORD == 32
595 AutoScratchRegister64(CacheRegisterAllocator
& alloc
, MacroAssembler
& masm
)
596 : reg1_(alloc
, masm
), reg2_(alloc
, masm
) {}
598 Register64
get() const { return Register64(reg1_
, reg2_
); }
600 AutoScratchRegister64(CacheRegisterAllocator
& alloc
, MacroAssembler
& masm
)
601 : reg1_(alloc
, masm
) {}
603 Register64
get() const { return Register64(reg1_
); }
606 operator Register64() const { return get(); }
609 // The FailurePath class stores everything we need to generate a failure path
610 // at the end of the IC code. The failure path restores the input registers, if
611 // needed, and jumps to the next stub.
613 Vector
<OperandLocation
, 4, SystemAllocPolicy
> inputs_
;
614 SpilledRegisterVector spilledRegs_
;
615 NonAssertingLabel label_
;
616 uint32_t stackPushed_
;
618 // Flag to ensure FailurePath::label() isn't taken while there's a scratch
619 // float register which still needs to be restored.
620 bool hasAutoScratchFloatRegister_
= false;
624 FailurePath() = default;
626 FailurePath(FailurePath
&& other
)
627 : inputs_(std::move(other
.inputs_
)),
628 spilledRegs_(std::move(other
.spilledRegs_
)),
629 label_(other
.label_
),
630 stackPushed_(other
.stackPushed_
) {}
632 Label
* labelUnchecked() { return &label_
; }
634 MOZ_ASSERT(!hasAutoScratchFloatRegister_
);
635 return labelUnchecked();
638 void setStackPushed(uint32_t i
) { stackPushed_
= i
; }
639 uint32_t stackPushed() const { return stackPushed_
; }
641 MOZ_MUST_USE
bool appendInput(const OperandLocation
& loc
) {
642 return inputs_
.append(loc
);
644 OperandLocation
input(size_t i
) const { return inputs_
[i
]; }
646 const SpilledRegisterVector
& spilledRegs() const { return spilledRegs_
; }
648 MOZ_MUST_USE
bool setSpilledRegs(const SpilledRegisterVector
& regs
) {
649 MOZ_ASSERT(spilledRegs_
.empty());
650 return spilledRegs_
.appendAll(regs
);
653 // If canShareFailurePath(other) returns true, the same machine code will
654 // be emitted for two failure paths, so we can share them.
655 bool canShareFailurePath(const FailurePath
& other
) const;
657 void setHasAutoScratchFloatRegister() {
659 MOZ_ASSERT(!hasAutoScratchFloatRegister_
);
660 hasAutoScratchFloatRegister_
= true;
664 void clearHasAutoScratchFloatRegister() {
666 MOZ_ASSERT(hasAutoScratchFloatRegister_
);
667 hasAutoScratchFloatRegister_
= false;
673 * Wrap an offset so that a call can decide to embed a constant
674 * or load from the stub data.
676 class StubFieldOffset
{
679 StubField::Type type_
;
682 StubFieldOffset(uint32_t offset
, StubField::Type type
)
683 : offset_(offset
), type_(type
) {}
685 uint32_t getOffset() { return offset_
; }
686 StubField::Type
getStubFieldType() { return type_
; }
// Forward declaration; CacheIRCompiler below befriends this class.
class AutoOutputRegister;
691 // Base class for BaselineCacheIRCompiler and IonCacheIRCompiler.
692 class MOZ_RAII CacheIRCompiler
{
694 friend class AutoOutputRegister
;
695 friend class AutoStubFrame
;
696 friend class AutoSaveLiveRegisters
;
697 friend class AutoCallVM
;
698 friend class AutoScratchFloatRegister
;
699 friend class AutoAvailableFloatRegister
;
701 enum class Mode
{ Baseline
, Ion
};
703 bool preparedForVMCall_
;
707 BaselineCacheIRCompiler
* asBaseline();
708 IonCacheIRCompiler
* asIon();
711 const CacheIRWriter
& writer_
;
712 StackMacroAssembler masm
;
714 CacheRegisterAllocator allocator
;
715 Vector
<FailurePath
, 4, SystemAllocPolicy
> failurePaths
;
717 // Float registers that are live. Registers not in this set can be
718 // clobbered and don't need to be saved before performing a VM call.
719 // Doing this for non-float registers is a bit more complicated because
720 // the IC register allocator allocates GPRs.
721 LiveFloatRegisterSet liveFloatRegs_
;
723 mozilla::Maybe
<TypedOrValueRegister
> outputUnchecked_
;
726 // Whether this IC may read double values from uint32 arrays.
727 mozilla::Maybe
<bool> allowDoubleResult_
;
729 // Distance from the IC to the stub data; mostly will be
731 uint32_t stubDataOffset_
;
733 enum class StubFieldPolicy
{ Address
, Constant
};
735 StubFieldPolicy stubFieldPolicy_
;
737 CacheIRCompiler(JSContext
* cx
, const CacheIRWriter
& writer
,
738 uint32_t stubDataOffset
, Mode mode
, StubFieldPolicy policy
)
739 : preparedForVMCall_(false),
743 liveFloatRegs_(FloatRegisterSet::All()),
745 stubDataOffset_(stubDataOffset
),
746 stubFieldPolicy_(policy
) {
747 MOZ_ASSERT(!writer
.failed());
750 MOZ_MUST_USE
bool addFailurePath(FailurePath
** failure
);
751 MOZ_MUST_USE
bool emitFailurePath(size_t i
);
753 // Returns the set of volatile float registers that are live. These
754 // registers need to be saved when making non-GC calls with callWithABI.
755 FloatRegisterSet
liveVolatileFloatRegs() const {
756 return FloatRegisterSet::Intersect(liveFloatRegs_
.set(),
757 FloatRegisterSet::Volatile());
760 bool objectGuardNeedsSpectreMitigations(ObjOperandId objId
) const {
761 // Instructions like GuardShape need Spectre mitigations if
762 // (1) mitigations are enabled and (2) the object is used by other
763 // instructions (if the object is *not* used by other instructions,
764 // zeroing its register is pointless).
765 return JitOptions
.spectreObjectMitigationsMisc
&&
766 !allocator
.isDeadAfterInstruction(objId
);
769 bool emitLoadTypedElementResult(ObjOperandId objId
, Int32OperandId indexId
,
770 TypedThingLayout layout
,
771 Scalar::Type elementType
, bool handleOOB
,
772 bool allowDoubleForUint32
);
774 bool emitStoreTypedElement(ObjOperandId objId
, TypedThingLayout layout
,
775 Scalar::Type elementType
, Int32OperandId indexId
,
776 uint32_t rhsId
, bool handleOOB
);
778 void emitStoreTypedObjectReferenceProp(ValueOperand val
, ReferenceType type
,
779 const Address
& dest
, Register scratch
);
781 void emitRegisterEnumerator(Register enumeratorsList
, Register iter
,
785 void emitPostBarrierShared(Register obj
, const ConstantOrRegister
& val
,
786 Register scratch
, Register maybeIndex
);
788 void emitPostBarrierShared(Register obj
, ValueOperand val
, Register scratch
,
789 Register maybeIndex
) {
790 emitPostBarrierShared(obj
, ConstantOrRegister(val
), scratch
, maybeIndex
);
794 template <typename T
>
795 void emitPostBarrierSlot(Register obj
, const T
& val
, Register scratch
) {
796 emitPostBarrierShared(obj
, val
, scratch
, InvalidReg
);
799 template <typename T
>
800 void emitPostBarrierElement(Register obj
, const T
& val
, Register scratch
,
802 MOZ_ASSERT(index
!= InvalidReg
);
803 emitPostBarrierShared(obj
, val
, scratch
, index
);
806 bool emitComparePointerResultShared(JSOp op
, TypedOperandId lhsId
,
807 TypedOperandId rhsId
);
809 bool emitCompareBigIntInt32ResultShared(Register bigInt
, Register int32
,
810 Register scratch1
, Register scratch2
,
812 const AutoOutputRegister
& output
);
814 template <typename Fn
, Fn fn
>
815 MOZ_MUST_USE
bool emitBigIntBinaryOperationShared(BigIntOperandId lhsId
,
816 BigIntOperandId rhsId
);
818 template <typename Fn
, Fn fn
>
819 MOZ_MUST_USE
bool emitBigIntUnaryOperationShared(BigIntOperandId inputId
);
821 bool emitDoubleIncDecResult(bool isInc
, NumberOperandId inputId
);
823 using AtomicsReadWriteModifyFn
= int32_t (*)(TypedArrayObject
*, int32_t,
826 MOZ_MUST_USE
bool emitAtomicsReadModifyWriteResult(
827 ObjOperandId objId
, Int32OperandId indexId
, Int32OperandId valueId
,
828 Scalar::Type elementType
, AtomicsReadWriteModifyFn fn
);
CACHE_IR_COMPILER_SHARED_GENERATED

// Loads the stub field described by |val| into |dest|.
void emitLoadStubField(StubFieldOffset val, Register dest);
// Like emitLoadStubField, but for fields known to be compile-time constants.
void emitLoadStubFieldConstant(StubFieldOffset val, Register dest);

// Returns an Address for the stub field described by |val|, using |base|.
Address emitAddressFromStubField(StubFieldOffset val, Register base);
836 uintptr_t readStubWord(uint32_t offset
, StubField::Type type
) {
837 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
838 MOZ_ASSERT((offset
% sizeof(uintptr_t)) == 0);
839 return writer_
.readStubFieldForIon(offset
, type
).asWord();
841 uint64_t readStubInt64(uint32_t offset
, StubField::Type type
) {
842 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
843 MOZ_ASSERT((offset
% sizeof(uintptr_t)) == 0);
844 return writer_
.readStubFieldForIon(offset
, type
).asInt64();
846 int32_t int32StubField(uint32_t offset
) {
847 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
848 return readStubWord(offset
, StubField::Type::RawWord
);
850 uint32_t uint32StubField(uint32_t offset
) {
851 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
852 return readStubWord(offset
, StubField::Type::RawWord
);
854 Shape
* shapeStubField(uint32_t offset
) {
855 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
856 return (Shape
*)readStubWord(offset
, StubField::Type::Shape
);
858 JSObject
* objectStubField(uint32_t offset
) {
859 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
860 return (JSObject
*)readStubWord(offset
, StubField::Type::JSObject
);
862 // This accessor is for cases where the stubField policy is
863 // being respected through other means, so we don't check the
864 // policy here. (see LoadNewObjectFromTemplateResult)
865 JSObject
* objectStubFieldUnchecked(uint32_t offset
) {
866 return (JSObject
*)writer_
867 .readStubFieldForIon(offset
, StubField::Type::JSObject
)
870 JSString
* stringStubField(uint32_t offset
) {
871 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
872 return (JSString
*)readStubWord(offset
, StubField::Type::String
);
874 JS::Symbol
* symbolStubField(uint32_t offset
) {
875 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
876 return (JS::Symbol
*)readStubWord(offset
, StubField::Type::Symbol
);
878 ObjectGroup
* groupStubField(uint32_t offset
) {
879 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
880 return (ObjectGroup
*)readStubWord(offset
, StubField::Type::ObjectGroup
);
882 JS::Compartment
* compartmentStubField(uint32_t offset
) {
883 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
884 return (JS::Compartment
*)readStubWord(offset
, StubField::Type::RawWord
);
886 const JSClass
* classStubField(uintptr_t offset
) {
887 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
888 return (const JSClass
*)readStubWord(offset
, StubField::Type::RawWord
);
890 const void* proxyHandlerStubField(uintptr_t offset
) {
891 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
892 return (const void*)readStubWord(offset
, StubField::Type::RawWord
);
894 jsid
idStubField(uint32_t offset
) {
895 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
896 return jsid::fromRawBits(readStubWord(offset
, StubField::Type::Id
));
// Asserts that |reg| may be used as a scratch float register here.
void assertFloatRegisterAvailable(FloatRegister reg);

// Performs a VM call to the function identified by |id|.
void callVMInternal(MacroAssembler& masm, VMFunctionId id);
// Typed entry point for VM calls; see callVMInternal.
template <typename Fn, Fn fn>
void callVM(MacroAssembler& masm);
909 // Ensures the IC's output register is available for writing.
910 class MOZ_RAII AutoOutputRegister
{
911 TypedOrValueRegister output_
;
912 CacheRegisterAllocator
& alloc_
;
914 AutoOutputRegister(const AutoOutputRegister
&) = delete;
915 void operator=(const AutoOutputRegister
&) = delete;
918 explicit AutoOutputRegister(CacheIRCompiler
& compiler
);
919 ~AutoOutputRegister();
921 Register
maybeReg() const {
922 if (output_
.hasValue()) {
923 return output_
.valueReg().scratchReg();
925 if (!output_
.typedReg().isFloat()) {
926 return output_
.typedReg().gpr();
931 bool hasValue() const { return output_
.hasValue(); }
932 ValueOperand
valueReg() const { return output_
.valueReg(); }
933 AnyRegister
typedReg() const { return output_
.typedReg(); }
935 JSValueType
type() const {
936 MOZ_ASSERT(!hasValue());
937 return ValueTypeFromMIRType(output_
.type());
940 operator TypedOrValueRegister() const { return output_
; }
// Whether a call made under a stub frame may trigger a GC.
enum class CallCanGC { CanGC, CanNotGC };
945 // Instructions that have to perform a callVM require a stub frame. Call its
946 // enter() and leave() methods to enter/leave the stub frame.
947 // Hoisted from jit/BaselineCacheIRCompiler.cpp. See there for method
949 class MOZ_RAII AutoStubFrame
{
950 BaselineCacheIRCompiler
& compiler
;
952 uint32_t framePushedAtEnterStubFrame_
;
955 AutoStubFrame(const AutoStubFrame
&) = delete;
956 void operator=(const AutoStubFrame
&) = delete;
959 explicit AutoStubFrame(BaselineCacheIRCompiler
& compiler
);
961 void enter(MacroAssembler
& masm
, Register scratch
,
962 CallCanGC canGC
= CallCanGC::CanGC
);
963 void leave(MacroAssembler
& masm
, bool calledIntoIon
= false);
969 // AutoSaveLiveRegisters must be used when we make a call that can GC. The
970 // constructor ensures all live registers are stored on the stack (where the GC
971 // expects them) and the destructor restores these registers.
972 class MOZ_RAII AutoSaveLiveRegisters
{
973 IonCacheIRCompiler
& compiler_
;
975 AutoSaveLiveRegisters(const AutoSaveLiveRegisters
&) = delete;
976 void operator=(const AutoSaveLiveRegisters
&) = delete;
979 explicit AutoSaveLiveRegisters(IonCacheIRCompiler
& compiler
);
981 ~AutoSaveLiveRegisters();
983 // Like AutoScratchRegister, but reuse a register of |output| if possible.
984 class MOZ_RAII AutoScratchRegisterMaybeOutput
{
985 mozilla::Maybe
<AutoScratchRegister
> scratch_
;
986 Register scratchReg_
;
988 AutoScratchRegisterMaybeOutput(const AutoScratchRegisterMaybeOutput
&) =
990 void operator=(const AutoScratchRegisterMaybeOutput
&) = delete;
993 AutoScratchRegisterMaybeOutput(CacheRegisterAllocator
& alloc
,
994 MacroAssembler
& masm
,
995 const AutoOutputRegister
& output
) {
996 scratchReg_
= output
.maybeReg();
997 if (scratchReg_
== InvalidReg
) {
998 scratch_
.emplace(alloc
, masm
);
999 scratchReg_
= scratch_
.ref();
1003 Register
get() const { return scratchReg_
; }
1004 operator Register() const { return scratchReg_
; }
1007 // Like AutoScratchRegisterMaybeOutput, but tries to use the ValueOperand's
1008 // type register for the scratch register on 32-bit.
1010 // Word of warning: Passing an instance of this class and AutoOutputRegister to
1011 // functions may not work correctly, because no guarantee is given that the type
1012 // register is used last when modifying the output's ValueOperand.
1013 class MOZ_RAII AutoScratchRegisterMaybeOutputType
{
1014 mozilla::Maybe
<AutoScratchRegister
> scratch_
;
1015 Register scratchReg_
;
1018 AutoScratchRegisterMaybeOutputType(CacheRegisterAllocator
& alloc
,
1019 MacroAssembler
& masm
,
1020 const AutoOutputRegister
& output
) {
1021 #if defined(JS_NUNBOX32)
1022 scratchReg_
= output
.hasValue() ? output
.valueReg().typeReg() : InvalidReg
;
1024 scratchReg_
= InvalidReg
;
1026 if (scratchReg_
== InvalidReg
) {
1027 scratch_
.emplace(alloc
, masm
);
1028 scratchReg_
= scratch_
.ref();
1032 AutoScratchRegisterMaybeOutputType(
1033 const AutoScratchRegisterMaybeOutputType
&) = delete;
1035 void operator=(const AutoScratchRegisterMaybeOutputType
&) = delete;
1037 operator Register() const { return scratchReg_
; }
1040 // AutoCallVM is a wrapper class that unifies methods shared by
1041 // IonCacheIRCompiler and BaselineCacheIRCompiler that perform a callVM, but
1042 // require stub specific functionality before performing the VM call.
1046 // OPs with implementations that may be unified by this class must:
1047 // - Be listed in the CACHEIR_OPS list but not in the CACHE_IR_SHARED_OPS
1049 // - Differ only in their use of `AutoSaveLiveRegisters`,
1050 // `AutoOutputRegister`, and `AutoScratchRegister`. The Ion
1051 // implementation will use `AutoSaveLiveRegisters` and
1052 // `AutoOutputRegister`, while the Baseline implementation will use
1053 // `AutoScratchRegister`.
1054 // - Both use the `callVM` method.
1056 // Using AutoCallVM:
1057 // - The constructor initializes `AutoOutputRegister` for both compiler
1058 // types. Additionally it initializes an `AutoSaveLiveRegisters` for
1059 // CacheIRCompilers with the mode Ion, and initializes
1060 // `AutoScratchRegisterMaybeOutput` and `AutoStubFrame` variables for
1061 // compilers with mode Baseline.
1062 // - The `prepare()` method calls the IonCacheIRCompiler method
1063 // `prepareVMCall` for IonCacheIRCompilers, calls the `enter()` method of
1064 // `AutoStubFrame` for BaselineCacheIRCompilers, and calls the
1065 // `discardStack` method of the `Register` class for both compiler types.
1066 // - The `call()` method invokes `callVM` on the CacheIRCompiler and stores
1067 // the call result according to its type. Finally it calls the `leave`
1068 // method of `AutoStubFrame` for BaselineCacheIRCompilers.
1070 // Expected Usage Example:
1071 // See: `CacheIRCompiler::emitCallGetSparseElementResult()`
// - OPs that do not meet the criteria listed above cannot be unified with
1078 class MOZ_RAII AutoCallVM
{
1079 MacroAssembler
& masm_
;
1080 CacheIRCompiler
* compiler_
;
1081 CacheRegisterAllocator
& allocator_
;
1082 mozilla::Maybe
<AutoOutputRegister
> output_
;
1084 // Baseline specific stuff
1085 mozilla::Maybe
<AutoStubFrame
> stubFrame_
;
1086 mozilla::Maybe
<AutoScratchRegisterMaybeOutput
> scratch_
;
1088 // Ion specific stuff
1089 mozilla::Maybe
<AutoSaveLiveRegisters
> save_
;
1091 void storeResult(JSValueType returnType
);
1093 template <typename Fn
>
1096 void leaveBaselineStubFrame();
1099 AutoCallVM(MacroAssembler
& masm
, CacheIRCompiler
* compiler
,
1100 CacheRegisterAllocator
& allocator
);
1104 template <typename Fn
, Fn fn
>
1106 compiler_
->callVM
<Fn
, fn
>(masm_
);
1108 leaveBaselineStubFrame();
1111 template <typename Fn
, Fn fn
>
1112 void callNoResult() {
1113 compiler_
->callVM
<Fn
, fn
>(masm_
);
1114 leaveBaselineStubFrame();
1117 ValueOperand
outputValueReg() const { return output_
->valueReg(); }
1120 // RAII class to allocate FloatReg0 as a scratch register and release it when
1121 // we're done with it. The previous contents of FloatReg0 may be spilled on the
1122 // stack and, if necessary, are restored when the destructor runs.
1124 // When FailurePath is passed to the constructor, FailurePath::label() must not
1125 // be used during the life time of the AutoScratchFloatRegister. Instead use
1126 // AutoScratchFloatRegister::failure().
1127 class MOZ_RAII AutoScratchFloatRegister
{
1128 Label failurePopReg_
{};
1129 CacheIRCompiler
* compiler_
;
1130 FailurePath
* failure_
;
1132 AutoScratchFloatRegister(const AutoScratchFloatRegister
&) = delete;
1133 void operator=(const AutoScratchFloatRegister
&) = delete;
1136 explicit AutoScratchFloatRegister(CacheIRCompiler
* compiler
)
1137 : AutoScratchFloatRegister(compiler
, nullptr) {}
1139 AutoScratchFloatRegister(CacheIRCompiler
* compiler
, FailurePath
* failure
);
1141 ~AutoScratchFloatRegister();
1145 FloatRegister
get() const { return FloatReg0
; }
1146 operator FloatRegister() const { return FloatReg0
; }
1149 // This class can be used to assert a certain FloatRegister is available. In
1150 // Baseline mode, all float registers are available. In Ion mode, only the
1151 // registers added as fixed temps in LIRGenerator are available.
1152 class MOZ_RAII AutoAvailableFloatRegister
{
1155 AutoAvailableFloatRegister(const AutoAvailableFloatRegister
&) = delete;
1156 void operator=(const AutoAvailableFloatRegister
&) = delete;
1159 explicit AutoAvailableFloatRegister(CacheIRCompiler
& compiler
,
1163 compiler
.assertFloatRegisterAvailable(reg
);
1167 FloatRegister
get() const { return reg_
; }
1168 operator FloatRegister() const { return reg_
; }
1171 // See the 'Sharing Baseline stub code' comment in CacheIR.h for a description
1173 class CacheIRStubInfo
{
1174 // These fields don't require 8 bits, but GCC complains if these fields are
1175 // smaller than the size of the enums.
1176 CacheKind kind_
: 8;
1177 ICStubEngine engine_
: 8;
1178 bool makesGCCalls_
: 1;
1179 uint8_t stubDataOffset_
;
1181 const uint8_t* code_
;
1183 const uint8_t* fieldTypes_
;
1185 CacheIRStubInfo(CacheKind kind
, ICStubEngine engine
, bool makesGCCalls
,
1186 uint32_t stubDataOffset
, const uint8_t* code
,
1187 uint32_t codeLength
, const uint8_t* fieldTypes
)
1190 makesGCCalls_(makesGCCalls
),
1191 stubDataOffset_(stubDataOffset
),
1193 length_(codeLength
),
1194 fieldTypes_(fieldTypes
) {
1195 MOZ_ASSERT(kind_
== kind
, "Kind must fit in bitfield");
1196 MOZ_ASSERT(engine_
== engine
, "Engine must fit in bitfield");
1197 MOZ_ASSERT(stubDataOffset_
== stubDataOffset
,
1198 "stubDataOffset must fit in uint8_t");
1201 CacheIRStubInfo(const CacheIRStubInfo
&) = delete;
1202 CacheIRStubInfo
& operator=(const CacheIRStubInfo
&) = delete;
1205 CacheKind
kind() const { return kind_
; }
1206 ICStubEngine
engine() const { return engine_
; }
1207 bool makesGCCalls() const { return makesGCCalls_
; }
1209 const uint8_t* code() const { return code_
; }
1210 uint32_t codeLength() const { return length_
; }
1211 uint32_t stubDataOffset() const { return stubDataOffset_
; }
1213 size_t stubDataSize() const;
1215 StubField::Type
fieldType(uint32_t i
) const {
1216 return (StubField::Type
)fieldTypes_
[i
];
1219 static CacheIRStubInfo
* New(CacheKind kind
, ICStubEngine engine
,
1220 bool canMakeCalls
, uint32_t stubDataOffset
,
1221 const CacheIRWriter
& writer
);
1223 template <class Stub
, class T
>
1224 js::GCPtr
<T
>& getStubField(Stub
* stub
, uint32_t offset
) const;
1227 js::GCPtr
<T
>& getStubField(ICStub
* stub
, uint32_t offset
) const {
1228 return getStubField
<ICStub
, T
>(stub
, offset
);
1231 uintptr_t getStubRawWord(const uint8_t* stubData
, uint32_t offset
) const;
1232 uintptr_t getStubRawWord(ICStub
* stub
, uint32_t offset
) const;
1234 int64_t getStubRawInt64(const uint8_t* stubData
, uint32_t offset
) const;
1235 int64_t getStubRawInt64(ICStub
* stub
, uint32_t offset
) const;
1237 void replaceStubRawWord(uint8_t* stubData
, uint32_t offset
, uintptr_t oldWord
,
1238 uintptr_t newWord
) const;
// Traces the GC things stored in |stub|'s stub fields, using |stubInfo| to
// locate and type them.
template <typename T>
void TraceCacheIRStub(JSTracer* trc, T* stub, const CacheIRStubInfo* stubInfo);

// Loads the data of the typed thing |obj| into |result| according to
// |layout|.
void LoadTypedThingData(MacroAssembler& masm, TypedThingLayout layout,
                        Register obj, Register result);

// Loads the length of the typed thing |obj| into |result| according to
// |layout|.
void LoadTypedThingLength(MacroAssembler& masm, TypedThingLayout layout,
                          Register obj, Register result);
1253 #endif /* jit_CacheIRCompiler_h */