1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #ifndef jit_CacheIRCompiler_h
8 #define jit_CacheIRCompiler_h
10 #include "mozilla/Casting.h"
11 #include "mozilla/Maybe.h"
13 #include "jit/CacheIR.h"
14 #include "jit/CacheIRReader.h"
15 #include "jit/CacheIRWriter.h"
16 #include "jit/JitOptions.h"
17 #include "jit/MacroAssembler.h"
18 #include "jit/PerfSpewer.h"
19 #include "jit/SharedICRegisters.h"
20 #include "js/ScalarType.h" // js::Scalar::Type
// Forward declarations. The full definitions live in other jit/ and vm/
// headers; this header only needs pointers/references to these types.
28 class TypedArrayObject
;
29 enum class UnaryMathFunction
: uint8_t;
33 class BaselineCacheIRCompiler
;
35 class IonCacheIRCompiler
;
38 enum class ICStubEngine
: uint8_t;
40 // [SMDOC] CacheIR Value Representation and Tracking
42 // While compiling an IC stub the CacheIR compiler needs to keep track of the
43 // physical location for each logical piece of data we care about, as well as
44 // ensure that in the case of a stub failing, we are able to restore the input
45 // state so that a subsequent stub can attempt to provide a value.
47 // OperandIds are created in the CacheIR front-end to keep track of values that
48 // are passed between CacheIR ops during the execution of a given CacheIR stub.
49 // In the CacheRegisterAllocator these OperandIds are given OperandLocations,
50 // that represent the physical location of the OperandId at a given point in
51 // time during CacheRegister allocation.
53 // In the CacheRegisterAllocator physical locations include the stack, and
54 // registers, as well as whether or not the value has been unboxed.
55 // Constants are also represented separately to provide for on-demand
58 // Intra-op Register allocation:
60 // During the emission of a CacheIR op, code can ask the CacheRegisterAllocator
61 // for access to a particular OperandId, and the register allocator will
62 // generate the required code to fill that request.
64 // Input OperandIds should be considered as immutable, and should not be mutated
65 // during the execution of a stub.
67 // There are also a number of RAII classes that interact with the register
68 // allocator, in order to provide access to more registers than just those
69 // provided for by the OperandIds.
71 // - AutoOutputReg: The register which will hold the output value of the stub.
72 // - AutoScratchReg: By default, an arbitrary scratch register, however a
73 // specific register can be requested.
74 // - AutoScratchRegMaybeOutput: Any arbitrary scratch register, but the output
75 // register may be used as well.
77 // These RAII classes take ownership of a register for the duration of their
78 // lifetime so they can be used for computation or output. The register
79 // allocator can spill values with OperandLocations in order to try to ensure
80 // that a register is made available for use.
82 // If a specific register is required (via AutoScratchRegister), it should be
83 // the first register acquired, as the register allocator will be unable to
84 // allocate the fixed register if the current op is using it for something else.
86 // If no register can be provided after attempting to spill, a
87 // MOZ_RELEASE_ASSERT ensures the browser will crash. The register allocator is
88 // not provided enough information in its current design to insert spills and
89 // fills at arbitrary locations, and so it can fail to find an allocation
90 // solution. However, this will only happen within the implementation of an
91 // operand emitter, and because the cache register allocator is mostly
92 // deterministic, so long as the operand id emitter is tested, this won't
93 // suddenly crop up in an arbitrary webpage. It's worth noting the most
94 // difficult platform to support is x86-32, because it has the least number of
95 // registers available.
97 // FailurePaths checkpoint the state of the register allocator so that the input
98 // state can be recomputed from the current state before jumping to the next
99 // stub in the IC chain. An important invariant is that the FailurePath must be
100 // allocated for each op after all the manipulation of OperandLocations has
101 // happened, so that its recording is correct.
103 // Inter-op Register Allocation:
105 // The RAII register management classes are RAII because all register state
106 // outside the OperandLocations is reset before the compilation of each
107 // individual CacheIR op. This means that you cannot rely on a value surviving
108 // between ops, even if you use the ability of AutoScratchRegister to name a
109 // specific register. Values that need to be preserved between ops must be given
112 // Represents a Value on the Baseline frame's expression stack. Slot 0 is the
113 // value on top of the stack (the most recently pushed value), slot 1 is the
114 // value pushed before that, etc.
115 class BaselineFrameSlot
{
// NOTE(review): this excerpt elides the slot_ member declaration, access
// specifiers, and the closing braces; all visible tokens are verbatim.
// Wraps the zero-based depth into the Baseline expression stack (0 == top).
119 explicit BaselineFrameSlot(uint32_t slot
) : slot_(slot
) {}
120 uint32_t slot() const { return slot_
; }
// Two slots compare equal iff they name the same expression-stack depth.
122 bool operator==(const BaselineFrameSlot
& other
) const {
123 return slot_
== other
.slot_
;
125 bool operator!=(const BaselineFrameSlot
& other
) const {
126 return slot_
!= other
.slot_
;
130 // OperandLocation represents the location of an OperandId. The operand is
131 // either in a register or on the stack, and is either boxed or unboxed.
132 class OperandLocation
{
// NOTE(review): this excerpt elides the Kind enumeration, the union Data
// declaration wrapping the members below, several method bodies
// (e.g. setConstant) and closing braces; visible tokens are verbatim.
// Union members: exactly one is active, selected by kind_.
153 FloatRegister doubleReg
;
154 ValueOperand valueReg
;
156 uint32_t stackPushed
;
159 uint32_t valueStackPushed
;
160 BaselineFrameSlot baselineFrameSlot
;
163 Data() : valueStackPushed(0) {}
// A default-constructed location is Uninitialized until a set*() is called.
168 OperandLocation() : kind_(Uninitialized
) {}
170 Kind
kind() const { return kind_
; }
172 void setUninitialized() { kind_
= Uninitialized
; }
// Accessors: each asserts kind_ matches before reading the union member.
174 ValueOperand
valueReg() const {
175 MOZ_ASSERT(kind_
== ValueReg
);
176 return data_
.valueReg
;
178 Register
payloadReg() const {
179 MOZ_ASSERT(kind_
== PayloadReg
);
180 return data_
.payloadReg
.reg
;
182 FloatRegister
doubleReg() const {
183 MOZ_ASSERT(kind_
== DoubleReg
);
184 return data_
.doubleReg
;
186 uint32_t payloadStack() const {
187 MOZ_ASSERT(kind_
== PayloadStack
);
188 return data_
.payloadStack
.stackPushed
;
190 uint32_t valueStack() const {
191 MOZ_ASSERT(kind_
== ValueStack
);
192 return data_
.valueStackPushed
;
// payloadType() is valid for both register- and stack-held payloads.
194 JSValueType
payloadType() const {
195 if (kind_
== PayloadReg
) {
196 return data_
.payloadReg
.type
;
198 MOZ_ASSERT(kind_
== PayloadStack
);
199 return data_
.payloadStack
.type
;
201 Value
constant() const {
202 MOZ_ASSERT(kind_
== Constant
);
203 return data_
.constant
;
205 BaselineFrameSlot
baselineFrameSlot() const {
206 MOZ_ASSERT(kind_
== BaselineFrame
);
207 return data_
.baselineFrameSlot
;
// Mutators: store the new payload. kind_ assignments are visible for the
// stack/frame variants; for the others they fall on elided lines —
// presumably each setter also sets kind_ (confirm against the full file).
210 void setPayloadReg(Register reg
, JSValueType type
) {
212 data_
.payloadReg
.reg
= reg
;
213 data_
.payloadReg
.type
= type
;
215 void setDoubleReg(FloatRegister reg
) {
217 data_
.doubleReg
= reg
;
219 void setValueReg(ValueOperand reg
) {
221 data_
.valueReg
= reg
;
223 void setPayloadStack(uint32_t stackPushed
, JSValueType type
) {
224 kind_
= PayloadStack
;
225 data_
.payloadStack
.stackPushed
= stackPushed
;
226 data_
.payloadStack
.type
= type
;
228 void setValueStack(uint32_t stackPushed
) {
230 data_
.valueStackPushed
= stackPushed
;
232 void setConstant(const Value
& v
) {
236 void setBaselineFrame(BaselineFrameSlot slot
) {
237 kind_
= BaselineFrame
;
238 data_
.baselineFrameSlot
= slot
;
// Kind queries.
241 bool isUninitialized() const { return kind_
== Uninitialized
; }
242 bool isInRegister() const { return kind_
== PayloadReg
|| kind_
== ValueReg
; }
243 bool isOnStack() const {
244 return kind_
== PayloadStack
|| kind_
== ValueStack
;
// Stack-depth bookkeeping: only meaningful for the stack-held kinds.
247 size_t stackPushed() const {
248 if (kind_
== PayloadStack
) {
249 return data_
.payloadStack
.stackPushed
;
251 MOZ_ASSERT(kind_
== ValueStack
);
252 return data_
.valueStackPushed
;
254 size_t stackSizeInBytes() const {
255 if (kind_
== PayloadStack
) {
256 return sizeof(uintptr_t);
258 MOZ_ASSERT(kind_
== ValueStack
);
259 return sizeof(js::Value
);
261 void adjustStackPushed(int32_t diff
) {
262 if (kind_
== PayloadStack
) {
263 data_
.payloadStack
.stackPushed
+= diff
;
266 MOZ_ASSERT(kind_
== ValueStack
);
267 data_
.valueStackPushed
+= diff
;
// Returns whether this location occupies |reg| (PayloadReg/ValueReg kinds).
270 bool aliasesReg(Register reg
) const {
271 if (kind_
== PayloadReg
) {
272 return payloadReg() == reg
;
274 if (kind_
== ValueReg
) {
275 return valueReg().aliases(reg
);
// On NUNBOX32 a boxed value occupies two GPRs, so check both halves.
279 bool aliasesReg(ValueOperand reg
) const {
280 #if defined(JS_NUNBOX32)
281 return aliasesReg(reg
.typeReg()) || aliasesReg(reg
.payloadReg());
283 return aliasesReg(reg
.valueReg());
287 bool aliasesReg(const OperandLocation
& other
) const;
289 bool operator==(const OperandLocation
& other
) const;
290 bool operator!=(const OperandLocation
& other
) const {
291 return !operator==(other
);
295 struct SpilledRegister
{
// NOTE(review): the |Register reg;| member declaration is elided in this
// excerpt but is referenced by the constructor and operator== below.
// Pairs a spilled register with the stack depth (in bytes pushed) at which
// its old contents were saved.
297 uint32_t stackPushed
;
299 SpilledRegister(Register reg
, uint32_t stackPushed
)
300 : reg(reg
), stackPushed(stackPushed
) {}
// Equal iff both the register and its spill location match.
301 bool operator==(const SpilledRegister
& other
) const {
302 return reg
== other
.reg
&& stackPushed
== other
.stackPushed
;
304 bool operator!=(const SpilledRegister
& other
) const {
305 return !(*this == other
);
// Inline-capacity-2 vector of (register, spill offset) records.
309 using SpilledRegisterVector
= Vector
<SpilledRegister
, 2, SystemAllocPolicy
>;
311 // Class to track and allocate registers while emitting IC code.
312 class MOZ_RAII CacheRegisterAllocator
{
// NOTE(review): this excerpt elides access specifiers, some members,
// several method headers/braces and the class's closing brace; all
// visible tokens are verbatim.
313 // The original location of the inputs to the cache.
314 Vector
<OperandLocation
, 4, SystemAllocPolicy
> origInputLocations_
;
316 // The current location of each operand.
317 Vector
<OperandLocation
, 8, SystemAllocPolicy
> operandLocations_
;
319 // Free lists for value- and payload-slots on stack
320 Vector
<uint32_t, 2, SystemAllocPolicy
> freeValueSlots_
;
321 Vector
<uint32_t, 2, SystemAllocPolicy
> freePayloadSlots_
;
323 // The registers allocated while emitting the current CacheIR op.
324 // This prevents us from allocating a register and then immediately
325 // clobbering it for something else, while we're still holding on to it.
326 LiveGeneralRegisterSet currentOpRegs_
;
328 const AllocatableGeneralRegisterSet allocatableRegs_
;
330 // Registers that are currently unused and available.
331 AllocatableGeneralRegisterSet availableRegs_
;
333 // Registers that are available, but before use they must be saved and
334 // then restored when returning from the stub.
335 AllocatableGeneralRegisterSet availableRegsAfterSpill_
;
337 // Registers we took from availableRegsAfterSpill_ and spilled to the stack.
338 SpilledRegisterVector spilledRegs_
;
340 // The number of bytes pushed on the native stack.
341 uint32_t stackPushed_
;
344 // Flag used to assert individual CacheIR instructions don't allocate
345 // registers after calling addFailurePath.
346 bool addedFailurePath_
;
349 // The index of the CacheIR instruction we're currently emitting.
350 uint32_t currentInstruction_
;
352 // Whether the stack contains a double spilled by AutoScratchFloatRegister.
353 bool hasAutoScratchFloatRegisterSpill_
= false;
355 const CacheIRWriter
& writer_
;
// Non-copyable: the allocator owns mutable register/stack bookkeeping.
357 CacheRegisterAllocator(const CacheRegisterAllocator
&) = delete;
358 CacheRegisterAllocator
& operator=(const CacheRegisterAllocator
&) = delete;
// Private helpers for moving operands between registers and the stack.
360 void freeDeadOperandLocations(MacroAssembler
& masm
);
362 void spillOperandToStack(MacroAssembler
& masm
, OperandLocation
* loc
);
363 void spillOperandToStackOrRegister(MacroAssembler
& masm
,
364 OperandLocation
* loc
);
366 void popPayload(MacroAssembler
& masm
, OperandLocation
* loc
, Register dest
);
367 void popValue(MacroAssembler
& masm
, OperandLocation
* loc
, ValueOperand dest
);
368 Address
payloadAddress(MacroAssembler
& masm
,
369 const OperandLocation
* loc
) const;
370 Address
valueAddress(MacroAssembler
& masm
, const OperandLocation
* loc
) const;
373 void assertValidState() const;
377 friend class AutoScratchRegister
;
378 friend class AutoScratchRegisterExcluding
;
// Starts with every GPR allocatable; counters/flags zeroed. (Parts of the
// init list are elided in this excerpt.)
380 explicit CacheRegisterAllocator(const CacheIRWriter
& writer
)
381 : allocatableRegs_(GeneralRegisterSet::All()),
384 addedFailurePath_(false),
386 currentInstruction_(0),
390 [[nodiscard
]] bool init();
392 void initAvailableRegs(const AllocatableGeneralRegisterSet
& available
) {
393 availableRegs_
= available
;
395 void initAvailableRegsAfterSpill();
397 void fixupAliasedInputs(MacroAssembler
& masm
);
// Per-operand location bookkeeping (indexed by OperandId).
399 OperandLocation
operandLocation(size_t i
) const {
400 return operandLocations_
[i
];
402 void setOperandLocation(size_t i
, const OperandLocation
& loc
) {
403 operandLocations_
[i
] = loc
;
406 OperandLocation
origInputLocation(size_t i
) const {
407 return origInputLocations_
[i
];
// initInputLocation() records the same location as both the original and
// the current location of input operand |i|.
409 void initInputLocation(size_t i
, ValueOperand reg
) {
410 origInputLocations_
[i
].setValueReg(reg
);
411 operandLocations_
[i
].setValueReg(reg
);
413 void initInputLocation(size_t i
, Register reg
, JSValueType type
) {
414 origInputLocations_
[i
].setPayloadReg(reg
, type
);
415 operandLocations_
[i
].setPayloadReg(reg
, type
);
417 void initInputLocation(size_t i
, FloatRegister reg
) {
418 origInputLocations_
[i
].setDoubleReg(reg
);
419 operandLocations_
[i
].setDoubleReg(reg
);
421 void initInputLocation(size_t i
, const Value
& v
) {
422 origInputLocations_
[i
].setConstant(v
);
423 operandLocations_
[i
].setConstant(v
);
425 void initInputLocation(size_t i
, BaselineFrameSlot slot
) {
426 origInputLocations_
[i
].setBaselineFrame(slot
);
427 operandLocations_
[i
].setBaselineFrame(slot
);
430 void initInputLocation(size_t i
, const TypedOrValueRegister
& reg
);
431 void initInputLocation(size_t i
, const ConstantOrRegister
& value
);
433 const SpilledRegisterVector
& spilledRegs() const { return spilledRegs_
; }
// Replaces the spill list wholesale; may fail on OOM (hence nodiscard).
435 [[nodiscard
]] bool setSpilledRegs(const SpilledRegisterVector
& regs
) {
436 spilledRegs_
.clear();
437 return spilledRegs_
.appendAll(regs
);
440 bool hasAutoScratchFloatRegisterSpill() const {
441 return hasAutoScratchFloatRegisterSpill_
;
443 void setHasAutoScratchFloatRegisterSpill(bool b
) {
444 MOZ_ASSERT(hasAutoScratchFloatRegisterSpill_
!= b
);
445 hasAutoScratchFloatRegisterSpill_
= b
;
// NOTE(review): the header of the per-op reset method below is elided;
// presumably it runs between CacheIR ops — confirm against the full file.
451 addedFailurePath_
= false;
453 currentOpRegs_
.clear();
454 currentInstruction_
++;
458 void setAddedFailurePath() {
459 MOZ_ASSERT(!addedFailurePath_
, "multiple failure paths for instruction");
460 addedFailurePath_
= true;
// True if |opId| is no longer used after the current instruction, per the
// writer's liveness information.
464 bool isDeadAfterInstruction(OperandId opId
) const {
465 return writer_
.operandIsDead(opId
.id(), currentInstruction_
+ 1);
468 uint32_t stackPushed() const { return stackPushed_
; }
469 void setStackPushed(uint32_t pushed
) { stackPushed_
= pushed
; }
471 bool isAllocatable(Register reg
) const { return allocatableRegs_
.has(reg
); }
473 // Allocates a new register.
474 Register
allocateRegister(MacroAssembler
& masm
);
475 ValueOperand
allocateValueRegister(MacroAssembler
& masm
);
477 void allocateFixedRegister(MacroAssembler
& masm
, Register reg
);
478 void allocateFixedValueRegister(MacroAssembler
& masm
, ValueOperand reg
);
480 // Releases a register so it can be reused later.
481 void releaseRegister(Register reg
) {
482 MOZ_ASSERT(currentOpRegs_
.has(reg
));
483 availableRegs_
.add(reg
);
484 currentOpRegs_
.take(reg
);
// On NUNBOX32 a ValueOperand is two GPRs; release both halves.
486 void releaseValueRegister(ValueOperand reg
) {
488 releaseRegister(reg
.payloadReg());
489 releaseRegister(reg
.typeReg());
491 releaseRegister(reg
.valueReg());
495 // Removes spilled values from the native stack. This should only be
496 // called after all registers have been allocated.
497 void discardStack(MacroAssembler
& masm
);
499 Address
addressOf(MacroAssembler
& masm
, BaselineFrameSlot slot
) const;
500 BaseValueIndex
addressOf(MacroAssembler
& masm
, Register argcReg
,
501 BaselineFrameSlot slot
) const;
503 // Returns the register for the given operand. If the operand is currently
504 // not in a register, it will load it into one.
505 ValueOperand
useValueRegister(MacroAssembler
& masm
, ValOperandId val
);
506 Register
useRegister(MacroAssembler
& masm
, TypedOperandId typedId
);
508 ConstantOrRegister
useConstantOrRegister(MacroAssembler
& masm
,
511 // Allocates an output register for the given operand.
512 Register
defineRegister(MacroAssembler
& masm
, TypedOperandId typedId
);
513 ValueOperand
defineValueRegister(MacroAssembler
& masm
, ValOperandId val
);
515 // Loads (potentially coercing) and unboxes a value into a float register
516 // This is infallible, as there should have been a previous guard
517 // to ensure the value is already a number.
518 // Does not change the allocator's state.
519 void ensureDoubleRegister(MacroAssembler
& masm
, NumberOperandId op
,
520 FloatRegister dest
) const;
522 // Loads an unboxed value into a scratch register. This can be useful
523 // especially on 32-bit x86 when there are not enough registers for
525 // Does not change the allocator's state.
526 void copyToScratchRegister(MacroAssembler
& masm
, TypedOperandId typedId
,
527 Register dest
) const;
528 void copyToScratchValueRegister(MacroAssembler
& masm
, ValOperandId valId
,
529 ValueOperand dest
) const;
531 // Returns |val|'s JSValueType or JSVAL_TYPE_UNKNOWN.
532 JSValueType
knownType(ValOperandId val
) const;
534 // Emits code to restore registers and stack to the state at the start of
536 void restoreInputState(MacroAssembler
& masm
, bool discardStack
= true);
538 // Returns the set of registers storing the IC input operands.
539 GeneralRegisterSet
inputRegisterSet() const;
// Ion-only helpers for saving/restoring the caller's live registers around
// the stub.
541 void saveIonLiveRegisters(MacroAssembler
& masm
, LiveRegisterSet liveRegs
,
542 Register scratch
, IonScript
* ionScript
);
543 void restoreIonLiveRegisters(MacroAssembler
& masm
, LiveRegisterSet liveRegs
);
546 // RAII class to allocate a scratch register and release it when we're done
548 class MOZ_RAII AutoScratchRegister
{
// NOTE(review): this excerpt elides the reg_ member, access specifiers and
// closing braces; all visible tokens are verbatim.
// Holds a scratch GPR for the duration of the current CacheIR op: the
// constructor allocates (a specific register if requested, otherwise an
// arbitrary one) and the destructor releases it back to the allocator.
549 CacheRegisterAllocator
& alloc_
;
552 AutoScratchRegister(const AutoScratchRegister
&) = delete;
553 void operator=(const AutoScratchRegister
&) = delete;
556 AutoScratchRegister(CacheRegisterAllocator
& alloc
, MacroAssembler
& masm
,
557 Register reg
= InvalidReg
)
559 if (reg
!= InvalidReg
) {
560 alloc
.allocateFixedRegister(masm
, reg
);
563 reg_
= alloc
.allocateRegister(masm
);
565 MOZ_ASSERT(alloc_
.currentOpRegs_
.has(reg_
));
567 ~AutoScratchRegister() { alloc_
.releaseRegister(reg_
); }
// Implicit conversion lets the scratch be passed wherever a Register is.
569 Register
get() const { return reg_
; }
570 operator Register() const { return reg_
; }
573 // On x86, spectreBoundsCheck32 can emit better code if it has a scratch
574 // register and index masking is enabled.
575 class MOZ_RAII AutoSpectreBoundsScratchRegister
{
// NOTE(review): access specifiers and closing braces are elided here.
// Only allocates a real scratch register on x86 when Spectre index
// masking is enabled; otherwise get() returns InvalidReg.
576 mozilla::Maybe
<AutoScratchRegister
> scratch_
;
577 Register reg_
= InvalidReg
;
579 AutoSpectreBoundsScratchRegister(const AutoSpectreBoundsScratchRegister
&) =
581 void operator=(const AutoSpectreBoundsScratchRegister
&) = delete;
584 AutoSpectreBoundsScratchRegister(CacheRegisterAllocator
& alloc
,
585 MacroAssembler
& masm
) {
586 #ifdef JS_CODEGEN_X86
587 if (JitOptions
.spectreIndexMasking
) {
588 scratch_
.emplace(alloc
, masm
);
589 reg_
= scratch_
->get();
594 Register
get() const { return reg_
; }
595 operator Register() const { return reg_
; }
598 // Scratch Register64. Implemented with a single AutoScratchRegister on 64-bit
599 // platforms and two AutoScratchRegisters on 32-bit platforms.
600 class MOZ_RAII AutoScratchRegister64
{
// NOTE(review): #else/#endif lines, access specifiers and closing braces
// are elided here; the two #if branches below are the 32-bit (two GPRs)
// and 64-bit (one GPR) variants.
601 AutoScratchRegister reg1_
;
602 #if JS_BITS_PER_WORD == 32
603 AutoScratchRegister reg2_
;
607 AutoScratchRegister64(const AutoScratchRegister64
&) = delete;
608 void operator=(const AutoScratchRegister64
&) = delete;
610 #if JS_BITS_PER_WORD == 32
611 AutoScratchRegister64(CacheRegisterAllocator
& alloc
, MacroAssembler
& masm
)
612 : reg1_(alloc
, masm
), reg2_(alloc
, masm
) {}
614 Register64
get() const { return Register64(reg1_
, reg2_
); }
616 AutoScratchRegister64(CacheRegisterAllocator
& alloc
, MacroAssembler
& masm
)
617 : reg1_(alloc
, masm
) {}
619 Register64
get() const { return Register64(reg1_
); }
622 operator Register64() const { return get(); }
625 // Scratch ValueOperand. Implemented with a single AutoScratchRegister on 64-bit
626 // platforms and two AutoScratchRegisters on 32-bit platforms.
627 class MOZ_RAII AutoScratchValueRegister
{
// NOTE(review): #else/#endif lines, access specifiers and closing braces
// are elided here; the two #if branches below are the NUNBOX32 (two GPRs)
// and 64-bit (one GPR) variants of a scratch ValueOperand.
628 AutoScratchRegister reg1_
;
629 #if JS_BITS_PER_WORD == 32
630 AutoScratchRegister reg2_
;
634 AutoScratchValueRegister(const AutoScratchValueRegister
&) = delete;
635 void operator=(const AutoScratchValueRegister
&) = delete;
637 #if JS_BITS_PER_WORD == 32
638 AutoScratchValueRegister(CacheRegisterAllocator
& alloc
, MacroAssembler
& masm
)
639 : reg1_(alloc
, masm
), reg2_(alloc
, masm
) {}
641 ValueOperand
get() const { return ValueOperand(reg1_
, reg2_
); }
643 AutoScratchValueRegister(CacheRegisterAllocator
& alloc
, MacroAssembler
& masm
)
644 : reg1_(alloc
, masm
) {}
646 ValueOperand
get() const { return ValueOperand(reg1_
); }
649 operator ValueOperand() const { return get(); }
652 // The FailurePath class stores everything we need to generate a failure path
653 // at the end of the IC code. The failure path restores the input registers, if
654 // needed, and jumps to the next stub.
// NOTE(review): the |class FailurePath {| header line is elided in this
// excerpt; the lines below are its members and methods (see the comment
// above for the class's purpose). All visible tokens are verbatim.
656 Vector
<OperandLocation
, 4, SystemAllocPolicy
> inputs_
;
657 SpilledRegisterVector spilledRegs_
;
658 NonAssertingLabel label_
;
659 uint32_t stackPushed_
;
661 // Flag to ensure FailurePath::label() isn't taken while there's a scratch
662 // float register which still needs to be restored.
663 bool hasAutoScratchFloatRegister_
= false;
667 FailurePath() = default;
// Move-only in practice: moving transfers the recorded input locations and
// spill list; the label is copied.
669 FailurePath(FailurePath
&& other
)
670 : inputs_(std::move(other
.inputs_
)),
671 spilledRegs_(std::move(other
.spilledRegs_
)),
672 label_(other
.label_
),
673 stackPushed_(other
.stackPushed_
) {}
675 Label
* labelUnchecked() { return &label_
; }
// The checked accessor (header elided) asserts no scratch float register
// is still outstanding before handing out the label.
677 MOZ_ASSERT(!hasAutoScratchFloatRegister_
);
678 return labelUnchecked();
681 void setStackPushed(uint32_t i
) { stackPushed_
= i
; }
682 uint32_t stackPushed() const { return stackPushed_
; }
// Records the location of one IC input operand; may fail on OOM.
684 [[nodiscard
]] bool appendInput(const OperandLocation
& loc
) {
685 return inputs_
.append(loc
);
687 OperandLocation
input(size_t i
) const { return inputs_
[i
]; }
689 const SpilledRegisterVector
& spilledRegs() const { return spilledRegs_
; }
691 [[nodiscard
]] bool setSpilledRegs(const SpilledRegisterVector
& regs
) {
692 MOZ_ASSERT(spilledRegs_
.empty());
693 return spilledRegs_
.appendAll(regs
);
696 // If canShareFailurePath(other) returns true, the same machine code will
697 // be emitted for two failure paths, so we can share them.
698 bool canShareFailurePath(const FailurePath
& other
) const;
700 void setHasAutoScratchFloatRegister() {
702 MOZ_ASSERT(!hasAutoScratchFloatRegister_
);
703 hasAutoScratchFloatRegister_
= true;
707 void clearHasAutoScratchFloatRegister() {
709 MOZ_ASSERT(hasAutoScratchFloatRegister_
);
710 hasAutoScratchFloatRegister_
= false;
716 * Wrap an offset so that a call can decide to embed a constant
717 * or load from the stub data.
719 class StubFieldOffset
{
// NOTE(review): the offset_ member declaration, access specifiers and the
// closing brace are elided in this excerpt; visible tokens are verbatim.
// Pairs a stub-data offset with the StubField::Type stored there, so
// emitters can decide to embed the constant or load it from stub data.
722 StubField::Type type_
;
725 StubFieldOffset(uint32_t offset
, StubField::Type type
)
726 : offset_(offset
), type_(type
) {}
728 uint32_t getOffset() { return offset_
; }
729 StubField::Type
getStubFieldType() { return type_
; }
// Forward declaration; the class is defined later in this header.
732 class AutoOutputRegister
;
734 // Base class for BaselineCacheIRCompiler and IonCacheIRCompiler.
735 class MOZ_RAII CacheIRCompiler
{
// NOTE(review): this excerpt elides access specifiers, some members,
// method bodies/braces and the class's closing brace; all visible tokens
// are verbatim.
// The Auto* helpers below need access to the compiler's allocator/masm.
737 friend class AutoOutputRegister
;
738 friend class AutoStubFrame
;
739 friend class AutoSaveLiveRegisters
;
740 friend class AutoCallVM
;
741 friend class AutoScratchFloatRegister
;
742 friend class AutoAvailableFloatRegister
;
// Which engine this stub is being compiled for.
744 enum class Mode
{ Baseline
, Ion
};
746 bool enteredStubFrame_
;
// Checked downcasts to the concrete compiler subclasses.
750 BaselineCacheIRCompiler
* asBaseline();
751 IonCacheIRCompiler
* asIon();
754 const CacheIRWriter
& writer_
;
755 StackMacroAssembler masm
;
757 CacheRegisterAllocator allocator
;
758 Vector
<FailurePath
, 4, SystemAllocPolicy
> failurePaths
;
760 // Float registers that are live. Registers not in this set can be
761 // clobbered and don't need to be saved before performing a VM call.
762 // Doing this for non-float registers is a bit more complicated because
763 // the IC register allocator allocates GPRs.
764 LiveFloatRegisterSet liveFloatRegs_
;
766 mozilla::Maybe
<TypedOrValueRegister
> outputUnchecked_
;
769 // Distance from the IC to the stub data; mostly will be
771 uint32_t stubDataOffset_
;
// Whether stub fields are read as addresses into stub data or as
// compile-time constants.
773 enum class StubFieldPolicy
{ Address
, Constant
};
775 StubFieldPolicy stubFieldPolicy_
;
// Protected constructor: only the Baseline/Ion subclasses instantiate
// this. (Parts of the init list are elided in this excerpt.)
777 CacheIRCompiler(JSContext
* cx
, TempAllocator
& alloc
,
778 const CacheIRWriter
& writer
, uint32_t stubDataOffset
,
779 Mode mode
, StubFieldPolicy policy
)
780 : enteredStubFrame_(false),
785 liveFloatRegs_(FloatRegisterSet::All()),
787 stubDataOffset_(stubDataOffset
),
788 stubFieldPolicy_(policy
) {
789 MOZ_ASSERT(!writer
.failed());
792 [[nodiscard
]] bool addFailurePath(FailurePath
** failure
);
793 [[nodiscard
]] bool emitFailurePath(size_t i
);
795 // Returns the set of volatile float registers that are live. These
796 // registers need to be saved when making non-GC calls with callWithABI.
797 FloatRegisterSet
liveVolatileFloatRegs() const {
798 return FloatRegisterSet::Intersect(liveFloatRegs_
.set(),
799 FloatRegisterSet::Volatile());
802 bool objectGuardNeedsSpectreMitigations(ObjOperandId objId
) const {
803 // Instructions like GuardShape need Spectre mitigations if
804 // (1) mitigations are enabled and (2) the object is used by other
805 // instructions (if the object is *not* used by other instructions,
806 // zeroing its register is pointless).
807 return JitOptions
.spectreObjectMitigations
&&
808 !allocator
.isDeadAfterInstruction(objId
);
// GC post-barrier emission; the ValueOperand overload wraps the value and
// forwards to the ConstantOrRegister implementation.
812 void emitPostBarrierShared(Register obj
, const ConstantOrRegister
& val
,
813 Register scratch
, Register maybeIndex
);
815 void emitPostBarrierShared(Register obj
, ValueOperand val
, Register scratch
,
816 Register maybeIndex
) {
817 emitPostBarrierShared(obj
, ConstantOrRegister(val
), scratch
, maybeIndex
);
// Slot stores pass InvalidReg (no index); element stores require an index.
821 template <typename T
>
822 void emitPostBarrierSlot(Register obj
, const T
& val
, Register scratch
) {
823 emitPostBarrierShared(obj
, val
, scratch
, InvalidReg
);
826 template <typename T
>
827 void emitPostBarrierElement(Register obj
, const T
& val
, Register scratch
,
829 MOZ_ASSERT(index
!= InvalidReg
);
830 emitPostBarrierShared(obj
, val
, scratch
, index
);
// Shared emitters used by both Baseline and Ion code generation.
833 bool emitComparePointerResultShared(JSOp op
, TypedOperandId lhsId
,
834 TypedOperandId rhsId
);
836 [[nodiscard
]] bool emitMathFunctionNumberResultShared(
837 UnaryMathFunction fun
, FloatRegister inputScratch
, ValueOperand output
);
839 template <typename Fn
, Fn fn
>
840 [[nodiscard
]] bool emitBigIntBinaryOperationShared(BigIntOperandId lhsId
,
841 BigIntOperandId rhsId
);
843 template <typename Fn
, Fn fn
>
844 [[nodiscard
]] bool emitBigIntUnaryOperationShared(BigIntOperandId inputId
);
846 bool emitDoubleIncDecResult(bool isInc
, NumberOperandId inputId
);
// Function-pointer types for the Atomics read-modify-write VM callbacks.
848 using AtomicsReadWriteModifyFn
= int32_t (*)(TypedArrayObject
*, size_t,
851 [[nodiscard
]] bool emitAtomicsReadModifyWriteResult(
852 ObjOperandId objId
, IntPtrOperandId indexId
, uint32_t valueId
,
853 Scalar::Type elementType
, AtomicsReadWriteModifyFn fn
);
855 using AtomicsReadWriteModify64Fn
= JS::BigInt
* (*)(JSContext
*,
856 TypedArrayObject
*, size_t,
859 template <AtomicsReadWriteModify64Fn fn
>
860 [[nodiscard
]] bool emitAtomicsReadModifyWriteResult64(ObjOperandId objId
,
861 IntPtrOperandId indexId
,
864 void emitActivateIterator(Register objBeingIterated
, Register iterObject
,
865 Register nativeIter
, Register scratch
,
866 Register scratch2
, uint32_t enumeratorsAddrOffset
);
// Macro expanding to declarations of the generated shared emit* methods.
868 CACHE_IR_COMPILER_SHARED_GENERATED
870 void emitLoadStubField(StubFieldOffset val
, Register dest
);
871 void emitLoadStubFieldConstant(StubFieldOffset val
, Register dest
);
873 void emitLoadValueStubField(StubFieldOffset val
, ValueOperand dest
);
874 void emitLoadDoubleValueStubField(StubFieldOffset val
, ValueOperand dest
,
875 FloatRegister scratch
);
// Stub-field readers: each asserts StubFieldPolicy::Constant, then reads
// a raw word or int64 from the writer and reinterprets it as the target
// type.
877 uintptr_t readStubWord(uint32_t offset
, StubField::Type type
) {
878 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
879 MOZ_ASSERT((offset
% sizeof(uintptr_t)) == 0);
880 return writer_
.readStubField(offset
, type
).asWord();
882 uint64_t readStubInt64(uint32_t offset
, StubField::Type type
) {
883 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
884 MOZ_ASSERT((offset
% sizeof(uintptr_t)) == 0);
885 return writer_
.readStubField(offset
, type
).asInt64();
887 int32_t int32StubField(uint32_t offset
) {
888 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
889 return readStubWord(offset
, StubField::Type::RawInt32
);
891 uint32_t uint32StubField(uint32_t offset
) {
892 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
893 return readStubWord(offset
, StubField::Type::RawInt32
);
895 Shape
* shapeStubField(uint32_t offset
) {
896 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
897 return (Shape
*)readStubWord(offset
, StubField::Type::Shape
);
// Weak shapes need a read barrier before being handed out.
899 Shape
* weakShapeStubField(uint32_t offset
) {
900 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
901 Shape
* shape
= (Shape
*)readStubWord(offset
, StubField::Type::WeakShape
);
902 gc::ReadBarrier(shape
);
905 GetterSetter
* getterSetterStubField(uint32_t offset
) {
906 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
907 return (GetterSetter
*)readStubWord(offset
, StubField::Type::GetterSetter
);
909 JSObject
* objectStubField(uint32_t offset
) {
910 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
911 return (JSObject
*)readStubWord(offset
, StubField::Type::JSObject
);
913 Value
valueStubField(uint32_t offset
) {
914 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
915 uint64_t raw
= readStubInt64(offset
, StubField::Type::Value
);
916 return Value::fromRawBits(raw
);
918 double doubleStubField(uint32_t offset
) {
919 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
920 uint64_t raw
= readStubInt64(offset
, StubField::Type::Double
);
921 return mozilla::BitwiseCast
<double>(raw
);
923 JSString
* stringStubField(uint32_t offset
) {
924 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
925 return (JSString
*)readStubWord(offset
, StubField::Type::String
);
927 JS::Symbol
* symbolStubField(uint32_t offset
) {
928 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
929 return (JS::Symbol
*)readStubWord(offset
, StubField::Type::Symbol
);
931 JS::Compartment
* compartmentStubField(uint32_t offset
) {
932 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
933 return (JS::Compartment
*)readStubWord(offset
, StubField::Type::RawPointer
);
935 BaseScript
* baseScriptStubField(uint32_t offset
) {
936 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
937 return (BaseScript
*)readStubWord(offset
, StubField::Type::BaseScript
);
939 const JSClass
* classStubField(uintptr_t offset
) {
940 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
941 return (const JSClass
*)readStubWord(offset
, StubField::Type::RawPointer
);
943 const void* proxyHandlerStubField(uintptr_t offset
) {
944 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
945 return (const void*)readStubWord(offset
, StubField::Type::RawPointer
);
947 const void* pointerStubField(uintptr_t offset
) {
948 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
949 return (const void*)readStubWord(offset
, StubField::Type::RawPointer
);
951 jsid
idStubField(uint32_t offset
) {
952 MOZ_ASSERT(stubFieldPolicy_
== StubFieldPolicy::Constant
);
953 return jsid::fromRawBits(readStubWord(offset
, StubField::Type::Id
));
957 void assertFloatRegisterAvailable(FloatRegister reg
);
// VM-call plumbing; callVM<Fn, fn> resolves to a VMFunctionId internally.
960 void callVMInternal(MacroAssembler
& masm
, VMFunctionId id
);
961 template <typename Fn
, Fn fn
>
962 void callVM(MacroAssembler
& masm
);
965 // Ensures the IC's output register is available for writing.
966 class MOZ_RAII AutoOutputRegister
{
// NOTE(review): access specifiers, part of maybeReg()'s float fallback and
// closing braces are elided here; visible tokens are verbatim.
967 TypedOrValueRegister output_
;
968 CacheRegisterAllocator
& alloc_
;
970 AutoOutputRegister(const AutoOutputRegister
&) = delete;
971 void operator=(const AutoOutputRegister
&) = delete;
974 explicit AutoOutputRegister(CacheIRCompiler
& compiler
);
975 ~AutoOutputRegister();
// Returns a GPR view of the output where one exists: the scratch half of
// a boxed output, or the GPR of a non-float typed output.
977 Register
maybeReg() const {
978 if (output_
.hasValue()) {
979 return output_
.valueReg().scratchReg();
981 if (!output_
.typedReg().isFloat()) {
982 return output_
.typedReg().gpr();
987 bool hasValue() const { return output_
.hasValue(); }
988 ValueOperand
valueReg() const { return output_
.valueReg(); }
989 AnyRegister
typedReg() const { return output_
.typedReg(); }
// Only valid for typed (non-boxed) outputs.
991 JSValueType
type() const {
992 MOZ_ASSERT(!hasValue());
993 return ValueTypeFromMIRType(output_
.type());
996 operator TypedOrValueRegister() const { return output_
; }
// Instructions that have to perform a callVM require a stub frame. Call its
// enter() and leave() methods to enter/leave the stub frame.
// Hoisted from jit/BaselineCacheIRCompiler.cpp; see there for the method
// definitions.
class MOZ_RAII AutoStubFrame {
  BaselineCacheIRCompiler& compiler;
  // Frame depth recorded on enter() — presumably used to check balanced
  // enter()/leave(); confirm at the definition.
  uint32_t framePushedAtEnterStubFrame_;

  AutoStubFrame(const AutoStubFrame&) = delete;
  void operator=(const AutoStubFrame&) = delete;

 public:
  explicit AutoStubFrame(BaselineCacheIRCompiler& compiler);

  // Pushes the stub frame; |scratch| may be clobbered.
  void enter(MacroAssembler& masm, Register scratch);
  void leave(MacroAssembler& masm);
};
// AutoSaveLiveRegisters must be used when we make a call that can GC. The
// constructor ensures all live registers are stored on the stack (where the GC
// expects them) and the destructor restores these registers.
class MOZ_RAII AutoSaveLiveRegisters {
  IonCacheIRCompiler& compiler_;

  AutoSaveLiveRegisters(const AutoSaveLiveRegisters&) = delete;
  void operator=(const AutoSaveLiveRegisters&) = delete;

 public:
  explicit AutoSaveLiveRegisters(IonCacheIRCompiler& compiler);

  ~AutoSaveLiveRegisters();
};
// Like AutoScratchRegister, but reuse a register of |output| if possible.
class MOZ_RAII AutoScratchRegisterMaybeOutput {
  // Only engaged when no output register could be reused.
  mozilla::Maybe<AutoScratchRegister> scratch_;
  Register scratchReg_;

  AutoScratchRegisterMaybeOutput(const AutoScratchRegisterMaybeOutput&) =
      delete;
  void operator=(const AutoScratchRegisterMaybeOutput&) = delete;

 public:
  AutoScratchRegisterMaybeOutput(CacheRegisterAllocator& alloc,
                                 MacroAssembler& masm,
                                 const AutoOutputRegister& output) {
    // Prefer a GPR aliasing the output; fall back to a real scratch register.
    scratchReg_ = output.maybeReg();
    if (scratchReg_ == InvalidReg) {
      scratch_.emplace(alloc, masm);
      scratchReg_ = scratch_.ref();
    }
  }
  AutoScratchRegisterMaybeOutput(CacheRegisterAllocator& alloc,
                                 MacroAssembler& masm) {
    scratch_.emplace(alloc, masm);
    scratchReg_ = scratch_.ref();
  }

  Register get() const { return scratchReg_; }
  operator Register() const { return scratchReg_; }
};
// Like AutoScratchRegisterMaybeOutput, but tries to use the ValueOperand's
// type register for the scratch register on 32-bit.
//
// Word of warning: Passing an instance of this class and AutoOutputRegister to
// functions may not work correctly, because no guarantee is given that the type
// register is used last when modifying the output's ValueOperand.
class MOZ_RAII AutoScratchRegisterMaybeOutputType {
  // Only engaged when the output's type register could not be reused.
  mozilla::Maybe<AutoScratchRegister> scratch_;
  Register scratchReg_;

 public:
  AutoScratchRegisterMaybeOutputType(CacheRegisterAllocator& alloc,
                                     MacroAssembler& masm,
                                     const AutoOutputRegister& output) {
#if defined(JS_NUNBOX32)
    // On NUNBOX32 a boxed Value spans two GPRs; reuse its type register.
    scratchReg_ = output.hasValue() ? output.valueReg().typeReg() : InvalidReg;
#else
    scratchReg_ = InvalidReg;
#endif
    if (scratchReg_ == InvalidReg) {
      scratch_.emplace(alloc, masm);
      scratchReg_ = scratch_.ref();
    }
  }

  AutoScratchRegisterMaybeOutputType(
      const AutoScratchRegisterMaybeOutputType&) = delete;

  void operator=(const AutoScratchRegisterMaybeOutputType&) = delete;

  Register get() const { return scratchReg_; }
  operator Register() const { return scratchReg_; }
};
// AutoCallVM is a wrapper class that unifies methods shared by
// IonCacheIRCompiler and BaselineCacheIRCompiler that perform a callVM, but
// require stub specific functionality before performing the VM call.
//
// OPs with implementations that may be unified by this class must:
//   - Be listed in the CACHEIR_OPS list but not in the CACHE_IR_SHARED_OPS
//     list.
//   - Differ only in their use of `AutoSaveLiveRegisters`,
//     `AutoOutputRegister`, and `AutoScratchRegister`. The Ion
//     implementation will use `AutoSaveLiveRegisters` and
//     `AutoOutputRegister`, while the Baseline implementation will use
//     `AutoScratchRegister`.
//   - Both use the `callVM` method.
//
// Using AutoCallVM:
//   - The constructor initializes `AutoOutputRegister` for both compiler
//     types. Additionally it initializes an `AutoSaveLiveRegisters` for
//     CacheIRCompilers with the mode Ion, and initializes
//     `AutoScratchRegisterMaybeOutput` and `AutoStubFrame` variables for
//     compilers with mode Baseline.
//   - The `prepare()` method calls the IonCacheIRCompiler method
//     `prepareVMCall` for IonCacheIRCompilers, calls the `enter()` method of
//     `AutoStubFrame` for BaselineCacheIRCompilers, and calls the
//     `discardStack` method of the `Register` class for both compiler types.
//   - The `call()` method invokes `callVM` on the CacheIRCompiler and stores
//     the call result according to its type. Finally it calls the `leave`
//     method of `AutoStubFrame` for BaselineCacheIRCompilers.
//
// Expected Usage Example:
//   See: `CacheIRCompiler::emitCallGetSparseElementResult()`
//
// OPs that do not meet the criteria listed above can not be unified with
// this class.
class MOZ_RAII AutoCallVM {
  MacroAssembler& masm_;
  CacheIRCompiler* compiler_;
  CacheRegisterAllocator& allocator_;
  mozilla::Maybe<AutoOutputRegister> output_;

  // Baseline specific stuff
  mozilla::Maybe<AutoStubFrame> stubFrame_;
  mozilla::Maybe<AutoScratchRegisterMaybeOutput> scratch_;

  // Ion specific stuff
  mozilla::Maybe<AutoSaveLiveRegisters> save_;

  // Moves the VM call's return value into the output for |returnType|.
  void storeResult(JSValueType returnType);

  // NOTE(review): declaration restored after a bare `template <typename Fn>`
  // header in the extracted text; presumably dispatches on Fn's return type.
  // Confirm against the definition.
  template <typename Fn>
  void storeResult();

  void leaveBaselineStubFrame();

 public:
  AutoCallVM(MacroAssembler& masm, CacheIRCompiler* compiler,
             CacheRegisterAllocator& allocator);

  // Sets up the VM call (see the class comment): prepareVMCall for Ion,
  // AutoStubFrame::enter for Baseline, and discards the stack for both.
  // NOTE(review): declaration restored from the class comment above.
  void prepare();

  template <typename Fn, Fn fn>
  void call() {
    compiler_->callVM<Fn, fn>(masm_);
    // NOTE(review): storeResult call restored — the class comment says
    // call() "stores the call result according to its type".
    storeResult<Fn>();
    leaveBaselineStubFrame();
  }

  // Like call(), but the VM call's result is discarded.
  template <typename Fn, Fn fn>
  void callNoResult() {
    compiler_->callVM<Fn, fn>(masm_);
    leaveBaselineStubFrame();
  }

  const AutoOutputRegister& output() const { return *output_; }
  ValueOperand outputValueReg() const { return output_->valueReg(); }
};
// RAII class to allocate FloatReg0 as a scratch register and release it when
// we're done with it. The previous contents of FloatReg0 may be spilled on the
// stack and, if necessary, are restored when the destructor runs.
//
// When FailurePath is passed to the constructor, FailurePath::label() must not
// be used during the life time of the AutoScratchFloatRegister. Instead use
// AutoScratchFloatRegister::failure().
class MOZ_RAII AutoScratchFloatRegister {
  // Local failure label used when FloatReg0 must be popped before jumping to
  // the real failure path.
  Label failurePopReg_{};
  CacheIRCompiler* compiler_;
  FailurePath* failure_;

  AutoScratchFloatRegister(const AutoScratchFloatRegister&) = delete;
  void operator=(const AutoScratchFloatRegister&) = delete;

 public:
  explicit AutoScratchFloatRegister(CacheIRCompiler* compiler)
      : AutoScratchFloatRegister(compiler, nullptr) {}

  AutoScratchFloatRegister(CacheIRCompiler* compiler, FailurePath* failure);

  ~AutoScratchFloatRegister();

  // Use this instead of FailurePath::label() (see class comment).
  // NOTE(review): declaration restored from the class comment; confirm the
  // signature against the definition.
  Label* failure();

  FloatRegister get() const { return FloatReg0; }
  operator FloatRegister() const { return FloatReg0; }
};
// This class can be used to assert a certain FloatRegister is available. In
// Baseline mode, all float registers are available. In Ion mode, only the
// registers added as fixed temps in LIRGenerator are available.
class MOZ_RAII AutoAvailableFloatRegister {
  // NOTE(review): member restored — both getters below read it and the
  // constructor's |reg| argument is its only plausible source.
  FloatRegister reg_;

  AutoAvailableFloatRegister(const AutoAvailableFloatRegister&) = delete;
  void operator=(const AutoAvailableFloatRegister&) = delete;

 public:
  explicit AutoAvailableFloatRegister(CacheIRCompiler& compiler,
                                      FloatRegister reg)
      : reg_(reg) {
    // NOTE(review): assertFloatRegisterAvailable is likely debug-only;
    // confirm whether this call is guarded by #ifdef DEBUG upstream.
    compiler.assertFloatRegisterAvailable(reg);
  }

  FloatRegister get() const { return reg_; }
  operator FloatRegister() const { return reg_; }
};
// See the 'Sharing Baseline stub code' comment in CacheIR.h for a description
// of this class.
//
// CacheIRStubInfo has a trailing variable-length array of bytes. The memory
// layout is as follows:
//
//   Item             | Offset
//   -----------------+--------------------------------------
//   CacheIRStubInfo  | 0
//   CacheIR bytecode | sizeof(CacheIRStubInfo)
//   Stub field types | sizeof(CacheIRStubInfo) + codeLength_
//
// The array of stub field types is terminated by StubField::Type::Limit.
class CacheIRStubInfo {
  uint32_t codeLength_;
  // NOTE(review): member restored — read by kind() and the constructor's
  // "must fit in bitfield" assert below.
  CacheKind kind_;
  ICStubEngine engine_;
  uint8_t stubDataOffset_;
  // NOTE(review): member restored — read by makesGCCalls() and initialized
  // in the constructor's init list.
  bool makesGCCalls_;

  CacheIRStubInfo(CacheKind kind, ICStubEngine engine, bool makesGCCalls,
                  uint32_t stubDataOffset, uint32_t codeLength)
      : codeLength_(codeLength),
        kind_(kind),
        engine_(engine),
        stubDataOffset_(stubDataOffset),
        makesGCCalls_(makesGCCalls) {
    // The narrow/bitfield members must round-trip the constructor arguments.
    MOZ_ASSERT(kind_ == kind, "Kind must fit in bitfield");
    MOZ_ASSERT(engine_ == engine, "Engine must fit in bitfield");
    MOZ_ASSERT(stubDataOffset_ == stubDataOffset,
               "stubDataOffset must fit in uint8_t");
  }

  CacheIRStubInfo(const CacheIRStubInfo&) = delete;
  CacheIRStubInfo& operator=(const CacheIRStubInfo&) = delete;

 public:
  CacheKind kind() const { return kind_; }
  ICStubEngine engine() const { return engine_; }
  bool makesGCCalls() const { return makesGCCalls_; }

  // The CacheIR bytecode immediately follows this object in memory.
  const uint8_t* code() const {
    return reinterpret_cast<const uint8_t*>(this) + sizeof(CacheIRStubInfo);
  }
  uint32_t codeLength() const { return codeLength_; }
  uint32_t stubDataOffset() const { return stubDataOffset_; }

  size_t stubDataSize() const;

  // Type of the i-th stub field; the field-type array follows the bytecode.
  StubField::Type fieldType(uint32_t i) const {
    static_assert(sizeof(StubField::Type) == sizeof(uint8_t));
    const uint8_t* fieldTypes = code() + codeLength_;
    return static_cast<StubField::Type>(fieldTypes[i]);
  }

  static CacheIRStubInfo* New(CacheKind kind, ICStubEngine engine,
                              bool canMakeCalls, uint32_t stubDataOffset,
                              const CacheIRWriter& writer);

  template <class Stub, class T>
  js::GCPtr<T>& getStubField(Stub* stub, uint32_t offset) const;

  template <class Stub, class T>
  T* getPtrStubField(Stub* stub, uint32_t offset) const;

  // Convenience overload for the common ICCacheIRStub case.
  template <class T>
  js::GCPtr<T>& getStubField(ICCacheIRStub* stub, uint32_t offset) const {
    return getStubField<ICCacheIRStub, T>(stub, offset);
  }

  uintptr_t getStubRawWord(const uint8_t* stubData, uint32_t offset) const;
  uintptr_t getStubRawWord(ICCacheIRStub* stub, uint32_t offset) const;

  int64_t getStubRawInt64(const uint8_t* stubData, uint32_t offset) const;
  int64_t getStubRawInt64(ICCacheIRStub* stub, uint32_t offset) const;

  void replaceStubRawWord(uint8_t* stubData, uint32_t offset, uintptr_t oldWord,
                          uintptr_t newWord) const;
};
// Traces the stub fields of |stub|, using |stubInfo| to locate and type them.
template <typename T>
void TraceCacheIRStub(JSTracer* trc, T* stub, const CacheIRStubInfo* stubInfo);
// Weakly traces the stub fields of |stub|. Returns a bool — presumably
// whether the stub is still alive after weak edges are swept; confirm the
// exact contract at the definition.
template <typename T>
bool TraceWeakCacheIRStub(JSTracer* trc, T* stub,
                          const CacheIRStubInfo* stubInfo);
1321 #endif /* jit_CacheIRCompiler_h */