1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #ifndef jit_CacheIRWriter_h
8 #define jit_CacheIRWriter_h
10 #include "mozilla/Assertions.h"
11 #include "mozilla/Attributes.h"
12 #include "mozilla/Casting.h"
13 #include "mozilla/Maybe.h"
19 #include "NamespaceImports.h"
21 #include "gc/AllocKind.h"
22 #include "jit/ABIFunctions.h"
23 #include "jit/CacheIR.h"
24 #include "jit/CacheIROpsGenerated.h"
25 #include "jit/CompactBuffer.h"
26 #include "jit/ICState.h"
27 #include "jit/Simulator.h"
28 #include "jit/TypeData.h"
29 #include "js/AllocPolicy.h"
30 #include "js/CallArgs.h"
32 #include "js/experimental/JitInfo.h"
34 #include "js/RootingAPI.h"
35 #include "js/ScalarType.h"
37 #include "js/Vector.h"
38 #include "util/Memory.h"
39 #include "vm/GuardFuse.h"
40 #include "vm/JSFunction.h"
41 #include "vm/JSScript.h"
43 #include "vm/Opcodes.h"
44 #include "vm/RealmFuses.h"
46 #include "wasm/WasmConstants.h"
47 #include "wasm/WasmValType.h"
49 class JS_PUBLIC_API JSTracer
;
50 struct JS_PUBLIC_API JSContext
;
62 enum class UnaryMathFunction
: uint8_t;
72 // Class to record CacheIR + some additional metadata for code generation.
// NOTE(review): this extract appears mangled — original line numbers are
// embedded in the text and several spans (preprocessor guards, closing
// braces, some members) are missing. Comments below document intent only;
// code bytes are left untouched.
73 class MOZ_RAII CacheIRWriter
: public JS::CustomAutoRooter
{
// Serialized CacheIR byte stream being built.
77 CompactBufferWriter buffer_
;
// Next operand/instruction ids to hand out, and how many operand ids are
// inputs to the IC.
79 uint32_t nextOperandId_
;
80 uint32_t nextInstructionId_
;
81 uint32_t numInputOperands_
;
85 // The data (shapes, slot offsets, etc.) that will be stored in the ICStub.
86 Vector
<StubField
, 8, SystemAllocPolicy
> stubFields_
;
89 // For each operand id, record which instruction accessed it last. This
90 // information greatly improves register allocation.
91 Vector
<uint32_t, 8, SystemAllocPolicy
> operandLastUsed_
;
93 // OperandId and stub offsets are stored in a single byte, so make sure
94 // this doesn't overflow. We use a very conservative limit for now.
95 static const size_t MaxOperandIds
= 20;
96 static const size_t MaxStubDataSizeInBytes
= 20 * sizeof(uintptr_t);
99 // Assume this stub can't be trial inlined until we see a scripted call/inline
101 TrialInliningState trialInliningState_
= TrialInliningState::Failure
;
103 // Basic caching to avoid quadratic lookup behaviour in readStubField.
104 mutable uint32_t lastOffset_
;
105 mutable uint32_t lastIndex_
;
108 // Information for assertLengthMatches.
109 mozilla::Maybe
<CacheOp
> currentOp_
;
110 size_t currentOpArgsStart_
= 0;
// Sanity checks that a guarded object/shape belongs to the expected
// compartment/zone. The declarations below look like debug-build prototypes
// followed by release-build no-op definitions; the surrounding
// #ifdef DEBUG / #else / #endif lines appear to have been lost in
// extraction — TODO(review): confirm against the original header.
114 void assertSameCompartment(JSObject
* obj
);
115 void assertSameZone(Shape
* shape
);
117 void assertSameCompartment(JSObject
* obj
) {}
118 void assertSameZone(Shape
* shape
) {}
// Appends an opcode as a fixed 16-bit value, bumps the instruction counter,
// and records the current op plus the buffer position where its arguments
// start (consumed by assertLengthMatches). NOTE(review): the closing lines
// of this method are missing from this extract.
121 void writeOp(CacheOp op
) {
122 buffer_
.writeFixedUint16_t(uint16_t(op
));
123 nextInstructionId_
++;
// Each writeOp must be paired with an assertLengthMatches call that clears
// currentOp_.
125 MOZ_ASSERT(currentOp_
.isNothing(), "Missing call to assertLengthMatches?");
126 currentOp_
.emplace(op
);
127 currentOpArgsStart_
= buffer_
.length();
// Called after an op's arguments have been written: asserts the number of
// bytes emitted since writeOp equals the table-driven argLength for the op
// (skipped when the writer already failed). NOTE(review): the method's
// closing lines are missing from this extract.
131 void assertLengthMatches() {
133 // After writing arguments, assert the length matches CacheIROpArgLengths.
134 size_t expectedLen
= CacheIROpInfos
[size_t(*currentOp_
)].argLength
;
135 MOZ_ASSERT_IF(!failed(),
136 buffer_
.length() - currentOpArgsStart_
== expectedLen
);
// Emits an operand id as a single byte (ids must stay below MaxOperandIds)
// and records the current instruction as the operand's last use, feeding the
// register allocator via operandLastUsed_. NOTE(review): the branch taken
// when the id is out of range and the closing braces are missing from this
// extract.
141 void writeOperandId(OperandId opId
) {
142 if (opId
.id() < MaxOperandIds
) {
143 static_assert(MaxOperandIds
<= UINT8_MAX
,
144 "operand id must fit in a single byte");
145 buffer_
.writeByte(opId
.id());
// Grow the last-use table on demand; allocation failure is folded into the
// buffer's OOM state.
150 if (opId
.id() >= operandLastUsed_
.length()) {
151 buffer_
.propagateOOM(operandLastUsed_
.resize(opId
.id() + 1));
156 MOZ_ASSERT(nextInstructionId_
> 0);
157 operandLastUsed_
[opId
.id()] = nextInstructionId_
- 1;
160 void writeCallFlagsImm(CallFlags flags
) { buffer_
.writeByte(flags
.toByte()); }
// Appends a (value, type) stub field to stubFields_ and writes the field's
// word offset into the IR stream. 8-byte fields get aligned, inserting a
// RawInt32 padding field when needed so field iteration sees no holes.
// NOTE(review): the else-branch taken when the stub data would exceed
// MaxStubDataSizeInBytes, and the closing braces, are missing from this
// extract.
162 void addStubField(uint64_t value
, StubField::Type fieldType
) {
163 size_t fieldOffset
= stubDataSize_
;
165 // On 32-bit platforms there are two stub field sizes (4 bytes and 8 bytes).
166 // Ensure 8-byte fields are properly aligned.
167 if (StubField::sizeIsInt64(fieldType
)) {
168 fieldOffset
= AlignBytes(fieldOffset
, sizeof(uint64_t));
171 MOZ_ASSERT((fieldOffset
% StubField::sizeInBytes(fieldType
)) == 0);
173 size_t newStubDataSize
= fieldOffset
+ StubField::sizeInBytes(fieldType
);
174 if (newStubDataSize
< MaxStubDataSizeInBytes
) {
176 // Add a RawInt32 stub field for padding if necessary, because when we
177 // iterate over the stub fields we assume there are no 'holes'.
178 if (fieldOffset
!= stubDataSize_
) {
179 MOZ_ASSERT((stubDataSize_
+ sizeof(uintptr_t)) == fieldOffset
);
180 buffer_
.propagateOOM(
181 stubFields_
.append(StubField(0, StubField::Type::RawInt32
)));
184 buffer_
.propagateOOM(stubFields_
.append(StubField(value
, fieldType
)));
// The offset written into the IR is in words, so it must be word-aligned.
185 MOZ_ASSERT((fieldOffset
% sizeof(uintptr_t)) == 0);
186 buffer_
.writeByte(fieldOffset
/ sizeof(uintptr_t));
187 stubDataSize_
= newStubDataSize
;
// Typed wrappers around addStubField: each stores one pointer/value tagged
// with the matching StubField::Type. Shape/JSObject variants first run the
// zone/compartment sanity check. The Weak* types presumably get weak GC
// treatment by the stub — TODO(review): confirm in StubField's definition.
// NOTE(review): each method's closing brace is missing from this extract.
193 void writeShapeField(Shape
* shape
) {
195 assertSameZone(shape
);
196 addStubField(uintptr_t(shape
), StubField::Type::Shape
);
198 void writeWeakShapeField(Shape
* shape
) {
200 assertSameZone(shape
);
201 addStubField(uintptr_t(shape
), StubField::Type::WeakShape
);
203 void writeWeakGetterSetterField(GetterSetter
* gs
) {
205 addStubField(uintptr_t(gs
), StubField::Type::WeakGetterSetter
);
207 void writeObjectField(JSObject
* obj
) {
209 assertSameCompartment(obj
);
210 addStubField(uintptr_t(obj
), StubField::Type::JSObject
);
212 void writeWeakObjectField(JSObject
* obj
) {
214 assertSameCompartment(obj
);
215 addStubField(uintptr_t(obj
), StubField::Type::WeakObject
);
217 void writeStringField(JSString
* str
) {
219 addStubField(uintptr_t(str
), StubField::Type::String
);
221 void writeSymbolField(JS::Symbol
* sym
) {
223 addStubField(uintptr_t(sym
), StubField::Type::Symbol
);
225 void writeWeakBaseScriptField(BaseScript
* script
) {
227 addStubField(uintptr_t(script
), StubField::Type::WeakBaseScript
);
229 void writeJitCodeField(JitCode
* code
) {
231 addStubField(uintptr_t(code
), StubField::Type::JitCode
);
233 void writeRawInt32Field(uint32_t val
) {
234 addStubField(val
, StubField::Type::RawInt32
);
236 void writeRawPointerField(const void* ptr
) {
237 addStubField(uintptr_t(ptr
), StubField::Type::RawPointer
);
239 void writeIdField(jsid id
) {
240 addStubField(id
.asRawBits(), StubField::Type::Id
);
242 void writeValueField(const Value
& val
) {
243 addStubField(val
.asRawBits(), StubField::Type::Value
);
245 void writeRawInt64Field(uint64_t val
) {
246 addStubField(val
, StubField::Type::RawInt64
);
// Doubles are stored bit-exactly via BitwiseCast (no value conversion).
248 void writeDoubleField(double d
) {
249 uint64_t bits
= mozilla::BitwiseCast
<uint64_t>(d
);
250 addStubField(bits
, StubField::Type::Double
);
252 void writeAllocSiteField(gc::AllocSite
* ptr
) {
253 addStubField(uintptr_t(ptr
), StubField::Type::AllocSite
);
// Immediate writers: each encodes a small enum as one byte in the IR stream,
// with a static_assert (or runtime assert) that the value actually fits.
// NOTE(review): each method's closing brace is missing from this extract.
256 void writeJSOpImm(JSOp op
) {
257 static_assert(sizeof(JSOp
) == sizeof(uint8_t), "JSOp must fit in a byte");
258 buffer_
.writeByte(uint8_t(op
));
260 void writeGuardClassKindImm(GuardClassKind kind
) {
261 static_assert(sizeof(GuardClassKind
) == sizeof(uint8_t),
262 "GuardClassKind must fit in a byte");
263 buffer_
.writeByte(uint8_t(kind
));
265 void writeArrayBufferViewKindImm(ArrayBufferViewKind kind
) {
266 static_assert(sizeof(ArrayBufferViewKind
) == sizeof(uint8_t),
267 "ArrayBufferViewKind must fit in a byte");
268 buffer_
.writeByte(uint8_t(kind
));
270 void writeValueTypeImm(ValueType type
) {
271 static_assert(sizeof(ValueType
) == sizeof(uint8_t),
272 "ValueType must fit in uint8_t");
273 buffer_
.writeByte(uint8_t(type
));
275 void writeJSWhyMagicImm(JSWhyMagic whyMagic
) {
276 static_assert(JS_WHY_MAGIC_COUNT
<= UINT8_MAX
,
277 "JSWhyMagic must fit in uint8_t");
278 buffer_
.writeByte(uint8_t(whyMagic
));
// Scalar::Type's size is not a single byte, so this one checks the value
// range at runtime instead of using a static_assert on the type's size.
280 void writeScalarTypeImm(Scalar::Type type
) {
281 MOZ_ASSERT(size_t(type
) <= UINT8_MAX
);
282 buffer_
.writeByte(uint8_t(type
));
284 void writeUnaryMathFunctionImm(UnaryMathFunction fun
) {
285 static_assert(sizeof(UnaryMathFunction
) == sizeof(uint8_t),
286 "UnaryMathFunction must fit in a byte");
287 buffer_
.writeByte(uint8_t(fun
));
289 void writeCompletionKindImm(CompletionKind kind
) {
290 static_assert(sizeof(CompletionKind
) == sizeof(uint8_t),
291 "CompletionKind must fit in a byte");
292 buffer_
.writeByte(uint8_t(kind
));
294 void writeBoolImm(bool b
) { buffer_
.writeByte(uint32_t(b
)); }
// Encodes a realm-fuse index as one immediate byte.
// NOTE(review): closing braces are missing from this extract.
295 void writeRealmFuseIndexImm(RealmFuses::FuseIndex realmFuseIndex
) {
296 static_assert(sizeof(RealmFuses::FuseIndex
) == sizeof(uint8_t),
297 "RealmFuses::FuseIndex must fit in a byte");
298 buffer_
.writeByte(uint8_t(realmFuseIndex
));
// Writes an arbitrary small integer immediate; asserts it fits in a byte.
301 void writeByteImm(uint32_t b
) {
302 MOZ_ASSERT(b
<= UINT8_MAX
);
303 buffer_
.writeByte(b
);
306 void writeInt32Imm(int32_t i32
) { buffer_
.writeFixedUint32_t(i32
); }
307 void writeUInt32Imm(uint32_t u32
) { buffer_
.writeFixedUint32_t(u32
); }
308 void writePointer(const void* ptr
) { buffer_
.writeRawPointer(ptr
); }
// Embeds a native function's address (converted from function pointer to
// data pointer) into the IR stream. NOTE(review): the closing brace is
// missing from this extract.
310 void writeJSNativeImm(JSNative native
) {
311 writePointer(JS_FUNC_TO_DATA_PTR(void*, native
));
313 void writeStaticStringImm(const char* str
) { writePointer(str
); }
// Encodes a wasm value-type kind as one immediate byte; the static_assert
// bounds the whole TypeCode range. NOTE(review): closing braces are missing
// from this extract.
315 void writeWasmValTypeImm(wasm::ValType::Kind kind
) {
316 static_assert(unsigned(wasm::TypeCode::Limit
) <= UINT8_MAX
);
317 buffer_
.writeByte(uint8_t(kind
));
// Encodes a GC allocation kind as one immediate byte.
320 void writeAllocKindImm(gc::AllocKind kind
) {
321 static_assert(unsigned(gc::AllocKind::LIMIT
) <= UINT8_MAX
);
322 buffer_
.writeByte(uint8_t(kind
));
325 uint32_t newOperandId() { return nextOperandId_
++; }
327 CacheIRWriter(const CacheIRWriter
&) = delete;
328 CacheIRWriter
& operator=(const CacheIRWriter
&) = delete;
// Constructor: registers this writer with the GC via CustomAutoRooter and
// zero-initializes the id counters. NOTE(review): several member
// initializers and the constructor body appear to be missing from this
// extract — confirm against the original header before relying on the
// initialization order shown here.
331 explicit CacheIRWriter(JSContext
* cx
)
332 : CustomAutoRooter(cx
),
337 nextInstructionId_(0),
338 numInputOperands_(0),
345 bool tooLarge() const { return tooLarge_
; }
346 bool oom() const { return buffer_
.oom(); }
347 bool failed() const { return tooLarge() || oom(); }
349 TrialInliningState
trialInliningState() const { return trialInliningState_
; }
351 uint32_t numInputOperands() const { return numInputOperands_
; }
352 uint32_t numOperandIds() const { return nextOperandId_
; }
353 uint32_t numInstructions() const { return nextInstructionId_
; }
355 size_t numStubFields() const { return stubFields_
.length(); }
// Returns the type tag of the i-th recorded stub field.
// NOTE(review): the closing brace is missing from this extract.
356 StubField::Type
stubFieldType(uint32_t i
) const {
357 return stubFields_
[i
].type();
// Registers the next operand id as an IC input; asserts ids are assigned in
// order. NOTE(review): the rest of this method's body is missing from this
// extract.
360 uint32_t setInputOperandId(uint32_t op
) {
361 MOZ_ASSERT(op
== nextOperandId_
);
// Accessors for the optional type metadata attached to this IC.
367 TypeData
typeData() const { return typeData_
; }
368 void setTypeData(TypeData data
) { typeData_
= data
; }
// CustomAutoRooter hook: the writer holds no traced stub fields at GC time,
// enforced by the release assert below. NOTE(review): the closing brace is
// missing from this extract.
370 void trace(JSTracer
* trc
) override
{
371 // For now, assert we only GC before we append stub fields.
372 MOZ_RELEASE_ASSERT(stubFields_
.empty());
// Size in bytes of the stub data accumulated so far.
375 size_t stubDataSize() const { return stubDataSize_
; }
// Out-of-line helpers (defined elsewhere) for copying and comparing the
// stub data against an existing stub's data.
376 void copyStubData(uint8_t* dest
) const;
377 bool stubDataEquals(const uint8_t* stubData
) const;
378 bool stubDataEqualsIgnoring(const uint8_t* stubData
,
379 uint32_t ignoreOffset
) const;
// True when an operand's last recorded use precedes the given instruction.
// NOTE(review): the branch handling ids beyond operandLastUsed_ and the
// closing braces of these methods are missing from this extract.
381 bool operandIsDead(uint32_t operandId
, uint32_t currentInstruction
) const {
382 if (operandId
>= operandLastUsed_
.length()) {
385 return currentInstruction
> operandLastUsed_
[operandId
];
// Raw access to the finished IR bytes; only valid when !failed().
388 const uint8_t* codeStart() const {
389 MOZ_ASSERT(!failed());
390 return buffer_
.buffer();
393 const uint8_t* codeEnd() const {
394 MOZ_ASSERT(!failed());
395 return buffer_
.buffer() + buffer_
.length();
398 uint32_t codeLength() const {
399 MOZ_ASSERT(!failed());
400 return buffer_
.length();
403 // This should not be used when compiling Baseline code, as Baseline code
404 // shouldn't bake in stub values.
405 StubField
readStubField(uint32_t offset
, StubField::Type type
) const;
// Typed guard emitters: each writes the corresponding guard op for the input
// and then reuses the same operand id under a narrower typed wrapper, so no
// new operand is allocated. NOTE(review): each method's closing brace (and
// stringToAtom's return) is missing from this extract.
407 ObjOperandId
guardToObject(ValOperandId input
) {
408 guardToObject_(input
);
409 return ObjOperandId(input
.id());
412 StringOperandId
guardToString(ValOperandId input
) {
413 guardToString_(input
);
414 return StringOperandId(input
.id());
417 SymbolOperandId
guardToSymbol(ValOperandId input
) {
418 guardToSymbol_(input
);
419 return SymbolOperandId(input
.id());
422 BigIntOperandId
guardToBigInt(ValOperandId input
) {
423 guardToBigInt_(input
);
424 return BigIntOperandId(input
.id());
427 BooleanOperandId
guardToBoolean(ValOperandId input
) {
428 guardToBoolean_(input
);
429 return BooleanOperandId(input
.id());
432 Int32OperandId
guardToInt32(ValOperandId input
) {
433 guardToInt32_(input
);
434 return Int32OperandId(input
.id());
437 NumberOperandId
guardIsNumber(ValOperandId input
) {
438 guardIsNumber_(input
);
439 return NumberOperandId(input
.id());
442 StringOperandId
stringToAtom(StringOperandId input
) {
443 stringToAtom_(input
);
// Boxing an object into a Value needs no code: the id is simply reused.
447 ValOperandId
boxObject(ObjOperandId input
) {
448 return ValOperandId(input
.id());
// Shape/function guards and argument-slot loaders. NOTE(review): closing
// braces and some interior lines (including the declarations of |addArgc|
// and |argc| used below) are missing from this extract.
451 void guardShapeForClass(ObjOperandId obj
, Shape
* shape
) {
452 // Guard shape to ensure that object class is unchanged. This is true
454 guardShape(obj
, shape
);
457 void guardShapeForOwnProperties(ObjOperandId obj
, Shape
* shape
) {
458 // Guard shape to detect changes to (non-dense) own properties. This
459 // also implies |guardShapeForClass|.
460 MOZ_ASSERT(shape
->getObjectClass()->isNativeObject());
461 guardShape(obj
, shape
);
465 void guardSpecificFunction(ObjOperandId obj
, JSFunction
* expected
) {
466 // Guard object is a specific function. This implies immutable fields on
467 // the JSFunction struct itself are unchanged.
468 // Bake in the nargs and FunctionFlags so Warp can use them off-main thread,
469 // instead of directly using the JSFunction fields.
470 uint32_t nargsAndFlags
= expected
->flagsAndArgCountRaw();
471 guardSpecificFunction_(obj
, expected
, nargsAndFlags
);
474 void guardFunctionScript(ObjOperandId fun
, BaseScript
* expected
) {
475 // Guard function has a specific BaseScript. This implies immutable fields
476 // on the JSFunction struct itself are unchanged and are equivalent for
478 // Bake in the nargs and FunctionFlags so Warp can use them off-main thread,
479 // instead of directly using the JSFunction fields.
480 uint32_t nargsAndFlags
= expected
->function()->flagsAndArgCountRaw();
481 guardFunctionScript_(fun
, expected
, nargsAndFlags
);
// Loads a call argument at a fixed stack slot computed from the argument
// kind, argc and call flags.
484 ValOperandId
loadArgumentFixedSlot(
485 ArgumentKind kind
, uint32_t argc
,
486 CallFlags flags
= CallFlags(CallFlags::Standard
)) {
488 int32_t slotIndex
= GetIndexOfArgument(kind
, flags
, &addArgc
);
492 MOZ_ASSERT(slotIndex
>= 0);
493 MOZ_RELEASE_ASSERT(slotIndex
<= UINT8_MAX
);
494 return loadArgumentFixedSlot_(slotIndex
);
// Like loadArgumentFixedSlot but the argc is an operand, so the slot is
// resolved dynamically. Both return paths presumably sit in opposite
// branches of a lost conditional — TODO(review): confirm.
497 ValOperandId
loadArgumentDynamicSlot(
498 ArgumentKind kind
, Int32OperandId argcId
,
499 CallFlags flags
= CallFlags(CallFlags::Standard
)) {
501 int32_t slotIndex
= GetIndexOfArgument(kind
, flags
, &addArgc
);
503 return loadArgumentDynamicSlot_(argcId
, slotIndex
);
505 return loadArgumentFixedSlot_(slotIndex
);
// Loads the arguments array of a spread call (Arg0 with Spread flags).
508 ObjOperandId
loadSpreadArgs() {
509 ArgumentKind kind
= ArgumentKind::Arg0
;
511 CallFlags
flags(CallFlags::Spread
);
512 return ObjOperandId(loadArgumentFixedSlot(kind
, argc
, flags
).id());
// Call emitters. Scripted calls mark the stub as a trial-inlining candidate;
// already-inlined calls mark it Inlined. NOTE(review): closing braces are
// missing, and the two callNativeFunction_ code paths below look like the
// simulator and non-simulator sides of a lost #ifdef JS_SIMULATOR /
// #else / #endif — confirm against the original header.
515 void callScriptedFunction(ObjOperandId callee
, Int32OperandId argc
,
516 CallFlags flags
, uint32_t argcFixed
) {
517 callScriptedFunction_(callee
, argc
, flags
, argcFixed
);
518 trialInliningState_
= TrialInliningState::Candidate
;
521 void callInlinedFunction(ObjOperandId callee
, Int32OperandId argc
,
522 ICScript
* icScript
, CallFlags flags
,
523 uint32_t argcFixed
) {
524 callInlinedFunction_(callee
, argc
, icScript
, flags
, argcFixed
);
525 trialInliningState_
= TrialInliningState::Inlined
;
528 void callNativeFunction(ObjOperandId calleeId
, Int32OperandId argc
, JSOp op
,
529 JSFunction
* calleeFunc
, CallFlags flags
,
530 uint32_t argcFixed
) {
531 // Some native functions can be implemented faster if we know that
532 // the return value is ignored.
533 bool ignoresReturnValue
=
534 op
== JSOp::CallIgnoresRv
&& calleeFunc
->hasJitInfo() &&
535 calleeFunc
->jitInfo()->type() == JSJitInfo::IgnoresReturnValueNative
;
538 // The simulator requires VM calls to be redirected to a special
539 // swi instruction to handle them, so we store the redirected
540 // pointer in the stub and use that instead of the original one.
541 // If we are calling the ignoresReturnValue version of a native
542 // function, we bake it into the redirected pointer.
543 // (See BaselineCacheIRCompiler::emitCallNativeFunction.)
544 JSNative target
= ignoresReturnValue
545 ? calleeFunc
->jitInfo()->ignoresReturnValueMethod
546 : calleeFunc
->native();
547 void* rawPtr
= JS_FUNC_TO_DATA_PTR(void*, target
);
548 void* redirected
= Simulator::RedirectNativeFunction(rawPtr
, Args_General3
);
549 callNativeFunction_(calleeId
, argc
, flags
, argcFixed
, redirected
);
551 // If we are not running in the simulator, we generate different jitcode
552 // to find the ignoresReturnValue version of a native function.
553 callNativeFunction_(calleeId
, argc
, flags
, argcFixed
, ignoresReturnValue
);
// DOM, generic-native and class-hook call emitters. NOTE(review): as above,
// the paired callDOMFunction_/callNativeFunction_ paths look like the two
// sides of lost simulator #ifdefs, and closing braces are missing.
557 void callDOMFunction(ObjOperandId calleeId
, Int32OperandId argc
,
558 ObjOperandId thisObjId
, JSFunction
* calleeFunc
,
559 CallFlags flags
, uint32_t argcFixed
) {
561 void* rawPtr
= JS_FUNC_TO_DATA_PTR(void*, calleeFunc
->native());
562 void* redirected
= Simulator::RedirectNativeFunction(rawPtr
, Args_General3
);
563 callDOMFunction_(calleeId
, argc
, thisObjId
, flags
, argcFixed
, redirected
);
565 callDOMFunction_(calleeId
, argc
, thisObjId
, flags
, argcFixed
);
// Calls whatever native the callee happens to be; requires a realm switch,
// hence the !isSameRealm assertion.
569 void callAnyNativeFunction(ObjOperandId calleeId
, Int32OperandId argc
,
570 CallFlags flags
, uint32_t argcFixed
) {
571 MOZ_ASSERT(!flags
.isSameRealm());
573 const void* redirected
= RedirectedCallAnyNative();
574 callNativeFunction_(calleeId
, argc
, flags
, argcFixed
, redirected
);
576 callNativeFunction_(calleeId
, argc
, flags
, argcFixed
,
577 /* ignoresReturnValue = */ false);
// Calls a JSClass call/construct hook rather than a JSFunction native.
581 void callClassHook(ObjOperandId calleeId
, Int32OperandId argc
, JSNative hook
,
582 CallFlags flags
, uint32_t argcFixed
) {
583 MOZ_ASSERT(!flags
.isSameRealm());
584 void* target
= JS_FUNC_TO_DATA_PTR(void*, hook
);
586 // The simulator requires VM calls to be redirected to a special
587 // swi instruction to handle them, so we store the redirected
588 // pointer in the stub and use that instead of the original one.
589 target
= Simulator::RedirectNativeFunction(target
, Args_General3
);
591 callClassHook_(calleeId
, argc
, flags
, argcFixed
, target
);
// Getter/setter call emitters. Each bakes in the callee's raw nargs+flags
// word (so Warp can use it off-main-thread) and updates the trial-inlining
// state: scripted accessors become Candidate, inlined ones Inlined.
// NOTE(review): several parameter lists and closing braces are truncated in
// this extract (e.g. the |sameRealm| parameters referenced below are not
// visible).
594 void callScriptedGetterResult(ValOperandId receiver
, JSFunction
* getter
,
596 MOZ_ASSERT(getter
->hasJitEntry());
597 uint32_t nargsAndFlags
= getter
->flagsAndArgCountRaw();
598 callScriptedGetterResult_(receiver
, getter
, sameRealm
, nargsAndFlags
);
599 trialInliningState_
= TrialInliningState::Candidate
;
602 void callInlinedGetterResult(ValOperandId receiver
, JSFunction
* getter
,
603 ICScript
* icScript
, bool sameRealm
) {
604 MOZ_ASSERT(getter
->hasJitEntry());
605 uint32_t nargsAndFlags
= getter
->flagsAndArgCountRaw();
606 callInlinedGetterResult_(receiver
, getter
, icScript
, sameRealm
,
608 trialInliningState_
= TrialInliningState::Inlined
;
611 void callNativeGetterResult(ValOperandId receiver
, JSFunction
* getter
,
613 MOZ_ASSERT(getter
->isNativeWithoutJitEntry());
614 uint32_t nargsAndFlags
= getter
->flagsAndArgCountRaw();
615 callNativeGetterResult_(receiver
, getter
, sameRealm
, nargsAndFlags
);
618 void callScriptedSetter(ObjOperandId receiver
, JSFunction
* setter
,
619 ValOperandId rhs
, bool sameRealm
) {
620 MOZ_ASSERT(setter
->hasJitEntry());
621 uint32_t nargsAndFlags
= setter
->flagsAndArgCountRaw();
622 callScriptedSetter_(receiver
, setter
, rhs
, sameRealm
, nargsAndFlags
);
623 trialInliningState_
= TrialInliningState::Candidate
;
626 void callInlinedSetter(ObjOperandId receiver
, JSFunction
* setter
,
627 ValOperandId rhs
, ICScript
* icScript
, bool sameRealm
) {
628 MOZ_ASSERT(setter
->hasJitEntry());
629 uint32_t nargsAndFlags
= setter
->flagsAndArgCountRaw();
630 callInlinedSetter_(receiver
, setter
, rhs
, icScript
, sameRealm
,
632 trialInliningState_
= TrialInliningState::Inlined
;
635 void callNativeSetter(ObjOperandId receiver
, JSFunction
* setter
,
636 ValOperandId rhs
, bool sameRealm
) {
637 MOZ_ASSERT(setter
->isNativeWithoutJitEntry());
638 uint32_t nargsAndFlags
= setter
->flagsAndArgCountRaw();
639 callNativeSetter_(receiver
, setter
, rhs
, sameRealm
, nargsAndFlags
);
// Scripted-proxy trap call emitters, metadata writer, multi-shape guard, and
// the generated per-op writer methods. NOTE(review): parameter lists,
// trailing arguments and closing braces are truncated in this extract (e.g.
// |property| and the remaining arguments of callScriptedProxyGetResult_ are
// not fully visible), and the class's closing brace is not visible at all.
643 void callScriptedProxyGetResult(ValOperandId target
, ObjOperandId receiver
,
644 ObjOperandId handler
, JSFunction
* trap
,
646 MOZ_ASSERT(trap
->hasJitEntry());
647 uint32_t nargsAndFlags
= trap
->flagsAndArgCountRaw();
648 callScriptedProxyGetResult_(target
, receiver
, handler
, trap
, property
,
652 void callScriptedProxyGetByValueResult(ValOperandId target
,
653 ObjOperandId receiver
,
654 ObjOperandId handler
,
655 ValOperandId property
,
657 MOZ_ASSERT(trap
->hasJitEntry());
658 uint32_t nargsAndFlags
= trap
->flagsAndArgCountRaw();
659 callScriptedProxyGetByValueResult_(target
, receiver
, handler
, property
,
660 trap
, nargsAndFlags
);
// Records the expected |this| shape metadata for scripted constructors.
664 void metaScriptedThisShape(Shape
* thisShape
) {
665 metaScriptedThisShape_(thisShape
);
// Guards the object's shape against a non-empty list of accepted shapes.
668 void guardMultipleShapes(ObjOperandId obj
, ListObject
* shapes
) {
669 MOZ_ASSERT(shapes
->length() > 0);
670 guardMultipleShapes_(obj
, shapes
);
// CacheIRCloner reads the private buffer/stub-field state when cloning.
673 friend class CacheIRCloner
;
// Expands to the generated writer methods for every CacheIR op (the
// underscore-suffixed emitters used above, e.g. guardToObject_).
675 CACHE_IR_WRITER_GENERATED
681 #endif /* jit_CacheIRWriter_h */