1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
10 #include "mozilla/Assertions.h"
11 #include "mozilla/Attributes.h"
18 #include "jit/CacheIROpsGenerated.h"
19 #include "js/GCAnnotations.h"
22 struct JS_PUBLIC_API JSContext
;
29 // CacheIR is an (extremely simple) linear IR language for inline caches.
30 // From this IR, we can generate machine code for Baseline or Ion IC stubs.
34 // CacheIR bytecode is written using IRWriter. This class also records some
35 // metadata that's used by the Baseline and Ion code generators to generate
36 // (efficient) machine code.
38 // Sharing Baseline stub code
39 // --------------------------
40 // Baseline stores data (like Shape* and fixed slot offsets) inside the ICStub
41 // structure, instead of embedding them directly in the JitCode. This makes
42 // Baseline IC code slightly slower, but allows us to share IC code between
43 // caches. CacheIR makes it easy to share code between stubs: stubs that have
44 // the same CacheIR (and CacheKind), will have the same Baseline stub code.
46 // Baseline stubs that share JitCode also share a CacheIRStubInfo structure.
47 // This class stores the CacheIR and the location of GC things stored in the
50 // JitZone has a CacheIRStubInfo* -> JitCode* weak map that's used to share both
51 // the IR and JitCode between Baseline CacheIR stubs. This HashMap owns the
52 // stubInfo (it uses UniquePtr), so once there are no references left to the
53 // shared stub code, we can also free the CacheIRStubInfo.
57 // Unlike Baseline stubs, Ion stubs do not share stub code, and data stored in
58 // the IonICStub is baked into JIT code. This is one of the reasons Ion stubs
59 // are faster than Baseline stubs. Also note that Ion ICs contain more state
60 // (see IonGetPropertyIC for example) and use dynamic input/output registers,
61 // so sharing stub code for Ion would be much more difficult.
63 // An OperandId represents either a cache input or a value returned by a
64 // CacheIR instruction. Most code should use the ValOperandId and ObjOperandId
65 // classes below. The ObjOperandId class represents an operand that's known to
66 // be an object, just as StringOperandId represents a known string, etc.
// An OperandId wraps the uint16_t index of a cache input or of a value
// produced by a CacheIR instruction. A default-constructed id is InvalidId
// until it is assigned a real operand index.
class OperandId {
 protected:
  static const uint16_t InvalidId = UINT16_MAX;
  uint16_t id_;

  explicit OperandId(uint16_t id) : id_(id) {}

 public:
  OperandId() : id_(InvalidId) {}
  uint16_t id() const { return id_; }
  bool valid() const { return id_ != InvalidId; }
};

// Operand holding a boxed Value.
class ValOperandId : public OperandId {
 public:
  ValOperandId() = default;
  explicit ValOperandId(uint16_t id) : OperandId(id) {}
};

// Operand holding a Value's extracted type tag.
class ValueTagOperandId : public OperandId {
 public:
  ValueTagOperandId() = default;
  explicit ValueTagOperandId(uint16_t id) : OperandId(id) {}
};

// Operand holding a pointer-sized integer.
class IntPtrOperandId : public OperandId {
 public:
  IntPtrOperandId() = default;
  explicit IntPtrOperandId(uint16_t id) : OperandId(id) {}
};

// Operand known to be an object.
class ObjOperandId : public OperandId {
 public:
  ObjOperandId() = default;
  explicit ObjOperandId(uint16_t id) : OperandId(id) {}

  bool operator==(const ObjOperandId& other) const { return id_ == other.id_; }
  bool operator!=(const ObjOperandId& other) const { return id_ != other.id_; }
};

// Operand known to be a number. Note: derives from ValOperandId, not
// OperandId — a number operand is still a boxed Value.
class NumberOperandId : public ValOperandId {
 public:
  NumberOperandId() = default;
  explicit NumberOperandId(uint16_t id) : ValOperandId(id) {}
};

// Operand known to be a string.
class StringOperandId : public OperandId {
 public:
  StringOperandId() = default;
  explicit StringOperandId(uint16_t id) : OperandId(id) {}
};

// Operand known to be a symbol.
class SymbolOperandId : public OperandId {
 public:
  SymbolOperandId() = default;
  explicit SymbolOperandId(uint16_t id) : OperandId(id) {}
};

// Operand known to be a BigInt.
class BigIntOperandId : public OperandId {
 public:
  BigIntOperandId() = default;
  explicit BigIntOperandId(uint16_t id) : OperandId(id) {}
};

// Operand known to be a boolean.
class BooleanOperandId : public OperandId {
 public:
  BooleanOperandId() = default;
  explicit BooleanOperandId(uint16_t id) : OperandId(id) {}
};

// Operand known to be an int32.
class Int32OperandId : public OperandId {
 public:
  Int32OperandId() = default;
  explicit Int32OperandId(uint16_t id) : OperandId(id) {}
};
143 class TypedOperandId
: public OperandId
{
147 MOZ_IMPLICIT
TypedOperandId(ObjOperandId id
)
148 : OperandId(id
.id()), type_(JSVAL_TYPE_OBJECT
) {}
149 MOZ_IMPLICIT
TypedOperandId(StringOperandId id
)
150 : OperandId(id
.id()), type_(JSVAL_TYPE_STRING
) {}
151 MOZ_IMPLICIT
TypedOperandId(SymbolOperandId id
)
152 : OperandId(id
.id()), type_(JSVAL_TYPE_SYMBOL
) {}
153 MOZ_IMPLICIT
TypedOperandId(BigIntOperandId id
)
154 : OperandId(id
.id()), type_(JSVAL_TYPE_BIGINT
) {}
155 MOZ_IMPLICIT
TypedOperandId(BooleanOperandId id
)
156 : OperandId(id
.id()), type_(JSVAL_TYPE_BOOLEAN
) {}
157 MOZ_IMPLICIT
TypedOperandId(Int32OperandId id
)
158 : OperandId(id
.id()), type_(JSVAL_TYPE_INT32
) {}
160 MOZ_IMPLICIT
TypedOperandId(ValueTagOperandId val
)
161 : OperandId(val
.id()), type_(JSVAL_TYPE_UNKNOWN
) {}
162 MOZ_IMPLICIT
TypedOperandId(IntPtrOperandId id
)
163 : OperandId(id
.id()), type_(JSVAL_TYPE_UNKNOWN
) {}
165 TypedOperandId(ValOperandId val
, JSValueType type
)
166 : OperandId(val
.id()), type_(type
) {}
168 JSValueType
type() const { return type_
; }
// X-macro listing every IC kind; expanded below to define the CacheKind enum
// (and, elsewhere, CacheKindNames).
// NOTE(review): this list was truncated by extraction — only three entries
// survive, and each line carries a stray source line number. Restore the full
// list from the canonical header before compiling.
171 #define CACHE_IR_KINDS(_) \
183 _(CheckPrivateField) \
189 _(OptimizeGetIterator) \
190 _(OptimizeSpreadCall) \
199 enum class CacheKind
: uint8_t {
200 #define DEFINE_KIND(kind) kind,
201 CACHE_IR_KINDS(DEFINE_KIND
)
205 extern const char* const CacheKindNames
[];
207 extern size_t NumInputsForCacheKind(CacheKind kind
);
209 enum class CacheOp
: uint16_t {
210 #define DEFINE_OP(op, ...) op,
211 CACHE_IR_OPS(DEFINE_OP
)
// CacheIR opcode info that's read in performance-sensitive code. Stored as a
// single byte per op for better cache locality.
struct CacheIROpInfo {
  // Number of argument bytes encoded after the opcode.
  uint8_t argLength : 7;
  // NOTE(review): the 7-bit field above implies a companion 1-bit flag that
  // extraction dropped; upstream pairs argLength with `transpile` — verify
  // against the canonical header.
  bool transpile : 1;
};
static_assert(sizeof(CacheIROpInfo) == 1);
223 extern const CacheIROpInfo CacheIROpInfos
[];
225 extern const char* const CacheIROpNames
[];
227 inline const char* CacheIRCodeName(CacheOp op
) {
228 return CacheIROpNames
[static_cast<size_t>(op
)];
231 extern const uint32_t CacheIROpHealth
[];
// Type tag describing what a StubField stores. All word-sized entries order
// before First64BitType; RawInt64 and later entries are 64 bits on all
// platforms (sizeIsWord/sizeIsInt64 below rely on this ordering).
// NOTE(review): extraction dropped most enumerators (and the enclosing
// `class StubField {` header) — restore from the canonical header before
// compiling.
235 enum class Type
: uint8_t {
236 // These fields take up a single word.
252 // These fields take up 64 bits on all platforms.
254 First64BitType
= RawInt64
,
261 static bool sizeIsWord(Type type
) {
262 MOZ_ASSERT(type
!= Type::Limit
);
263 return type
< Type::First64BitType
;
266 static bool sizeIsInt64(Type type
) {
267 MOZ_ASSERT(type
!= Type::Limit
);
268 return type
>= Type::First64BitType
;
271 static size_t sizeInBytes(Type type
) {
272 if (sizeIsWord(type
)) {
273 return sizeof(uintptr_t);
275 MOZ_ASSERT(sizeIsInt64(type
));
276 return sizeof(int64_t);
284 StubField(uint64_t data
, Type type
) : data_(data
), type_(type
) {
285 MOZ_ASSERT_IF(sizeIsWord(), data
<= UINTPTR_MAX
);
288 Type
type() const { return type_
; }
290 bool sizeIsWord() const { return sizeIsWord(type_
); }
291 bool sizeIsInt64() const { return sizeIsInt64(type_
); }
293 size_t sizeInBytes() const { return sizeInBytes(type_
); }
295 uintptr_t asWord() const {
296 MOZ_ASSERT(sizeIsWord());
297 return uintptr_t(data_
);
299 uint64_t asInt64() const {
300 MOZ_ASSERT(sizeIsInt64());
305 // This class is used to wrap up information about a call to make it
306 // easier to convey from one function to another. (In particular,
307 // CacheIRWriter encodes the CallFlags in CacheIR, and CacheIRReader
308 // decodes them and uses them for compilation.)
311 enum ArgFormat
: uint8_t {
318 FunApplyNullUndefined
,
319 LastArgFormat
= FunApplyNullUndefined
322 CallFlags() = default;
323 explicit CallFlags(ArgFormat format
) : argFormat_(format
) {}
324 CallFlags(ArgFormat format
, bool isConstructing
, bool isSameRealm
,
325 bool needsUninitializedThis
)
326 : argFormat_(format
),
327 isConstructing_(isConstructing
),
328 isSameRealm_(isSameRealm
),
329 needsUninitializedThis_(needsUninitializedThis
) {}
330 CallFlags(bool isConstructing
, bool isSpread
, bool isSameRealm
= false,
331 bool needsUninitializedThis
= false)
332 : argFormat_(isSpread
? Spread
: Standard
),
333 isConstructing_(isConstructing
),
334 isSameRealm_(isSameRealm
),
335 needsUninitializedThis_(needsUninitializedThis
) {}
337 ArgFormat
getArgFormat() const { return argFormat_
; }
338 bool isConstructing() const {
339 MOZ_ASSERT_IF(isConstructing_
,
340 argFormat_
== Standard
|| argFormat_
== Spread
);
341 return isConstructing_
;
343 bool isSameRealm() const { return isSameRealm_
; }
344 void setIsSameRealm() { isSameRealm_
= true; }
346 bool needsUninitializedThis() const { return needsUninitializedThis_
; }
347 void setNeedsUninitializedThis() { needsUninitializedThis_
= true; }
349 uint8_t toByte() const {
350 // See CacheIRReader::callFlags()
351 MOZ_ASSERT(argFormat_
!= ArgFormat::Unknown
);
352 uint8_t value
= getArgFormat();
353 if (isConstructing()) {
354 value
|= CallFlags::IsConstructing
;
357 value
|= CallFlags::IsSameRealm
;
359 if (needsUninitializedThis()) {
360 value
|= CallFlags::NeedsUninitializedThis
;
366 ArgFormat argFormat_
= ArgFormat::Unknown
;
367 bool isConstructing_
= false;
368 bool isSameRealm_
= false;
369 bool needsUninitializedThis_
= false;
371 // Used for encoding/decoding
372 static const uint8_t ArgFormatBits
= 4;
373 static const uint8_t ArgFormatMask
= (1 << ArgFormatBits
) - 1;
374 static_assert(LastArgFormat
<= ArgFormatMask
, "Not enough arg format bits");
375 static const uint8_t IsConstructing
= 1 << 5;
376 static const uint8_t IsSameRealm
= 1 << 6;
377 static const uint8_t NeedsUninitializedThis
= 1 << 7;
379 friend class CacheIRReader
;
380 friend class CacheIRWriter
;
// In baseline, we have to copy args onto the stack. Below this threshold, we
// will unroll the arg copy loop. We need to clamp this before providing it as
// an arg to a CacheIR op so that everything 5 or greater can share an IC.
const uint32_t MaxUnrolledArgCopy = 5;

// Clamps |argc| to MaxUnrolledArgCopy so ICs for large argcs can be shared.
inline uint32_t ClampFixedArgc(uint32_t argc) {
  return std::min(argc, MaxUnrolledArgCopy);
}
// Outcome of an attempt to attach an IC stub.
// NOTE(review): extraction dropped the enumerator names; NoAction is grounded
// by TRY_ATTACH below, the others are restored to match the surviving
// comments — verify against the canonical header.
enum class AttachDecision {
  // We cannot attach a stub.
  NoAction,

  // We can attach a stub.
  Attach,

  // We cannot currently attach a stub, but we expect to be able to do so in
  // the future. In this case, we do not call trackNotAttached().
  TemporarilyUnoptimizable,

  // We want to attach a stub, but the result of the operation is
  // needed to generate that stub. For example, AddSlot needs to know
  // the resulting shape. Note: the attached stub will inspect the
  // inputs to the operation, so most input checks should be done
  // before the actual operation, with only minimal checks remaining
  // for the deferred portion. This prevents arbitrary scripted code
  // run by the operation from interfering with the conditions being
  // tested.
  Deferred,
};
// If the input expression evaluates to an AttachDecision other than NoAction,
// return that AttachDecision. If it is NoAction, do nothing.
// Wrapped in do/while(0) so the macro behaves as a single statement.
#define TRY_ATTACH(expr)                                    \
  do {                                                      \
    AttachDecision tryAttachTempResult_ = expr;             \
    if (tryAttachTempResult_ != AttachDecision::NoAction) { \
      return tryAttachTempResult_;                          \
    }                                                       \
  } while (0)
// Set of arguments supported by GetIndexOfArgument.
// Support for higher argument indices can be added easily, but is currently
// unnecessary.
enum class ArgumentKind : uint8_t {
  Callee,
  This,
  NewTarget,
  Arg0,
  Arg1,
  Arg2,
  Arg3,
  Arg4,
  Arg5,
  Arg6,
  Arg7,
  NumKinds
};

// Number of ArgN enumerators above: argument indices 0 through 7.
const uint8_t ArgumentKindArgIndexLimit =
    uint8_t(ArgumentKind::NumKinds) - uint8_t(ArgumentKind::Arg0);
444 inline ArgumentKind
ArgumentKindForArgIndex(uint32_t idx
) {
445 MOZ_ASSERT(idx
< ArgumentKindArgIndexLimit
);
446 return ArgumentKind(uint32_t(ArgumentKind::Arg0
) + idx
);
449 // This function calculates the index of an argument based on the call flags.
450 // addArgc is an out-parameter, indicating whether the value of argc should
451 // be added to the return value to find the actual index.
452 inline int32_t GetIndexOfArgument(ArgumentKind kind
, CallFlags flags
,
454 // *** STACK LAYOUT (bottom to top) *** ******** INDEX ********
455 // Callee <-- argc+1 + isConstructing
456 // ThisValue <-- argc + isConstructing
457 // Args: | Arg0 | | ArgArray | <-- argc-1 + isConstructing
458 // | Arg1 | --or-- | | <-- argc-2 + isConstructing
459 // | ... | | (if spread | <-- ...
460 // | ArgN | | call) | <-- 0 + isConstructing
461 // NewTarget (only if constructing) <-- 0 (if it exists)
463 // If this is a spread call, then argc is always 1, and we can calculate the
464 // index directly. If this is not a spread call, then the index of any
465 // argument other than NewTarget depends on argc.
467 // First we determine whether the caller needs to add argc.
468 switch (flags
.getArgFormat()) {
469 case CallFlags::Standard
:
472 case CallFlags::Spread
:
473 // Spread calls do not have Arg1 or higher.
474 MOZ_ASSERT(kind
<= ArgumentKind::Arg0
);
477 case CallFlags::Unknown
:
478 case CallFlags::FunCall
:
479 case CallFlags::FunApplyArgsObj
:
480 case CallFlags::FunApplyArray
:
481 case CallFlags::FunApplyNullUndefined
:
482 MOZ_CRASH("Currently unreachable");
486 // Second, we determine the offset relative to argc.
487 bool hasArgumentArray
= !*addArgc
;
489 case ArgumentKind::Callee
:
490 return flags
.isConstructing() + hasArgumentArray
+ 1;
491 case ArgumentKind::This
:
492 return flags
.isConstructing() + hasArgumentArray
;
493 case ArgumentKind::Arg0
:
494 return flags
.isConstructing() + hasArgumentArray
- 1;
495 case ArgumentKind::Arg1
:
496 return flags
.isConstructing() + hasArgumentArray
- 2;
497 case ArgumentKind::Arg2
:
498 return flags
.isConstructing() + hasArgumentArray
- 3;
499 case ArgumentKind::Arg3
:
500 return flags
.isConstructing() + hasArgumentArray
- 4;
501 case ArgumentKind::Arg4
:
502 return flags
.isConstructing() + hasArgumentArray
- 5;
503 case ArgumentKind::Arg5
:
504 return flags
.isConstructing() + hasArgumentArray
- 6;
505 case ArgumentKind::Arg6
:
506 return flags
.isConstructing() + hasArgumentArray
- 7;
507 case ArgumentKind::Arg7
:
508 return flags
.isConstructing() + hasArgumentArray
- 8;
509 case ArgumentKind::NewTarget
:
510 MOZ_ASSERT(flags
.isConstructing());
514 MOZ_CRASH("Invalid argument kind");
518 // We use this enum as GuardClass operand, instead of storing Class* pointers
519 // in the IR, to keep the IR compact and the same size on all platforms.
// Operand for GuardClass: a small enum is stored in the IR instead of Class*
// pointers to keep the IR compact and the same size on all platforms (see the
// comment above).
// NOTE(review): extraction dropped the enumerators before
// FixedLengthArrayBuffer and after GrowableSharedArrayBuffer, plus the
// closing brace — restore from the canonical header before compiling.
520 enum class GuardClassKind
: uint8_t {
523 FixedLengthArrayBuffer
,
524 ResizableArrayBuffer
,
525 FixedLengthSharedArrayBuffer
,
526 GrowableSharedArrayBuffer
,
// Maps a GuardClassKind back to the JSClass* it stands for (defined in the
// corresponding .cpp).
538 const JSClass
* ClassFor(GuardClassKind kind
);
// Category of an array-buffer view, encoded in the IR like GuardClassKind
// above. NOTE(review): all enumerators and the closing brace were lost in
// extraction — restore from the canonical header before compiling.
540 enum class ArrayBufferViewKind
: uint8_t {
548 #endif /* jit_CacheIR_h */