/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_CacheIR_h
#define jit_CacheIR_h

#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"

#include <algorithm>  // for std::min, used by ClampFixedArgc below
#include <stddef.h>
#include <stdint.h>

#include "jstypes.h"

#include "jit/CacheIROpsGenerated.h"
#include "js/GCAnnotations.h"
#include "js/Value.h"
struct JS_PUBLIC_API JSContext;

namespace js {
namespace jit {
// [SMDOC] CacheIR
//
// CacheIR is an (extremely simple) linear IR language for inline caches.
// From this IR, we can generate machine code for Baseline or Ion IC stubs.
//
// IRWriter
// --------
// CacheIR bytecode is written using IRWriter. This class also records some
// metadata that's used by the Baseline and Ion code generators to generate
// (efficient) machine code.
//
// Sharing Baseline stub code
// --------------------------
// Baseline stores data (like Shape* and fixed slot offsets) inside the ICStub
// structure, instead of embedding them directly in the JitCode. This makes
// Baseline IC code slightly slower, but allows us to share IC code between
// caches. CacheIR makes it easy to share code between stubs: stubs that have
// the same CacheIR (and CacheKind) will have the same Baseline stub code.
//
// Baseline stubs that share JitCode also share a CacheIRStubInfo structure.
// This class stores the CacheIR and the location of GC things stored in the
// stub, for the GC.
//
// JitZone has a CacheIRStubInfo* -> JitCode* weak map that's used to share
// both the IR and JitCode between Baseline CacheIR stubs. This HashMap owns
// the stubInfo (it uses UniquePtr), so once there are no references left to
// the shared stub code, we can also free the CacheIRStubInfo.
//
// Ion stubs
// ---------
// Unlike Baseline stubs, Ion stubs do not share stub code, and data stored in
// the IonICStub is baked into JIT code. This is one of the reasons Ion stubs
// are faster than Baseline stubs. Also note that Ion ICs contain more state
// (see IonGetPropertyIC for example) and use dynamic input/output registers,
// so sharing stub code for Ion would be much more difficult.
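//
// Example
// -------
// As a rough illustration only (the op names below are paraphrased, not
// literal generator output), a GetProp IC that reads a fixed slot from an
// object with a known shape records IR along these lines:
//
//   GuardToObject       val0 => obj1
//   GuardShape          obj1, <shape stub field>
//   LoadFixedSlotResult obj1, <offset stub field>
//
// Baseline compiles this sequence once and reuses the JitCode for every stub
// with the same IR, loading the shape and slot offset from the stub data; Ion
// bakes them directly into the generated code.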
// An OperandId represents either a cache input or a value returned by a
// CacheIR instruction. Most code should use the ValOperandId and ObjOperandId
// classes below. The ObjOperandId class represents an operand that's known to
// be an object, just as StringOperandId represents a known string, etc.
class OperandId {
 protected:
  static const uint16_t InvalidId = UINT16_MAX;
  uint16_t id_;

  explicit OperandId(uint16_t id) : id_(id) {}

 public:
  OperandId() : id_(InvalidId) {}
  uint16_t id() const { return id_; }
  bool valid() const { return id_ != InvalidId; }
};
class ValOperandId : public OperandId {
 public:
  ValOperandId() = default;
  explicit ValOperandId(uint16_t id) : OperandId(id) {}
};

class ValueTagOperandId : public OperandId {
 public:
  ValueTagOperandId() = default;
  explicit ValueTagOperandId(uint16_t id) : OperandId(id) {}
};

class IntPtrOperandId : public OperandId {
 public:
  IntPtrOperandId() = default;
  explicit IntPtrOperandId(uint16_t id) : OperandId(id) {}
};

class ObjOperandId : public OperandId {
 public:
  ObjOperandId() = default;
  explicit ObjOperandId(uint16_t id) : OperandId(id) {}

  bool operator==(const ObjOperandId& other) const { return id_ == other.id_; }
  bool operator!=(const ObjOperandId& other) const { return id_ != other.id_; }
};

class NumberOperandId : public ValOperandId {
 public:
  NumberOperandId() = default;
  explicit NumberOperandId(uint16_t id) : ValOperandId(id) {}
};

class StringOperandId : public OperandId {
 public:
  StringOperandId() = default;
  explicit StringOperandId(uint16_t id) : OperandId(id) {}
};

class SymbolOperandId : public OperandId {
 public:
  SymbolOperandId() = default;
  explicit SymbolOperandId(uint16_t id) : OperandId(id) {}
};

class BigIntOperandId : public OperandId {
 public:
  BigIntOperandId() = default;
  explicit BigIntOperandId(uint16_t id) : OperandId(id) {}
};

class BooleanOperandId : public OperandId {
 public:
  BooleanOperandId() = default;
  explicit BooleanOperandId(uint16_t id) : OperandId(id) {}
};

class Int32OperandId : public OperandId {
 public:
  Int32OperandId() = default;
  explicit Int32OperandId(uint16_t id) : OperandId(id) {}
};
class TypedOperandId : public OperandId {
  JSValueType type_;

 public:
  MOZ_IMPLICIT TypedOperandId(ObjOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_OBJECT) {}
  MOZ_IMPLICIT TypedOperandId(StringOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_STRING) {}
  MOZ_IMPLICIT TypedOperandId(SymbolOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_SYMBOL) {}
  MOZ_IMPLICIT TypedOperandId(BigIntOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_BIGINT) {}
  MOZ_IMPLICIT TypedOperandId(BooleanOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_BOOLEAN) {}
  MOZ_IMPLICIT TypedOperandId(Int32OperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_INT32) {}

  MOZ_IMPLICIT TypedOperandId(ValueTagOperandId val)
      : OperandId(val.id()), type_(JSVAL_TYPE_UNKNOWN) {}
  MOZ_IMPLICIT TypedOperandId(IntPtrOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_UNKNOWN) {}

  TypedOperandId(ValOperandId val, JSValueType type)
      : OperandId(val.id()), type_(type) {}

  JSValueType type() const { return type_; }
};
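
// Illustrative usage sketch (the operand index 2 is an arbitrary example
// value): the typed operand classes convert implicitly to TypedOperandId, so
// code that handles several operand kinds can recover the static type of its
// input:
//
//   ObjOperandId obj(2);
//   TypedOperandId typed = obj;
//   MOZ_ASSERT(typed.id() == 2);
//   MOZ_ASSERT(typed.type() == JSVAL_TYPE_OBJECT);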
#define CACHE_IR_KINDS(_)  \
  _(GetProp)               \
  _(GetElem)               \
  _(GetName)               \
  _(GetPropSuper)          \
  _(GetElemSuper)          \
  _(GetIntrinsic)          \
  _(SetProp)               \
  _(SetElem)               \
  _(BindName)              \
  _(In)                    \
  _(HasOwn)                \
  _(CheckPrivateField)     \
  _(TypeOf)                \
  _(ToPropertyKey)         \
  _(InstanceOf)            \
  _(GetIterator)           \
  _(CloseIter)             \
  _(OptimizeGetIterator)   \
  _(OptimizeSpreadCall)    \
  _(Compare)               \
  _(ToBool)                \
  _(Call)                  \
  _(UnaryArith)            \
  _(BinaryArith)           \
  _(NewObject)             \
  _(NewArray)

enum class CacheKind : uint8_t {
#define DEFINE_KIND(kind) kind,
  CACHE_IR_KINDS(DEFINE_KIND)
#undef DEFINE_KIND
};
extern const char* const CacheKindNames[];

extern size_t NumInputsForCacheKind(CacheKind kind);
enum class CacheOp : uint16_t {
#define DEFINE_OP(op, ...) op,
  CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP
  NumOpcodes,
};
// CacheIR opcode info that's read in performance-sensitive code. Stored as a
// single byte per op for better cache locality.
struct CacheIROpInfo {
  uint8_t argLength : 7;
  bool transpile : 1;
};
static_assert(sizeof(CacheIROpInfo) == 1);
extern const CacheIROpInfo CacheIROpInfos[];

extern const char* const CacheIROpNames[];

inline const char* CacheIRCodeName(CacheOp op) {
  return CacheIROpNames[static_cast<size_t>(op)];
}

extern const uint32_t CacheIROpHealth[];
class StubField {
 public:
  enum class Type : uint8_t {
    // These fields take up a single word.
    RawInt32,
    RawPointer,
    Shape,
    WeakShape,
    WeakGetterSetter,
    JSObject,
    WeakObject,
    Symbol,
    String,
    WeakBaseScript,
    JitCode,

    AllocSite,

    // These fields take up 64 bits on all platforms.
    RawInt64,
    First64BitType = RawInt64,
    Value,
    Double,

    Limit
  };

  static bool sizeIsWord(Type type) {
    MOZ_ASSERT(type != Type::Limit);
    return type < Type::First64BitType;
  }

  static bool sizeIsInt64(Type type) {
    MOZ_ASSERT(type != Type::Limit);
    return type >= Type::First64BitType;
  }

  static size_t sizeInBytes(Type type) {
    if (sizeIsWord(type)) {
      return sizeof(uintptr_t);
    }
    MOZ_ASSERT(sizeIsInt64(type));
    return sizeof(int64_t);
  }

 private:
  uint64_t data_;
  Type type_;

 public:
  StubField(uint64_t data, Type type) : data_(data), type_(type) {
    MOZ_ASSERT_IF(sizeIsWord(), data <= UINTPTR_MAX);
  }

  Type type() const { return type_; }

  bool sizeIsWord() const { return sizeIsWord(type_); }
  bool sizeIsInt64() const { return sizeIsInt64(type_); }

  size_t sizeInBytes() const { return sizeInBytes(type_); }

  uintptr_t asWord() const {
    MOZ_ASSERT(sizeIsWord());
    return uintptr_t(data_);
  }
  uint64_t asInt64() const {
    MOZ_ASSERT(sizeIsInt64());
    return data_;
  }
} JS_HAZ_GC_POINTER;
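
// Illustrative sketch (the field values are arbitrary examples): word-sized
// and 64-bit stub fields report their size through the helpers above, which
// determines how much space each field occupies in the stub data:
//
//   StubField raw(42, StubField::Type::RawInt32);
//   MOZ_ASSERT(raw.sizeIsWord());
//   MOZ_ASSERT(raw.sizeInBytes() == sizeof(uintptr_t));
//
//   StubField val(0, StubField::Type::Value);
//   MOZ_ASSERT(val.sizeIsInt64());
//   MOZ_ASSERT(val.sizeInBytes() == sizeof(int64_t));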
// This class is used to wrap up information about a call to make it
// easier to convey from one function to another. (In particular,
// CacheIRWriter encodes the CallFlags in CacheIR, and CacheIRReader
// decodes them and uses them for compilation.)
class CallFlags {
 public:
  enum ArgFormat : uint8_t {
    Unknown,
    Standard,
    Spread,
    FunCall,
    FunApplyArgsObj,
    FunApplyArray,
    FunApplyNullUndefined,
    LastArgFormat = FunApplyNullUndefined
  };

  CallFlags() = default;
  explicit CallFlags(ArgFormat format) : argFormat_(format) {}
  CallFlags(bool isConstructing, bool isSpread, bool isSameRealm = false,
            bool needsUninitializedThis = false)
      : argFormat_(isSpread ? Spread : Standard),
        isConstructing_(isConstructing),
        isSameRealm_(isSameRealm),
        needsUninitializedThis_(needsUninitializedThis) {}

  ArgFormat getArgFormat() const { return argFormat_; }
  bool isConstructing() const {
    MOZ_ASSERT_IF(isConstructing_,
                  argFormat_ == Standard || argFormat_ == Spread);
    return isConstructing_;
  }
  bool isSameRealm() const { return isSameRealm_; }
  void setIsSameRealm() { isSameRealm_ = true; }

  bool needsUninitializedThis() const { return needsUninitializedThis_; }
  void setNeedsUninitializedThis() { needsUninitializedThis_ = true; }

  uint8_t toByte() const {
    // See CacheIRReader::callFlags()
    MOZ_ASSERT(argFormat_ != ArgFormat::Unknown);
    uint8_t value = getArgFormat();
    if (isConstructing()) {
      value |= CallFlags::IsConstructing;
    }
    if (isSameRealm()) {
      value |= CallFlags::IsSameRealm;
    }
    if (needsUninitializedThis()) {
      value |= CallFlags::NeedsUninitializedThis;
    }
    return value;
  }

 private:
  ArgFormat argFormat_ = ArgFormat::Unknown;
  bool isConstructing_ = false;
  bool isSameRealm_ = false;
  bool needsUninitializedThis_ = false;

  // Used for encoding/decoding
  static const uint8_t ArgFormatBits = 4;
  static const uint8_t ArgFormatMask = (1 << ArgFormatBits) - 1;
  static_assert(LastArgFormat <= ArgFormatMask, "Not enough arg format bits");
  static const uint8_t IsConstructing = 1 << 5;
  static const uint8_t IsSameRealm = 1 << 6;
  static const uint8_t NeedsUninitializedThis = 1 << 7;

  friend class CacheIRReader;
  friend class CacheIRWriter;
};
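
// Sketch of the byte produced by CallFlags::toByte(), derived from the
// constants above (bit 4 is unused):
//
//   bit 7:    NeedsUninitializedThis
//   bit 6:    IsSameRealm
//   bit 5:    IsConstructing
//   bits 0-3: ArgFormat
//
// For example, a constructing spread call in the same realm encodes as
// Spread | IsConstructing | IsSameRealm = 2 | 32 | 64 = 0x62.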
// In Baseline, we have to copy args onto the stack. Below this threshold, we
// will unroll the arg copy loop. We need to clamp this before providing it as
// an arg to a CacheIR op so that everything 5 or greater can share an IC.
const uint32_t MaxUnrolledArgCopy = 5;
inline uint32_t ClampFixedArgc(uint32_t argc) {
  return std::min(argc, MaxUnrolledArgCopy);
}
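
// Illustrative sketch: ClampFixedArgc(3) == 3 while ClampFixedArgc(9) == 5, so
// every call site with argc >= MaxUnrolledArgCopy ends up with the same
// clamped value and can attach the same stub.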
enum class AttachDecision {
  // We cannot attach a stub.
  NoAction,

  // We can attach a stub.
  Attach,

  // We cannot currently attach a stub, but we expect to be able to do so in the
  // future. In this case, we do not call trackNotAttached().
  TemporarilyUnoptimizable,

  // We want to attach a stub, but the result of the operation is
  // needed to generate that stub. For example, AddSlot needs to know
  // the resulting shape. Note: the attached stub will inspect the
  // inputs to the operation, so most input checks should be done
  // before the actual operation, with only minimal checks remaining
  // for the deferred portion. This prevents arbitrary scripted code
  // run by the operation from interfering with the conditions being
  // checked.
  Deferred
};

// If the input expression evaluates to an AttachDecision other than NoAction,
// return that AttachDecision. If it is NoAction, do nothing.
#define TRY_ATTACH(expr)                                    \
  do {                                                      \
    AttachDecision tryAttachTempResult_ = expr;             \
    if (tryAttachTempResult_ != AttachDecision::NoAction) { \
      return tryAttachTempResult_;                          \
    }                                                       \
  } while (0)
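
// Illustrative usage sketch (the tryAttach* helpers named here are
// hypothetical): an IC generator typically tries its cases in order and
// returns the first decision that is not NoAction:
//
//   AttachDecision tryAttachStub() {
//     TRY_ATTACH(tryAttachDenseElement());
//     TRY_ATTACH(tryAttachArgumentsObjectArg());
//     return AttachDecision::NoAction;
//   }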
// Set of arguments supported by GetIndexOfArgument.
// Support for higher argument indices can be added easily, but is currently
// unneeded.
enum class ArgumentKind : uint8_t {
  Callee,
  This,
  NewTarget,
  Arg0,
  Arg1,
  Arg2,
  Arg3,
  Arg4,
  Arg5,
  Arg6,
  Arg7,
  NumKinds
};

const uint8_t ArgumentKindArgIndexLimit =
    uint8_t(ArgumentKind::NumKinds) - uint8_t(ArgumentKind::Arg0);

inline ArgumentKind ArgumentKindForArgIndex(uint32_t idx) {
  MOZ_ASSERT(idx < ArgumentKindArgIndexLimit);
  return ArgumentKind(uint32_t(ArgumentKind::Arg0) + idx);
}
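
// Illustrative sketch: ArgumentKindForArgIndex(0) == ArgumentKind::Arg0 and
// ArgumentKindForArgIndex(3) == ArgumentKind::Arg3; indices at or above
// ArgumentKindArgIndexLimit (currently 8) hit the assertion above.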
// This function calculates the index of an argument based on the call flags.
// addArgc is an out-parameter, indicating whether the value of argc should
// be added to the return value to find the actual index.
inline int32_t GetIndexOfArgument(ArgumentKind kind, CallFlags flags,
                                  bool* addArgc) {
  // *** STACK LAYOUT (bottom to top) ***   ******** INDEX ********
  //   Callee                               <-- argc+1 + isConstructing
  //   ThisValue                            <-- argc   + isConstructing
  //   Args: | Arg0 |        | ArgArray |   <-- argc-1 + isConstructing
  //         | Arg1 | --or-- |          |   <-- argc-2 + isConstructing
  //         | ...  |        | (if spread   <-- ...
  //         | ArgN |        |  call)    |  <-- 0      + isConstructing
  //   NewTarget (only if constructing)     <-- 0 (if it exists)
  //
  // If this is a spread call, then argc is always 1, and we can calculate the
  // index directly. If this is not a spread call, then the index of any
  // argument other than NewTarget depends on argc.

  // First we determine whether the caller needs to add argc.
  switch (flags.getArgFormat()) {
    case CallFlags::Standard:
      *addArgc = true;
      break;
    case CallFlags::Spread:
      // Spread calls do not have Arg1 or higher.
      MOZ_ASSERT(kind <= ArgumentKind::Arg0);
      *addArgc = false;
      break;
    case CallFlags::Unknown:
    case CallFlags::FunCall:
    case CallFlags::FunApplyArgsObj:
    case CallFlags::FunApplyArray:
    case CallFlags::FunApplyNullUndefined:
      MOZ_CRASH("Currently unreachable");
      break;
  }

  // Second, we determine the offset relative to argc.
  bool hasArgumentArray = !*addArgc;
  switch (kind) {
    case ArgumentKind::Callee:
      return flags.isConstructing() + hasArgumentArray + 1;
    case ArgumentKind::This:
      return flags.isConstructing() + hasArgumentArray;
    case ArgumentKind::Arg0:
      return flags.isConstructing() + hasArgumentArray - 1;
    case ArgumentKind::Arg1:
      return flags.isConstructing() + hasArgumentArray - 2;
    case ArgumentKind::Arg2:
      return flags.isConstructing() + hasArgumentArray - 3;
    case ArgumentKind::Arg3:
      return flags.isConstructing() + hasArgumentArray - 4;
    case ArgumentKind::Arg4:
      return flags.isConstructing() + hasArgumentArray - 5;
    case ArgumentKind::Arg5:
      return flags.isConstructing() + hasArgumentArray - 6;
    case ArgumentKind::Arg6:
      return flags.isConstructing() + hasArgumentArray - 7;
    case ArgumentKind::Arg7:
      return flags.isConstructing() + hasArgumentArray - 8;
    case ArgumentKind::NewTarget:
      MOZ_ASSERT(flags.isConstructing());
      *addArgc = false;
      return 0;
    default:
      MOZ_CRASH("Invalid argument kind");
  }
}
// We use this enum as the GuardClass operand, instead of storing Class*
// pointers in the IR, to keep the IR compact and the same size on all
// platforms.
enum class GuardClassKind : uint8_t {
  Array,
  PlainObject,
  FixedLengthArrayBuffer,
  FixedLengthSharedArrayBuffer,
  FixedLengthDataView,
  MappedArguments,
  UnmappedArguments,
  WindowProxy,
  JSFunction,
  BoundFunction,
  Set,
  Map,
};

}  // namespace jit
}  // namespace js

#endif /* jit_CacheIR_h */