/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_LIR_h
#define jit_LIR_h

// This file declares the core data structures for LIR: storage allocations for
// inputs and outputs, as well as the interface instructions must conform to.

#include "mozilla/Array.h"
#include "mozilla/Casting.h"

#include "jit/Bailouts.h"
#include "jit/FixedList.h"
#include "jit/InlineList.h"
#include "jit/JitAllocPolicy.h"
#include "jit/LIROpsGenerated.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "jit/Registers.h"
#include "jit/Safepoints.h"
#include "util/Memory.h"

namespace js {
namespace jit {

class LUse;
class LGeneralReg;
class LFloatReg;
class LStackSlot;
class LStackArea;
class LArgument;
class LConstantIndex;
class LInstruction;
class LDefinition;
class MBasicBlock;
class MIRGenerator;

static const uint32_t VREG_INCREMENT = 1;

static const uint32_t THIS_FRAME_ARGSLOT = 0;

#if defined(JS_NUNBOX32)
#  define BOX_PIECES 2
static const uint32_t VREG_TYPE_OFFSET = 0;
static const uint32_t VREG_DATA_OFFSET = 1;
static const uint32_t TYPE_INDEX = 0;
static const uint32_t PAYLOAD_INDEX = 1;
static const uint32_t INT64LOW_INDEX = 0;
static const uint32_t INT64HIGH_INDEX = 1;
#elif defined(JS_PUNBOX64)
#  define BOX_PIECES 1
#else
#  error "Unknown!"
#endif

static const uint32_t INT64_PIECES = sizeof(int64_t) / sizeof(uintptr_t);
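// Illustrative note (not normative): with these definitions, a 32-bit build
// (JS_NUNBOX32) splits a Value into BOX_PIECES == 2 allocations and an int64
// into INT64_PIECES == 2 pieces, while a 64-bit build (JS_PUNBOX64) uses a
// single allocation for each:
//
//   sizeof(int64_t) / sizeof(uintptr_t)  ==  8 / 4  ==  2   (32-bit)
//   sizeof(int64_t) / sizeof(uintptr_t)  ==  8 / 8  ==  1   (64-bit)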
// Represents storage for an operand. For constants, the pointer is tagged
// with a single bit, and the untagged pointer is a pointer to a Value.
class LAllocation {
  uintptr_t bits_;

  // 3 bits gives us enough for an interesting set of Kinds and also fits
  // within the alignment bits of pointers to Value, which are always
  // 8-byte aligned.
  static const uintptr_t KIND_BITS = 3;
  static const uintptr_t KIND_SHIFT = 0;
  static const uintptr_t KIND_MASK = (1 << KIND_BITS) - 1;

 protected:
#ifdef JS_64BIT
  static const uintptr_t DATA_BITS = sizeof(uint32_t) * 8;
#else
  static const uintptr_t DATA_BITS = (sizeof(uint32_t) * 8) - KIND_BITS;
#endif
  static const uintptr_t DATA_SHIFT = KIND_SHIFT + KIND_BITS;

 public:
  enum Kind {
    CONSTANT_VALUE,  // MConstant*.
    CONSTANT_INDEX,  // Constant arbitrary index.
    USE,           // Use of a virtual register, with physical allocation policy.
    GPR,           // General purpose register.
    FPU,           // Floating-point register.
    STACK_SLOT,    // Stack slot.
    STACK_AREA,    // Stack area.
    ARGUMENT_SLOT  // Argument slot.
  };

  static const uintptr_t DATA_MASK = (uintptr_t(1) << DATA_BITS) - 1;

 protected:
  uint32_t data() const {
    MOZ_ASSERT(!hasIns());
    return mozilla::AssertedCast<uint32_t>(bits_ >> DATA_SHIFT);
  }
  void setData(uintptr_t data) {
    MOZ_ASSERT(!hasIns());
    MOZ_ASSERT(data <= DATA_MASK);
    bits_ &= ~(DATA_MASK << DATA_SHIFT);
    bits_ |= (data << DATA_SHIFT);
  }
  void setKindAndData(Kind kind, uintptr_t data) {
    MOZ_ASSERT(data <= DATA_MASK);
    bits_ = (uintptr_t(kind) << KIND_SHIFT) | data << DATA_SHIFT;
    MOZ_ASSERT(!hasIns());
  }

  bool hasIns() const { return isStackArea(); }
  const LInstruction* ins() const {
    MOZ_ASSERT(hasIns());
    return reinterpret_cast<const LInstruction*>(bits_ &
                                                 ~(KIND_MASK << KIND_SHIFT));
  }
  LInstruction* ins() {
    MOZ_ASSERT(hasIns());
    return reinterpret_cast<LInstruction*>(bits_ & ~(KIND_MASK << KIND_SHIFT));
  }
  void setKindAndIns(Kind kind, LInstruction* ins) {
    uintptr_t data = reinterpret_cast<uintptr_t>(ins);
    MOZ_ASSERT((data & (KIND_MASK << KIND_SHIFT)) == 0);
    bits_ = data | (uintptr_t(kind) << KIND_SHIFT);
    MOZ_ASSERT(hasIns());
  }

  LAllocation(Kind kind, uintptr_t data) { setKindAndData(kind, data); }
  LAllocation(Kind kind, LInstruction* ins) { setKindAndIns(kind, ins); }
  explicit LAllocation(Kind kind) { setKindAndData(kind, 0); }

 public:
  LAllocation() : bits_(0) { MOZ_ASSERT(isBogus()); }

  // The MConstant pointer must have its low bits cleared.
  explicit LAllocation(const MConstant* c) {
    MOZ_ASSERT(c);
    bits_ = uintptr_t(c);
    MOZ_ASSERT((bits_ & (KIND_MASK << KIND_SHIFT)) == 0);
    bits_ |= CONSTANT_VALUE << KIND_SHIFT;
  }
  inline explicit LAllocation(AnyRegister reg);

  Kind kind() const { return (Kind)((bits_ >> KIND_SHIFT) & KIND_MASK); }

  bool isBogus() const { return bits_ == 0; }
  bool isUse() const { return kind() == USE; }
  bool isConstant() const { return isConstantValue() || isConstantIndex(); }
  bool isConstantValue() const { return kind() == CONSTANT_VALUE; }
  bool isConstantIndex() const { return kind() == CONSTANT_INDEX; }
  bool isGeneralReg() const { return kind() == GPR; }
  bool isFloatReg() const { return kind() == FPU; }
  bool isStackSlot() const { return kind() == STACK_SLOT; }
  bool isStackArea() const { return kind() == STACK_AREA; }
  bool isArgument() const { return kind() == ARGUMENT_SLOT; }
  bool isRegister() const { return isGeneralReg() || isFloatReg(); }
  bool isRegister(bool needFloat) const {
    return needFloat ? isFloatReg() : isGeneralReg();
  }
  bool isMemory() const { return isStackSlot() || isArgument(); }
  inline uint32_t memorySlot() const;
  inline LUse* toUse();
  inline const LUse* toUse() const;
  inline const LGeneralReg* toGeneralReg() const;
  inline const LFloatReg* toFloatReg() const;
  inline const LStackSlot* toStackSlot() const;
  inline LStackArea* toStackArea();
  inline const LStackArea* toStackArea() const;
  inline const LArgument* toArgument() const;
  inline const LConstantIndex* toConstantIndex() const;
  inline AnyRegister toRegister() const;

  const MConstant* toConstant() const {
    MOZ_ASSERT(isConstantValue());
    return reinterpret_cast<const MConstant*>(bits_ &
                                              ~(KIND_MASK << KIND_SHIFT));
  }

  bool operator==(const LAllocation& other) const {
    return bits_ == other.bits_;
  }

  bool operator!=(const LAllocation& other) const {
    return bits_ != other.bits_;
  }

  HashNumber hash() const { return bits_; }

  bool aliases(const LAllocation& other) const;

#ifdef JS_JITSPEW
  UniqueChars toString() const;
  void dump() const;
#endif
};
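// Illustrative sketch (not part of this header): the low KIND_BITS of bits_
// tag every allocation, so kind tests are a mask-and-compare. For example,
// assuming some Register `r`:
//
//   LAllocation a = LGeneralReg(r);   // kind() == GPR, data() == r.code()
//   MOZ_ASSERT(a.isGeneralReg() && a.isRegister());
//   MOZ_ASSERT(!a.isMemory());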
class LUse : public LAllocation {
  static const uint32_t POLICY_BITS = 3;
  static const uint32_t POLICY_SHIFT = 0;
  static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;
#ifdef JS_CODEGEN_ARM64
  static const uint32_t REG_BITS = 7;
#else
  static const uint32_t REG_BITS = 6;
#endif
  static const uint32_t REG_SHIFT = POLICY_SHIFT + POLICY_BITS;
  static const uint32_t REG_MASK = (1 << REG_BITS) - 1;

  // Whether the physical register for this operand may be reused for a def.
  static const uint32_t USED_AT_START_BITS = 1;
  static const uint32_t USED_AT_START_SHIFT = REG_SHIFT + REG_BITS;
  static const uint32_t USED_AT_START_MASK = (1 << USED_AT_START_BITS) - 1;

  // The REG field will hold the register code for any Register or
  // FloatRegister, though not for an AnyRegister.
  static_assert(std::max(Registers::Total, FloatRegisters::Total) <=
                    REG_MASK + 1,
                "The field must be able to represent any register code");

 public:
  // Virtual registers get the remaining bits.
  static const uint32_t VREG_BITS =
      DATA_BITS - (USED_AT_START_SHIFT + USED_AT_START_BITS);
  static const uint32_t VREG_SHIFT = USED_AT_START_SHIFT + USED_AT_START_BITS;
  static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;

  enum Policy {
    // Input should be in a read-only register or stack slot.
    ANY,

    // Input must be in a read-only register.
    REGISTER,

    // Input must be in a specific, read-only register.
    FIXED,

    // Keep the used virtual register alive, and use whatever allocation is
    // available. This is similar to ANY but hints to the register allocator
    // that it is never useful to optimize this site.
    KEEPALIVE,

    // Input must be allocated on the stack. Only used when extracting stack
    // results from stack result areas.
    STACK,

    // For snapshot inputs, indicates that the associated instruction will
    // write this input to its output register before bailing out.
    // The register allocator may thus allocate that output register, and
    // does not need to keep the virtual register alive (alternatively,
    // this may be treated as KEEPALIVE).
    RECOVERED_INPUT
  };

  void set(Policy policy, uint32_t reg, bool usedAtStart) {
    MOZ_ASSERT(reg <= REG_MASK, "Register code must fit in field");
    setKindAndData(USE, (policy << POLICY_SHIFT) | (reg << REG_SHIFT) |
                            ((usedAtStart ? 1 : 0) << USED_AT_START_SHIFT));
  }

 public:
  LUse(uint32_t vreg, Policy policy, bool usedAtStart = false) {
    set(policy, 0, usedAtStart);
    setVirtualRegister(vreg);
  }
  explicit LUse(Policy policy, bool usedAtStart = false) {
    set(policy, 0, usedAtStart);
  }
  explicit LUse(Register reg, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
  }
  explicit LUse(FloatRegister reg, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
  }
  LUse(Register reg, uint32_t virtualRegister, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
    setVirtualRegister(virtualRegister);
  }
  LUse(FloatRegister reg, uint32_t virtualRegister, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
    setVirtualRegister(virtualRegister);
  }

  void setVirtualRegister(uint32_t index) {
    MOZ_ASSERT(index < VREG_MASK);

    uint32_t old = data() & ~(VREG_MASK << VREG_SHIFT);
    setData(old | (index << VREG_SHIFT));
  }

  Policy policy() const {
    Policy policy = (Policy)((data() >> POLICY_SHIFT) & POLICY_MASK);
    return policy;
  }
  uint32_t virtualRegister() const {
    uint32_t index = (data() >> VREG_SHIFT) & VREG_MASK;
    MOZ_ASSERT(index != 0);
    return index;
  }
  uint32_t registerCode() const {
    MOZ_ASSERT(policy() == FIXED);
    return (data() >> REG_SHIFT) & REG_MASK;
  }
  bool isFixedRegister() const { return policy() == FIXED; }
  bool usedAtStart() const {
    return !!((data() >> USED_AT_START_SHIFT) & USED_AT_START_MASK);
  }
};
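// Illustrative sketch (not part of this header): typical uses built by a
// LIRGenerator, assuming a virtual register number `vreg` and a fixed
// Register `r`:
//
//   LUse any(vreg, LUse::ANY);        // register or stack slot
//   LUse inReg(vreg, LUse::REGISTER); // any register
//   LUse fixed(r, vreg);              // exactly register r
//   LUse atStart(vreg, LUse::REGISTER, /* usedAtStart = */ true);
//
// usedAtStart tells the allocator the input is only read at the start of the
// instruction, so its register may be reused for an output.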
static const uint32_t MAX_VIRTUAL_REGISTERS = LUse::VREG_MASK;

class LBoxAllocation {
#ifdef JS_NUNBOX32
  LAllocation type_;
  LAllocation payload_;
#else
  LAllocation value_;
#endif

 public:
#ifdef JS_NUNBOX32
  LBoxAllocation(LAllocation type, LAllocation payload)
      : type_(type), payload_(payload) {}

  LAllocation type() const { return type_; }
  LAllocation payload() const { return payload_; }
#else
  explicit LBoxAllocation(LAllocation value) : value_(value) {}

  LAllocation value() const { return value_; }
#endif
};

template <class ValT>
class LInt64Value {
#if JS_BITS_PER_WORD == 32
  ValT high_;
  ValT low_;
#else
  ValT value_;
#endif

 public:
  LInt64Value() = default;

#if JS_BITS_PER_WORD == 32
  LInt64Value(ValT high, ValT low) : high_(high), low_(low) {}

  ValT high() const { return high_; }
  ValT low() const { return low_; }

  const ValT* pointerHigh() const { return &high_; }
  const ValT* pointerLow() const { return &low_; }
#else
  explicit LInt64Value(ValT value) : value_(value) {}

  ValT value() const { return value_; }
  const ValT* pointer() const { return &value_; }
#endif
};

using LInt64Allocation = LInt64Value<LAllocation>;
class LGeneralReg : public LAllocation {
 public:
  explicit LGeneralReg(Register reg) : LAllocation(GPR, reg.code()) {}

  Register reg() const { return Register::FromCode(data()); }
};

class LFloatReg : public LAllocation {
 public:
  explicit LFloatReg(FloatRegister reg) : LAllocation(FPU, reg.code()) {}

  FloatRegister reg() const { return FloatRegister::FromCode(data()); }
};

// Arbitrary constant index.
class LConstantIndex : public LAllocation {
  explicit LConstantIndex(uint32_t index)
      : LAllocation(CONSTANT_INDEX, index) {}

 public:
  static LConstantIndex FromIndex(uint32_t index) {
    return LConstantIndex(index);
  }

  uint32_t index() const { return data(); }
};

// Stack slots are indices into the stack. The indices are byte indices.
class LStackSlot : public LAllocation {
 public:
  explicit LStackSlot(uint32_t slot) : LAllocation(STACK_SLOT, slot) {}

  uint32_t slot() const { return data(); }
};

// Stack area indicates a contiguous stack allocation meant to receive call
// results that don't fit in registers.
class LStackArea : public LAllocation {
 public:
  explicit LStackArea(LInstruction* stackArea)
      : LAllocation(STACK_AREA, stackArea) {}

  // Byte index of base of stack area, in the same coordinate space as
  // LStackSlot::slot().
  inline uint32_t base() const;
  inline void setBase(uint32_t base);

  // Size in bytes of the stack area.
  inline uint32_t size() const;
  inline uint32_t alignment() const { return 8; }

  class ResultIterator {
    const LStackArea& alloc_;
    uint32_t idx_;

   public:
    explicit ResultIterator(const LStackArea& alloc) : alloc_(alloc), idx_(0) {}

    inline bool done() const;
    inline void next();
    inline LAllocation alloc() const;
    inline bool isWasmAnyRef() const;

    explicit operator bool() const { return !done(); }
  };

  ResultIterator results() const { return ResultIterator(*this); }

  inline LStackSlot resultAlloc(LInstruction* lir, LDefinition* def) const;
};

// Arguments are reverse indices into the stack. The indices are byte indices.
class LArgument : public LAllocation {
 public:
  explicit LArgument(uint32_t index) : LAllocation(ARGUMENT_SLOT, index) {}

  uint32_t index() const { return data(); }
};

inline uint32_t LAllocation::memorySlot() const {
  MOZ_ASSERT(isMemory());
  return isStackSlot() ? toStackSlot()->slot() : toArgument()->index();
}
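// Illustrative sketch (not part of this header): memorySlot() gives a single
// byte-offset view over both memory kinds, so safepoint code can treat them
// uniformly:
//
//   LAllocation s = LStackSlot(16);  // s.memorySlot() == 16, stack space
//   LAllocation a = LArgument(8);    // a.memorySlot() == 8, argument space
//
// Callers still need isStackSlot() to know which coordinate space the offset
// belongs to.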
// Represents storage for a definition.
class LDefinition {
  // Bits containing policy, type, and virtual register.
  uint32_t bits_;

  // Before register allocation, this optionally contains a fixed policy.
  // Register allocation assigns this field to a physical policy if none is
  // fixed.
  //
  // Right now, pre-allocated outputs are limited to the following:
  //   * Physical argument stack slots.
  //   * Physical registers.
  LAllocation output_;

  static const uint32_t TYPE_BITS = 4;
  static const uint32_t TYPE_SHIFT = 0;
  static const uint32_t TYPE_MASK = (1 << TYPE_BITS) - 1;
  static const uint32_t POLICY_BITS = 2;
  static const uint32_t POLICY_SHIFT = TYPE_SHIFT + TYPE_BITS;
  static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;

  static const uint32_t VREG_BITS =
      (sizeof(uint32_t) * 8) - (POLICY_BITS + TYPE_BITS);
  static const uint32_t VREG_SHIFT = POLICY_SHIFT + POLICY_BITS;
  static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;

 public:
  // Note that definitions, by default, are always allocated a register,
  // unless the policy specifies that an input can be re-used and that input
  // is a stack slot.
  enum Policy {
    // The policy is predetermined by the LAllocation attached to this
    // definition. The allocation may be:
    //   * A register, which may not appear as any fixed temporary.
    //   * A stack slot or argument.
    //
    // Register allocation will not modify a fixed allocation.
    FIXED,

    // A random register of an appropriate class will be assigned.
    REGISTER,

    // An area on the stack must be assigned. Used when defining stack results
    // and stack result areas.
    STACK,

    // One definition per instruction must re-use the first input
    // allocation, which (for now) must be a register.
    MUST_REUSE_INPUT
  };

  enum Type {
    GENERAL,       // Generic, integer or pointer-width data (GPR).
    INT32,         // int32 data (GPR).
    OBJECT,        // Pointer that may be collected as garbage (GPR).
    SLOTS,         // Slots/elements/wasm array data pointer that may be moved
                   // by minor GCs (GPR).
    WASM_ANYREF,   // Tagged pointer that may be collected as garbage (GPR).
    FLOAT32,       // 32-bit floating-point value (FPU).
    DOUBLE,        // 64-bit floating-point value (FPU).
    SIMD128,       // 128-bit SIMD vector (FPU).
    STACKRESULTS,  // A variable-size stack allocation that may contain objects.
#ifdef JS_NUNBOX32
    // A type virtual register must be followed by a payload virtual
    // register, as both will be tracked as a single gcthing.
    TYPE,
    PAYLOAD
#else
    BOX  // Joined box, for punbox systems. (GPR, gcthing)
#endif
  };

  void set(uint32_t index, Type type, Policy policy) {
    static_assert(MAX_VIRTUAL_REGISTERS <= VREG_MASK);
    bits_ =
        (index << VREG_SHIFT) | (policy << POLICY_SHIFT) | (type << TYPE_SHIFT);
#ifndef ENABLE_WASM_SIMD
    MOZ_ASSERT(this->type() != SIMD128);
#endif
  }

 public:
  LDefinition(uint32_t index, Type type, Policy policy = REGISTER) {
    set(index, type, policy);
  }

  explicit LDefinition(Type type, Policy policy = REGISTER) {
    set(0, type, policy);
  }

  LDefinition(Type type, const LAllocation& a) : output_(a) {
    set(0, type, FIXED);
  }

  LDefinition(uint32_t index, Type type, const LAllocation& a) : output_(a) {
    set(index, type, FIXED);
  }

  LDefinition() : bits_(0) { MOZ_ASSERT(isBogusTemp()); }

  static LDefinition BogusTemp() { return LDefinition(); }

  Policy policy() const {
    return (Policy)((bits_ >> POLICY_SHIFT) & POLICY_MASK);
  }
  Type type() const { return (Type)((bits_ >> TYPE_SHIFT) & TYPE_MASK); }

  static bool isFloatRegCompatible(Type type, FloatRegister reg) {
#ifdef JS_CODEGEN_RISCV64
    if (type == FLOAT32 || type == DOUBLE) {
      return reg.isSingle() || reg.isDouble();
    }
#else
    if (type == FLOAT32) {
      return reg.isSingle();
    }
    if (type == DOUBLE) {
      return reg.isDouble();
    }
#endif
    MOZ_ASSERT(type == SIMD128);
    return reg.isSimd128();
  }

  bool isCompatibleReg(const AnyRegister& r) const {
    if (isFloatReg() && r.isFloat()) {
      return isFloatRegCompatible(type(), r.fpu());
    }
    return !isFloatReg() && !r.isFloat();
  }

  bool isCompatibleDef(const LDefinition& other) const {
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
    if (isFloatReg() && other.isFloatReg()) {
      return type() == other.type();
    }
    return !isFloatReg() && !other.isFloatReg();
#else
    return isFloatReg() == other.isFloatReg();
#endif
  }

  static bool isFloatReg(Type type) {
    return type == FLOAT32 || type == DOUBLE || type == SIMD128;
  }
  bool isFloatReg() const { return isFloatReg(type()); }

  uint32_t virtualRegister() const {
    uint32_t index = (bits_ >> VREG_SHIFT) & VREG_MASK;
    // MOZ_ASSERT(index != 0);
    return index;
  }
  LAllocation* output() { return &output_; }
  const LAllocation* output() const { return &output_; }
  bool isFixed() const { return policy() == FIXED; }
  bool isBogusTemp() const { return isFixed() && output()->isBogus(); }
  void setVirtualRegister(uint32_t index) {
    MOZ_ASSERT(index < VREG_MASK);
    bits_ &= ~(VREG_MASK << VREG_SHIFT);
    bits_ |= index << VREG_SHIFT;
  }
  void setOutput(const LAllocation& a) {
    output_ = a;
    if (!a.isUse()) {
      bits_ &= ~(POLICY_MASK << POLICY_SHIFT);
      bits_ |= FIXED << POLICY_SHIFT;
    }
  }
  void setReusedInput(uint32_t operand) {
    output_ = LConstantIndex::FromIndex(operand);
  }
  uint32_t getReusedInput() const {
    MOZ_ASSERT(policy() == LDefinition::MUST_REUSE_INPUT);
    return output_.toConstantIndex()->index();
  }

  static inline Type TypeFrom(MIRType type) {
    switch (type) {
      case MIRType::Boolean:
      case MIRType::Int32:
        // The stack slot allocator doesn't currently support allocating
        // 1-byte slots, so for now we lower MIRType::Boolean into INT32.
        static_assert(sizeof(bool) <= sizeof(int32_t),
                      "bool doesn't fit in an int32 slot");
        return LDefinition::INT32;
      case MIRType::String:
      case MIRType::Symbol:
      case MIRType::BigInt:
      case MIRType::Object:
        return LDefinition::OBJECT;
      case MIRType::Double:
        return LDefinition::DOUBLE;
      case MIRType::Float32:
        return LDefinition::FLOAT32;
#if defined(JS_PUNBOX64)
      case MIRType::Value:
        return LDefinition::BOX;
#endif
      case MIRType::Slots:
      case MIRType::Elements:
      case MIRType::WasmArrayData:
        return LDefinition::SLOTS;
      case MIRType::WasmAnyRef:
        return LDefinition::WASM_ANYREF;
      case MIRType::Pointer:
      case MIRType::IntPtr:
        return LDefinition::GENERAL;
#if defined(JS_PUNBOX64)
      case MIRType::Int64:
        return LDefinition::GENERAL;
#endif
      case MIRType::StackResults:
        return LDefinition::STACKRESULTS;
      case MIRType::Simd128:
        return LDefinition::SIMD128;
      default:
        MOZ_CRASH("unexpected type");
    }
  }

  UniqueChars toString() const;

#ifdef JS_JITSPEW
  void dump() const;
#endif
};
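// Illustrative sketch (not part of this header): TypeFrom collapses the MIR
// type lattice onto the small set of storage classes the register allocator
// distinguishes. For example:
//
//   LDefinition::TypeFrom(MIRType::Boolean)  // == LDefinition::INT32
//   LDefinition::TypeFrom(MIRType::Object)   // == LDefinition::OBJECT
//   LDefinition::TypeFrom(MIRType::Double)   // == LDefinition::DOUBLE
//
// A typical register-allocated definition for an int32 result would then be
// LDefinition(vreg, LDefinition::INT32, LDefinition::REGISTER).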
class LInt64Definition : public LInt64Value<LDefinition> {
 public:
  using LInt64Value<LDefinition>::LInt64Value;

  static LInt64Definition BogusTemp() { return LInt64Definition(); }

  bool isBogusTemp() const {
#if JS_BITS_PER_WORD == 32
    MOZ_ASSERT(high().isBogusTemp() == low().isBogusTemp());
    return high().isBogusTemp();
#else
    return value().isBogusTemp();
#endif
  }
};

// Forward declarations of LIR types.
#define LIROP(op) class L##op;
LIR_OPCODE_LIST(LIROP)
#undef LIROP

class LSnapshot;
class LSafepoint;
class LElementVisitor;

constexpr size_t MaxNumLInstructionOperands = 63;
// The common base class for LPhi and LInstruction.
class LNode {
 protected:
  MDefinition* mir_;

 private:
  LBlock* block_;
  uint32_t id_;

 protected:
  // Bitfields below are all uint32_t to make sure MSVC packs them correctly.
  uint32_t op_ : 10;
  uint32_t isCall_ : 1;

  // LPhi::numOperands() may not fit in this bitfield, so we only use this
  // field for LInstruction.
  uint32_t nonPhiNumOperands_ : 6;
  static_assert((1 << 6) - 1 == MaxNumLInstructionOperands,
                "packing constraints");

  // For LInstruction, the first operand is stored at offset
  // sizeof(LInstruction) + nonPhiOperandsOffset_ * sizeof(uintptr_t).
  uint32_t nonPhiOperandsOffset_ : 5;
  uint32_t numDefs_ : 4;
  uint32_t numTemps_ : 4;

 public:
  enum class Opcode {
#define LIROP(name) name,
    LIR_OPCODE_LIST(LIROP)
#undef LIROP
        Invalid
  };

  LNode(Opcode op, uint32_t nonPhiNumOperands, uint32_t numDefs,
        uint32_t numTemps)
      : mir_(nullptr),
        block_(nullptr),
        id_(0),
        op_(uint32_t(op)),
        isCall_(false),
        nonPhiNumOperands_(nonPhiNumOperands),
        nonPhiOperandsOffset_(0),
        numDefs_(numDefs),
        numTemps_(numTemps) {
    MOZ_ASSERT(op < Opcode::Invalid);
    MOZ_ASSERT(op_ == uint32_t(op), "opcode must fit in bitfield");
    MOZ_ASSERT(nonPhiNumOperands_ == nonPhiNumOperands,
               "nonPhiNumOperands must fit in bitfield");
    MOZ_ASSERT(numDefs_ == numDefs, "numDefs must fit in bitfield");
    MOZ_ASSERT(numTemps_ == numTemps, "numTemps must fit in bitfield");
  }

  const char* opName() {
    switch (op()) {
#define LIR_NAME_INS(name) \
  case Opcode::name:       \
    return #name;
      LIR_OPCODE_LIST(LIR_NAME_INS)
#undef LIR_NAME_INS
      default:
        MOZ_CRASH("Invalid op");
    }
  }

  // Hook for opcodes to add extra high level detail about what code will be
  // emitted for the op.
 private:
  const char* extraName() const { return nullptr; }

 public:
#ifdef JS_JITSPEW
  const char* getExtraName() const;
#endif

  Opcode op() const { return Opcode(op_); }

  bool isInstruction() const { return op() != Opcode::Phi; }
  inline LInstruction* toInstruction();
  inline const LInstruction* toInstruction() const;

  // Returns the number of outputs of this instruction. If an output is
  // unallocated, it is an LDefinition, defining a virtual register.
  size_t numDefs() const { return numDefs_; }

  bool isCall() const { return isCall_; }

  // Does this call preserve the given register?
  // By default, it is assumed that all registers are clobbered by a call.
  inline bool isCallPreserved(AnyRegister reg) const;

  uint32_t id() const { return id_; }
  void setId(uint32_t id) {
    MOZ_ASSERT(!id_);
    MOZ_ASSERT(id);
    id_ = id;
  }
  void setMir(MDefinition* mir) { mir_ = mir; }
  MDefinition* mirRaw() const {
    /* Untyped MIR for this op. Prefer mir() methods in subclasses. */
    return mir_;
  }
  LBlock* block() const { return block_; }
  void setBlock(LBlock* block) { block_ = block; }

  // For an instruction which has a MUST_REUSE_INPUT output, whether that
  // output register will be restored to its original value when bailing out.
  inline bool recoversInput() const;

#ifdef JS_JITSPEW
  void dump(GenericPrinter& out);
  void dump();
  static void printName(GenericPrinter& out, Opcode op);
  void printName(GenericPrinter& out);
  void printOperands(GenericPrinter& out);
#endif

 public:
  // Opcode testing and casts.
#define LIROP(name)                                       \
  bool is##name() const { return op() == Opcode::name; }  \
  inline L##name* to##name();                             \
  inline const L##name* to##name() const;
  LIR_OPCODE_LIST(LIROP)
#undef LIROP

// Note: GenerateOpcodeFiles.py generates LIROpsGenerated.h based on this
// macro.
#define LIR_HEADER(opcode) \
  static constexpr LNode::Opcode classOpcode = LNode::Opcode::opcode;
};
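// Illustrative sketch (not part of this header): LIR_HEADER(Foo) expands to
//
//   static constexpr LNode::Opcode classOpcode = LNode::Opcode::Foo;
//
// so every concrete opcode class carries its Opcode as a compile-time
// constant, which the helper constructors below pass up to LNode. The opcode
// name must appear in LIR_OPCODE_LIST for the enum entry and the
// is##name()/to##name() casts to exist.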
extern const char* const LIROpNames[];
inline const char* LIRCodeName(LNode::Opcode op) {
  return LIROpNames[static_cast<size_t>(op)];
}

class LInstruction : public LNode,
                     public TempObject,
                     public InlineListNode<LInstruction> {
  // This snapshot could be set after a ResumePoint. It is used to restart
  // from the resume point pc.
  LSnapshot* snapshot_;

  // Structure capturing the set of stack slots and registers which are known
  // to hold either gcthings or Values.
  LSafepoint* safepoint_;

  LMoveGroup* inputMoves_;
  LMoveGroup* fixReuseMoves_;
  LMoveGroup* movesAfter_;

 protected:
  LInstruction(Opcode opcode, uint32_t numOperands, uint32_t numDefs,
               uint32_t numTemps)
      : LNode(opcode, numOperands, numDefs, numTemps),
        snapshot_(nullptr),
        safepoint_(nullptr),
        inputMoves_(nullptr),
        fixReuseMoves_(nullptr),
        movesAfter_(nullptr) {}

  void setIsCall() { isCall_ = true; }

 public:
  inline LDefinition* getDef(size_t index);

  void setDef(size_t index, const LDefinition& def) { *getDef(index) = def; }

  LAllocation* getOperand(size_t index) const {
    MOZ_ASSERT(index < numOperands());
    MOZ_ASSERT(nonPhiOperandsOffset_ > 0);
    uintptr_t p = reinterpret_cast<uintptr_t>(this + 1) +
                  nonPhiOperandsOffset_ * sizeof(uintptr_t);
    return reinterpret_cast<LAllocation*>(p) + index;
  }
  void setOperand(size_t index, const LAllocation& a) {
    *getOperand(index) = a;
  }

  void initOperandsOffset(size_t offset) {
    MOZ_ASSERT(nonPhiOperandsOffset_ == 0);
    MOZ_ASSERT(offset >= sizeof(LInstruction));
    MOZ_ASSERT(((offset - sizeof(LInstruction)) % sizeof(uintptr_t)) == 0);
    offset = (offset - sizeof(LInstruction)) / sizeof(uintptr_t);
    nonPhiOperandsOffset_ = offset;
    MOZ_ASSERT(nonPhiOperandsOffset_ == offset, "offset must fit in bitfield");
  }

  // Returns information about temporary registers needed. Each temporary
  // register is an LDefinition with a fixed or virtual register and
  // either GENERAL, FLOAT32, or DOUBLE type.
  size_t numTemps() const { return numTemps_; }
  inline LDefinition* getTemp(size_t index);

  LSnapshot* snapshot() const { return snapshot_; }
  LSafepoint* safepoint() const { return safepoint_; }
  LMoveGroup* inputMoves() const { return inputMoves_; }
  void setInputMoves(LMoveGroup* moves) { inputMoves_ = moves; }
  LMoveGroup* fixReuseMoves() const { return fixReuseMoves_; }
  void setFixReuseMoves(LMoveGroup* moves) { fixReuseMoves_ = moves; }
  LMoveGroup* movesAfter() const { return movesAfter_; }
  void setMovesAfter(LMoveGroup* moves) { movesAfter_ = moves; }
  uint32_t numOperands() const { return nonPhiNumOperands_; }
  void assignSnapshot(LSnapshot* snapshot);
  void initSafepoint(TempAllocator& alloc);

  class InputIterator;
};
LInstruction* LNode::toInstruction() {
  MOZ_ASSERT(isInstruction());
  return static_cast<LInstruction*>(this);
}

const LInstruction* LNode::toInstruction() const {
  MOZ_ASSERT(isInstruction());
  return static_cast<const LInstruction*>(this);
}

class LElementVisitor {
#ifdef TRACK_SNAPSHOTS
  LInstruction* ins_ = nullptr;
#endif

 protected:
#ifdef TRACK_SNAPSHOTS
  LInstruction* instruction() { return ins_; }

  void setElement(LInstruction* ins) { ins_ = ins; }
#else
  void setElement(LInstruction* ins) {}
#endif
};

using LInstructionIterator = InlineList<LInstruction>::iterator;
using LInstructionReverseIterator = InlineList<LInstruction>::reverse_iterator;

class MPhi;

// Phi is a pseudo-instruction that emits no code, and is an annotation for the
// register allocator. Like its equivalent in MIR, phis are collected at the
// top of blocks and are meant to be executed in parallel, choosing the input
// corresponding to the predecessor taken in the control flow graph.
class LPhi final : public LNode {
  LAllocation* const inputs_;
  LDefinition def_;

 public:
  LIR_HEADER(Phi)

  LPhi(MPhi* ins, LAllocation* inputs)
      : LNode(classOpcode,
              /* nonPhiNumOperands = */ 0,
              /* numDefs = */ 1,
              /* numTemps = */ 0),
        inputs_(inputs) {
    setMir(ins);
  }

  LDefinition* getDef(size_t index) {
    MOZ_ASSERT(index == 0);
    return &def_;
  }
  void setDef(size_t index, const LDefinition& def) {
    MOZ_ASSERT(index == 0);
    def_ = def;
  }
  size_t numOperands() const { return mir_->toPhi()->numOperands(); }
  LAllocation* getOperand(size_t index) {
    MOZ_ASSERT(index < numOperands());
    return &inputs_[index];
  }
  void setOperand(size_t index, const LAllocation& a) {
    MOZ_ASSERT(index < numOperands());
    inputs_[index] = a;
  }

  // Phis don't have temps, so calling numTemps/getTemp is pointless.
  size_t numTemps() const = delete;
  LDefinition* getTemp(size_t index) = delete;
};
class LMoveGroup;
class LBlock {
  MBasicBlock* block_;
  FixedList<LPhi> phis_;
  InlineList<LInstruction> instructions_;
  LMoveGroup* entryMoveGroup_;
  LMoveGroup* exitMoveGroup_;
  Label label_;

 public:
  explicit LBlock(MBasicBlock* block);
  [[nodiscard]] bool init(TempAllocator& alloc);

  void add(LInstruction* ins) {
    ins->setBlock(this);
    instructions_.pushBack(ins);
  }
  size_t numPhis() const { return phis_.length(); }
  LPhi* getPhi(size_t index) { return &phis_[index]; }
  const LPhi* getPhi(size_t index) const { return &phis_[index]; }
  MBasicBlock* mir() const { return block_; }
  LInstructionIterator begin() { return instructions_.begin(); }
  LInstructionIterator begin(LInstruction* at) {
    return instructions_.begin(at);
  }
  LInstructionIterator end() { return instructions_.end(); }
  LInstructionReverseIterator rbegin() { return instructions_.rbegin(); }
  LInstructionReverseIterator rbegin(LInstruction* at) {
    return instructions_.rbegin(at);
  }
  LInstructionReverseIterator rend() { return instructions_.rend(); }
  InlineList<LInstruction>& instructions() { return instructions_; }
  void insertAfter(LInstruction* at, LInstruction* ins) {
    instructions_.insertAfter(at, ins);
  }
  void insertBefore(LInstruction* at, LInstruction* ins) {
    instructions_.insertBefore(at, ins);
  }
  const LNode* firstElementWithId() const {
    return !phis_.empty() ? static_cast<const LNode*>(getPhi(0))
                          : firstInstructionWithId();
  }
  uint32_t firstId() const { return firstElementWithId()->id(); }
  uint32_t lastId() const { return lastInstructionWithId()->id(); }
  const LInstruction* firstInstructionWithId() const;
  const LInstruction* lastInstructionWithId() const {
    const LInstruction* last = *instructions_.rbegin();
    MOZ_ASSERT(last->id());
    // The last instruction is a control flow instruction which does not have
    // any output.
    MOZ_ASSERT(last->numDefs() == 0);
    return last;
  }

  // Return the label to branch to when branching to this block.
  Label* label() {
    MOZ_ASSERT(!isTrivial());
    return &label_;
  }

  LMoveGroup* getEntryMoveGroup(TempAllocator& alloc);
  LMoveGroup* getExitMoveGroup(TempAllocator& alloc);

  // Test whether this basic block is empty except for a simple goto, and
  // which is not forming a loop. No code will be emitted for such blocks.
  bool isTrivial() { return begin()->isGoto() && !mir()->isLoopHeader(); }

#ifdef JS_JITSPEW
  void dump(GenericPrinter& out);
  void dump();
#endif
};
namespace details {
template <size_t Defs, size_t Temps>
class LInstructionFixedDefsTempsHelper : public LInstruction {
  mozilla::Array<LDefinition, Defs + Temps> defsAndTemps_;

 protected:
  LInstructionFixedDefsTempsHelper(Opcode opcode, uint32_t numOperands)
      : LInstruction(opcode, numOperands, Defs, Temps) {}

 public:
  // Override the methods in LInstruction with more optimized versions
  // for when we know the exact instruction type.
  LDefinition* getDef(size_t index) {
    MOZ_ASSERT(index < Defs);
    return &defsAndTemps_[index];
  }
  LDefinition* getTemp(size_t index) {
    MOZ_ASSERT(index < Temps);
    return &defsAndTemps_[Defs + index];
  }
  LInt64Definition getInt64Temp(size_t index) {
    MOZ_ASSERT(index + INT64_PIECES <= Temps);
#if JS_BITS_PER_WORD == 32
    return LInt64Definition(defsAndTemps_[Defs + index + INT64HIGH_INDEX],
                            defsAndTemps_[Defs + index + INT64LOW_INDEX]);
#else
    return LInt64Definition(defsAndTemps_[Defs + index]);
#endif
  }

  void setDef(size_t index, const LDefinition& def) {
    MOZ_ASSERT(index < Defs);
    defsAndTemps_[index] = def;
  }
  void setTemp(size_t index, const LDefinition& a) {
    MOZ_ASSERT(index < Temps);
    defsAndTemps_[Defs + index] = a;
  }
  void setInt64Temp(size_t index, const LInt64Definition& a) {
#if JS_BITS_PER_WORD == 32
    setTemp(index, a.low());
    setTemp(index + 1, a.high());
#else
    setTemp(index, a.value());
#endif
  }

  // Default accessors, assuming a single input and output, respectively.
  const LAllocation* input() {
    MOZ_ASSERT(numOperands() == 1);
    return getOperand(0);
  }
  const LDefinition* output() {
    MOZ_ASSERT(numDefs() == 1);
    return getDef(0);
  }
  static size_t offsetOfDef(size_t index) {
    using T = LInstructionFixedDefsTempsHelper<0, 0>;
    return offsetof(T, defsAndTemps_) + index * sizeof(LDefinition);
  }
  static size_t offsetOfTemp(uint32_t numDefs, uint32_t index) {
    using T = LInstructionFixedDefsTempsHelper<0, 0>;
    return offsetof(T, defsAndTemps_) + (numDefs + index) * sizeof(LDefinition);
  }
};
}  // namespace details

inline LDefinition* LInstruction::getDef(size_t index) {
  MOZ_ASSERT(index < numDefs());
  using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
  uint8_t* p = reinterpret_cast<uint8_t*>(this) + T::offsetOfDef(index);
  return reinterpret_cast<LDefinition*>(p);
}

inline LDefinition* LInstruction::getTemp(size_t index) {
  MOZ_ASSERT(index < numTemps());
  using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
  uint8_t* p =
      reinterpret_cast<uint8_t*>(this) + T::offsetOfTemp(numDefs(), index);
  return reinterpret_cast<LDefinition*>(p);
}
template <size_t Defs, size_t Operands, size_t Temps>
class LInstructionHelper
    : public details::LInstructionFixedDefsTempsHelper<Defs, Temps> {
  mozilla::Array<LAllocation, Operands> operands_;

 protected:
  explicit LInstructionHelper(LNode::Opcode opcode)
      : details::LInstructionFixedDefsTempsHelper<Defs, Temps>(opcode,
                                                               Operands) {
    static_assert(
        Operands == 0 || sizeof(operands_) == Operands * sizeof(LAllocation),
        "mozilla::Array should not contain other fields");
    if (Operands > 0) {
      using T = LInstructionHelper<Defs, Operands, Temps>;
      this->initOperandsOffset(offsetof(T, operands_));
    }
  }

 public:
  // Override the methods in LInstruction with more optimized versions
  // for when we know the exact instruction type.
  LAllocation* getOperand(size_t index) { return &operands_[index]; }
  void setOperand(size_t index, const LAllocation& a) { operands_[index] = a; }
  void setBoxOperand(size_t index, const LBoxAllocation& alloc) {
#ifdef JS_NUNBOX32
    operands_[index + TYPE_INDEX] = alloc.type();
    operands_[index + PAYLOAD_INDEX] = alloc.payload();
#else
    operands_[index] = alloc.value();
#endif
  }
  void setInt64Operand(size_t index, const LInt64Allocation& alloc) {
#if JS_BITS_PER_WORD == 32
    operands_[index + INT64LOW_INDEX] = alloc.low();
    operands_[index + INT64HIGH_INDEX] = alloc.high();
#else
    operands_[index] = alloc.value();
#endif
  }
  const LInt64Allocation getInt64Operand(size_t offset) {
#if JS_BITS_PER_WORD == 32
    return LInt64Allocation(operands_[offset + INT64HIGH_INDEX],
                            operands_[offset + INT64LOW_INDEX]);
#else
    return LInt64Allocation(operands_[offset]);
#endif
  }
};
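// Illustrative sketch (not part of this header): a hypothetical binary int32
// opcode with one def, two operands, and no temps. Real opcodes are declared
// elsewhere (e.g. LIR-shared.h) and must be listed in LIR_OPCODE_LIST; the
// name ExampleAddI here is made up.
//
//   class LExampleAddI : public LInstructionHelper<1, 2, 0> {
//    public:
//     LIR_HEADER(ExampleAddI)
//
//     LExampleAddI() : LInstructionHelper(classOpcode) {}
//
//     const LAllocation* lhs() { return getOperand(0); }
//     const LAllocation* rhs() { return getOperand(1); }
//   };
//
// The LIRGenerator would then fill in the operands with LUse policies and
// the def with an LDefinition before handing the instruction to the
// register allocator.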
template <size_t Defs, size_t Temps>
class LVariadicInstruction
    : public details::LInstructionFixedDefsTempsHelper<Defs, Temps> {
 protected:
  LVariadicInstruction(LNode::Opcode opcode, size_t numOperands)
      : details::LInstructionFixedDefsTempsHelper<Defs, Temps>(opcode,
                                                               numOperands) {}

 public:
  void setBoxOperand(size_t index, const LBoxAllocation& a) {
#ifdef JS_NUNBOX32
    this->setOperand(index + TYPE_INDEX, a.type());
    this->setOperand(index + PAYLOAD_INDEX, a.payload());
#else
    this->setOperand(index, a.value());
#endif
  }
};

template <size_t Defs, size_t Operands, size_t Temps>
class LCallInstructionHelper
    : public LInstructionHelper<Defs, Operands, Temps> {
 protected:
  explicit LCallInstructionHelper(LNode::Opcode opcode)
      : LInstructionHelper<Defs, Operands, Temps>(opcode) {
    this->setIsCall();
  }
};

template <size_t Defs, size_t Temps>
class LBinaryCallInstructionHelper
    : public LCallInstructionHelper<Defs, 2, Temps> {
 protected:
  explicit LBinaryCallInstructionHelper(LNode::Opcode opcode)
      : LCallInstructionHelper<Defs, 2, Temps>(opcode) {}

 public:
  const LAllocation* lhs() { return this->getOperand(0); }
  const LAllocation* rhs() { return this->getOperand(1); }
};
class LRecoverInfo : public TempObject {
 public:
  typedef Vector<MNode*, 2, JitAllocPolicy> Instructions;

 private:
  // List of instructions needed to recover the stack frames.
  // Outer frames are stored before inner frames.
  Instructions instructions_;

  // Cached offset where this resume point is encoded.
  RecoverOffset recoverOffset_;

  // Whether this LRecoverInfo has any side-effect associated with it.
  bool hasSideEffects_ = false;

  explicit LRecoverInfo(TempAllocator& alloc);
  [[nodiscard]] bool init(MResumePoint* mir);

  // Fill the instruction vector such that all instructions needed for the
  // recovery are pushed before the current instruction.
  template <typename Node>
  [[nodiscard]] bool appendOperands(Node* ins);
  [[nodiscard]] bool appendDefinition(MDefinition* def);
  [[nodiscard]] bool appendResumePoint(MResumePoint* rp);

 public:
  static LRecoverInfo* New(MIRGenerator* gen, MResumePoint* mir);

  // Resume point of the innermost function.
  MResumePoint* mir() const { return instructions_.back()->toResumePoint(); }
  RecoverOffset recoverOffset() const { return recoverOffset_; }
  void setRecoverOffset(RecoverOffset offset) {
    MOZ_ASSERT(recoverOffset_ == INVALID_RECOVER_OFFSET);
    recoverOffset_ = offset;
  }

  MNode** begin() { return instructions_.begin(); }
  MNode** end() { return instructions_.end(); }
  size_t numInstructions() const { return instructions_.length(); }
  bool hasSideEffects() { return hasSideEffects_; }

  class OperandIter {
   private:
    MNode** it_;
    MNode** end_;
    size_t op_;
    size_t opEnd_;
    MResumePoint* rp_;
    MNode* node_;

   public:
    explicit OperandIter(LRecoverInfo* recoverInfo)
        : it_(recoverInfo->begin()),
          end_(recoverInfo->end()),
          op_(0),
          opEnd_(0),
          rp_(nullptr),
          node_(nullptr) {
      settle();
    }

    void settle() {
      opEnd_ = (*it_)->numOperands();
      while (opEnd_ == 0) {
        ++it_;
        op_ = 0;
        opEnd_ = (*it_)->numOperands();
      }
      node_ = *it_;
      if (node_->isResumePoint()) {
        rp_ = node_->toResumePoint();
      }
    }

    MDefinition* operator*() {
      if (rp_) {  // de-virtualize MResumePoint::getOperand calls.
        return rp_->getOperand(op_);
      }
      return node_->getOperand(op_);
    }
    MDefinition* operator->() {
      if (rp_) {  // de-virtualize MResumePoint::getOperand calls.
        return rp_->getOperand(op_);
      }
      return node_->getOperand(op_);
    }

    OperandIter& operator++() {
      ++op_;
      if (op_ != opEnd_) {
        return *this;
      }
      op_ = 0;
      ++it_;
      node_ = rp_ = nullptr;
      if (!*this) {
        settle();
      }
      return *this;
    }

    explicit operator bool() const { return it_ == end_; }

#ifdef DEBUG
    bool canOptimizeOutIfUnused();
#endif
  };
};
// An LSnapshot is the reflection of an MResumePoint in LIR. Unlike
// MResumePoints, they cannot be shared, as they are filled in by the register
// allocator in order to capture the precise low-level stack state in between
// an instruction's input and output. During code generation, LSnapshots are
// compressed and saved in the compiled script.
class LSnapshot : public TempObject {
 private:
  LAllocation* slots_;
  LRecoverInfo* recoverInfo_;
  SnapshotOffset snapshotOffset_;
  uint32_t numSlots_;
  BailoutKind bailoutKind_;

  LSnapshot(LRecoverInfo* recover, BailoutKind kind);
  [[nodiscard]] bool init(MIRGenerator* gen);

 public:
  static LSnapshot* New(MIRGenerator* gen, LRecoverInfo* recover,
                        BailoutKind kind);

  size_t numEntries() const { return numSlots_; }
  size_t numSlots() const { return numSlots_ / BOX_PIECES; }
  LAllocation* payloadOfSlot(size_t i) {
    MOZ_ASSERT(i < numSlots());
    size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 1);
    return getEntry(entryIndex);
  }
#ifdef JS_NUNBOX32
  LAllocation* typeOfSlot(size_t i) {
    MOZ_ASSERT(i < numSlots());
    size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 2);
    return getEntry(entryIndex);
  }
#endif
  LAllocation* getEntry(size_t i) {
    MOZ_ASSERT(i < numSlots_);
    return &slots_[i];
  }
  void setEntry(size_t i, const LAllocation& alloc) {
    MOZ_ASSERT(i < numSlots_);
    slots_[i] = alloc;
  }
  LRecoverInfo* recoverInfo() const { return recoverInfo_; }
  MResumePoint* mir() const { return recoverInfo()->mir(); }
  SnapshotOffset snapshotOffset() const { return snapshotOffset_; }
  void setSnapshotOffset(SnapshotOffset offset) {
    MOZ_ASSERT(snapshotOffset_ == INVALID_SNAPSHOT_OFFSET);
    snapshotOffset_ = offset;
  }
  BailoutKind bailoutKind() const { return bailoutKind_; }
  void rewriteRecoveredInput(LUse input);
};
struct SafepointSlotEntry {
  // Flag indicating whether this is a slot in the stack or argument space.
  uint32_t stack : 1;

  // Byte offset of the slot, as in LStackSlot or LArgument.
  uint32_t slot : 31;

  SafepointSlotEntry() : stack(0), slot(0) {}
  SafepointSlotEntry(bool stack, uint32_t slot) : stack(stack), slot(slot) {}
  explicit SafepointSlotEntry(const LAllocation* a)
      : stack(a->isStackSlot()), slot(a->memorySlot()) {}
};

struct SafepointNunboxEntry {
  uint32_t typeVreg;
  LAllocation type;
  LAllocation payload;

  SafepointNunboxEntry() : typeVreg(0) {}
  SafepointNunboxEntry(uint32_t typeVreg, LAllocation type, LAllocation payload)
      : typeVreg(typeVreg), type(type), payload(payload) {}
};

enum class WasmSafepointKind : uint8_t {
  // For wasm call instructions (isCall() == true) where registers are spilled
  // by register allocation.
  LirCall,
  // For wasm instructions (isCall() == false) which will spill/restore live
  // registers manually in codegen.
  CodegenCall,
  // For resumable wasm traps where registers will be spilled by the trap
  // handler.
  Trap,
};
class LSafepoint : public TempObject {
  using SlotEntry = SafepointSlotEntry;
  using NunboxEntry = SafepointNunboxEntry;

 public:
  typedef Vector<SlotEntry, 0, JitAllocPolicy> SlotList;
  typedef Vector<NunboxEntry, 0, JitAllocPolicy> NunboxList;

 private:
  // The information in a safepoint describes the registers and gc related
  // values that are live at the start of the associated instruction.
  //
  // The set of registers which are live at an OOL call made within the
  // instruction. This includes any registers for inputs which are not
  // use-at-start, any registers for temps, and any registers live after the
  // call except outputs of the instruction.
  //
  // For call instructions, the live regs are empty. Call instructions may
  // have register inputs or temporaries, which will *not* be in the live
  // registers: if passed to the call, the values passed will be marked via
  // TraceJitExitFrame, and no registers can be live after the instruction
  // except its outputs.
  LiveRegisterSet liveRegs_;

  // The subset of liveRegs which contains gcthing pointers.
  LiveGeneralRegisterSet gcRegs_;

#ifdef CHECK_OSIPOINT_REGISTERS
  // Clobbered regs of the current instruction. This set is never written to
  // the safepoint; it's only used by assertions during compilation.
  LiveRegisterSet clobberedRegs_;
#endif

  // Offset to a position in the safepoint stream, or
  // INVALID_SAFEPOINT_OFFSET.
  uint32_t safepointOffset_;

  // Assembler buffer displacement to OSI point's call location.
  uint32_t osiCallPointOffset_;

  // List of slots which have gcthing pointers.
  SlotList gcSlots_;

#ifdef JS_NUNBOX32
  // List of registers (in liveRegs) and slots which contain pieces of Values.
  NunboxList nunboxParts_;
#elif JS_PUNBOX64
  // List of slots which have Values.
  SlotList valueSlots_;

  // The subset of liveRegs which have Values.
  LiveGeneralRegisterSet valueRegs_;
#endif

  // The subset of liveRegs which contains pointers to slots/elements.
  LiveGeneralRegisterSet slotsOrElementsRegs_;

  // List of slots which have slots/elements pointers.
  SlotList slotsOrElementsSlots_;

  // The subset of liveRegs which contains wasm::AnyRef's.
  LiveGeneralRegisterSet wasmAnyRefRegs_;
  // List of slots which have wasm::AnyRef's.
  SlotList wasmAnyRefSlots_;

  // Wasm only: with what kind of instruction is this LSafepoint associated?
  WasmSafepointKind wasmSafepointKind_;

  // Wasm only: what is the value of masm.framePushed() that corresponds to
  // the lowest-addressed word covered by the StackMap that we will generate
  // from this LSafepoint? This depends on the instruction:
  //
  // WasmSafepointKind::LirCall:
  //   masm.framePushed() - StackArgAreaSizeUnaligned(arg types for the call),
  //   because the map does not include the outgoing args themselves, but
  //   it does cover any and all alignment space above them.
  //
  // WasmSafepointKind::CodegenCall and WasmSafepointKind::Trap:
  //   masm.framePushed() unmodified. Note that when constructing the
  //   StackMap we will add entries below this point to take account of
  //   registers dumped on the stack.
  uint32_t framePushedAtStackMapBase_;

 public:
  void assertInvariants() {
    // Every register in valueRegs and gcRegs should also be in liveRegs.
#ifndef JS_NUNBOX32
    MOZ_ASSERT((valueRegs().bits() & ~liveRegs().gprs().bits()) == 0);
#endif
    MOZ_ASSERT((gcRegs().bits() & ~liveRegs().gprs().bits()) == 0);
    MOZ_ASSERT((wasmAnyRefRegs().bits() & ~liveRegs().gprs().bits()) == 0);
  }
  explicit LSafepoint(TempAllocator& alloc)
      : safepointOffset_(INVALID_SAFEPOINT_OFFSET),
        osiCallPointOffset_(0),
        gcSlots_(alloc),
#ifdef JS_NUNBOX32
        nunboxParts_(alloc),
#else
        valueSlots_(alloc),
#endif
        slotsOrElementsSlots_(alloc),
        wasmAnyRefSlots_(alloc),
        wasmSafepointKind_(WasmSafepointKind::LirCall),
        framePushedAtStackMapBase_(0) {
    assertInvariants();
  }
  void addLiveRegister(AnyRegister reg) {
    liveRegs_.addUnchecked(reg);
    assertInvariants();
  }
  const LiveRegisterSet& liveRegs() const { return liveRegs_; }
#ifdef CHECK_OSIPOINT_REGISTERS
  void addClobberedRegister(AnyRegister reg) {
    clobberedRegs_.addUnchecked(reg);
    assertInvariants();
  }
  const LiveRegisterSet& clobberedRegs() const { return clobberedRegs_; }
#endif
  void addGcRegister(Register reg) {
    gcRegs_.addUnchecked(reg);
    assertInvariants();
  }
  LiveGeneralRegisterSet gcRegs() const { return gcRegs_; }
  [[nodiscard]] bool addGcSlot(bool stack, uint32_t slot) {
    bool result = gcSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  SlotList& gcSlots() { return gcSlots_; }

  SlotList& slotsOrElementsSlots() { return slotsOrElementsSlots_; }
  LiveGeneralRegisterSet slotsOrElementsRegs() const {
    return slotsOrElementsRegs_;
  }
  void addSlotsOrElementsRegister(Register reg) {
    slotsOrElementsRegs_.addUnchecked(reg);
    assertInvariants();
  }
  [[nodiscard]] bool addSlotsOrElementsSlot(bool stack, uint32_t slot) {
    bool result = slotsOrElementsSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  [[nodiscard]] bool addSlotsOrElementsPointer(LAllocation alloc) {
    if (alloc.isMemory()) {
      return addSlotsOrElementsSlot(alloc.isStackSlot(), alloc.memorySlot());
    }
    MOZ_ASSERT(alloc.isRegister());
    addSlotsOrElementsRegister(alloc.toRegister().gpr());
    assertInvariants();
    return true;
  }
  bool hasSlotsOrElementsPointer(LAllocation alloc) const {
    if (alloc.isRegister()) {
      return slotsOrElementsRegs().has(alloc.toRegister().gpr());
    }
    for (size_t i = 0; i < slotsOrElementsSlots_.length(); i++) {
      const SlotEntry& entry = slotsOrElementsSlots_[i];
      if (entry.stack == alloc.isStackSlot() &&
          entry.slot == alloc.memorySlot()) {
        return true;
      }
    }
    return false;
  }

  [[nodiscard]] bool addGcPointer(LAllocation alloc) {
    if (alloc.isMemory()) {
      return addGcSlot(alloc.isStackSlot(), alloc.memorySlot());
    }
    if (alloc.isRegister()) {
      addGcRegister(alloc.toRegister().gpr());
    }
    assertInvariants();
    return true;
  }

  bool hasGcPointer(LAllocation alloc) const {
    if (alloc.isRegister()) {
      return gcRegs().has(alloc.toRegister().gpr());
    }
    MOZ_ASSERT(alloc.isMemory());
    for (size_t i = 0; i < gcSlots_.length(); i++) {
      if (gcSlots_[i].stack == alloc.isStackSlot() &&
          gcSlots_[i].slot == alloc.memorySlot()) {
        return true;
      }
    }
    return false;
  }

  void addWasmAnyRefReg(Register reg) {
    wasmAnyRefRegs_.addUnchecked(reg);
    assertInvariants();
  }
  LiveGeneralRegisterSet wasmAnyRefRegs() const { return wasmAnyRefRegs_; }

  [[nodiscard]] bool addWasmAnyRefSlot(bool stack, uint32_t slot) {
    bool result = wasmAnyRefSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  SlotList& wasmAnyRefSlots() { return wasmAnyRefSlots_; }

  [[nodiscard]] bool addWasmAnyRef(LAllocation alloc) {
    if (alloc.isMemory()) {
      return addWasmAnyRefSlot(alloc.isStackSlot(), alloc.memorySlot());
    }
    if (alloc.isRegister()) {
      addWasmAnyRefReg(alloc.toRegister().gpr());
    }
    assertInvariants();
    return true;
  }
  bool hasWasmAnyRef(LAllocation alloc) const {
    if (alloc.isRegister()) {
      return wasmAnyRefRegs().has(alloc.toRegister().gpr());
    }
    MOZ_ASSERT(alloc.isMemory());
    for (size_t i = 0; i < wasmAnyRefSlots_.length(); i++) {
      if (wasmAnyRefSlots_[i].stack == alloc.isStackSlot() &&
          wasmAnyRefSlots_[i].slot == alloc.memorySlot()) {
        return true;
      }
    }
    return false;
  }

  // Return true if all GC-managed pointers from `alloc` are recorded in this
  // safepoint.
  bool hasAllWasmAnyRefsFromStackArea(LAllocation alloc) const {
    for (LStackArea::ResultIterator iter = alloc.toStackArea()->results(); iter;
         iter.next()) {
      if (iter.isWasmAnyRef() && !hasWasmAnyRef(iter.alloc())) {
        return false;
      }
    }
    return true;
  }
#ifdef JS_NUNBOX32
  [[nodiscard]] bool addNunboxParts(uint32_t typeVreg, LAllocation type,
                                    LAllocation payload) {
    bool result = nunboxParts_.append(NunboxEntry(typeVreg, type, payload));
    if (result) {
      assertInvariants();
    }
    return result;
  }

  [[nodiscard]] bool addNunboxType(uint32_t typeVreg, LAllocation type) {
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].type == type) {
        return true;
      }
      if (nunboxParts_[i].type == LUse(typeVreg, LUse::ANY)) {
        nunboxParts_[i].type = type;
        return true;
      }
    }

    // vregs for nunbox pairs are adjacent, with the type coming first.
    uint32_t payloadVreg = typeVreg + 1;
    bool result = nunboxParts_.append(
        NunboxEntry(typeVreg, type, LUse(payloadVreg, LUse::ANY)));
    if (result) {
      assertInvariants();
    }
    return result;
  }

  [[nodiscard]] bool addNunboxPayload(uint32_t payloadVreg,
                                      LAllocation payload) {
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].payload == payload) {
        return true;
      }
      if (nunboxParts_[i].payload == LUse(payloadVreg, LUse::ANY)) {
        nunboxParts_[i].payload = payload;
        return true;
      }
    }

    // vregs for nunbox pairs are adjacent, with the type coming first.
    uint32_t typeVreg = payloadVreg - 1;
    bool result = nunboxParts_.append(
        NunboxEntry(typeVreg, LUse(typeVreg, LUse::ANY), payload));
    if (result) {
      assertInvariants();
    }
    return result;
  }

  LAllocation findTypeAllocation(uint32_t typeVreg) {
    // Look for some allocation for the specified type vreg, to go with a
    // partial nunbox entry for the payload. Note that we don't need to
    // look at the value slots in the safepoint, as these aren't used by
    // register allocators which add partial nunbox entries.
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].typeVreg == typeVreg &&
          !nunboxParts_[i].type.isUse()) {
        return nunboxParts_[i].type;
      }
    }
    return LUse(typeVreg, LUse::ANY);
  }

#  ifdef DEBUG
  bool hasNunboxPayload(LAllocation payload) const {
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].payload == payload) {
        return true;
      }
    }
    return false;
  }
#  endif

  NunboxList& nunboxParts() { return nunboxParts_; }
1755 #elif JS_PUNBOX64
  [[nodiscard]] bool addValueSlot(bool stack, uint32_t slot) {
    bool result = valueSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  SlotList& valueSlots() { return valueSlots_; }

  bool hasValueSlot(bool stack, uint32_t slot) const {
    for (size_t i = 0; i < valueSlots_.length(); i++) {
      if (valueSlots_[i].stack == stack && valueSlots_[i].slot == slot) {
        return true;
      }
    }
    return false;
  }

  void addValueRegister(Register reg) {
    valueRegs_.add(reg);
    assertInvariants();
  }
  LiveGeneralRegisterSet valueRegs() const { return valueRegs_; }

  [[nodiscard]] bool addBoxedValue(LAllocation alloc) {
    if (alloc.isRegister()) {
      Register reg = alloc.toRegister().gpr();
      if (!valueRegs().has(reg)) {
        addValueRegister(reg);
      }
      return true;
    }
    if (hasValueSlot(alloc.isStackSlot(), alloc.memorySlot())) {
      return true;
    }
    return addValueSlot(alloc.isStackSlot(), alloc.memorySlot());
  }

  bool hasBoxedValue(LAllocation alloc) const {
    if (alloc.isRegister()) {
      return valueRegs().has(alloc.toRegister().gpr());
    }
    return hasValueSlot(alloc.isStackSlot(), alloc.memorySlot());
  }
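
  // Illustrative sketch (not part of the original header): on 64-bit targets
  // a boxed Value is a single allocation, so tracking it either adds its GPR
  // to valueRegs_ or records its (stack, slot) location. `safepoint` and
  // `alloc` are hypothetical.
  //
  //   if (!safepoint->addBoxedValue(alloc)) {
  //     return false;  // OOM
  //   }
  //   MOZ_ASSERT(safepoint->hasBoxedValue(alloc));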

#endif  // JS_PUNBOX64

  bool encoded() const { return safepointOffset_ != INVALID_SAFEPOINT_OFFSET; }
  uint32_t offset() const {
    MOZ_ASSERT(encoded());
    return safepointOffset_;
  }
  void setOffset(uint32_t offset) { safepointOffset_ = offset; }
  uint32_t osiReturnPointOffset() const {
    // In general, pointer arithmetic on code is bad, but in this case it is
    // the right thing: the patchable near call emitted at the OSI call point
    // has a fixed size, so no constant pool can sit between the call and its
    // return point.
    return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
  }
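  // For example (hypothetical numbers): with osiCallPointOffset_ == 0x40 and
  // a 5-byte patchable near call, osiReturnPointOffset() returns 0x45.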
  uint32_t osiCallPointOffset() const { return osiCallPointOffset_; }
  void setOsiCallPointOffset(uint32_t osiCallPointOffset) {
    MOZ_ASSERT(!osiCallPointOffset_);
    osiCallPointOffset_ = osiCallPointOffset;
  }

  WasmSafepointKind wasmSafepointKind() const { return wasmSafepointKind_; }
  void setWasmSafepointKind(WasmSafepointKind kind) {
    wasmSafepointKind_ = kind;
  }

  // See comment on framePushedAtStackMapBase_.
  uint32_t framePushedAtStackMapBase() const {
    return framePushedAtStackMapBase_;
  }
  void setFramePushedAtStackMapBase(uint32_t n) {
    MOZ_ASSERT(framePushedAtStackMapBase_ == 0);
    framePushedAtStackMapBase_ = n;
  }
};

struct WasmRefIsSubtypeDefs {
  LAllocation superSTV;
  LDefinition scratch1;
  LDefinition scratch2;
};

class LInstruction::InputIterator {
 private:
  LInstruction& ins_;
  size_t idx_;
  bool snapshot_;

  void handleOperandsEnd() {
    // Switch to iterating the snapshot entries once all operands have been
    // visited.
    if (!snapshot_ && idx_ == ins_.numOperands() && ins_.snapshot()) {
      idx_ = 0;
      snapshot_ = true;
    }
  }

 public:
  explicit InputIterator(LInstruction& ins)
      : ins_(ins), idx_(0), snapshot_(false) {
    handleOperandsEnd();
  }

  bool more() const {
    if (snapshot_) {
      return idx_ < ins_.snapshot()->numEntries();
    }
    if (idx_ < ins_.numOperands()) {
      return true;
    }
    if (ins_.snapshot() && ins_.snapshot()->numEntries()) {
      return true;
    }
    return false;
  }

  bool isSnapshotInput() const { return snapshot_; }

  void next() {
    MOZ_ASSERT(more());
    idx_++;
    handleOperandsEnd();
  }

  void replace(const LAllocation& alloc) {
    if (snapshot_) {
      ins_.snapshot()->setEntry(idx_, alloc);
    } else {
      ins_.setOperand(idx_, alloc);
    }
  }

  LAllocation* operator*() const {
    if (snapshot_) {
      return ins_.snapshot()->getEntry(idx_);
    }
    return ins_.getOperand(idx_);
  }

  LAllocation* operator->() const { return **this; }
};
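
// Illustrative sketch (not part of the original header): walking every input
// allocation of an instruction, snapshot entries included, and rewriting any
// that match. `ins`, `oldAlloc`, and `newAlloc` are hypothetical.
//
//   for (LInstruction::InputIterator iter(*ins); iter.more(); iter.next()) {
//     if (**iter == oldAlloc) {
//       iter.replace(newAlloc);
//     }
//   }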

class LIRGraph {
  struct ValueHasher {
    using Lookup = Value;
    static HashNumber hash(const Value& v) { return HashNumber(v.asRawBits()); }
    static bool match(const Value& lhs, const Value& rhs) { return lhs == rhs; }
  };

  FixedList<LBlock> blocks_;

  // constantPool_ is a mozilla::Vector, not a js::Vector, because
  // js::Vector<Value> is prohibited as unsafe. This particular Vector of
  // Values is safe because it is only used within the scope of an
  // AutoSuppressGC (in IonCompile), which inhibits GC.
  mozilla::Vector<Value, 0, JitAllocPolicy> constantPool_;
  typedef HashMap<Value, uint32_t, ValueHasher, JitAllocPolicy> ConstantPoolMap;
  ConstantPoolMap constantPoolMap_;
  Vector<LInstruction*, 0, JitAllocPolicy> safepoints_;
  Vector<LInstruction*, 0, JitAllocPolicy> nonCallSafepoints_;
  uint32_t numVirtualRegisters_;
  uint32_t numInstructions_;

  // Size of stack slots needed for local spills.
  uint32_t localSlotsSize_;
  // Number of JS::Value stack slots needed for argument construction for
  // calls.
  uint32_t argumentSlotCount_;

  MIRGraph& mir_;

 public:
  explicit LIRGraph(MIRGraph* mir);

  [[nodiscard]] bool init() {
    return blocks_.init(mir_.alloc(), mir_.numBlocks());
  }
  MIRGraph& mir() const { return mir_; }
  size_t numBlocks() const { return blocks_.length(); }
  LBlock* getBlock(size_t i) { return &blocks_[i]; }
  uint32_t numBlockIds() const { return mir_.numBlockIds(); }
  [[nodiscard]] bool initBlock(MBasicBlock* mir) {
    auto* block = &blocks_[mir->id()];
    auto* lir = new (block) LBlock(mir);
    return lir->init(mir_.alloc());
  }
  uint32_t getVirtualRegister() {
    numVirtualRegisters_ += VREG_INCREMENT;
    return numVirtualRegisters_;
  }
  uint32_t numVirtualRegisters() const {
    // Virtual registers are 1-based, not 0-based, so add one as a
    // convenience for 0-based arrays.
    return numVirtualRegisters_ + 1;
  }
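  // For example: the first getVirtualRegister() call returns vreg 1, so after
  // allocating vregs 1..n, numVirtualRegisters() is n + 1 and a 0-based array
  // of that length can be indexed directly by vreg id.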
  uint32_t getInstructionId() { return numInstructions_++; }
  uint32_t numInstructions() const { return numInstructions_; }
  void setLocalSlotsSize(uint32_t localSlotsSize) {
    localSlotsSize_ = localSlotsSize;
  }
  uint32_t localSlotsSize() const { return localSlotsSize_; }
  void setArgumentSlotCount(uint32_t argumentSlotCount) {
    argumentSlotCount_ = argumentSlotCount;
  }
  uint32_t argumentSlotCount() const { return argumentSlotCount_; }
  [[nodiscard]] bool addConstantToPool(const Value& v, uint32_t* index);
  size_t numConstants() const { return constantPool_.length(); }
  Value* constantPool() { return &constantPool_[0]; }
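
  // Illustrative sketch (not part of the original header): addConstantToPool
  // is expected to deduplicate through constantPoolMap_, so pooling the same
  // Value twice should report the same index. `lir` is hypothetical.
  //
  //   uint32_t first, second;
  //   if (!lir->addConstantToPool(Int32Value(7), &first) ||
  //       !lir->addConstantToPool(Int32Value(7), &second)) {
  //     return false;  // OOM
  //   }
  //   MOZ_ASSERT(first == second);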

  bool noteNeedsSafepoint(LInstruction* ins);
  size_t numNonCallSafepoints() const { return nonCallSafepoints_.length(); }
  LInstruction* getNonCallSafepoint(size_t i) const {
    return nonCallSafepoints_[i];
  }
  size_t numSafepoints() const { return safepoints_.length(); }
  LInstruction* getSafepoint(size_t i) const { return safepoints_[i]; }

#ifdef JS_JITSPEW
  void dump(GenericPrinter& out);
  void dump();
#endif
};

LAllocation::LAllocation(AnyRegister reg) {
  if (reg.isFloat()) {
    *this = LFloatReg(reg.fpu());
  } else {
    *this = LGeneralReg(reg.gpr());
  }
}

AnyRegister LAllocation::toRegister() const {
  MOZ_ASSERT(isRegister());
  if (isFloatReg()) {
    return AnyRegister(toFloatReg()->reg());
  }
  return AnyRegister(toGeneralReg()->reg());
}
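
// For example (hypothetical register): constructing LAllocation from
// AnyRegister(someGpr) yields an allocation for which isGeneralReg() holds,
// and toRegister().gpr() round-trips back to someGpr.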

}  // namespace jit
}  // namespace js

#include "jit/shared/LIR-shared.h"
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
#  if defined(JS_CODEGEN_X86)
#    include "jit/x86/LIR-x86.h"
#  elif defined(JS_CODEGEN_X64)
#    include "jit/x64/LIR-x64.h"
#  endif
#  include "jit/x86-shared/LIR-x86-shared.h"
#elif defined(JS_CODEGEN_ARM)
#  include "jit/arm/LIR-arm.h"
#elif defined(JS_CODEGEN_ARM64)
#  include "jit/arm64/LIR-arm64.h"
#elif defined(JS_CODEGEN_LOONG64)
#  include "jit/loong64/LIR-loong64.h"
#elif defined(JS_CODEGEN_RISCV64)
#  include "jit/riscv64/LIR-riscv64.h"
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
#  if defined(JS_CODEGEN_MIPS32)
#    include "jit/mips32/LIR-mips32.h"
#  elif defined(JS_CODEGEN_MIPS64)
#    include "jit/mips64/LIR-mips64.h"
#  endif
#  include "jit/mips-shared/LIR-mips-shared.h"
#elif defined(JS_CODEGEN_WASM32)
#  include "jit/wasm32/LIR-wasm32.h"
#elif defined(JS_CODEGEN_NONE)
#  include "jit/none/LIR-none.h"
#else
#  error "Unknown architecture!"
#endif

#undef LIR_HEADER

namespace js {
namespace jit {

#define LIROP(name)                            \
  L##name* LNode::to##name() {                 \
    MOZ_ASSERT(is##name());                    \
    return static_cast<L##name*>(this);        \
  }                                            \
  const L##name* LNode::to##name() const {     \
    MOZ_ASSERT(is##name());                    \
    return static_cast<const L##name*>(this);  \
  }
LIR_OPCODE_LIST(LIROP)
#undef LIROP
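
// For a hypothetical opcode Foo, LIROP(Foo) expands to this pair of checked
// downcasts:
//
//   LFoo* LNode::toFoo() {
//     MOZ_ASSERT(isFoo());
//     return static_cast<LFoo*>(this);
//   }
//   const LFoo* LNode::toFoo() const {
//     MOZ_ASSERT(isFoo());
//     return static_cast<const LFoo*>(this);
//   }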

#define LALLOC_CAST(type)                \
  L##type* LAllocation::to##type() {     \
    MOZ_ASSERT(is##type());              \
    return static_cast<L##type*>(this);  \
  }
#define LALLOC_CONST_CAST(type)                   \
  const L##type* LAllocation::to##type() const {  \
    MOZ_ASSERT(is##type());                       \
    return static_cast<const L##type*>(this);     \
  }

LALLOC_CAST(Use)
LALLOC_CONST_CAST(Use)
LALLOC_CONST_CAST(GeneralReg)
LALLOC_CONST_CAST(FloatReg)
LALLOC_CONST_CAST(StackSlot)
LALLOC_CAST(StackArea)
LALLOC_CONST_CAST(StackArea)
LALLOC_CONST_CAST(Argument)
LALLOC_CONST_CAST(ConstantIndex)

#undef LALLOC_CAST
#undef LALLOC_CONST_CAST

}  // namespace jit
}  // namespace js

#endif /* jit_LIR_h */