Bug 1885489 - Part 5: Add SnapshotIterator::readInt32(). r=iain
[gecko.git] / js / src / jit / RegisterAllocator.h
blob805bc7e77d78058b35309af885f1ed709bd3f34b
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_RegisterAllocator_h
#define jit_RegisterAllocator_h

#include "mozilla/MathAlgorithms.h"

#include "jit/LIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
// Generic structures and functions for use by register allocators.

namespace js {
namespace jit {

class LIRGenerator;
#ifdef DEBUG
// Structure for running a liveness analysis on a finished register allocation.
// This analysis can be used for two purposes:
//
// - Check the integrity of the allocation, i.e. that the reads and writes of
//   physical values preserve the semantics of the original virtual registers.
//
// - Populate safepoints with live registers, GC thing and value data, to
//   streamline the process of prototyping new allocators.
struct AllocationIntegrityState {
  explicit AllocationIntegrityState(LIRGraph& graph) : graph(graph) {}

  // Record all virtual registers in the graph. This must be called before
  // register allocation, to pick up the original LUses.
  [[nodiscard]] bool record();

  // Perform the liveness analysis on the graph, and assert on an invalid
  // allocation. This must be called after register allocation, to pick up
  // all assigned physical values.
  [[nodiscard]] bool check();

 private:
  LIRGraph& graph;

  // For all instructions and phis in the graph, keep track of the virtual
  // registers for all inputs and outputs of the nodes. These are overwritten
  // in place during register allocation. This information is kept on the
  // side rather than in the instructions and phis themselves to avoid
  // debug-builds-only bloat in the size of the involved structures.

  struct InstructionInfo {
    Vector<LAllocation, 2, SystemAllocPolicy> inputs;
    Vector<LDefinition, 0, SystemAllocPolicy> temps;
    Vector<LDefinition, 1, SystemAllocPolicy> outputs;

    InstructionInfo() = default;

    // Copying must deep-copy the vectors; OOM during this bookkeeping is
    // treated as unrecoverable (debug-only data structure).
    InstructionInfo(const InstructionInfo& o) {
      AutoEnterOOMUnsafeRegion oomUnsafe;
      if (!inputs.appendAll(o.inputs) || !temps.appendAll(o.temps) ||
          !outputs.appendAll(o.outputs)) {
        oomUnsafe.crash("InstructionInfo::InstructionInfo");
      }
    }
  };
  Vector<InstructionInfo, 0, SystemAllocPolicy> instructions;

  struct BlockInfo {
    Vector<InstructionInfo, 5, SystemAllocPolicy> phis;
    BlockInfo() = default;
    BlockInfo(const BlockInfo& o) {
      AutoEnterOOMUnsafeRegion oomUnsafe;
      if (!phis.appendAll(o.phis)) {
        oomUnsafe.crash("BlockInfo::BlockInfo");
      }
    }
  };
  Vector<BlockInfo, 0, SystemAllocPolicy> blocks;

  Vector<LDefinition*, 20, SystemAllocPolicy> virtualRegisters;

  // Describes a correspondence that should hold at the end of a block.
  // The value which was written to vreg in the original LIR should be
  // physically stored in alloc after the register allocation.
  struct IntegrityItem {
    LBlock* block;
    uint32_t vreg;
    LAllocation alloc;

    // Order of insertion into seen, for sorting.
    uint32_t index;

    using Lookup = IntegrityItem;
    static HashNumber hash(const IntegrityItem& item) {
      HashNumber hash = item.alloc.hash();
      hash = mozilla::RotateLeft(hash, 4) ^ item.vreg;
      hash = mozilla::RotateLeft(hash, 4) ^ HashNumber(item.block->mir()->id());
      return hash;
    }
    static bool match(const IntegrityItem& one, const IntegrityItem& two) {
      return one.block == two.block && one.vreg == two.vreg &&
             one.alloc == two.alloc;
    }
  };

  // Items still to be processed.
  Vector<IntegrityItem, 10, SystemAllocPolicy> worklist;

  // Set of all items that have already been processed.
  typedef HashSet<IntegrityItem, IntegrityItem, SystemAllocPolicy>
      IntegrityItemSet;
  IntegrityItemSet seen;

  [[nodiscard]] bool checkIntegrity(LBlock* block, LInstruction* ins,
                                    uint32_t vreg, LAllocation alloc);
  void checkSafepointAllocation(LInstruction* ins, uint32_t vreg,
                                LAllocation alloc);
  [[nodiscard]] bool addPredecessor(LBlock* block, uint32_t vreg,
                                    LAllocation alloc);

  void dump();
};
#endif  // DEBUG
127 // Represents with better-than-instruction precision a position in the
128 // instruction stream.
130 // An issue comes up when performing register allocation as to how to represent
131 // information such as "this register is only needed for the input of
132 // this instruction, it can be clobbered in the output". Just having ranges
133 // of instruction IDs is insufficiently expressive to denote all possibilities.
134 // This class solves this issue by associating an extra bit with the instruction
135 // ID which indicates whether the position is the input half or output half of
136 // an instruction.
137 class CodePosition {
138 private:
139 constexpr explicit CodePosition(uint32_t bits) : bits_(bits) {}
141 static const unsigned int INSTRUCTION_SHIFT = 1;
142 static const unsigned int SUBPOSITION_MASK = 1;
143 uint32_t bits_;
145 public:
146 static const CodePosition MAX;
147 static const CodePosition MIN;
149 // This is the half of the instruction this code position represents, as
150 // described in the huge comment above.
151 enum SubPosition { INPUT, OUTPUT };
153 constexpr CodePosition() : bits_(0) {}
155 CodePosition(uint32_t instruction, SubPosition where) {
156 MOZ_ASSERT(instruction < 0x80000000u);
157 MOZ_ASSERT(((uint32_t)where & SUBPOSITION_MASK) == (uint32_t)where);
158 bits_ = (instruction << INSTRUCTION_SHIFT) | (uint32_t)where;
161 uint32_t ins() const { return bits_ >> INSTRUCTION_SHIFT; }
163 uint32_t bits() const { return bits_; }
165 SubPosition subpos() const { return (SubPosition)(bits_ & SUBPOSITION_MASK); }
167 bool operator<(CodePosition other) const { return bits_ < other.bits_; }
169 bool operator<=(CodePosition other) const { return bits_ <= other.bits_; }
171 bool operator!=(CodePosition other) const { return bits_ != other.bits_; }
173 bool operator==(CodePosition other) const { return bits_ == other.bits_; }
175 bool operator>(CodePosition other) const { return bits_ > other.bits_; }
177 bool operator>=(CodePosition other) const { return bits_ >= other.bits_; }
179 uint32_t operator-(CodePosition other) const {
180 MOZ_ASSERT(bits_ >= other.bits_);
181 return bits_ - other.bits_;
184 CodePosition previous() const {
185 MOZ_ASSERT(*this != MIN);
186 return CodePosition(bits_ - 1);
188 CodePosition next() const {
189 MOZ_ASSERT(*this != MAX);
190 return CodePosition(bits_ + 1);
194 // Structure to track all moves inserted next to instructions in a graph.
195 class InstructionDataMap {
196 FixedList<LNode*> insData_;
198 public:
199 InstructionDataMap() {}
201 [[nodiscard]] bool init(MIRGenerator* gen, uint32_t numInstructions) {
202 if (!insData_.init(gen->alloc(), numInstructions)) {
203 return false;
205 memset(&insData_[0], 0, sizeof(LNode*) * numInstructions);
206 return true;
209 LNode*& operator[](CodePosition pos) { return operator[](pos.ins()); }
210 LNode* const& operator[](CodePosition pos) const {
211 return operator[](pos.ins());
213 LNode*& operator[](uint32_t ins) { return insData_[ins]; }
214 LNode* const& operator[](uint32_t ins) const { return insData_[ins]; }
217 // Common superclass for register allocators.
218 class RegisterAllocator {
219 void operator=(const RegisterAllocator&) = delete;
220 RegisterAllocator(const RegisterAllocator&) = delete;
222 protected:
223 // Context
224 MIRGenerator* mir;
225 LIRGenerator* lir;
226 LIRGraph& graph;
228 // Pool of all registers that should be considered allocateable
229 AllocatableRegisterSet allRegisters_;
231 // Computed data
232 InstructionDataMap insData;
233 Vector<CodePosition, 12, SystemAllocPolicy> entryPositions;
234 Vector<CodePosition, 12, SystemAllocPolicy> exitPositions;
236 RegisterAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
237 : mir(mir), lir(lir), graph(graph), allRegisters_(RegisterSet::All()) {
238 MOZ_ASSERT(!allRegisters_.has(FramePointer));
239 if (mir->compilingWasm()) {
240 takeWasmRegisters(allRegisters_);
244 [[nodiscard]] bool init();
246 TempAllocator& alloc() const { return mir->alloc(); }
248 CodePosition outputOf(const LNode* ins) const {
249 return ins->isPhi() ? outputOf(ins->toPhi())
250 : outputOf(ins->toInstruction());
252 CodePosition outputOf(const LPhi* ins) const {
253 // All phis in a block write their outputs after all of them have
254 // read their inputs. Consequently, it doesn't make sense to talk
255 // about code positions in the middle of a series of phis.
256 LBlock* block = ins->block();
257 return CodePosition(block->getPhi(block->numPhis() - 1)->id(),
258 CodePosition::OUTPUT);
260 CodePosition outputOf(const LInstruction* ins) const {
261 return CodePosition(ins->id(), CodePosition::OUTPUT);
263 CodePosition inputOf(const LNode* ins) const {
264 return ins->isPhi() ? inputOf(ins->toPhi()) : inputOf(ins->toInstruction());
266 CodePosition inputOf(const LPhi* ins) const {
267 // All phis in a block read their inputs before any of them write their
268 // outputs. Consequently, it doesn't make sense to talk about code
269 // positions in the middle of a series of phis.
270 return CodePosition(ins->block()->getPhi(0)->id(), CodePosition::INPUT);
272 CodePosition inputOf(const LInstruction* ins) const {
273 return CodePosition(ins->id(), CodePosition::INPUT);
275 CodePosition entryOf(const LBlock* block) {
276 return entryPositions[block->mir()->id()];
278 CodePosition exitOf(const LBlock* block) {
279 return exitPositions[block->mir()->id()];
282 LMoveGroup* getInputMoveGroup(LInstruction* ins);
283 LMoveGroup* getFixReuseMoveGroup(LInstruction* ins);
284 LMoveGroup* getMoveGroupAfter(LInstruction* ins);
286 // Atomic group helper. See comments in BacktrackingAllocator.cpp.
287 CodePosition minimalDefEnd(LNode* ins) const;
289 void dumpInstructions(const char* who);
291 public:
292 template <typename TakeableSet>
293 static void takeWasmRegisters(TakeableSet& regs) {
294 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
295 defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
296 defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64) || \
297 defined(JS_CODEGEN_RISCV64)
298 regs.take(HeapReg);
299 #endif
300 MOZ_ASSERT(!regs.has(FramePointer));
304 static inline AnyRegister GetFixedRegister(const LDefinition* def,
305 const LUse* use) {
306 return def->isFloatReg()
307 ? AnyRegister(FloatRegister::FromCode(use->registerCode()))
308 : AnyRegister(Register::FromCode(use->registerCode()));
311 } // namespace jit
312 } // namespace js
314 #endif /* jit_RegisterAllocator_h */