/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_BacktrackingAllocator_h
#define jit_BacktrackingAllocator_h

#include "mozilla/Array.h"
#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"

#include "ds/AvlTree.h"
#include "ds/PriorityQueue.h"
#include "jit/RegisterAllocator.h"
#include "jit/StackSlotAllocator.h"

// Gives better traces in Nightly/debug builds (could be EARLY_BETA_OR_EARLIER)
#if defined(NIGHTLY_BUILD) || defined(DEBUG)
#  define AVOID_INLINE_FOR_DEBUGGING MOZ_NEVER_INLINE
#else
#  define AVOID_INLINE_FOR_DEBUGGING
#endif

// A backtracking, priority-queue-based register allocator, based on the one
// described in the following blog post:
//
// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html

namespace js {
namespace jit {

class Requirement {
 public:
  enum Kind { NONE, REGISTER, FIXED };

  Requirement() : kind_(NONE) {}

  explicit Requirement(Kind kind) : kind_(kind) {
    // FIXED has a dedicated constructor.
    MOZ_ASSERT(kind != FIXED);
  }

  explicit Requirement(LAllocation fixed) : kind_(FIXED), allocation_(fixed) {
    MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
  }

  Kind kind() const { return kind_; }

  LAllocation allocation() const {
    MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
    return allocation_;
  }

  [[nodiscard]] bool merge(const Requirement& newRequirement) {
    // Merge newRequirement with any existing requirement, returning false
    // if the new and old requirements conflict.

    if (newRequirement.kind() == Requirement::FIXED) {
      if (kind() == Requirement::FIXED) {
        return newRequirement.allocation() == allocation();
      }
      *this = newRequirement;
      return true;
    }

    MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
    if (kind() == Requirement::FIXED) {
      return allocation().isRegister();
    }

    *this = newRequirement;
    return true;
  }

 private:
  Kind kind_;
  LAllocation allocation_;
};

struct UsePosition : public TempObject,
                     public InlineForwardListNode<UsePosition> {
 private:
  // A UsePosition is an LUse* with a CodePosition. UsePosition also has an
  // optimization that allows access to the associated LUse::Policy without
  // dereferencing memory: the policy is encoded in the low bits of the LUse*.
  //
  // Note however that because LUse* is uintptr_t-aligned, on 32-bit systems
  // there are only 4 encodable values, for more than 4 use policies; in that
  // case we allocate the common LUse::ANY, LUse::REGISTER, and LUse::FIXED use
  // policies to tags, and use tag 0x3 to indicate that dereferencing the LUse
  // is necessary to get the policy (KEEPALIVE or STACK, in that case).
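  //
  // Illustrative sketch of the tagging (the concrete values are hypothetical,
  // not taken from LIR.h): if LUse::REGISTER happened to have the enum value 1
  // and an LUse lived at the word-aligned address 0x7ffc1230, setUse() would
  // store
  //   use_ = 0x7ffc1230 | 0x1 == 0x7ffc1231
  // so that use() recovers the pointer with (use_ & UseMask) and, on 64-bit
  // builds, usePolicy() recovers the policy with (use_ & PolicyMask) without
  // ever dereferencing the LUse.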
  uintptr_t use_;
  static_assert(LUse::ANY < 0x3,
                "LUse::ANY can be represented in low tag on 32-bit systems");
  static_assert(LUse::REGISTER < 0x3,
                "LUse::REGISTER can be represented in tag on 32-bit systems");
  static_assert(LUse::FIXED < 0x3,
                "LUse::FIXED can be represented in tag on 32-bit systems");

  static constexpr uintptr_t PolicyMask = sizeof(uintptr_t) - 1;
  static constexpr uintptr_t UseMask = ~PolicyMask;

  void setUse(LUse* use) {
    // RECOVERED_INPUT is used by snapshots and ignored when building the
    // liveness information. Thus we can safely assume that no such value
    // would be seen.
    MOZ_ASSERT(use->policy() != LUse::RECOVERED_INPUT);

    uintptr_t policyBits = use->policy();
#ifndef JS_64BIT
    // On a 32-bit machine, LUse::KEEPALIVE and LUse::STACK are accessed by
    // dereferencing the use pointer.
    if (policyBits >= PolicyMask) {
      policyBits = PolicyMask;
    }
#endif
    use_ = uintptr_t(use) | policyBits;
    MOZ_ASSERT(use->policy() == usePolicy());
  }

 public:
  CodePosition pos;

  LUse* use() const { return reinterpret_cast<LUse*>(use_ & UseMask); }

  LUse::Policy usePolicy() const {
    uintptr_t bits = use_ & PolicyMask;
#ifndef JS_64BIT
    // On 32-bit machines, reach out to memory if it's LUse::KEEPALIVE or
    // LUse::STACK.
    if (bits == PolicyMask) {
      return use()->policy();
    }
#endif
    LUse::Policy policy = LUse::Policy(bits);
    MOZ_ASSERT(use()->policy() == policy);
    return policy;
  }

  UsePosition(LUse* use, CodePosition pos) : pos(pos) {
    // Verify that the usedAtStart() flag is consistent with the
    // subposition. For now ignore fixed registers, because they
    // are handled specially around calls.
    MOZ_ASSERT_IF(!use->isFixedRegister(),
                  pos.subpos() == (use->usedAtStart() ? CodePosition::INPUT
                                                      : CodePosition::OUTPUT));
    setUse(use);
  }
};

using UsePositionIterator = InlineForwardListIterator<UsePosition>;
155 // Backtracking allocator data structures overview.
157 // LiveRange: A continuous range of positions where a virtual register is live.
158 // LiveBundle: A set of LiveRanges which do not overlap.
159 // VirtualRegister: A set of all LiveRanges used for some LDefinition.
161 // The allocator first performs a liveness ananlysis on the LIR graph which
162 // constructs LiveRanges for each VirtualRegister, determining where the
163 // registers are live.
165 // The ranges are then bundled together according to heuristics, and placed on
166 // the allocation queue.
168 // As bundles are removed from the allocation queue, we attempt to find a
169 // physical register or stack slot allocation for all ranges in the removed
170 // bundle, possibly evicting already-allocated bundles. See processBundle()
171 // for details.
173 // If we are not able to allocate a bundle, it is split according to heuristics
174 // into two or more smaller bundles which cover all the ranges of the original.
175 // These smaller bundles are then allocated independently.
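//
// A rough sketch of that flow, in pseudocode (illustrative only; the real
// driver is go(), which calls into processBundle()):
//
//   buildLivenessInfo();          // build LiveRanges for each VirtualRegister
//   mergeAndQueueRegisters();     // bundle ranges and fill allocationQueue
//   while (!allocationQueue is empty) {
//     LiveBundle* bundle = /* highest-priority queue entry */;
//     processBundle(mir, bundle); // allocate; may evict other bundles or
//                                 // split this one into smaller bundles,
//                                 // which are re-queued
//   }
//   installAllocationsInLIR();    // rewrite the LIR with the chosen
//                                 // registers and stack slots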

class LiveBundle;
class VirtualRegister;

class LiveRange : public TempObject {
 public:
  // Linked lists are used to keep track of the ranges in each LiveBundle and
  // VirtualRegister. Since a LiveRange may be in two lists simultaneously, use
  // these auxiliary classes to keep things straight.
  class BundleLink : public InlineForwardListNode<BundleLink> {};
  class RegisterLink : public InlineForwardListNode<RegisterLink> {};

  using BundleLinkIterator = InlineForwardListIterator<BundleLink>;
  using RegisterLinkIterator = InlineForwardListIterator<RegisterLink>;

  // Links in the lists in LiveBundle and VirtualRegister.
  BundleLink bundleLink;
  RegisterLink registerLink;

  static LiveRange* get(BundleLink* link) {
    return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
                                        offsetof(LiveRange, bundleLink));
  }
  static LiveRange* get(RegisterLink* link) {
    return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
                                        offsetof(LiveRange, registerLink));
  }

  struct Range {
    // The beginning of this range, inclusive.
    CodePosition from;

    // The end of this range, exclusive.
    CodePosition to;

    Range() = default;

    Range(CodePosition from, CodePosition to) : from(from), to(to) {
      MOZ_ASSERT(!empty());
    }

    bool empty() {
      MOZ_ASSERT(from <= to);
      return from == to;
    }
  };

 private:
  // The virtual register this range is for, or nullptr if this does not have a
  // virtual register (for example, it is in the callRanges bundle).
  VirtualRegister* vreg_;

  // The bundle containing this range, null if liveness information is being
  // constructed and we haven't started allocating bundles yet.
  LiveBundle* bundle_;

  // The code positions in this range.
  Range range_;

  // All uses of the virtual register in this range, ordered by location.
  InlineForwardList<UsePosition> uses_;

  // Total spill weight, computed from the policies of all the uses in this
  // range. Because a use's policy can't change after initialization, we can
  // update the weight whenever a use is added to or removed from this range.
  // This way, we don't need to iterate over all the uses every time
  // computeSpillWeight() is called.
  size_t usesSpillWeight_;

  // Number of uses that have policy LUse::FIXED.
  uint32_t numFixedUses_;

  // Whether this range contains the virtual register's definition.
  bool hasDefinition_;

  LiveRange(VirtualRegister* vreg, Range range)
      : vreg_(vreg),
        bundle_(nullptr),
        range_(range),
        usesSpillWeight_(0),
        numFixedUses_(0),
        hasDefinition_(false) {
    MOZ_ASSERT(!range.empty());
  }

  void noteAddedUse(UsePosition* use);
  void noteRemovedUse(UsePosition* use);

 public:
  static LiveRange* FallibleNew(TempAllocator& alloc, VirtualRegister* vreg,
                                CodePosition from, CodePosition to) {
    return new (alloc.fallible()) LiveRange(vreg, Range(from, to));
  }

  VirtualRegister& vreg() const {
    MOZ_ASSERT(hasVreg());
    return *vreg_;
  }
  bool hasVreg() const { return vreg_ != nullptr; }

  LiveBundle* bundle() const { return bundle_; }

  CodePosition from() const { return range_.from; }
  CodePosition to() const { return range_.to; }
  bool covers(CodePosition pos) const { return pos >= from() && pos < to(); }

  // Whether this range wholly contains other.
  bool contains(LiveRange* other) const;

  // Intersect this range with other, returning the subranges of this
  // that are before, inside, or after other.
  void intersect(LiveRange* other, Range* pre, Range* inside,
                 Range* post) const;

  // Whether this range has any intersection with other.
  bool intersects(LiveRange* other) const;

  UsePositionIterator usesBegin() const { return uses_.begin(); }
  UsePosition* lastUse() const { return uses_.back(); }
  bool hasUses() const { return !!usesBegin(); }
  UsePosition* popUse();

  bool hasDefinition() const { return hasDefinition_; }

  void setFrom(CodePosition from) {
    range_.from = from;
    MOZ_ASSERT(!range_.empty());
  }
  void setTo(CodePosition to) {
    range_.to = to;
    MOZ_ASSERT(!range_.empty());
  }

  void setBundle(LiveBundle* bundle) { bundle_ = bundle; }

  void addUse(UsePosition* use);
  void tryToMoveDefAndUsesInto(LiveRange* other);

  void setHasDefinition() {
    MOZ_ASSERT(!hasDefinition_);
    hasDefinition_ = true;
  }

  size_t usesSpillWeight() { return usesSpillWeight_; }
  uint32_t numFixedUses() { return numFixedUses_; }

#ifdef JS_JITSPEW
  // Return a string describing this range.
  UniqueChars toString() const;
#endif

  // Comparator for use in AVL trees.
  static int compare(LiveRange* v0, LiveRange* v1) {
    // The denoted range includes 'from' but excludes 'to'.
    if (v0->to() <= v1->from()) {
      return -1;
    }
    if (v0->from() >= v1->to()) {
      return 1;
    }
    return 0;
  }
};

// LiveRangePlus is a simple wrapper around a LiveRange*. It caches the
// LiveRange*'s `.range_.from` and `.range_.to` CodePositions. The only
// purpose of this is to avoid some cache misses that would otherwise occur
// when comparing those fields in an AvlTree<LiveRange*, ..>. This measurably
// speeds up the allocator in some cases. See bug 1814204.

class LiveRangePlus {
  // The LiveRange we're wrapping.
  LiveRange* liveRange_;
  // Cached versions of liveRange_->range_.from and liveRange_->range_.to
  CodePosition from_;
  CodePosition to_;

 public:
  explicit LiveRangePlus(LiveRange* lr)
      : liveRange_(lr), from_(lr->from()), to_(lr->to()) {}
  LiveRangePlus() : liveRange_(nullptr) {}
  ~LiveRangePlus() {
    MOZ_ASSERT(liveRange_ ? from_ == liveRange_->from()
                          : from_ == CodePosition());
    MOZ_ASSERT(liveRange_ ? to_ == liveRange_->to() : to_ == CodePosition());
  }

  LiveRange* liveRange() const { return liveRange_; }

  // Comparator for use in AVL trees.
  static int compare(const LiveRangePlus& lrp0, const LiveRangePlus& lrp1) {
    // The denoted range includes 'from' but excludes 'to'.
    if (lrp0.to_ <= lrp1.from_) {
      return -1;
    }
    if (lrp0.from_ >= lrp1.to_) {
      return 1;
    }
    return 0;
  }
};

// Make sure there are no alignment holes or vtable present. Per bug 1814204,
// it's important that this structure is as small as possible.
static_assert(sizeof(LiveRangePlus) ==
              sizeof(LiveRange*) + 2 * sizeof(CodePosition));

// Tracks information about bundles that should all be spilled to the same
// physical location. At the beginning of allocation, each bundle has its own
// spill set. As bundles are split, the new smaller bundles continue to use the
// same spill set.
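//
// For example (illustrative only): if a bundle B with spill set S is split
// into B1 and B2, both B1 and B2 keep S; if either (or both) of them is later
// spilled, setAllocation() gives every spilled bundle in S the same stack
// slot.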
class SpillSet : public TempObject {
  // All bundles with this spill set which have been spilled. All bundles in
  // this list will be given the same physical slot.
  Vector<LiveBundle*, 1, JitAllocPolicy> list_;

  explicit SpillSet(TempAllocator& alloc) : list_(alloc) {}

 public:
  static SpillSet* New(TempAllocator& alloc) {
    return new (alloc) SpillSet(alloc);
  }

  [[nodiscard]] bool addSpilledBundle(LiveBundle* bundle) {
    return list_.append(bundle);
  }
  size_t numSpilledBundles() const { return list_.length(); }
  LiveBundle* spilledBundle(size_t i) const { return list_[i]; }

  void setAllocation(LAllocation alloc);
};

#ifdef JS_JITSPEW
// See comment on LiveBundle::debugId_ just below. This needs to be atomic
// because TSan automation runs on debug builds will otherwise (correctly)
// report a race.
static mozilla::Atomic<uint32_t> LiveBundle_debugIdCounter =
    mozilla::Atomic<uint32_t>{0};
#endif

// A set of live ranges which are all pairwise disjoint. The register allocator
// attempts to find allocations for an entire bundle, and if it fails the
// bundle will be broken into smaller ones which are allocated independently.
class LiveBundle : public TempObject {
  // Set to use if this bundle or one it is split into is spilled.
  SpillSet* spill_;

  // All the ranges in this set, ordered by location.
  InlineForwardList<LiveRange::BundleLink> ranges_;

  // Allocation to use for ranges in this set, bogus if unallocated or spilled
  // and not yet given a physical stack slot.
  LAllocation alloc_;

  // Bundle which entirely contains this one and has no register uses. This
  // may or may not be spilled by the allocator, but it can be spilled and
  // will not be split.
  LiveBundle* spillParent_;

#ifdef JS_JITSPEW
  // This is used only for debug-printing bundles. It gives them an
  // identifiable identity in the debug output, which they otherwise wouldn't
  // have.
  uint32_t debugId_;
#endif

  LiveBundle(SpillSet* spill, LiveBundle* spillParent)
      : spill_(spill), spillParent_(spillParent) {
#ifdef JS_JITSPEW
    debugId_ = LiveBundle_debugIdCounter++;
#endif
  }

 public:
  static LiveBundle* FallibleNew(TempAllocator& alloc, SpillSet* spill,
                                 LiveBundle* spillParent) {
    return new (alloc.fallible()) LiveBundle(spill, spillParent);
  }

  SpillSet* spillSet() const { return spill_; }
  void setSpillSet(SpillSet* spill) { spill_ = spill; }

  LiveRange::BundleLinkIterator rangesBegin() const { return ranges_.begin(); }
  bool hasRanges() const { return !!rangesBegin(); }
  LiveRange* firstRange() const { return LiveRange::get(*rangesBegin()); }
  LiveRange* lastRange() const { return LiveRange::get(ranges_.back()); }
  LiveRange* rangeFor(CodePosition pos) const;
  void removeRange(LiveRange* range);
  void removeRangeAndIncrementIterator(LiveRange::BundleLinkIterator& iter) {
    ranges_.removeAndIncrement(iter);
  }
  void addRange(LiveRange* range);
  [[nodiscard]] bool addRange(TempAllocator& alloc, VirtualRegister* vreg,
                              CodePosition from, CodePosition to);
  [[nodiscard]] bool addRangeAndDistributeUses(TempAllocator& alloc,
                                               LiveRange* oldRange,
                                               CodePosition from,
                                               CodePosition to);
  LiveRange* popFirstRange();
#ifdef DEBUG
  size_t numRanges() const;
#endif

  LAllocation allocation() const { return alloc_; }
  void setAllocation(LAllocation alloc) { alloc_ = alloc; }

  LiveBundle* spillParent() const { return spillParent_; }

#ifdef JS_JITSPEW
  uint32_t debugId() const { return debugId_; }

  // Return a string describing this bundle.
  UniqueChars toString() const;
#endif
};

// Information about the allocation for a virtual register.
class VirtualRegister {
  // Instruction which defines this register.
  LNode* ins_ = nullptr;

  // Definition in the instruction for this register.
  LDefinition* def_ = nullptr;

  // All live ranges for this register. These may overlap each other, and are
  // ordered by their start position.
  InlineForwardList<LiveRange::RegisterLink> ranges_;

  // Whether def_ is a temp or an output.
  bool isTemp_ = false;

  // Whether this vreg is an input for some phi. This use is not reflected in
  // any range on the vreg.
  bool usedByPhi_ = false;

  // If this register's definition is MUST_REUSE_INPUT, whether a copy must
  // be introduced before the definition that relaxes the policy.
  bool mustCopyInput_ = false;

  void operator=(const VirtualRegister&) = delete;
  VirtualRegister(const VirtualRegister&) = delete;

 public:
  VirtualRegister() = default;

  void init(LNode* ins, LDefinition* def, bool isTemp) {
    MOZ_ASSERT(!ins_);
    ins_ = ins;
    def_ = def;
    isTemp_ = isTemp;
  }

  LNode* ins() const { return ins_; }
  LDefinition* def() const { return def_; }
  LDefinition::Type type() const { return def()->type(); }
  uint32_t vreg() const { return def()->virtualRegister(); }
  bool isCompatible(const AnyRegister& r) const {
    return def_->isCompatibleReg(r);
  }
  bool isCompatible(const VirtualRegister& vr) const {
    return def_->isCompatibleDef(*vr.def_);
  }
  bool isTemp() const { return isTemp_; }

  void setUsedByPhi() { usedByPhi_ = true; }
  bool usedByPhi() { return usedByPhi_; }

  void setMustCopyInput() { mustCopyInput_ = true; }
  bool mustCopyInput() { return mustCopyInput_; }

  LiveRange::RegisterLinkIterator rangesBegin() const {
    return ranges_.begin();
  }
  LiveRange::RegisterLinkIterator rangesBegin(LiveRange* range) const {
    return ranges_.begin(&range->registerLink);
  }
  bool hasRanges() const { return !!rangesBegin(); }
  LiveRange* firstRange() const { return LiveRange::get(*rangesBegin()); }
  LiveRange* lastRange() const { return LiveRange::get(ranges_.back()); }
  LiveRange* rangeFor(CodePosition pos, bool preferRegister = false) const;
  void removeRange(LiveRange* range);
  void addRange(LiveRange* range);

  void removeRangeAndIncrement(LiveRange::RegisterLinkIterator& iter) {
    ranges_.removeAndIncrement(iter);
  }

  LiveBundle* firstBundle() const { return firstRange()->bundle(); }

  [[nodiscard]] bool addInitialRange(TempAllocator& alloc, CodePosition from,
                                     CodePosition to, size_t* numRanges);
  void addInitialUse(UsePosition* use);
  void setInitialDefinition(CodePosition from);
};

// A sequence of code positions, for telling BacktrackingAllocator::splitAt
// where to split.
using SplitPositionVector = js::Vector<CodePosition, 4, SystemAllocPolicy>;

class BacktrackingAllocator : protected RegisterAllocator {
  friend class JSONSpewer;

  // This flag is set when testing new allocator modifications.
  bool testbed;

  BitSet* liveIn;
  FixedList<VirtualRegister> vregs;

  // Allocation state.
  StackSlotAllocator stackSlotAllocator;

  // Priority queue element: a bundle and the associated priority.
  struct QueueItem {
    LiveBundle* bundle;

    QueueItem(LiveBundle* bundle, size_t priority)
        : bundle(bundle), priority_(priority) {}

    static size_t priority(const QueueItem& v) { return v.priority_; }

   private:
    size_t priority_;
  };

  PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;

  // This is a set of LiveRange. They must be non-overlapping. Attempts
  // to add an overlapping range will cause AvlTree::insert to MOZ_CRASH().
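  // (Two overlapping ranges compare as equal under LiveRange::compare above,
  // which is what makes such an insertion look like a duplicate to the tree.)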
  using LiveRangeSet = AvlTree<LiveRange*, LiveRange>;

  // The same, but for LiveRangePlus. See comments on LiveRangePlus.
  using LiveRangePlusSet = AvlTree<LiveRangePlus, LiveRangePlus>;

  // Each physical register is associated with the set of ranges over which
  // that register is currently allocated.
  struct PhysicalRegister {
    bool allocatable;
    AnyRegister reg;
    LiveRangePlusSet allocations;

    PhysicalRegister() : allocatable(false) {}
  };
  mozilla::Array<PhysicalRegister, AnyRegister::Total> registers;

  // Ranges of code which are considered to be hot, for which good allocation
  // should be prioritized.
  LiveRangeSet hotcode;

  struct CallRange : public TempObject, public InlineListNode<CallRange> {
    LiveRange::Range range;

    CallRange(CodePosition from, CodePosition to) : range(from, to) {}

    // Comparator for use in AVL trees.
    static int compare(CallRange* v0, CallRange* v1) {
      if (v0->range.to <= v1->range.from) {
        return -1;
      }
      if (v0->range.from >= v1->range.to) {
        return 1;
      }
      return 0;
    }
  };

  // Ranges where all registers must be spilled due to call instructions.
  using CallRangeList = InlineList<CallRange>;
  CallRangeList callRangesList;
  AvlTree<CallRange*, CallRange> callRanges;

  // Information about an allocated stack slot.
  struct SpillSlot : public TempObject,
                     public InlineForwardListNode<SpillSlot> {
    LStackSlot alloc;
    LiveRangePlusSet allocated;

    SpillSlot(uint32_t slot, LifoAlloc* alloc)
        : alloc(slot), allocated(alloc) {}
  };
  using SpillSlotList = InlineForwardList<SpillSlot>;

  // All allocated slots of each width.
  SpillSlotList normalSlots, doubleSlots, quadSlots;

  Vector<LiveBundle*, 4, SystemAllocPolicy> spilledBundles;

  using LiveBundleVector = Vector<LiveBundle*, 4, SystemAllocPolicy>;

  // Misc accessors
  bool compilingWasm() { return mir->outerInfo().compilingWasm(); }
  VirtualRegister& vreg(const LDefinition* def) {
    return vregs[def->virtualRegister()];
  }
  VirtualRegister& vreg(const LAllocation* alloc) {
    MOZ_ASSERT(alloc->isUse());
    return vregs[alloc->toUse()->virtualRegister()];
  }

  // Helpers for creating and adding MoveGroups
  [[nodiscard]] bool addMove(LMoveGroup* moves, LiveRange* from, LiveRange* to,
                             LDefinition::Type type) {
    LAllocation fromAlloc = from->bundle()->allocation();
    LAllocation toAlloc = to->bundle()->allocation();
    MOZ_ASSERT(fromAlloc != toAlloc);
    return moves->add(fromAlloc, toAlloc, type);
  }

  [[nodiscard]] bool moveInput(LInstruction* ins, LiveRange* from,
                               LiveRange* to, LDefinition::Type type) {
    if (from->bundle()->allocation() == to->bundle()->allocation()) {
      return true;
    }
    LMoveGroup* moves = getInputMoveGroup(ins);
    return addMove(moves, from, to, type);
  }

  [[nodiscard]] bool moveAfter(LInstruction* ins, LiveRange* from,
                               LiveRange* to, LDefinition::Type type) {
    if (from->bundle()->allocation() == to->bundle()->allocation()) {
      return true;
    }
    LMoveGroup* moves = getMoveGroupAfter(ins);
    return addMove(moves, from, to, type);
  }

  [[nodiscard]] bool moveAtExit(LBlock* block, LiveRange* from, LiveRange* to,
                                LDefinition::Type type) {
    if (from->bundle()->allocation() == to->bundle()->allocation()) {
      return true;
    }
    LMoveGroup* moves = block->getExitMoveGroup(alloc());
    return addMove(moves, from, to, type);
  }

  [[nodiscard]] bool moveAtEntry(LBlock* block, LiveRange* from, LiveRange* to,
                                 LDefinition::Type type) {
    if (from->bundle()->allocation() == to->bundle()->allocation()) {
      return true;
    }
    LMoveGroup* moves = block->getEntryMoveGroup(alloc());
    return addMove(moves, from, to, type);
  }

  // Out-of-line methods, in the same sequence as in BacktrackingAllocator.cpp.

  // Misc helpers: queries about uses
  bool isReusedInput(LUse* use, LNode* ins, bool considerCopy);
  bool isRegisterUse(UsePosition* use, LNode* ins, bool considerCopy = false);
  bool isRegisterDefinition(LiveRange* range);

  // Misc helpers: atomic LIR groups
  // (these are all in the parent class, RegisterAllocator)

  // Misc helpers: computation of bundle priorities and spill weights
  size_t computePriority(LiveBundle* bundle);
  bool minimalDef(LiveRange* range, LNode* ins);
  bool minimalUse(LiveRange* range, UsePosition* use);
  bool minimalBundle(LiveBundle* bundle, bool* pfixed = nullptr);
  size_t computeSpillWeight(LiveBundle* bundle);
  size_t maximumSpillWeight(const LiveBundleVector& bundles);

  // Initialization of the allocator
  [[nodiscard]] bool init();

  // Liveness analysis
  [[nodiscard]] bool addInitialFixedRange(AnyRegister reg, CodePosition from,
                                          CodePosition to);
  [[nodiscard]] bool buildLivenessInfo();

  // Merging and queueing of LiveRange groups
  [[nodiscard]] bool tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1);
  void allocateStackDefinition(VirtualRegister& reg);
  [[nodiscard]] bool tryMergeReusedRegister(VirtualRegister& def,
                                            VirtualRegister& input);
  [[nodiscard]] bool mergeAndQueueRegisters();

  // Implementation of splitting decisions, but not the making of those
  // decisions
  [[nodiscard]] bool updateVirtualRegisterListsThenRequeueBundles(
      LiveBundle* bundle, const LiveBundleVector& newBundles);

  // Implementation of splitting decisions, but not the making of those
  // decisions
  [[nodiscard]] bool splitAt(LiveBundle* bundle,
                             const SplitPositionVector& splitPositions);

  // Creation of splitting decisions, but not their implementation
  [[nodiscard]] bool splitAcrossCalls(LiveBundle* bundle);
  [[nodiscard]] bool trySplitAcrossHotcode(LiveBundle* bundle, bool* success);
  [[nodiscard]] bool trySplitAfterLastRegisterUse(LiveBundle* bundle,
                                                  LiveBundle* conflict,
                                                  bool* success);
  [[nodiscard]] bool trySplitBeforeFirstRegisterUse(LiveBundle* bundle,
                                                    LiveBundle* conflict,
                                                    bool* success);

  // The top level driver for the splitting machinery
  [[nodiscard]] bool chooseBundleSplit(LiveBundle* bundle, bool fixed,
                                       LiveBundle* conflict);

  // Bundle allocation
  [[nodiscard]] bool computeRequirement(LiveBundle* bundle,
                                        Requirement* prequirement,
                                        Requirement* phint);
  [[nodiscard]] bool tryAllocateRegister(PhysicalRegister& r,
                                         LiveBundle* bundle, bool* success,
                                         bool* pfixed,
                                         LiveBundleVector& conflicting);
  [[nodiscard]] bool tryAllocateAnyRegister(LiveBundle* bundle, bool* success,
                                            bool* pfixed,
                                            LiveBundleVector& conflicting);
  [[nodiscard]] bool evictBundle(LiveBundle* bundle);
  [[nodiscard]] bool tryAllocateFixed(LiveBundle* bundle,
                                      Requirement requirement, bool* success,
                                      bool* pfixed,
                                      LiveBundleVector& conflicting);
  [[nodiscard]] bool tryAllocateNonFixed(LiveBundle* bundle,
                                         Requirement requirement,
                                         Requirement hint, bool* success,
                                         bool* pfixed,
                                         LiveBundleVector& conflicting);
  [[nodiscard]] bool processBundle(MIRGenerator* mir, LiveBundle* bundle);
  [[nodiscard]] bool spill(LiveBundle* bundle);
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool
  tryAllocatingRegistersForSpillBundles();

  // Rewriting of the LIR after bundle processing is done
  [[nodiscard]] bool insertAllRanges(LiveRangePlusSet& set, LiveBundle* bundle);
  [[nodiscard]] bool pickStackSlot(SpillSet* spill);
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool pickStackSlots();
  [[nodiscard]] bool moveAtEdge(LBlock* predecessor, LBlock* successor,
                                LiveRange* from, LiveRange* to,
                                LDefinition::Type type);
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool deadRange(LiveRange* range);
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool
  createMoveGroupsFromLiveRangeTransitions();
  size_t findFirstNonCallSafepoint(CodePosition from);
  void addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range);
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool installAllocationsInLIR();
  size_t findFirstSafepoint(CodePosition pos, size_t startFrom);
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool populateSafepoints();
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool annotateMoveGroups();

  // Debug-printing support
#ifdef JS_JITSPEW
  void dumpLiveRangesByVReg(const char* who);
  void dumpLiveRangesByBundle(const char* who);
  void dumpAllocations();
#endif

  // Top level of the register allocation machinery, and the only externally
  // visible bit.
 public:
  BacktrackingAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph,
                        bool testbed)
      : RegisterAllocator(mir, lir, graph),
        testbed(testbed),
        liveIn(nullptr),
        callRanges(nullptr) {}

  [[nodiscard]] bool go();
};

}  // namespace jit
}  // namespace js

#endif /* jit_BacktrackingAllocator_h */