/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/MoveResolver.h"

#include "mozilla/ScopeExit.h"

#include "jit/MacroAssembler.h"
#include "jit/RegisterSets.h"

using namespace js;
using namespace js::jit;

MoveOperand::MoveOperand(MacroAssembler& masm, const ABIArg& arg) : disp_(0) {
  switch (arg.kind()) {
    case ABIArg::GPR:
      kind_ = Kind::Reg;
      code_ = arg.gpr().code();
      break;
#ifdef JS_CODEGEN_REGISTER_PAIR
    case ABIArg::GPR_PAIR:
      kind_ = Kind::RegPair;
      code_ = arg.evenGpr().code();
      MOZ_ASSERT(code_ % 2 == 0);
      MOZ_ASSERT(code_ + 1 == arg.oddGpr().code());
      break;
#endif
    case ABIArg::FPU:
      kind_ = Kind::FloatReg;
      code_ = arg.fpu().code();
      break;
    case ABIArg::Stack:
      kind_ = Kind::Memory;
      if (IsHiddenSP(masm.getStackPointer())) {
        MOZ_CRASH(
            "Hidden SP cannot be represented as register code on this "
            "platform");
      }
      code_ = AsRegister(masm.getStackPointer()).code();
      disp_ = arg.offsetFromArgBase();
      break;
    case ABIArg::Uninitialized:
      MOZ_CRASH("Uninitialized ABIArg kind");
  }
}

MoveResolver::MoveResolver() : numCycles_(0), curCycles_(0) {}

void MoveResolver::resetState() {
  numCycles_ = 0;
  curCycles_ = 0;
}

bool MoveResolver::addMove(const MoveOperand& from, const MoveOperand& to,
                           MoveOp::Type type) {
  // Assert that we're not doing no-op moves.
  MOZ_ASSERT(!(from == to));
  PendingMove* pm = movePool_.allocate(from, to, type);
  if (!pm) {
    return false;
  }
  pending_.pushBack(pm);
  return true;
}

// Given move (A -> B), this function attempts to find any move (B -> *) in the
// pending move list, and returns the first one.
MoveResolver::PendingMove* MoveResolver::findBlockingMove(
    const PendingMove* last) {
  for (PendingMoveIterator iter = pending_.begin(); iter != pending_.end();
       iter++) {
    PendingMove* other = *iter;

    if (other->from().aliases(last->to())) {
      // We now have pairs in the form (A -> X) (X -> Y). The second pair
      // blocks the move in the first pair, so return it.
      return other;
    }
  }

  // No blocking moves found.
  return nullptr;
}

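// For illustration (hypothetical operands): with pending moves (r1 -> r2)
// and (r2 -> [sp+8]), findBlockingMove on (r1 -> r2) returns (r2 -> [sp+8]),
// since that move reads r2 and must be emitted before r2 is overwritten.
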
// Given move (A -> B), this function attempts to find any move (B -> *) in the
// move list iterator, and returns the first one.
// N.B. It is unclear if a single move can complete more than one cycle, so to
// be conservative, this function operates on iterators, so the caller can
// process all instructions that start a cycle.
MoveResolver::PendingMove* MoveResolver::findCycledMove(
    PendingMoveIterator* iter, PendingMoveIterator end,
    const PendingMove* last) {
  for (; *iter != end; (*iter)++) {
    PendingMove* other = **iter;
    if (other->from().aliases(last->to())) {
      // We now have pairs in the form (A -> X) (X -> Y). The second pair
      // blocks the move in the first pair, so return it.
      (*iter)++;
      return other;
    }
  }
  // No cycled moves found.
  return nullptr;
}

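// For illustration (hypothetical operands): if the traversal stack holds
// (r0 -> r1), (r1 -> r2) and the blocking move is (r2 -> r0), then
// findCycledMove starting from the stack's begin returns (r0 -> r1), whose
// source r0 is the blocking move's destination: the chain has closed into
// a cycle.
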
#ifdef JS_CODEGEN_ARM
static inline bool MoveIsDouble(const MoveOperand& move) {
  if (!move.isFloatReg()) {
    return false;
  }
  return move.floatReg().isDouble();
}
#endif

#ifdef JS_CODEGEN_ARM
static inline bool MoveIsSingle(const MoveOperand& move) {
  if (!move.isFloatReg()) {
    return false;
  }
  return move.floatReg().isSingle();
}
#endif

#ifdef JS_CODEGEN_ARM
bool MoveResolver::isDoubleAliasedAsSingle(const MoveOperand& move) {
  if (!MoveIsDouble(move)) {
    return false;
  }

  for (auto iter = pending_.begin(); iter != pending_.end(); ++iter) {
    PendingMove* other = *iter;
    if (other->from().aliases(move) && MoveIsSingle(other->from())) {
      return true;
    }
    if (other->to().aliases(move) && MoveIsSingle(other->to())) {
      return true;
    }
  }

  return false;
}
#endif

#ifdef JS_CODEGEN_ARM
static MoveOperand SplitIntoLowerHalf(const MoveOperand& move) {
  if (MoveIsDouble(move)) {
    FloatRegister lowerSingle = move.floatReg().asSingle();
    return MoveOperand(lowerSingle);
  }

  MOZ_ASSERT(move.isMemoryOrEffectiveAddress());
  return move;
}
#endif

#ifdef JS_CODEGEN_ARM
static MoveOperand SplitIntoUpperHalf(const MoveOperand& move) {
  if (MoveIsDouble(move)) {
    FloatRegister lowerSingle = move.floatReg().asSingle();
    FloatRegister upperSingle =
        VFPRegister(lowerSingle.code() + 1, VFPRegister::Single);
    return MoveOperand(upperSingle);
  }

  MOZ_ASSERT(move.isMemoryOrEffectiveAddress());
  return MoveOperand(move.base(), move.disp() + sizeof(float));
}
#endif

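// For illustration (hypothetical registers): on ARM, a double move such as
// (d0 -> d1), where the doubles also participate in single-register moves,
// is rewritten by resolve() as two FLOAT32 moves over the aliasing halves:
// (s0 -> s2) for the lower halves and (s1 -> s3) for the upper halves,
// since d0 aliases s0/s1 and d1 aliases s2/s3.
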
// Resolves the pending_ list to a list in orderedMoves_.
bool MoveResolver::resolve() {
  resetState();
  orderedMoves_.clear();

  // Upon return from this function, the pending_ list must be cleared.
  auto clearPending = mozilla::MakeScopeExit([this]() { pending_.clear(); });

#ifdef JS_CODEGEN_ARM
  // Some of ARM's double registers alias two of its single registers,
  // but the algorithm below assumes that every register can participate
  // in at most one cycle. To satisfy the algorithm, any double registers
  // that may conflict are split into their single-register halves.
  //
  // This logic is only applicable because ARM only uses registers d0-d15,
  // all of which alias s0-s31. Double registers d16-d31 are unused.
  // Therefore there is never a double move that cannot be split.
  // If this changes in the future, the algorithm will have to be fixed.

  bool splitDoubles = false;
  for (auto iter = pending_.begin(); iter != pending_.end(); ++iter) {
    PendingMove* pm = *iter;

    if (isDoubleAliasedAsSingle(pm->from()) ||
        isDoubleAliasedAsSingle(pm->to())) {
      splitDoubles = true;
      break;
    }
  }

  if (splitDoubles) {
    for (auto iter = pending_.begin(); iter != pending_.end(); ++iter) {
      PendingMove* pm = *iter;

      if (!MoveIsDouble(pm->from()) && !MoveIsDouble(pm->to())) {
        continue;
      }

      MoveOperand fromLower = SplitIntoLowerHalf(pm->from());
      MoveOperand toLower = SplitIntoLowerHalf(pm->to());

      PendingMove* lower =
          movePool_.allocate(fromLower, toLower, MoveOp::FLOAT32);
      if (!lower) {
        return false;
      }

      // Insert the new node before the current position to not affect
      // iteration.
      pending_.insertBefore(pm, lower);

      // Overwrite pm in place for the upper move. Iteration proceeds as
      // normal.
      MoveOperand fromUpper = SplitIntoUpperHalf(pm->from());
      MoveOperand toUpper = SplitIntoUpperHalf(pm->to());
      pm->overwrite(fromUpper, toUpper, MoveOp::FLOAT32);
    }
  }
#endif

  InlineList<PendingMove> stack;

  // This is a depth-first search without recursion, which tries to find
  // cycles in a list of moves.
  //
  // Algorithm:
  //
  // S = Traversal stack.
  // P = Pending move list.
  // O = Ordered list of moves.
  //
  // As long as there are pending moves in P:
  //     Let |root| be any pending move removed from P.
  //     Add |root| to the traversal stack.
  //     As long as S is not empty:
  //         Let |L| be the most recent move added to S.
  //
  //         Find any pending move M whose source is L's destination, thus
  //         preventing L's move until M has completed.
  //
  //         If a move M was found,
  //             Remove M from the pending list.
  //             If M's destination is |root|,
  //                 Annotate M and |root| as cycles.
  //                 Add M to S.
  //                 Do not add M to O, since M may have other conflictors
  //                 in P that have not yet been processed.
  //             Otherwise,
  //                 Add M to S.
  //         Otherwise,
  //             Remove L from S.
  //             Add L to O.
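  //
  // For illustration (hypothetical operands): with the pending moves
  // (r0 -> r1) and (r1 -> r0), the traversal starting at root (r0 -> r1)
  // finds (r1 -> r0) as its blocking move; findCycledMove then reports that
  // the blocking move's destination r0 is the root's source, closing the
  // chain into a cycle. The stack entry (r0 -> r1) is annotated as the
  // cycle's end and the blocking move (r1 -> r0) as its begin; the move
  // emitter later breaks the cycle with a temporary location.
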
  while (!pending_.empty()) {
    PendingMove* pm = pending_.popBack();

    // Add this pending move to the cycle detection stack.
    stack.pushBack(pm);

    while (!stack.empty()) {
      PendingMove* blocking = findBlockingMove(stack.peekBack());

      if (blocking) {
        PendingMoveIterator stackiter = stack.begin();
        PendingMove* cycled = findCycledMove(&stackiter, stack.end(), blocking);
        if (cycled) {
          // Find the cycle's start.
          // We annotate cycles at each move in the cycle, and
          // assert that we do not find two cycles in one move chain
          // traversal (which would indicate two moves to the same
          // destination).
          // Since there can be more than one cycle, find them all.
          do {
            cycled->setCycleEnd(curCycles_);
            cycled = findCycledMove(&stackiter, stack.end(), blocking);
          } while (cycled);

          blocking->setCycleBegin(pm->type(), curCycles_);
          curCycles_++;
          pending_.remove(blocking);
          stack.pushBack(blocking);
        } else {
          // This is a new link in the move chain, so keep
          // searching for a cycle.
          pending_.remove(blocking);
          stack.pushBack(blocking);
        }
      } else {
        // Otherwise, pop the last move on the search stack because it's
        // complete and not participating in a cycle. The resulting
        // move can safely be added to the ordered move list.
        PendingMove* done = stack.popBack();
        if (!addOrderedMove(*done)) {
          return false;
        }
        movePool_.free(done);
      }
    }

    // At this point the stack is empty, so all previously found cycles are
    // complete and cannot conflict with future cycles. Reset the counter of
    // pending cycles, keeping numCycles_ as a high-water mark.
    if (numCycles_ < curCycles_) {
      numCycles_ = curCycles_;
    }
    curCycles_ = 0;
  }

  return true;
}

bool MoveResolver::addOrderedMove(const MoveOp& move) {
  // Sometimes the register allocator generates move groups where multiple
  // moves have the same source. Try to optimize these cases when the source
  // is in memory and the target of one of the moves is in a register.
  MOZ_ASSERT(!move.from().aliases(move.to()));

  if (!move.from().isMemory() || move.isCycleBegin() || move.isCycleEnd()) {
    return orderedMoves_.append(move);
  }

  // Look for an earlier move with the same source, where no intervening move
  // touches either the source or destination of the new move.
  for (int i = orderedMoves_.length() - 1; i >= 0; i--) {
    const MoveOp& existing = orderedMoves_[i];

    if (existing.from() == move.from() && !existing.to().aliases(move.to()) &&
        existing.type() == move.type() && !existing.isCycleBegin() &&
        !existing.isCycleEnd()) {
      MoveOp* after = orderedMoves_.begin() + i + 1;
      if (existing.to().isGeneralReg() || existing.to().isFloatReg()) {
        MoveOp nmove(existing.to(), move.to(), move.type());
        return orderedMoves_.insert(after, nmove);
      } else if (move.to().isGeneralReg() || move.to().isFloatReg()) {
        MoveOp nmove(move.to(), existing.to(), move.type());
        orderedMoves_[i] = move;
        return orderedMoves_.insert(after, nmove);
      }
    }

    if (existing.aliases(move)) {
      break;
    }
  }

  return orderedMoves_.append(move);
}

void MoveResolver::reorderMove(size_t from, size_t to) {
  MOZ_ASSERT(from != to);

  MoveOp op = orderedMoves_[from];
  if (from < to) {
    // Shift the moves between |from| and |to| down by one.
    for (size_t i = from; i < to; i++) {
      orderedMoves_[i] = orderedMoves_[i + 1];
    }
  } else {
    // Shift the moves between |to| and |from| up by one.
    for (size_t i = from; i > to; i--) {
      orderedMoves_[i] = orderedMoves_[i - 1];
    }
  }
  orderedMoves_[to] = op;
}

void MoveResolver::sortMemoryToMemoryMoves() {
  // Try to reorder memory->memory moves so that they are executed right
  // before a move that clobbers some register. This will allow the move
  // emitter to use that clobbered register as a scratch register for the
  // memory->memory move, if necessary.
  for (size_t i = 0; i < orderedMoves_.length(); i++) {
    const MoveOp& base = orderedMoves_[i];
    if (!base.from().isMemory() || !base.to().isMemory()) {
      continue;
    }
    if (base.type() != MoveOp::GENERAL && base.type() != MoveOp::INT32) {
      continue;
    }

    // Look for an earlier move clobbering a register.
    bool found = false;
    for (int j = i - 1; j >= 0; j--) {
      const MoveOp& previous = orderedMoves_[j];
      if (previous.aliases(base) || previous.isCycleBegin() ||
          previous.isCycleEnd()) {
        break;
      }

      if (previous.to().isGeneralReg()) {
        // Shift the memory->memory move up, to right before the move
        // clobbering this register.
        reorderMove(i, j);
        found = true;
        break;
      }
    }
    if (found) {
      continue;
    }

    // Look for a later move clobbering a register.
    if (i + 1 < orderedMoves_.length()) {
      bool found = false, skippedRegisterUse = false;
      for (size_t j = i + 1; j < orderedMoves_.length(); j++) {
        const MoveOp& later = orderedMoves_[j];
        if (later.aliases(base) || later.isCycleBegin() || later.isCycleEnd()) {
          break;
        }

        if (later.to().isGeneralReg()) {
          if (skippedRegisterUse) {
            // Shift the memory->memory move down, to right before the
            // move clobbering this register.
            reorderMove(i, j - 1);
            found = true;
          } else {
            // There is no move that uses a register between the
            // original memory->memory move and this move that
            // clobbers a register. The move should already be able
            // to use a scratch register, so don't shift anything
            // around.
          }
          break;
        }

        if (later.from().isGeneralReg()) {
          skippedRegisterUse = true;
        }
      }

      if (found) {
        // Redo the search for memory->memory moves at the current
        // index, so we don't skip the move just shifted back.
        i--;
      }
    }
  }
}

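// For illustration (hypothetical operands): in the ordered list
//   ([sp+0] -> [sp+8]), (r1 -> [sp+16]), ([sp+24] -> r2)
// the move (r1 -> [sp+16]) reads a register, so the memory->memory move is
// shifted down to sit right before ([sp+24] -> r2), which clobbers r2; the
// move emitter can then use r2 as a scratch register for the memory->memory
// copy.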