1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/EffectiveAddressAnalysis.h"
9 #include "jit/IonAnalysis.h"
11 #include "jit/MIRGenerator.h"
12 #include "jit/MIRGraph.h"
13 #include "util/CheckedArithmetic.h"
18 static void AnalyzeLsh(TempAllocator
& alloc
, MLsh
* lsh
) {
19 if (lsh
->type() != MIRType::Int32
) {
23 if (lsh
->isRecoveredOnBailout()) {
27 MDefinition
* index
= lsh
->lhs();
28 MOZ_ASSERT(index
->type() == MIRType::Int32
);
30 MConstant
* shiftValue
= lsh
->rhs()->maybeConstantValue();
35 if (shiftValue
->type() != MIRType::Int32
||
36 !IsShiftInScaleRange(shiftValue
->toInt32())) {
40 Scale scale
= ShiftToScale(shiftValue
->toInt32());
42 int32_t displacement
= 0;
43 MInstruction
* last
= lsh
;
44 MDefinition
* base
= nullptr;
46 if (!last
->hasOneUse()) {
50 MUseIterator use
= last
->usesBegin();
51 if (!use
->consumer()->isDefinition() ||
52 !use
->consumer()->toDefinition()->isAdd()) {
56 MAdd
* add
= use
->consumer()->toDefinition()->toAdd();
57 if (add
->type() != MIRType::Int32
|| !add
->isTruncated()) {
61 MDefinition
* other
= add
->getOperand(1 - add
->indexOf(*use
));
63 if (MConstant
* otherConst
= other
->maybeConstantValue()) {
64 displacement
+= otherConst
->toInt32();
73 if (last
->isRecoveredOnBailout()) {
79 uint32_t elemSize
= 1 << ScaleToShift(scale
);
80 if (displacement
% elemSize
!= 0) {
84 if (!last
->hasOneUse()) {
88 MUseIterator use
= last
->usesBegin();
89 if (!use
->consumer()->isDefinition() ||
90 !use
->consumer()->toDefinition()->isBitAnd()) {
94 MBitAnd
* bitAnd
= use
->consumer()->toDefinition()->toBitAnd();
95 if (bitAnd
->isRecoveredOnBailout()) {
99 MDefinition
* other
= bitAnd
->getOperand(1 - bitAnd
->indexOf(*use
));
100 MConstant
* otherConst
= other
->maybeConstantValue();
101 if (!otherConst
|| otherConst
->type() != MIRType::Int32
) {
105 uint32_t bitsClearedByShift
= elemSize
- 1;
106 uint32_t bitsClearedByMask
= ~uint32_t(otherConst
->toInt32());
107 if ((bitsClearedByShift
& bitsClearedByMask
) != bitsClearedByMask
) {
111 bitAnd
->replaceAllUsesWith(last
);
115 if (base
->isRecoveredOnBailout()) {
119 MEffectiveAddress
* eaddr
=
120 MEffectiveAddress::New(alloc
, base
, index
, scale
, displacement
);
121 last
->replaceAllUsesWith(eaddr
);
122 last
->block()->insertAfter(last
, eaddr
);
129 // [LoadUnboxedScalar]
130 // movsd 0x0(%rbx,%rsi,8), %xmm4
134 // [LoadUnboxedScalar]
135 // movsd 0x48(%rbx,%rsi,8), %xmm4
137 // This is possible when the AddI is only used by the LoadUnboxedScalar opcode.
138 static void AnalyzeLoadUnboxedScalar(MLoadUnboxedScalar
* load
) {
139 if (load
->isRecoveredOnBailout()) {
143 if (!load
->getOperand(1)->isAdd()) {
147 JitSpew(JitSpew_EAA
, "analyze: %s%u", load
->opName(), load
->id());
149 MAdd
* add
= load
->getOperand(1)->toAdd();
151 if (add
->type() != MIRType::Int32
|| !add
->hasUses() ||
152 add
->truncateKind() != TruncateKind::Truncate
) {
156 MDefinition
* lhs
= add
->lhs();
157 MDefinition
* rhs
= add
->rhs();
158 MDefinition
* constant
= nullptr;
159 MDefinition
* node
= nullptr;
161 if (lhs
->isConstant()) {
164 } else if (rhs
->isConstant()) {
170 MOZ_ASSERT(constant
->type() == MIRType::Int32
);
172 size_t storageSize
= Scalar::byteSize(load
->storageType());
173 int32_t c1
= load
->offsetAdjustment();
175 if (!SafeMul(constant
->maybeConstantValue()->toInt32(), storageSize
, &c2
)) {
180 if (!SafeAdd(c1
, c2
, &offset
)) {
184 JitSpew(JitSpew_EAA
, "set offset: %d + %d = %d on: %s%u", c1
, c2
, offset
,
185 load
->opName(), load
->id());
186 load
->setOffsetAdjustment(offset
);
187 load
->replaceOperand(1, node
);
189 if (!add
->hasLiveDefUses() && DeadIfUnused(add
) &&
190 add
->canRecoverOnBailout()) {
191 JitSpew(JitSpew_EAA
, "mark as recovered on bailout: %s%u", add
->opName(),
193 add
->setRecoveredOnBailoutUnchecked();
197 template <typename AsmJSMemoryAccess
>
198 void EffectiveAddressAnalysis::analyzeAsmJSHeapAccess(AsmJSMemoryAccess
* ins
) {
199 MDefinition
* base
= ins
->base();
201 if (base
->isConstant()) {
202 // If the index is within the minimum heap length, we can optimize away the
203 // bounds check. Asm.js accesses always have an int32 base, the memory is
204 // always a memory32.
205 int32_t imm
= base
->toConstant()->toInt32();
207 int32_t end
= (uint32_t)imm
+ ins
->byteSize();
208 if (end
>= imm
&& (uint32_t)end
<= mir_
->minWasmMemory0Length()) {
209 ins
->removeBoundsCheck();
// This analysis converts patterns of the form:
//   truncate(x + (y << {0,1,2,3}))
//   truncate(x + (y << {0,1,2,3}) + imm32)
// into a single lea instruction, and patterns of the form:
//   asmload(x + imm32)
//   asmload(x << {0,1,2,3})
//   asmload((x << {0,1,2,3}) + imm32)
//   asmload((x << {0,1,2,3}) & mask)            (where mask is redundant
//                                                with shift)
//   asmload(((x << {0,1,2,3}) + imm32) & mask)  (where mask is redundant
//                                                with shift + imm32)
// into a single asmload instruction (and for asmstore too).
//
// Additionally, we should consider the general forms:
//   truncate(x + y + imm32)
//   truncate((y << {0,1,2,3}) + imm32)
231 bool EffectiveAddressAnalysis::analyze() {
232 JitSpew(JitSpew_EAA
, "Begin");
233 for (ReversePostorderIterator
block(graph_
.rpoBegin());
234 block
!= graph_
.rpoEnd(); block
++) {
235 for (MInstructionIterator i
= block
->begin(); i
!= block
->end(); i
++) {
236 if (!graph_
.alloc().ensureBallast()) {
240 // Note that we don't check for MWasmCompareExchangeHeap
241 // or MWasmAtomicBinopHeap, because the backend and the OOB
242 // mechanism don't support non-zero offsets for them yet
243 // (TODO bug 1254935).
245 AnalyzeLsh(graph_
.alloc(), i
->toLsh());
246 } else if (i
->isLoadUnboxedScalar()) {
247 AnalyzeLoadUnboxedScalar(i
->toLoadUnboxedScalar());
248 } else if (i
->isAsmJSLoadHeap()) {
249 analyzeAsmJSHeapAccess(i
->toAsmJSLoadHeap());
250 } else if (i
->isAsmJSStoreHeap()) {
251 analyzeAsmJSHeapAccess(i
->toAsmJSStoreHeap());