/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/EffectiveAddressAnalysis.h"

#include "jit/IonAnalysis.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "util/CheckedArithmetic.h"

using namespace js;
using namespace jit;
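
// Fold a left-shifted index plus a chain of truncated additions into a single
// MEffectiveAddress node. Roughly, a pattern such as
//
//   base + (index << {0,1,2,3}) + imm32
//
// where each intermediate add has a single use, becomes one
// base + index*scale + displacement computation. If no base operand is found,
// the analysis instead looks for a following bitand whose mask is redundant
// with the bits already cleared by the shift, and removes it.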
static void AnalyzeLsh(TempAllocator& alloc, MLsh* lsh) {
  if (lsh->type() != MIRType::Int32) {
    return;
  }

  if (lsh->isRecoveredOnBailout()) {
    return;
  }

  MDefinition* index = lsh->lhs();
  MOZ_ASSERT(index->type() == MIRType::Int32);

  MConstant* shiftValue = lsh->rhs()->maybeConstantValue();
  if (!shiftValue) {
    return;
  }

  if (shiftValue->type() != MIRType::Int32 ||
      !IsShiftInScaleRange(shiftValue->toInt32())) {
    return;
  }

  Scale scale = ShiftToScale(shiftValue->toInt32());

  int32_t displacement = 0;
  MInstruction* last = lsh;
  MDefinition* base = nullptr;
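  // Follow the chain of single-use, truncated Int32 adds that consume the
  // shifted index, folding constant operands into displacement and recording
  // at most one non-constant operand as base.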
  while (true) {
    if (!last->hasOneUse()) {
      break;
    }

    MUseIterator use = last->usesBegin();
    if (!use->consumer()->isDefinition() ||
        !use->consumer()->toDefinition()->isAdd()) {
      break;
    }

    MAdd* add = use->consumer()->toDefinition()->toAdd();
    if (add->type() != MIRType::Int32 || !add->isTruncated()) {
      break;
    }

    MDefinition* other = add->getOperand(1 - add->indexOf(*use));

    if (MConstant* otherConst = other->maybeConstantValue()) {
      displacement += otherConst->toInt32();
    } else {
      if (base) {
        break;
      }
      base = other;
    }

    last = add;
    if (last->isRecoveredOnBailout()) {
      return;
    }
  }
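
  // No base was found, so the chain computes only (index << shift) +
  // displacement. The only rewrite attempted here is to drop a following
  // bitand whose mask clears no bits beyond those already zeroed by the shift
  // (and which the shift-aligned displacement cannot set).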
  if (!base) {
    uint32_t elemSize = 1 << ScaleToShift(scale);
    if (displacement % elemSize != 0) {
      return;
    }

    if (!last->hasOneUse()) {
      return;
    }

    MUseIterator use = last->usesBegin();
    if (!use->consumer()->isDefinition() ||
        !use->consumer()->toDefinition()->isBitAnd()) {
      return;
    }

    MBitAnd* bitAnd = use->consumer()->toDefinition()->toBitAnd();
    if (bitAnd->isRecoveredOnBailout()) {
      return;
    }

    MDefinition* other = bitAnd->getOperand(1 - bitAnd->indexOf(*use));
    MConstant* otherConst = other->maybeConstantValue();
    if (!otherConst || otherConst->type() != MIRType::Int32) {
      return;
    }

    uint32_t bitsClearedByShift = elemSize - 1;
    uint32_t bitsClearedByMask = ~uint32_t(otherConst->toInt32());
    if ((bitsClearedByShift & bitsClearedByMask) != bitsClearedByMask) {
      return;
    }

    bitAnd->replaceAllUsesWith(last);
    return;
  }
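
  // A base was found: replace the whole add chain with a single
  // base + (index << scale) + displacement effective-address node.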
  if (base->isRecoveredOnBailout()) {
    return;
  }

  MEffectiveAddress* eaddr =
      MEffectiveAddress::New(alloc, base, index, scale, displacement);
  last->replaceAllUsesWith(eaddr);
  last->block()->insertAfter(last, eaddr);
}

// Transform:
//
//   [AddI]
//   addl       $9, %esi
//   [LoadUnboxedScalar]
//   movsd      0x0(%rbx,%rsi,8), %xmm4
//
// into:
//
//   [LoadUnboxedScalar]
//   movsd      0x48(%rbx,%rsi,8), %xmm4
//
// This is possible when the AddI is only used by the LoadUnboxedScalar opcode.
static void AnalyzeLoadUnboxedScalar(MLoadUnboxedScalar* load) {
  if (load->isRecoveredOnBailout()) {
    return;
  }

  if (!load->getOperand(1)->isAdd()) {
    return;
  }

  JitSpew(JitSpew_EAA, "analyze: %s%u", load->opName(), load->id());

  MAdd* add = load->getOperand(1)->toAdd();

  if (add->type() != MIRType::Int32 || !add->hasUses() ||
      add->truncateKind() != TruncateKind::Truncate) {
    return;
  }

  MDefinition* lhs = add->lhs();
  MDefinition* rhs = add->rhs();
  MDefinition* constant = nullptr;
  MDefinition* node = nullptr;

  if (lhs->isConstant()) {
    constant = lhs;
    node = rhs;
  } else if (rhs->isConstant()) {
    constant = rhs;
    node = lhs;
  } else {
    return;
  }

  MOZ_ASSERT(constant->type() == MIRType::Int32);
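
  // Fold the constant index into the load's offset adjustment:
  // new offset = old offset + constant * byteSize(storageType), giving up on
  // the optimization if either step overflows int32.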
  size_t storageSize = Scalar::byteSize(load->storageType());
  int32_t c1 = load->offsetAdjustment();
  int32_t c2 = 0;
  if (!SafeMul(constant->maybeConstantValue()->toInt32(), storageSize, &c2)) {
    return;
  }

  int32_t offset = 0;
  if (!SafeAdd(c1, c2, &offset)) {
    return;
  }

  JitSpew(JitSpew_EAA, "set offset: %d + %d = %d on: %s%u", c1, c2, offset,
          load->opName(), load->id());
  load->setOffsetAdjustment(offset);
  load->replaceOperand(1, node);
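
  // The add is no longer used by the load. If it has no other live uses and
  // can be recovered on bailout, mark it as such so it can be recomputed
  // during bailout instead of being kept alive.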
  if (!add->hasLiveDefUses() && DeadIfUnused(add) &&
      add->canRecoverOnBailout()) {
    JitSpew(JitSpew_EAA, "mark as recovered on bailout: %s%u", add->opName(),
            add->id());
    add->setRecoveredOnBailoutUnchecked();
  }
}
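
// For asm.js heap accesses with a constant base, remove the bounds check when
// the whole access provably fits within the minimum heap length.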
template <typename AsmJSMemoryAccess>
void EffectiveAddressAnalysis::analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins) {
  MDefinition* base = ins->base();

  if (base->isConstant()) {
    // If the index is within the minimum heap length, we can optimize away the
    // bounds check. Asm.js accesses always have an int32 base, and the memory
    // is always a memory32.
    int32_t imm = base->toConstant()->toInt32();
    if (imm >= 0) {
      int32_t end = (uint32_t)imm + ins->byteSize();
      if (end >= imm && (uint32_t)end <= mir_->minWasmMemory0Length()) {
        ins->removeBoundsCheck();
      }
    }
  }
}

// This analysis converts patterns of the form:
//   truncate(x + (y << {0,1,2,3}))
//   truncate(x + (y << {0,1,2,3}) + imm32)
// into a single lea instruction, and patterns of the form:
//   asmload(x + imm32)
//   asmload(x << {0,1,2,3})
//   asmload((x << {0,1,2,3}) + imm32)
//   asmload((x << {0,1,2,3}) & mask)            (where mask is redundant
//                                                 with shift)
//   asmload(((x << {0,1,2,3}) + imm32) & mask)  (where mask is redundant
//                                                 with shift + imm32)
// into a single asmload instruction (and for asmstore too).
//
// Additionally, we should consider the general forms:
//   truncate(x + y + imm32)
//   truncate((y << {0,1,2,3}) + imm32)
bool EffectiveAddressAnalysis::analyze() {
  JitSpew(JitSpew_EAA, "Begin");
  for (ReversePostorderIterator block(graph_.rpoBegin());
       block != graph_.rpoEnd(); block++) {
    for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
      if (!graph_.alloc().ensureBallast()) {
        return false;
      }

      // Note that we don't check for MWasmCompareExchangeHeap
      // or MWasmAtomicBinopHeap, because the backend and the OOB
      // mechanism don't support non-zero offsets for them yet
      // (TODO bug 1254935).
      if (i->isLsh()) {
        AnalyzeLsh(graph_.alloc(), i->toLsh());
      } else if (i->isLoadUnboxedScalar()) {
        AnalyzeLoadUnboxedScalar(i->toLoadUnboxedScalar());
      } else if (i->isAsmJSLoadHeap()) {
        analyzeAsmJSHeapAccess(i->toAsmJSLoadHeap());
      } else if (i->isAsmJSStoreHeap()) {
        analyzeAsmJSHeapAccess(i->toAsmJSStoreHeap());
      }
    }
  }
  return true;
}