/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/AlignmentMaskAnalysis.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"

using namespace js;
using namespace jit;

static bool IsAlignmentMask(uint32_t m) {
  // Test whether m is just leading ones and trailing zeros.
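  // -m & m isolates the lowest set bit of m, so -m & ~m is nonzero exactly
  // when some bit above m's lowest set bit is clear, i.e. when the mask has
  // a "hole". For example, m = 0xFFFFFFF8 passes (-m = 0x8, ~m = 0x7, so
  // the AND is 0), while m = 0xFF00FF00 fails (-m & ~m == 0x00FF0000).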
  return (-m & ~m) == 0;
}

static void AnalyzeAsmHeapAddress(MDefinition* ptr, MIRGraph& graph) {
  // Fold (a+i)&m to (a&m)+i, provided that this doesn't change the result,
  // since the users of the BitAnd include heap accesses. This will expose
  // the redundancy for GVN when expressions like this:
  //   a&m
  //   (a+1)&m,
  //   (a+2)&m,
  // are transformed into this:
  //   a&m
  //   (a&m)+1
  //   (a&m)+2
  // and it will allow the constants to be folded by the
  // EffectiveAddressAnalysis pass.
  //
  // Putting the add on the outside might seem like it exposes other users of
  // the expression to the possibility of i32 overflow, if we aren't in wasm
  // and they aren't naturally truncating. However, since we create the MAdd
  // with TruncateKind::Truncate, we make sure that the value is truncated,
  // just as it would be by the MBitAnd.
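  //
  // For instance, with m = 0xFFFFFFFC, i = 4, and a = 0xFFFFFFFE, the old
  // (a+i)&m wraps to 2&m = 0, and the new (a&m)+i = 0xFFFFFFFC+4 also wraps
  // to 0, so all users observe the same 32 bits either way.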

  MOZ_ASSERT(IsCompilingWasm());

  if (!ptr->isBitAnd()) {
    return;
  }

  MDefinition* lhs = ptr->toBitAnd()->getOperand(0);
  MDefinition* rhs = ptr->toBitAnd()->getOperand(1);
  if (lhs->isConstant()) {
    std::swap(lhs, rhs);
  }
  if (!lhs->isAdd() || !rhs->isConstant()) {
    return;
  }

  MDefinition* op0 = lhs->toAdd()->getOperand(0);
  MDefinition* op1 = lhs->toAdd()->getOperand(1);
  if (op0->isConstant()) {
    std::swap(op0, op1);
  }
  if (!op1->isConstant()) {
    return;
  }

  uint32_t i = op1->toConstant()->toInt32();
  uint32_t m = rhs->toConstant()->toInt32();
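  // With m of the form 1...10...0 (say, k trailing zeros), (i & m) == i
  // means i is a multiple of 2^k, so adding i to a never carries into the
  // masked-off low k bits, and (a+i)&m == (a&m)+i for every a (mod 2^32).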
  if (!IsAlignmentMask(m) || (i & m) != i) {
    return;
  }

  // The pattern was matched! Produce the replacement expression.
  MInstruction* and_ = MBitAnd::New(graph.alloc(), op0, rhs, MIRType::Int32);
  ptr->block()->insertBefore(ptr->toBitAnd(), and_);
  auto* add = MAdd::New(graph.alloc(), and_, op1, TruncateKind::Truncate);
  ptr->block()->insertBefore(ptr->toBitAnd(), add);
  ptr->replaceAllUsesWith(add);
  ptr->block()->discard(ptr->toBitAnd());
}

bool AlignmentMaskAnalysis::analyze() {
  for (ReversePostorderIterator block(graph_.rpoBegin());
       block != graph_.rpoEnd(); block++) {
    for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
      if (!graph_.alloc().ensureBallast()) {
        return false;
      }

      // Note that we don't check for MWasmCompareExchangeHeap
      // or MWasmAtomicBinopHeap, because the backend and the OOB
      // mechanism don't support non-zero offsets for them yet.
      if (i->isAsmJSLoadHeap()) {
        AnalyzeAsmHeapAddress(i->toAsmJSLoadHeap()->base(), graph_);
      } else if (i->isAsmJSStoreHeap()) {
        AnalyzeAsmHeapAddress(i->toAsmJSStoreHeap()->base(), graph_);
      }
    }
  }
  return true;
}