/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm/Lowering-arm.h"

#include "mozilla/MathAlgorithms.h"

#include "jit/arm/Assembler-arm.h"
#include "jit/Lowering.h"

#include "jit/shared/Lowering-shared-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::FloorLog2;

LBoxAllocation LIRGeneratorARM::useBoxFixed(MDefinition* mir, Register reg1,
                                            Register reg2, bool useAtStart) {
  MOZ_ASSERT(mir->type() == MIRType::Value);
  MOZ_ASSERT(reg1 != reg2);

  ensureDefined(mir);
  return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
                        LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
}
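
// On 32-bit ARM a Value is boxed across two GPRs (nunbox32): reg1 carries the
// type tag half and reg2 the payload half, hence the two LUses and the
// assertion that the registers are distinct.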

LAllocation LIRGeneratorARM::useByteOpRegister(MDefinition* mir) {
  return useRegister(mir);
}

LAllocation LIRGeneratorARM::useByteOpRegisterAtStart(MDefinition* mir) {
  return useRegisterAtStart(mir);
}

LAllocation LIRGeneratorARM::useByteOpRegisterOrNonDoubleConstant(
    MDefinition* mir) {
  return useRegisterOrNonDoubleConstant(mir);
}

LDefinition LIRGeneratorARM::tempByteOpRegister() { return temp(); }

void LIRGenerator::visitBox(MBox* box) {
  MDefinition* inner = box->getOperand(0);

  // If the box wrapped a double, it needs a new register.
  if (IsFloatingPointType(inner->type())) {
    defineBox(new (alloc()) LBoxFloatingPoint(
                  useRegisterAtStart(inner), tempCopy(inner, 0), inner->type()),
              box);
    return;
  }

  if (box->canEmitAtUses()) {
    emitAtUses(box);
    return;
  }

  if (inner->isConstant()) {
    defineBox(new (alloc()) LValue(inner->toConstant()->toJSValue()), box);
    return;
  }

  LBox* lir = new (alloc()) LBox(use(inner), inner->type());

  // Otherwise, we should not define a new register for the payload portion
  // of the output, so bypass defineBox().
  uint32_t vreg = getVirtualRegister();

  // Note that because we're using BogusTemp(), we do not change the type of
  // the definition. We also do not define the first output as "TYPE",
  // because it has no corresponding payload at (vreg + 1). Also note that
  // although we copy the input's original type for the payload half of the
  // definition, this is only for clarity. BogusTemp() definitions are
  // ignored.
  lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
  lir->setDef(1, LDefinition::BogusTemp());
  box->setVirtualRegister(vreg);
  add(lir);
}
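
// With nunbox32, the type half of a box lives at vreg and the payload half at
// (vreg + 1), which is why the code above hands out the vreg pair itself
// instead of going through defineBox().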

void LIRGenerator::visitUnbox(MUnbox* unbox) {
  MDefinition* inner = unbox->getOperand(0);

  if (inner->type() == MIRType::ObjectOrNull) {
    LUnboxObjectOrNull* lir =
        new (alloc()) LUnboxObjectOrNull(useRegisterAtStart(inner));
    if (unbox->fallible()) {
      assignSnapshot(lir, unbox->bailoutKind());
    }
    defineReuseInput(lir, unbox, 0);
    return;
  }

  // An unbox on arm reads in a type tag (either in memory or a register) and
  // a payload. Unlike most instructions consuming a box, we ask for the type
  // second, so that the result can re-use the first input.
  MOZ_ASSERT(inner->type() == MIRType::Value);

  ensureDefined(inner);

  if (IsFloatingPointType(unbox->type())) {
    LUnboxFloatingPoint* lir =
        new (alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
    if (unbox->fallible()) {
      assignSnapshot(lir, unbox->bailoutKind());
    }
    define(lir, unbox);
    return;
  }

  // Swap the order we use the box pieces so we can re-use the payload
  // register.
  LUnbox* lir = new (alloc()) LUnbox;
  lir->setOperand(0, usePayloadInRegisterAtStart(inner));
  lir->setOperand(1, useType(inner, LUse::REGISTER));

  if (unbox->fallible()) {
    assignSnapshot(lir, unbox->bailoutKind());
  }

  // Types and payloads form two separate intervals. If the type becomes dead
  // before the payload, it could be used as a Value without the type being
  // recoverable. Unbox's purpose is to eagerly kill the definition of a type
  // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
  // Instead, we create a new virtual register.
  defineReuseInput(lir, unbox, 0);
}
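
// Concretely: operand 0 (the payload) is an at-start register use, so
// defineReuseInput can hand the unbox result the payload's register, while
// the type tag, consumed only for the guard, stays in an independent vreg.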

void LIRGenerator::visitReturn(MReturn* ret) {
  MDefinition* opd = ret->getOperand(0);
  MOZ_ASSERT(opd->type() == MIRType::Value);

  LReturn* ins = new (alloc()) LReturn;
  ins->setOperand(0, LUse(JSReturnReg_Type));
  ins->setOperand(1, LUse(JSReturnReg_Data));
  fillBoxUses(ins, 0, opd);
  add(ins);
}

void LIRGeneratorARM::defineInt64Phi(MPhi* phi, size_t lirIndex) {
  LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
  LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);

  uint32_t lowVreg = getVirtualRegister();

  phi->setVirtualRegister(lowVreg);

  uint32_t highVreg = getVirtualRegister();
  MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);

  low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
  high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
  annotate(high);
  annotate(low);
}
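
// The assertion above pins down the layout: on little-endian ARM,
// INT64LOW_INDEX == 0 and INT64HIGH_INDEX == 1, so an Int64 phi occupies the
// consecutive vreg pair (lowVreg, lowVreg + 1).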

void LIRGeneratorARM::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
                                         LBlock* block, size_t lirIndex) {
  MDefinition* operand = phi->getOperand(inputPosition);
  LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
  LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
  low->setOperand(inputPosition,
                  LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
  high->setOperand(
      inputPosition,
      LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
}

void LIRGeneratorARM::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
                                  MDefinition* mir, MDefinition* input) {
  ins->setOperand(
      0, ins->snapshot() ? useRegister(input) : useRegisterAtStart(input));
  define(
      ins, mir,
      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

void LIRGeneratorARM::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
                                  MDefinition* mir, MDefinition* lhs,
                                  MDefinition* rhs) {
  // Some operations depend on checking inputs after writing the result, e.g.
  // MulI, but only for bail out paths so useAtStart when no bailouts.
  ins->setOperand(0,
                  ins->snapshot() ? useRegister(lhs) : useRegisterAtStart(lhs));
  ins->setOperand(1, ins->snapshot() ? useRegisterOrConstant(rhs)
                                     : useRegisterOrConstantAtStart(rhs));
  define(
      ins, mir,
      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}
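
// This is the common pattern for fallible ALU ops: with a snapshot attached,
// the inputs must stay live past the write of the result (MulI, for example,
// re-checks its inputs on the bailout path), so the at-start variants are
// only safe when no snapshot is present.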

void LIRGeneratorARM::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}

void LIRGeneratorARM::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                       MDefinition* lhs, MDefinition* rhs) {
  bool needsTemp = true;

  if (rhs->isConstant()) {
    int64_t constant = rhs->toConstant()->toInt64();
    int32_t shift = mozilla::FloorLog2(constant);
    // See special cases in CodeGeneratorARM::visitMulI64
    if (constant >= -1 && constant <= 2) {
      needsTemp = false;
    }
    if (constant > 0 && int64_t(1) << shift == constant) {
      needsTemp = false;
    }
  }

  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
  if (needsTemp) {
    ins->setTemp(0, temp());
  }

  defineInt64ReuseInput(ins, mir, 0);
}
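
// The temp elision mirrors the special cases in CodeGeneratorARM::visitMulI64:
// for constants in [-1, 2] and for exact powers of two, the multiply reduces
// to moves, adds, negates, or shifts, none of which needs the scratch
// register required by the general 64-bit multiply sequence.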

void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
                                  MDefinition* mir, MDefinition* input) {
  ins->setOperand(0, useRegisterAtStart(input));
  define(
      ins, mir,
      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

template <size_t Temps>
void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins,
                                  MDefinition* mir, MDefinition* lhs,
                                  MDefinition* rhs) {
  ins->setOperand(0, useRegisterAtStart(lhs));
  ins->setOperand(1, useRegisterAtStart(rhs));
  define(
      ins, mir,
      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

template void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, 0>* ins,
                                           MDefinition* mir, MDefinition* lhs,
                                           MDefinition* rhs);
template void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, 1>* ins,
                                           MDefinition* mir, MDefinition* lhs,
                                           MDefinition* rhs);

void LIRGeneratorARM::lowerForBitAndAndBranch(LBitAndAndBranch* baab,
                                              MInstruction* mir,
                                              MDefinition* lhs,
                                              MDefinition* rhs) {
  baab->setOperand(0, useRegisterAtStart(lhs));
  baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
  add(baab, mir);
}

void LIRGeneratorARM::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
                                           LBlock* block, size_t lirIndex) {
  MDefinition* operand = phi->getOperand(inputPosition);
  LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
  LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
  type->setOperand(
      inputPosition,
      LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
  payload->setOperand(inputPosition,
                      LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
}

void LIRGeneratorARM::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
                                    MDefinition* mir, MDefinition* lhs,
                                    MDefinition* rhs) {
  ins->setOperand(0, useRegister(lhs));
  ins->setOperand(1, useRegisterOrConstant(rhs));
  define(ins, mir);
}

template <size_t Temps>
void LIRGeneratorARM::lowerForShiftInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  if (mir->isRotate() && !rhs->isConstant()) {
    ins->setTemp(0, temp());
  }

  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}

template void LIRGeneratorARM::lowerForShiftInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
template void LIRGeneratorARM::lowerForShiftInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 1>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs);

void LIRGeneratorARM::lowerDivI(MDiv* div) {
  if (div->isUnsigned()) {
    lowerUDiv(div);
    return;
  }

  // Division instructions are slow. Division by constant denominators can be
  // rewritten to use other instructions.
  if (div->rhs()->isConstant()) {
    int32_t rhs = div->rhs()->toConstant()->toInt32();
    // Check for division by a positive power of two, which is an easy and
    // important case to optimize. Note that other optimizations are also
    // possible; division by negative powers of two can be optimized in a
    // similar manner as positive powers of two, and division by other
    // constants can be optimized by a reciprocal multiplication technique.
    int32_t shift = FloorLog2(rhs);
    if (rhs > 0 && 1 << shift == rhs) {
      LDivPowTwoI* lir =
          new (alloc()) LDivPowTwoI(useRegisterAtStart(div->lhs()), shift);
      if (div->fallible()) {
        assignSnapshot(lir, BailoutKind::DoubleOutput);
      }
      define(lir, div);
      return;
    }
  }

  if (HasIDIV()) {
    LDivI* lir = new (alloc())
        LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
    if (div->fallible()) {
      assignSnapshot(lir, BailoutKind::DoubleOutput);
    }
    define(lir, div);
    return;
  }

  LSoftDivI* lir = new (alloc()) LSoftDivI(useFixedAtStart(div->lhs(), r0),
                                           useFixedAtStart(div->rhs(), r1));

  if (div->fallible()) {
    assignSnapshot(lir, BailoutKind::DoubleOutput);
  }

  defineReturn(lir, div);
}
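
// For example, x / 8 satisfies rhs > 0 and 1 << 3 == 8, so it takes the
// LDivPowTwoI path and compiles down to a shift; the attached snapshot bails
// out for inputs a shift cannot represent, such as a nonzero remainder when
// the result must be an exact int32.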

void LIRGeneratorARM::lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs) {
  LMulI* lir = new (alloc()) LMulI;
  if (mul->fallible()) {
    assignSnapshot(lir, BailoutKind::DoubleOutput);
  }
  lowerForALU(lir, mul, lhs, rhs);
}

void LIRGeneratorARM::lowerModI(MMod* mod) {
  if (mod->isUnsigned()) {
    lowerUMod(mod);
    return;
  }

  if (mod->rhs()->isConstant()) {
    int32_t rhs = mod->rhs()->toConstant()->toInt32();
    int32_t shift = FloorLog2(rhs);
    if (rhs > 0 && 1 << shift == rhs) {
      LModPowTwoI* lir =
          new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
      if (mod->fallible()) {
        assignSnapshot(lir, BailoutKind::DoubleOutput);
      }
      define(lir, mod);
      return;
    }
    if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
      LModMaskI* lir = new (alloc())
          LModMaskI(useRegister(mod->lhs()), temp(), temp(), shift + 1);
      if (mod->fallible()) {
        assignSnapshot(lir, BailoutKind::DoubleOutput);
      }
      define(lir, mod);
      return;
    }
  }

  if (HasIDIV()) {
    LModI* lir = new (alloc())
        LModI(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
    if (mod->fallible()) {
      assignSnapshot(lir, BailoutKind::DoubleOutput);
    }
    define(lir, mod);
    return;
  }

  LSoftModI* lir =
      new (alloc()) LSoftModI(useFixedAtStart(mod->lhs(), r0),
                              useFixedAtStart(mod->rhs(), r1), tempFixed(r2));

  if (mod->fallible()) {
    assignSnapshot(lir, BailoutKind::DoubleOutput);
  }

  defineReturn(lir, mod);
}
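
// Two constant-modulus fast paths appear above: a positive power of two
// (LModPowTwoI, essentially a bit mask) and a mask-shaped modulus of the form
// (1 << (shift + 1)) - 1 (LModMaskI). Everything else uses hardware division
// when HasIDIV() holds, or the software fallback otherwise.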

void LIRGeneratorARM::lowerDivI64(MDiv* div) {
  if (div->isUnsigned()) {
    lowerUDivI64(div);
    return;
  }

  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
      useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()));
  defineReturn(lir, div);
}

void LIRGeneratorARM::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
  MOZ_CRASH("We don't use runtime div for this architecture");
}

void LIRGeneratorARM::lowerModI64(MMod* mod) {
  if (mod->isUnsigned()) {
    lowerUModI64(mod);
    return;
  }

  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
      useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()));
  defineReturn(lir, mod);
}

void LIRGeneratorARM::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
  MOZ_CRASH("We don't use runtime mod for this architecture");
}

void LIRGeneratorARM::lowerUDivI64(MDiv* div) {
  LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
      useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()));
  defineReturn(lir, div);
}

void LIRGeneratorARM::lowerUModI64(MMod* mod) {
  LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
      useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()));
  defineReturn(lir, mod);
}

void LIRGenerator::visitPowHalf(MPowHalf* ins) {
  MDefinition* input = ins->input();
  MOZ_ASSERT(input->type() == MIRType::Double);
  LPowHalfD* lir = new (alloc()) LPowHalfD(useRegisterAtStart(input));
  defineReuseInput(lir, ins, 0);
}

LTableSwitch* LIRGeneratorARM::newLTableSwitch(const LAllocation& in,
                                               const LDefinition& inputCopy,
                                               MTableSwitch* tableswitch) {
  return new (alloc()) LTableSwitch(in, inputCopy, tableswitch);
}

LTableSwitchV* LIRGeneratorARM::newLTableSwitchV(MTableSwitch* tableswitch) {
  return new (alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)), temp(),
                                     tempDouble(), tableswitch);
}

void LIRGeneratorARM::lowerUrshD(MUrsh* mir) {
  MDefinition* lhs = mir->lhs();
  MDefinition* rhs = mir->rhs();

  MOZ_ASSERT(lhs->type() == MIRType::Int32);
  MOZ_ASSERT(rhs->type() == MIRType::Int32);

  LUrshD* lir = new (alloc())
      LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
  define(lir, mir);
}

void LIRGeneratorARM::lowerPowOfTwoI(MPow* mir) {
  int32_t base = mir->input()->toConstant()->toInt32();
  MDefinition* power = mir->power();

  auto* lir = new (alloc()) LPowOfTwoI(base, useRegister(power));
  assignSnapshot(lir, BailoutKind::PrecisionLoss);
  define(lir, mir);
}
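
// A sketch of the intent: with a constant power-of-two base b == 2^k, b**p
// can be computed by shifting, roughly 1 << (k * p); the PrecisionLoss
// snapshot bails out when the exact result does not fit the int32 range.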

void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
  if (ins->type() == MIRType::Int32) {
    define(new (alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
  } else if (ins->type() == MIRType::Float32) {
    define(new (alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
  } else {
    MOZ_ASSERT(ins->type() == MIRType::Double);
    define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
  }
}

void LIRGeneratorARM::lowerUDiv(MDiv* div) {
  MDefinition* lhs = div->getOperand(0);
  MDefinition* rhs = div->getOperand(1);

  if (HasIDIV()) {
    LUDiv* lir = new (alloc()) LUDiv;
    lir->setOperand(0, useRegister(lhs));
    lir->setOperand(1, useRegister(rhs));
    if (div->fallible()) {
      assignSnapshot(lir, BailoutKind::DoubleOutput);
    }
    define(lir, div);
    return;
  }

  LSoftUDivOrMod* lir = new (alloc())
      LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1));

  if (div->fallible()) {
    assignSnapshot(lir, BailoutKind::DoubleOutput);
  }

  defineReturn(lir, div);
}

void LIRGeneratorARM::lowerUMod(MMod* mod) {
  MDefinition* lhs = mod->getOperand(0);
  MDefinition* rhs = mod->getOperand(1);

  if (HasIDIV()) {
    LUMod* lir = new (alloc()) LUMod;
    lir->setOperand(0, useRegister(lhs));
    lir->setOperand(1, useRegister(rhs));
    if (mod->fallible()) {
      assignSnapshot(lir, BailoutKind::DoubleOutput);
    }
    define(lir, mod);
    return;
  }

  LSoftUDivOrMod* lir = new (alloc())
      LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1));

  if (mod->fallible()) {
    assignSnapshot(lir, BailoutKind::DoubleOutput);
  }

  defineReturn(lir, mod);
}

void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
  LWasmUint32ToDouble* lir =
      new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
  define(lir, ins);
}

void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
  LWasmUint32ToFloat32* lir =
      new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
  define(lir, ins);
}

void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
  auto* lir = new (alloc()) LWasmHeapBase(LAllocation());
  define(lir, ins);
}

void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
    auto* lir = new (alloc()) LWasmAtomicLoadI64(useRegisterAtStart(base));
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(IntArgReg1)),
                                      LAllocation(AnyRegister(IntArgReg0))));
    return;
  }

  LAllocation ptr = useRegisterAtStart(base);

  if (IsUnaligned(ins->access())) {
    MOZ_ASSERT(!ins->access().isAtomic());

    // Unaligned access expected! Revert to a byte load.
    LDefinition ptrCopy = tempCopy(base, 0);

    LDefinition noTemp = LDefinition::BogusTemp();
    if (ins->type() == MIRType::Int64) {
      auto* lir = new (alloc())
          LWasmUnalignedLoadI64(ptr, ptrCopy, temp(), noTemp, noTemp);
      defineInt64(lir, ins);
      return;
    }

    LDefinition temp2 = noTemp;
    LDefinition temp3 = noTemp;
    if (IsFloatingPointType(ins->type())) {
      // For putting the low value in a GPR.
      temp2 = temp();
      // For putting the high value in a GPR.
      if (ins->type() == MIRType::Double) {
        temp3 = temp();
      }
    }

    auto* lir =
        new (alloc()) LWasmUnalignedLoad(ptr, ptrCopy, temp(), temp2, temp3);
    define(lir, ins);
    return;
  }

  if (ins->type() == MIRType::Int64) {
    auto* lir = new (alloc()) LWasmLoadI64(ptr);
    if (ins->access().offset() || ins->access().type() == Scalar::Int64) {
      lir->setTemp(0, tempCopy(base, 0));
    }
    defineInt64(lir, ins);
    return;
  }

  auto* lir = new (alloc()) LWasmLoad(ptr);
  if (ins->access().offset()) {
    lir->setTemp(0, tempCopy(base, 0));
  }

  define(lir, ins);
}
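
// The unaligned path above trades registers for safety: the access is done
// bytewise through GPRs (ptrCopy preserves the address, temp2/temp3 stage the
// low and high words), so floating-point results additionally pay for
// GPR -> FPU moves.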

void LIRGenerator::visitWasmStore(MWasmStore* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
    auto* lir = new (alloc()) LWasmAtomicStoreI64(
        useRegister(base),
        useInt64Fixed(ins->value(), Register64(IntArgReg1, IntArgReg0)),
        tempFixed(IntArgReg2), tempFixed(IntArgReg3));
    add(lir, ins);
    return;
  }

  LAllocation ptr = useRegisterAtStart(base);

  if (IsUnaligned(ins->access())) {
    MOZ_ASSERT(!ins->access().isAtomic());

    // Unaligned access expected! Revert to a byte store.
    LDefinition ptrCopy = tempCopy(base, 0);

    MIRType valueType = ins->value()->type();
    if (valueType == MIRType::Int64) {
      LInt64Allocation value = useInt64RegisterAtStart(ins->value());
      auto* lir =
          new (alloc()) LWasmUnalignedStoreI64(ptr, value, ptrCopy, temp());
      add(lir, ins);
      return;
    }

    LAllocation value = useRegisterAtStart(ins->value());
    LDefinition valueHelper = IsFloatingPointType(valueType)
                                  ? temp()  // to do a FPU -> GPR move.
                                  : tempCopy(base, 1);  // to clobber the value.

    auto* lir =
        new (alloc()) LWasmUnalignedStore(ptr, value, ptrCopy, valueHelper);
    add(lir, ins);
    return;
  }

  if (ins->value()->type() == MIRType::Int64) {
    LInt64Allocation value = useInt64RegisterAtStart(ins->value());
    auto* lir = new (alloc()) LWasmStoreI64(ptr, value);
    if (ins->access().offset() || ins->access().type() == Scalar::Int64) {
      lir->setTemp(0, tempCopy(base, 0));
    }
    add(lir, ins);
    return;
  }

  LAllocation value = useRegisterAtStart(ins->value());
  auto* lir = new (alloc()) LWasmStore(ptr, value);

  if (ins->access().offset()) {
    lir->setTemp(0, tempCopy(base, 0));
  }

  add(lir, ins);
}

void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
  MOZ_ASSERT(ins->offset() == 0);

  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  // For the ARM it is best to keep the 'base' in a register if a bounds check
  // is needed.
  LAllocation baseAlloc;
  LAllocation limitAlloc;

  if (base->isConstant() && !ins->needsBoundsCheck()) {
    // A bounds check is only skipped for a positive index.
    MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
    baseAlloc = LAllocation(base->toConstant());
  } else {
    baseAlloc = useRegisterAtStart(base);
    if (ins->needsBoundsCheck()) {
      MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
      MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
      limitAlloc = useRegisterAtStart(boundsCheckLimit);
    }
  }

  define(new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc), ins);
}
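
// Example of the constant-base case: an asm.js access such as HEAP32[16 >> 2]
// whose bounds check has been eliminated encodes the base directly as an
// immediate and consumes no register; otherwise the base and, when checked,
// the limit both stay in registers, as the comment above prefers for ARM.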

void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
  MOZ_ASSERT(ins->offset() == 0);

  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  LAllocation baseAlloc;
  LAllocation limitAlloc;

  if (base->isConstant() && !ins->needsBoundsCheck()) {
    MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
    baseAlloc = LAllocation(base->toConstant());
  } else {
    baseAlloc = useRegisterAtStart(base);
    if (ins->needsBoundsCheck()) {
      MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
      MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
      limitAlloc = useRegisterAtStart(boundsCheckLimit);
    }
  }

  add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
                                    limitAlloc),
      ins);
}

void LIRGeneratorARM::lowerTruncateDToInt32(MTruncateToInt32* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double);

  define(new (alloc())
             LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()),
         ins);
}

void LIRGeneratorARM::lowerTruncateFToInt32(MTruncateToInt32* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Float32);

  define(new (alloc())
             LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()),
         ins);
}

void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(HasLDSTREXBHD());
  MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);

  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::Int32);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index = useRegisterOrConstant(ins->index());

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.

  const LAllocation value = useRegister(ins->value());
  LDefinition tempDef = LDefinition::BogusTemp();
  if (ins->arrayType() == Scalar::Uint32) {
    MOZ_ASSERT(ins->type() == MIRType::Double);
    tempDef = temp();
  }

  LAtomicExchangeTypedArrayElement* lir = new (alloc())
      LAtomicExchangeTypedArrayElement(elements, index, value, tempDef);

  define(lir, ins);
}

void LIRGenerator::visitAtomicTypedArrayElementBinop(
    MAtomicTypedArrayElementBinop* ins) {
  MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
  MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
  MOZ_ASSERT(ins->arrayType() != Scalar::Float64);

  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::Int32);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index = useRegisterOrConstant(ins->index());
  const LAllocation value = useRegister(ins->value());

  if (!ins->hasUses()) {
    LAtomicTypedArrayElementBinopForEffect* lir = new (alloc())
        LAtomicTypedArrayElementBinopForEffect(elements, index, value,
                                               /* flagTemp= */ temp());
    add(lir, ins);
    return;
  }

  // For a Uint32Array with a known double result we need a temp for
  // the intermediate output.
  //
  // Optimization opportunity (bug 1077317): We can do better by
  // allowing 'value' to remain as an imm32 if it is small enough to
  // fit in an instruction.

  LDefinition flagTemp = temp();
  LDefinition outTemp = LDefinition::BogusTemp();

  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
    outTemp = temp();
  }

  // On arm, map flagTemp to temp1 and outTemp to temp2, at least for now.

  LAtomicTypedArrayElementBinop* lir = new (alloc())
      LAtomicTypedArrayElementBinop(elements, index, value, flagTemp, outTemp);
  define(lir, ins);
}

void LIRGenerator::visitCompareExchangeTypedArrayElement(
    MCompareExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
  MOZ_ASSERT(ins->arrayType() != Scalar::Float64);

  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::Int32);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index = useRegisterOrConstant(ins->index());

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.
  //
  // Optimization opportunity (bug 1077317): We could do better by
  // allowing oldval to remain an immediate, if it is small enough
  // to fit in an instruction.

  const LAllocation newval = useRegister(ins->newval());
  const LAllocation oldval = useRegister(ins->oldval());
  LDefinition tempDef = LDefinition::BogusTemp();
  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
    tempDef = temp();
  }

  LCompareExchangeTypedArrayElement* lir =
      new (alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval,
                                                      newval, tempDef);

  define(lir, ins);
}

void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  if (ins->access().type() == Scalar::Int64) {
    // The three register pairs must be distinct.
    auto* lir = new (alloc()) LWasmCompareExchangeI64(
        useRegister(base), useInt64Fixed(ins->oldValue(), CmpXchgOld64),
        useInt64Fixed(ins->newValue(), CmpXchgNew64));
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(CmpXchgOutHi)),
                                      LAllocation(AnyRegister(CmpXchgOutLo))));
    return;
  }

  MOZ_ASSERT(ins->access().type() < Scalar::Float32);
  MOZ_ASSERT(HasLDSTREXBHD(), "by HasPlatformSupport() constraints");

  LWasmCompareExchangeHeap* lir = new (alloc())
      LWasmCompareExchangeHeap(useRegister(base), useRegister(ins->oldValue()),
                               useRegister(ins->newValue()));

  define(lir, ins);
}

void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
  MOZ_ASSERT(ins->base()->type() == MIRType::Int32);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc()) LWasmAtomicExchangeI64(
        useRegister(ins->base()), useInt64Fixed(ins->value(), XchgNew64),
        ins->access());
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(XchgOutHi)),
                                      LAllocation(AnyRegister(XchgOutLo))));
    return;
  }

  MOZ_ASSERT(ins->access().type() < Scalar::Float32);
  MOZ_ASSERT(HasLDSTREXBHD(), "by HasPlatformSupport() constraints");

  const LAllocation base = useRegister(ins->base());
  const LAllocation value = useRegister(ins->value());
  define(new (alloc()) LWasmAtomicExchangeHeap(base, value), ins);
}

void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc()) LWasmAtomicBinopI64(
        useRegister(ins->base()), useInt64Fixed(ins->value(), FetchOpVal64),
        tempFixed(FetchOpTmpLo), tempFixed(FetchOpTmpHi), ins->access(),
        ins->operation());
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(FetchOpOutHi)),
                                      LAllocation(AnyRegister(FetchOpOutLo))));
    return;
  }

  MOZ_ASSERT(ins->access().type() < Scalar::Float32);
  MOZ_ASSERT(HasLDSTREXBHD(), "by HasPlatformSupport() constraints");

  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  if (!ins->hasUses()) {
    LWasmAtomicBinopHeapForEffect* lir =
        new (alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base),
                                                    useRegister(ins->value()),
                                                    /* flagTemp= */ temp());
    add(lir, ins);
    return;
  }

  LWasmAtomicBinopHeap* lir = new (alloc())
      LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
                           /* temp = */ LDefinition::BogusTemp(),
                           /* flagTemp= */ temp());
  define(lir, ins);
}

void LIRGenerator::visitSubstr(MSubstr* ins) {
  LSubstr* lir = new (alloc())
      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
              useRegister(ins->length()), temp(), temp(), tempByteOpRegister());
  define(lir, ins);
  assignSafepoint(lir, ins);
}

void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);

  defineReturn(new (alloc()) LWasmTruncateToInt64(useRegisterAtStart(opd)),
               ins);
}

void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
  MOZ_ASSERT(ins->type() == MIRType::Double || ins->type() == MIRType::Float32);

  auto* lir = new (alloc()) LInt64ToFloatingPointCall();
  lir->setInt64Operand(0, useInt64RegisterAtStart(ins->input()));
  defineReturn(lir, ins);
}

void LIRGenerator::visitCopySign(MCopySign* ins) {
  MDefinition* lhs = ins->lhs();
  MDefinition* rhs = ins->rhs();

  MOZ_ASSERT(IsFloatingPointType(lhs->type()));
  MOZ_ASSERT(lhs->type() == rhs->type());
  MOZ_ASSERT(lhs->type() == ins->type());

  LInstructionHelper<1, 2, 2>* lir;
  if (lhs->type() == MIRType::Double) {
    lir = new (alloc()) LCopySignD();
  } else {
    lir = new (alloc()) LCopySignF();
  }

  lir->setTemp(0, temp());
  lir->setTemp(1, temp());

  lowerForFPU(lir, ins, lhs, rhs);
}

void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
  auto* lir =
      new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input()));
  defineInt64(lir, ins);

  LDefinition def(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
  def.setReusedInput(0);
  def.setVirtualRegister(ins->virtualRegister());

  lir->setDef(0, def);
}

void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
  defineInt64(new (alloc())
                  LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
              ins);
}