/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/x86/Lowering-x86.h"

#include "jit/Lowering.h"
#include "jit/x86/Assembler-x86.h"

#include "jit/shared/Lowering-shared-inl.h"
using namespace js::jit;
LBoxAllocation LIRGeneratorX86::useBoxFixed(MDefinition* mir, Register reg1,
                                            Register reg2, bool useAtStart) {
  MOZ_ASSERT(mir->type() == MIRType::Value);
  MOZ_ASSERT(reg1 != reg2);

  return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
                        LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
}
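
// Background for the pair of LUses above: on 32-bit x86 a JS::Value is a
// "nunbox" split into a 32-bit type tag and a 32-bit payload, which live in
// two separate general-purpose registers. The tag uses the box's own virtual
// register and the payload uses the adjacent payload vreg (see
// VirtualRegisterOfPayload), so every boxed operand costs a register pair.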
LAllocation LIRGeneratorX86::useByteOpRegister(MDefinition* mir) {
  return useFixed(mir, eax);
}

LAllocation LIRGeneratorX86::useByteOpRegisterAtStart(MDefinition* mir) {
  return useFixedAtStart(mir, eax);
}
LAllocation LIRGeneratorX86::useByteOpRegisterOrNonDoubleConstant(
    MDefinition* mir) {
  return useFixed(mir, eax);
}

LDefinition LIRGeneratorX86::tempByteOpRegister() { return tempFixed(eax); }
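
// Why these helpers pin eax: in 32-bit mode only eax, ebx, ecx, and edx have
// byte-addressable subregisters (al, bl, cl, dl); esi, edi, ebp, and esp do
// not. Any instruction with a byte-sized operand must therefore use one of
// those four registers, and these helpers conservatively pick eax.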
void LIRGenerator::visitBox(MBox* box) {
  MDefinition* inner = box->getOperand(0);

  // If the box wrapped a double, it needs a new register.
  if (IsFloatingPointType(inner->type())) {
    LDefinition spectreTemp =
        JitOptions.spectreValueMasking ? temp() : LDefinition::BogusTemp();
    defineBox(new (alloc()) LBoxFloatingPoint(useRegisterAtStart(inner),
                                              tempCopy(inner, 0), spectreTemp,
                                              inner->type()),
              box);
    return;
  }

  if (box->canEmitAtUses()) {
    emitAtUses(box);
    return;
  }

  if (inner->isConstant()) {
    defineBox(new (alloc()) LValue(inner->toConstant()->toJSValue()), box);
    return;
  }

  LBox* lir = new (alloc()) LBox(use(inner), inner->type());

  // Otherwise, we should not define a new register for the payload portion
  // of the output, so bypass defineBox().
  uint32_t vreg = getVirtualRegister();

  // Note that because we're using BogusTemp(), we do not change the type of
  // the definition. We also do not define the first output as "TYPE",
  // because it has no corresponding payload at (vreg + 1). Also note that
  // although we copy the input's original type for the payload half of the
  // definition, this is only for clarity. BogusTemp() definitions are
  // ignored.
  lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
  lir->setDef(1, LDefinition::BogusTemp());
  box->setVirtualRegister(vreg);
  add(lir);
}
void LIRGenerator::visitUnbox(MUnbox* unbox) {
  MDefinition* inner = unbox->getOperand(0);

  // An unbox on x86 reads in a type tag (either in memory or a register) and
  // a payload. Unlike most instructions consuming a box, we ask for the type
  // second, so that the result can re-use the first input.
  MOZ_ASSERT(inner->type() == MIRType::Value);

  if (IsFloatingPointType(unbox->type())) {
    LUnboxFloatingPoint* lir =
        new (alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
    if (unbox->fallible()) {
      assignSnapshot(lir, unbox->bailoutKind());
    }
    define(lir, unbox);
    return;
  }

  // Swap the order we use the box pieces so we can re-use the payload
  // register.
  LUnbox* lir = new (alloc()) LUnbox;
  bool reusePayloadReg = !JitOptions.spectreValueMasking ||
                         unbox->type() == MIRType::Int32 ||
                         unbox->type() == MIRType::Boolean;
  if (reusePayloadReg) {
    lir->setOperand(0, usePayloadInRegisterAtStart(inner));
    lir->setOperand(1, useType(inner, LUse::ANY));
  } else {
    lir->setOperand(0, usePayload(inner, LUse::REGISTER));
    lir->setOperand(1, useType(inner, LUse::ANY));
  }

  if (unbox->fallible()) {
    assignSnapshot(lir, unbox->bailoutKind());
  }

  // Types and payloads form two separate intervals. If the type becomes dead
  // before the payload, it could be used as a Value without the type being
  // recoverable. Unbox's purpose is to eagerly kill the definition of a type
  // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
  // Instead, we create a new virtual register.
  if (reusePayloadReg) {
    defineReuseInput(lir, unbox, 0);
  } else {
    define(lir, unbox);
  }
}
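
// A note on reusePayloadReg: the code above only reuses the payload register
// when Spectre value masking is off, or when the unboxed type is Int32 or
// Boolean; for other types the masking sequence presumably needs the original
// payload intact, so a fresh output register is allocated instead.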
void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
  MOZ_ASSERT(opd->type() == MIRType::Value);

  LReturn* ins = new (alloc()) LReturn(isGenerator);
  ins->setOperand(0, LUse(JSReturnReg_Type));
  ins->setOperand(1, LUse(JSReturnReg_Data));
  fillBoxUses(ins, 0, opd);
  add(ins);
}
void LIRGeneratorX86::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
                                           LBlock* block, size_t lirIndex) {
  MDefinition* operand = phi->getOperand(inputPosition);
  LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
  LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
  type->setOperand(inputPosition,
                   LUse(operand->virtualRegister() + VREG_TYPE_OFFSET,
                        LUse::ANY));
  payload->setOperand(inputPosition,
                      LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
}
void LIRGeneratorX86::defineInt64Phi(MPhi* phi, size_t lirIndex) {
  LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
  LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);

  uint32_t lowVreg = getVirtualRegister();

  phi->setVirtualRegister(lowVreg);

  uint32_t highVreg = getVirtualRegister();
  MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);

  low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
  high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
}
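
// Int64 values follow the same pairing scheme as boxes: the low and high
// halves occupy consecutive virtual registers (lowVreg and lowVreg + 1, as
// the assert above checks), and INT64LOW_INDEX / INT64HIGH_INDEX select the
// matching LPhi for each half.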
void LIRGeneratorX86::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
                                         LBlock* block, size_t lirIndex) {
  MDefinition* operand = phi->getOperand(inputPosition);
  LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
  LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
  low->setOperand(inputPosition,
                  LUse(operand->virtualRegister() + INT64LOW_INDEX,
                       LUse::ANY));
  high->setOperand(inputPosition,
                   LUse(operand->virtualRegister() + INT64HIGH_INDEX,
                        LUse::ANY));
}
void LIRGeneratorX86::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
    MDefinition* input) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(input));
  defineInt64ReuseInput(ins, mir, 0);
}

void LIRGeneratorX86::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}
void LIRGeneratorX86::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                       MDefinition* lhs, MDefinition* rhs) {
  bool needsTemp = true;

  if (rhs->isConstant()) {
    int64_t constant = rhs->toConstant()->toInt64();
    int32_t shift = mozilla::FloorLog2(constant);
    // See special cases in CodeGeneratorX86Shared::visitMulI64.
    if (constant >= -1 && constant <= 2) {
      needsTemp = false;
    }
    if (constant > 0 && int64_t(1) << shift == constant) {
      needsTemp = false;
    }
  }

  // MulI64 on x86 needs the output to be in edx:eax.
  ins->setInt64Operand(
      0, useInt64Fixed(lhs, Register64(edx, eax), /*useAtStart = */ true));
  ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
  if (needsTemp) {
    ins->setTemp(0, temp());
  }

  defineInt64Fixed(ins, mir,
                   LInt64Allocation(LAllocation(AnyRegister(edx)),
                                    LAllocation(AnyRegister(eax))));
}
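
// Background (a standard 32-bit decomposition, not necessarily the exact
// code emitted): the low product lhs.lo * rhs.lo is computed with the
// widening MUL, whose result is architecturally fixed in edx:eax; the cross
// products lhs.lo * rhs.hi and lhs.hi * rhs.lo are then added into the high
// half via a temp. The widening MUL is what pins lhs and the output to
// edx:eax, and the cross products are why a temp is usually needed.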
void LIRGenerator::visitCompareExchangeTypedArrayElement(
    MCompareExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useFixed(ins->elements(), esi);
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LUse oldval = useFixed(ins->oldval(), eax);
    LUse newval = useFixed(ins->newval(), edx);
    LDefinition temp = tempFixed(ebx);

    auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
        elements, index, oldval, newval, temp);
    defineFixed(lir, ins, LAllocation(AnyRegister(ecx)));
    assignSafepoint(lir, ins);
    return;
  }

  lowerCompareExchangeTypedArrayElement(ins,
                                        /* useI386ByteRegisters = */ true);
}
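
// The fully fixed allocation in the BigInt path above is driven by
// CMPXCHG8B, which hard-codes its operands: it compares edx:eax against the
// 64-bit memory operand and, on a match, stores ecx:ebx into it; otherwise
// it loads the memory value into edx:eax. Pinning the BigInt pointers and
// temp up front leaves codegen free to unpack the 64-bit digits into exactly
// those register pairs.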
void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LAllocation value = useFixed(ins->value(), edx);
    LInt64Definition temp = tempInt64Fixed(Register64(ecx, ebx));

    auto* lir = new (alloc())
        LAtomicExchangeTypedArrayElement64(elements, index, value, temp);
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
    assignSafepoint(lir, ins);
    return;
  }

  lowerAtomicExchangeTypedArrayElement(ins, /*useI386ByteRegisters=*/true);
}
void LIRGenerator::visitAtomicTypedArrayElementBinop(
    MAtomicTypedArrayElementBinop* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LAllocation value = useFixed(ins->value(), edx);
    LInt64Definition temp = tempInt64Fixed(Register64(ecx, ebx));

    // Case 1: the result of the operation is not used.
    //
    // We can omit allocating the result BigInt.

    if (ins->isForEffect()) {
      LDefinition tempLow = tempFixed(eax);

      auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
          elements, index, value, temp, tempLow);
      add(lir, ins);
      return;
    }

    // Case 2: the result of the operation is used.

    auto* lir = new (alloc())
        LAtomicTypedArrayElementBinop64(elements, index, value, temp);
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
    assignSafepoint(lir, ins);
    return;
  }

  lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ true);
}
void LIRGeneratorX86::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->storageType());

  auto* lir = new (alloc()) LAtomicLoad64(elements, index, tempFixed(ebx),
                                          tempInt64Fixed(Register64(edx, eax)));
  defineFixed(lir, ins, LAllocation(AnyRegister(ecx)));
  assignSafepoint(lir, ins);
}
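
// A plain 64-bit load is not guaranteed atomic on 32-bit x86, so an atomic
// 64-bit load is typically implemented with LOCK CMPXCHG8B (comparing and
// storing back the same value), which is why this lowering reserves ebx and
// the edx:eax pair even though it is "only" a load.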
void LIRGeneratorX86::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
  LUse elements = useRegister(ins->elements());
  LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->writeType());
  LAllocation value = useFixed(ins->value(), edx);
  LInt64Definition temp1 = tempInt64Fixed(Register64(ecx, ebx));
  LDefinition temp2 = tempFixed(eax);

  add(new (alloc()) LAtomicStore64(elements, index, value, temp1, temp2), ins);
}
void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
  LWasmUint32ToDouble* lir = new (alloc())
      LWasmUint32ToDouble(useRegisterAtStart(ins->input()), temp());
  define(lir, ins);
}
void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
  LWasmUint32ToFloat32* lir = new (alloc())
      LWasmUint32ToFloat32(useRegisterAtStart(ins->input()), temp());
  define(lir, ins);
}
// If the base is a constant, and it is zero or its offset is zero, then
// code generation will fold the values into the access. Allocate the
// pointer to a register only if that can't happen.

static bool OptimizableConstantAccess(MDefinition* base,
                                      const wasm::MemoryAccessDesc& access) {
  MOZ_ASSERT(base->isConstant());
  MOZ_ASSERT(base->type() == MIRType::Int32);

  if (!(base->toConstant()->isInt32(0) || access.offset() == 0)) {
    return false;
  }

  if (access.type() == Scalar::Int64) {
    // For int64 accesses on 32-bit systems we will need to add another offset
    // of 4 to access the high part of the value; make sure this does not
    // overflow the value.
    int32_t v;
    if (base->toConstant()->isInt32(0)) {
      v = int32_t(access.offset());
    } else {
      v = base->toConstant()->toInt32();
    }
    return v <= int32_t(INT32_MAX - INT64HIGH_OFFSET);
  }

  return true;
}
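
// Worked example: a constant base of 0 with offset 0x1000 (or base 0x1000
// with offset 0) folds into the access as an absolute displacement, needing
// no base register. For an int64 access the high word is read at
// displacement + 4 (INT64HIGH_OFFSET), so the check above rejects
// displacements within 4 bytes of INT32_MAX, where that second address would
// overflow.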
void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
    auto* lir = new (alloc())
        LWasmAtomicLoadI64(useRegister(memoryBase), useRegister(base),
                           tempFixed(ecx), tempFixed(ebx));
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  LAllocation baseAlloc;
  if (!base->isConstant() || !OptimizableConstantAccess(base, ins->access())) {
    baseAlloc = ins->type() == MIRType::Int64 ? useRegister(base)
                                              : useRegisterAtStart(base);
  }

  if (ins->type() != MIRType::Int64) {
    auto* lir =
        new (alloc()) LWasmLoad(baseAlloc, useRegisterAtStart(memoryBase));
    define(lir, ins);
    return;
  }

  // "AtStart" register usage does not work for the 64-bit case because we
  // clobber two registers for the result and may need two registers for a
  // scaled address; we can't guarantee non-interference.

  auto* lir = new (alloc()) LWasmLoadI64(baseAlloc, useRegister(memoryBase));

  Scalar::Type accessType = ins->access().type();
  if (accessType == Scalar::Int8 || accessType == Scalar::Int16 ||
      accessType == Scalar::Int32) {
    // We use cdq to sign-extend the result and cdq demands these registers.
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  defineInt64(lir, ins);
}
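
// The cdq constraint is an ISA fact: cdq sign-extends eax into edx, so a
// sign-extending 64-bit load is essentially "movl [mem], %eax; cdq", leaving
// the result in edx:eax. Unsigned and full 64-bit loads have no such
// restriction, so the register allocator may pick any pair for them.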
void LIRGenerator::visitWasmStore(MWasmStore* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
    auto* lir = new (alloc())
        LWasmAtomicStoreI64(useRegister(memoryBase), useRegister(base),
                            useInt64Fixed(ins->value(), Register64(ecx, ebx)),
                            tempFixed(edx), tempFixed(eax));
    add(lir, ins);
    return;
  }

  LAllocation baseAlloc;
  if (!base->isConstant() || !OptimizableConstantAccess(base, ins->access())) {
    baseAlloc = useRegisterAtStart(base);
  }

  LAllocation valueAlloc;
  switch (ins->access().type()) {
    case Scalar::Int8:
    case Scalar::Uint8:
      // See comment for LIRGeneratorX86::useByteOpRegister.
      valueAlloc = useFixed(ins->value(), eax);
      break;
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
    case Scalar::Float64:
      // For now, don't allow constant values. The immediate operand affects
      // instruction layout which affects patching.
      valueAlloc = useRegisterAtStart(ins->value());
      break;
    case Scalar::Simd128:
#ifdef ENABLE_WASM_SIMD
      valueAlloc = useRegisterAtStart(ins->value());
      break;
#else
      MOZ_CRASH("unexpected array type");
#endif
    case Scalar::Int64: {
      LInt64Allocation valueAlloc = useInt64RegisterAtStart(ins->value());
      auto* lir = new (alloc())
          LWasmStoreI64(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
      add(lir, ins);
      return;
    }
    case Scalar::Uint8Clamped:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    case Scalar::MaxTypedArrayViewType:
      MOZ_CRASH("unexpected array type");
  }

  auto* lir = new (alloc())
      LWasmStore(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
  add(lir, ins);
}
void LIRGenerator::visitWasmCompareExchangeHeap(
    MWasmCompareExchangeHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc()) LWasmCompareExchangeI64(
        useRegisterAtStart(memoryBase), useRegisterAtStart(base),
        useInt64FixedAtStart(ins->oldValue(), Register64(edx, eax)),
        useInt64FixedAtStart(ins->newValue(), Register64(ecx, ebx)));
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  MOZ_ASSERT(ins->access().type() < Scalar::Float32);

  bool byteArray = byteSize(ins->access().type()) == 1;

  // Register allocation:
  //
  // The output may not be used, but eax will be clobbered regardless
  // so pin the output to eax.
  //
  // oldval must be in a register.
  //
  // newval must be in a register. If the source is a byte array
  // then newval must be a register that has a byte size: this must
  // be ebx, ecx, or edx (eax is taken).
  //
  // Bug #1077036 describes some optimization opportunities.

  const LAllocation oldval = useRegister(ins->oldValue());
  const LAllocation newval =
      byteArray ? useFixed(ins->newValue(), ebx) : useRegister(ins->newValue());

  LWasmCompareExchangeHeap* lir = new (alloc()) LWasmCompareExchangeHeap(
      useRegister(base), oldval, newval, useRegister(memoryBase));

  lir->setAddrTemp(temp());
  defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
}
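
// For reference, the non-int64 path compiles to LOCK CMPXCHG, which
// implicitly compares eax with the memory operand, stores newval on a match,
// and otherwise loads the memory value into eax; that is why the output is
// pinned to eax and why a byte-sized newval must sit in ebx, ecx, or edx
// (see the byte-register note near the top of this file).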
void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64) {
    MDefinition* base = ins->base();
    auto* lir = new (alloc()) LWasmAtomicExchangeI64(
        useRegister(memoryBase), useRegister(base),
        useInt64Fixed(ins->value(), Register64(ecx, ebx)), ins->access());
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  const LAllocation base = useRegister(ins->base());
  const LAllocation value = useRegister(ins->value());

  LWasmAtomicExchangeHeap* lir = new (alloc())
      LWasmAtomicExchangeHeap(base, value, useRegister(memoryBase));

  lir->setAddrTemp(temp());
  if (byteSize(ins->access().type()) == 1) {
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
  } else {
    define(lir, ins);
  }
}
void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc())
        LWasmAtomicBinopI64(useRegister(memoryBase), useRegister(base),
                            useInt64Fixed(ins->value(), Register64(ecx, ebx)),
                            ins->access(), ins->operation());
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  MOZ_ASSERT(ins->access().type() < Scalar::Float32);

  bool byteArray = byteSize(ins->access().type()) == 1;

  // Case 1: the result of the operation is not used.
  //
  // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
  // LOCK OR, or LOCK XOR. These can all take an immediate.

  if (!ins->hasUses()) {
    LAllocation value;
    if (byteArray && !ins->value()->isConstant()) {
      value = useFixed(ins->value(), ebx);
    } else {
      value = useRegisterOrConstant(ins->value());
    }
    LWasmAtomicBinopHeapForEffect* lir =
        new (alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base), value,
                                                    LDefinition::BogusTemp(),
                                                    useRegister(memoryBase));
    lir->setAddrTemp(temp());
    add(lir, ins);
    return;
  }

  // Case 2: the result of the operation is used.
  //
  // For ADD and SUB we'll use XADD:
  //
  //    movl       value, output
  //    lock xaddl output, mem
  //
  // For the 8-bit variants XADD needs a byte register for the
  // output only, we can still set up with movl; just pin the output
  // to eax (or ebx / ecx / edx).
  //
  // For AND/OR/XOR we need to use a CMPXCHG loop:
  //
  //    movl          *mem, eax
  // L: mov           eax, temp
  //    andl          value, temp
  //    lock cmpxchg  temp, mem  ; reads eax also
  //    jnz           L
  //    ; result in eax
  //
  // Note the placement of L, cmpxchg will update eax with *mem if
  // *mem does not have the expected value, so reloading it at the
  // top of the loop would be redundant.
  //
  // We want to fix eax as the output. We also need a temp for
  // the intermediate value.
  //
  // For the 8-bit variants the temp must have a byte register.
  //
  // There are optimization opportunities:
  //  - better 8-bit register allocation and instruction selection, Bug
  //    #1077036.

  bool bitOp =
      !(ins->operation() == AtomicOp::Add || ins->operation() == AtomicOp::Sub);
  LDefinition tempDef = LDefinition::BogusTemp();
  LAllocation value;

  if (byteArray) {
    value = useFixed(ins->value(), ebx);
    if (bitOp) {
      tempDef = tempFixed(ecx);
    }
  } else if (bitOp || ins->value()->isConstant()) {
    value = useRegisterOrConstant(ins->value());
    if (bitOp) {
      tempDef = temp();
    }
  } else {
    value = useRegisterAtStart(ins->value());
  }

  LWasmAtomicBinopHeap* lir = new (alloc())
      LWasmAtomicBinopHeap(useRegister(base), value, tempDef,
                           LDefinition::BogusTemp(), useRegister(memoryBase));

  lir->setAddrTemp(temp());
  if (byteArray || bitOp) {
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
  } else if (ins->value()->isConstant()) {
    define(lir, ins);
  } else {
    defineReuseInput(lir, ins, LWasmAtomicBinopHeap::valueOp);
  }
}
void LIRGeneratorX86::lowerDivI64(MDiv* div) {
  MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
}
void LIRGeneratorX86::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
  MOZ_ASSERT(div->lhs()->type() == div->rhs()->type());
  MOZ_ASSERT(IsNumberType(div->type()));

  MOZ_ASSERT(div->type() == MIRType::Int64);

  if (div->isUnsigned()) {
    LUDivOrModI64* lir = new (alloc())
        LUDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
                      useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
                      useFixedAtStart(div->instance(), InstanceReg));
    defineReturn(lir, div);
    return;
  }

  LDivOrModI64* lir = new (alloc())
      LDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
                   useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
                   useFixedAtStart(div->instance(), InstanceReg));
  defineReturn(lir, div);
}
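
// Unlike x64, 32-bit x86 has no 64-bit divide instruction, so i64 div/mod
// lower to a builtin call. The Register64 pairs above are thus best read as
// argument-passing constraints for that call, not as constraints of any
// single hardware instruction.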
void LIRGeneratorX86::lowerModI64(MMod* mod) {
  MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
}
void LIRGeneratorX86::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
  MDefinition* lhs = mod->lhs();
  MDefinition* rhs = mod->rhs();
  MOZ_ASSERT(lhs->type() == rhs->type());
  MOZ_ASSERT(IsNumberType(mod->type()));

  MOZ_ASSERT(mod->type() == MIRType::Int64);

  if (mod->isUnsigned()) {
    LUDivOrModI64* lir = new (alloc())
        LUDivOrModI64(useInt64FixedAtStart(lhs, Register64(eax, ebx)),
                      useInt64FixedAtStart(rhs, Register64(ecx, edx)),
                      useFixedAtStart(mod->instance(), InstanceReg));
    defineReturn(lir, mod);
    return;
  }

  LDivOrModI64* lir = new (alloc())
      LDivOrModI64(useInt64FixedAtStart(lhs, Register64(eax, ebx)),
                   useInt64FixedAtStart(rhs, Register64(ecx, edx)),
                   useFixedAtStart(mod->instance(), InstanceReg));
  defineReturn(lir, mod);
}
void LIRGeneratorX86::lowerUDivI64(MDiv* div) {
  MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
}

void LIRGeneratorX86::lowerUModI64(MMod* mod) {
  MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
}
void LIRGeneratorX86::lowerBigIntDiv(MBigIntDiv* ins) {
  auto* lir = new (alloc()) LBigIntDiv(
      useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(eax), temp());
  defineFixed(lir, ins, LAllocation(AnyRegister(edx)));
  assignSafepoint(lir, ins);
}

void LIRGeneratorX86::lowerBigIntMod(MBigIntMod* ins) {
  auto* lir = new (alloc()) LBigIntMod(
      useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(eax), temp());
  defineFixed(lir, ins, LAllocation(AnyRegister(edx)));
  assignSafepoint(lir, ins);
}
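
// The eax/edx choices in the two lowerings above follow the 32-bit DIV/IDIV
// instruction, which takes its dividend in edx:eax and leaves the quotient
// in eax and the remainder in edx; reserving both keeps them available for
// the digit division inside the BigInt operation.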
void LIRGenerator::visitSubstr(MSubstr* ins) {
  // Due to lack of registers on x86, we reuse the string register as
  // temporary. As a result we only need two temporary registers and take a
  // bogus temporary as fifth argument.
  LSubstr* lir = new (alloc())
      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
              useRegister(ins->length()), temp(), LDefinition::BogusTemp(),
              tempByteOpRegister());
  define(lir, ins);
  assignSafepoint(lir, ins);
}
void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);

  LDefinition temp = tempDouble();
  defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd), temp), ins);
}
void LIRGeneratorX86::lowerWasmBuiltinTruncateToInt64(
    MWasmBuiltinTruncateToInt64* ins) {
  MOZ_CRASH("We don't use it for this architecture");
}
void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Int64);
  MOZ_ASSERT(IsFloatingPointType(ins->type()));

  LDefinition maybeTemp =
      (ins->isUnsigned() &&
       ((ins->type() == MIRType::Double && AssemblerX86Shared::HasSSE3()) ||
        ins->type() == MIRType::Float32))
          ? temp()
          : LDefinition::BogusTemp();

  define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd), maybeTemp),
         ins);
}
void LIRGeneratorX86::lowerBuiltinInt64ToFloatingPoint(
    MBuiltinInt64ToFloatingPoint* ins) {
  MOZ_CRASH("We don't use it for this architecture");
}
void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
  if (ins->isUnsigned()) {
    defineInt64(new (alloc())
                    LExtendInt32ToInt64(useRegisterAtStart(ins->input())),
                ins);
    return;
  }

  LExtendInt32ToInt64* lir =
      new (alloc()) LExtendInt32ToInt64(useFixedAtStart(ins->input(), eax));
  defineInt64Fixed(lir, ins,
                   LInt64Allocation(LAllocation(AnyRegister(edx)),
                                    LAllocation(AnyRegister(eax))));
}
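
// The asymmetry above: unsigned extension just writes a zero high half, so
// any register pair will do, while signed extension uses cdq, which only
// sign-extends eax into edx; hence the fixed eax input and edx:eax output.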
void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
  // Here we'll end up using cdq which requires input and output in (edx,eax).
  LSignExtendInt64* lir = new (alloc()) LSignExtendInt64(
      useInt64FixedAtStart(ins->input(), Register64(edx, eax)));
  defineInt64Fixed(lir, ins,
                   LInt64Allocation(LAllocation(AnyRegister(edx)),
                                    LAllocation(AnyRegister(eax))));
}
// On x86 we specialize the only cases where compare is {U,}Int32 and select
// is {U,}Int32.
bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
    MCompare::CompareType compTy, MIRType insTy) {
  return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
                                     compTy == MCompare::Compare_UInt32);
}
void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
                                                   MDefinition* lhs,
                                                   MDefinition* rhs,
                                                   MCompare::CompareType compTy,
                                                   JSOp jsop) {
  MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
  auto* lir = new (alloc()) LWasmCompareAndSelect(
      useRegister(lhs), useAny(rhs), compTy, jsop,
      useRegisterAtStart(ins->trueExpr()), useAny(ins->falseExpr()));
  defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
}
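
// The useRegisterAtStart + defineReuseInput pairing arranges for the select
// to compile to a compare followed by a conditional move that overwrites the
// trueExpr register in place, avoiding an extra move on this register-starved
// target.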