// Bug 1874684 - Part 31: Correctly reject invalid durations in some RoundDuration calls...
// [gecko.git] / js / src / jit / mips64 / CodeGenerator-mips64.cpp
// blob 7c4f34340ca3e3c61e3c2a305d9b50f3b39903a4
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/mips64/CodeGenerator-mips64.h"

#include "mozilla/MathAlgorithms.h"

#include "jit/CodeGenerator.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "vm/Shape.h"

#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"

using namespace js;
using namespace js::jit;
23 ValueOperand CodeGeneratorMIPS64::ToValue(LInstruction* ins, size_t pos) {
24 return ValueOperand(ToRegister(ins->getOperand(pos)));
27 ValueOperand CodeGeneratorMIPS64::ToTempValue(LInstruction* ins, size_t pos) {
28 return ValueOperand(ToRegister(ins->getTemp(pos)));
31 void CodeGenerator::visitBox(LBox* box) {
32 const LAllocation* in = box->getOperand(0);
33 ValueOperand result = ToOutValue(box);
35 masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
38 void CodeGenerator::visitUnbox(LUnbox* unbox) {
39 MUnbox* mir = unbox->mir();
41 Register result = ToRegister(unbox->output());
43 if (mir->fallible()) {
44 const ValueOperand value = ToValue(unbox, LUnbox::Input);
45 Label bail;
46 switch (mir->type()) {
47 case MIRType::Int32:
48 masm.fallibleUnboxInt32(value, result, &bail);
49 break;
50 case MIRType::Boolean:
51 masm.fallibleUnboxBoolean(value, result, &bail);
52 break;
53 case MIRType::Object:
54 masm.fallibleUnboxObject(value, result, &bail);
55 break;
56 case MIRType::String:
57 masm.fallibleUnboxString(value, result, &bail);
58 break;
59 case MIRType::Symbol:
60 masm.fallibleUnboxSymbol(value, result, &bail);
61 break;
62 case MIRType::BigInt:
63 masm.fallibleUnboxBigInt(value, result, &bail);
64 break;
65 default:
66 MOZ_CRASH("Given MIRType cannot be unboxed.");
68 bailoutFrom(&bail, unbox->snapshot());
69 return;
72 LAllocation* input = unbox->getOperand(LUnbox::Input);
73 if (input->isRegister()) {
74 Register inputReg = ToRegister(input);
75 switch (mir->type()) {
76 case MIRType::Int32:
77 masm.unboxInt32(inputReg, result);
78 break;
79 case MIRType::Boolean:
80 masm.unboxBoolean(inputReg, result);
81 break;
82 case MIRType::Object:
83 masm.unboxObject(inputReg, result);
84 break;
85 case MIRType::String:
86 masm.unboxString(inputReg, result);
87 break;
88 case MIRType::Symbol:
89 masm.unboxSymbol(inputReg, result);
90 break;
91 case MIRType::BigInt:
92 masm.unboxBigInt(inputReg, result);
93 break;
94 default:
95 MOZ_CRASH("Given MIRType cannot be unboxed.");
97 return;
100 Address inputAddr = ToAddress(input);
101 switch (mir->type()) {
102 case MIRType::Int32:
103 masm.unboxInt32(inputAddr, result);
104 break;
105 case MIRType::Boolean:
106 masm.unboxBoolean(inputAddr, result);
107 break;
108 case MIRType::Object:
109 masm.unboxObject(inputAddr, result);
110 break;
111 case MIRType::String:
112 masm.unboxString(inputAddr, result);
113 break;
114 case MIRType::Symbol:
115 masm.unboxSymbol(inputAddr, result);
116 break;
117 case MIRType::BigInt:
118 masm.unboxBigInt(inputAddr, result);
119 break;
120 default:
121 MOZ_CRASH("Given MIRType cannot be unboxed.");
125 void CodeGeneratorMIPS64::splitTagForTest(const ValueOperand& value,
126 ScratchTagScope& tag) {
127 masm.splitTag(value.valueReg(), tag);
130 void CodeGenerator::visitCompareI64(LCompareI64* lir) {
131 MCompare* mir = lir->mir();
132 MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
133 mir->compareType() == MCompare::Compare_UInt64);
135 const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
136 const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
137 Register lhsReg = ToRegister64(lhs).reg;
138 Register output = ToRegister(lir->output());
139 Register rhsReg;
140 ScratchRegisterScope scratch(masm);
142 if (IsConstant(rhs)) {
143 rhsReg = scratch;
144 masm.ma_li(rhsReg, ImmWord(ToInt64(rhs)));
145 } else if (rhs.value().isGeneralReg()) {
146 rhsReg = ToRegister64(rhs).reg;
147 } else {
148 rhsReg = scratch;
149 masm.loadPtr(ToAddress(rhs.value()), rhsReg);
152 bool isSigned = mir->compareType() == MCompare::Compare_Int64;
153 masm.cmpPtrSet(JSOpToCondition(lir->jsop(), isSigned), lhsReg, rhsReg,
154 output);
157 void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
158 MCompare* mir = lir->cmpMir();
159 MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
160 mir->compareType() == MCompare::Compare_UInt64);
162 const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
163 const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
164 Register lhsReg = ToRegister64(lhs).reg;
165 Register rhsReg;
166 ScratchRegisterScope scratch(masm);
168 if (IsConstant(rhs)) {
169 rhsReg = scratch;
170 masm.ma_li(rhsReg, ImmWord(ToInt64(rhs)));
171 } else if (rhs.value().isGeneralReg()) {
172 rhsReg = ToRegister64(rhs).reg;
173 } else {
174 rhsReg = scratch;
175 masm.loadPtr(ToAddress(rhs.value()), rhsReg);
178 bool isSigned = mir->compareType() == MCompare::Compare_Int64;
179 Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
180 emitBranch(lhsReg, rhsReg, cond, lir->ifTrue(), lir->ifFalse());
183 void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
184 Register lhs = ToRegister(lir->lhs());
185 Register rhs = ToRegister(lir->rhs());
186 Register output = ToRegister(lir->output());
188 Label done;
190 // Handle divide by zero.
191 if (lir->canBeDivideByZero()) {
192 Label nonZero;
193 masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
194 masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
195 masm.bind(&nonZero);
198 // Handle an integer overflow exception from INT64_MIN / -1.
199 if (lir->canBeNegativeOverflow()) {
200 Label notOverflow;
201 masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
202 masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
203 if (lir->mir()->isMod()) {
204 masm.ma_xor(output, output);
205 } else {
206 masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
208 masm.jump(&done);
209 masm.bind(&notOverflow);
212 #ifdef MIPSR6
213 if (lir->mir()->isMod()) {
214 masm.as_dmod(output, lhs, rhs);
215 } else {
216 masm.as_ddiv(output, lhs, rhs);
218 #else
219 masm.as_ddiv(lhs, rhs);
220 if (lir->mir()->isMod()) {
221 masm.as_mfhi(output);
222 } else {
223 masm.as_mflo(output);
225 #endif
226 masm.bind(&done);
229 void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
230 Register lhs = ToRegister(lir->lhs());
231 Register rhs = ToRegister(lir->rhs());
232 Register output = ToRegister(lir->output());
234 Label done;
236 // Prevent divide by zero.
237 if (lir->canBeDivideByZero()) {
238 Label nonZero;
239 masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
240 masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
241 masm.bind(&nonZero);
244 #ifdef MIPSR6
245 if (lir->mir()->isMod()) {
246 masm.as_dmodu(output, lhs, rhs);
247 } else {
248 masm.as_ddivu(output, lhs, rhs);
250 #else
251 masm.as_ddivu(lhs, rhs);
252 if (lir->mir()->isMod()) {
253 masm.as_mfhi(output);
254 } else {
255 masm.as_mflo(output);
257 #endif
258 masm.bind(&done);
261 void CodeGeneratorMIPS64::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
262 Register divisor, Register output,
263 Label* fail) {
264 // Callers handle division by zero and integer overflow.
266 #ifdef MIPSR6
267 masm.as_ddiv(/* result= */ dividend, dividend, divisor);
268 #else
269 masm.as_ddiv(dividend, divisor);
270 masm.as_mflo(dividend);
271 #endif
273 // Create and return the result.
274 masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
275 masm.initializeBigInt(output, dividend);
278 void CodeGeneratorMIPS64::emitBigIntMod(LBigIntMod* ins, Register dividend,
279 Register divisor, Register output,
280 Label* fail) {
281 // Callers handle division by zero and integer overflow.
283 #ifdef MIPSR6
284 masm.as_dmod(/* result= */ dividend, dividend, divisor);
285 #else
286 masm.as_ddiv(dividend, divisor);
287 masm.as_mfhi(dividend);
288 #endif
290 // Create and return the result.
291 masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
292 masm.initializeBigInt(output, dividend);
295 template <typename T>
296 void CodeGeneratorMIPS64::emitWasmLoadI64(T* lir) {
297 const MWasmLoad* mir = lir->mir();
299 Register memoryBase = ToRegister(lir->memoryBase());
300 Register ptrScratch = InvalidReg;
301 if (!lir->ptrCopy()->isBogusTemp()) {
302 ptrScratch = ToRegister(lir->ptrCopy());
305 Register ptrReg = ToRegister(lir->ptr());
306 if (mir->base()->type() == MIRType::Int32) {
307 // See comment in visitWasmLoad re the type of 'base'.
308 masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
311 if (IsUnaligned(mir->access())) {
312 masm.wasmUnalignedLoadI64(mir->access(), memoryBase, ptrReg, ptrScratch,
313 ToOutRegister64(lir),
314 ToRegister(lir->getTemp(1)));
315 } else {
316 masm.wasmLoadI64(mir->access(), memoryBase, ptrReg, ptrScratch,
317 ToOutRegister64(lir));
321 void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
322 emitWasmLoadI64(lir);
325 void CodeGenerator::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir) {
326 emitWasmLoadI64(lir);
329 template <typename T>
330 void CodeGeneratorMIPS64::emitWasmStoreI64(T* lir) {
331 const MWasmStore* mir = lir->mir();
333 Register memoryBase = ToRegister(lir->memoryBase());
334 Register ptrScratch = InvalidReg;
335 if (!lir->ptrCopy()->isBogusTemp()) {
336 ptrScratch = ToRegister(lir->ptrCopy());
339 Register ptrReg = ToRegister(lir->ptr());
340 if (mir->base()->type() == MIRType::Int32) {
341 // See comment in visitWasmLoad re the type of 'base'.
342 masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
345 if (IsUnaligned(mir->access())) {
346 masm.wasmUnalignedStoreI64(mir->access(), ToRegister64(lir->value()),
347 memoryBase, ptrReg, ptrScratch,
348 ToRegister(lir->getTemp(1)));
349 } else {
350 masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), memoryBase,
351 ptrReg, ptrScratch);
355 void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
356 emitWasmStoreI64(lir);
359 void CodeGenerator::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir) {
360 emitWasmStoreI64(lir);
363 void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
364 MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
366 Register cond = ToRegister(lir->condExpr());
367 const LInt64Allocation falseExpr = lir->falseExpr();
369 Register64 out = ToOutRegister64(lir);
370 MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
371 "true expr is reused for input");
373 if (falseExpr.value().isRegister()) {
374 masm.as_movz(out.reg, ToRegister(falseExpr.value()), cond);
375 } else {
376 Label done;
377 masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
378 masm.loadPtr(ToAddress(falseExpr.value()), out.reg);
379 masm.bind(&done);
383 void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
384 MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
385 MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
386 masm.as_dmtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
389 void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
390 MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
391 MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
392 masm.as_dmfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
395 void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
396 const LAllocation* input = lir->getOperand(0);
397 Register output = ToRegister(lir->output());
399 if (lir->mir()->isUnsigned()) {
400 masm.ma_dext(output, ToRegister(input), Imm32(0), Imm32(32));
401 } else {
402 masm.ma_sll(output, ToRegister(input), Imm32(0));
406 void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
407 const LAllocation* input = lir->getOperand(0);
408 Register output = ToRegister(lir->output());
410 if (lir->mir()->bottomHalf()) {
411 if (input->isMemory()) {
412 masm.load32(ToAddress(input), output);
413 } else {
414 masm.ma_sll(output, ToRegister(input), Imm32(0));
416 } else {
417 MOZ_CRASH("Not implemented.");
421 void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
422 Register64 input = ToRegister64(lir->getInt64Operand(0));
423 Register64 output = ToOutRegister64(lir);
424 switch (lir->mode()) {
425 case MSignExtendInt64::Byte:
426 masm.move32To64SignExtend(input.reg, output);
427 masm.move8SignExtend(output.reg, output.reg);
428 break;
429 case MSignExtendInt64::Half:
430 masm.move32To64SignExtend(input.reg, output);
431 masm.move16SignExtend(output.reg, output.reg);
432 break;
433 case MSignExtendInt64::Word:
434 masm.move32To64SignExtend(input.reg, output);
435 break;
439 void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) {
440 Register input = ToRegister(lir->input());
441 Register output = ToRegister(lir->output());
442 MOZ_ASSERT(input == output);
443 masm.move32To64ZeroExtend(input, Register64(output));
446 void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
447 Register input = ToRegister(lir->input());
448 Register output = ToRegister(lir->output());
449 MOZ_ASSERT(input == output);
450 masm.move64To32(Register64(input), output);
453 void CodeGenerator::visitClzI64(LClzI64* lir) {
454 Register64 input = ToRegister64(lir->getInt64Operand(0));
455 Register64 output = ToOutRegister64(lir);
456 masm.clz64(input, output.reg);
459 void CodeGenerator::visitCtzI64(LCtzI64* lir) {
460 Register64 input = ToRegister64(lir->getInt64Operand(0));
461 Register64 output = ToOutRegister64(lir);
462 masm.ctz64(input, output.reg);
465 void CodeGenerator::visitNotI64(LNotI64* lir) {
466 Register64 input = ToRegister64(lir->getInt64Operand(0));
467 Register output = ToRegister(lir->output());
469 masm.ma_cmp_set(output, input.reg, zero, Assembler::Equal);
472 void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
473 const LAllocation* input = ins->getOperand(0);
474 MOZ_ASSERT(!input->isConstant());
475 Register inputReg = ToRegister(input);
476 MOZ_ASSERT(inputReg == ToRegister(ins->output()));
477 masm.ma_not(inputReg, inputReg);
480 void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
481 FloatRegister input = ToFloatRegister(lir->input());
482 Register64 output = ToOutRegister64(lir);
484 MWasmTruncateToInt64* mir = lir->mir();
485 MIRType fromType = mir->input()->type();
487 MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
489 auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
490 addOutOfLineCode(ool, mir);
492 Label* oolEntry = ool->entry();
493 Label* oolRejoin = ool->rejoin();
494 bool isSaturating = mir->isSaturating();
496 if (fromType == MIRType::Double) {
497 if (mir->isUnsigned()) {
498 masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
499 oolRejoin, InvalidFloatReg);
500 } else {
501 masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
502 oolRejoin, InvalidFloatReg);
504 } else {
505 if (mir->isUnsigned()) {
506 masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
507 oolRejoin, InvalidFloatReg);
508 } else {
509 masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
510 oolRejoin, InvalidFloatReg);
515 void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
516 Register64 input = ToRegister64(lir->getInt64Operand(0));
517 FloatRegister output = ToFloatRegister(lir->output());
519 MIRType outputType = lir->mir()->type();
520 MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
522 if (outputType == MIRType::Double) {
523 if (lir->mir()->isUnsigned()) {
524 masm.convertUInt64ToDouble(input, output, Register::Invalid());
525 } else {
526 masm.convertInt64ToDouble(input, output);
528 } else {
529 if (lir->mir()->isUnsigned()) {
530 masm.convertUInt64ToFloat32(input, output, Register::Invalid());
531 } else {
532 masm.convertInt64ToFloat32(input, output);
537 void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
538 Register64 input = ToRegister64(lir->getInt64Operand(0));
539 MBasicBlock* ifTrue = lir->ifTrue();
540 MBasicBlock* ifFalse = lir->ifFalse();
542 emitBranch(input.reg, Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
545 void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
546 Register elements = ToRegister(lir->elements());
547 Register temp = ToRegister(lir->temp());
548 Register64 temp64 = ToRegister64(lir->temp64());
549 Register out = ToRegister(lir->output());
550 const MLoadUnboxedScalar* mir = lir->mir();
552 Scalar::Type storageType = mir->storageType();
554 auto sync = Synchronization::Load();
555 masm.memoryBarrierBefore(sync);
556 if (lir->index()->isConstant()) {
557 Address source =
558 ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
559 masm.load64(source, temp64);
560 } else {
561 BaseIndex source(elements, ToRegister(lir->index()),
562 ScaleFromScalarType(storageType), mir->offsetAdjustment());
563 masm.load64(source, temp64);
565 masm.memoryBarrierAfter(sync);
566 emitCreateBigInt(lir, storageType, temp64, out, temp);
569 void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
570 Register elements = ToRegister(lir->elements());
571 Register value = ToRegister(lir->value());
572 Register64 temp1 = ToRegister64(lir->temp1());
574 Scalar::Type writeType = lir->mir()->writeType();
576 masm.loadBigInt64(value, temp1);
577 auto sync = Synchronization::Store();
578 masm.memoryBarrierBefore(sync);
579 if (lir->index()->isConstant()) {
580 Address dest = ToAddress(elements, lir->index(), writeType);
581 masm.store64(temp1, dest);
582 } else {
583 BaseIndex dest(elements, ToRegister(lir->index()),
584 ScaleFromScalarType(writeType));
585 masm.store64(temp1, dest);
587 masm.memoryBarrierAfter(sync);