Bug 1885489 - Part 5: Add SnapshotIterator::readInt32(). r=iain
[gecko.git] / js / src / jit / CodeGenerator.cpp
blob588c1e7108af8da932a5de8ed9c9eb6eeba3530d
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/CodeGenerator.h"
9 #include "mozilla/Assertions.h"
10 #include "mozilla/Casting.h"
11 #include "mozilla/DebugOnly.h"
12 #include "mozilla/EndianUtils.h"
13 #include "mozilla/EnumeratedArray.h"
14 #include "mozilla/EnumeratedRange.h"
15 #include "mozilla/EnumSet.h"
16 #include "mozilla/IntegerTypeTraits.h"
17 #include "mozilla/Latin1.h"
18 #include "mozilla/MathAlgorithms.h"
19 #include "mozilla/ScopeExit.h"
20 #include "mozilla/SIMD.h"
22 #include <limits>
23 #include <type_traits>
24 #include <utility>
26 #include "jslibmath.h"
27 #include "jsmath.h"
28 #include "jsnum.h"
30 #include "builtin/MapObject.h"
31 #include "builtin/RegExp.h"
32 #include "builtin/String.h"
33 #include "irregexp/RegExpTypes.h"
34 #include "jit/ABIArgGenerator.h"
35 #include "jit/CompileInfo.h"
36 #include "jit/InlineScriptTree.h"
37 #include "jit/Invalidation.h"
38 #include "jit/IonGenericCallStub.h"
39 #include "jit/IonIC.h"
40 #include "jit/IonScript.h"
41 #include "jit/JitcodeMap.h"
42 #include "jit/JitFrames.h"
43 #include "jit/JitRuntime.h"
44 #include "jit/JitSpewer.h"
45 #include "jit/JitZone.h"
46 #include "jit/Linker.h"
47 #include "jit/MIRGenerator.h"
48 #include "jit/MoveEmitter.h"
49 #include "jit/RangeAnalysis.h"
50 #include "jit/RegExpStubConstants.h"
51 #include "jit/SafepointIndex.h"
52 #include "jit/SharedICHelpers.h"
53 #include "jit/SharedICRegisters.h"
54 #include "jit/VMFunctions.h"
55 #include "jit/WarpSnapshot.h"
56 #include "js/ColumnNumber.h" // JS::LimitedColumnNumberOneOrigin
57 #include "js/experimental/JitInfo.h" // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
58 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
59 #include "js/RegExpFlags.h" // JS::RegExpFlag
60 #include "js/ScalarType.h" // js::Scalar::Type
61 #include "proxy/DOMProxy.h"
62 #include "proxy/ScriptedProxyHandler.h"
63 #include "util/CheckedArithmetic.h"
64 #include "util/Unicode.h"
65 #include "vm/ArrayBufferViewObject.h"
66 #include "vm/AsyncFunction.h"
67 #include "vm/AsyncIteration.h"
68 #include "vm/BuiltinObjectKind.h"
69 #include "vm/FunctionFlags.h" // js::FunctionFlags
70 #include "vm/Interpreter.h"
71 #include "vm/JSAtomUtils.h" // AtomizeString
72 #include "vm/MatchPairs.h"
73 #include "vm/RegExpObject.h"
74 #include "vm/RegExpStatics.h"
75 #include "vm/StaticStrings.h"
76 #include "vm/StringObject.h"
77 #include "vm/StringType.h"
78 #include "vm/TypedArrayObject.h"
79 #include "wasm/WasmCodegenConstants.h"
80 #include "wasm/WasmValType.h"
81 #ifdef MOZ_VTUNE
82 # include "vtune/VTuneWrapper.h"
83 #endif
84 #include "wasm/WasmBinary.h"
85 #include "wasm/WasmGC.h"
86 #include "wasm/WasmGcObject.h"
87 #include "wasm/WasmStubs.h"
89 #include "builtin/Boolean-inl.h"
90 #include "jit/MacroAssembler-inl.h"
91 #include "jit/shared/CodeGenerator-shared-inl.h"
92 #include "jit/TemplateObject-inl.h"
93 #include "jit/VMFunctionList-inl.h"
94 #include "vm/JSScript-inl.h"
95 #include "wasm/WasmInstance-inl.h"
97 using namespace js;
98 using namespace js::jit;
100 using JS::GenericNaN;
101 using mozilla::AssertedCast;
102 using mozilla::CheckedUint32;
103 using mozilla::DebugOnly;
104 using mozilla::FloatingPoint;
105 using mozilla::Maybe;
106 using mozilla::NegativeInfinity;
107 using mozilla::PositiveInfinity;
109 using JS::ExpandoAndGeneration;
111 namespace js {
112 namespace jit {
114 #ifdef CHECK_OSIPOINT_REGISTERS
115 template <class Op>
116 static void HandleRegisterDump(Op op, MacroAssembler& masm,
117 LiveRegisterSet liveRegs, Register activation,
118 Register scratch) {
119 const size_t baseOffset = JitActivation::offsetOfRegs();
121 // Handle live GPRs.
122 for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
123 Register reg = *iter;
124 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
126 if (reg == activation) {
127 // To use the original value of the activation register (that's
128 // now on top of the stack), we need the scratch register.
129 masm.push(scratch);
130 masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
131 op(scratch, dump);
132 masm.pop(scratch);
133 } else {
134 op(reg, dump);
138 // Handle live FPRs.
139 for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
140 FloatRegister reg = *iter;
141 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
142 op(reg, dump);
146 class StoreOp {
147 MacroAssembler& masm;
149 public:
150 explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
152 void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
153 void operator()(FloatRegister reg, Address dump) {
154 if (reg.isDouble()) {
155 masm.storeDouble(reg, dump);
156 } else if (reg.isSingle()) {
157 masm.storeFloat32(reg, dump);
158 } else if (reg.isSimd128()) {
159 MOZ_CRASH("Unexpected case for SIMD");
160 } else {
161 MOZ_CRASH("Unexpected register type.");
166 class VerifyOp {
167 MacroAssembler& masm;
168 Label* failure_;
170 public:
171 VerifyOp(MacroAssembler& masm, Label* failure)
172 : masm(masm), failure_(failure) {}
174 void operator()(Register reg, Address dump) {
175 masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
177 void operator()(FloatRegister reg, Address dump) {
178 if (reg.isDouble()) {
179 ScratchDoubleScope scratch(masm);
180 masm.loadDouble(dump, scratch);
181 masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
182 } else if (reg.isSingle()) {
183 ScratchFloat32Scope scratch(masm);
184 masm.loadFloat32(dump, scratch);
185 masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
186 } else if (reg.isSimd128()) {
187 MOZ_CRASH("Unexpected case for SIMD");
188 } else {
189 MOZ_CRASH("Unexpected register type.");
194 void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
195 // Ensure the live registers stored by callVM did not change between
196 // the call and this OsiPoint. Try-catch relies on this invariant.
198 // Load pointer to the JitActivation in a scratch register.
199 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
200 Register scratch = allRegs.takeAny();
201 masm.push(scratch);
202 masm.loadJitActivation(scratch);
204 // If we should not check registers (because the instruction did not call
205 // into the VM, or a GC happened), we're done.
206 Label failure, done;
207 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
208 masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
210 // Having more than one VM function call made in one visit function at
211 // runtime is a sec-ciritcal error, because if we conservatively assume that
212 // one of the function call can re-enter Ion, then the invalidation process
213 // will potentially add a call at a random location, by patching the code
214 // before the return address.
215 masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
217 // Set checkRegs to 0, so that we don't try to verify registers after we
218 // return from this script to the caller.
219 masm.store32(Imm32(0), checkRegs);
221 // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
222 // temps after calling into the VM. This is fine because no other
223 // instructions (including this OsiPoint) will depend on them. Also
224 // backtracking can also use the same register for an input and an output.
225 // These are marked as clobbered and shouldn't get checked.
226 LiveRegisterSet liveRegs;
227 liveRegs.set() = RegisterSet::Intersect(
228 safepoint->liveRegs().set(),
229 RegisterSet::Not(safepoint->clobberedRegs().set()));
231 VerifyOp op(masm, &failure);
232 HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
234 masm.jump(&done);
236 // Do not profile the callWithABI that occurs below. This is to avoid a
237 // rare corner case that occurs when profiling interacts with itself:
239 // When slow profiling assertions are turned on, FunctionBoundary ops
240 // (which update the profiler pseudo-stack) may emit a callVM, which
241 // forces them to have an osi point associated with them. The
242 // FunctionBoundary for inline function entry is added to the caller's
243 // graph with a PC from the caller's code, but during codegen it modifies
244 // Gecko Profiler instrumentation to add the callee as the current top-most
245 // script. When codegen gets to the OSIPoint, and the callWithABI below is
246 // emitted, the codegen thinks that the current frame is the callee, but
247 // the PC it's using from the OSIPoint refers to the caller. This causes
248 // the profiler instrumentation of the callWithABI below to ASSERT, since
249 // the script and pc are mismatched. To avoid this, we simply omit
250 // instrumentation for these callWithABIs.
252 // Any live register captured by a safepoint (other than temp registers)
253 // must remain unchanged between the call and the OsiPoint instruction.
254 masm.bind(&failure);
255 masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
257 masm.bind(&done);
258 masm.pop(scratch);
261 bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
262 if (!checkOsiPointRegisters) {
263 return false;
266 if (safepoint->liveRegs().emptyGeneral() &&
267 safepoint->liveRegs().emptyFloat()) {
268 return false; // No registers to check.
271 return true;
274 void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
275 if (!shouldVerifyOsiPointRegs(safepoint)) {
276 return;
279 // Set checkRegs to 0. If we perform a VM call, the instruction
280 // will set it to 1.
281 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
282 Register scratch = allRegs.takeAny();
283 masm.push(scratch);
284 masm.loadJitActivation(scratch);
285 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
286 masm.store32(Imm32(0), checkRegs);
287 masm.pop(scratch);
290 static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
291 // Store a copy of all live registers before performing the call.
292 // When we reach the OsiPoint, we can use this to check nothing
293 // modified them in the meantime.
295 // Load pointer to the JitActivation in a scratch register.
296 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
297 Register scratch = allRegs.takeAny();
298 masm.push(scratch);
299 masm.loadJitActivation(scratch);
301 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
302 masm.add32(Imm32(1), checkRegs);
304 StoreOp op(masm);
305 HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
307 masm.pop(scratch);
309 #endif // CHECK_OSIPOINT_REGISTERS
311 // Before doing any call to Cpp, you should ensure that volatile
312 // registers are evicted by the register allocator.
313 void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins) {
314 TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
315 const VMFunctionData& fun = GetVMFunction(id);
317 // Stack is:
318 // ... frame ...
319 // [args]
320 #ifdef DEBUG
321 MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
322 pushedArgs_ = 0;
323 #endif
325 #ifdef CHECK_OSIPOINT_REGISTERS
326 if (shouldVerifyOsiPointRegs(ins->safepoint())) {
327 StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
329 #endif
331 #ifdef DEBUG
332 if (ins->mirRaw()) {
333 MOZ_ASSERT(ins->mirRaw()->isInstruction());
334 MInstruction* mir = ins->mirRaw()->toInstruction();
335 MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
337 // If this MIR instruction has an overridden AliasSet, set the JitRuntime's
338 // disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
339 // RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
340 // interrupt callbacks can call JS (chrome JS or shell testing functions).
341 bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
342 if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
343 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
344 masm.move32(Imm32(1), ReturnReg);
345 masm.store32(ReturnReg, AbsoluteAddress(addr));
348 #endif
350 // Push an exit frame descriptor.
351 masm.PushFrameDescriptor(FrameType::IonJS);
353 // Call the wrapper function. The wrapper is in charge to unwind the stack
354 // when returning from the call. Failures are handled with exceptions based
355 // on the return value of the C functions. To guard the outcome of the
356 // returned value, use another LIR instruction.
357 ensureOsiSpace();
358 uint32_t callOffset = masm.callJit(code);
359 markSafepointAt(callOffset, ins);
361 #ifdef DEBUG
362 // Reset the disallowArbitraryCode flag after the call.
364 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
365 masm.push(ReturnReg);
366 masm.move32(Imm32(0), ReturnReg);
367 masm.store32(ReturnReg, AbsoluteAddress(addr));
368 masm.pop(ReturnReg);
370 #endif
372 // Pop rest of the exit frame and the arguments left on the stack.
373 int framePop =
374 sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
375 masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
377 // Stack is:
378 // ... frame ...
381 template <typename Fn, Fn fn>
382 void CodeGenerator::callVM(LInstruction* ins) {
383 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
384 callVMInternal(id, ins);
387 // ArgSeq store arguments for OutOfLineCallVM.
389 // OutOfLineCallVM are created with "oolCallVM" function. The third argument of
390 // this function is an instance of a class which provides a "generate" in charge
391 // of pushing the argument, with "pushArg", for a VMFunction.
393 // Such list of arguments can be created by using the "ArgList" function which
394 // creates one instance of "ArgSeq", where the type of the arguments are
395 // inferred from the type of the arguments.
397 // The list of arguments must be written in the same order as if you were
398 // calling the function in C++.
400 // Example:
401 // ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
403 template <typename... ArgTypes>
404 class ArgSeq {
405 std::tuple<std::remove_reference_t<ArgTypes>...> args_;
407 template <std::size_t... ISeq>
408 inline void generate(CodeGenerator* codegen,
409 std::index_sequence<ISeq...>) const {
410 // Arguments are pushed in reverse order, from last argument to first
411 // argument.
412 (codegen->pushArg(std::get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
415 public:
416 explicit ArgSeq(ArgTypes&&... args)
417 : args_(std::forward<ArgTypes>(args)...) {}
419 inline void generate(CodeGenerator* codegen) const {
420 generate(codegen, std::index_sequence_for<ArgTypes...>{});
423 #ifdef DEBUG
424 static constexpr size_t numArgs = sizeof...(ArgTypes);
425 #endif
428 template <typename... ArgTypes>
429 inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
430 return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
433 // Store wrappers, to generate the right move of data after the VM call.
435 struct StoreNothing {
436 inline void generate(CodeGenerator* codegen) const {}
437 inline LiveRegisterSet clobbered() const {
438 return LiveRegisterSet(); // No register gets clobbered
442 class StoreRegisterTo {
443 private:
444 Register out_;
446 public:
447 explicit StoreRegisterTo(Register out) : out_(out) {}
449 inline void generate(CodeGenerator* codegen) const {
450 // It's okay to use storePointerResultTo here - the VMFunction wrapper
451 // ensures the upper bytes are zero for bool/int32 return values.
452 codegen->storePointerResultTo(out_);
454 inline LiveRegisterSet clobbered() const {
455 LiveRegisterSet set;
456 set.add(out_);
457 return set;
461 class StoreFloatRegisterTo {
462 private:
463 FloatRegister out_;
465 public:
466 explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
468 inline void generate(CodeGenerator* codegen) const {
469 codegen->storeFloatResultTo(out_);
471 inline LiveRegisterSet clobbered() const {
472 LiveRegisterSet set;
473 set.add(out_);
474 return set;
478 template <typename Output>
479 class StoreValueTo_ {
480 private:
481 Output out_;
483 public:
484 explicit StoreValueTo_(const Output& out) : out_(out) {}
486 inline void generate(CodeGenerator* codegen) const {
487 codegen->storeResultValueTo(out_);
489 inline LiveRegisterSet clobbered() const {
490 LiveRegisterSet set;
491 set.add(out_);
492 return set;
496 template <typename Output>
497 StoreValueTo_<Output> StoreValueTo(const Output& out) {
498 return StoreValueTo_<Output>(out);
501 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
502 class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
503 private:
504 LInstruction* lir_;
505 ArgSeq args_;
506 StoreOutputTo out_;
508 public:
509 OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
510 const StoreOutputTo& out)
511 : lir_(lir), args_(args), out_(out) {}
513 void accept(CodeGenerator* codegen) override {
514 codegen->visitOutOfLineCallVM(this);
517 LInstruction* lir() const { return lir_; }
518 const ArgSeq& args() const { return args_; }
519 const StoreOutputTo& out() const { return out_; }
522 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
523 OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
524 const StoreOutputTo& out) {
525 MOZ_ASSERT(lir->mirRaw());
526 MOZ_ASSERT(lir->mirRaw()->isInstruction());
528 #ifdef DEBUG
529 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
530 const VMFunctionData& fun = GetVMFunction(id);
531 MOZ_ASSERT(fun.explicitArgs == args.numArgs);
532 MOZ_ASSERT(fun.returnsData() !=
533 (std::is_same_v<StoreOutputTo, StoreNothing>));
534 #endif
536 OutOfLineCode* ool = new (alloc())
537 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
538 addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
539 return ool;
542 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
543 void CodeGenerator::visitOutOfLineCallVM(
544 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
545 LInstruction* lir = ool->lir();
547 #ifdef JS_JITSPEW
548 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
549 lir->opName());
550 if (const char* extra = lir->getExtraName()) {
551 JitSpewCont(JitSpew_Codegen, ":%s", extra);
553 JitSpewFin(JitSpew_Codegen);
554 #endif
555 perfSpewer_.recordInstruction(masm, lir);
556 saveLive(lir);
557 ool->args().generate(this);
558 callVM<Fn, fn>(lir);
559 ool->out().generate(this);
560 restoreLiveIgnore(lir, ool->out().clobbered());
561 masm.jump(ool->rejoin());
564 class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
565 private:
566 LInstruction* lir_;
567 size_t cacheIndex_;
568 size_t cacheInfoIndex_;
570 public:
571 OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
572 size_t cacheInfoIndex)
573 : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
575 void bind(MacroAssembler* masm) override {
576 // The binding of the initial jump is done in
577 // CodeGenerator::visitOutOfLineICFallback.
580 size_t cacheIndex() const { return cacheIndex_; }
581 size_t cacheInfoIndex() const { return cacheInfoIndex_; }
582 LInstruction* lir() const { return lir_; }
584 void accept(CodeGenerator* codegen) override {
585 codegen->visitOutOfLineICFallback(this);
589 void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
590 if (cacheIndex == SIZE_MAX) {
591 masm.setOOM();
592 return;
595 DataPtr<IonIC> cache(this, cacheIndex);
596 MInstruction* mir = lir->mirRaw()->toInstruction();
597 cache->setScriptedLocation(mir->block()->info().script(),
598 mir->resumePoint()->pc());
600 Register temp = cache->scratchRegisterForEntryJump();
601 icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
602 masm.jump(Address(temp, 0));
604 MOZ_ASSERT(!icInfo_.empty());
606 OutOfLineICFallback* ool =
607 new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
608 addOutOfLineCode(ool, mir);
610 masm.bind(ool->rejoin());
611 cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
614 void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
615 LInstruction* lir = ool->lir();
616 size_t cacheIndex = ool->cacheIndex();
617 size_t cacheInfoIndex = ool->cacheInfoIndex();
619 DataPtr<IonIC> ic(this, cacheIndex);
621 // Register the location of the OOL path in the IC.
622 ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
624 switch (ic->kind()) {
625 case CacheKind::GetProp:
626 case CacheKind::GetElem: {
627 IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
629 saveLive(lir);
631 pushArg(getPropIC->id());
632 pushArg(getPropIC->value());
633 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
634 pushArg(ImmGCPtr(gen->outerInfo().script()));
636 using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
637 HandleValue, HandleValue, MutableHandleValue);
638 callVM<Fn, IonGetPropertyIC::update>(lir);
640 StoreValueTo(getPropIC->output()).generate(this);
641 restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
643 masm.jump(ool->rejoin());
644 return;
646 case CacheKind::GetPropSuper:
647 case CacheKind::GetElemSuper: {
648 IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
650 saveLive(lir);
652 pushArg(getPropSuperIC->id());
653 pushArg(getPropSuperIC->receiver());
654 pushArg(getPropSuperIC->object());
655 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
656 pushArg(ImmGCPtr(gen->outerInfo().script()));
658 using Fn =
659 bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
660 HandleValue, HandleValue, MutableHandleValue);
661 callVM<Fn, IonGetPropSuperIC::update>(lir);
663 StoreValueTo(getPropSuperIC->output()).generate(this);
664 restoreLiveIgnore(lir,
665 StoreValueTo(getPropSuperIC->output()).clobbered());
667 masm.jump(ool->rejoin());
668 return;
670 case CacheKind::SetProp:
671 case CacheKind::SetElem: {
672 IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
674 saveLive(lir);
676 pushArg(setPropIC->rhs());
677 pushArg(setPropIC->id());
678 pushArg(setPropIC->object());
679 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
680 pushArg(ImmGCPtr(gen->outerInfo().script()));
682 using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
683 HandleObject, HandleValue, HandleValue);
684 callVM<Fn, IonSetPropertyIC::update>(lir);
686 restoreLive(lir);
688 masm.jump(ool->rejoin());
689 return;
691 case CacheKind::GetName: {
692 IonGetNameIC* getNameIC = ic->asGetNameIC();
694 saveLive(lir);
696 pushArg(getNameIC->environment());
697 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
698 pushArg(ImmGCPtr(gen->outerInfo().script()));
700 using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
701 MutableHandleValue);
702 callVM<Fn, IonGetNameIC::update>(lir);
704 StoreValueTo(getNameIC->output()).generate(this);
705 restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
707 masm.jump(ool->rejoin());
708 return;
710 case CacheKind::BindName: {
711 IonBindNameIC* bindNameIC = ic->asBindNameIC();
713 saveLive(lir);
715 pushArg(bindNameIC->environment());
716 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
717 pushArg(ImmGCPtr(gen->outerInfo().script()));
719 using Fn =
720 JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
721 callVM<Fn, IonBindNameIC::update>(lir);
723 StoreRegisterTo(bindNameIC->output()).generate(this);
724 restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
726 masm.jump(ool->rejoin());
727 return;
729 case CacheKind::GetIterator: {
730 IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
732 saveLive(lir);
734 pushArg(getIteratorIC->value());
735 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
736 pushArg(ImmGCPtr(gen->outerInfo().script()));
738 using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
739 HandleValue);
740 callVM<Fn, IonGetIteratorIC::update>(lir);
742 StoreRegisterTo(getIteratorIC->output()).generate(this);
743 restoreLiveIgnore(lir,
744 StoreRegisterTo(getIteratorIC->output()).clobbered());
746 masm.jump(ool->rejoin());
747 return;
749 case CacheKind::OptimizeSpreadCall: {
750 auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();
752 saveLive(lir);
754 pushArg(optimizeSpreadCallIC->value());
755 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
756 pushArg(ImmGCPtr(gen->outerInfo().script()));
758 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
759 HandleValue, MutableHandleValue);
760 callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);
762 StoreValueTo(optimizeSpreadCallIC->output()).generate(this);
763 restoreLiveIgnore(
764 lir, StoreValueTo(optimizeSpreadCallIC->output()).clobbered());
766 masm.jump(ool->rejoin());
767 return;
769 case CacheKind::In: {
770 IonInIC* inIC = ic->asInIC();
772 saveLive(lir);
774 pushArg(inIC->object());
775 pushArg(inIC->key());
776 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
777 pushArg(ImmGCPtr(gen->outerInfo().script()));
779 using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
780 HandleObject, bool*);
781 callVM<Fn, IonInIC::update>(lir);
783 StoreRegisterTo(inIC->output()).generate(this);
784 restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
786 masm.jump(ool->rejoin());
787 return;
789 case CacheKind::HasOwn: {
790 IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
792 saveLive(lir);
794 pushArg(hasOwnIC->id());
795 pushArg(hasOwnIC->value());
796 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
797 pushArg(ImmGCPtr(gen->outerInfo().script()));
799 using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
800 HandleValue, int32_t*);
801 callVM<Fn, IonHasOwnIC::update>(lir);
803 StoreRegisterTo(hasOwnIC->output()).generate(this);
804 restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
806 masm.jump(ool->rejoin());
807 return;
809 case CacheKind::CheckPrivateField: {
810 IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
812 saveLive(lir);
814 pushArg(checkPrivateFieldIC->id());
815 pushArg(checkPrivateFieldIC->value());
817 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
818 pushArg(ImmGCPtr(gen->outerInfo().script()));
820 using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
821 HandleValue, HandleValue, bool*);
822 callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
824 StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
825 restoreLiveIgnore(
826 lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
828 masm.jump(ool->rejoin());
829 return;
831 case CacheKind::InstanceOf: {
832 IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
834 saveLive(lir);
836 pushArg(hasInstanceOfIC->rhs());
837 pushArg(hasInstanceOfIC->lhs());
838 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
839 pushArg(ImmGCPtr(gen->outerInfo().script()));
841 using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
842 HandleValue lhs, HandleObject rhs, bool* res);
843 callVM<Fn, IonInstanceOfIC::update>(lir);
845 StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
846 restoreLiveIgnore(lir,
847 StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
849 masm.jump(ool->rejoin());
850 return;
852 case CacheKind::UnaryArith: {
853 IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
855 saveLive(lir);
857 pushArg(unaryArithIC->input());
858 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
859 pushArg(ImmGCPtr(gen->outerInfo().script()));
861 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
862 IonUnaryArithIC* stub, HandleValue val,
863 MutableHandleValue res);
864 callVM<Fn, IonUnaryArithIC::update>(lir);
866 StoreValueTo(unaryArithIC->output()).generate(this);
867 restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
869 masm.jump(ool->rejoin());
870 return;
872 case CacheKind::ToPropertyKey: {
873 IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
875 saveLive(lir);
877 pushArg(toPropertyKeyIC->input());
878 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
879 pushArg(ImmGCPtr(gen->outerInfo().script()));
881 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
882 IonToPropertyKeyIC* ic, HandleValue val,
883 MutableHandleValue res);
884 callVM<Fn, IonToPropertyKeyIC::update>(lir);
886 StoreValueTo(toPropertyKeyIC->output()).generate(this);
887 restoreLiveIgnore(lir,
888 StoreValueTo(toPropertyKeyIC->output()).clobbered());
890 masm.jump(ool->rejoin());
891 return;
893 case CacheKind::BinaryArith: {
894 IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
896 saveLive(lir);
898 pushArg(binaryArithIC->rhs());
899 pushArg(binaryArithIC->lhs());
900 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
901 pushArg(ImmGCPtr(gen->outerInfo().script()));
903 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
904 IonBinaryArithIC* stub, HandleValue lhs,
905 HandleValue rhs, MutableHandleValue res);
906 callVM<Fn, IonBinaryArithIC::update>(lir);
908 StoreValueTo(binaryArithIC->output()).generate(this);
909 restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
911 masm.jump(ool->rejoin());
912 return;
914 case CacheKind::Compare: {
915 IonCompareIC* compareIC = ic->asCompareIC();
917 saveLive(lir);
919 pushArg(compareIC->rhs());
920 pushArg(compareIC->lhs());
921 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
922 pushArg(ImmGCPtr(gen->outerInfo().script()));
924 using Fn =
925 bool (*)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub,
926 HandleValue lhs, HandleValue rhs, bool* res);
927 callVM<Fn, IonCompareIC::update>(lir);
929 StoreRegisterTo(compareIC->output()).generate(this);
930 restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
932 masm.jump(ool->rejoin());
933 return;
935 case CacheKind::CloseIter: {
936 IonCloseIterIC* closeIterIC = ic->asCloseIterIC();
938 saveLive(lir);
940 pushArg(closeIterIC->iter());
941 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
942 pushArg(ImmGCPtr(gen->outerInfo().script()));
944 using Fn =
945 bool (*)(JSContext*, HandleScript, IonCloseIterIC*, HandleObject);
946 callVM<Fn, IonCloseIterIC::update>(lir);
948 restoreLive(lir);
950 masm.jump(ool->rejoin());
951 return;
953 case CacheKind::OptimizeGetIterator: {
954 auto* optimizeGetIteratorIC = ic->asOptimizeGetIteratorIC();
956 saveLive(lir);
958 pushArg(optimizeGetIteratorIC->value());
959 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
960 pushArg(ImmGCPtr(gen->outerInfo().script()));
962 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeGetIteratorIC*,
963 HandleValue, bool* res);
964 callVM<Fn, IonOptimizeGetIteratorIC::update>(lir);
966 StoreRegisterTo(optimizeGetIteratorIC->output()).generate(this);
967 restoreLiveIgnore(
968 lir, StoreRegisterTo(optimizeGetIteratorIC->output()).clobbered());
970 masm.jump(ool->rejoin());
971 return;
973 case CacheKind::Call:
974 case CacheKind::TypeOf:
975 case CacheKind::ToBool:
976 case CacheKind::GetIntrinsic:
977 case CacheKind::NewArray:
978 case CacheKind::NewObject:
979 MOZ_CRASH("Unsupported IC");
981 MOZ_CRASH();
984 StringObject* MNewStringObject::templateObj() const {
985 return &templateObj_->as<StringObject>();
988 CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
989 MacroAssembler* masm)
990 : CodeGeneratorSpecific(gen, graph, masm),
991 ionScriptLabels_(gen->alloc()),
992 ionNurseryObjectLabels_(gen->alloc()),
993 scriptCounts_(nullptr),
994 zoneStubsToReadBarrier_(0) {}
996 CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
998 void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
999 ValueOperand operand = ToValue(lir, LValueToInt32::Input);
1000 Register output = ToRegister(lir->output());
1001 FloatRegister temp = ToFloatRegister(lir->tempFloat());
1003 Label fails;
1004 if (lir->mode() == LValueToInt32::TRUNCATE) {
1005 OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());
1007 // We can only handle strings in truncation contexts, like bitwise
1008 // operations.
1009 Register stringReg = ToRegister(lir->temp());
1010 using Fn = bool (*)(JSContext*, JSString*, double*);
1011 auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
1012 StoreFloatRegisterTo(temp));
1013 Label* stringEntry = oolString->entry();
1014 Label* stringRejoin = oolString->rejoin();
1016 masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
1017 oolDouble->entry(), stringReg, temp, output,
1018 &fails);
1019 masm.bind(oolDouble->rejoin());
1020 } else {
1021 MOZ_ASSERT(lir->mode() == LValueToInt32::NORMAL);
1022 masm.convertValueToInt32(operand, temp, output, &fails,
1023 lir->mirNormal()->needsNegativeZeroCheck(),
1024 lir->mirNormal()->conversion());
1027 bailoutFrom(&fails, lir->snapshot());
1030 void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
1031 ValueOperand operand = ToValue(lir, LValueToDouble::InputIndex);
1032 FloatRegister output = ToFloatRegister(lir->output());
1034 // Set if we can handle other primitives beside strings, as long as they're
1035 // guaranteed to never throw. This rules out symbols and BigInts, but allows
1036 // booleans, undefined, and null.
1037 bool hasNonStringPrimitives =
1038 lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;
1040 Label isDouble, isInt32, isBool, isNull, isUndefined, done;
1043 ScratchTagScope tag(masm, operand);
1044 masm.splitTagForTest(operand, tag);
1046 masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
1047 masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
1049 if (hasNonStringPrimitives) {
1050 masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
1051 masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
1052 masm.branchTestNull(Assembler::Equal, tag, &isNull);
1056 bailout(lir->snapshot());
1058 if (hasNonStringPrimitives) {
1059 masm.bind(&isNull);
1060 masm.loadConstantDouble(0.0, output);
1061 masm.jump(&done);
1064 if (hasNonStringPrimitives) {
1065 masm.bind(&isUndefined);
1066 masm.loadConstantDouble(GenericNaN(), output);
1067 masm.jump(&done);
1070 if (hasNonStringPrimitives) {
1071 masm.bind(&isBool);
1072 masm.boolValueToDouble(operand, output);
1073 masm.jump(&done);
1076 masm.bind(&isInt32);
1077 masm.int32ValueToDouble(operand, output);
1078 masm.jump(&done);
1080 masm.bind(&isDouble);
1081 masm.unboxDouble(operand, output);
1082 masm.bind(&done);
1085 void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
1086 ValueOperand operand = ToValue(lir, LValueToFloat32::InputIndex);
1087 FloatRegister output = ToFloatRegister(lir->output());
1089 // Set if we can handle other primitives beside strings, as long as they're
1090 // guaranteed to never throw. This rules out symbols and BigInts, but allows
1091 // booleans, undefined, and null.
1092 bool hasNonStringPrimitives =
1093 lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;
1095 Label isDouble, isInt32, isBool, isNull, isUndefined, done;
1098 ScratchTagScope tag(masm, operand);
1099 masm.splitTagForTest(operand, tag);
1101 masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
1102 masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
1104 if (hasNonStringPrimitives) {
1105 masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
1106 masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
1107 masm.branchTestNull(Assembler::Equal, tag, &isNull);
1111 bailout(lir->snapshot());
1113 if (hasNonStringPrimitives) {
1114 masm.bind(&isNull);
1115 masm.loadConstantFloat32(0.0f, output);
1116 masm.jump(&done);
1119 if (hasNonStringPrimitives) {
1120 masm.bind(&isUndefined);
1121 masm.loadConstantFloat32(float(GenericNaN()), output);
1122 masm.jump(&done);
1125 if (hasNonStringPrimitives) {
1126 masm.bind(&isBool);
1127 masm.boolValueToFloat32(operand, output);
1128 masm.jump(&done);
1131 masm.bind(&isInt32);
1132 masm.int32ValueToFloat32(operand, output);
1133 masm.jump(&done);
1135 masm.bind(&isDouble);
1136 // ARM and MIPS may not have a double register available if we've
1137 // allocated output as a float32.
1138 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
1139 ScratchDoubleScope fpscratch(masm);
1140 masm.unboxDouble(operand, fpscratch);
1141 masm.convertDoubleToFloat32(fpscratch, output);
1142 #else
1143 masm.unboxDouble(operand, output);
1144 masm.convertDoubleToFloat32(output, output);
1145 #endif
1146 masm.bind(&done);
1149 void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
1150 ValueOperand operand = ToValue(lir, LValueToBigInt::InputIndex);
1151 Register output = ToRegister(lir->output());
1153 using Fn = BigInt* (*)(JSContext*, HandleValue);
1154 auto* ool =
1155 oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));
1157 Register tag = masm.extractTag(operand, output);
1159 Label notBigInt, done;
1160 masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
1161 masm.unboxBigInt(operand, output);
1162 masm.jump(&done);
1163 masm.bind(&notBigInt);
1165 masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
1166 masm.branchTestString(Assembler::Equal, tag, ool->entry());
1168 // ToBigInt(object) can have side-effects; all other types throw a TypeError.
1169 bailout(lir->snapshot());
1171 masm.bind(ool->rejoin());
1172 masm.bind(&done);
1175 void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
1176 masm.convertInt32ToDouble(ToRegister(lir->input()),
1177 ToFloatRegister(lir->output()));
1180 void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
1181 masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
1182 ToFloatRegister(lir->output()));
1185 void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
1186 masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
1187 ToFloatRegister(lir->output()));
1190 void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
1191 masm.convertInt32ToFloat32(ToRegister(lir->input()),
1192 ToFloatRegister(lir->output()));
1195 void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
1196 Label fail;
1197 FloatRegister input = ToFloatRegister(lir->input());
1198 Register output = ToRegister(lir->output());
1199 masm.convertDoubleToInt32(input, output, &fail,
1200 lir->mir()->needsNegativeZeroCheck());
1201 bailoutFrom(&fail, lir->snapshot());
1204 void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
1205 Label fail;
1206 FloatRegister input = ToFloatRegister(lir->input());
1207 Register output = ToRegister(lir->output());
1208 masm.convertFloat32ToInt32(input, output, &fail,
1209 lir->mir()->needsNegativeZeroCheck());
1210 bailoutFrom(&fail, lir->snapshot());
1213 void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
1214 #ifdef JS_64BIT
1215 // This LIR instruction is only used if the input can be negative.
1216 MOZ_ASSERT(lir->mir()->canBeNegative());
1218 Register output = ToRegister(lir->output());
1219 const LAllocation* input = lir->input();
1220 if (input->isRegister()) {
1221 masm.move32SignExtendToPtr(ToRegister(input), output);
1222 } else {
1223 masm.load32SignExtendToPtr(ToAddress(input), output);
1225 #else
1226 MOZ_CRASH("Not used on 32-bit platforms");
1227 #endif
1230 void CodeGenerator::visitNonNegativeIntPtrToInt32(
1231 LNonNegativeIntPtrToInt32* lir) {
1232 #ifdef JS_64BIT
1233 Register output = ToRegister(lir->output());
1234 MOZ_ASSERT(ToRegister(lir->input()) == output);
1236 Label bail;
1237 masm.guardNonNegativeIntPtrToInt32(output, &bail);
1238 bailoutFrom(&bail, lir->snapshot());
1239 #else
1240 MOZ_CRASH("Not used on 32-bit platforms");
1241 #endif
1244 void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
1245 Register input = ToRegister(lir->input());
1246 FloatRegister output = ToFloatRegister(lir->output());
1247 masm.convertIntPtrToDouble(input, output);
1250 void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
1251 Register output = ToRegister(lir->output());
1252 MOZ_ASSERT(ToRegister(lir->input()) == output);
1254 uint32_t byteSize = lir->mir()->byteSize();
1256 #ifdef DEBUG
1257 Label ok;
1258 masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
1259 masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
1260 masm.bind(&ok);
1261 #endif
1263 Label bail;
1264 masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
1265 bailoutFrom(&bail, lir->snapshot());
1268 void CodeGenerator::emitOOLTestObject(Register objreg,
1269 Label* ifEmulatesUndefined,
1270 Label* ifDoesntEmulateUndefined,
1271 Register scratch) {
1272 saveVolatile(scratch);
1273 #if defined(DEBUG) || defined(FUZZING)
1274 masm.loadPtr(AbsoluteAddress(
1275 gen->runtime->addressOfHasSeenObjectEmulateUndefinedFuse()),
1276 scratch);
1277 using Fn = bool (*)(JSObject* obj, size_t fuseValue);
1278 masm.setupAlignedABICall();
1279 masm.passABIArg(objreg);
1280 masm.passABIArg(scratch);
1281 masm.callWithABI<Fn, js::EmulatesUndefinedCheckFuse>();
1282 #else
1283 using Fn = bool (*)(JSObject* obj);
1284 masm.setupAlignedABICall();
1285 masm.passABIArg(objreg);
1286 masm.callWithABI<Fn, js::EmulatesUndefined>();
1287 #endif
1288 masm.storeCallPointerResult(scratch);
1289 restoreVolatile(scratch);
1291 masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
1292 masm.jump(ifDoesntEmulateUndefined);
1295 // Base out-of-line code generator for all tests of the truthiness of an
1296 // object, where the object might not be truthy. (Recall that per spec all
1297 // objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
1298 // flag to permit objects to look like |undefined| in certain contexts,
1299 // including in object truthiness testing.) We check truthiness inline except
1300 // when we're testing it on a proxy, in which case out-of-line code will call
1301 // EmulatesUndefined for a conclusive answer.
1302 class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
1303 Register objreg_;
1304 Register scratch_;
1306 Label* ifEmulatesUndefined_;
1307 Label* ifDoesntEmulateUndefined_;
1309 #ifdef DEBUG
1310 bool initialized() { return ifEmulatesUndefined_ != nullptr; }
1311 #endif
1313 public:
1314 OutOfLineTestObject()
1315 : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}
1317 void accept(CodeGenerator* codegen) final {
1318 MOZ_ASSERT(initialized());
1319 codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
1320 ifDoesntEmulateUndefined_, scratch_);
1323 // Specify the register where the object to be tested is found, labels to
1324 // jump to if the object is truthy or falsy, and a scratch register for
1325 // use in the out-of-line path.
1326 void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
1327 Label* ifDoesntEmulateUndefined, Register scratch) {
1328 MOZ_ASSERT(!initialized());
1329 MOZ_ASSERT(ifEmulatesUndefined);
1330 objreg_ = objreg;
1331 scratch_ = scratch;
1332 ifEmulatesUndefined_ = ifEmulatesUndefined;
1333 ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
1337 // A subclass of OutOfLineTestObject containing two extra labels, for use when
1338 // the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
1339 // code. The user should bind these labels in inline code, and specify them as
1340 // targets via setInputAndTargets, as appropriate.
1341 class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
1342 Label label1_;
1343 Label label2_;
1345 public:
1346 OutOfLineTestObjectWithLabels() = default;
1348 Label* label1() { return &label1_; }
1349 Label* label2() { return &label2_; }
1352 void CodeGenerator::testObjectEmulatesUndefinedKernel(
1353 Register objreg, Label* ifEmulatesUndefined,
1354 Label* ifDoesntEmulateUndefined, Register scratch,
1355 OutOfLineTestObject* ool) {
1356 ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
1357 scratch);
1359 // Perform a fast-path check of the object's class flags if the object's
1360 // not a proxy. Let out-of-line code handle the slow cases that require
1361 // saving registers, making a function call, and restoring registers.
1362 masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
1363 ifEmulatesUndefined);
1366 void CodeGenerator::branchTestObjectEmulatesUndefined(
1367 Register objreg, Label* ifEmulatesUndefined,
1368 Label* ifDoesntEmulateUndefined, Register scratch,
1369 OutOfLineTestObject* ool) {
1370 MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
1371 "ifDoesntEmulateUndefined will be bound to the fallthrough path");
1373 testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
1374 ifDoesntEmulateUndefined, scratch, ool);
1375 masm.bind(ifDoesntEmulateUndefined);
1378 void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
1379 Label* ifEmulatesUndefined,
1380 Label* ifDoesntEmulateUndefined,
1381 Register scratch,
1382 OutOfLineTestObject* ool) {
1383 testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
1384 ifDoesntEmulateUndefined, scratch, ool);
1385 masm.jump(ifDoesntEmulateUndefined);
1388 void CodeGenerator::testValueTruthyForType(
1389 JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
1390 Register tempToUnbox, Register temp, FloatRegister floatTemp,
1391 Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool,
1392 bool skipTypeTest) {
1393 #ifdef DEBUG
1394 if (skipTypeTest) {
1395 Label expected;
1396 masm.branchTestType(Assembler::Equal, tag, type, &expected);
1397 masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
1398 masm.bind(&expected);
1400 #endif
1402 // Handle irregular types first.
1403 switch (type) {
1404 case JSVAL_TYPE_UNDEFINED:
1405 case JSVAL_TYPE_NULL:
1406 // Undefined and null are falsy.
1407 if (!skipTypeTest) {
1408 masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
1409 } else {
1410 masm.jump(ifFalsy);
1412 return;
1413 case JSVAL_TYPE_SYMBOL:
1414 // Symbols are truthy.
1415 if (!skipTypeTest) {
1416 masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
1417 } else {
1418 masm.jump(ifTruthy);
1420 return;
1421 case JSVAL_TYPE_OBJECT: {
1422 Label notObject;
1423 if (!skipTypeTest) {
1424 masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
1426 ScratchTagScopeRelease _(&tag);
1427 Register objreg = masm.extractObject(value, tempToUnbox);
1428 testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, temp, ool);
1429 masm.bind(&notObject);
1430 return;
1432 default:
1433 break;
1436 // Check the type of the value (unless this is the last possible type).
1437 Label differentType;
1438 if (!skipTypeTest) {
1439 masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
1442 // Branch if the value is falsy.
1443 ScratchTagScopeRelease _(&tag);
1444 switch (type) {
1445 case JSVAL_TYPE_BOOLEAN: {
1446 masm.branchTestBooleanTruthy(false, value, ifFalsy);
1447 break;
1449 case JSVAL_TYPE_INT32: {
1450 masm.branchTestInt32Truthy(false, value, ifFalsy);
1451 break;
1453 case JSVAL_TYPE_STRING: {
1454 masm.branchTestStringTruthy(false, value, ifFalsy);
1455 break;
1457 case JSVAL_TYPE_BIGINT: {
1458 masm.branchTestBigIntTruthy(false, value, ifFalsy);
1459 break;
1461 case JSVAL_TYPE_DOUBLE: {
1462 masm.unboxDouble(value, floatTemp);
1463 masm.branchTestDoubleTruthy(false, floatTemp, ifFalsy);
1464 break;
1466 default:
1467 MOZ_CRASH("Unexpected value type");
1470 // If we reach this point, the value is truthy. We fall through for
1471 // truthy on the last test; otherwise, branch.
1472 if (!skipTypeTest) {
1473 masm.jump(ifTruthy);
1476 masm.bind(&differentType);
1479 void CodeGenerator::testValueTruthy(const ValueOperand& value,
1480 Register tempToUnbox, Register temp,
1481 FloatRegister floatTemp,
1482 const TypeDataList& observedTypes,
1483 Label* ifTruthy, Label* ifFalsy,
1484 OutOfLineTestObject* ool) {
1485 ScratchTagScope tag(masm, value);
1486 masm.splitTagForTest(value, tag);
1488 const std::initializer_list<JSValueType> defaultOrder = {
1489 JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN,
1490 JSVAL_TYPE_INT32, JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
1491 JSVAL_TYPE_DOUBLE, JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
1493 mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);
1495 // Generate tests for previously observed types first.
1496 // The TypeDataList is sorted by descending frequency.
1497 for (auto& observed : observedTypes) {
1498 JSValueType type = observed.type();
1499 remaining -= type;
1501 testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
1502 ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
1505 // Generate tests for remaining types.
1506 for (auto type : defaultOrder) {
1507 if (!remaining.contains(type)) {
1508 continue;
1510 remaining -= type;
1512 // We don't need a type test for the last possible type.
1513 bool skipTypeTest = remaining.isEmpty();
1514 testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
1515 ifTruthy, ifFalsy, ool, skipTypeTest);
1517 MOZ_ASSERT(remaining.isEmpty());
1519 // We fall through if the final test is truthy.
1522 void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
1523 Label* ifTrueLabel = getJumpLabelForBranch(lir->ifTrue());
1524 Label* ifFalseLabel = getJumpLabelForBranch(lir->ifFalse());
1525 Register input = ToRegister(lir->input());
1527 if (isNextBlock(lir->ifFalse()->lir())) {
1528 masm.branchIfBigIntIsNonZero(input, ifTrueLabel);
1529 } else if (isNextBlock(lir->ifTrue()->lir())) {
1530 masm.branchIfBigIntIsZero(input, ifFalseLabel);
1531 } else {
1532 masm.branchIfBigIntIsZero(input, ifFalseLabel);
1533 jumpToBlock(lir->ifTrue());
1537 void CodeGenerator::assertObjectDoesNotEmulateUndefined(
1538 Register input, Register temp, const MInstruction* mir) {
1539 #if defined(DEBUG) || defined(FUZZING)
1540 // Validate that the object indeed doesn't have the emulates undefined flag.
1541 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
1542 addOutOfLineCode(ool, mir);
1544 Label* doesNotEmulateUndefined = ool->label1();
1545 Label* emulatesUndefined = ool->label2();
1547 testObjectEmulatesUndefined(input, emulatesUndefined, doesNotEmulateUndefined,
1548 temp, ool);
1549 masm.bind(emulatesUndefined);
1550 masm.assumeUnreachable(
1551 "Found an object emulating undefined while the fuse is intact");
1552 masm.bind(doesNotEmulateUndefined);
1553 #endif
1556 void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
1557 Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
1558 Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
1559 Register input = ToRegister(lir->input());
1561 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
1562 if (intact) {
1563 assertObjectDoesNotEmulateUndefined(input, ToRegister(lir->temp()),
1564 lir->mir());
1565 // Bug 1874905: It would be fantastic if this could be optimized out
1566 masm.jump(truthy);
1567 } else {
1568 auto* ool = new (alloc()) OutOfLineTestObject();
1569 addOutOfLineCode(ool, lir->mir());
1571 testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
1572 ool);
1576 void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
1577 auto* ool = new (alloc()) OutOfLineTestObject();
1578 addOutOfLineCode(ool, lir->mir());
1580 Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
1581 Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
1583 ValueOperand input = ToValue(lir, LTestVAndBranch::Input);
1584 Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
1585 Register temp = ToRegister(lir->temp2());
1586 FloatRegister floatTemp = ToFloatRegister(lir->tempFloat());
1587 const TypeDataList& observedTypes = lir->mir()->observedTypes();
1589 testValueTruthy(input, tempToUnbox, temp, floatTemp, observedTypes, truthy,
1590 falsy, ool);
1591 masm.jump(truthy);
1594 void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
1595 Register input = ToRegister(lir->input());
1596 Register output = ToRegister(lir->output());
1597 const JSAtomState& names = gen->runtime->names();
1598 Label true_, done;
1600 masm.branchTest32(Assembler::NonZero, input, input, &true_);
1601 masm.movePtr(ImmGCPtr(names.false_), output);
1602 masm.jump(&done);
1604 masm.bind(&true_);
1605 masm.movePtr(ImmGCPtr(names.true_), output);
1607 masm.bind(&done);
1610 void CodeGenerator::visitIntToString(LIntToString* lir) {
1611 Register input = ToRegister(lir->input());
1612 Register output = ToRegister(lir->output());
1614 using Fn = JSLinearString* (*)(JSContext*, int);
1615 OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
1616 lir, ArgList(input), StoreRegisterTo(output));
1618 masm.lookupStaticIntString(input, output, gen->runtime->staticStrings(),
1619 ool->entry());
1621 masm.bind(ool->rejoin());
1624 void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
1625 FloatRegister input = ToFloatRegister(lir->input());
1626 Register temp = ToRegister(lir->temp0());
1627 Register output = ToRegister(lir->output());
1629 using Fn = JSString* (*)(JSContext*, double);
1630 OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
1631 lir, ArgList(input), StoreRegisterTo(output));
1633 // Try double to integer conversion and run integer to string code.
1634 masm.convertDoubleToInt32(input, temp, ool->entry(), false);
1635 masm.lookupStaticIntString(temp, output, gen->runtime->staticStrings(),
1636 ool->entry());
1638 masm.bind(ool->rejoin());
1641 void CodeGenerator::visitValueToString(LValueToString* lir) {
1642 ValueOperand input = ToValue(lir, LValueToString::InputIndex);
1643 Register output = ToRegister(lir->output());
1645 using Fn = JSString* (*)(JSContext*, HandleValue);
1646 OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
1647 lir, ArgList(input), StoreRegisterTo(output));
1649 Label done;
1650 Register tag = masm.extractTag(input, output);
1651 const JSAtomState& names = gen->runtime->names();
1653 // String
1655 Label notString;
1656 masm.branchTestString(Assembler::NotEqual, tag, &notString);
1657 masm.unboxString(input, output);
1658 masm.jump(&done);
1659 masm.bind(&notString);
1662 // Integer
1664 Label notInteger;
1665 masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
1666 Register unboxed = ToTempUnboxRegister(lir->temp0());
1667 unboxed = masm.extractInt32(input, unboxed);
1668 masm.lookupStaticIntString(unboxed, output, gen->runtime->staticStrings(),
1669 ool->entry());
1670 masm.jump(&done);
1671 masm.bind(&notInteger);
1674 // Double
1676 // Note: no fastpath. Need two extra registers and can only convert doubles
1677 // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
1678 masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
1681 // Undefined
1683 Label notUndefined;
1684 masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
1685 masm.movePtr(ImmGCPtr(names.undefined), output);
1686 masm.jump(&done);
1687 masm.bind(&notUndefined);
1690 // Null
1692 Label notNull;
1693 masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
1694 masm.movePtr(ImmGCPtr(names.null), output);
1695 masm.jump(&done);
1696 masm.bind(&notNull);
1699 // Boolean
1701 Label notBoolean, true_;
1702 masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
1703 masm.branchTestBooleanTruthy(true, input, &true_);
1704 masm.movePtr(ImmGCPtr(names.false_), output);
1705 masm.jump(&done);
1706 masm.bind(&true_);
1707 masm.movePtr(ImmGCPtr(names.true_), output);
1708 masm.jump(&done);
1709 masm.bind(&notBoolean);
1712 // Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
1713 if (lir->mir()->mightHaveSideEffects()) {
1714 // Object
1715 if (lir->mir()->supportSideEffects()) {
1716 masm.branchTestObject(Assembler::Equal, tag, ool->entry());
1717 } else {
1718 // Bail.
1719 MOZ_ASSERT(lir->mir()->needsSnapshot());
1720 Label bail;
1721 masm.branchTestObject(Assembler::Equal, tag, &bail);
1722 bailoutFrom(&bail, lir->snapshot());
1725 // Symbol
1726 if (lir->mir()->supportSideEffects()) {
1727 masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
1728 } else {
1729 // Bail.
1730 MOZ_ASSERT(lir->mir()->needsSnapshot());
1731 Label bail;
1732 masm.branchTestSymbol(Assembler::Equal, tag, &bail);
1733 bailoutFrom(&bail, lir->snapshot());
1737 // BigInt
1739 // No fastpath currently implemented.
1740 masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
1743 masm.assumeUnreachable("Unexpected type for LValueToString.");
1745 masm.bind(&done);
1746 masm.bind(ool->rejoin());
1749 using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);
1751 static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
1752 size_t offset, Register buffer,
1753 LiveGeneralRegisterSet& liveVolatiles,
1754 StoreBufferMutationFn fun) {
1755 Label callVM;
1756 Label exit;
1758 // Call into the VM to barrier the write. The only registers that need to
1759 // be preserved are those in liveVolatiles, so once they are saved on the
1760 // stack all volatile registers are available for use.
1761 masm.bind(&callVM);
1762 masm.PushRegsInMask(liveVolatiles);
1764 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
1765 regs.takeUnchecked(buffer);
1766 regs.takeUnchecked(holder);
1767 Register addrReg = regs.takeAny();
1769 masm.computeEffectiveAddress(Address(holder, offset), addrReg);
1771 bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
1772 if (needExtraReg) {
1773 masm.push(holder);
1774 masm.setupUnalignedABICall(holder);
1775 } else {
1776 masm.setupUnalignedABICall(regs.takeAny());
1778 masm.passABIArg(buffer);
1779 masm.passABIArg(addrReg);
1780 masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun),
1781 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
1783 if (needExtraReg) {
1784 masm.pop(holder);
1786 masm.PopRegsInMask(liveVolatiles);
1787 masm.bind(&exit);
1790 // Warning: this function modifies prev and next.
1791 static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
1792 size_t offset, Register prev, Register next,
1793 LiveGeneralRegisterSet& liveVolatiles) {
1794 Label exit;
1795 Label checkRemove, putCell;
1797 // if (next && (buffer = next->storeBuffer()))
1798 // but we never pass in nullptr for next.
1799 Register storebuffer = next;
1800 masm.loadStoreBuffer(next, storebuffer);
1801 masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);
1803 // if (prev && prev->storeBuffer())
1804 masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
1805 masm.loadStoreBuffer(prev, prev);
1806 masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);
1808 // buffer->putCell(cellp)
1809 masm.bind(&putCell);
1810 EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
1811 JSString::addCellAddressToStoreBuffer);
1812 masm.jump(&exit);
1814 // if (prev && (buffer = prev->storeBuffer()))
1815 masm.bind(&checkRemove);
1816 masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
1817 masm.loadStoreBuffer(prev, storebuffer);
1818 masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
1819 EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
1820 JSString::removeCellAddressFromStoreBuffer);
1822 masm.bind(&exit);
1825 void CodeGenerator::visitRegExp(LRegExp* lir) {
1826 Register output = ToRegister(lir->output());
1827 Register temp = ToRegister(lir->temp0());
1828 JSObject* source = lir->mir()->source();
1830 using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
1831 OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
1832 lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
1833 if (lir->mir()->hasShared()) {
1834 TemplateObject templateObject(source);
1835 masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
1836 ool->entry());
1837 } else {
1838 masm.jump(ool->entry());
1840 masm.bind(ool->rejoin());
1843 static constexpr int32_t RegExpPairsVectorStartOffset(
1844 int32_t inputOutputDataStartOffset) {
1845 return inputOutputDataStartOffset + int32_t(InputOutputDataSize) +
1846 int32_t(sizeof(MatchPairs));
1849 static Address RegExpPairCountAddress(MacroAssembler& masm,
1850 int32_t inputOutputDataStartOffset) {
1851 return Address(FramePointer, inputOutputDataStartOffset +
1852 int32_t(InputOutputDataSize) +
1853 MatchPairs::offsetOfPairCount());
1856 static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
1857 Register input, Register lastIndex,
1858 Register staticsReg, Register temp1,
1859 Register temp2, gc::Heap initialStringHeap,
1860 LiveGeneralRegisterSet& volatileRegs) {
1861 Address pendingInputAddress(staticsReg,
1862 RegExpStatics::offsetOfPendingInput());
1863 Address matchesInputAddress(staticsReg,
1864 RegExpStatics::offsetOfMatchesInput());
1865 Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
1866 Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());
1868 masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
1869 masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
1870 masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);
1872 if (initialStringHeap == gc::Heap::Default) {
1873 // Writing into RegExpStatics tenured memory; must post-barrier.
1874 if (staticsReg.volatile_()) {
1875 volatileRegs.add(staticsReg);
1878 masm.loadPtr(pendingInputAddress, temp1);
1879 masm.storePtr(input, pendingInputAddress);
1880 masm.movePtr(input, temp2);
1881 EmitPostWriteBarrierS(masm, staticsReg,
1882 RegExpStatics::offsetOfPendingInput(),
1883 temp1 /* prev */, temp2 /* next */, volatileRegs);
1885 masm.loadPtr(matchesInputAddress, temp1);
1886 masm.storePtr(input, matchesInputAddress);
1887 masm.movePtr(input, temp2);
1888 EmitPostWriteBarrierS(masm, staticsReg,
1889 RegExpStatics::offsetOfMatchesInput(),
1890 temp1 /* prev */, temp2 /* next */, volatileRegs);
1891 } else {
1892 masm.debugAssertGCThingIsTenured(input, temp1);
1893 masm.storePtr(input, pendingInputAddress);
1894 masm.storePtr(input, matchesInputAddress);
1897 masm.storePtr(lastIndex,
1898 Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
1899 masm.store32(
1900 Imm32(1),
1901 Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));
1903 masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
1904 RegExpObject::SHARED_SLOT)),
1905 temp1, JSVAL_TYPE_PRIVATE_GCTHING);
1906 masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
1907 masm.storePtr(temp2, lazySourceAddress);
1908 static_assert(sizeof(JS::RegExpFlags) == 1, "load size must match flag size");
1909 masm.load8ZeroExtend(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
1910 masm.store8(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
1913 // Prepare an InputOutputData and optional MatchPairs which space has been
1914 // allocated for on the stack, and try to execute a RegExp on a string input.
1915 // If the RegExp was successfully executed and matched the input, fallthrough.
1916 // Otherwise, jump to notFound or failure.
1918 // inputOutputDataStartOffset is the offset relative to the frame pointer
1919 // register. This offset is negative for the RegExpExecTest stub.
1920 static bool PrepareAndExecuteRegExp(MacroAssembler& masm, Register regexp,
1921 Register input, Register lastIndex,
1922 Register temp1, Register temp2,
1923 Register temp3,
1924 int32_t inputOutputDataStartOffset,
1925 gc::Heap initialStringHeap, Label* notFound,
1926 Label* failure) {
1927 JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");
1929 using irregexp::InputOutputData;
1932 * [SMDOC] Stack layout for PrepareAndExecuteRegExp
1934 * Before this function is called, the caller is responsible for
1935 * allocating enough stack space for the following data:
1937 * inputOutputDataStartOffset +-----> +---------------+
1938 * |InputOutputData|
1939 * inputStartAddress +----------> inputStart|
1940 * inputEndAddress +----------> inputEnd|
1941 * startIndexAddress +----------> startIndex|
1942 * matchesAddress +----------> matches|-----+
1943 * +---------------+ |
1944 * matchPairs(Address|Offset) +-----> +---------------+ <--+
1945 * | MatchPairs |
1946 * pairCountAddress +----------> count |
1947 * pairsPointerAddress +----------> pairs |-----+
1948 * +---------------+ |
1949 * pairsArray(Address|Offset) +-----> +---------------+ <--+
1950 * | MatchPair |
1951 * firstMatchStartAddress +----------> start | <--+
1952 * | limit | |
1953 * +---------------+ |
1954 * . |
1955 * . Reserved space for
1956 * . RegExpObject::MaxPairCount
1957 * . MatchPair objects
1958 * . |
1959 * +---------------+ |
1960 * | MatchPair | |
1961 * | start | |
1962 * | limit | <--+
1963 * +---------------+
1966 int32_t ioOffset = inputOutputDataStartOffset;
1967 int32_t matchPairsOffset = ioOffset + int32_t(sizeof(InputOutputData));
1968 int32_t pairsArrayOffset = matchPairsOffset + int32_t(sizeof(MatchPairs));
1970 Address inputStartAddress(FramePointer,
1971 ioOffset + InputOutputData::offsetOfInputStart());
1972 Address inputEndAddress(FramePointer,
1973 ioOffset + InputOutputData::offsetOfInputEnd());
1974 Address startIndexAddress(FramePointer,
1975 ioOffset + InputOutputData::offsetOfStartIndex());
1976 Address matchesAddress(FramePointer,
1977 ioOffset + InputOutputData::offsetOfMatches());
1979 Address matchPairsAddress(FramePointer, matchPairsOffset);
1980 Address pairCountAddress(FramePointer,
1981 matchPairsOffset + MatchPairs::offsetOfPairCount());
1982 Address pairsPointerAddress(FramePointer,
1983 matchPairsOffset + MatchPairs::offsetOfPairs());
1985 Address pairsArrayAddress(FramePointer, pairsArrayOffset);
1986 Address firstMatchStartAddress(FramePointer,
1987 pairsArrayOffset + MatchPair::offsetOfStart());
1989 // First, fill in a skeletal MatchPairs instance on the stack. This will be
1990 // passed to the OOL stub in the caller if we aren't able to execute the
1991 // RegExp inline, and that stub needs to be able to determine whether the
1992 // execution finished successfully.
1994 // Initialize MatchPairs::pairCount to 1. The correct value can only
1995 // be determined after loading the RegExpShared. If the RegExpShared
1996 // has Kind::Atom, this is the correct pairCount.
1997 masm.store32(Imm32(1), pairCountAddress);
1999 // Initialize MatchPairs::pairs pointer
2000 masm.computeEffectiveAddress(pairsArrayAddress, temp1);
2001 masm.storePtr(temp1, pairsPointerAddress);
2003 // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch
2004 masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);
2006 // Determine the set of volatile inputs to save when calling into C++ or
2007 // regexp code.
2008 LiveGeneralRegisterSet volatileRegs;
2009 if (lastIndex.volatile_()) {
2010 volatileRegs.add(lastIndex);
2012 if (input.volatile_()) {
2013 volatileRegs.add(input);
2015 if (regexp.volatile_()) {
2016 volatileRegs.add(regexp);
2019 // Ensure the input string is not a rope.
2020 Label isLinear;
2021 masm.branchIfNotRope(input, &isLinear);
2023 masm.PushRegsInMask(volatileRegs);
2025 using Fn = JSLinearString* (*)(JSString*);
2026 masm.setupUnalignedABICall(temp1);
2027 masm.passABIArg(input);
2028 masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
2030 MOZ_ASSERT(!volatileRegs.has(temp1));
2031 masm.storeCallPointerResult(temp1);
2032 masm.PopRegsInMask(volatileRegs);
2034 masm.branchTestPtr(Assembler::Zero, temp1, temp1, failure);
2036 masm.bind(&isLinear);
2038 // Load the RegExpShared.
2039 Register regexpReg = temp1;
2040 Address sharedSlot = Address(
2041 regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
2042 masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
2043 masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);
2045 // Handle Atom matches
2046 Label notAtom, checkSuccess;
2047 masm.branchPtr(Assembler::Equal,
2048 Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
2049 ImmWord(0), &notAtom);
2051 masm.computeEffectiveAddress(matchPairsAddress, temp3);
2053 masm.PushRegsInMask(volatileRegs);
2054 using Fn = RegExpRunStatus (*)(RegExpShared* re, JSLinearString* input,
2055 size_t start, MatchPairs* matchPairs);
2056 masm.setupUnalignedABICall(temp2);
2057 masm.passABIArg(regexpReg);
2058 masm.passABIArg(input);
2059 masm.passABIArg(lastIndex);
2060 masm.passABIArg(temp3);
2061 masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();
2063 MOZ_ASSERT(!volatileRegs.has(temp1));
2064 masm.storeCallInt32Result(temp1);
2065 masm.PopRegsInMask(volatileRegs);
2067 masm.jump(&checkSuccess);
2069 masm.bind(&notAtom);
2071 // Don't handle regexps with too many capture pairs.
2072 masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
2073 masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
2074 failure);
2076 // Fill in the pair count in the MatchPairs on the stack.
2077 masm.store32(temp2, pairCountAddress);
2079 // Load code pointer and length of input (in bytes).
2080 // Store the input start in the InputOutputData.
2081 Register codePointer = temp1; // Note: temp1 was previously regexpReg.
2082 Register byteLength = temp3;
2084 Label isLatin1, done;
2085 masm.loadStringLength(input, byteLength);
2087 masm.branchLatin1String(input, &isLatin1);
2089 // Two-byte input
2090 masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
2091 masm.storePtr(temp2, inputStartAddress);
2092 masm.loadPtr(
2093 Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
2094 codePointer);
2095 masm.lshiftPtr(Imm32(1), byteLength);
2096 masm.jump(&done);
2098 // Latin1 input
2099 masm.bind(&isLatin1);
2100 masm.loadStringChars(input, temp2, CharEncoding::Latin1);
2101 masm.storePtr(temp2, inputStartAddress);
2102 masm.loadPtr(
2103 Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
2104 codePointer);
2106 masm.bind(&done);
2108 // Store end pointer
2109 masm.addPtr(byteLength, temp2);
2110 masm.storePtr(temp2, inputEndAddress);
2113 // Guard that the RegExpShared has been compiled for this type of input.
2114 // If it has not been compiled, we fall back to the OOL case, which will
2115 // do a VM call into the interpreter.
2116 // TODO: add an interpreter trampoline?
2117 masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
2118 masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);
2120 // Finish filling in the InputOutputData instance on the stack
2121 masm.computeEffectiveAddress(matchPairsAddress, temp2);
2122 masm.storePtr(temp2, matchesAddress);
2123 masm.storePtr(lastIndex, startIndexAddress);
2125 // Execute the RegExp.
2126 masm.computeEffectiveAddress(
2127 Address(FramePointer, inputOutputDataStartOffset), temp2);
2128 masm.PushRegsInMask(volatileRegs);
2129 masm.setupUnalignedABICall(temp3);
2130 masm.passABIArg(temp2);
2131 masm.callWithABI(codePointer);
2132 masm.storeCallInt32Result(temp1);
2133 masm.PopRegsInMask(volatileRegs);
2135 masm.bind(&checkSuccess);
2136 masm.branch32(Assembler::Equal, temp1,
2137 Imm32(int32_t(RegExpRunStatus::Success_NotFound)), notFound);
2138 masm.branch32(Assembler::Equal, temp1, Imm32(int32_t(RegExpRunStatus::Error)),
2139 failure);
2141 // Lazily update the RegExpStatics.
2142 size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
2143 RegExpRealm::offsetOfRegExpStatics();
2144 masm.loadGlobalObjectData(temp1);
2145 masm.loadPtr(Address(temp1, offset), temp1);
2146 UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
2147 initialStringHeap, volatileRegs);
2149 return true;
// Forward declaration; the definition appears later in this file. Emits code
// that copies |len| characters of the given |encoding| from |from| to |to|,
// using |byteOpScratch| as a scratch register. |maximumLength| defaults to
// "unbounded" (SIZE_MAX); see the definition for its exact semantics.
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding encoding,
                            size_t maximumLength = SIZE_MAX);
// Helper that emits inline code to materialize a (sub)string for a regexp
// match: depending on the match length it produces a static string, a thin or
// fat inline string, or a dependent string sharing the base string's chars
// (see CreateDependentString::generate below).
class CreateDependentString {
  CharEncoding encoding_;  // Char encoding of the base string.
  Register string_;        // Receives the result string.
  Register temp1_;
  Register temp2_;
  Label* failure_;         // Jumped to if the OOL allocation fails.

  // One OOL-allocation entry label and one rejoin label per string kind.
  enum class FallbackKind : uint8_t {
    InlineString,
    FatInlineString,
    NotInlineString,
    Count
  };
  mozilla::EnumeratedArray<FallbackKind, Label, size_t(FallbackKind::Count)>
      fallbacks_, joins_;

 public:
  CreateDependentString(CharEncoding encoding, Register string, Register temp1,
                        Register temp2, Label* failure)
      : encoding_(encoding),
        string_(string),
        temp1_(temp1),
        temp2_(temp2),
        failure_(failure) {}

  Register string() const { return string_; }
  CharEncoding encoding() const { return encoding_; }

  // Generate code that creates DependentString.
  // Caller should call generateFallback after masm.ret(), to generate
  // fallback path.
  void generate(MacroAssembler& masm, const JSAtomState& names,
                CompileRuntime* runtime, Register base,
                BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
                gc::Heap initialStringHeap);

  // Generate fallback path for creating DependentString.
  void generateFallback(MacroAssembler& masm);
};
// Emit inline code producing the match substring [start, limit) of |base| in
// |string_|. Fast paths, in order: empty string atom, whole-base match,
// one-char Latin-1 static string, thin/fat inline string copy, and finally a
// dependent string pointing into |base|'s characters.
void CreateDependentString::generate(MacroAssembler& masm,
                                     const JSAtomState& names,
                                     CompileRuntime* runtime, Register base,
                                     BaseIndex startIndexAddress,
                                     BaseIndex limitIndexAddress,
                                     gc::Heap initialStringHeap) {
  JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // Allocate a string of |kind|, binding the fallback/join labels so that
  // generateFallback() can retry the allocation in C++ and rejoin here.
  auto newGCString = [&](FallbackKind kind) {
    uint32_t flags = kind == FallbackKind::InlineString
                         ? JSString::INIT_THIN_INLINE_FLAGS
                     : kind == FallbackKind::FatInlineString
                         ? JSString::INIT_FAT_INLINE_FLAGS
                         : JSString::INIT_DEPENDENT_FLAGS;
    if (encoding_ == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }

    if (kind != FallbackKind::FatInlineString) {
      masm.newGCString(string_, temp2_, initialStringHeap, &fallbacks_[kind]);
    } else {
      masm.newGCFatInlineString(string_, temp2_, initialStringHeap,
                                &fallbacks_[kind]);
    }
    masm.bind(&joins_[kind]);
    masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
  };

  // Compute the string length: temp1_ = limit - start, temp2_ = start.
  masm.load32(startIndexAddress, temp2_);
  masm.load32(limitIndexAddress, temp1_);
  masm.sub32(temp2_, temp1_);

  Label done, nonEmpty;

  // Zero length matches use the empty string.
  masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
  masm.movePtr(ImmGCPtr(names.empty_), string_);
  masm.jump(&done);

  masm.bind(&nonEmpty);

  // Complete matches use the base string.
  Label nonBaseStringMatch;
  masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
  masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
                temp1_, &nonBaseStringMatch);
  masm.movePtr(base, string_);
  masm.jump(&done);

  masm.bind(&nonBaseStringMatch);

  Label notInline;
  {
    int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
                                  ? JSFatInlineString::MAX_LENGTH_LATIN1
                                  : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
    masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);

    // Make a thin or fat inline string.
    Label stringAllocated, fatInline;

    int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
                                      ? JSThinInlineString::MAX_LENGTH_LATIN1
                                      : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
    masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
                  &fatInline);
    if (encoding_ == CharEncoding::Latin1) {
      // One character Latin-1 strings can be loaded directly from the
      // static strings table.
      Label thinInline;
      masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
      {
        static_assert(
            StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
            "Latin-1 strings can be loaded from static strings");

        masm.loadStringChars(base, temp1_, encoding_);
        masm.loadChar(temp1_, temp2_, temp1_, encoding_);

        masm.lookupStaticString(temp1_, string_, runtime->staticStrings());

        masm.jump(&done);
      }
      masm.bind(&thinInline);
    }
    {
      newGCString(FallbackKind::InlineString);
      masm.jump(&stringAllocated);
    }
    masm.bind(&fatInline);
    { newGCString(FallbackKind::FatInlineString); }
    masm.bind(&stringAllocated);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    // Spill |string_| and |base| so they can serve as scratch during the
    // character copy; restored by the matching pops below.
    masm.push(string_);
    masm.push(base);

    MOZ_ASSERT(startIndexAddress.base == FramePointer,
               "startIndexAddress is still valid after stack pushes");

    // Load chars pointer for the new string.
    masm.loadInlineStringCharsForStore(string_, string_);

    // Load the source characters pointer.
    masm.loadStringChars(base, temp2_, encoding_);
    masm.load32(startIndexAddress, base);
    masm.addToCharPtr(temp2_, base, encoding_);

    CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);

    masm.pop(base);
    masm.pop(string_);

    masm.jump(&done);
  }

  masm.bind(&notInline);

  {
    // Make a dependent string.
    // Warning: string may be tenured (if the fallback case is hit), so
    // stores into it must be post barriered.
    newGCString(FallbackKind::NotInlineString);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    masm.loadNonInlineStringChars(base, temp1_, encoding_);
    masm.load32(startIndexAddress, temp2_);
    masm.addToCharPtr(temp1_, temp2_, encoding_);
    masm.storeNonInlineStringChars(temp1_, string_);
    masm.storeDependentStringBase(base, string_);
    masm.movePtr(base, temp1_);

    // Follow any base pointer if the input is itself a dependent string.
    // Watch for undepended strings, which have a base pointer but don't
    // actually share their characters with it.
    Label noBase;
    masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
    masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
    masm.branchTest32(Assembler::Zero, temp2_, Imm32(JSString::DEPENDENT_BIT),
                      &noBase);
    masm.loadDependentStringBase(base, temp1_);
    masm.storeDependentStringBase(temp1_, string_);
    masm.bind(&noBase);

    // Post-barrier the base store, whether it was the direct or indirect
    // base (both will end up in temp1 here).
    masm.branchPtrInNurseryChunk(Assembler::Equal, string_, temp2_, &done);
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp1_, temp2_, &done);

    LiveRegisterSet regsToSave(RegisterSet::Volatile());
    regsToSave.takeUnchecked(temp1_);
    regsToSave.takeUnchecked(temp2_);

    masm.PushRegsInMask(regsToSave);

    masm.mov(ImmPtr(runtime), temp1_);

    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
    masm.setupUnalignedABICall(temp2_);
    masm.passABIArg(temp1_);
    masm.passABIArg(string_);
    masm.callWithABI<Fn, PostWriteBarrier>();

    masm.PopRegsInMask(regsToSave);
  }

  masm.bind(&done);
}
// Emit the out-of-line allocation paths for generate(): for each string kind,
// call into C++ to allocate, null-check the result (jumping to |failure_| on
// OOM), and rejoin the inline path at the corresponding |joins_| label.
void CreateDependentString::generateFallback(MacroAssembler& masm) {
  JitSpew(JitSpew_Codegen,
          "# Emitting CreateDependentString fallback (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // |string_| and |temp2_| carry the result / are clobbered below, so they
  // are excluded from the save set.
  LiveRegisterSet regsToSave(RegisterSet::Volatile());
  regsToSave.takeUnchecked(string_);
  regsToSave.takeUnchecked(temp2_);

  for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
    masm.bind(&fallbacks_[kind]);

    masm.PushRegsInMask(regsToSave);

    using Fn = void* (*)(JSContext* cx);
    masm.setupUnalignedABICall(string_);
    masm.loadJSContext(string_);
    masm.passABIArg(string_);
    if (kind == FallbackKind::FatInlineString) {
      masm.callWithABI<Fn, AllocateFatInlineString>();
    } else {
      masm.callWithABI<Fn, AllocateDependentString>();
    }
    masm.storeCallPointerResult(string_);

    masm.PopRegsInMask(regsToSave);

    masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);

    masm.jump(&joins_[kind]);
  }
}
// Generate the RegExpMatcher and RegExpExecMatch stubs. These are very similar,
// but RegExpExecMatch also has to load and update .lastIndex for global/sticky
// regular expressions.
//
// On success the stub returns the match-result array object in JSReturnOperand;
// a null result means "no match"; an undefined result signals the caller to
// take the OOL VM-call path. Returns nullptr on failure to link the code.
static JitCode* GenerateRegExpMatchStubShared(JSContext* cx,
                                              gc::Heap initialStringHeap,
                                              bool isExecMatch) {
  if (isExecMatch) {
    JitSpew(JitSpew_Codegen, "# Emitting RegExpExecMatch stub");
  } else {
    JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
  }

  // |initialStringHeap| could be stale after a GC.
  JS::AutoCheckCannotGC nogc(cx);

  Register regexp = RegExpMatcherRegExpReg;
  Register input = RegExpMatcherStringReg;
  Register lastIndex = RegExpMatcherLastIndexReg;
  ValueOperand result = JSReturnOperand;

  // We are free to clobber all registers, as LRegExpMatcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();
  Register maybeTemp4 = InvalidReg;
  if (!regs.empty()) {
    // There are not enough registers on x86.
    maybeTemp4 = regs.takeAny();
  }
  Register maybeTemp5 = InvalidReg;
  if (!regs.empty()) {
    // There are not enough registers on x86.
    maybeTemp5 = regs.takeAny();
  }

  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "GenerateRegExpMatchStubShared");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  Label notFoundZeroLastIndex;
  if (isExecMatch) {
    masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
  }

  // The InputOutputData is placed above the frame pointer and return address on
  // the stack.
  int32_t inputOutputDataStartOffset = 2 * sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // If a regexp has named captures, fall back to the OOL stub, which
  // will end up calling CreateRegExpMatchResults.
  Register shared = temp2;
  masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
                                          RegExpObject::SHARED_SLOT)),
                      shared, JSVAL_TYPE_PRIVATE_GCTHING);
  masm.branchPtr(Assembler::NotEqual,
                 Address(shared, RegExpShared::offsetOfGroupsTemplate()),
                 ImmWord(0), &oolEntry);

  // Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
  masm.branchTest32(Assembler::NonZero,
                    Address(shared, RegExpShared::offsetOfFlags()),
                    Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);

  Address pairCountAddress =
      RegExpPairCountAddress(masm, inputOutputDataStartOffset);

  // Construct the result.
  Register object = temp1;
  {
    // In most cases, the array will have just 1-2 elements, so we optimize for
    // that by emitting separate code paths for capacity 2/6/14 (= 4/8/16 slots
    // because two slots are used for the elements header).

    // Load the array length in temp2 and the shape in temp3.
    Label allocated;
    masm.load32(pairCountAddress, temp2);
    size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                    RegExpRealm::offsetOfNormalMatchResultShape();
    masm.loadGlobalObjectData(temp3);
    masm.loadPtr(Address(temp3, offset), temp3);

    auto emitAllocObject = [&](size_t elementCapacity) {
      gc::AllocKind kind = GuessArrayGCKind(elementCapacity);
      MOZ_ASSERT(CanChangeToBackgroundAllocKind(kind, &ArrayObject::class_));
      kind = ForegroundToBackgroundAllocKind(kind);

#ifdef DEBUG
      // Assert all of the available slots are used for |elementCapacity|
      // elements.
      size_t usedSlots = ObjectElements::VALUES_PER_HEADER + elementCapacity;
      MOZ_ASSERT(usedSlots == GetGCKindSlots(kind));
#endif

      constexpr size_t numUsedDynamicSlots =
          RegExpRealm::MatchResultObjectSlotSpan;
      constexpr size_t numDynamicSlots =
          RegExpRealm::MatchResultObjectNumDynamicSlots;
      constexpr size_t arrayLength = 1;
      masm.createArrayWithFixedElements(object, temp3, temp2, temp3,
                                        arrayLength, elementCapacity,
                                        numUsedDynamicSlots, numDynamicSlots,
                                        kind, gc::Heap::Default, &oolEntry);
    };

    Label moreThan2;
    masm.branch32(Assembler::Above, temp2, Imm32(2), &moreThan2);
    emitAllocObject(2);
    masm.jump(&allocated);

    Label moreThan6;
    masm.bind(&moreThan2);
    masm.branch32(Assembler::Above, temp2, Imm32(6), &moreThan6);
    emitAllocObject(6);
    masm.jump(&allocated);

    masm.bind(&moreThan6);
    static_assert(RegExpObject::MaxPairCount == 14);
    emitAllocObject(RegExpObject::MaxPairCount);

    masm.bind(&allocated);
  }

  // clang-format off
  /*
   * [SMDOC] Stack layout for the RegExpMatcher stub
   *
   *                                    +---------------+
   *               FramePointer +-----> |Caller-FramePtr|
   *                                    +---------------+
   *                                    |Return-Address |
   *                                    +---------------+
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *                                    +---------------+
   *                                    +---------------+
   *                                    |  MatchPairs   |
   *          pairsCountAddress +----------->  count    |
   *                                    |       pairs   |
   *                                    |               |
   *                                    +---------------+
   *     pairsVectorStartOffset +-----> +---------------+
   *                                    |   MatchPair   |
   *             matchPairStart +------------>  start   |  <-------+
   *             matchPairLimit +------------>  limit   |          | Reserved space for
   *                                    +---------------+          | `RegExpObject::MaxPairCount`
   *                                           .                   | MatchPair objects.
   *                                           .                   |
   *                                           .                   | `count` objects will be
   *                                    +---------------+          | initialized and can be
   *                                    |   MatchPair   |          | accessed below.
   *                                    |       start   |  <-------+
   *                                    |       limit   |
   *                                    +---------------+
   */
  // clang-format on

  static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
                "MatchPair consists of two int32 values representing the start"
                "and the end offset of the match");

  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);

  // Incremented by one below for each match pair.
  Register matchIndex = temp2;
  masm.move32(Imm32(0), matchIndex);

  // The element in which to store the result of the current match.
  size_t elementsOffset = NativeObject::offsetOfFixedElements();
  BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);

  // The current match pair's "start" and "limit" member.
  BaseIndex matchPairStart(FramePointer, matchIndex, TimesEight,
                           pairsVectorStartOffset + MatchPair::offsetOfStart());
  BaseIndex matchPairLimit(FramePointer, matchIndex, TimesEight,
                           pairsVectorStartOffset + MatchPair::offsetOfLimit());

  Label* depStrFailure = &oolEntry;
  Label restoreRegExpAndLastIndex;

  Register temp4;
  if (maybeTemp4 == InvalidReg) {
    depStrFailure = &restoreRegExpAndLastIndex;

    // We don't have enough registers for a fourth temporary. Reuse |regexp|
    // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
    masm.push(regexp);
    temp4 = regexp;
  } else {
    temp4 = maybeTemp4;
  }

  Register temp5;
  if (maybeTemp5 == InvalidReg) {
    depStrFailure = &restoreRegExpAndLastIndex;

    // We don't have enough registers for a fifth temporary. Reuse |lastIndex|
    // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
    masm.push(lastIndex);
    temp5 = lastIndex;
  } else {
    temp5 = maybeTemp5;
  }

  // Pop in the reverse order of the pushes above.
  auto maybeRestoreRegExpAndLastIndex = [&]() {
    if (maybeTemp5 == InvalidReg) {
      masm.pop(lastIndex);
    }
    if (maybeTemp4 == InvalidReg) {
      masm.pop(regexp);
    }
  };

  // Loop to construct the match strings. There are two different loops,
  // depending on whether the input is a Two-Byte or a Latin-1 string.
  CreateDependentString depStrs[]{
      {CharEncoding::TwoByte, temp3, temp4, temp5, depStrFailure},
      {CharEncoding::Latin1, temp3, temp4, temp5, depStrFailure},
  };

  {
    Label isLatin1, done;
    masm.branchLatin1String(input, &isLatin1);

    for (auto& depStr : depStrs) {
      if (depStr.encoding() == CharEncoding::Latin1) {
        masm.bind(&isLatin1);
      }

      Label matchLoop;
      masm.bind(&matchLoop);

      static_assert(MatchPair::NoMatch == -1,
                    "MatchPair::start is negative if no match was found");

      Label isUndefined, storeDone;
      masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
                    &isUndefined);
      {
        depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
                        input, matchPairStart, matchPairLimit,
                        initialStringHeap);

        // Storing into nursery-allocated results object's elements; no post
        // barrier.
        masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
        masm.jump(&storeDone);
      }
      masm.bind(&isUndefined);
      { masm.storeValue(UndefinedValue(), objectMatchElement); }
      masm.bind(&storeDone);

      masm.add32(Imm32(1), matchIndex);
      masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
                    &done);
      masm.jump(&matchLoop);
    }

#ifdef DEBUG
    masm.assumeUnreachable("The match string loop doesn't fall through.");
#endif

    masm.bind(&done);
  }

  maybeRestoreRegExpAndLastIndex();

  // Fill in the rest of the output object.
  masm.store32(
      matchIndex,
      Address(object,
              elementsOffset + ObjectElements::offsetOfInitializedLength()));
  masm.store32(
      matchIndex,
      Address(object, elementsOffset + ObjectElements::offsetOfLength()));

  Address firstMatchPairStartAddress(
      FramePointer, pairsVectorStartOffset + MatchPair::offsetOfStart());
  Address firstMatchPairLimitAddress(
      FramePointer, pairsVectorStartOffset + MatchPair::offsetOfLimit());

  static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
                "First slot holds the 'index' property");
  static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
                "Second slot holds the 'input' property");

  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);

  masm.load32(firstMatchPairStartAddress, temp3);
  masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));

  // No post barrier needed (address is within nursery object.)
  masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));

  // For the ExecMatch stub, if the regular expression is global or sticky, we
  // have to update its .lastIndex slot.
  if (isExecMatch) {
    MOZ_ASSERT(object != lastIndex);
    Label notGlobalOrSticky;
    masm.branchTest32(Assembler::Zero, flagsSlot,
                      Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                      &notGlobalOrSticky);
    masm.load32(firstMatchPairLimitAddress, lastIndex);
    masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
    masm.bind(&notGlobalOrSticky);
  }

  // All done!
  masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
  masm.pop(FramePointer);
  masm.ret();

  // No-match exit: for ExecMatch, a global/sticky regexp also resets
  // .lastIndex to 0 before returning null.
  masm.bind(&notFound);
  if (isExecMatch) {
    Label notGlobalOrSticky;
    masm.branchTest32(Assembler::Zero, flagsSlot,
                      Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                      &notGlobalOrSticky);
    masm.bind(&notFoundZeroLastIndex);
    masm.storeValue(Int32Value(0), lastIndexSlot);
    masm.bind(&notGlobalOrSticky);
  }
  masm.moveValue(NullValue(), result);
  masm.pop(FramePointer);
  masm.ret();

  // Fallback paths for CreateDependentString.
  for (auto& depStr : depStrs) {
    depStr.generateFallback(masm);
  }

  // Fall-through to the ool entry after restoring the registers.
  masm.bind(&restoreRegExpAndLastIndex);
  maybeRestoreRegExpAndLastIndex();

  // Use an undefined value to signal to the caller that the OOL stub needs to
  // be called.
  masm.bind(&oolEntry);
  masm.moveValue(UndefinedValue(), result);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  const char* name = isExecMatch ? "RegExpExecMatchStub" : "RegExpMatcherStub";
  CollectPerfSpewerJitCodeProfile(code, name);
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, name);
#endif

  return code;
}
2783 JitCode* JitZone::generateRegExpMatcherStub(JSContext* cx) {
2784 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2785 /* isExecMatch = */ false);
2788 JitCode* JitZone::generateRegExpExecMatchStub(JSContext* cx) {
2789 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2790 /* isExecMatch = */ true);
2793 class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator> {
2794 LRegExpMatcher* lir_;
2796 public:
2797 explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) {}
2799 void accept(CodeGenerator* codegen) override {
2800 codegen->visitOutOfLineRegExpMatcher(this);
2803 LRegExpMatcher* lir() const { return lir_; }
// OOL path for LRegExpMatcher: call RegExpMatcherRaw in the VM, passing a
// pointer to the MatchPairs that lives in the stack space reserved by
// visitRegExpMatcher (just above the InputOutputData).
void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) {
  LRegExpMatcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // temp = address of the MatchPairs in the reserved stack area.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call, and the live
  // registers are already saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, HandleObject regexp, HandleString input,
               int32_t lastIndex, MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpMatcherRaw>(lir);

  masm.jump(ool->rejoin());
}
// Emit the inline call to the per-zone RegExpMatcher stub, with an OOL VM-call
// fallback taken when the stub returns the undefined sentinel value.
void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // The fixed input registers must not alias the return Value registers.
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
#endif

  // Reserve space for the InputOutputData/MatchPairs used by the stub (and by
  // the OOL path); freed after the rejoin point below.
  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpMatcher* ool = new (alloc()) OutOfLineRegExpMatcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpMatcherStub =
      jitZone->regExpMatcherStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpMatcherStub);
  // Undefined result == "stub could not handle this case": go to the OOL path.
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
// Out-of-line code holder for LRegExpExecMatch; accept() dispatches to
// CodeGenerator::visitOutOfLineRegExpExecMatch.
2870 class OutOfLineRegExpExecMatch : public OutOfLineCodeBase<CodeGenerator> {
2871 LRegExpExecMatch* lir_;
2873 public:
2874 explicit OutOfLineRegExpExecMatch(LRegExpExecMatch* lir) : lir_(lir) {}
2876 void accept(CodeGenerator* codegen) override {
2877 codegen->visitOutOfLineRegExpExecMatch(this);
2880 LRegExpExecMatch* lir() const { return lir_; }
// Out-of-line fallback for LRegExpExecMatch: calls the VM, passing the stack
// address just past the InputOutputData as the MatchPairs* argument.
2883 void CodeGenerator::visitOutOfLineRegExpExecMatch(
2884 OutOfLineRegExpExecMatch* ool) {
2885 LRegExpExecMatch* lir = ool->lir();
2886 Register input = ToRegister(lir->string());
2887 Register regexp = ToRegister(lir->regexp());
// Pick any scratch register that doesn't alias the operands.
2889 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2890 regs.take(input);
2891 regs.take(regexp);
2892 Register temp = regs.takeAny();
2894 masm.computeEffectiveAddress(
2895 Address(masm.getStackPointer(), InputOutputDataSize), temp);
// Arguments are pushed in reverse order of the Fn signature.
2897 pushArg(temp);
2898 pushArg(input);
2899 pushArg(regexp);
2901 // We are not using oolCallVM because we are in a Call and live registers have
2902 // already been saved by the register allocator.
2903 using Fn =
2904 bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
2905 MatchPairs* pairs, MutableHandleValue output);
2906 callVM<Fn, RegExpBuiltinExecMatchFromJit>(lir);
2907 masm.jump(ool->rejoin());
// Inline fast path for LRegExpExecMatch: call the shared RegExpExecMatch
// stub; |undefined| in JSReturnOperand diverts to the out-of-line VM call.
2910 void CodeGenerator::visitRegExpExecMatch(LRegExpExecMatch* lir) {
2911 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
2912 MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
2913 MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
// Input registers must not alias the JS return register(s).
2915 #if defined(JS_NUNBOX32)
2916 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
2917 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
2918 static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
2919 static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
2920 #elif defined(JS_PUNBOX64)
2921 static_assert(RegExpMatcherRegExpReg != JSReturnReg);
2922 static_assert(RegExpMatcherStringReg != JSReturnReg);
2923 #endif
// Reserve the InputOutputData/MatchPairs scratch area for the stub.
2925 masm.reserveStack(RegExpReservedStack);
2927 auto* ool = new (alloc()) OutOfLineRegExpExecMatch(lir);
2928 addOutOfLineCode(ool, lir->mir());
2930 const JitZone* jitZone = gen->realm->zone()->jitZone();
2931 JitCode* regExpExecMatchStub =
2932 jitZone->regExpExecMatchStubNoBarrier(&zoneStubsToReadBarrier_);
2933 masm.call(regExpExecMatchStub);
2934 masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
2936 masm.bind(ool->rejoin());
2937 masm.freeStack(RegExpReservedStack);
// Generates the shared per-Zone RegExpSearcher stub. On success it returns
// the match start index in ReturnReg and stores the match limit to
// cx->regExpSearcherLastLimit; RegExpSearcherResultNotFound /
// RegExpSearcherResultFailed are returned for no-match / retry-in-C++.
// Returns nullptr on OOM.
2940 JitCode* JitZone::generateRegExpSearcherStub(JSContext* cx) {
2941 JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");
2943 Register regexp = RegExpSearcherRegExpReg;
2944 Register input = RegExpSearcherStringReg;
2945 Register lastIndex = RegExpSearcherLastIndexReg;
2946 Register result = ReturnReg;
2948 // We are free to clobber all registers, as LRegExpSearcher is a call
2949 // instruction.
2950 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2951 regs.take(input);
2952 regs.take(regexp);
2953 regs.take(lastIndex);
2955 Register temp1 = regs.takeAny();
2956 Register temp2 = regs.takeAny();
2957 Register temp3 = regs.takeAny();
2959 TempAllocator temp(&cx->tempLifoAlloc());
2960 JitContext jcx(cx);
2961 StackMacroAssembler masm(cx, temp);
2962 AutoCreatedBy acb(masm, "JitZone::generateRegExpSearcherStub");
2964 #ifdef JS_USE_LINK_REGISTER
2965 masm.pushReturnAddress();
2966 #endif
2967 masm.push(FramePointer);
2968 masm.moveStackPtrTo(FramePointer);
2970 #ifdef DEBUG
2971 // Store sentinel value to cx->regExpSearcherLastLimit.
2972 // See comment in RegExpSearcherImpl.
2973 masm.loadJSContext(temp1);
2974 masm.store32(Imm32(RegExpSearcherLastLimitSentinel),
2975 Address(temp1, JSContext::offsetOfRegExpSearcherLastLimit()))
2976 #endif
2978 // The InputOutputData is placed above the frame pointer and return address on
2979 // the stack.
2980 int32_t inputOutputDataStartOffset = 2 * sizeof(void*);
2982 Label notFound, oolEntry;
2983 if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
2984 temp3, inputOutputDataStartOffset,
2985 initialStringHeap, &notFound, &oolEntry)) {
2986 return nullptr;
2989 // clang-format off
2991 * [SMDOC] Stack layout for the RegExpSearcher stub
2993 * +---------------+
2994 * FramePointer +-----> |Caller-FramePtr|
2995 * +---------------+
2996 * |Return-Address |
2997 * +---------------+
2998 * inputOutputDataStartOffset +-----> +---------------+
2999 * |InputOutputData|
3000 * +---------------+
3001 * +---------------+
3002 * | MatchPairs |
3003 * | count |
3004 * | pairs |
3005 * | |
3006 * +---------------+
3007 * pairsVectorStartOffset +-----> +---------------+
3008 * | MatchPair |
3009 * matchPairStart +------------> start | <-------+
3010 * matchPairLimit +------------> limit | | Reserved space for
3011 * +---------------+ | `RegExpObject::MaxPairCount`
3012 * . | MatchPair objects.
3013 * . |
3014 * . | Only a single object will
3015 * +---------------+ | be initialized and can be
3016 * | MatchPair | | accessed below.
3017 * | start | <-------+
3018 * | limit |
3019 * +---------------+
3021 // clang-format on
3023 int32_t pairsVectorStartOffset =
3024 RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
3025 Address matchPairStart(FramePointer,
3026 pairsVectorStartOffset + MatchPair::offsetOfStart());
3027 Address matchPairLimit(FramePointer,
3028 pairsVectorStartOffset + MatchPair::offsetOfLimit());
3030 // Store match limit to cx->regExpSearcherLastLimit and return the index.
// |input| is reused as a scratch register for the JSContext here; the input
// string is not needed past this point.
3031 masm.load32(matchPairLimit, result);
3032 masm.loadJSContext(input);
3033 masm.store32(result,
3034 Address(input, JSContext::offsetOfRegExpSearcherLastLimit()));
3035 masm.load32(matchPairStart, result);
3036 masm.pop(FramePointer);
3037 masm.ret();
// No match: return the dedicated sentinel.
3039 masm.bind(&notFound);
3040 masm.move32(Imm32(RegExpSearcherResultNotFound), result);
3041 masm.pop(FramePointer);
3042 masm.ret();
// Could not run inline: caller must retry in C++.
3044 masm.bind(&oolEntry);
3045 masm.move32(Imm32(RegExpSearcherResultFailed), result);
3046 masm.pop(FramePointer);
3047 masm.ret();
3049 Linker linker(masm);
3050 JitCode* code = linker.newCode(cx, CodeKind::Other);
3051 if (!code) {
3052 return nullptr;
3055 CollectPerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
3056 #ifdef MOZ_VTUNE
3057 vtune::MarkStub(code, "RegExpSearcherStub");
3058 #endif
3060 return code;
// Out-of-line code holder for LRegExpSearcher; accept() dispatches to
// CodeGenerator::visitOutOfLineRegExpSearcher.
3063 class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator> {
3064 LRegExpSearcher* lir_;
3066 public:
3067 explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) {}
3069 void accept(CodeGenerator* codegen) override {
3070 codegen->visitOutOfLineRegExpSearcher(this);
3073 LRegExpSearcher* lir() const { return lir_; }
// Out-of-line fallback for LRegExpSearcher: calls RegExpSearcherRaw in the
// VM, passing the stack address just past the InputOutputData as the
// MatchPairs* argument.
3076 void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) {
3077 LRegExpSearcher* lir = ool->lir();
3078 Register lastIndex = ToRegister(lir->lastIndex());
3079 Register input = ToRegister(lir->string());
3080 Register regexp = ToRegister(lir->regexp());
// Pick any scratch register that doesn't alias the operands.
3082 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
3083 regs.take(lastIndex);
3084 regs.take(input);
3085 regs.take(regexp);
3086 Register temp = regs.takeAny();
3088 masm.computeEffectiveAddress(
3089 Address(masm.getStackPointer(), InputOutputDataSize), temp);
// Arguments are pushed in reverse order of the Fn signature.
3091 pushArg(temp);
3092 pushArg(lastIndex);
3093 pushArg(input);
3094 pushArg(regexp);
3096 // We are not using oolCallVM because we are in a Call, and live
3097 // registers have already been saved by the register allocator.
3098 using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
3099 int32_t lastIndex, MatchPairs* pairs, int32_t* result);
3100 callVM<Fn, RegExpSearcherRaw>(lir);
3102 masm.jump(ool->rejoin());
// Inline fast path for LRegExpSearcher: call the shared stub; a
// RegExpSearcherResultFailed return value diverts to the out-of-line VM call.
3105 void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
3106 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpSearcherRegExpReg);
3107 MOZ_ASSERT(ToRegister(lir->string()) == RegExpSearcherStringReg);
3108 MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpSearcherLastIndexReg);
3109 MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
// Inputs must not alias the int32 result register.
3111 static_assert(RegExpSearcherRegExpReg != ReturnReg);
3112 static_assert(RegExpSearcherStringReg != ReturnReg);
3113 static_assert(RegExpSearcherLastIndexReg != ReturnReg);
// Reserve the InputOutputData/MatchPairs scratch area for the stub.
3115 masm.reserveStack(RegExpReservedStack);
3117 OutOfLineRegExpSearcher* ool = new (alloc()) OutOfLineRegExpSearcher(lir);
3118 addOutOfLineCode(ool, lir->mir());
3120 const JitZone* jitZone = gen->realm->zone()->jitZone();
3121 JitCode* regExpSearcherStub =
3122 jitZone->regExpSearcherStubNoBarrier(&zoneStubsToReadBarrier_);
3123 masm.call(regExpSearcherStub);
3124 masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
3125 ool->entry());
3126 masm.bind(ool->rejoin());
3128 masm.freeStack(RegExpReservedStack);
// Read cx->regExpSearcherLastLimit (written by the RegExpSearcher stub above)
// into the output register and clear it, using |scratch| for the context.
3131 void CodeGenerator::visitRegExpSearcherLastLimit(
3132 LRegExpSearcherLastLimit* lir) {
3133 Register result = ToRegister(lir->output());
3134 Register scratch = ToRegister(lir->temp0());
3136 masm.loadAndClearRegExpSearcherLastLimit(result, scratch);
// Generates the shared per-Zone RegExpExecTest stub: runs the regexp and
// returns 1/0 in ReturnReg for match/no-match, or RegExpExecTestResultFailed
// when the caller must retry in C++. For global/sticky regexps it also
// updates the regexp's .lastIndex slot. Returns nullptr on OOM.
3139 JitCode* JitZone::generateRegExpExecTestStub(JSContext* cx) {
3140 JitSpew(JitSpew_Codegen, "# Emitting RegExpExecTest stub");
3142 Register regexp = RegExpExecTestRegExpReg;
3143 Register input = RegExpExecTestStringReg;
3144 Register result = ReturnReg;
3146 TempAllocator temp(&cx->tempLifoAlloc());
3147 JitContext jcx(cx);
3148 StackMacroAssembler masm(cx, temp);
3149 AutoCreatedBy acb(masm, "JitZone::generateRegExpExecTestStub");
3151 #ifdef JS_USE_LINK_REGISTER
3152 masm.pushReturnAddress();
3153 #endif
3154 masm.push(FramePointer);
3155 masm.moveStackPtrTo(FramePointer);
3157 // We are free to clobber all registers, as LRegExpExecTest is a call
3158 // instruction.
3159 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
3160 regs.take(input);
3161 regs.take(regexp);
3163 // Ensure lastIndex != result.
3164 regs.take(result);
3165 Register lastIndex = regs.takeAny();
3166 regs.add(result);
3167 Register temp1 = regs.takeAny();
3168 Register temp2 = regs.takeAny();
3169 Register temp3 = regs.takeAny();
3171 Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
3172 Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
3174 masm.reserveStack(RegExpReservedStack);
3176 // Load lastIndex and skip RegExp execution if needed.
3177 Label notFoundZeroLastIndex;
3178 masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
3180 // In visitRegExpMatcher and visitRegExpSearcher, we reserve stack space
3181 // before calling the stub. For RegExpExecTest we call the stub before
3182 // reserving stack space, so the offset of the InputOutputData relative to the
3183 // frame pointer is negative.
3184 constexpr int32_t inputOutputDataStartOffset = -int32_t(RegExpReservedStack);
3186 // On ARM64, load/store instructions can encode an immediate offset in the
3187 // range [-256, 4095]. If we ever fail this assertion, it would be more
3188 // efficient to store the data above the frame pointer similar to
3189 // RegExpMatcher and RegExpSearcher.
3190 static_assert(inputOutputDataStartOffset >= -256);
3192 Label notFound, oolEntry;
3193 if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
3194 temp3, inputOutputDataStartOffset,
3195 initialStringHeap, &notFound, &oolEntry)) {
3196 return nullptr;
3199 // Set `result` to true/false to indicate found/not-found, or to
3200 // RegExpExecTestResultFailed if we have to retry in C++. If the regular
3201 // expression is global or sticky, we also have to update its .lastIndex slot.
3203 Label done;
3204 int32_t pairsVectorStartOffset =
3205 RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
3206 Address matchPairLimit(FramePointer,
3207 pairsVectorStartOffset + MatchPair::offsetOfLimit());
// Found: result = 1; for global/sticky, .lastIndex = match limit.
3209 masm.move32(Imm32(1), result);
3210 masm.branchTest32(Assembler::Zero, flagsSlot,
3211 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
3212 &done);
3213 masm.load32(matchPairLimit, lastIndex);
3214 masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
3215 masm.jump(&done);
// Not found: result = 0; for global/sticky, .lastIndex = 0.
3217 masm.bind(&notFound);
3218 masm.move32(Imm32(0), result);
3219 masm.branchTest32(Assembler::Zero, flagsSlot,
3220 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
3221 &done);
3222 masm.storeValue(Int32Value(0), lastIndexSlot);
3223 masm.jump(&done);
// Execution was skipped by loadRegExpLastIndex: report no match and reset
// .lastIndex unconditionally.
3225 masm.bind(&notFoundZeroLastIndex);
3226 masm.move32(Imm32(0), result);
3227 masm.storeValue(Int32Value(0), lastIndexSlot);
3228 masm.jump(&done);
// Could not run inline: caller must retry in C++.
3230 masm.bind(&oolEntry);
3231 masm.move32(Imm32(RegExpExecTestResultFailed), result);
3233 masm.bind(&done);
3234 masm.freeStack(RegExpReservedStack);
3235 masm.pop(FramePointer);
3236 masm.ret();
3238 Linker linker(masm);
3239 JitCode* code = linker.newCode(cx, CodeKind::Other);
3240 if (!code) {
3241 return nullptr;
3244 CollectPerfSpewerJitCodeProfile(code, "RegExpExecTestStub");
3245 #ifdef MOZ_VTUNE
3246 vtune::MarkStub(code, "RegExpExecTestStub");
3247 #endif
3249 return code;
// Out-of-line code holder for LRegExpExecTest; accept() dispatches to
// CodeGenerator::visitOutOfLineRegExpExecTest.
3252 class OutOfLineRegExpExecTest : public OutOfLineCodeBase<CodeGenerator> {
3253 LRegExpExecTest* lir_;
3255 public:
3256 explicit OutOfLineRegExpExecTest(LRegExpExecTest* lir) : lir_(lir) {}
3258 void accept(CodeGenerator* codegen) override {
3259 codegen->visitOutOfLineRegExpExecTest(this);
3262 LRegExpExecTest* lir() const { return lir_; }
// Out-of-line fallback for LRegExpExecTest: performs the test via a VM call.
3265 void CodeGenerator::visitOutOfLineRegExpExecTest(OutOfLineRegExpExecTest* ool) {
3266 LRegExpExecTest* lir = ool->lir();
3267 Register input = ToRegister(lir->string());
3268 Register regexp = ToRegister(lir->regexp());
// Arguments are pushed in reverse order of the Fn signature.
3270 pushArg(input);
3271 pushArg(regexp);
3273 // We are not using oolCallVM because we are in a Call and live registers have
3274 // already been saved by the register allocator.
3275 using Fn = bool (*)(JSContext* cx, Handle<RegExpObject*> regexp,
3276 HandleString input, bool* result);
3277 callVM<Fn, RegExpBuiltinExecTestFromJit>(lir);
3279 masm.jump(ool->rejoin());
// Inline fast path for LRegExpExecTest: call the shared stub; a
// RegExpExecTestResultFailed return value diverts to the out-of-line VM call.
3282 void CodeGenerator::visitRegExpExecTest(LRegExpExecTest* lir) {
3283 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpExecTestRegExpReg);
3284 MOZ_ASSERT(ToRegister(lir->string()) == RegExpExecTestStringReg);
3285 MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
// Inputs must not alias the result register.
3287 static_assert(RegExpExecTestRegExpReg != ReturnReg);
3288 static_assert(RegExpExecTestStringReg != ReturnReg);
3290 auto* ool = new (alloc()) OutOfLineRegExpExecTest(lir);
3291 addOutOfLineCode(ool, lir->mir());
3293 const JitZone* jitZone = gen->realm->zone()->jitZone();
3294 JitCode* regExpExecTestStub =
3295 jitZone->regExpExecTestStubNoBarrier(&zoneStubsToReadBarrier_);
3296 masm.call(regExpExecTestStub);
3298 masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpExecTestResultFailed),
3299 ool->entry());
3301 masm.bind(ool->rejoin());
3304 void CodeGenerator::visitRegExpHasCaptureGroups(LRegExpHasCaptureGroups* ins) {
3305 Register regexp = ToRegister(ins->regexp());
3306 Register input = ToRegister(ins->input());
3307 Register output = ToRegister(ins->output());
3309 using Fn =
3310 bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
3311 auto* ool = oolCallVM<Fn, js::RegExpHasCaptureGroups>(
3312 ins, ArgList(regexp, input), StoreRegisterTo(output));
3314 // Load RegExpShared in |output|.
3315 Label vmCall;
3316 masm.loadParsedRegExpShared(regexp, output, ool->entry());
3318 // Return true iff pairCount > 1.
3319 Label returnTrue;
3320 masm.branch32(Assembler::Above,
3321 Address(output, RegExpShared::offsetOfPairCount()), Imm32(1),
3322 &returnTrue);
3323 masm.move32(Imm32(0), output);
3324 masm.jump(ool->rejoin());
3326 masm.bind(&returnTrue);
3327 masm.move32(Imm32(1), output);
3329 masm.bind(ool->rejoin());
// Out-of-line code holder for LRegExpPrototypeOptimizable; accept()
// dispatches to CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable.
3332 class OutOfLineRegExpPrototypeOptimizable
3333 : public OutOfLineCodeBase<CodeGenerator> {
3334 LRegExpPrototypeOptimizable* ins_;
3336 public:
3337 explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
3338 : ins_(ins) {}
3340 void accept(CodeGenerator* codegen) override {
3341 codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
3343 LRegExpPrototypeOptimizable* ins() const { return ins_; }
// Fast path for "is RegExp.prototype unmodified?": a branch-based check
// against the realm's global; falls back to the out-of-line ABI call when
// the inline check cannot decide. Output is 1 on the fast path.
3346 void CodeGenerator::visitRegExpPrototypeOptimizable(
3347 LRegExpPrototypeOptimizable* ins) {
3348 Register object = ToRegister(ins->object());
3349 Register output = ToRegister(ins->output());
3350 Register temp = ToRegister(ins->temp0());
3352 OutOfLineRegExpPrototypeOptimizable* ool =
3353 new (alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
3354 addOutOfLineCode(ool, ins->mir());
3356 const GlobalObject* global = gen->realm->maybeGlobal();
3357 MOZ_ASSERT(global);
3358 masm.branchIfNotRegExpPrototypeOptimizable(object, temp, global,
3359 ool->entry());
3360 masm.move32(Imm32(0x1), output);
3362 masm.bind(ool->rejoin());
// Out-of-line slow path: call RegExpPrototypeOptimizableRaw through the ABI.
// Volatile registers are saved/restored manually around the raw call;
// |output| doubles as the JSContext scratch and then holds the bool result.
3365 void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(
3366 OutOfLineRegExpPrototypeOptimizable* ool) {
3367 LRegExpPrototypeOptimizable* ins = ool->ins();
3368 Register object = ToRegister(ins->object());
3369 Register output = ToRegister(ins->output());
3371 saveVolatile(output);
3373 using Fn = bool (*)(JSContext* cx, JSObject* proto);
3374 masm.setupAlignedABICall();
3375 masm.loadJSContext(output);
3376 masm.passABIArg(output);
3377 masm.passABIArg(object);
3378 masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
3379 masm.storeCallBoolResult(output);
3381 restoreVolatile(output);
3383 masm.jump(ool->rejoin());
// Out-of-line code holder for LRegExpInstanceOptimizable; accept()
// dispatches to CodeGenerator::visitOutOfLineRegExpInstanceOptimizable.
3386 class OutOfLineRegExpInstanceOptimizable
3387 : public OutOfLineCodeBase<CodeGenerator> {
3388 LRegExpInstanceOptimizable* ins_;
3390 public:
3391 explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
3392 : ins_(ins) {}
3394 void accept(CodeGenerator* codegen) override {
3395 codegen->visitOutOfLineRegExpInstanceOptimizable(this);
3397 LRegExpInstanceOptimizable* ins() const { return ins_; }
// Fast path for "is this RegExp instance unmodified?": inline branch check
// against the realm's global, with an out-of-line ABI-call fallback.
// Output is 1 on the fast path.
3400 void CodeGenerator::visitRegExpInstanceOptimizable(
3401 LRegExpInstanceOptimizable* ins) {
3402 Register object = ToRegister(ins->object());
3403 Register output = ToRegister(ins->output());
3404 Register temp = ToRegister(ins->temp0());
3406 OutOfLineRegExpInstanceOptimizable* ool =
3407 new (alloc()) OutOfLineRegExpInstanceOptimizable(ins);
3408 addOutOfLineCode(ool, ins->mir());
3410 const GlobalObject* global = gen->realm->maybeGlobal();
3411 MOZ_ASSERT(global);
3412 masm.branchIfNotRegExpInstanceOptimizable(object, temp, global, ool->entry());
3413 masm.move32(Imm32(0x1), output);
3415 masm.bind(ool->rejoin());
// Out-of-line slow path: call RegExpInstanceOptimizableRaw through the ABI,
// passing the object and its prototype; |output| is reused as the JSContext
// scratch and then receives the bool result.
3418 void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(
3419 OutOfLineRegExpInstanceOptimizable* ool) {
3420 LRegExpInstanceOptimizable* ins = ool->ins();
3421 Register object = ToRegister(ins->object());
3422 Register proto = ToRegister(ins->proto());
3423 Register output = ToRegister(ins->output());
3425 saveVolatile(output);
3427 using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
3428 masm.setupAlignedABICall();
3429 masm.loadJSContext(output);
3430 masm.passABIArg(output);
3431 masm.passABIArg(object);
3432 masm.passABIArg(proto);
3433 masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
3434 masm.storeCallBoolResult(output);
3436 restoreVolatile(output);
3438 masm.jump(ool->rejoin());
// Emit a linear scan over the string's characters (of the given encoding)
// that leaves in |output| the index of the first '$', or -1 if none.
// Precondition (debug-asserted): len > 0. |temp0| holds the char pointer,
// |temp1| the current character; |output| is also the loop counter.
3441 static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
3442 Register len, Register temp0, Register temp1,
3443 Register output, CharEncoding encoding) {
3444 #ifdef DEBUG
3445 Label ok;
3446 masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
3447 masm.assumeUnreachable("Length should be greater than 0.");
3448 masm.bind(&ok);
3449 #endif
3451 Register chars = temp0;
3452 masm.loadStringChars(str, chars, encoding);
3454 masm.move32(Imm32(0), output);
3456 Label start, done;
3457 masm.bind(&start);
// Loop body: compare chars[output] against '$'; exit with the index on hit.
3459 Register currentChar = temp1;
3460 masm.loadChar(chars, output, currentChar, encoding);
3461 masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);
3463 masm.add32(Imm32(1), output);
3464 masm.branch32(Assembler::NotEqual, output, len, &start);
// Fell off the end without a match: report -1.
3466 masm.move32(Imm32(-1), output);
3468 masm.bind(&done);
// Find the index of the first '$' in a linear string, dispatching on the
// character encoding; ropes take the VM fallback (GetFirstDollarIndexRaw).
3471 void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
3472 Register str = ToRegister(ins->str());
3473 Register output = ToRegister(ins->output());
3474 Register temp0 = ToRegister(ins->temp0());
3475 Register temp1 = ToRegister(ins->temp1());
3476 Register len = ToRegister(ins->temp2());
3478 using Fn = bool (*)(JSContext*, JSString*, int32_t*);
3479 OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
3480 ins, ArgList(str), StoreRegisterTo(output));
// Ropes have no contiguous chars; let the VM flatten and scan.
3482 masm.branchIfRope(str, ool->entry());
3483 masm.loadStringLength(str, len);
3485 Label isLatin1, done;
3486 masm.branchLatin1String(str, &isLatin1);
3488 FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
3489 CharEncoding::TwoByte);
3490 masm.jump(&done);
3492 masm.bind(&isLatin1);
3494 FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
3495 CharEncoding::Latin1);
3497 masm.bind(&done);
3498 masm.bind(ool->rejoin());
// String replace via VM call. Each operand may be a compile-time constant
// string (pushed as an ImmGCPtr) or a register; arguments are pushed in
// reverse order of the Fn signature (replacement, pattern, string).
3501 void CodeGenerator::visitStringReplace(LStringReplace* lir) {
3502 if (lir->replacement()->isConstant()) {
3503 pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
3504 } else {
3505 pushArg(ToRegister(lir->replacement()));
3508 if (lir->pattern()->isConstant()) {
3509 pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
3510 } else {
3511 pushArg(ToRegister(lir->pattern()));
3514 if (lir->string()->isConstant()) {
3515 pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
3516 } else {
3517 pushArg(ToRegister(lir->string()));
3520 using Fn =
3521 JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
// Flat replacement (no '$' substitutions) uses the specialized VM function.
3522 if (lir->mir()->isFlatReplacement()) {
3523 callVM<Fn, StringFlatReplaceString>(lir);
3524 } else {
3525 callVM<Fn, StringReplace>(lir);
3529 void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
3530 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3531 TypedOrValueRegister lhs =
3532 TypedOrValueRegister(ToValue(lir, LBinaryValueCache::LhsIndex));
3533 TypedOrValueRegister rhs =
3534 TypedOrValueRegister(ToValue(lir, LBinaryValueCache::RhsIndex));
3535 ValueOperand output = ToOutValue(lir);
3537 JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
3539 switch (jsop) {
3540 case JSOp::Add:
3541 case JSOp::Sub:
3542 case JSOp::Mul:
3543 case JSOp::Div:
3544 case JSOp::Mod:
3545 case JSOp::Pow:
3546 case JSOp::BitAnd:
3547 case JSOp::BitOr:
3548 case JSOp::BitXor:
3549 case JSOp::Lsh:
3550 case JSOp::Rsh:
3551 case JSOp::Ursh: {
3552 IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
3553 addIC(lir, allocateIC(ic));
3554 return;
3556 default:
3557 MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
3561 void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
3562 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3563 TypedOrValueRegister lhs =
3564 TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::LhsIndex));
3565 TypedOrValueRegister rhs =
3566 TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::RhsIndex));
3567 Register output = ToRegister(lir->output());
3569 JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
3571 switch (jsop) {
3572 case JSOp::Lt:
3573 case JSOp::Le:
3574 case JSOp::Gt:
3575 case JSOp::Ge:
3576 case JSOp::Eq:
3577 case JSOp::Ne:
3578 case JSOp::StrictEq:
3579 case JSOp::StrictNe: {
3580 IonCompareIC ic(liveRegs, lhs, rhs, output);
3581 addIC(lir, allocateIC(ic));
3582 return;
3584 default:
3585 MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
3589 void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
3590 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3591 TypedOrValueRegister input =
3592 TypedOrValueRegister(ToValue(lir, LUnaryCache::InputIndex));
3593 ValueOperand output = ToOutValue(lir);
3595 IonUnaryArithIC ic(liveRegs, input, output);
3596 addIC(lir, allocateIC(ic));
// Get or lazily create the module's import.meta object via a VM call.
3599 void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
3600 pushArg(ImmPtr(lir->mir()->module()));
3602 using Fn = JSObject* (*)(JSContext*, HandleObject);
3603 callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
// Start a dynamic import() via a VM call; arguments are pushed in reverse
// order of the Fn signature (options, specifier, script).
3606 void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
3607 pushArg(ToValue(lir, LDynamicImport::OptionsIndex));
3608 pushArg(ToValue(lir, LDynamicImport::SpecifierIndex));
3609 pushArg(ImmGCPtr(current->mir()->info().script()));
3611 using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
3612 callVM<Fn, js::StartDynamicModuleImport>(lir);
// Clone a function object from its template: allocate inline in the nursery
// (VM fallback js::Lambda on allocation failure), then install the current
// environment chain in the clone's environment slot.
3615 void CodeGenerator::visitLambda(LLambda* lir) {
3616 Register envChain = ToRegister(lir->environmentChain());
3617 Register output = ToRegister(lir->output());
3618 Register tempReg = ToRegister(lir->temp0());
3620 JSFunction* fun = lir->mir()->templateFunction();
3622 using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
3623 OutOfLineCode* ool = oolCallVM<Fn, js::Lambda>(
3624 lir, ArgList(ImmGCPtr(fun), envChain), StoreRegisterTo(output));
3626 TemplateObject templateObject(fun);
3627 masm.createGCObject(output, tempReg, templateObject, gc::Heap::Default,
3628 ool->entry());
3630 masm.storeValue(JSVAL_TYPE_OBJECT, envChain,
3631 Address(output, JSFunction::offsetOfEnvironment()));
3632 // No post barrier needed because output is guaranteed to be allocated in
3633 // the nursery.
3635 masm.bind(ool->rejoin());
// Create a function with an explicit prototype via a VM call; arguments are
// pushed in reverse order of the Fn signature (proto, env chain, function).
3638 void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
3639 Register envChain = ToRegister(lir->envChain());
3640 Register prototype = ToRegister(lir->prototype());
3642 pushArg(prototype);
3643 pushArg(envChain);
3644 pushArg(ImmGCPtr(lir->mir()->function()));
3646 using Fn =
3647 JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
3648 callVM<Fn, js::FunWithProtoOperation>(lir);
// Set a function's inferred name via a VM call; arguments are pushed in
// reverse order of the Fn signature (prefix kind, name value, function).
3651 void CodeGenerator::visitSetFunName(LSetFunName* lir) {
3652 pushArg(Imm32(lir->mir()->prefixKind()));
3653 pushArg(ToValue(lir, LSetFunName::NameIndex));
3654 pushArg(ToRegister(lir->fun()));
3656 using Fn =
3657 bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
3658 callVM<Fn, js::SetFunctionName>(lir);
// Emit the invalidation (OSI) point for the preceding instruction and record
// its code offset on the associated safepoint.
3661 void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
3662 // Note: markOsiPoint ensures enough space exists between the last
3663 // LOsiPoint and this one to patch adjacent call instructions.
3665 MOZ_ASSERT(masm.framePushed() == frameSize());
3667 uint32_t osiCallPointOffset = markOsiPoint(lir);
3669 LSafepoint* safepoint = lir->associatedSafepoint();
3670 MOZ_ASSERT(!safepoint->osiCallPointOffset());
3671 safepoint->setOsiCallPointOffset(osiCallPointOffset);
3673 #ifdef DEBUG
3674 // There should be no movegroups or other instructions between
3675 // an instruction and its OsiPoint. This is necessary because
3676 // we use the OsiPoint's snapshot from within VM calls.
3677 for (LInstructionReverseIterator iter(current->rbegin(lir));
3678 iter != current->rend(); iter++) {
3679 if (*iter == lir) {
3680 continue;
3682 MOZ_ASSERT(!iter->isMoveGroup());
3683 MOZ_ASSERT(iter->safepoint() == safepoint);
3684 break;
3686 #endif
3688 #ifdef CHECK_OSIPOINT_REGISTERS
3689 if (shouldVerifyOsiPointRegs(safepoint)) {
3690 verifyOsiPointRegs(safepoint);
3692 #endif
// Phis are resolved by the register allocator before code generation; one
// reaching this visitor is a compiler bug.
3695 void CodeGenerator::visitPhi(LPhi* lir) {
3696 MOZ_CRASH("Unexpected LPhi in CodeGenerator");
// Unconditional branch to the target block (elided when it falls through).
3699 void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }
// Table switch on an int32 (or a double that must convert exactly to int32;
// otherwise the default case is taken).
3701 void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
3702 MTableSwitch* mir = ins->mir();
3703 Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
3704 const LAllocation* temp;
3706 if (mir->getOperand(0)->type() != MIRType::Int32) {
3707 temp = ins->tempInt()->output();
3709 // The input is a double, so try and convert it to an integer.
3710 // If it does not fit in an integer, take the default case.
3711 masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp),
3712 defaultcase, false);
3713 } else {
3714 temp = ins->index();
3717 emitTableSwitchDispatch(mir, ToRegister(temp),
3718 ToRegisterOrInvalid(ins->tempPointer()));
// Table switch on a boxed Value: non-numbers take the default case; doubles
// must convert exactly to int32 or the default case is taken.
3721 void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
3722 MTableSwitch* mir = ins->mir();
3723 Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
3725 Register index = ToRegister(ins->tempInt());
3726 ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
3727 Register tag = masm.extractTag(value, index);
3728 masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);
3730 Label unboxInt, isInt;
3731 masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
// Double path: exact conversion or bail to default.
3733 FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
3734 masm.unboxDouble(value, floatIndex);
3735 masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
3736 masm.jump(&isInt);
3739 masm.bind(&unboxInt);
3740 masm.unboxInt32(value, index);
3742 masm.bind(&isInt);
3744 emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
// Nothing to emit for LParameter.
3747 void CodeGenerator::visitParameter(LParameter* lir) {}
// Load the callee JSFunction from the frame's callee token.
3749 void CodeGenerator::visitCallee(LCallee* lir) {
3750 Register callee = ToRegister(lir->output());
3751 Address ptr(FramePointer, JitFrameLayout::offsetOfCalleeToken());
3753 masm.loadFunctionFromCalleeToken(ptr, callee);
// Compute whether the current call is a constructing call: the low tag bit of
// the callee token encodes it, so masking leaves 0 or 1 in |output|.
3756 void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
3757 Register output = ToRegister(lir->output());
3758 Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
3759 masm.loadPtr(calleeToken, output);
3761 // We must be inside a function.
3762 MOZ_ASSERT(current->mir()->info().script()->function());
3764 // The low bit indicates whether this call is constructing, just clear the
3765 // other bits.
3766 static_assert(CalleeToken_Function == 0x0,
3767 "CalleeTokenTag value should match");
3768 static_assert(CalleeToken_FunctionConstructing == 0x1,
3769 "CalleeTokenTag value should match");
3770 masm.andPtr(Imm32(0x1), output);
// Return: the value is already in the JS return register(s) (debug-asserted);
// jump to the shared epilogue unless this block falls through to it.
3773 void CodeGenerator::visitReturn(LReturn* lir) {
3774 #if defined(JS_NUNBOX32)
3775 DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
3776 DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
3777 MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
3778 MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
3779 #elif defined(JS_PUNBOX64)
3780 DebugOnly<LAllocation*> result = lir->getOperand(0);
3781 MOZ_ASSERT(ToRegister(result) == JSReturnReg);
3782 #endif
3783 // Don't emit a jump to the return label if this is the last block, as
3784 // it'll fall through to the epilogue.
3786 // This is -not- true however for a Generator-return, which may appear in the
3787 // middle of the last block, so we should always emit the jump there.
3788 if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
3789 masm.jump(&returnLabel_);
// On-stack-replacement entry: record the OSR entry offset, then set up the
// Ion frame as if entered from the prologue (frame pointer / stack pointer
// were already positioned by Baseline).
3793 void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
3794 Register temp = ToRegister(lir->temp());
3796 // Remember the OSR entry offset into the code buffer.
3797 masm.flushBuffer();
3798 setOsrEntryOffset(masm.size());
3800 // Allocate the full frame for this function
3801 // Note we have a new entry here. So we reset MacroAssembler::framePushed()
3802 // to 0, before reserving the stack.
3803 MOZ_ASSERT(masm.framePushed() == frameSize());
3804 masm.setFramePushed(0);
3806 // The Baseline code ensured both the frame pointer and stack pointer point to
3807 // the JitFrameLayout on the stack.
3809 // If profiling, save the current frame pointer to a per-thread global field.
3810 if (isProfilerInstrumentationEnabled()) {
3811 masm.profilerEnterFrame(FramePointer, temp);
3814 masm.reserveStack(frameSize());
3815 MOZ_ASSERT(masm.framePushed() == frameSize());
3817 // Ensure that the Ion frame is properly aligned.
3818 masm.assertStackAlignment(JitStackAlignment, 0);
// Load the environment chain from the Baseline frame being replaced.
3821 void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
3822 const LAllocation* frame = lir->getOperand(0);
3823 const LDefinition* object = lir->getDef(0);
3825 const ptrdiff_t frameOffset =
3826 BaselineFrame::reverseOffsetOfEnvironmentChain();
3828 masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
// Loads the arguments object out of the BaselineFrame being replaced by OSR.
void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
  const LAllocation* frame = lir->getOperand(0);
  const LDefinition* object = lir->getDef(0);

  const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();

  masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
}
// Loads a boxed Value from a MIR-specified slot of the BaselineFrame during
// OSR.
void CodeGenerator::visitOsrValue(LOsrValue* value) {
  const LAllocation* frame = value->getOperand(0);
  const ValueOperand out = ToOutValue(value);

  const ptrdiff_t frameOffset = value->mir()->frameOffset();

  masm.loadValue(Address(ToRegister(frame), frameOffset), out);
}
// Reads the return value slot of the BaselineFrame during OSR. Yields
// undefined when the frame's HAS_RVAL flag is not set.
void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
  const LAllocation* frame = lir->getOperand(0);
  const ValueOperand out = ToOutValue(lir);

  Address flags =
      Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
  Address retval =
      Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());

  // Default result if no return value has been stored in the frame.
  masm.moveValue(UndefinedValue(), out);

  Label done;
  masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
                    &done);
  masm.loadValue(retval, out);
  masm.bind(&done);
}
// Stores a typed (unboxed) call argument into its stack argument slot, boxing
// it according to whether it lives in a float register, general register, or
// is a constant.
void CodeGenerator::visitStackArgT(LStackArgT* lir) {
  const LAllocation* arg = lir->arg();
  MIRType argType = lir->type();
  uint32_t argslot = lir->argslot();
  MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());

  Address dest = AddressOfPassedArg(argslot);

  if (arg->isFloatReg()) {
    masm.boxDouble(ToFloatRegister(arg), dest);
  } else if (arg->isRegister()) {
    masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
  } else {
    masm.storeValue(arg->toConstant()->toJSValue(), dest);
  }
}
// Stores an already-boxed Value call argument into its stack argument slot.
void CodeGenerator::visitStackArgV(LStackArgV* lir) {
  ValueOperand val = ToValue(lir, 0);
  uint32_t argslot = lir->argslot();
  MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());

  masm.storeValue(val, AddressOfPassedArg(argslot));
}
// Emits a group of parallel register/stack moves. Each LMove is translated to
// a MoveOp type, handed to the MoveResolver to order them safely (cycles,
// overlaps), and then emitted by the MoveEmitter.
void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
  if (!group->numMoves()) {
    return;
  }

  MoveResolver& resolver = masm.moveResolver();

  for (size_t i = 0; i < group->numMoves(); i++) {
    const LMove& move = group->getMove(i);

    LAllocation from = move.from();
    LAllocation to = move.to();
    LDefinition::Type type = move.type();

    // No bogus moves.
    MOZ_ASSERT(from != to);
    MOZ_ASSERT(!from.isConstant());
    MoveOp::Type moveType;
    switch (type) {
      case LDefinition::OBJECT:
      case LDefinition::SLOTS:
      case LDefinition::WASM_ANYREF:
#ifdef JS_NUNBOX32
      case LDefinition::TYPE:
      case LDefinition::PAYLOAD:
#else
      case LDefinition::BOX:
#endif
      case LDefinition::GENERAL:
      case LDefinition::STACKRESULTS:
        moveType = MoveOp::GENERAL;
        break;
      case LDefinition::INT32:
        moveType = MoveOp::INT32;
        break;
      case LDefinition::FLOAT32:
        moveType = MoveOp::FLOAT32;
        break;
      case LDefinition::DOUBLE:
        moveType = MoveOp::DOUBLE;
        break;
      case LDefinition::SIMD128:
        moveType = MoveOp::SIMD128;
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }

    masm.propagateOOM(
        resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
  }

  masm.propagateOOM(resolver.resolve());
  if (masm.oom()) {
    return;
  }

  MoveEmitter emitter(masm);

#ifdef JS_CODEGEN_X86
  // On x86, register pressure may require an explicit scratch register for
  // memory-to-memory moves; otherwise sort them so the emitter can handle
  // them without one.
  if (group->maybeScratchRegister().isGeneralReg()) {
    emitter.setScratchRegister(
        group->maybeScratchRegister().toGeneralReg()->reg());
  } else {
    resolver.sortMemoryToMemoryMoves();
  }
#endif

  emitter.emit(resolver);
  emitter.finish();
}
// Materializes a constant int32 into the output register.
void CodeGenerator::visitInteger(LInteger* lir) {
  masm.move32(Imm32(lir->i32()), ToRegister(lir->output()));
}
// Materializes a constant int64 into the output register (pair).
void CodeGenerator::visitInteger64(LInteger64* lir) {
  masm.move64(Imm64(lir->i64()), ToOutRegister64(lir));
}
// Materializes a GC-thing pointer constant into the output register.
void CodeGenerator::visitPointer(LPointer* lir) {
  masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
}
// Loads a nursery-allocated object through the IonScript's nursery objects
// list. The list-entry address is patched in later, hence the movWithPatch
// with a placeholder immediate.
void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
  Register output = ToRegister(lir->output());
  uint32_t nurseryIndex = lir->mir()->nurseryIndex();

  // Load a pointer to the entry in IonScript's nursery objects list.
  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
  masm.propagateOOM(ionNurseryObjectLabels_.emplaceBack(label, nurseryIndex));

  // Load the JSObject*.
  masm.loadPtr(Address(output, 0), output);
}
// Emits no code; the LIR node only exists to extend the object's live range.
void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
  // No-op.
}
// Debug-mode instrumentation: increments the JSContext's GC-unsafe-region
// counter and asserts the enter/leave calls stay balanced (counter > 0).
void CodeGenerator::visitDebugEnterGCUnsafeRegion(
    LDebugEnterGCUnsafeRegion* lir) {
  Register temp = ToRegister(lir->temp0());

  masm.loadJSContext(temp);

  Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
  masm.add32(Imm32(1), inUnsafeRegion);

  Label ok;
  masm.branch32(Assembler::GreaterThan, inUnsafeRegion, Imm32(0), &ok);
  masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
  masm.bind(&ok);
}
// Debug-mode instrumentation: decrements the JSContext's GC-unsafe-region
// counter and asserts it never goes negative (counter >= 0).
void CodeGenerator::visitDebugLeaveGCUnsafeRegion(
    LDebugLeaveGCUnsafeRegion* lir) {
  Register temp = ToRegister(lir->temp0());

  masm.loadJSContext(temp);

  Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
  masm.add32(Imm32(-1), inUnsafeRegion);

  Label ok;
  masm.branch32(Assembler::GreaterThanOrEqual, inUnsafeRegion, Imm32(0), &ok);
  masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
  masm.bind(&ok);
}
// Loads a native object's dynamic slots pointer into the output register.
void CodeGenerator::visitSlots(LSlots* lir) {
  Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
  masm.loadPtr(slots, ToRegister(lir->output()));
}
// Loads a boxed Value from a fixed dynamic-slot index off the slots pointer.
void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
  ValueOperand dest = ToOutValue(lir);
  Register base = ToRegister(lir->input());
  int32_t offset = lir->mir()->slot() * sizeof(js::Value);

  masm.loadValue(Address(base, offset), dest);
}
4035 static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
4036 MIRType valueType) {
4037 if (value->isConstant()) {
4038 return ConstantOrRegister(value->toConstant()->toJSValue());
4040 return TypedOrValueRegister(valueType, ToAnyRegister(value));
// Stores a typed (unboxed) value into a dynamic slot, emitting a GC pre-write
// barrier first when the MIR node requires one.
void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(js::Value);
  Address dest(base, offset);

  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(dest);
  }

  MIRType valueType = lir->mir()->value()->type();
  ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
  masm.storeUnboxedValue(value, valueType, dest);
}
// Stores a boxed Value into a dynamic slot, emitting a GC pre-write barrier
// first when the MIR node requires one.
void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(Value);

  const ValueOperand value = ToValue(lir, LStoreDynamicSlotV::ValueIndex);

  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(Address(base, offset));
  }

  masm.storeValue(value, Address(base, offset));
}
// Loads a native object's elements pointer into the output register.
void CodeGenerator::visitElements(LElements* lir) {
  Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
  masm.loadPtr(elements, ToRegister(lir->output()));
}
// Unboxes a JSFunction's environment slot into the output register.
void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
  Address environment(ToRegister(lir->function()),
                      JSFunction::offsetOfEnvironment());
  masm.unboxObject(environment, ToRegister(lir->output()));
}
// Loads a method's [[HomeObject]] from the extended-function slot. In DEBUG
// builds, asserts the slot actually holds an object.
void CodeGenerator::visitHomeObject(LHomeObject* lir) {
  Register func = ToRegister(lir->function());
  Address homeObject(func, FunctionExtended::offsetOfMethodHomeObjectSlot());

  masm.assertFunctionIsExtended(func);
#ifdef DEBUG
  Label isObject;
  masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
  masm.assumeUnreachable("[[HomeObject]] must be Object");
  masm.bind(&isObject);
#endif

  masm.unboxObject(homeObject, ToRegister(lir->output()));
}
// Computes the super base (the home object's prototype) for JSOp::SuperBase:
// boxes the proto as an object Value, or produces null for a null proto.
void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
  Register homeObject = ToRegister(lir->homeObject());
  ValueOperand output = ToOutValue(lir);
  Register temp = output.scratchReg();

  masm.loadObjProto(homeObject, temp);

#ifdef DEBUG
  // We won't encounter a lazy proto, because the prototype is guaranteed to
  // either be a JSFunction or a PlainObject, and only proxy objects can have a
  // lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label proxyCheckDone;
  masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
  masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
  masm.bind(&proxyCheckDone);
#endif

  Label nullProto, done;
  masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);

  // Box prototype and return
  masm.tagValue(JSVAL_TYPE_OBJECT, temp, output);
  masm.jump(&done);

  masm.bind(&nullProto);
  masm.moveValue(NullValue(), output);

  masm.bind(&done);
}
4128 template <class T>
4129 static T* ToConstantObject(MDefinition* def) {
4130 MOZ_ASSERT(def->isConstant());
4131 return &def->toConstant()->toObject().as<T>();
// Allocates a BlockLexicalEnvironmentObject from a template object, falling
// back to a VM call (createWithoutEnclosing) when inline GC allocation fails.
void CodeGenerator::visitNewLexicalEnvironmentObject(
    LNewLexicalEnvironmentObject* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj = ToConstantObject<BlockLexicalEnvironmentObject>(
      lir->mir()->templateObj());
  auto* scope = &templateObj->scope();
  gc::Heap initialHeap = gc::Heap::Default;

  using Fn =
      BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>);
  auto* ool =
      oolCallVM<Fn, BlockLexicalEnvironmentObject::createWithoutEnclosing>(
          lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
// Allocates a ClassBodyLexicalEnvironmentObject from a template object,
// falling back to a VM call (createWithoutEnclosing) when inline GC
// allocation fails.
void CodeGenerator::visitNewClassBodyEnvironmentObject(
    LNewClassBodyEnvironmentObject* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj = ToConstantObject<ClassBodyLexicalEnvironmentObject>(
      lir->mir()->templateObj());
  auto* scope = &templateObj->scope();
  gc::Heap initialHeap = gc::Heap::Default;

  using Fn = ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
                                                    Handle<ClassBodyScope*>);
  auto* ool =
      oolCallVM<Fn, ClassBodyLexicalEnvironmentObject::createWithoutEnclosing>(
          lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
// Allocates a VarEnvironmentObject from a template object, falling back to a
// VM call (createWithoutEnclosing) when inline GC allocation fails.
void CodeGenerator::visitNewVarEnvironmentObject(
    LNewVarEnvironmentObject* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj =
      ToConstantObject<VarEnvironmentObject>(lir->mir()->templateObj());
  auto* scope = &templateObj->scope().as<VarScope>();
  gc::Heap initialHeap = gc::Heap::Default;

  using Fn = VarEnvironmentObject* (*)(JSContext*, Handle<VarScope*>);
  auto* ool = oolCallVM<Fn, VarEnvironmentObject::createWithoutEnclosing>(
      lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
// Bails out of Ion code if the object's shape differs from the expected shape.
void CodeGenerator::visitGuardShape(LGuardShape* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToTempRegisterOrInvalid(guard->temp0());
  Label bail;
  masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
                          obj, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Guards on a realm fuse. For fuses with invalidation-dependency support the
// guard is free (just register a dependency); otherwise the fuse word is
// checked at runtime and intact (nullptr) is required, bailing out otherwise.
void CodeGenerator::visitGuardFuse(LGuardFuse* guard) {
  auto fuseIndex = guard->mir()->fuseIndex();
  switch (fuseIndex) {
    case RealmFuses::FuseIndex::OptimizeGetIteratorFuse:
      addOptimizeGetIteratorFuseDependency();
      return;
    default:
      // validateAndRegisterFuseDependencies doesn't have
      // handling for this yet, actively check fuse instead.
      break;
  }

  Register temp = ToRegister(guard->temp0());
  Label bail;

  // Bake specific fuse address for Ion code, because we won't share this code
  // across realms.
  GuardFuse* fuse = mirGen().realm->realmFuses().getFuseByIndex(fuseIndex);
  masm.loadPtr(AbsoluteAddress(fuse->fuseRef()), temp);
  masm.branchPtr(Assembler::NotEqual, temp, ImmPtr(nullptr), &bail);

  bailoutFrom(&bail, guard->snapshot());
}
// Bails out unless the object's shape appears in the given shape list (stored
// as the elements of a shape-list object).
void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {
  Register obj = ToRegister(guard->object());
  Register shapeList = ToRegister(guard->shapeList());
  Register temp = ToRegister(guard->temp0());
  Register temp2 = ToRegister(guard->temp1());
  Register temp3 = ToRegister(guard->temp2());
  Register spectre = ToTempRegisterOrInvalid(guard->temp3());

  Label bail;
  masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
  masm.branchTestObjShapeList(Assembler::NotEqual, obj, temp, temp2, temp3,
                              spectre, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bails out unless the object's prototype equals the expected object.
void CodeGenerator::visitGuardProto(LGuardProto* guard) {
  Register obj = ToRegister(guard->object());
  Register expected = ToRegister(guard->expected());
  Register temp = ToRegister(guard->temp0());

  masm.loadObjProto(obj, temp);

  Label bail;
  masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bails out unless the object's prototype is null (proto word is zero).
void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  masm.loadObjProto(obj, temp);

  Label bail;
  masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bails out unless the object is a native object.
void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchIfNonNativeObj(obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bails out unless the global's generation counter still matches the value
// observed at compile time.
void CodeGenerator::visitGuardGlobalGeneration(LGuardGlobalGeneration* guard) {
  Register temp = ToRegister(guard->temp0());
  Label bail;

  masm.load32(AbsoluteAddress(guard->mir()->generationAddr()), temp);
  masm.branch32(Assembler::NotEqual, temp, Imm32(guard->mir()->expected()),
                &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bails out unless the object is a proxy.
void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestObjectIsProxy(false, obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bails out if the object is a proxy.
void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestObjectIsProxy(true, obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bails out if the proxy's handler belongs to the DOM proxy handler family.
void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
  Register proxy = ToRegister(guard->proxy());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
                                    GetDOMProxyHandlerFamily(), &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Performs a proxy [[Get]] with a fixed property id via a VM call.
void CodeGenerator::visitProxyGet(LProxyGet* lir) {
  Register proxy = ToRegister(lir->proxy());
  Register temp = ToRegister(lir->temp0());

  // Arguments are pushed in reverse order of the callee's signature.
  pushArg(lir->mir()->id(), temp);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
  callVM<Fn, ProxyGetProperty>(lir);
}
// Performs a proxy [[Get]] with a dynamic key Value via a VM call.
void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir, LProxyGetByValue::IdIndex);

  pushArg(idVal);
  pushArg(proxy);

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  callVM<Fn, ProxyGetPropertyByValue>(lir);
}
// Performs a proxy [[HasProperty]] / [[GetOwnProperty]] existence check via a
// VM call, selecting the callee based on the hasOwn flag.
void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir, LProxyHasProp::IdIndex);

  pushArg(idVal);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  if (lir->mir()->hasOwn()) {
    callVM<Fn, ProxyHasOwn>(lir);
  } else {
    callVM<Fn, ProxyHas>(lir);
  }
}
// Performs a proxy [[Set]] with a fixed property id via a VM call, passing
// the strict-mode flag through.
void CodeGenerator::visitProxySet(LProxySet* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand rhs = ToValue(lir, LProxySet::RhsIndex);
  Register temp = ToRegister(lir->temp0());

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(lir->mir()->id(), temp);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, ProxySetProperty>(lir);
}
// Performs a proxy [[Set]] with a dynamic key Value via a VM call, passing
// the strict-mode flag through.
void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir, LProxySetByValue::IdIndex);
  ValueOperand rhs = ToValue(lir, LProxySetByValue::RhsIndex);

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(idVal);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, ProxySetPropertyByValue>(lir);
}
// Sets an array's .length property via a VM call, passing the strict-mode
// flag through.
void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
  Register obj = ToRegister(lir->obj());
  ValueOperand rhs = ToValue(lir, LCallSetArrayLength::RhsIndex);

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
  callVM<Fn, jit::SetArrayLength>(lir);
}
// Megamorphic property load with a fixed name: first tries the megamorphic
// cache inline, then falls back to the pure C++ lookup helper via an ABI
// call; bails out if the object is non-native or the pure lookup fails.
void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
  Register obj = ToRegister(lir->object());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register temp3 = ToRegister(lir->temp3());
  ValueOperand output = ToOutValue(lir);

  Label bail, cacheHit;
  masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
                                  output, &cacheHit);

  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // Push space for the result Value; temp3 points at it (the vp argument).
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(temp3);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(obj);
  masm.movePropertyKey(lir->mir()->name(), temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(temp2);
  masm.passABIArg(temp3);

  masm.callWithABI<Fn, GetNativeDataPropertyPure>();

  MOZ_ASSERT(!output.aliases(ReturnReg));
  masm.Pop(output);

  masm.branchIfFalseBool(ReturnReg, &bail);

  masm.bind(&cacheHit);
  bailoutFrom(&bail, lir->snapshot());
}
// Megamorphic property load with a dynamic key Value: tries the megamorphic
// cache inline, then calls the pure C++ by-value lookup helper. The key goes
// in vp[0] and the result comes back in vp[1]; bails out on failure.
void CodeGenerator::visitMegamorphicLoadSlotByValue(
    LMegamorphicLoadSlotByValue* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand idVal = ToValue(lir, LMegamorphicLoadSlotByValue::IdIndex);
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  ValueOperand output = ToOutValue(lir);

  Label bail, cacheHit;
  masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
                                         output, &cacheHit);

  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(temp0);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(obj);
  masm.passABIArg(temp2);
  masm.passABIArg(temp0);
  masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();

  MOZ_ASSERT(!idVal.aliases(temp0));
  masm.storeCallPointerResult(temp0);
  masm.Pop(idVal);

  // On failure, discard the reserved result slot before bailing; the
  // setFramePushed below re-synchronizes tracked stack height for the
  // success path.
  uint32_t framePushed = masm.framePushed();
  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  masm.freeStack(sizeof(Value));  // Discard result Value.
  masm.jump(&bail);

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.Pop(output);

  masm.bind(&cacheHit);
  bailoutFrom(&bail, lir->snapshot());
}
// Megamorphic property store with a fixed name: tries the megamorphic set-slot
// cache inline (with a pre-write barrier callback), falling back to a strict
// SetPropertyMegamorphic VM call. After a cache hit, emits the nursery
// post-write barrier if a nursery cell was stored into a tenured object.
void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand value = ToValue(lir, LMegamorphicStoreSlot::RhsIndex);

  Register temp0 = ToRegister(lir->temp0());
#ifndef JS_CODEGEN_X86
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  Label cacheHit, done;
#ifdef JS_CODEGEN_X86
  // x86 is register-starved: the cached-set-slot helper has a variant that
  // needs only one temp.
  masm.emitMegamorphicCachedSetSlot(
      lir->mir()->name(), obj, temp0, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#else
  masm.emitMegamorphicCachedSetSlot(
      lir->mir()->name(), obj, temp0, temp1, temp2, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#endif

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(value);
  pushArg(lir->mir()->name(), temp0);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, SetPropertyMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // Post-write barrier: only needed if obj is tenured and value is a nursery
  // cell.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  saveVolatile(temp0);
  emitPostWriteBarrier(obj);
  restoreVolatile(temp0);

  masm.bind(&done);
}
// Megamorphic has/hasOwn property check with a dynamic key: tries the
// megamorphic exists-cache inline, then calls the pure C++ helper (key in
// vp[0], boolean result in vp[1]); bails out on failure.
void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand idVal = ToValue(lir, LMegamorphicHasProp::IdIndex);
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label bail, cacheHit;
  masm.emitMegamorphicCacheLookupExists(idVal, obj, temp0, temp1, temp2, output,
                                        &cacheHit, lir->mir()->hasOwn());

  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(temp0);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(obj);
  masm.passABIArg(temp2);
  masm.passABIArg(temp0);
  if (lir->mir()->hasOwn()) {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
  } else {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
  }

  MOZ_ASSERT(!idVal.aliases(temp0));
  masm.storeCallPointerResult(temp0);
  masm.Pop(idVal);

  // On failure, discard the reserved result slot before bailing; the
  // setFramePushed below re-synchronizes tracked stack height for the
  // success path.
  uint32_t framePushed = masm.framePushed();
  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  masm.freeStack(sizeof(Value));  // Discard result Value.
  masm.jump(&bail);

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
  masm.freeStack(sizeof(Value));
  masm.bind(&cacheHit);

  bailoutFrom(&bail, lir->snapshot());
}
// Checks whether an atom key is a property of a small fixed-shape object by
// unrolling a pointer comparison against each property key of the shape.
// Produces 1 on match, 0 otherwise.
void CodeGenerator::visitSmallObjectVariableKeyHasProp(
    LSmallObjectVariableKeyHasProp* lir) {
  Register id = ToRegister(lir->id());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  Label isAtom;
  masm.branchTest32(Assembler::NonZero, Address(id, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), &isAtom);
  masm.assumeUnreachable("Expected atom input");
  masm.bind(&isAtom);
#endif

  SharedShape* shape = &lir->mir()->shape()->asShared();

  Label done, success;
  // Atoms are unique, so pointer equality suffices for each key comparison.
  for (SharedShapePropertyIter<NoGC> iter(shape); !iter.done(); iter++) {
    masm.branchPtr(Assembler::Equal, id, ImmGCPtr(iter->key().toAtom()),
                   &success);
  }
  masm.move32(Imm32(0), output);
  masm.jump(&done);
  masm.bind(&success);
  masm.move32(Imm32(1), output);
  masm.bind(&done);
}
// Bails out if the object's class is any ArrayBuffer or SharedArrayBuffer
// variant (fixed-length, resizable, or growable).
void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
    LGuardIsNotArrayBufferMaybeShared* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&FixedLengthArrayBufferObject::class_), &bail);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&FixedLengthSharedArrayBufferObject::class_), &bail);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&ResizableArrayBufferObject::class_), &bail);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&GrowableSharedArrayBufferObject::class_), &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bails out unless the object's class is a TypedArray class.
void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bails out unless the object's class is a fixed-length TypedArray class.
void CodeGenerator::visitGuardIsFixedLengthTypedArray(
    LGuardIsFixedLengthTypedArray* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotFixedLengthTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bails out unless the object's class is a resizable TypedArray class.
void CodeGenerator::visitGuardIsResizableTypedArray(
    LGuardIsResizableTypedArray* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotResizableTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bails out unless the proxy's handler pointer equals the expected handler.
void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
  Register obj = ToRegister(guard->input());

  Label bail;

  Address handlerAddr(obj, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::NotEqual, handlerAddr,
                 ImmPtr(guard->mir()->handler()), &bail);

  bailoutFrom(&bail, guard->snapshot());
}
// Bails out on pointer (in)equality between the input object and the expected
// object; the bailOnEquality flag selects which direction fails.
void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
  Register input = ToRegister(guard->input());
  Register expected = ToRegister(guard->expected());

  Assembler::Condition cond =
      guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
  bailoutCmpPtr(cond, input, expected, guard->snapshot());
}
// Bails out unless the input function is pointer-identical to the expected
// function.
void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
  Register input = ToRegister(guard->input());
  Register expected = ToRegister(guard->expected());

  bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
}
// Bails out unless the input string equals the expected atom (may call out,
// hence the volatile-register set excluding the scratch).
void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
  Register str = ToRegister(guard->str());
  Register scratch = ToRegister(guard->temp0());

  LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
  volatileRegs.takeUnchecked(scratch);

  Label bail;
  masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
                         &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bails out unless the input symbol is pointer-identical to the expected
// symbol (symbols are unique, so pointer comparison suffices).
void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
  Register symbol = ToRegister(guard->symbol());

  bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
                guard->snapshot());
}
// Bails out unless the input int32 equals the expected constant.
void CodeGenerator::visitGuardSpecificInt32(LGuardSpecificInt32* guard) {
  Register num = ToRegister(guard->num());

  bailoutCmp32(Assembler::NotEqual, num, Imm32(guard->mir()->expected()),
               guard->snapshot());
}
// Converts a string to an array index: fast path reads a cached index value
// from the string; slow path calls GetIndexFromString and bails out when that
// reports failure (negative result).
void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
  Register str = ToRegister(lir->string());
  Register output = ToRegister(lir->output());

  Label vmCall, done;
  masm.loadStringIndexValue(str, output, &vmCall);
  masm.jump(&done);

  {
    masm.bind(&vmCall);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = int32_t (*)(JSString* str);
    masm.setupAlignedABICall();
    masm.passABIArg(str);
    masm.callWithABI<Fn, GetIndexFromString>();
    masm.storeCallInt32Result(output);

    masm.PopRegsInMask(volatileRegs);

    // GetIndexFromString returns a negative value on failure.
    bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
  }

  masm.bind(&done);
}
// Converts a string to an int32 via the MacroAssembler helper, bailing out if
// the string does not represent an int32.
void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
  Register str = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  LiveRegisterSet volatileRegs = liveVolatileRegs(lir);

  Label bail;
  masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Converts a string to a double: fast path converts a cached index value;
// slow path calls StringToNumberPure with a stack slot for the result and
// bails out on failure (OOM).
void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
  Register str = ToRegister(lir->string());
  FloatRegister output = ToFloatRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label vmCall, done;
  // Use indexed value as fast path if possible.
  masm.loadStringIndexValue(str, temp0, &vmCall);
  masm.convertInt32ToDouble(temp0, output);
  masm.jump(&done);

  masm.bind(&vmCall);

  // Reserve stack for holding the result value of the call.
  masm.reserveStack(sizeof(double));
  masm.moveStackPtrTo(temp0);

  LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
  volatileRegs.takeUnchecked(temp0);
  volatileRegs.takeUnchecked(temp1);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(str);
  masm.passABIArg(temp0);
  masm.callWithABI<Fn, StringToNumberPure>();
  masm.storeCallPointerResult(temp0);

  masm.PopRegsInMask(volatileRegs);

  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  {
    // OOM path, recovered by StringToNumberPure.
    //
    // Use addToStackPtr instead of freeStack as freeStack tracks stack height
    // flow-insensitively, and using it here would confuse the stack height
    // tracking.
    masm.addToStackPtr(Imm32(sizeof(double)));
    bailout(lir->snapshot());
  }
  masm.bind(&ok);
  masm.Pop(output);

  masm.bind(&done);
}
// Bails out unless the object's dense-elements initialized length is zero.
void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);

  // Make sure there are no dense elements.
  Address initLength(temp, ObjectElements::offsetOfInitializedLength());
  bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
}
// Widen a boolean (0 or 1) to a 64-bit integer by zero-extension.
void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
  Register input = ToRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  masm.move32To64ZeroExtend(input, output);
}
// Shared helper: convert a string register to an int64 via the DoStringToInt64
// VM call, using reserved stack space as the uint64_t outparam.
void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
                                      Register64 output) {
  Register temp = output.scratchReg();

  saveLive(lir);

  // Reserve stack space for the outparam and pass its address to the VM call.
  masm.reserveStack(sizeof(uint64_t));
  masm.moveStackPtrTo(temp);
  pushArg(temp);
  pushArg(input);

  using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
  callVM<Fn, DoStringToInt64>(lir);

  // Read the result back and release the outparam slot.
  masm.load64(Address(masm.getStackPointer(), 0), output);
  masm.freeStack(sizeof(uint64_t));

  restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
}
// LIR entry point: string-to-int64 conversion, delegated to the shared helper.
void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
  Register input = ToRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  emitStringToInt64(lir, input, output);
}
// Convert a boxed Value to int64. Tries BigInt, Boolean, then String unboxing
// in sequence; any other tag bails out.
void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
  ValueOperand input = ToValue(lir, LValueToInt64::InputIndex);
  Register temp = ToRegister(lir->temp0());
  Register64 output = ToOutRegister64(lir);

  // Number of type checks emitted below; used to route the last failing
  // check directly to the bailout label.
  int checks = 3;

  Label fail, done;
  // Jump to fail if this is the last check and we fail it,
  // otherwise to the next test.
  auto emitTestAndUnbox = [&](auto testAndUnbox) {
    MOZ_ASSERT(checks > 0);

    checks--;
    Label notType;
    Label* target = checks ? &notType : &fail;

    testAndUnbox(target);

    if (checks) {
      masm.jump(&done);
      masm.bind(&notType);
    }
  };

  Register tag = masm.extractTag(input, temp);

  // BigInt.
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestBigInt(Assembler::NotEqual, tag, target);
    masm.unboxBigInt(input, temp);
    masm.loadBigInt64(temp, output);
  });

  // Boolean
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestBoolean(Assembler::NotEqual, tag, target);
    masm.unboxBoolean(input, temp);
    masm.move32To64ZeroExtend(temp, output);
  });

  // String
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestString(Assembler::NotEqual, tag, target);
    masm.unboxString(input, temp);
    emitStringToInt64(lir, temp, output);
  });

  MOZ_ASSERT(checks == 0);

  bailoutFrom(&fail, lir->snapshot());
  masm.bind(&done);
}
// Truncate a BigInt to its low 64 bits.
void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
  Register operand = ToRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  masm.loadBigInt64(operand, output);
}
// Create the out-of-line VM-call fallback used when inline BigInt allocation
// fails. On 32-bit targets the int64 input is passed as two 32-bit halves.
OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
                                                    Scalar::Type type,
                                                    Register64 input,
                                                    Register output) {
#if JS_BITS_PER_WORD == 32
  using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
  auto args = ArgList(input.low, input.high);
#else
  using Fn = BigInt* (*)(JSContext*, uint64_t);
  auto args = ArgList(input);
#endif

  if (type == Scalar::BigInt64) {
    return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
                                                     StoreRegisterTo(output));
  }
  MOZ_ASSERT(type == Scalar::BigUint64);
  return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
                                                    StoreRegisterTo(output));
}
// Allocate a BigInt and initialize it from a 64-bit value. Uses |maybeTemp|
// as the allocation temp when provided; otherwise spills a register around
// the inline allocation. Falls back to a VM call on allocation failure.
void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
                                     Register64 input, Register output,
                                     Register maybeTemp) {
  OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);

  if (maybeTemp != InvalidReg) {
    masm.newGCBigInt(output, maybeTemp, initialBigIntHeap(), ool->entry());
  } else {
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(input);
    regs.take(output);

    Register temp = regs.takeAny();

    // No free temp register was reserved, so preserve one across the
    // inline allocation and restore it on both the success and failure paths.
    masm.push(temp);

    Label fail, ok;
    masm.newGCBigInt(output, temp, initialBigIntHeap(), &fail);
    masm.pop(temp);
    masm.jump(&ok);
    masm.bind(&fail);
    masm.pop(temp);
    masm.jump(ool->entry());
    masm.bind(&ok);
  }
  masm.initializeBigInt64(type, output, input);
  masm.bind(ool->rejoin());
}
// Box a signed int64 value as a BigInt object.
void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
  Register64 input = ToRegister64(lir->input());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp);
}
// Guard that a boxed Value equals the expected constant; bails out otherwise.
void CodeGenerator::visitGuardValue(LGuardValue* lir) {
  ValueOperand input = ToValue(lir, LGuardValue::InputIndex);
  Value expected = lir->mir()->expected();
  Label bail;
  masm.branchTestValue(Assembler::NotEqual, input, expected, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard that a Value is null or undefined; any other tag bails out.
void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
  ValueOperand input = ToValue(lir, LGuardNullOrUndefined::InputIndex);

  ScratchTagScope tag(masm, input);
  masm.splitTagForTest(input, tag);

  Label done;
  masm.branchTestNull(Assembler::Equal, tag, &done);

  Label bail;
  masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
  bailoutFrom(&bail, lir->snapshot());

  masm.bind(&done);
}
// Guard that a Value is not an object; bails out if it is.
void CodeGenerator::visitGuardIsNotObject(LGuardIsNotObject* lir) {
  ValueOperand input = ToValue(lir, LGuardIsNotObject::InputIndex);

  Label bail;
  masm.branchTestObject(Assembler::Equal, input, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard a JSFunction's flag word: all |expectedFlags| must be set and none of
// |unexpectedFlags| may be set; bails out otherwise.
void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
  Register function = ToRegister(lir->function());

  Label bail;
  if (uint16_t flags = lir->mir()->expectedFlags()) {
    masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
  }
  if (uint16_t flags = lir->mir()->unexpectedFlags()) {
    masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
  }
  bailoutFrom(&bail, lir->snapshot());
}
// Guard that a function is a non-builtin constructor; bails out otherwise.
void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
    LGuardFunctionIsNonBuiltinCtor* lir) {
  Register function = ToRegister(lir->function());
  Register temp = ToRegister(lir->temp0());

  Label bail;
  masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard on a function's kind; the MIR node chooses whether equality or
// inequality with the expected kind triggers the bailout.
void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
  Register function = ToRegister(lir->function());
  Register temp = ToRegister(lir->temp0());

  Assembler::Condition cond =
      lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;

  Label bail;
  masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard that a function's script slot holds the expected BaseScript; bails
// out otherwise.
void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
  Register function = ToRegister(lir->function());

  Address scriptAddr(function, JSFunction::offsetOfJitInfoOrScript());
  bailoutCmpPtr(Assembler::NotEqual, scriptAddr,
                ImmGCPtr(lir->mir()->expected()), lir->snapshot());
}
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;          // Instruction the barrier belongs to.
  const LAllocation* object_;  // The object being written to.

 public:
  OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
      : lir_(lir), object_(object) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteBarrier(this);
  }

  LInstruction* lir() const { return lir_; }
  const LAllocation* object() const { return object_; }
};
// For a compile-time-known tenured cell, check its arena's buffered-cells
// bitmap directly: jump to |exit| if the cell is already marked, to |callVM|
// if the arena still has the sentinel set, otherwise set the bit inline.
static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
                                            const gc::TenuredCell* cell,
                                            AllocatableGeneralRegisterSet& regs,
                                            Label* exit, Label* callVM) {
  Register temp = regs.takeAny();

  gc::Arena* arena = cell->arena();

  Register cells = temp;
  masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);

  size_t index = gc::ArenaCellSet::getCellIndex(cell);
  size_t word;
  uint32_t mask;
  gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
  size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);

  // Already recorded in the cell set: nothing to do.
  masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
                    exit);

  // Check whether this is the sentinel set and if so call the VM to allocate
  // one for this arena.
  masm.branchPtr(Assembler::Equal,
                 Address(cells, gc::ArenaCellSet::offsetOfArena()),
                 ImmPtr(nullptr), callVM);

  // Add the cell to the set.
  masm.or32(Imm32(mask), Address(cells, offset));
  masm.jump(exit);

  regs.add(temp);
}
// Emit a generational post-write barrier for a write into |objreg|. Fast
// paths avoid the VM call when the object is provably already recorded in the
// store buffer; otherwise calls PostWriteBarrier/PostGlobalWriteBarrier.
static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
                                 Register objreg, JSObject* maybeConstant,
                                 bool isGlobal,
                                 AllocatableGeneralRegisterSet& regs) {
  MOZ_ASSERT_IF(isGlobal, maybeConstant);

  Label callVM;
  Label exit;

  Register temp = regs.takeAny();

  // We already have a fast path to check whether a global is in the store
  // buffer.
  if (!isGlobal) {
    if (maybeConstant) {
      // Check store buffer bitmap directly for known object.
      EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
                                      &exit, &callVM);
    } else {
      // Check one element cache to avoid VM call.
      masm.branchPtr(Assembler::Equal,
                     AbsoluteAddress(runtime->addressOfLastBufferedWholeCell()),
                     objreg, &exit);
    }
  }

  // Call into the VM to barrier the write.
  masm.bind(&callVM);

  Register runtimereg = temp;
  masm.mov(ImmPtr(runtime), runtimereg);

  masm.setupAlignedABICall();
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  if (isGlobal) {
    using Fn = void (*)(JSRuntime* rt, GlobalObject* obj);
    masm.callWithABI<Fn, PostGlobalWriteBarrier>();
  } else {
    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* obj);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }

  masm.bind(&exit);
}
// Post-write barrier for an LAllocation operand: materializes a constant
// object into a register if needed, then defers to EmitPostWriteBarrier.
void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());

  Register objreg;
  JSObject* object = nullptr;
  bool isGlobal = false;
  if (obj->isConstant()) {
    object = &obj->toConstant()->toObject();
    isGlobal = isGlobalObject(object);
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(object), objreg);
  } else {
    objreg = ToRegister(obj);
    regs.takeUnchecked(objreg);
  }

  EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
}
// Returns true if `def` might be allocated in the nursery.
static bool ValueNeedsPostBarrier(MDefinition* def) {
  // Look through a box to test the unboxed payload's type.
  if (def->isBox()) {
    def = def->toBox()->input();
  }
  if (def->type() == MIRType::Value) {
    // A generic Value could hold any GC thing; assume it needs a barrier.
    return true;
  }
  return NeedsPostBarrier(def->type());
}
// Out-of-line path for an element post-write barrier; carries the object,
// element index (plus a constant adjustment), a scratch register, and the
// volatile registers live across the VM call.
class OutOfLineElementPostWriteBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  LiveRegisterSet liveVolatileRegs_;  // Saved/restored around the ABI call.
  const LAllocation* index_;          // Element index (constant or register).
  int32_t indexDiff_;                 // Constant offset added to the index.
  Register obj_;
  Register scratch_;

 public:
  OutOfLineElementPostWriteBarrier(const LiveRegisterSet& liveVolatileRegs,
                                   Register obj, const LAllocation* index,
                                   Register scratch, int32_t indexDiff)
      : liveVolatileRegs_(liveVolatileRegs),
        index_(index),
        indexDiff_(indexDiff),
        obj_(obj),
        scratch_(scratch) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineElementPostWriteBarrier(this);
  }

  const LiveRegisterSet& liveVolatileRegs() const { return liveVolatileRegs_; }
  const LAllocation* index() const { return index_; }
  int32_t indexDiff() const { return indexDiff_; }

  Register object() const { return obj_; }
  Register scratch() const { return scratch_; }
};
// Emit an element post-write barrier: skip entirely for values that cannot be
// nursery-allocated, otherwise branch to the out-of-line VM call when a
// nursery cell is stored into a tenured object.
void CodeGenerator::emitElementPostWriteBarrier(
    MInstruction* mir, const LiveRegisterSet& liveVolatileRegs, Register obj,
    const LAllocation* index, Register scratch, const ConstantOrRegister& val,
    int32_t indexDiff) {
  if (val.constant()) {
    // Constants in the nursery cannot be baked into jitcode, so a constant
    // GC thing is necessarily tenured and needs no barrier.
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    return;
  }

  auto* ool = new (alloc()) OutOfLineElementPostWriteBarrier(
      liveVolatileRegs, obj, index, scratch, indexDiff);
  addOutOfLineCode(ool, mir);

  // A store into a nursery object never needs a barrier.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, ool->rejoin());

  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::Equal, reg.valueReg(), scratch,
                                  ool->entry());
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, reg.typedReg().gpr(),
                                 scratch, ool->entry());
  }

  masm.bind(ool->rejoin());
}
// Out-of-line code: call PostWriteElementBarrier with (runtime, obj, index),
// preserving the live volatile registers around the ABI call.
void CodeGenerator::visitOutOfLineElementPostWriteBarrier(
    OutOfLineElementPostWriteBarrier* ool) {
  Register obj = ool->object();
  Register scratch = ool->scratch();
  const LAllocation* index = ool->index();
  int32_t indexDiff = ool->indexDiff();

  masm.PushRegsInMask(ool->liveVolatileRegs());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(obj);
  regs.takeUnchecked(scratch);

  Register indexReg;
  if (index->isConstant()) {
    indexReg = regs.takeAny();
    masm.move32(Imm32(ToInt32(index) + indexDiff), indexReg);
  } else {
    indexReg = ToRegister(index);
    regs.takeUnchecked(indexReg);
    if (indexDiff != 0) {
      masm.add32(Imm32(indexDiff), indexReg);
    }
  }

  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(gen->runtime), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.passABIArg(indexReg);
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  // We don't need a sub32 here because indexReg must be in liveVolatileRegs
  // if indexDiff is not zero, so it will be restored below.
  MOZ_ASSERT_IF(indexDiff != 0, ool->liveVolatileRegs().has(indexReg));

  masm.PopRegsInMask(ool->liveVolatileRegs());

  masm.jump(ool->rejoin());
}
// Post-write barrier for an object already in a register (never a known
// constant, never the global).
void CodeGenerator::emitPostWriteBarrier(Register objreg) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(objreg);
  EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
}
// Out-of-line code: run the whole-object post-write barrier with volatile
// registers saved across it.
void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
    OutOfLineCallPostWriteBarrier* ool) {
  saveLiveVolatile(ool->lir());
  const LAllocation* obj = ool->object();
  emitPostWriteBarrier(obj);
  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
// If |maybeGlobal| is this script's global, skip the barrier when the realm's
// globalWriteBarriered flag says it has already been barriered.
void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
                                                OutOfLineCode* ool) {
  // Check whether an object is a global that we have already barriered before
  // calling into the VM.
  //
  // We only check for the script's global, not other globals within the same
  // compartment, because we bake in a pointer to realm->globalWriteBarriered
  // and doing that would be invalid for other realms because they could be
  // collected before the Ion code is discarded.

  if (!maybeGlobal->isConstant()) {
    return;
  }

  JSObject* obj = &maybeGlobal->toConstant()->toObject();
  if (gen->realm->maybeGlobal() != obj) {
    return;
  }

  const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
                ool->rejoin());
}
// Shared emission for typed (object/string/BigInt) post-write barriers:
// take the OOL path only when a tenured object stores a nursery value.
template <class LPostBarrierType, MIRType nurseryType>
void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
                                                OutOfLineCode* ool) {
  static_assert(NeedsPostBarrier(nurseryType));

  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  Register value = ToRegister(lir->value());
  // Verify the LIR value type matches the instantiated nursery type.
  if constexpr (nurseryType == MIRType::Object) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
  } else if constexpr (nurseryType == MIRType::String) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
  } else {
    static_assert(nurseryType == MIRType::BigInt);
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
// Shared emission for boxed-Value post-write barriers; like the typed variant
// but tests the Value payload for being a nursery cell.
template <class LPostBarrierType>
void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
                                                 OutOfLineCode* ool) {
  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  ValueOperand value = ToValue(lir, LPostBarrierType::ValueIndex);
  masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
// Post-write barrier for storing an object value.
void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
}
// Post-write barrier for storing a string value.
void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
}
// Post-write barrier for storing a BigInt value.
void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
}
// Post-write barrier for storing a boxed Value.
void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommonV(lir, ool);
}
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteElementBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;          // Instruction the barrier belongs to.
  const LAllocation* object_;  // Object whose element is written.
  const LAllocation* index_;   // Element index.

 public:
  OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
                                       const LAllocation* object,
                                       const LAllocation* index)
      : lir_(lir), object_(object), index_(index) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteElementBarrier(this);
  }

  LInstruction* lir() const { return lir_; }

  const LAllocation* object() const { return object_; }

  const LAllocation* index() const { return index_; }
};
// Out-of-line code: call PostWriteElementBarrier(runtime, obj, index) with
// live volatile registers preserved; materializes a constant object.
void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
    OutOfLineCallPostWriteElementBarrier* ool) {
  saveLiveVolatile(ool->lir());

  const LAllocation* obj = ool->object();
  const LAllocation* index = ool->index();

  Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
  Register indexreg = ToRegister(index);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(indexreg);

  if (obj->isConstant()) {
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
  } else {
    regs.takeUnchecked(objreg);
  }

  Register runtimereg = regs.takeAny();
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.setupAlignedABICall();
  masm.mov(ImmPtr(gen->runtime), runtimereg);
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  masm.passABIArg(indexreg);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
// Element post-write barrier for storing an object value.
void CodeGenerator::visitPostWriteElementBarrierO(
    LPostWriteElementBarrierO* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
                                                                          ool);
}
// Element post-write barrier for storing a string value.
void CodeGenerator::visitPostWriteElementBarrierS(
    LPostWriteElementBarrierS* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
                                                                          ool);
}
// Element post-write barrier for storing a BigInt value.
void CodeGenerator::visitPostWriteElementBarrierBI(
    LPostWriteElementBarrierBI* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
                                                                           ool);
}
// Element post-write barrier for storing a boxed Value.
void CodeGenerator::visitPostWriteElementBarrierV(
    LPostWriteElementBarrierV* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommonV(lir, ool);
}
// Debug assertion: verify that no post-write barrier was actually needed here
// (i.e. not a nursery value stored into a tenured object).
void CodeGenerator::visitAssertCanElidePostWriteBarrier(
    LAssertCanElidePostWriteBarrier* lir) {
  Register object = ToRegister(lir->object());
  ValueOperand value =
      ToValue(lir, LAssertCanElidePostWriteBarrier::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  Label ok;
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, &ok);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp, &ok);

  masm.assumeUnreachable("Unexpected missing post write barrier");

  masm.bind(&ok);
}
// Shared emission for calling a native (C++) JS function from Ion: builds the
// vp array on the stack, enters a fake exit frame, performs the ABI call, and
// unwinds. Instantiated for LCallNative and LCallClassHook.
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
  MCallBase* mir = call->mir();

  uint32_t unusedStack = UnusedStackBytesForCall(mir->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argContextReg = ToRegister(call->getArgContextReg());
  const Register argUintNReg = ToRegister(call->getArgUintNReg());
  const Register argVpReg = ToRegister(call->getArgVpReg());

  // Misc. temporary registers.
  const Register tempReg = ToRegister(call->getTempReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Allocate space for the outparam, moving the StackPointer to what will be
  // &vp[1].
  masm.adjustStack(unusedStack);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. The StackPointer is moved
  // to &vp[0].
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    Register calleeReg = ToRegister(call->getCallee());
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));

    if (call->mir()->maybeCrossRealm()) {
      masm.switchToObjectRealm(calleeReg, tempReg);
    }
  } else {
    WrappedFunction* target = call->getSingleTarget();
    masm.Push(ObjectValue(*target->rawNativeJSFunction()));

    if (call->mir()->maybeCrossRealm()) {
      masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
      masm.switchToObjectRealm(tempReg, tempReg);
    }
  }

  // Preload arguments into registers.
  masm.loadJSContext(argContextReg);
  masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
  masm.moveStackPtrTo(argVpReg);

  masm.Push(argUintNReg);

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
  masm.enterFakeExitFrameForNative(argContextReg, tempReg,
                                   call->mir()->isConstructing());

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.passABIArg(argContextReg);
  masm.passABIArg(argUintNReg);
  masm.passABIArg(argVpReg);

  ensureOsiSpace();
  // If we're using a simulator build, `native` will already point to the
  // simulator's call-redirection code for LCallClassHook. Load the address in
  // a register first so that we don't try to redirect it a second time.
  bool emittedCall = false;
#ifdef JS_SIMULATOR
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    masm.movePtr(ImmPtr(native), tempReg);
    masm.callWithABI(tempReg);
    emittedCall = true;
  }
#endif
  if (!emittedCall) {
    masm.callWithABI(DynamicFunction<JSNative>(native), ABIType::General,
                     CheckUnsafeCallWithABI::DontCheckHasExitFrame);
  }

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

  if (call->mir()->maybeCrossRealm()) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  masm.loadValue(
      Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
      JSReturnOperand);

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
      mir->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Call a known native function; may substitute the ignores-return-value
// variant from the JitInfo when the result is unused.
void CodeGenerator::visitCallNative(LCallNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithoutJitEntry());

  JSNative native = target->native();
  if (call->ignoresReturnValue() && target->hasJitInfo()) {
    const JSJitInfo* jitInfo = target->jitInfo();
    if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
      native = jitInfo->ignoresReturnValueMethod;
    }
  }

  emitCallNative(call, native);
}
// Call a class call/construct hook through the shared native-call emitter.
void CodeGenerator::visitCallClassHook(LCallClassHook* call) {
  emitCallNative(call, call->mir()->target());
}
static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
                           DOMObjectKind kind) {
  // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
  // will be in the first slot but may be fixed or non-fixed.
  MOZ_ASSERT(obj != priv);

  switch (kind) {
    case DOMObjectKind::Native:
      // If it's a native object, the value must be in a fixed slot.
      // See CanAttachDOMCall in CacheIR.cpp.
      masm.debugAssertObjHasFixedSlots(obj, priv);
      masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
      break;
    case DOMObjectKind::Proxy: {
#ifdef DEBUG
      // Sanity check: it must be a DOM proxy.
      Label isDOMProxy;
      masm.branchTestProxyHandlerFamily(
          Assembler::Equal, obj, priv, GetDOMProxyHandlerFamily(), &isDOMProxy);
      masm.assumeUnreachable("Expected a DOM proxy");
      masm.bind(&isDOMProxy);
#endif
      // Proxy: the private lives in reserved slot 0 behind an indirection.
      masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
      masm.loadPrivate(
          Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
      break;
    }
  }
}
// Call a DOM method through its JSJitInfo method entry point, building a
// JSJitMethodCallArgs on the stack and an IonDOMMethod exit frame.
void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithoutJitEntry());
  MOZ_ASSERT(target->hasJitInfo());
  MOZ_ASSERT(call->mir()->isCallDOMNative());

  int unusedStack = UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argJSContext = ToRegister(call->getArgJSContext());
  const Register argObj = ToRegister(call->getArgObj());
  const Register argPrivate = ToRegister(call->getArgPrivate());
  const Register argArgs = ToRegister(call->getArgArgs());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // DOM methods have the signature:
  //  bool (*)(JSContext*, HandleObject, void* private, const
  //           JSJitMethodCallArgs& args)
  // Where args is initialized from an argc and a vp, vp[0] is space for an
  // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
  // function arguments. Note that args stores the argv, not the vp, and
  // argv == vp + 2.

  // Nestle the stack up against the pushed arguments, leaving StackPointer at
  // &vp[1]
  masm.adjustStack(unusedStack);
  // argObj is filled with the extracted object, then returned.
  Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
  MOZ_ASSERT(obj == argObj);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. After this the StackPointer
  // points to &vp[0].
  masm.Push(ObjectValue(*target->rawNativeJSFunction()));

  // Now compute the argv value. Since StackPointer is pointing to &vp[0] and
  // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
  // StackPointer.
  static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
  static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
                IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);

  LoadDOMPrivate(masm, obj, argPrivate,
                 static_cast<MCallDOMNative*>(call->mir())->objectKind());

  // Push argc from the call instruction into what will become the IonExitFrame
  masm.Push(Imm32(call->numActualArgs()));

  // Push our argv onto the stack
  masm.Push(argArgs);
  // And store our JSJitMethodCallArgs* in argArgs.
  masm.moveStackPtrTo(argArgs);

  // Push |this| object for passing HandleObject. We push after argc to
  // maintain the same sp-relative location of the object pointer with other
  // DOMExitFrames.
  masm.Push(argObj);
  masm.moveStackPtrTo(argObj);

  if (call->mir()->maybeCrossRealm()) {
    // We use argJSContext as scratch register here.
    masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
    masm.switchToObjectRealm(argJSContext, argJSContext);
  }

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
  masm.loadJSContext(argJSContext);
  masm.enterFakeExitFrame(argJSContext, argJSContext,
                          ExitFrameType::IonDOMMethod);

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.loadJSContext(argJSContext);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argObj);
  masm.passABIArg(argPrivate);
  masm.passABIArg(argArgs);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (target->jitInfo()->isInfallible) {
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    // Test for failure.
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    // Load the outparam vp[0] into output register(s).
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  // Switch back to the current realm if needed. Note: if the DOM method threw
  // an exception, the exception handler will do this.
  if (call->mir()->maybeCrossRealm()) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "Clobbering ReturnReg should not affect the return value");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Fetch a named self-hosted intrinsic value via a VM call. The intrinsic's
// name is pushed as the sole argument; GetIntrinsicValue produces the value
// through the VM call's MutableHandleValue out-parameter.
5786 void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
5787 pushArg(ImmGCPtr(lir->mir()->name()));
5789 using Fn = bool (*)(JSContext* cx, Handle<PropertyName*>, MutableHandleValue);
5790 callVM<Fn, GetIntrinsicValue>(lir);
// Slow-path call: invoke |calleereg| through the generic jit::InvokeFunction
// VM function. |unusedStack| bytes are freed first so the stack pointer
// points directly at the argument vector (which becomes the VM call's argv),
// and are re-reserved afterwards so framePushed_ bookkeeping stays balanced.
5793 void CodeGenerator::emitCallInvokeFunction(
5794 LInstruction* call, Register calleereg, bool constructing,
5795 bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
5796 // Nestle %esp up to the argument vector.
5797 // Each path must account for framePushed_ separately, for callVM to be valid.
5798 masm.freeStack(unusedStack);
5800 pushArg(masm.getStackPointer()); // argv.
5801 pushArg(Imm32(argc)); // argc.
5802 pushArg(Imm32(ignoresReturnValue));
5803 pushArg(Imm32(constructing)); // constructing.
5804 pushArg(calleereg); // JSFunction*.
5806 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5807 MutableHandleValue);
5808 callVM<Fn, jit::InvokeFunction>(call);
5810 // Un-nestle %esp from the argument vector. No prefix was pushed.
5811 masm.reserveStack(unusedStack);
// Emit a call to an unknown callee by jumping into the shared
// IonGenericCall trampoline (see generateIonGenericCallStub below). The
// trampoline receives the callee object in IonGenericCallCalleeReg and the
// actual argument count in the argc register, and returns directly to the
// code emitted here.
5814 void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
5815 // The callee is passed straight through to the trampoline.
5816 MOZ_ASSERT(ToRegister(call->getCallee()) == IonGenericCallCalleeReg);
5818 Register argcReg = ToRegister(call->getArgc());
5819 uint32_t unusedStack =
5820 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5822 // Known-target case is handled by LCallKnown.
5823 MOZ_ASSERT(!call->hasSingleTarget());
5825 masm.checkStackAlignment();
5827 masm.move32(Imm32(call->numActualArgs()), argcReg);
5829 // Nestle the StackPointer up to the argument vector.
5830 masm.freeStack(unusedStack);
5831 ensureOsiSpace();
5833 auto kind = call->mir()->isConstructing() ? IonGenericCallKind::Construct
5834 : IonGenericCallKind::Call;
5836 TrampolinePtr genericCallStub =
5837 gen->jitRuntime()->getIonGenericCallStub(kind);
5838 uint32_t callOffset = masm.callJit(genericCallStub);
5839 markSafepointAt(callOffset, call);
// The trampoline entered the callee's realm; switch back to ours here.
// If the callee threw, the exception handler restores the realm instead.
5841 if (call->mir()->maybeCrossRealm()) {
5842 static_assert(!JSReturnOperand.aliases(ReturnReg),
5843 "ReturnReg available as scratch after scripted calls");
5844 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5847 // Restore stack pointer.
5848 masm.setFramePushed(frameSize());
5849 emitRestoreStackPointerFromFP();
5851 // If the return value of the constructing function is Primitive,
5852 // replace the return value with the Object from CreateThis.
5853 if (call->mir()->isConstructing()) {
5854 Label notPrimitive;
5855 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5856 &notPrimitive);
// |this| from CreateThis still lives at |unusedStack| above the stack
// pointer at this point; reload it as the call's result.
5857 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
5858 JSReturnOperand);
5859 #ifdef DEBUG
5860 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5861 &notPrimitive);
5862 masm.assumeUnreachable("CreateThis creates an object");
5863 #endif
5864 masm.bind(&notPrimitive);
// Emit a loop that slides |argc| stack Values down by one Value slot
// (8 bytes), overwriting the Value currently at the top of the stack.
// Used by the fun_call and bound-function paths to drop/realign arguments.
// Clobbers |curr|, |end| and |scratch|; jumps to |done| when finished.
// Note: the copy proceeds one word (uintptr_t) at a time, so on 32-bit
// platforms two iterations move each Value — the net effect is the same
// 8-byte shift.
5868 void JitRuntime::generateIonGenericCallArgumentsShift(
5869 MacroAssembler& masm, Register argc, Register curr, Register end,
5870 Register scratch, Label* done) {
5871 static_assert(sizeof(Value) == 8);
5872 // There are |argc| Values on the stack. Shift them all down by 8 bytes,
5873 // overwriting the first value.
5875 // Initialize `curr` to the destination of the first copy, and `end` to the
5876 // final value of curr.
5877 masm.moveStackPtrTo(curr);
5878 masm.computeEffectiveAddress(BaseValueIndex(curr, argc), end);
5880 Label loop;
5881 masm.bind(&loop);
5882 masm.branchPtr(Assembler::Equal, curr, end, done);
5883 masm.loadPtr(Address(curr, 8), scratch);
5884 masm.storePtr(scratch, Address(curr, 0));
5885 masm.addPtr(Imm32(sizeof(uintptr_t)), curr);
5886 masm.jump(&loop);
// Generate the shared trampoline used by LCallGeneric (one variant for
// [[Call]], one for [[Construct]]). It dispatches on the callee's kind:
// jit-entry functions are tail-called (via the arguments rectifier if
// argc < nargs), natives get an exit frame built here, fun_call and bound
// functions are unwrapped and re-enter the trampoline, and everything else
// falls back to the jit::InvokeFunction VM wrapper.
5889 void JitRuntime::generateIonGenericCallStub(MacroAssembler& masm,
5890 IonGenericCallKind kind) {
5891 AutoCreatedBy acb(masm, "JitRuntime::generateIonGenericCallStub");
5892 ionGenericCallStubOffset_[kind] = startTrampolineCode(masm);
5894 // This code is tightly coupled with visitCallGeneric.
5896 // Upon entry:
5897 // IonGenericCallCalleeReg contains a pointer to the callee object.
5898 // IonGenericCallArgcReg contains the number of actual args.
5899 // The arguments have been pushed onto the stack:
5900 // [newTarget] (iff isConstructing)
5901 // [argN]
5902 // ...
5903 // [arg1]
5904 // [arg0]
5905 // [this]
5906 // <return address> (if not JS_USE_LINK_REGISTER)
5908 // This trampoline is responsible for entering the callee's realm,
5909 // massaging the stack into the right shape, and then performing a
5910 // tail call. We will return directly to the Ion code from the
5911 // callee.
5913 // To do a tail call, we keep the return address in a register, even
5914 // on platforms that don't normally use a link register, and push it
5915 // just before jumping to the callee, after we are done setting up
5916 // the stack.
5918 // The caller is responsible for switching back to the caller's
5919 // realm and cleaning up the stack.
5921 Register calleeReg = IonGenericCallCalleeReg;
5922 Register argcReg = IonGenericCallArgcReg;
5923 Register scratch = IonGenericCallScratch;
5924 Register scratch2 = IonGenericCallScratch2;
5926 #ifndef JS_USE_LINK_REGISTER
5927 Register returnAddrReg = IonGenericCallReturnAddrReg;
5928 masm.pop(returnAddrReg);
5929 #endif
5931 #ifdef JS_CODEGEN_ARM
5932 // The default second scratch register on arm is lr, which we need
5933 // preserved for tail calls.
5934 AutoNonDefaultSecondScratchRegister andssr(masm, IonGenericSecondScratchReg);
5935 #endif
5937 bool isConstructing = kind == IonGenericCallKind::Construct;
// |entry| is also the re-entry point for the fun_call and bound-function
// paths after they have rewritten calleeReg/argc/stack.
5939 Label entry, notFunction, noJitEntry, vmCall;
5940 masm.bind(&entry);
5942 // Guard that the callee is actually a function.
5943 masm.branchTestObjIsFunction(Assembler::NotEqual, calleeReg, scratch,
5944 calleeReg, &notFunction);
5946 // Guard that the callee supports the [[Call]] or [[Construct]] operation.
5947 // If these tests fail, we will call into the VM to throw an exception.
5948 if (isConstructing) {
5949 masm.branchTestFunctionFlags(calleeReg, FunctionFlags::CONSTRUCTOR,
5950 Assembler::Zero, &vmCall);
5951 } else {
5952 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
5953 calleeReg, scratch, &vmCall);
5956 if (isConstructing) {
5957 // Use the slow path if CreateThis was unable to create the |this| object.
5958 Address thisAddr(masm.getStackPointer(), 0);
5959 masm.branchTestNull(Assembler::Equal, thisAddr, &vmCall);
5962 masm.switchToObjectRealm(calleeReg, scratch);
5964 // Load jitCodeRaw for callee if it exists.
5965 masm.branchIfFunctionHasNoJitEntry(calleeReg, isConstructing, &noJitEntry);
5967 // ****************************
5968 // * Functions with jit entry *
5969 // ****************************
5970 masm.loadJitCodeRaw(calleeReg, scratch2);
5972 // Construct the JitFrameLayout.
5973 masm.PushCalleeToken(calleeReg, isConstructing);
5974 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcReg, scratch);
5975 #ifndef JS_USE_LINK_REGISTER
5976 masm.push(returnAddrReg);
5977 #endif
5979 // Check whether we need a rectifier frame.
5980 Label noRectifier;
5981 masm.loadFunctionArgCount(calleeReg, scratch);
5982 masm.branch32(Assembler::BelowOrEqual, scratch, argcReg, &noRectifier);
5984 // Tail-call the arguments rectifier.
5985 // Because all trampolines are created at the same time,
5986 // we can't create a TrampolinePtr for the arguments rectifier,
5987 // because it hasn't been linked yet. We can, however, directly
5988 // encode its offset.
5989 Label rectifier;
5990 bindLabelToOffset(&rectifier, argumentsRectifierOffset_);
5992 masm.jump(&rectifier);
5995 // Tail call the jit entry.
5996 masm.bind(&noRectifier);
5997 masm.jump(scratch2);
5999 // ********************
6000 // * Native functions *
6001 // ********************
6002 masm.bind(&noJitEntry);
6003 if (!isConstructing) {
// fun_call may rewrite the callee/arguments and jump back to |entry|.
6004 generateIonGenericCallFunCall(masm, &entry, &vmCall);
6006 generateIonGenericCallNativeFunction(masm, isConstructing);
6008 // *******************
6009 // * Bound functions *
6010 // *******************
6011 // TODO: support class hooks?
6012 masm.bind(&notFunction);
6013 if (!isConstructing) {
6014 // TODO: support generic bound constructors?
6015 generateIonGenericCallBoundFunction(masm, &entry, &vmCall);
6018 // ********************
6019 // * Fallback VM call *
6020 // ********************
6021 masm.bind(&vmCall);
// Push the InvokeFunction arguments in reverse order, then tail-call the
// pre-generated VM wrapper for jit::InvokeFunction by its recorded offset.
6023 masm.push(masm.getStackPointer()); // argv
6024 masm.push(argcReg); // argc
6025 masm.push(Imm32(false)); // ignores return value
6026 masm.push(Imm32(isConstructing)); // constructing
6027 masm.push(calleeReg); // callee
6029 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6030 MutableHandleValue);
6031 VMFunctionId id = VMFunctionToId<Fn, jit::InvokeFunction>::id;
6032 uint32_t invokeFunctionOffset = functionWrapperOffsets_[size_t(id)];
6033 Label invokeFunctionVMEntry;
6034 bindLabelToOffset(&invokeFunctionVMEntry, invokeFunctionOffset);
6036 masm.pushFrameDescriptor(FrameType::IonJS);
6037 #ifndef JS_USE_LINK_REGISTER
6038 masm.push(returnAddrReg);
6039 #endif
6040 masm.jump(&invokeFunctionVMEntry);
// Trampoline tail for native (non-jit-entry) callees: build a native exit
// frame, call the C++ native through the ABI, check its boolean result, and
// load the returned Value from the exit frame's result slot. Unlike the
// jit-entry paths this is a real call/ret, not a tail call.
6043 void JitRuntime::generateIonGenericCallNativeFunction(MacroAssembler& masm,
6044 bool isConstructing) {
6045 Register calleeReg = IonGenericCallCalleeReg;
6046 Register argcReg = IonGenericCallArgcReg;
6047 Register scratch = IonGenericCallScratch;
6048 Register scratch2 = IonGenericCallScratch2;
6049 Register contextReg = IonGenericCallScratch3;
6050 #ifndef JS_USE_LINK_REGISTER
6051 Register returnAddrReg = IonGenericCallReturnAddrReg;
6052 #endif
6054 // Push a value containing the callee, which will become argv[0].
6055 masm.pushValue(JSVAL_TYPE_OBJECT, calleeReg);
6057 // Load the callee address into calleeReg.
6058 #ifdef JS_SIMULATOR
6059 masm.movePtr(ImmPtr(RedirectedCallAnyNative()), calleeReg);
6060 #else
6061 masm.loadPrivate(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6062 calleeReg);
6063 #endif
6065 // Load argv into scratch2.
6066 masm.moveStackPtrTo(scratch2);
6068 // Push argc.
6069 masm.push(argcReg);
6071 masm.loadJSContext(contextReg);
6073 // Construct native exit frame. Note that unlike other cases in this
6074 // trampoline, this code does not use a tail call.
6075 masm.pushFrameDescriptor(FrameType::IonJS);
6076 #ifdef JS_USE_LINK_REGISTER
6077 masm.pushReturnAddress();
6078 #else
6079 masm.push(returnAddrReg);
6080 #endif
6082 masm.push(FramePointer);
6083 masm.moveStackPtrTo(FramePointer);
6084 masm.enterFakeExitFrameForNative(contextReg, scratch, isConstructing);
// Call the native: JSNative(cx, argc, vp), with vp == argv here.
6086 masm.setupUnalignedABICall(scratch);
6087 masm.passABIArg(contextReg); // cx
6088 masm.passABIArg(argcReg); // argc
6089 masm.passABIArg(scratch2); // argv
6091 masm.callWithABI(calleeReg);
6093 // Test for failure.
6094 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
// On success the native stored its result in the exit frame's result slot.
6096 masm.loadValue(
6097 Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
6098 JSReturnOperand);
6100 // Leave the exit frame.
6101 masm.moveToStackPtr(FramePointer);
6102 masm.pop(FramePointer);
6104 // Return.
6105 masm.ret();
// Fast path for Function.prototype.call: if the native callee is exactly
// js::fun_call, rewrite the stack in place (|this| becomes the callee, the
// arguments shift down one slot, argc decrements) and jump back to |entry|
// to perform the inner call. Bails to |vmCall| if |this| is not an object.
// Falls through (past |notFunCall|) for any other native.
6108 void JitRuntime::generateIonGenericCallFunCall(MacroAssembler& masm,
6109 Label* entry, Label* vmCall) {
6110 Register calleeReg = IonGenericCallCalleeReg;
6111 Register argcReg = IonGenericCallArgcReg;
6112 Register scratch = IonGenericCallScratch;
6113 Register scratch2 = IonGenericCallScratch2;
6114 Register scratch3 = IonGenericCallScratch3;
6116 Label notFunCall;
6117 masm.branchPtr(Assembler::NotEqual,
6118 Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6119 ImmPtr(js::fun_call), &notFunCall);
6121 // In general, we can implement fun_call by replacing calleeReg with
6122 // |this|, sliding all the other arguments down, and decrementing argc.
6124 // *BEFORE* *AFTER*
6125 // [argN] argc = N+1 <padding>
6126 // ... [argN] argc = N
6127 // [arg1] ...
6128 // [arg0] [arg1] <- now arg0
6129 // [this] <- top of stack (aligned) [arg0] <- now this
6131 // The only exception is when argc is already 0, in which case instead
6132 // of shifting arguments down we replace [this] with UndefinedValue():
6134 // *BEFORE* *AFTER*
6135 // [this] argc = 0 [undef] argc = 0
6137 // After making this transformation, we can jump back to the beginning
6138 // of this trampoline to handle the inner call.
6140 // Guard that |this| is an object. If it is, replace calleeReg.
6141 masm.fallibleUnboxObject(Address(masm.getStackPointer(), 0), scratch, vmCall);
6142 masm.movePtr(scratch, calleeReg);
6144 Label hasArgs;
6145 masm.branch32(Assembler::NotEqual, argcReg, Imm32(0), &hasArgs);
6147 // No arguments. Replace |this| with |undefined| and start from the top.
6148 masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), 0));
6149 masm.jump(entry);
6151 masm.bind(&hasArgs);
6153 Label doneSliding;
6154 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6155 scratch3, &doneSliding);
6156 masm.bind(&doneSliding);
6157 masm.sub32(Imm32(1), argcReg);
6159 masm.jump(entry);
6161 masm.bind(&notFunCall);
// Handle BoundFunctionObject callees inside the generic call trampoline:
// replace the pushed |this| with the bound this, push the bound arguments
// (inline or out-of-line storage), update argc, load the bound target into
// calleeReg, and jump back to |entry| to call the target. Bails to |vmCall|
// for non-bound objects or when the combined argc would exceed
// JIT_ARGS_LENGTH_MAX.
6164 void JitRuntime::generateIonGenericCallBoundFunction(MacroAssembler& masm,
6165 Label* entry,
6166 Label* vmCall) {
6167 Register calleeReg = IonGenericCallCalleeReg;
6168 Register argcReg = IonGenericCallArgcReg;
6169 Register scratch = IonGenericCallScratch;
6170 Register scratch2 = IonGenericCallScratch2;
6171 Register scratch3 = IonGenericCallScratch3;
6173 masm.branchTestObjClass(Assembler::NotEqual, calleeReg,
6174 &BoundFunctionObject::class_, scratch, calleeReg,
6175 vmCall);
6177 Address targetSlot(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
6178 Address flagsSlot(calleeReg, BoundFunctionObject::offsetOfFlagsSlot());
6179 Address thisSlot(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
6180 Address firstInlineArgSlot(
6181 calleeReg, BoundFunctionObject::offsetOfFirstInlineBoundArg());
6183 // Check that we won't be pushing too many arguments.
6184 masm.load32(flagsSlot, scratch);
6185 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6186 masm.add32(argcReg, scratch);
6187 masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX), vmCall);
6189 // The stack is currently correctly aligned for a jit call. We will
6190 // be updating the `this` value and potentially adding additional
6191 // arguments. On platforms with 16-byte alignment, if the number of
6192 // bound arguments is odd, we have to move the arguments that are
6193 // currently on the stack. For example, with one bound argument:
6195 // *BEFORE* *AFTER*
6196 // [argN] <padding>
6197 // ... [argN] |
6198 // [arg1] ... | These arguments have been
6199 // [arg0] [arg1] | shifted down 8 bytes.
6200 // [this] <- top of stack (aligned) [arg0] v
6201 // [bound0] <- one bound argument (odd)
6202 // [boundThis] <- top of stack (aligned)
6204 Label poppedThis;
6205 if (JitStackValueAlignment > 1) {
// Testing the lowest bit of the bound-arg count (post-shift) checks
// whether the number of bound arguments is odd.
6206 Label alreadyAligned;
6207 masm.branchTest32(Assembler::Zero, flagsSlot,
6208 Imm32(1 << BoundFunctionObject::NumBoundArgsShift),
6209 &alreadyAligned);
6211 // We have an odd number of bound arguments. Shift the existing arguments
6212 // down by 8 bytes.
6213 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6214 scratch3, &poppedThis);
6215 masm.bind(&alreadyAligned);
6218 // Pop the current `this`. It will be replaced with the bound `this`.
6219 masm.freeStack(sizeof(Value));
6220 masm.bind(&poppedThis);
6222 // Load the number of bound arguments in scratch
6223 masm.load32(flagsSlot, scratch);
6224 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6226 Label donePushingBoundArguments;
6227 masm.branch32(Assembler::Equal, scratch, Imm32(0),
6228 &donePushingBoundArguments);
6230 // Update argc to include bound arguments.
6231 masm.add32(scratch, argcReg);
6233 // Load &boundArgs[0] in scratch2.
// Bound args are stored inline in the object up to MaxInlineBoundArgs;
// beyond that, the first inline slot holds an object whose elements are
// the bound arguments.
6234 Label outOfLineBoundArguments, haveBoundArguments;
6235 masm.branch32(Assembler::Above, scratch,
6236 Imm32(BoundFunctionObject::MaxInlineBoundArgs),
6237 &outOfLineBoundArguments);
6238 masm.computeEffectiveAddress(firstInlineArgSlot, scratch2);
6239 masm.jump(&haveBoundArguments);
6241 masm.bind(&outOfLineBoundArguments);
6242 masm.unboxObject(firstInlineArgSlot, scratch2);
6243 masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2);
6245 masm.bind(&haveBoundArguments);
6247 // Load &boundArgs[numBoundArgs] in scratch.
6248 BaseObjectElementIndex lastBoundArg(scratch2, scratch);
6249 masm.computeEffectiveAddress(lastBoundArg, scratch);
6251 // Push the bound arguments, starting with the last one.
6252 // Copying pre-decrements scratch until scratch2 is reached.
6253 Label boundArgumentsLoop;
6254 masm.bind(&boundArgumentsLoop);
6255 masm.subPtr(Imm32(sizeof(Value)), scratch);
6256 masm.pushValue(Address(scratch, 0));
6257 masm.branchPtr(Assembler::Above, scratch, scratch2, &boundArgumentsLoop);
6258 masm.bind(&donePushingBoundArguments);
6260 // Push the bound `this`.
6261 masm.pushValue(thisSlot);
6263 // Load the target in calleeReg.
6264 masm.unboxObject(targetSlot, calleeReg);
6266 // At this point, all preconditions for entering the trampoline are met:
6267 // - calleeReg contains a pointer to the callee object
6268 // - argcReg contains the number of actual args (now including bound args)
6269 // - the arguments are on the stack with the correct alignment.
6270 // Instead of generating more code, we can jump back to the entry point
6271 // of the trampoline to call the bound target.
6272 masm.jump(entry);
// Emit a call to a single known jit-entry target: switch realms if needed,
// build the JitFrameLayout directly, and call the target's jitCodeRaw.
// Class constructors invoked without |new| take the InvokeFunction slow
// path so the VM can throw the appropriate error.
6275 void CodeGenerator::visitCallKnown(LCallKnown* call) {
6276 Register calleereg = ToRegister(call->getFunction());
6277 Register objreg = ToRegister(call->getTempObject());
6278 uint32_t unusedStack =
6279 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
6280 WrappedFunction* target = call->getSingleTarget();
6282 // Native single targets (except Wasm and TrampolineNative functions) are
6283 // handled by LCallNative.
6284 MOZ_ASSERT(target->hasJitEntry());
6286 // Missing arguments must have been explicitly appended by WarpBuilder.
6287 DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
6288 MOZ_ASSERT(target->nargs() <=
6289 call->mir()->numStackArgs() - numNonArgsOnStack);
6291 MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());
6293 masm.checkStackAlignment();
6295 if (target->isClassConstructor() && !call->isConstructing()) {
6296 emitCallInvokeFunction(call, calleereg, call->isConstructing(),
6297 call->ignoresReturnValue(), call->numActualArgs(),
6298 unusedStack);
6299 return;
6302 MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());
6304 MOZ_ASSERT(!call->mir()->needsThisCheck());
6306 if (call->mir()->maybeCrossRealm()) {
6307 masm.switchToObjectRealm(calleereg, objreg);
6310 masm.loadJitCodeRaw(calleereg, objreg);
6312 // Nestle the StackPointer up to the argument vector.
6313 masm.freeStack(unusedStack);
6315 // Construct the JitFrameLayout.
6316 masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
6317 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, call->numActualArgs());
6319 // Finally call the function in objreg.
6320 ensureOsiSpace();
6321 uint32_t callOffset = masm.callJit(objreg);
6322 markSafepointAt(callOffset, call);
6324 if (call->mir()->maybeCrossRealm()) {
6325 static_assert(!JSReturnOperand.aliases(ReturnReg),
6326 "ReturnReg available as scratch after scripted calls");
6327 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6330 // Restore stack pointer: pop JitFrameLayout fields still left on the stack
6331 // and undo the earlier |freeStack(unusedStack)|.
6332 int prefixGarbage =
6333 sizeof(JitFrameLayout) - JitFrameLayout::bytesPoppedAfterCall();
6334 masm.adjustStack(prefixGarbage - unusedStack);
6336 // If the return value of the constructing function is Primitive,
6337 // replace the return value with the Object from CreateThis.
6338 if (call->mir()->isConstructing()) {
6339 Label notPrimitive;
6340 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6341 &notPrimitive);
6342 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
6343 JSReturnOperand);
6344 #ifdef DEBUG
6345 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6346 &notPrimitive);
6347 masm.assumeUnreachable("CreateThis creates an object");
6348 #endif
6349 masm.bind(&notPrimitive);
// Apply/spread slow path: invoke the callee through jit::InvokeFunction,
// using the current stack pointer as argv. |T| is one of the LApply*/
// LConstruct* instructions; it must already have pushed its arguments.
6353 template <typename T>
6354 void CodeGenerator::emitCallInvokeFunction(T* apply) {
6355 Register objreg = ToRegister(apply->getTempObject());
6357 // Push the space used by the arguments.
6358 masm.moveStackPtrTo(objreg);
6360 pushArg(objreg); // argv.
6361 pushArg(ToRegister(apply->getArgc())); // argc.
6362 pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
6363 pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
6364 pushArg(ToRegister(apply->getFunction())); // JSFunction*.
6366 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6367 MutableHandleValue);
6368 callVM<Fn, jit::InvokeFunction>(apply);
6371 // Do not bailout after the execution of this function since the stack no
6372 // longer corresponds to what is expected by the snapshots.
// Reserve stack space for |argcreg| Values (plus one padding slot when argc
// is even and JitStackValueAlignment requires it). Clobbers |scratch|.
6373 void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
6374 Register scratch) {
6375 // Use scratch register to calculate stack space (including padding).
6376 masm.movePtr(argcreg, scratch);
6378 // Align the JitFrameLayout on the JitStackAlignment.
6379 if (JitStackValueAlignment > 1) {
6380 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6381 "Stack padding assumes that the frameSize is correct");
6382 MOZ_ASSERT(JitStackValueAlignment == 2);
6383 Label noPaddingNeeded;
6384 // if the number of arguments is odd, then we do not need any padding.
// (|this| is pushed later, so an odd argc yields an even Value count.)
6385 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6386 masm.addPtr(Imm32(1), scratch);
6387 masm.bind(&noPaddingNeeded);
6390 // Reserve space for copying the arguments.
6391 NativeObject::elementsSizeMustNotOverflow();
6392 masm.lshiftPtr(Imm32(ValueShift), scratch);
6393 masm.subFromStackPtr(scratch);
6395 #ifdef DEBUG
6396 // Put a magic value in the space reserved for padding. Note, this code
6397 // cannot be merged with the previous test, as not all architectures can
6398 // write below their stack pointers.
6399 if (JitStackValueAlignment > 1) {
6400 MOZ_ASSERT(JitStackValueAlignment == 2);
6401 Label noPaddingNeeded;
6402 // if the number of arguments is odd, then we do not need any padding.
6403 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6404 BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
6405 masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
6406 masm.bind(&noPaddingNeeded);
6408 #endif
6411 // Do not bailout after the execution of this function since the stack no
6412 // longer corresponds to what is expected by the snapshots.
// Construct variant of emitAllocateSpaceForApply: additionally pushes
// |new.target| (held in |newTargetAndScratch|) before reserving space for
// the arguments. The register is then reused as the scratch for the size
// computation, which is why padding must be pushed first here.
6413 void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
6414 Register argcreg, Register newTargetAndScratch) {
6415 // Align the JitFrameLayout on the JitStackAlignment. Contrary to
6416 // |emitAllocateSpaceForApply()|, we're always pushing a magic value, because
6417 // we can't write to |newTargetAndScratch| before |new.target| has
6418 // been pushed onto the stack.
6419 if (JitStackValueAlignment > 1) {
6420 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6421 "Stack padding assumes that the frameSize is correct");
6422 MOZ_ASSERT(JitStackValueAlignment == 2);
6424 Label noPaddingNeeded;
6425 // If the number of arguments is even, then we do not need any padding.
// (Including |this| and |new.target|, an even argc gives an even total.)
6426 masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
6427 masm.pushValue(MagicValue(JS_ARG_POISON));
6428 masm.bind(&noPaddingNeeded);
6431 // Push |new.target| after the padding value, but before any arguments.
6432 masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndScratch);
6434 // Use newTargetAndScratch to calculate stack space (including padding).
6435 masm.movePtr(argcreg, newTargetAndScratch);
6437 // Reserve space for copying the arguments.
6438 NativeObject::elementsSizeMustNotOverflow();
6439 masm.lshiftPtr(Imm32(ValueShift), newTargetAndScratch);
6440 masm.subFromStackPtr(newTargetAndScratch);
6443 // Destroys argvIndex and copyreg.
// Copy |argvIndex| Values from |argvSrcBase + argvSrcOffset| to the stack at
// |argvDstOffset|, iterating from the highest index down to 1. On 32-bit
// platforms each Value is copied as two pointer-sized words.
6444 void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
6445 Register argvIndex, Register copyreg,
6446 size_t argvSrcOffset,
6447 size_t argvDstOffset) {
6448 Label loop;
6449 masm.bind(&loop);
6451 // As argvIndex is off by 1, and we use the decBranchPtr instruction
6452 // to loop back, we have to subtract the size of the word which is
6453 // copied.
6454 BaseValueIndex srcPtr(argvSrcBase, argvIndex,
6455 int32_t(argvSrcOffset) - sizeof(void*));
6456 BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
6457 int32_t(argvDstOffset) - sizeof(void*));
6458 masm.loadPtr(srcPtr, copyreg);
6459 masm.storePtr(copyreg, dstPtr);
6461 // Handle 32 bits architectures.
6462 if (sizeof(Value) == 2 * sizeof(void*)) {
6463 BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
6464 int32_t(argvSrcOffset) - 2 * sizeof(void*));
6465 BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
6466 int32_t(argvDstOffset) - 2 * sizeof(void*));
6467 masm.loadPtr(srcPtrLow, copyreg);
6468 masm.storePtr(copyreg, dstPtrLow);
6471 masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
// Recompute the stack pointer as FramePointer - frameSize(). Used after
// calls with a dynamic number of arguments, where the amount pushed is not
// known statically.
6474 void CodeGenerator::emitRestoreStackPointerFromFP() {
6475 // This is used to restore the stack pointer after a call with a dynamic
6476 // number of arguments.
6478 MOZ_ASSERT(masm.framePushed() == frameSize());
6480 int32_t offset = -int32_t(frameSize());
6481 masm.computeEffectiveAddress(Address(FramePointer, offset),
6482 masm.getStackPointer());
// Copy |argcreg| actual arguments from this frame's JitFrameLayout (skipping
// |extraFormals| leading values) into the stack space previously reserved by
// emitAllocateSpaceForApply/Construct. Clobbers |scratch| and |copyreg|.
6485 void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
6486 Register copyreg, uint32_t extraFormals) {
6487 Label end;
6489 // Skip the copy of arguments if there are none.
6490 masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);
6492 // clang-format off
6494 // We are making a copy of the arguments which are above the JitFrameLayout
6495 // of the current Ion frame.
6497 // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
6499 // clang-format on
6501 // Compute the source and destination offsets into the stack.
6502 Register argvSrcBase = FramePointer;
6503 size_t argvSrcOffset =
6504 JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
6505 size_t argvDstOffset = 0;
6507 Register argvIndex = scratch;
6508 masm.move32(argcreg, argvIndex);
6510 // Copy arguments.
6511 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6512 argvDstOffset);
6514 // Join with all arguments copied and the extra stack usage computed.
6515 masm.bind(&end);
// Push arguments for fun.apply(thisArg, arguments-of-caller): allocate
// space, copy this frame's actual arguments, then push |this|.
6518 void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
6519 Register scratch) {
6520 // Holds the function nargs. Initially the number of args to the caller.
6521 Register argcreg = ToRegister(apply->getArgc());
6522 Register copyreg = ToRegister(apply->getTempObject());
6523 uint32_t extraFormals = apply->numExtraFormals();
6525 emitAllocateSpaceForApply(argcreg, scratch);
6527 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6529 // Push |this|.
6530 masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
// Push arguments for fun.apply(thisArg, argsObj) where argsObj is an
// ArgumentsObject: read argc from its length slot, copy the values out of
// its ArgumentsData, then push |this|. The argsObj register is repurposed
// to hold argc once the copy is done.
6533 void CodeGenerator::emitPushArguments(LApplyArgsObj* apply, Register scratch) {
6534 // argc and argsObj are mapped to the same calltemp register.
6535 MOZ_ASSERT(apply->getArgsObj() == apply->getArgc());
6537 Register tmpArgc = ToRegister(apply->getTempObject());
6538 Register argsObj = ToRegister(apply->getArgsObj());
6540 // Load argc into tmpArgc.
// The length slot packs the length with flag bits; shift them off.
6541 Address lengthAddr(argsObj, ArgumentsObject::getInitialLengthSlotOffset());
6542 masm.unboxInt32(lengthAddr, tmpArgc);
6543 masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), tmpArgc);
6545 // Allocate space on the stack for arguments. This modifies scratch.
6546 emitAllocateSpaceForApply(tmpArgc, scratch);
6548 // Load arguments data
6549 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
6550 argsObj);
6551 size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
6553 // This is the end of the lifetime of argsObj.
6554 // After this call, the argsObj register holds the argument count instead.
6555 emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);
6557 masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
// Copy |tmpArgc| Values from |srcBaseAndArgc + argvSrcOffset| into the
// already-reserved argument space on the stack. On return, |srcBaseAndArgc|
// holds the argument count (the source base's lifetime ends here).
6560 void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
6561 Register srcBaseAndArgc,
6562 Register scratch,
6563 size_t argvSrcOffset) {
6564 // Preconditions:
6565 // 1. |tmpArgc| * sizeof(Value) bytes have been allocated at the top of
6566 // the stack to hold arguments.
6567 // 2. |srcBaseAndArgc| + |srcOffset| points to an array of |tmpArgc| values.
6569 // Postconditions:
6570 // 1. The arguments at |srcBaseAndArgc| + |srcOffset| have been copied into
6571 // the allocated space.
6572 // 2. |srcBaseAndArgc| now contains the original value of |tmpArgc|.
6574 // |scratch| is used as a temp register within this function and clobbered.
6576 Label noCopy, epilogue;
6578 // Skip the copy of arguments if there are none.
6579 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
6581 // Copy the values. This code is skipped entirely if there are
6582 // no values.
6583 size_t argvDstOffset = 0;
6585 Register argvSrcBase = srcBaseAndArgc;
6586 Register copyreg = scratch;
// Save argc across the copy (the copy loop destroys its index register);
// the push also shifts the destination, hence the offset adjustment.
6588 masm.push(tmpArgc);
6589 Register argvIndex = tmpArgc;
6590 argvDstOffset += sizeof(void*);
6592 // Copy
6593 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6594 argvDstOffset);
6596 // Restore.
6597 masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
6598 masm.jump(&epilogue);
6600 // Clear argc if we skipped the copy step.
6601 masm.bind(&noCopy);
6602 masm.movePtr(ImmWord(0), srcBaseAndArgc);
6604 // Join with all arguments copied and the extra stack usage computed.
6605 // Note, "srcBase" has become "argc".
6606 masm.bind(&epilogue);
// Push arguments for fun.apply(thisArg, array): use the array's length as
// argc, copy its dense elements onto the stack, then push |this|.
6609 void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply,
6610 Register scratch) {
6611 Register tmpArgc = ToRegister(apply->getTempObject());
6612 Register elementsAndArgc = ToRegister(apply->getElements());
6614 // Invariants guarded in the caller:
6615 // - the array is not too long
6616 // - the array length equals its initialized length
6618 // The array length is our argc for the purposes of allocating space.
6619 Address length(ToRegister(apply->getElements()),
6620 ObjectElements::offsetOfLength());
6621 masm.load32(length, tmpArgc);
6623 // Allocate space for the values.
6624 emitAllocateSpaceForApply(tmpArgc, scratch);
6626 // After this call "elements" has become "argc".
6627 size_t elementsOffset = 0;
6628 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6630 // Push |this|.
6631 masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
// Push arguments for Reflect.construct-style spread of this frame's actual
// arguments: push |new.target|, allocate space, copy the arguments, then
// push |this|. |scratch| doubles as the new.target register on entry.
6634 void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct,
6635 Register scratch) {
6636 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6638 // Holds the function nargs. Initially the number of args to the caller.
6639 Register argcreg = ToRegister(construct->getArgc());
6640 Register copyreg = ToRegister(construct->getTempObject());
6641 uint32_t extraFormals = construct->numExtraFormals();
6643 // Allocate space for the values.
6644 // After this call "newTarget" has become "scratch".
6645 emitAllocateSpaceForConstructAndPushNewTarget(argcreg, scratch);
6647 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6649 // Push |this|.
6650 masm.pushValue(ToValue(construct, LConstructArgsGeneric::ThisIndex));
// Push the stack arguments for constructing with a spread array: argc is
// loaded from the array length, newTarget is pushed, the elements are copied
// onto the stack, then |this| is pushed.
6653 void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct,
6654 Register scratch) {
// scratch aliases newTarget; newTarget must not be used after the
// allocation call below overwrites it.
6655 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6657 Register tmpArgc = ToRegister(construct->getTempObject());
6658 Register elementsAndArgc = ToRegister(construct->getElements());
6660 // Invariants guarded in the caller:
6661 // - the array is not too long
6662 // - the array length equals its initialized length
6664 // The array length is our argc for the purposes of allocating space.
6665 Address length(ToRegister(construct->getElements()),
6666 ObjectElements::offsetOfLength());
6667 masm.load32(length, tmpArgc);
6669 // Allocate space for the values.
6670 emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, scratch);
6672 // After this call "elements" has become "argc" and "newTarget" has become
6673 // "scratch".
6674 size_t elementsOffset = 0;
6675 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6677 // Push |this|.
6678 masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
// Shared code generation for all apply/construct-style LIR ops
// (ApplyArgs, ApplyArray, ApplyArgsObj, ConstructArgs, ConstructArray).
// Pushes the arguments, then either calls JIT code directly (with an
// optional arguments-rectifier trampoline on argc underflow) or falls back
// to the VM's InvokeFunction for native/uncompiled callees.
6681 template <typename T>
6682 void CodeGenerator::emitApplyGeneric(T* apply) {
6683 // Holds the function object.
6684 Register calleereg = ToRegister(apply->getFunction());
6686 // Temporary register for modifying the function object.
6687 Register objreg = ToRegister(apply->getTempObject());
6688 Register scratch = ToRegister(apply->getTempForArgCopy());
6690 // Holds the function nargs, computed in the invoker or (for ApplyArray,
6691 // ConstructArray, or ApplyArgsObj) in the argument pusher.
6692 Register argcreg = ToRegister(apply->getArgc());
6694 // Copy the arguments of the current function.
6696 // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also
6697 // compute argc. The argc register and the elements/argsObj register
6698 // are the same; argc must not be referenced before the call to
6699 // emitPushArguments() and elements/argsObj must not be referenced
6700 // after it returns.
6702 // In the case of ConstructArray or ConstructArgs, also overwrite newTarget
6703 // with scratch; newTarget must not be referenced after this point.
6705 // objreg is dead across this call.
6706 emitPushArguments(apply, scratch);
6708 masm.checkStackAlignment();
6710 bool constructing = apply->mir()->isConstructing();
6712 // If the function is native, only emit the call to InvokeFunction.
6713 if (apply->hasSingleTarget() &&
6714 apply->getSingleTarget()->isNativeWithoutJitEntry()) {
6715 emitCallInvokeFunction(apply);
6717 #ifdef DEBUG
6718 // Native constructors are guaranteed to return an Object value, so we never
6719 // have to replace a primitive result with the previously allocated Object
6720 // from CreateThis.
6721 if (constructing) {
6722 Label notPrimitive;
6723 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6724 &notPrimitive);
6725 masm.assumeUnreachable("native constructors don't return primitives");
6726 masm.bind(&notPrimitive);
6728 #endif
6730 emitRestoreStackPointerFromFP();
6731 return;
// Fast path: calleereg may be JIT-compiled. Any guard failure below jumps
// to |invoke|, the slow VM-call path.
6734 Label end, invoke;
6736 // Unless already known, guard that calleereg is actually a function object.
6737 if (!apply->hasSingleTarget()) {
6738 masm.branchTestObjIsFunction(Assembler::NotEqual, calleereg, objreg,
6739 calleereg, &invoke);
6742 // Guard that calleereg is an interpreted function with a JSScript.
6743 masm.branchIfFunctionHasNoJitEntry(calleereg, constructing, &invoke);
6745 // Guard that callee allows the [[Call]] or [[Construct]] operation required.
6746 if (constructing) {
6747 masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
6748 Assembler::Zero, &invoke);
6749 } else {
6750 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
6751 calleereg, objreg, &invoke);
6754 // Use the slow path if CreateThis was unable to create the |this| object.
6755 if (constructing) {
6756 Address thisAddr(masm.getStackPointer(), 0);
6757 masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
6760 // Call with an Ion frame or a rectifier frame.
6762 if (apply->mir()->maybeCrossRealm()) {
6763 masm.switchToObjectRealm(calleereg, objreg);
6766 // Knowing that calleereg is a non-native function, load jitcode.
6767 masm.loadJitCodeRaw(calleereg, objreg);
6769 masm.PushCalleeToken(calleereg, constructing);
6770 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcreg, scratch);
6772 Label underflow, rejoin;
6774 // Check whether the provided arguments satisfy target argc.
6775 if (!apply->hasSingleTarget()) {
6776 Register nformals = scratch;
6777 masm.loadFunctionArgCount(calleereg, nformals);
6778 masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
6779 } else {
6780 masm.branch32(Assembler::Below, argcreg,
6781 Imm32(apply->getSingleTarget()->nargs()), &underflow);
6784 // Skip the construction of the rectifier frame because we have no
6785 // underflow.
6786 masm.jump(&rejoin);
6788 // Argument fixup needed. Get ready to call the argumentsRectifier.
6790 masm.bind(&underflow);
6792 // Hardcode the address of the argumentsRectifier code.
6793 TrampolinePtr argumentsRectifier =
6794 gen->jitRuntime()->getArgumentsRectifier();
6795 masm.movePtr(argumentsRectifier, objreg);
6798 masm.bind(&rejoin);
6800 // Finally call the function in objreg, as assigned by one of the paths
6801 // above.
6802 ensureOsiSpace();
6803 uint32_t callOffset = masm.callJit(objreg);
6804 markSafepointAt(callOffset, apply);
6806 if (apply->mir()->maybeCrossRealm()) {
6807 static_assert(!JSReturnOperand.aliases(ReturnReg),
6808 "ReturnReg available as scratch after scripted calls");
6809 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6812 // Discard JitFrameLayout fields still left on the stack.
6813 masm.freeStack(sizeof(JitFrameLayout) -
6814 JitFrameLayout::bytesPoppedAfterCall());
6815 masm.jump(&end);
6818 // Handle uncompiled or native functions.
6820 masm.bind(&invoke);
6821 emitCallInvokeFunction(apply);
6824 masm.bind(&end);
6826 // If the return value of the constructing function is Primitive,
6827 // replace the return value with the Object from CreateThis.
6828 if (constructing) {
6829 Label notPrimitive;
6830 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6831 &notPrimitive);
6832 masm.loadValue(Address(masm.getStackPointer(), 0), JSReturnOperand);
6834 #ifdef DEBUG
6835 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6836 &notPrimitive);
6837 masm.assumeUnreachable("CreateThis creates an object");
6838 #endif
6840 masm.bind(&notPrimitive);
6843 // Pop arguments and continue.
6844 emitRestoreStackPointerFromFP();
// fun.apply(thisArg, arguments) with a known argc: bail out if argc exceeds
// the JIT's supported maximum, then emit the generic apply path.
6847 void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
6848 LSnapshot* snapshot = apply->snapshot();
6849 Register argcreg = ToRegister(apply->getArgc());
6851 // Ensure that we have a reasonable number of arguments.
6852 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6854 emitApplyGeneric(apply);
// fun.apply(thisArg, argumentsObject): load the arguments-object length
// (bailing if it is not available or too large), then emit the generic
// apply path.
6857 void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
6858 Register argsObj = ToRegister(apply->getArgsObj());
6859 Register temp = ToRegister(apply->getTempObject());
// loadArgumentsObjectLength itself can branch to |bail| on failure.
6861 Label bail;
6862 masm.loadArgumentsObjectLength(argsObj, temp, &bail);
6863 masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
6864 bailoutFrom(&bail, apply->snapshot());
6866 emitApplyGeneric(apply);
// fun.apply(thisArg, array): bail out when the array is too long or has
// holes (length != initializedLength), then emit the generic apply path.
6869 void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
6870 LSnapshot* snapshot = apply->snapshot();
6871 Register tmp = ToRegister(apply->getTempObject());
6873 Address length(ToRegister(apply->getElements()),
6874 ObjectElements::offsetOfLength());
6875 masm.load32(length, tmp);
6877 // Ensure that we have a reasonable number of arguments.
6878 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6880 // Ensure that the array does not contain an uninitialized tail.
// tmp = length - initializedLength; any nonzero difference means holes.
6882 Address initializedLength(ToRegister(apply->getElements()),
6883 ObjectElements::offsetOfInitializedLength());
6884 masm.sub32(initializedLength, tmp);
6885 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
6887 emitApplyGeneric(apply);
// Construct with spread of the caller's arguments: bail if argc exceeds the
// JIT maximum, then emit the shared apply/construct path.
6890 void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
6891 LSnapshot* snapshot = lir->snapshot();
6892 Register argcreg = ToRegister(lir->getArgc());
6894 // Ensure that we have a reasonable number of arguments.
6895 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6897 emitApplyGeneric(lir);
// Construct with a spread array: bail out when the array is too long or has
// holes (length != initializedLength), then emit the shared path.
6900 void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
6901 LSnapshot* snapshot = lir->snapshot();
6902 Register tmp = ToRegister(lir->getTempObject());
6904 Address length(ToRegister(lir->getElements()),
6905 ObjectElements::offsetOfLength());
6906 masm.load32(length, tmp);
6908 // Ensure that we have a reasonable number of arguments.
6909 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6911 // Ensure that the array does not contain an uninitialized tail.
// tmp = length - initializedLength; nonzero means an uninitialized tail.
6913 Address initializedLength(ToRegister(lir->getElements()),
6914 ObjectElements::offsetOfInitializedLength());
6915 masm.sub32(initializedLength, tmp);
6916 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
6918 emitApplyGeneric(lir);
6921 void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
6923 void CodeGenerator::visitUnreachable(LUnreachable* lir) {
6924 masm.assumeUnreachable("end-of-block assumed unreachable");
6927 void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) {
6928 encode(lir->snapshot());
6931 void CodeGenerator::visitUnreachableResultV(LUnreachableResultV* lir) {
6932 masm.assumeUnreachable("must be unreachable");
6935 void CodeGenerator::visitUnreachableResultT(LUnreachableResultT* lir) {
6936 masm.assumeUnreachable("must be unreachable");
6939 // Out-of-line path to report over-recursed error and fail.
6940 class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> {
// The guarded instruction; its live registers are saved/restored around the
// VM call made by the out-of-line handler.
6941 LInstruction* lir_;
6943 public:
6944 explicit CheckOverRecursedFailure(LInstruction* lir) : lir_(lir) {}
// Double-dispatch into CodeGenerator::visitCheckOverRecursedFailure.
6946 void accept(CodeGenerator* codegen) override {
6947 codegen->visitCheckOverRecursedFailure(this);
6950 LInstruction* lir() const { return lir_; }
// Emit the stack-overflow guard: compare the stack pointer against the
// runtime's JIT stack limit and jump to an out-of-line failure path that
// throws an over-recursion error.
6953 void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) {
6954 // If we don't push anything on the stack, skip the check.
6955 if (omitOverRecursedCheck()) {
6956 return;
6959 // Ensure that this frame will not cross the stack limit.
6960 // This is a weak check, justified by Ion using the C stack: we must always
6961 // be some distance away from the actual limit, since if the limit is
6962 // crossed, an error must be thrown, which requires more frames.
6964 // It must always be possible to trespass past the stack limit.
6965 // Ion may legally place frames very close to the limit. Calling additional
6966 // C functions may then violate the limit without any checking.
6968 // Since Ion frames exist on the C stack, the stack limit may be
6969 // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().
6971 CheckOverRecursedFailure* ool = new (alloc()) CheckOverRecursedFailure(lir);
6972 addOutOfLineCode(ool, lir->mir());
6974 // Conditional forward (unlikely) branch to failure.
6975 const void* limitAddr = gen->runtime->addressOfJitStackLimit();
6976 masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
6977 ool->entry());
6978 masm.bind(ool->rejoin());
// Out-of-line handler for the over-recursion guard: saves live registers,
// calls the CheckOverRecursed VM function (which throws), restores the
// registers, and rejoins the inline path.
6981 void CodeGenerator::visitCheckOverRecursedFailure(
6982 CheckOverRecursedFailure* ool) {
6983 // The OOL path is hit if the recursion depth has been exceeded.
6984 // Throw an InternalError for over-recursion.
6986 // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
6987 // to save all live registers to avoid crashes if CheckOverRecursed triggers
6988 // a GC.
6989 saveLive(ool->lir());
6991 using Fn = bool (*)(JSContext*);
6992 callVM<Fn, CheckOverRecursed>(ool->lir());
6994 restoreLive(ool->lir());
6995 masm.jump(ool->rejoin());
// Create (and cache in scriptCounts_) per-basic-block profiling counters when
// profiling is enabled for this compilation; returns nullptr when profiling
// is off, for wasm, or on OOM.
6998 IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() {
6999 // If scripts are being profiled, create a new IonScriptCounts for the
7000 // profiling data, which will be attached to the associated JSScript or
7001 // wasm module after code generation finishes.
7002 if (!gen->hasProfilingScripts()) {
7003 return nullptr;
7006 // This test inhibits IonScriptCount creation for wasm code which is
7007 // currently incompatible with wasm codegen for two reasons: (1) wasm code
7008 // must be serializable and script count codegen bakes in absolute
7009 // addresses, (2) wasm code does not have a JSScript with which to associate
7010 // code coverage data.
7011 JSScript* script = gen->outerInfo().script();
7012 if (!script) {
7013 return nullptr;
7016 auto counts = MakeUnique<IonScriptCounts>();
7017 if (!counts || !counts->init(graph.numBlocks())) {
7018 return nullptr;
// Record, for every MIR block, an offset in the outermost script and the
// ids of its successor blocks.
7021 for (size_t i = 0; i < graph.numBlocks(); i++) {
7022 MBasicBlock* block = graph.getBlock(i)->mir();
7024 uint32_t offset = 0;
7025 char* description = nullptr;
7026 if (MResumePoint* resume = block->entryResumePoint()) {
7027 // Find a PC offset in the outermost script to use. If this
7028 // block is from an inlined script, find a location in the
7029 // outer script to associate information about the inlining
7030 // with.
7031 while (resume->caller()) {
7032 resume = resume->caller();
7034 offset = script->pcToOffset(resume->pc());
7036 if (block->entryResumePoint()->caller()) {
7037 // Get the filename and line number of the inner script.
7038 JSScript* innerScript = block->info().script();
7039 description = js_pod_calloc<char>(200);
7040 if (description) {
7041 snprintf(description, 200, "%s:%u", innerScript->filename(),
7042 innerScript->lineno());
7047 if (!counts->block(i).init(block->id(), offset, description,
7048 block->numSuccessors())) {
7049 return nullptr;
7052 for (size_t j = 0; j < block->numSuccessors(); j++) {
7053 counts->block(i).setSuccessor(
7054 j, skipTrivialBlocks(block->getSuccessor(j))->id());
// Ownership of the counts object transfers to the scriptCounts_ member.
7058 scriptCounts_ = counts.release();
7059 return scriptCounts_;
7062 // Structure for managing the state tracked for a block by script counters.
// RAII helper: on init() it bumps the block's hit counter and attaches a
// printer to the MacroAssembler; on destruction it detaches the printer and
// stores the collected assembly text on the block's counts.
7063 struct ScriptCountBlockState {
7064 IonBlockCounts& block;
7065 MacroAssembler& masm;
// Accumulates human-readable assembly emitted while this block is active.
7067 Sprinter printer;
7069 public:
7070 ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
7071 : block(*block), masm(*masm), printer(GetJitContext()->cx, false) {}
7073 bool init() {
7074 if (!printer.init()) {
7075 return false;
7078 // Bump the hit count for the block at the start. This code is not
7079 // included in either the text for the block or the instruction byte
7080 // counts.
7081 masm.inc64(AbsoluteAddress(block.addressOfHitCount()));
7083 // Collect human readable assembly for the code generated in the block.
7084 masm.setPrinter(&printer);
7086 return true;
// Called once per LIR instruction to tag the assembly stream with the
// instruction's name (spew builds only).
7089 void visitInstruction(LInstruction* ins) {
7090 #ifdef JS_JITSPEW
7091 // Prefix stream of assembly instructions with their LIR instruction
7092 // name and any associated high level info.
7093 if (const char* extra = ins->getExtraName()) {
7094 printer.printf("[%s:%s]\n", ins->opName(), extra);
7095 } else {
7096 printer.printf("[%s]\n", ins->opName());
7098 #endif
7101 ~ScriptCountBlockState() {
7102 masm.setPrinter(nullptr);
7104 if (JS::UniqueChars str = printer.release()) {
7105 block.setCode(str.get());
// Branch to |invalidated| when this IonScript has been invalidated. The
// IonScript pointer is loaded via a move whose immediate is patched later
// (the CodeOffset is recorded in ionScriptLabels_ for patching).
7110 void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) {
7111 CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
7112 masm.propagateOOM(ionScriptLabels_.append(label));
7114 // If IonScript::invalidationCount_ != 0, the script has been invalidated.
7115 masm.branch32(Assembler::NotEqual,
7116 Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0),
7117 invalidated);
7120 #ifdef DEBUG
// Debug-only check that |input| holds a valid GC pointer of the MIR type
// (Object/String/Symbol/BigInt), by calling the matching AssertValid* C++
// helper. Skipped when the script has been invalidated.
7121 void CodeGenerator::emitAssertGCThingResult(Register input,
7122 const MDefinition* mir) {
7123 MIRType type = mir->type();
7124 MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
7125 type == MIRType::Symbol || type == MIRType::BigInt);
7127 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7128 regs.take(input);
// temp is pushed/popped manually since this runs outside normal regalloc.
7130 Register temp = regs.takeAny();
7131 masm.push(temp);
7133 // Don't check if the script has been invalidated. In that case invalid
7134 // types are expected (until we reach the OsiPoint and bailout).
7135 Label done;
7136 branchIfInvalidated(temp, &done);
7138 # ifndef JS_SIMULATOR
7139 // Check that we have a valid GC pointer.
7140 // Disable for wasm because we don't have a context on wasm compilation
7141 // threads and this needs a context.
7142 // Also disable for simulator builds because the C++ call is a lot slower
7143 // there than on actual hardware.
7144 if (JitOptions.fullDebugChecks && !IsCompilingWasm()) {
7145 saveVolatile();
7146 masm.setupUnalignedABICall(temp);
7147 masm.loadJSContext(temp);
7148 masm.passABIArg(temp);
7149 masm.passABIArg(input);
// Dispatch to the assert helper matching the MIR result type.
7151 switch (type) {
7152 case MIRType::Object: {
7153 using Fn = void (*)(JSContext* cx, JSObject* obj);
7154 masm.callWithABI<Fn, AssertValidObjectPtr>();
7155 break;
7157 case MIRType::String: {
7158 using Fn = void (*)(JSContext* cx, JSString* str);
7159 masm.callWithABI<Fn, AssertValidStringPtr>();
7160 break;
7162 case MIRType::Symbol: {
7163 using Fn = void (*)(JSContext* cx, JS::Symbol* sym);
7164 masm.callWithABI<Fn, AssertValidSymbolPtr>();
7165 break;
7167 case MIRType::BigInt: {
7168 using Fn = void (*)(JSContext* cx, JS::BigInt* bi);
7169 masm.callWithABI<Fn, AssertValidBigIntPtr>();
7170 break;
7172 default:
7173 MOZ_CRASH();
7176 restoreVolatile();
7178 # endif
7180 masm.bind(&done);
7181 masm.pop(temp);
// Debug-only check that a boxed Value result is valid: the Value is pushed
// on the stack and its address handed to the AssertValidValue C++ helper.
// Skipped when the script has been invalidated.
7184 void CodeGenerator::emitAssertResultV(const ValueOperand input,
7185 const MDefinition* mir) {
7186 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7187 regs.take(input);
7189 Register temp1 = regs.takeAny();
7190 Register temp2 = regs.takeAny();
7191 masm.push(temp1);
7192 masm.push(temp2);
7194 // Don't check if the script has been invalidated. In that case invalid
7195 // types are expected (until we reach the OsiPoint and bailout).
7196 Label done;
7197 branchIfInvalidated(temp1, &done);
7199 // Check that we have a valid GC pointer.
7200 if (JitOptions.fullDebugChecks) {
7201 saveVolatile();
// The Value is spilled to the stack so the helper gets a Value* argument.
7203 masm.pushValue(input);
7204 masm.moveStackPtrTo(temp1);
7206 using Fn = void (*)(JSContext* cx, Value* v);
7207 masm.setupUnalignedABICall(temp2);
7208 masm.loadJSContext(temp2);
7209 masm.passABIArg(temp2);
7210 masm.passABIArg(temp1);
7211 masm.callWithABI<Fn, AssertValidValue>();
7212 masm.popValue(input);
7213 restoreVolatile();
7216 masm.bind(&done);
7217 masm.pop(temp2);
7218 masm.pop(temp1);
// If |lir| defines a single GC-thing register result, emit the debug-only
// pointer validity check for it; no-op for bogus temps or def-less LIR.
7221 void CodeGenerator::emitGCThingResultChecks(LInstruction* lir,
7222 MDefinition* mir) {
7223 if (lir->numDefs() == 0) {
7224 return;
7227 MOZ_ASSERT(lir->numDefs() == 1);
7228 if (lir->getDef(0)->isBogusTemp()) {
7229 return;
7232 Register output = ToRegister(lir->getDef(0));
7233 emitAssertGCThingResult(output, mir);
// If |lir| defines a boxed Value result held in registers, emit the
// debug-only Value validity check; no-op for stack/no-register outputs.
7236 void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) {
7237 if (lir->numDefs() == 0) {
7238 return;
// A Value occupies BOX_PIECES defs (1 on 64-bit, 2 on 32-bit platforms).
7241 MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
7242 if (!lir->getDef(0)->output()->isRegister()) {
7243 return;
7246 ValueOperand output = ToOutValue(lir);
7248 emitAssertResultV(output, mir);
// Debug-build dispatcher: route an instruction's result to the GC-thing or
// Value checker depending on the MIR result type; other types are unchecked.
7251 void CodeGenerator::emitDebugResultChecks(LInstruction* ins) {
7252 // In debug builds, check that LIR instructions return valid values.
7254 MDefinition* mir = ins->mirRaw();
7255 if (!mir) {
7256 return;
7259 switch (mir->type()) {
7260 case MIRType::Object:
7261 case MIRType::String:
7262 case MIRType::Symbol:
7263 case MIRType::BigInt:
7264 emitGCThingResultChecks(ins, mir);
7265 break;
7266 case MIRType::Value:
7267 emitValueResultChecks(ins, mir);
7268 break;
7269 default:
7270 break;
// Debug-only testing hook ("ion bail-after" option): decrement a runtime
// counter at each bailable instruction and force a bailout when it reaches
// zero. No-op unless the option is enabled and |lir| has a snapshot.
7274 void CodeGenerator::emitDebugForceBailing(LInstruction* lir) {
7275 if (MOZ_LIKELY(!gen->options.ionBailAfterEnabled())) {
7276 return;
7278 if (!lir->snapshot()) {
7279 return;
7281 if (lir->isOsiPoint()) {
7282 return;
7285 masm.comment("emitDebugForceBailing");
7286 const void* bailAfterCounterAddr =
7287 gen->runtime->addressOfIonBailAfterCounter();
7289 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
// A zero counter means the feature is currently inactive; skip everything.
7291 Label done, notBail;
7292 masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterCounterAddr),
7293 Imm32(0), &done);
7295 Register temp = regs.takeAny();
7297 masm.push(temp);
7298 masm.load32(AbsoluteAddress(bailAfterCounterAddr), temp);
7299 masm.sub32(Imm32(1), temp);
7300 masm.store32(temp, AbsoluteAddress(bailAfterCounterAddr));
7302 masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
// Counter hit zero: restore temp, then bail out via the snapshot.
7304 masm.pop(temp);
7305 bailout(lir->snapshot());
7307 masm.bind(&notBail);
7308 masm.pop(temp);
7310 masm.bind(&done);
7312 #endif
// Main code-generation loop: iterate over every LIR block and instruction,
// dispatch to the matching visit* method, and interleave profiling counts,
// spew, native->bytecode mapping, and debug checks. Returns false on OOM.
7314 bool CodeGenerator::generateBody() {
7315 JitSpewCont(JitSpew_Codegen, "\n");
7316 AutoCreatedBy acb(masm, "CodeGenerator::generateBody");
7318 JitSpew(JitSpew_Codegen, "==== BEGIN CodeGenerator::generateBody ====");
7319 IonScriptCounts* counts = maybeCreateScriptCounts();
7321 const bool compilingWasm = gen->compilingWasm();
7323 for (size_t i = 0; i < graph.numBlocks(); i++) {
7324 current = graph.getBlock(i);
7326 // Don't emit any code for trivial blocks, containing just a goto. Such
7327 // blocks are created to split critical edges, and if we didn't end up
7328 // putting any instructions in them, we can skip them.
7329 if (current->isTrivial()) {
7330 continue;
7333 #ifdef JS_JITSPEW
7334 const char* filename = nullptr;
7335 size_t lineNumber = 0;
7336 JS::LimitedColumnNumberOneOrigin columnNumber;
7337 if (current->mir()->info().script()) {
7338 filename = current->mir()->info().script()->filename();
7339 if (current->mir()->pc()) {
7340 lineNumber = PCToLineNumber(current->mir()->info().script(),
7341 current->mir()->pc(), &columnNumber);
7344 JitSpew(JitSpew_Codegen, "--------------------------------");
7345 JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", i,
7346 filename ? filename : "?", lineNumber,
7347 columnNumber.oneOriginValue(),
7348 current->mir()->isLoopHeader() ? " (loop header)" : "");
7349 #endif
// Align wasm loop headers for better branch-target performance.
7351 if (current->mir()->isLoopHeader() && compilingWasm) {
7352 masm.nopAlign(CodeAlignment);
7355 masm.bind(current->label());
7357 mozilla::Maybe<ScriptCountBlockState> blockCounts;
7358 if (counts) {
7359 blockCounts.emplace(&counts->block(i), &masm);
7360 if (!blockCounts->init()) {
7361 return false;
7365 for (LInstructionIterator iter = current->begin(); iter != current->end();
7366 iter++) {
7367 if (!alloc().ensureBallast()) {
7368 return false;
7371 perfSpewer_.recordInstruction(masm, *iter);
7372 #ifdef JS_JITSPEW
7373 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
7374 iter->opName());
7375 if (const char* extra = iter->getExtraName()) {
7376 JitSpewCont(JitSpew_Codegen, ":%s", extra);
7378 JitSpewFin(JitSpew_Codegen);
7379 #endif
7381 if (counts) {
7382 blockCounts->visitInstruction(*iter);
7385 #ifdef CHECK_OSIPOINT_REGISTERS
7386 if (iter->safepoint() && !compilingWasm) {
7387 resetOsiPointRegs(iter->safepoint());
7389 #endif
7391 if (!compilingWasm) {
7392 if (MDefinition* mir = iter->mirRaw()) {
7393 if (!addNativeToBytecodeEntry(mir->trackedSite())) {
7394 return false;
7399 setElement(*iter); // needed to encode correct snapshot location.
7401 #ifdef DEBUG
7402 emitDebugForceBailing(*iter);
7403 #endif
// Dispatch on the LIR opcode; the LIROP macro expands to one case per
// opcode calling the corresponding visit* method.
7405 switch (iter->op()) {
7406 #ifndef JS_CODEGEN_NONE
7407 # define LIROP(op) \
7408 case LNode::Opcode::op: \
7409 visit##op(iter->to##op()); \
7410 break;
7411 LIR_OPCODE_LIST(LIROP)
7412 # undef LIROP
7413 #endif
7414 case LNode::Opcode::Invalid:
7415 default:
7416 MOZ_CRASH("Invalid LIR op");
7419 #ifdef DEBUG
7420 if (!counts) {
7421 emitDebugResultChecks(*iter);
7423 #endif
7425 if (masm.oom()) {
7426 return false;
7430 JitSpew(JitSpew_Codegen, "==== END CodeGenerator::generateBody ====\n");
7431 return true;
7434 // Out-of-line object allocation for LNewArray.
// Taken when inline GC allocation fails; falls back to a VM call
// (see visitOutOfLineNewArray).
7435 class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator> {
7436 LNewArray* lir_;
7438 public:
7439 explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) {}
// Double-dispatch into CodeGenerator::visitOutOfLineNewArray.
7441 void accept(CodeGenerator* codegen) override {
7442 codegen->visitOutOfLineNewArray(this);
7445 LNewArray* lir() const { return lir_; }
// VM fallback for array allocation: calls NewArrayWithShape when a template
// object is available, otherwise the generic NewArrayOperation; live
// registers are saved/restored around the call.
7448 void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) {
7449 Register objReg = ToRegister(lir->output());
7451 MOZ_ASSERT(!lir->isCall());
7452 saveLive(lir);
7454 JSObject* templateObject = lir->mir()->templateObject();
7456 if (templateObject) {
// Arguments are pushed in reverse order of the VM function signature.
7457 pushArg(ImmGCPtr(templateObject->shape()));
7458 pushArg(Imm32(lir->mir()->length()));
7460 using Fn = ArrayObject* (*)(JSContext*, uint32_t, Handle<Shape*>);
7461 callVM<Fn, NewArrayWithShape>(lir);
7462 } else {
7463 pushArg(Imm32(GenericObject));
7464 pushArg(Imm32(lir->mir()->length()));
7466 using Fn = ArrayObject* (*)(JSContext*, uint32_t, NewObjectKind);
7467 callVM<Fn, NewArrayOperation>(lir);
7470 masm.storeCallPointerResult(objReg);
// The output register must not be clobbered by restoreLive below.
7472 MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
7473 restoreLive(lir);
// Math.atan2(y, x): emitted as an ABI call to the C++ ecmaAtan2 helper;
// the result lands in ReturnDoubleReg.
7476 void CodeGenerator::visitAtan2D(LAtan2D* lir) {
7477 FloatRegister y = ToFloatRegister(lir->y());
7478 FloatRegister x = ToFloatRegister(lir->x());
7480 using Fn = double (*)(double x, double y);
7481 masm.setupAlignedABICall();
7482 masm.passABIArg(y, ABIType::Float64);
7483 masm.passABIArg(x, ABIType::Float64);
7484 masm.callWithABI<Fn, ecmaAtan2>(ABIType::Float64);
7486 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
// Math.hypot with 2-4 arguments: emitted as an ABI call to the matching
// C++ helper (ecmaHypot / hypot3 / hypot4); result in ReturnDoubleReg.
7489 void CodeGenerator::visitHypot(LHypot* lir) {
7490 uint32_t numArgs = lir->numArgs();
7491 masm.setupAlignedABICall();
7493 for (uint32_t i = 0; i < numArgs; ++i) {
7494 masm.passABIArg(ToFloatRegister(lir->getOperand(i)), ABIType::Float64);
// Pick the helper whose arity matches the operand count.
7497 switch (numArgs) {
7498 case 2: {
7499 using Fn = double (*)(double x, double y);
7500 masm.callWithABI<Fn, ecmaHypot>(ABIType::Float64);
7501 break;
7503 case 3: {
7504 using Fn = double (*)(double x, double y, double z);
7505 masm.callWithABI<Fn, hypot3>(ABIType::Float64);
7506 break;
7508 case 4: {
7509 using Fn = double (*)(double x, double y, double z, double w);
7510 masm.callWithABI<Fn, hypot4>(ABIType::Float64);
7511 break;
7513 default:
7514 MOZ_CRASH("Unexpected number of arguments to hypot function.");
7516 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
// Array literal allocation: try an inline nursery/tenured allocation from
// the template object, falling back to the out-of-line VM path on failure;
// purely-VM cases go straight to visitNewArrayCallVM.
7519 void CodeGenerator::visitNewArray(LNewArray* lir) {
7520 Register objReg = ToRegister(lir->output());
7521 Register tempReg = ToRegister(lir->temp());
7522 DebugOnly<uint32_t> length = lir->mir()->length();
7524 MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);
7526 if (lir->mir()->isVMCall()) {
7527 visitNewArrayCallVM(lir);
7528 return;
7531 OutOfLineNewArray* ool = new (alloc()) OutOfLineNewArray(lir);
7532 addOutOfLineCode(ool, lir->mir());
7534 TemplateObject templateObject(lir->mir()->templateObject());
7535 #ifdef DEBUG
7536 size_t numInlineElements = gc::GetGCKindSlots(templateObject.getAllocKind()) -
7537 ObjectElements::VALUES_PER_HEADER;
7538 MOZ_ASSERT(length <= numInlineElements,
7539 "Inline allocation only supports inline elements");
7540 #endif
7541 masm.createGCObject(objReg, tempReg, templateObject,
7542 lir->mir()->initialHeap(), ool->entry());
7544 masm.bind(ool->rejoin());
7547 void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) {
7548 visitNewArrayCallVM(ool->lir());
7549 masm.jump(ool->rejoin());
// new Array(n) with a dynamic length: allocate inline from the template
// object when its fixed elements can hold |n|, then patch the length field;
// otherwise (or on allocation failure) call ArrayConstructorOneArg in the VM.
7552 void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) {
7553 Register lengthReg = ToRegister(lir->length());
7554 Register objReg = ToRegister(lir->output());
7555 Register tempReg = ToRegister(lir->temp0());
7557 JSObject* templateObject = lir->mir()->templateObject();
7558 gc::Heap initialHeap = lir->mir()->initialHeap();
7560 using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
7561 OutOfLineCode* ool = oolCallVM<Fn, ArrayConstructorOneArg>(
7562 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
7563 StoreRegisterTo(objReg));
// Inline allocation is only possible when the template stores its elements
// in the object's fixed slots.
7565 bool canInline = true;
7566 size_t inlineLength = 0;
7567 if (templateObject->as<ArrayObject>().hasFixedElements()) {
7568 size_t numSlots =
7569 gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
7570 inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
7571 } else {
7572 canInline = false;
7575 if (canInline) {
7576 // Try to do the allocation inline if the template object is big enough
7577 // for the length in lengthReg. If the length is bigger we could still
7578 // use the template object and not allocate the elements, but it's more
7579 // efficient to do a single big allocation than (repeatedly) reallocating
7580 // the array later on when filling it.
7581 masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength),
7582 ool->entry());
7584 TemplateObject templateObj(templateObject);
7585 masm.createGCObject(objReg, tempReg, templateObj, initialHeap,
7586 ool->entry());
// The template's length is fixed; overwrite it with the requested length.
7588 size_t lengthOffset = NativeObject::offsetOfFixedElements() +
7589 ObjectElements::offsetOfLength();
7590 masm.store32(lengthReg, Address(objReg, lengthOffset));
7591 } else {
7592 masm.jump(ool->entry());
7595 masm.bind(ool->rejoin());
// Allocate an array/string/regexp-string iterator object inline from its
// template object, with a type-specific VM fallback on allocation failure.
7598 void CodeGenerator::visitNewIterator(LNewIterator* lir) {
7599 Register objReg = ToRegister(lir->output());
7600 Register tempReg = ToRegister(lir->temp0());
// Select the VM fallback matching the iterator kind.
7602 OutOfLineCode* ool;
7603 switch (lir->mir()->type()) {
7604 case MNewIterator::ArrayIterator: {
7605 using Fn = ArrayIteratorObject* (*)(JSContext*);
7606 ool = oolCallVM<Fn, NewArrayIterator>(lir, ArgList(),
7607 StoreRegisterTo(objReg));
7608 break;
7610 case MNewIterator::StringIterator: {
7611 using Fn = StringIteratorObject* (*)(JSContext*);
7612 ool = oolCallVM<Fn, NewStringIterator>(lir, ArgList(),
7613 StoreRegisterTo(objReg));
7614 break;
7616 case MNewIterator::RegExpStringIterator: {
7617 using Fn = RegExpStringIteratorObject* (*)(JSContext*);
7618 ool = oolCallVM<Fn, NewRegExpStringIterator>(lir, ArgList(),
7619 StoreRegisterTo(objReg));
7620 break;
7622 default:
7623 MOZ_CRASH("unexpected iterator type");
7626 TemplateObject templateObject(lir->mir()->templateObject());
7627 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
7628 ool->entry());
7630 masm.bind(ool->rejoin());
// Allocate a fixed-length typed array with a statically known length: inline
// GC allocation from the template object plus slot/data initialization, with
// a VM fallback via NewTypedArrayWithTemplateAndLength.
7633 void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) {
7634 Register objReg = ToRegister(lir->output());
7635 Register tempReg = ToRegister(lir->temp0());
7636 Register lengthReg = ToRegister(lir->temp1());
7637 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
7639 JSObject* templateObject = lir->mir()->templateObject();
7640 gc::Heap initialHeap = lir->mir()->initialHeap();
7642 auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();
7644 size_t n = ttemplate->length();
7645 MOZ_ASSERT(n <= INT32_MAX,
7646 "Template objects are only created for int32 lengths");
7648 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
7649 OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
7650 lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
7651 StoreRegisterTo(objReg));
7653 TemplateObject templateObj(templateObject);
7654 masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
7656 masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
7657 ttemplate, MacroAssembler::TypedArrayLength::Fixed);
7659 masm.bind(ool->rejoin());
7662 void CodeGenerator::visitNewTypedArrayDynamicLength(
7663 LNewTypedArrayDynamicLength* lir) {
7664 Register lengthReg = ToRegister(lir->length());
7665 Register objReg = ToRegister(lir->output());
7666 Register tempReg = ToRegister(lir->temp0());
7667 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
7669 JSObject* templateObject = lir->mir()->templateObject();
7670 gc::Heap initialHeap = lir->mir()->initialHeap();
7672 auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();
7674 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
7675 OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
7676 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
7677 StoreRegisterTo(objReg));
7679 // Volatile |lengthReg| is saved across the ABI call in |initTypedArraySlots|.
7680 MOZ_ASSERT_IF(lengthReg.volatile_(), liveRegs.has(lengthReg));
7682 TemplateObject templateObj(templateObject);
7683 masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
7685 masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
7686 ttemplate,
7687 MacroAssembler::TypedArrayLength::Dynamic);
7689 masm.bind(ool->rejoin());
7692 void CodeGenerator::visitNewTypedArrayFromArray(LNewTypedArrayFromArray* lir) {
7693 pushArg(ToRegister(lir->array()));
7694 pushArg(ImmGCPtr(lir->mir()->templateObject()));
7696 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
7697 callVM<Fn, js::NewTypedArrayWithTemplateAndArray>(lir);
7700 void CodeGenerator::visitNewTypedArrayFromArrayBuffer(
7701 LNewTypedArrayFromArrayBuffer* lir) {
7702 pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::LengthIndex));
7703 pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::ByteOffsetIndex));
7704 pushArg(ToRegister(lir->arrayBuffer()));
7705 pushArg(ImmGCPtr(lir->mir()->templateObject()));
7707 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
7708 HandleValue, HandleValue);
7709 callVM<Fn, js::NewTypedArrayWithTemplateAndBuffer>(lir);
7712 void CodeGenerator::visitBindFunction(LBindFunction* lir) {
7713 Register target = ToRegister(lir->target());
7714 Register temp1 = ToRegister(lir->temp0());
7715 Register temp2 = ToRegister(lir->temp1());
7717 // Try to allocate a new BoundFunctionObject we can pass to the VM function.
7718 // If this fails, we set temp1 to nullptr so we do the allocation in C++.
7719 TemplateObject templateObject(lir->mir()->templateObject());
7720 Label allocOk, allocFailed;
7721 masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
7722 &allocFailed);
7723 masm.jump(&allocOk);
7725 masm.bind(&allocFailed);
7726 masm.movePtr(ImmWord(0), temp1);
7728 masm.bind(&allocOk);
7730 // Set temp2 to the address of the first argument on the stack.
7731 // Note that the Value slots used for arguments are currently aligned for a
7732 // JIT call, even though that's not strictly necessary for calling into C++.
7733 uint32_t argc = lir->mir()->numStackArgs();
7734 if (JitStackValueAlignment > 1) {
7735 argc = AlignBytes(argc, JitStackValueAlignment);
7737 uint32_t unusedStack = UnusedStackBytesForCall(argc);
7738 masm.computeEffectiveAddress(Address(masm.getStackPointer(), unusedStack),
7739 temp2);
7741 pushArg(temp1);
7742 pushArg(Imm32(lir->mir()->numStackArgs()));
7743 pushArg(temp2);
7744 pushArg(target);
7746 using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
7747 uint32_t, Handle<BoundFunctionObject*>);
7748 callVM<Fn, js::BoundFunctionObject::functionBindImpl>(lir);
7751 void CodeGenerator::visitNewBoundFunction(LNewBoundFunction* lir) {
7752 Register output = ToRegister(lir->output());
7753 Register temp = ToRegister(lir->temp0());
7755 JSObject* templateObj = lir->mir()->templateObj();
7757 using Fn = BoundFunctionObject* (*)(JSContext*, Handle<BoundFunctionObject*>);
7758 OutOfLineCode* ool = oolCallVM<Fn, BoundFunctionObject::createWithTemplate>(
7759 lir, ArgList(ImmGCPtr(templateObj)), StoreRegisterTo(output));
7761 TemplateObject templateObject(templateObj);
7762 masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
7763 ool->entry());
7765 masm.bind(ool->rejoin());
7768 // Out-of-line object allocation for JSOp::NewObject.
7769 class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator> {
7770 LNewObject* lir_;
7772 public:
7773 explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) {}
7775 void accept(CodeGenerator* codegen) override {
7776 codegen->visitOutOfLineNewObject(this);
7779 LNewObject* lir() const { return lir_; }
7782 void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) {
7783 Register objReg = ToRegister(lir->output());
7785 MOZ_ASSERT(!lir->isCall());
7786 saveLive(lir);
7788 JSObject* templateObject = lir->mir()->templateObject();
7790 // If we're making a new object with a class prototype (that is, an object
7791 // that derives its class from its prototype instead of being
7792 // PlainObject::class_'d) from self-hosted code, we need a different init
7793 // function.
7794 switch (lir->mir()->mode()) {
7795 case MNewObject::ObjectLiteral: {
7796 MOZ_ASSERT(!templateObject);
7797 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
7798 pushArg(ImmGCPtr(lir->mir()->block()->info().script()));
7800 using Fn = JSObject* (*)(JSContext*, HandleScript, const jsbytecode* pc);
7801 callVM<Fn, NewObjectOperation>(lir);
7802 break;
7804 case MNewObject::ObjectCreate: {
7805 pushArg(ImmGCPtr(templateObject));
7807 using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
7808 callVM<Fn, ObjectCreateWithTemplate>(lir);
7809 break;
7813 masm.storeCallPointerResult(objReg);
7815 MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
7816 restoreLive(lir);
7819 static bool ShouldInitFixedSlots(LNewPlainObject* lir, const Shape* shape,
7820 uint32_t nfixed) {
7821 // Look for StoreFixedSlot instructions following an object allocation
7822 // that write to this object before a GC is triggered or this object is
7823 // passed to a VM call. If all fixed slots will be initialized, the
7824 // allocation code doesn't need to set the slots to |undefined|.
7826 if (nfixed == 0) {
7827 return false;
7830 // Keep track of the fixed slots that are initialized. initializedSlots is
7831 // a bit mask with a bit for each slot.
7832 MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
7833 static_assert(NativeObject::MAX_FIXED_SLOTS <= 32,
7834 "Slot bits must fit in 32 bits");
7835 uint32_t initializedSlots = 0;
7836 uint32_t numInitialized = 0;
7838 MInstruction* allocMir = lir->mir();
7839 MBasicBlock* block = allocMir->block();
7841 // Skip the allocation instruction.
7842 MInstructionIterator iter = block->begin(allocMir);
7843 MOZ_ASSERT(*iter == allocMir);
7844 iter++;
7846 // Handle the leading shape guard, if present.
7847 for (; iter != block->end(); iter++) {
7848 if (iter->isConstant()) {
7849 // This instruction won't trigger a GC or read object slots.
7850 continue;
7852 if (iter->isGuardShape()) {
7853 auto* guard = iter->toGuardShape();
7854 if (guard->object() != allocMir || guard->shape() != shape) {
7855 return true;
7857 allocMir = guard;
7858 iter++;
7860 break;
7863 for (; iter != block->end(); iter++) {
7864 if (iter->isConstant() || iter->isPostWriteBarrier()) {
7865 // These instructions won't trigger a GC or read object slots.
7866 continue;
7869 if (iter->isStoreFixedSlot()) {
7870 MStoreFixedSlot* store = iter->toStoreFixedSlot();
7871 if (store->object() != allocMir) {
7872 return true;
7875 // We may not initialize this object slot on allocation, so the
7876 // pre-barrier could read uninitialized memory. Simply disable
7877 // the barrier for this store: the object was just initialized
7878 // so the barrier is not necessary.
7879 store->setNeedsBarrier(false);
7881 uint32_t slot = store->slot();
7882 MOZ_ASSERT(slot < nfixed);
7883 if ((initializedSlots & (1 << slot)) == 0) {
7884 numInitialized++;
7885 initializedSlots |= (1 << slot);
7887 if (numInitialized == nfixed) {
7888 // All fixed slots will be initialized.
7889 MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
7890 return false;
7893 continue;
7896 // Unhandled instruction, assume it bails or reads object slots.
7897 return true;
7900 MOZ_CRASH("Shouldn't get here");
7903 void CodeGenerator::visitNewObject(LNewObject* lir) {
7904 Register objReg = ToRegister(lir->output());
7905 Register tempReg = ToRegister(lir->temp());
7907 if (lir->mir()->isVMCall()) {
7908 visitNewObjectVMCall(lir);
7909 return;
7912 OutOfLineNewObject* ool = new (alloc()) OutOfLineNewObject(lir);
7913 addOutOfLineCode(ool, lir->mir());
7915 TemplateObject templateObject(lir->mir()->templateObject());
7917 masm.createGCObject(objReg, tempReg, templateObject,
7918 lir->mir()->initialHeap(), ool->entry());
7920 masm.bind(ool->rejoin());
7923 void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) {
7924 visitNewObjectVMCall(ool->lir());
7925 masm.jump(ool->rejoin());
7928 void CodeGenerator::visitNewPlainObject(LNewPlainObject* lir) {
7929 Register objReg = ToRegister(lir->output());
7930 Register temp0Reg = ToRegister(lir->temp0());
7931 Register temp1Reg = ToRegister(lir->temp1());
7932 Register shapeReg = ToRegister(lir->temp2());
7934 auto* mir = lir->mir();
7935 const Shape* shape = mir->shape();
7936 gc::Heap initialHeap = mir->initialHeap();
7937 gc::AllocKind allocKind = mir->allocKind();
7939 using Fn =
7940 JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind, gc::Heap);
7941 OutOfLineCode* ool = oolCallVM<Fn, NewPlainObjectOptimizedFallback>(
7942 lir,
7943 ArgList(ImmGCPtr(shape), Imm32(int32_t(allocKind)),
7944 Imm32(int32_t(initialHeap))),
7945 StoreRegisterTo(objReg));
7947 bool initContents = ShouldInitFixedSlots(lir, shape, mir->numFixedSlots());
7949 masm.movePtr(ImmGCPtr(shape), shapeReg);
7950 masm.createPlainGCObject(
7951 objReg, shapeReg, temp0Reg, temp1Reg, mir->numFixedSlots(),
7952 mir->numDynamicSlots(), allocKind, initialHeap, ool->entry(),
7953 AllocSiteInput(gc::CatchAllAllocSite::Optimized), initContents);
7955 #ifdef DEBUG
7956 // ShouldInitFixedSlots expects that the leading GuardShape will never fail,
7957 // so ensure the newly created object has the correct shape. Should the guard
7958 // ever fail, we may end up with uninitialized fixed slots, which can confuse
7959 // the GC.
7960 Label ok;
7961 masm.branchTestObjShape(Assembler::Equal, objReg, shape, temp0Reg, objReg,
7962 &ok);
7963 masm.assumeUnreachable("Newly created object has the correct shape");
7964 masm.bind(&ok);
7965 #endif
7967 masm.bind(ool->rejoin());
7970 void CodeGenerator::visitNewArrayObject(LNewArrayObject* lir) {
7971 Register objReg = ToRegister(lir->output());
7972 Register temp0Reg = ToRegister(lir->temp0());
7973 Register shapeReg = ToRegister(lir->temp1());
7975 auto* mir = lir->mir();
7976 uint32_t arrayLength = mir->length();
7978 gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
7979 MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
7980 allocKind = ForegroundToBackgroundAllocKind(allocKind);
7982 uint32_t slotCount = GetGCKindSlots(allocKind);
7983 MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
7984 uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;
7986 const Shape* shape = mir->shape();
7988 NewObjectKind objectKind =
7989 mir->initialHeap() == gc::Heap::Tenured ? TenuredObject : GenericObject;
7991 using Fn =
7992 ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, NewObjectKind);
7993 OutOfLineCode* ool = oolCallVM<Fn, NewArrayObjectOptimizedFallback>(
7994 lir,
7995 ArgList(Imm32(arrayLength), Imm32(int32_t(allocKind)), Imm32(objectKind)),
7996 StoreRegisterTo(objReg));
7998 masm.movePtr(ImmPtr(shape), shapeReg);
7999 masm.createArrayWithFixedElements(
8000 objReg, shapeReg, temp0Reg, InvalidReg, arrayLength, arrayCapacity, 0, 0,
8001 allocKind, mir->initialHeap(), ool->entry(),
8002 AllocSiteInput(gc::CatchAllAllocSite::Optimized));
8003 masm.bind(ool->rejoin());
8006 void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) {
8007 Register objReg = ToRegister(lir->output());
8008 Register tempReg = ToRegister(lir->temp0());
8009 const CompileInfo& info = lir->mir()->block()->info();
8011 using Fn = js::NamedLambdaObject* (*)(JSContext*, HandleFunction);
8012 OutOfLineCode* ool = oolCallVM<Fn, NamedLambdaObject::createWithoutEnclosing>(
8013 lir, ArgList(info.funMaybeLazy()), StoreRegisterTo(objReg));
8015 TemplateObject templateObject(lir->mir()->templateObj());
8017 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
8018 ool->entry());
8020 masm.bind(ool->rejoin());
8023 void CodeGenerator::visitNewCallObject(LNewCallObject* lir) {
8024 Register objReg = ToRegister(lir->output());
8025 Register tempReg = ToRegister(lir->temp0());
8027 CallObject* templateObj = lir->mir()->templateObject();
8029 using Fn = CallObject* (*)(JSContext*, Handle<SharedShape*>);
8030 OutOfLineCode* ool = oolCallVM<Fn, CallObject::createWithShape>(
8031 lir, ArgList(ImmGCPtr(templateObj->sharedShape())),
8032 StoreRegisterTo(objReg));
8034 // Inline call object creation, using the OOL path only for tricky cases.
8035 TemplateObject templateObject(templateObj);
8036 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
8037 ool->entry());
8039 masm.bind(ool->rejoin());
8042 void CodeGenerator::visitNewStringObject(LNewStringObject* lir) {
8043 Register input = ToRegister(lir->input());
8044 Register output = ToRegister(lir->output());
8045 Register temp = ToRegister(lir->temp0());
8047 StringObject* templateObj = lir->mir()->templateObj();
8049 using Fn = JSObject* (*)(JSContext*, HandleString);
8050 OutOfLineCode* ool = oolCallVM<Fn, NewStringObject>(lir, ArgList(input),
8051 StoreRegisterTo(output));
8053 TemplateObject templateObject(templateObj);
8054 masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
8055 ool->entry());
8057 masm.loadStringLength(input, temp);
8059 masm.storeValue(JSVAL_TYPE_STRING, input,
8060 Address(output, StringObject::offsetOfPrimitiveValue()));
8061 masm.storeValue(JSVAL_TYPE_INT32, temp,
8062 Address(output, StringObject::offsetOfLength()));
8064 masm.bind(ool->rejoin());
8067 void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) {
8068 Register obj = ToRegister(lir->object());
8069 Register value = ToRegister(lir->value());
8071 pushArg(value);
8072 pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex));
8073 pushArg(obj);
8074 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
8076 using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
8077 HandleObject);
8078 callVM<Fn, InitElemGetterSetterOperation>(lir);
8081 void CodeGenerator::visitMutateProto(LMutateProto* lir) {
8082 Register objReg = ToRegister(lir->object());
8084 pushArg(ToValue(lir, LMutateProto::ValueIndex));
8085 pushArg(objReg);
8087 using Fn =
8088 bool (*)(JSContext* cx, Handle<PlainObject*> obj, HandleValue value);
8089 callVM<Fn, MutatePrototype>(lir);
8092 void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) {
8093 Register obj = ToRegister(lir->object());
8094 Register value = ToRegister(lir->value());
8096 pushArg(value);
8097 pushArg(ImmGCPtr(lir->mir()->name()));
8098 pushArg(obj);
8099 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
8101 using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject,
8102 Handle<PropertyName*>, HandleObject);
8103 callVM<Fn, InitPropGetterSetterOperation>(lir);
8106 void CodeGenerator::visitCreateThis(LCreateThis* lir) {
8107 const LAllocation* callee = lir->callee();
8108 const LAllocation* newTarget = lir->newTarget();
8110 if (newTarget->isConstant()) {
8111 pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
8112 } else {
8113 pushArg(ToRegister(newTarget));
8116 if (callee->isConstant()) {
8117 pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
8118 } else {
8119 pushArg(ToRegister(callee));
8122 using Fn = bool (*)(JSContext* cx, HandleObject callee,
8123 HandleObject newTarget, MutableHandleValue rval);
8124 callVM<Fn, jit::CreateThisFromIon>(lir);
8127 void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) {
8128 // This should be getting constructed in the first block only, and not any OSR
8129 // entry blocks.
8130 MOZ_ASSERT(lir->mir()->block()->id() == 0);
8132 Register callObj = ToRegister(lir->callObject());
8133 Register temp0 = ToRegister(lir->temp0());
8134 Label done;
8136 if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
8137 Register objTemp = ToRegister(lir->temp1());
8138 Register cxTemp = ToRegister(lir->temp2());
8140 masm.Push(callObj);
8142 // Try to allocate an arguments object. This will leave the reserved
8143 // slots uninitialized, so it's important we don't GC until we
8144 // initialize these slots in ArgumentsObject::finishForIonPure.
8145 Label failure;
8146 TemplateObject templateObject(templateObj);
8147 masm.createGCObject(objTemp, temp0, templateObject, gc::Heap::Default,
8148 &failure,
8149 /* initContents = */ false);
8151 masm.moveStackPtrTo(temp0);
8152 masm.addPtr(Imm32(masm.framePushed()), temp0);
8154 using Fn = ArgumentsObject* (*)(JSContext* cx, jit::JitFrameLayout* frame,
8155 JSObject* scopeChain, ArgumentsObject* obj);
8156 masm.setupAlignedABICall();
8157 masm.loadJSContext(cxTemp);
8158 masm.passABIArg(cxTemp);
8159 masm.passABIArg(temp0);
8160 masm.passABIArg(callObj);
8161 masm.passABIArg(objTemp);
8163 masm.callWithABI<Fn, ArgumentsObject::finishForIonPure>();
8164 masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
8166 // Discard saved callObj on the stack.
8167 masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
8168 masm.jump(&done);
8170 masm.bind(&failure);
8171 masm.Pop(callObj);
8174 masm.moveStackPtrTo(temp0);
8175 masm.addPtr(Imm32(frameSize()), temp0);
8177 pushArg(callObj);
8178 pushArg(temp0);
8180 using Fn = ArgumentsObject* (*)(JSContext*, JitFrameLayout*, HandleObject);
8181 callVM<Fn, ArgumentsObject::createForIon>(lir);
8183 masm.bind(&done);
8186 void CodeGenerator::visitCreateInlinedArgumentsObject(
8187 LCreateInlinedArgumentsObject* lir) {
8188 Register callObj = ToRegister(lir->getCallObject());
8189 Register callee = ToRegister(lir->getCallee());
8190 Register argsAddress = ToRegister(lir->temp1());
8191 Register argsObj = ToRegister(lir->temp2());
8193 // TODO: Do we have to worry about alignment here?
8195 // Create a contiguous array of values for ArgumentsObject::create
8196 // by pushing the arguments onto the stack in reverse order.
8197 uint32_t argc = lir->mir()->numActuals();
8198 for (uint32_t i = 0; i < argc; i++) {
8199 uint32_t argNum = argc - i - 1;
8200 uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(argNum);
8201 ConstantOrRegister arg =
8202 toConstantOrRegister(lir, index, lir->mir()->getArg(argNum)->type());
8203 masm.Push(arg);
8205 masm.moveStackPtrTo(argsAddress);
8207 Label done;
8208 if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
8209 LiveRegisterSet liveRegs;
8210 liveRegs.add(callObj);
8211 liveRegs.add(callee);
8213 masm.PushRegsInMask(liveRegs);
8215 // We are free to clobber all registers, as LCreateInlinedArgumentsObject is
8216 // a call instruction.
8217 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
8218 allRegs.take(callObj);
8219 allRegs.take(callee);
8220 allRegs.take(argsObj);
8221 allRegs.take(argsAddress);
8223 Register temp3 = allRegs.takeAny();
8224 Register temp4 = allRegs.takeAny();
8226 // Try to allocate an arguments object. This will leave the reserved slots
8227 // uninitialized, so it's important we don't GC until we initialize these
8228 // slots in ArgumentsObject::finishForIonPure.
8229 Label failure;
8230 TemplateObject templateObject(templateObj);
8231 masm.createGCObject(argsObj, temp3, templateObject, gc::Heap::Default,
8232 &failure,
8233 /* initContents = */ false);
8235 Register numActuals = temp3;
8236 masm.move32(Imm32(argc), numActuals);
8238 using Fn = ArgumentsObject* (*)(JSContext*, JSObject*, JSFunction*, Value*,
8239 uint32_t, ArgumentsObject*);
8240 masm.setupAlignedABICall();
8241 masm.loadJSContext(temp4);
8242 masm.passABIArg(temp4);
8243 masm.passABIArg(callObj);
8244 masm.passABIArg(callee);
8245 masm.passABIArg(argsAddress);
8246 masm.passABIArg(numActuals);
8247 masm.passABIArg(argsObj);
8249 masm.callWithABI<Fn, ArgumentsObject::finishInlineForIonPure>();
8250 masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
8252 // Discard saved callObj, callee, and values array on the stack.
8253 masm.addToStackPtr(
8254 Imm32(MacroAssembler::PushRegsInMaskSizeInBytes(liveRegs) +
8255 argc * sizeof(Value)));
8256 masm.jump(&done);
8258 masm.bind(&failure);
8259 masm.PopRegsInMask(liveRegs);
8261 // Reload argsAddress because it may have been overridden.
8262 masm.moveStackPtrTo(argsAddress);
8265 pushArg(Imm32(argc));
8266 pushArg(callObj);
8267 pushArg(callee);
8268 pushArg(argsAddress);
8270 using Fn = ArgumentsObject* (*)(JSContext*, Value*, HandleFunction,
8271 HandleObject, uint32_t);
8272 callVM<Fn, ArgumentsObject::createForInlinedIon>(lir);
8274 // Discard the array of values.
8275 masm.freeStack(argc * sizeof(Value));
8277 masm.bind(&done);
8280 template <class GetInlinedArgument>
8281 void CodeGenerator::emitGetInlinedArgument(GetInlinedArgument* lir,
8282 Register index,
8283 ValueOperand output) {
8284 uint32_t numActuals = lir->mir()->numActuals();
8285 MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);
8287 // The index has already been bounds-checked, so the code we
8288 // generate here should be unreachable. We can end up in this
8289 // situation in self-hosted code using GetArgument(), or in a
8290 // monomorphically inlined function if we've inlined some CacheIR
8291 // that was created for a different caller.
8292 if (numActuals == 0) {
8293 masm.assumeUnreachable("LGetInlinedArgument: invalid index");
8294 return;
8297 // Check the first n-1 possible indices.
8298 Label done;
8299 for (uint32_t i = 0; i < numActuals - 1; i++) {
8300 Label skip;
8301 ConstantOrRegister arg = toConstantOrRegister(
8302 lir, GetInlinedArgument::ArgIndex(i), lir->mir()->getArg(i)->type());
8303 masm.branch32(Assembler::NotEqual, index, Imm32(i), &skip);
8304 masm.moveValue(arg, output);
8306 masm.jump(&done);
8307 masm.bind(&skip);
8310 #ifdef DEBUG
8311 Label skip;
8312 masm.branch32(Assembler::Equal, index, Imm32(numActuals - 1), &skip);
8313 masm.assumeUnreachable("LGetInlinedArgument: invalid index");
8314 masm.bind(&skip);
8315 #endif
8317 // The index has already been bounds-checked, so load the last argument.
8318 uint32_t lastIdx = numActuals - 1;
8319 ConstantOrRegister arg =
8320 toConstantOrRegister(lir, GetInlinedArgument::ArgIndex(lastIdx),
8321 lir->mir()->getArg(lastIdx)->type());
8322 masm.moveValue(arg, output);
8323 masm.bind(&done);
8326 void CodeGenerator::visitGetInlinedArgument(LGetInlinedArgument* lir) {
8327 Register index = ToRegister(lir->getIndex());
8328 ValueOperand output = ToOutValue(lir);
8330 emitGetInlinedArgument(lir, index, output);
8333 void CodeGenerator::visitGetInlinedArgumentHole(LGetInlinedArgumentHole* lir) {
8334 Register index = ToRegister(lir->getIndex());
8335 ValueOperand output = ToOutValue(lir);
8337 uint32_t numActuals = lir->mir()->numActuals();
8339 if (numActuals == 0) {
8340 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
8341 masm.moveValue(UndefinedValue(), output);
8342 return;
8345 Label outOfBounds, done;
8346 masm.branch32(Assembler::AboveOrEqual, index, Imm32(numActuals),
8347 &outOfBounds);
8349 emitGetInlinedArgument(lir, index, output);
8350 masm.jump(&done);
8352 masm.bind(&outOfBounds);
8353 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
8354 masm.moveValue(UndefinedValue(), output);
8356 masm.bind(&done);
8359 void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) {
8360 Register temp = ToRegister(lir->temp0());
8361 Register argsObj = ToRegister(lir->argsObject());
8362 ValueOperand out = ToOutValue(lir);
8364 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
8365 temp);
8366 Address argAddr(temp, ArgumentsData::offsetOfArgs() +
8367 lir->mir()->argno() * sizeof(Value));
8368 masm.loadValue(argAddr, out);
8369 #ifdef DEBUG
8370 Label success;
8371 masm.branchTestMagic(Assembler::NotEqual, out, &success);
8372 masm.assumeUnreachable(
8373 "Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
8374 masm.bind(&success);
8375 #endif
8378 void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) {
8379 Register temp = ToRegister(lir->getTemp(0));
8380 Register argsObj = ToRegister(lir->argsObject());
8381 ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);
8383 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
8384 temp);
8385 Address argAddr(temp, ArgumentsData::offsetOfArgs() +
8386 lir->mir()->argno() * sizeof(Value));
8387 emitPreBarrier(argAddr);
8388 #ifdef DEBUG
8389 Label success;
8390 masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
8391 masm.assumeUnreachable(
8392 "Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
8393 masm.bind(&success);
8394 #endif
8395 masm.storeValue(value, argAddr);
8398 void CodeGenerator::visitLoadArgumentsObjectArg(LLoadArgumentsObjectArg* lir) {
8399 Register temp = ToRegister(lir->temp0());
8400 Register argsObj = ToRegister(lir->argsObject());
8401 Register index = ToRegister(lir->index());
8402 ValueOperand out = ToOutValue(lir);
8404 Label bail;
8405 masm.loadArgumentsObjectElement(argsObj, index, out, temp, &bail);
8406 bailoutFrom(&bail, lir->snapshot());
8409 void CodeGenerator::visitLoadArgumentsObjectArgHole(
8410 LLoadArgumentsObjectArgHole* lir) {
8411 Register temp = ToRegister(lir->temp0());
8412 Register argsObj = ToRegister(lir->argsObject());
8413 Register index = ToRegister(lir->index());
8414 ValueOperand out = ToOutValue(lir);
8416 Label bail;
8417 masm.loadArgumentsObjectElementHole(argsObj, index, out, temp, &bail);
8418 bailoutFrom(&bail, lir->snapshot());
8421 void CodeGenerator::visitInArgumentsObjectArg(LInArgumentsObjectArg* lir) {
8422 Register temp = ToRegister(lir->temp0());
8423 Register argsObj = ToRegister(lir->argsObject());
8424 Register index = ToRegister(lir->index());
8425 Register out = ToRegister(lir->output());
8427 Label bail;
8428 masm.loadArgumentsObjectElementExists(argsObj, index, out, temp, &bail);
8429 bailoutFrom(&bail, lir->snapshot());
8432 void CodeGenerator::visitArgumentsObjectLength(LArgumentsObjectLength* lir) {
8433 Register argsObj = ToRegister(lir->argsObject());
8434 Register out = ToRegister(lir->output());
8436 Label bail;
8437 masm.loadArgumentsObjectLength(argsObj, out, &bail);
8438 bailoutFrom(&bail, lir->snapshot());
8441 void CodeGenerator::visitArrayFromArgumentsObject(
8442 LArrayFromArgumentsObject* lir) {
8443 pushArg(ToRegister(lir->argsObject()));
8445 using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
8446 callVM<Fn, js::ArrayFromArgumentsObject>(lir);
8449 void CodeGenerator::visitGuardArgumentsObjectFlags(
8450 LGuardArgumentsObjectFlags* lir) {
8451 Register argsObj = ToRegister(lir->argsObject());
8452 Register temp = ToRegister(lir->temp0());
8454 Label bail;
8455 masm.branchTestArgumentsObjectFlags(argsObj, temp, lir->mir()->flags(),
8456 Assembler::NonZero, &bail);
8457 bailoutFrom(&bail, lir->snapshot());
8460 void CodeGenerator::visitBoundFunctionNumArgs(LBoundFunctionNumArgs* lir) {
8461 Register obj = ToRegister(lir->object());
8462 Register output = ToRegister(lir->output());
8464 masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
8465 output);
8466 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
8469 void CodeGenerator::visitGuardBoundFunctionIsConstructor(
8470 LGuardBoundFunctionIsConstructor* lir) {
8471 Register obj = ToRegister(lir->object());
8473 Label bail;
8474 Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
8475 masm.branchTest32(Assembler::Zero, flagsSlot,
8476 Imm32(BoundFunctionObject::IsConstructorFlag), &bail);
8477 bailoutFrom(&bail, lir->snapshot());
8480 void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) {
8481 ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
8482 Register obj = ToRegister(lir->object());
8483 Register output = ToRegister(lir->output());
8485 Label valueIsObject, end;
8487 masm.branchTestObject(Assembler::Equal, value, &valueIsObject);
8489 // Value is not an object. Return that other object.
8490 masm.movePtr(obj, output);
8491 masm.jump(&end);
8493 // Value is an object. Return unbox(Value).
8494 masm.bind(&valueIsObject);
8495 Register payload = masm.extractObject(value, output);
8496 if (payload != output) {
8497 masm.movePtr(payload, output);
8500 masm.bind(&end);
8503 class OutOfLineBoxNonStrictThis : public OutOfLineCodeBase<CodeGenerator> {
8504 LBoxNonStrictThis* ins_;
8506 public:
8507 explicit OutOfLineBoxNonStrictThis(LBoxNonStrictThis* ins) : ins_(ins) {}
8508 void accept(CodeGenerator* codegen) override {
8509 codegen->visitOutOfLineBoxNonStrictThis(this);
8511 LBoxNonStrictThis* ins() const { return ins_; }
8514 void CodeGenerator::visitBoxNonStrictThis(LBoxNonStrictThis* lir) {
8515 ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
8516 Register output = ToRegister(lir->output());
8518 auto* ool = new (alloc()) OutOfLineBoxNonStrictThis(lir);
8519 addOutOfLineCode(ool, lir->mir());
8521 masm.fallibleUnboxObject(value, output, ool->entry());
8522 masm.bind(ool->rejoin());
8525 void CodeGenerator::visitOutOfLineBoxNonStrictThis(
8526 OutOfLineBoxNonStrictThis* ool) {
8527 LBoxNonStrictThis* lir = ool->ins();
8529 ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
8530 Register output = ToRegister(lir->output());
8532 Label notNullOrUndefined;
8534 Label isNullOrUndefined;
8535 ScratchTagScope tag(masm, value);
8536 masm.splitTagForTest(value, tag);
8537 masm.branchTestUndefined(Assembler::Equal, tag, &isNullOrUndefined);
8538 masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
8539 masm.bind(&isNullOrUndefined);
8540 masm.movePtr(ImmGCPtr(lir->mir()->globalThis()), output);
8541 masm.jump(ool->rejoin());
8544 masm.bind(&notNullOrUndefined);
8546 saveLive(lir);
8548 pushArg(value);
8549 using Fn = JSObject* (*)(JSContext*, HandleValue);
8550 callVM<Fn, BoxNonStrictThis>(lir);
8552 StoreRegisterTo(output).generate(this);
8553 restoreLiveIgnore(lir, StoreRegisterTo(output).clobbered());
8555 masm.jump(ool->rejoin());
8558 void CodeGenerator::visitImplicitThis(LImplicitThis* lir) {
8559 pushArg(ImmGCPtr(lir->mir()->name()));
8560 pushArg(ToRegister(lir->env()));
8562 using Fn = bool (*)(JSContext*, HandleObject, Handle<PropertyName*>,
8563 MutableHandleValue);
8564 callVM<Fn, ImplicitThisOperation>(lir);
8567 void CodeGenerator::visitArrayLength(LArrayLength* lir) {
8568 Register elements = ToRegister(lir->elements());
8569 Register output = ToRegister(lir->output());
8571 Address length(elements, ObjectElements::offsetOfLength());
8572 masm.load32(length, output);
8574 // Bail out if the length doesn't fit in int32.
8575 bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
8578 static void SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index,
8579 const Address& length) {
8580 if (index->isConstant()) {
8581 masm.store32(Imm32(ToInt32(index) + 1), length);
8582 } else {
8583 Register newLength = ToRegister(index);
8584 masm.add32(Imm32(1), newLength);
8585 masm.store32(newLength, length);
8586 masm.sub32(Imm32(1), newLength);
// Writes |index + 1| into the dense-elements length field.
8590 void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) {
8591 Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
8592 SetLengthFromIndex(masm, lir->index(), length);
// Loads a JSFunction's |length| into the output register, bailing out when
// the length cannot be computed inline (self-hosted lazy scripts, or a
// previously resolved — and therefore possibly shadowed — length property).
8595 void CodeGenerator::visitFunctionLength(LFunctionLength* lir) {
8596 Register function = ToRegister(lir->function());
8597 Register output = ToRegister(lir->output());
8599 Label bail;
8601 // Get the JSFunction flags.
8602 masm.load32(Address(function, JSFunction::offsetOfFlagsAndArgCount()),
8603 output);
8605 // Functions with a SelfHostedLazyScript must be compiled with the slow-path
8606 // before the function length is known. If the length was previously resolved,
8607 // the length property may be shadowed.
8608 masm.branchTest32(
8609 Assembler::NonZero, output,
8610 Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
8611 &bail);
8613 masm.loadFunctionLength(function, output, output, &bail);
8615 bailoutFrom(&bail, lir->snapshot());
// Loads a JSFunction's |name| atom into the output register (using the empty
// atom as the fallback), bailing out if loadFunctionName cannot produce it
// inline.
8618 void CodeGenerator::visitFunctionName(LFunctionName* lir) {
8619 Register function = ToRegister(lir->function());
8620 Register output = ToRegister(lir->output());
8622 Label bail;
8624 const JSAtomState& names = gen->runtime->names();
8625 masm.loadFunctionName(function, output, ImmGCPtr(names.empty_), &bail);
8627 bailoutFrom(&bail, lir->snapshot());
// Computes the address of a Range's current entry ("front") into the |front|
// register. Specialized below for ValueMap and ValueSet, whose entry sizes
// differ (24 vs. 16 bytes).
8630 template <class OrderedHashTable>
8631 static void RangeFront(MacroAssembler&, Register, Register, Register);
8633 template <>
8634 void RangeFront<ValueMap>(MacroAssembler& masm, Register range, Register i,
8635 Register front) {
// front = &range->ht->data[0].
8636 masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
8637 masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);
8639 MOZ_ASSERT(ValueMap::offsetOfImplDataElement() == 0,
8640 "offsetof(Data, element) is 0");
8641 static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
// front += i * 24, computed as (i * 3) << 3. Note: this clobbers |i|.
8642 masm.mulBy3(i, i);
8643 masm.lshiftPtr(Imm32(3), i);
8644 masm.addPtr(i, front);
8647 template <>
8648 void RangeFront<ValueSet>(MacroAssembler& masm, Register range, Register i,
8649 Register front) {
// front = &range->ht->data[0].
8650 masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
8651 masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);
8653 MOZ_ASSERT(ValueSet::offsetOfImplDataElement() == 0,
8654 "offsetof(Data, element) is 0");
8655 static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
// front += i * 16, computed as i << 4. Note: this clobbers |i|.
8656 masm.lshiftPtr(Imm32(4), i);
8657 masm.addPtr(i, front);
// Advances the range past its current entry: bumps the popped-entry count,
// then walks |i| (and |front| in lockstep) forward past empty (magic-tagged)
// entries until a live entry or |dataLength| is reached, finally storing the
// new |i| back into the range.
8660 template <class OrderedHashTable>
8661 static void RangePopFront(MacroAssembler& masm, Register range, Register front,
8662 Register dataLength, Register temp) {
8663 Register i = temp;
8665 masm.add32(Imm32(1),
8666 Address(range, OrderedHashTable::Range::offsetOfCount()));
8668 masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);
8670 Label done, seek;
8671 masm.bind(&seek);
8672 masm.add32(Imm32(1), i);
8673 masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);
8675 // We can add sizeof(Data) to |front| to select the next element, because
8676 // |front| and |range.ht.data[i]| point to the same location.
8677 MOZ_ASSERT(OrderedHashTable::offsetOfImplDataElement() == 0,
8678 "offsetof(Data, element) is 0");
8679 masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);
// Removed entries have their key set to the JS_HASH_KEY_EMPTY magic value;
// keep seeking past them.
8681 masm.branchTestMagic(Assembler::Equal,
8682 Address(front, OrderedHashTable::offsetOfEntryKey()),
8683 JS_HASH_KEY_EMPTY, &seek);
8685 masm.bind(&done);
8686 masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
// Unlinks |range| from its hash table's doubly-linked list of ranges
// (*range->prevp = range->next; next->prevp = prevp if next is non-null) and
// frees the range's storage — unless the iterator object lives in the
// nursery, in which case freeing is skipped here.
8689 template <class OrderedHashTable>
8690 static inline void RangeDestruct(MacroAssembler& masm, Register iter,
8691 Register range, Register temp0,
8692 Register temp1) {
8693 Register next = temp0;
8694 Register prevp = temp1;
8696 masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
8697 masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
8698 masm.storePtr(next, Address(prevp, 0));
8700 Label hasNoNext;
8701 masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);
8703 masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));
8705 masm.bind(&hasNoNext);
8707 Label nurseryAllocated;
8708 masm.branchPtrInNurseryChunk(Assembler::Equal, iter, temp0,
8709 &nurseryAllocated);
8711 masm.callFreeStub(range);
8713 masm.bind(&nurseryAllocated);
// Copies the current map entry's key and value into the result object's
// first two fixed elements. Pre-barriers guard the overwritten slots; a
// post-write barrier is emitted if either stored value is a nursery cell.
8716 template <>
8717 void CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result,
8718 Register temp,
8719 Register front) {
8720 size_t elementsOffset = NativeObject::offsetOfFixedElements();
8722 Address keyAddress(front, ValueMap::Entry::offsetOfKey());
8723 Address valueAddress(front, ValueMap::Entry::offsetOfValue());
8724 Address keyElemAddress(result, elementsOffset);
8725 Address valueElemAddress(result, elementsOffset + sizeof(Value));
8726 masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
8727 masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
8728 masm.storeValue(keyAddress, keyElemAddress, temp);
8729 masm.storeValue(valueAddress, valueElemAddress, temp);
// Barrier needed if the key OR the value is a nursery cell; skipped only
// when both tests fail.
8731 Label emitBarrier, skipBarrier;
8732 masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp,
8733 &emitBarrier);
8734 masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp,
8735 &skipBarrier);
8737 masm.bind(&emitBarrier);
8738 saveVolatile(temp);
8739 emitPostWriteBarrier(result);
8740 restoreVolatile(temp);
8742 masm.bind(&skipBarrier);
// Copies the current set entry's key into the result object's first fixed
// element, with a pre-barrier on the overwritten slot and a post-write
// barrier if the stored key is a nursery cell.
8745 template <>
8746 void CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result,
8747 Register temp,
8748 Register front) {
8749 size_t elementsOffset = NativeObject::offsetOfFixedElements();
8751 Address keyAddress(front, ValueSet::offsetOfEntryKey());
8752 Address keyElemAddress(result, elementsOffset);
8753 masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
8754 masm.storeValue(keyAddress, keyElemAddress, temp);
8756 Label skipBarrier;
8757 masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp,
8758 &skipBarrier);
8760 saveVolatile(temp);
8761 emitPostWriteBarrier(result);
8762 restoreVolatile(temp);
8764 masm.bind(&skipBarrier);
// Inline implementation of the GetNextEntryForIterator intrinsic: loads the
// iterator's Range, and either (a) writes the current entry into |result|,
// advances the range, and sets |output| = 0, or (b) tears down an exhausted
// range, nulls the iterator's range slot, and sets |output| = 1 (done).
8767 template <class IteratorObject, class OrderedHashTable>
8768 void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) {
8769 Register iter = ToRegister(lir->iter());
8770 Register result = ToRegister(lir->result());
8771 Register temp = ToRegister(lir->temp0());
8772 Register dataLength = ToRegister(lir->temp1());
8773 Register range = ToRegister(lir->temp2());
8774 Register output = ToRegister(lir->output());
8776 #ifdef DEBUG
8777 // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
8778 // only called with the correct iterator class. Assert here all self-
8779 // hosted callers of GetNextEntryForIterator perform this class check.
8780 // No Spectre mitigations are needed because this is DEBUG-only code.
8781 Label success;
8782 masm.branchTestObjClassNoSpectreMitigations(
8783 Assembler::Equal, iter, &IteratorObject::class_, temp, &success);
8784 masm.assumeUnreachable("Iterator object should have the correct class.");
8785 masm.bind(&success);
8786 #endif
8788 masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(
8789 IteratorObject::RangeSlot)),
8790 range);
// A null range means a previous call already finished the iteration.
8792 Label iterAlreadyDone, iterDone, done;
8793 masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);
8795 masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
8796 masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()),
8797 dataLength);
8798 masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()),
8799 dataLength);
8800 masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);
// |iter| is spilled so its register can be reused for |front| below.
8802 masm.Push(iter);
8804 Register front = iter;
8805 RangeFront<OrderedHashTable>(masm, range, temp, front);
8807 emitLoadIteratorValues<OrderedHashTable>(result, temp, front);
8809 RangePopFront<OrderedHashTable>(masm, range, front, dataLength, temp);
8811 masm.Pop(iter);
// output = 0: an entry was produced.
8812 masm.move32(Imm32(0), output);
8814 masm.jump(&done);
8816 masm.bind(&iterDone);
8818 RangeDestruct<OrderedHashTable>(masm, iter, range, temp, dataLength);
// Clear the range slot so subsequent calls take the already-done path.
8820 masm.storeValue(PrivateValue(nullptr),
8821 Address(iter, NativeObject::getFixedSlotOffset(
8822 IteratorObject::RangeSlot)));
8824 masm.bind(&iterAlreadyDone);
// output = 1: iteration is complete.
8826 masm.move32(Imm32(1), output);
8828 masm.bind(&done);
// Dispatches to the Map or Set instantiation of emitGetNextEntryForIterator
// based on the MIR node's mode.
8831 void CodeGenerator::visitGetNextEntryForIterator(
8832 LGetNextEntryForIterator* lir) {
8833 if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
8834 emitGetNextEntryForIterator<MapIteratorObject, ValueMap>(lir);
8835 } else {
8836 MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
8837 emitGetNextEntryForIterator<SetIteratorObject, ValueSet>(lir);
8841 // The point of these is to inform Ion of where these values already are; they
8842 // don't normally generate (much) code.
// (visitWasmStackResultArea below is the one stack-result case that does emit
// code, to zero out ref-typed results.)
8843 void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
8844 void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
8845 void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
// Zeroes out every ref-typed slot in the wasm stack-result area so a GC
// cannot observe garbage pointers there. The zero register is materialized
// lazily, only once the first ref result is seen.
8847 void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
8848 LAllocation* output = lir->getDef(0)->output();
8849 MOZ_ASSERT(output->isStackArea());
8850 bool tempInit = false;
8851 for (auto iter = output->toStackArea()->results(); iter; iter.next()) {
8852 // Zero out ref stack results.
8853 if (iter.isWasmAnyRef()) {
8854 Register temp = ToRegister(lir->temp0());
8855 if (!tempInit) {
8856 masm.xorPtr(temp, temp);
8857 tempInit = true;
8859 masm.storePtr(temp, ToAddress(iter.alloc()));
// On 64-bit targets, widens an Int32 register result so the upper 32 bits
// are in the canonical state expected by later code.
8864 void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
8865 #ifdef JS_64BIT
8866 if (MWasmRegisterResult* mir = lir->mir()) {
8867 if (mir->type() == MIRType::Int32) {
8868 masm.widenInt32(ToRegister(lir->output()));
8871 #endif
// Emits a wasm call for every callee kind (direct func, import, asm.js or
// wasm table-indirect, builtin, builtin instance method, funcref). Also
// handles: try-note bookkeeping for calls inside a wasm try block, tail
// (return) calls that exit early, safepoint and stack-map recording, and
// reloading the instance / pinned registers plus realm switching after
// callees that do not preserve them.
8874 void CodeGenerator::visitWasmCall(LWasmCall* lir) {
8875 const MWasmCallBase* callBase = lir->callBase();
8876 bool isReturnCall = lir->isReturnCall();
8878 // If this call is in Wasm try code block, initialise a wasm::TryNote for this
8879 // call.
8880 bool inTry = callBase->inTry();
8881 if (inTry) {
8882 size_t tryNoteIndex = callBase->tryNoteIndex();
8883 wasm::TryNoteVector& tryNotes = masm.tryNotes();
8884 wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
8885 tryNote.setTryBodyBegin(masm.currentOffset());
8888 MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment ==
8890 static_assert(
8891 WasmStackAlignment >= ABIStackAlignment &&
8892 WasmStackAlignment % ABIStackAlignment == 0,
8893 "The wasm stack alignment should subsume the ABI-required alignment");
8895 #ifdef DEBUG
// Dynamically verify the stack-pointer alignment claimed above.
8896 Label ok;
8897 masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
8898 masm.breakpoint();
8899 masm.bind(&ok);
8900 #endif
8902 // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
8903 // instance and pinned regs. The only case where where we don't have to
8904 // reload the instance and pinned regs is when the callee preserves them.
8905 bool reloadRegs = true;
8906 bool switchRealm = true;
8908 const wasm::CallSiteDesc& desc = callBase->desc();
8909 const wasm::CalleeDesc& callee = callBase->callee();
8910 CodeOffset retOffset;
8911 CodeOffset secondRetOffset;
8912 switch (callee.which()) {
8913 case wasm::CalleeDesc::Func:
8914 #ifdef ENABLE_WASM_TAIL_CALLS
8915 if (isReturnCall) {
8916 ReturnCallAdjustmentInfo retCallInfo(
8917 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
8918 masm.wasmReturnCall(desc, callee.funcIndex(), retCallInfo);
8919 // The rest of the method is unnecessary for a return call.
8920 return;
8922 #endif
8923 MOZ_ASSERT(!isReturnCall);
// Same-module direct calls preserve instance and pinned registers.
8924 retOffset = masm.call(desc, callee.funcIndex());
8925 reloadRegs = false;
8926 switchRealm = false;
8927 break;
8928 case wasm::CalleeDesc::Import:
8929 #ifdef ENABLE_WASM_TAIL_CALLS
8930 if (isReturnCall) {
8931 ReturnCallAdjustmentInfo retCallInfo(
8932 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
8933 masm.wasmReturnCallImport(desc, callee, retCallInfo);
8934 // The rest of the method is unnecessary for a return call.
8935 return;
8937 #endif
8938 MOZ_ASSERT(!isReturnCall);
8939 retOffset = masm.wasmCallImport(desc, callee);
8940 break;
8941 case wasm::CalleeDesc::AsmJSTable:
8942 retOffset = masm.asmCallIndirect(desc, callee);
8943 break;
8944 case wasm::CalleeDesc::WasmTable: {
// Out-of-line traps for table-index bounds failures and (when the
// platform lacks a heap register) null-entry failures. The OOL code is
// attached to the catchable, return-call, or uncatchable MIR node as
// appropriate.
8945 Label* boundsCheckFailed = nullptr;
8946 if (lir->needsBoundsCheck()) {
8947 OutOfLineAbortingWasmTrap* ool =
8948 new (alloc()) OutOfLineAbortingWasmTrap(
8949 wasm::BytecodeOffset(desc.lineOrBytecode()),
8950 wasm::Trap::OutOfBounds);
8951 if (lir->isCatchable()) {
8952 addOutOfLineCode(ool, lir->mirCatchable());
8953 } else if (isReturnCall) {
8954 #ifdef ENABLE_WASM_TAIL_CALLS
8955 addOutOfLineCode(ool, lir->mirReturnCall());
8956 #else
8957 MOZ_CRASH("Return calls are disabled.");
8958 #endif
8959 } else {
8960 addOutOfLineCode(ool, lir->mirUncatchable());
8962 boundsCheckFailed = ool->entry();
8964 Label* nullCheckFailed = nullptr;
8965 #ifndef WASM_HAS_HEAPREG
8967 OutOfLineAbortingWasmTrap* ool =
8968 new (alloc()) OutOfLineAbortingWasmTrap(
8969 wasm::BytecodeOffset(desc.lineOrBytecode()),
8970 wasm::Trap::IndirectCallToNull);
8971 if (lir->isCatchable()) {
8972 addOutOfLineCode(ool, lir->mirCatchable());
8973 } else if (isReturnCall) {
8974 # ifdef ENABLE_WASM_TAIL_CALLS
8975 addOutOfLineCode(ool, lir->mirReturnCall());
8976 # else
8977 MOZ_CRASH("Return calls are disabled.");
8978 # endif
8979 } else {
8980 addOutOfLineCode(ool, lir->mirUncatchable());
8982 nullCheckFailed = ool->entry();
8984 #endif
8985 #ifdef ENABLE_WASM_TAIL_CALLS
8986 if (isReturnCall) {
8987 ReturnCallAdjustmentInfo retCallInfo(
8988 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
8989 masm.wasmReturnCallIndirect(desc, callee, boundsCheckFailed,
8990 nullCheckFailed, mozilla::Nothing(),
8991 retCallInfo);
8992 // The rest of the method is unnecessary for a return call.
8993 return;
8995 #endif
8996 MOZ_ASSERT(!isReturnCall);
8997 masm.wasmCallIndirect(desc, callee, boundsCheckFailed, nullCheckFailed,
8998 lir->tableSize(), &retOffset, &secondRetOffset);
8999 // Register reloading and realm switching are handled dynamically inside
9000 // wasmCallIndirect. There are two return offsets, one for each call
9001 // instruction (fast path and slow path).
9002 reloadRegs = false;
9003 switchRealm = false;
9004 break;
9006 case wasm::CalleeDesc::Builtin:
9007 retOffset = masm.call(desc, callee.builtin());
9008 reloadRegs = false;
9009 switchRealm = false;
9010 break;
9011 case wasm::CalleeDesc::BuiltinInstanceMethod:
9012 retOffset = masm.wasmCallBuiltinInstanceMethod(
9013 desc, callBase->instanceArg(), callee.builtin(),
9014 callBase->builtinMethodFailureMode());
9015 switchRealm = false;
9016 break;
9017 case wasm::CalleeDesc::FuncRef:
9018 #ifdef ENABLE_WASM_TAIL_CALLS
9019 if (isReturnCall) {
9020 ReturnCallAdjustmentInfo retCallInfo(
9021 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
9022 masm.wasmReturnCallRef(desc, callee, retCallInfo);
9023 // The rest of the method is unnecessary for a return call.
9024 return;
9026 #endif
9027 MOZ_ASSERT(!isReturnCall);
9028 // Register reloading and realm switching are handled dynamically inside
9029 // wasmCallRef. There are two return offsets, one for each call
9030 // instruction (fast path and slow path).
9031 masm.wasmCallRef(desc, callee, &retOffset, &secondRetOffset);
9032 reloadRegs = false;
9033 switchRealm = false;
9034 break;
9037 // Note the assembler offset for the associated LSafePoint.
9038 MOZ_ASSERT(!isReturnCall);
9039 markSafepointAt(retOffset.offset(), lir);
9041 // Now that all the outbound in-memory args are on the stack, note the
9042 // required lower boundary point of the associated StackMap.
9043 uint32_t framePushedAtStackMapBase =
9044 masm.framePushed() -
9045 wasm::AlignStackArgAreaSize(callBase->stackArgAreaSizeUnaligned());
9046 lir->safepoint()->setFramePushedAtStackMapBase(framePushedAtStackMapBase);
9047 MOZ_ASSERT(lir->safepoint()->wasmSafepointKind() ==
9048 WasmSafepointKind::LirCall);
9050 // Note the assembler offset and framePushed for use by the adjunct
9051 // LSafePoint, see visitor for LWasmCallIndirectAdjunctSafepoint below.
9052 if (callee.which() == wasm::CalleeDesc::WasmTable) {
9053 lir->adjunctSafepoint()->recordSafepointInfo(secondRetOffset,
9054 framePushedAtStackMapBase);
9057 if (reloadRegs) {
9058 masm.loadPtr(
9059 Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
9060 InstanceReg);
9061 masm.loadWasmPinnedRegsFromInstance();
9062 if (switchRealm) {
9063 masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
9065 } else {
9066 MOZ_ASSERT(!switchRealm);
9069 #ifdef ENABLE_WASM_TAIL_CALLS
9070 switch (callee.which()) {
9071 case wasm::CalleeDesc::Func:
9072 case wasm::CalleeDesc::Import:
9073 case wasm::CalleeDesc::WasmTable:
9074 case wasm::CalleeDesc::FuncRef:
9075 // Stack allocation could change during Wasm (return) calls,
9076 // recover pre-call state.
9077 masm.freeStackTo(masm.framePushed());
9078 break;
9079 default:
9080 break;
9082 #endif // ENABLE_WASM_TAIL_CALLS
9084 if (inTry) {
9085 // Set the end of the try note range
9086 size_t tryNoteIndex = callBase->tryNoteIndex();
9087 wasm::TryNoteVector& tryNotes = masm.tryNotes();
9088 wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
9090 // Don't set the end of the try note if we've OOM'ed, as the above
9091 // instructions may not have been emitted, which will trigger an assert
9092 // about zero-length try-notes. This is okay as this compilation will be
9093 // thrown away.
9094 if (!masm.oom()) {
9095 tryNote.setTryBodyEnd(masm.currentOffset());
9098 // This instruction or the adjunct safepoint must be the last instruction
9099 // in the block. No other instructions may be inserted.
9100 LBlock* block = lir->block();
9101 MOZ_RELEASE_ASSERT(*block->rbegin() == lir ||
9102 (block->rbegin()->isWasmCallIndirectAdjunctSafepoint() &&
9103 *(++block->rbegin()) == lir));
9105 // Jump to the fallthrough block
9106 jumpToBlock(lir->mirCatchable()->getSuccessor(
9107 MWasmCallCatchable::FallthroughBranchIndex));
// Records the landing-pad entry point for a catchable wasm call's try note.
// Asserts that this block immediately follows the call block with nothing
// (except a move group) in between, so no instructions are skipped when an
// exception lands here.
9111 void CodeGenerator::visitWasmCallLandingPrePad(LWasmCallLandingPrePad* lir) {
9112 LBlock* block = lir->block();
9113 MWasmCallLandingPrePad* mir = lir->mir();
9114 MBasicBlock* mirBlock = mir->block();
9115 MBasicBlock* callMirBlock = mir->callBlock();
9117 // This block must be the pre-pad successor of the call block. No blocks may
9118 // be inserted between us, such as for critical edge splitting.
9119 MOZ_RELEASE_ASSERT(mirBlock == callMirBlock->getSuccessor(
9120 MWasmCallCatchable::PrePadBranchIndex));
9122 // This instruction or a move group must be the first instruction in the
9123 // block. No other instructions may be inserted.
9124 MOZ_RELEASE_ASSERT(*block->begin() == lir || (block->begin()->isMoveGroup() &&
9125 *(++block->begin()) == lir));
9127 wasm::TryNoteVector& tryNotes = masm.tryNotes();
9128 wasm::TryNote& tryNote = tryNotes[mir->tryNoteIndex()];
9129 // Set the entry point for the call try note to be the beginning of this
9130 // block. The above assertions (and assertions in visitWasmCall) guarantee
9131 // that we are not skipping over instructions that should be executed.
9132 tryNote.setLandingPad(block->label()->offset(), masm.framePushed());
// Registers the second safepoint of an indirect wasm call (the slow-path
// call instruction), using the offset and framePushed recorded by
// visitWasmCall above.
9135 void CodeGenerator::visitWasmCallIndirectAdjunctSafepoint(
9136 LWasmCallIndirectAdjunctSafepoint* lir) {
9137 markSafepointAt(lir->safepointLocation().offset(), lir);
9138 lir->safepoint()->setFramePushedAtStackMapBase(
9139 lir->framePushedAtStackMapBase());
// If |ins| carries a trap site, records a NullPointerDereference trap entry
// for the instruction at |fco|, so the signal handler can turn a fault on
// that instruction into a wasm trap. No-op when there is no trap site.
9142 template <typename InstructionWithMaybeTrapSite>
9143 void EmitSignalNullCheckTrapSite(MacroAssembler& masm,
9144 InstructionWithMaybeTrapSite* ins,
9145 FaultingCodeOffset fco,
9146 wasm::TrapMachineInsn tmi) {
9147 if (!ins->maybeTrap()) {
9148 return;
9150 wasm::BytecodeOffset trapOffset(ins->maybeTrap()->offset);
9151 masm.append(wasm::Trap::NullPointerDereference,
9152 wasm::TrapSite(tmi, fco, trapOffset));
// Loads a wasm value of the given MIRType from |addr| into |dst|, applying
// the requested widening for sub-word integer loads, and records a
// null-check trap site for each emitted memory instruction.
9155 template <typename InstructionWithMaybeTrapSite, class AddressOrBaseIndex>
9156 void CodeGenerator::emitWasmValueLoad(InstructionWithMaybeTrapSite* ins,
9157 MIRType type, MWideningOp wideningOp,
9158 AddressOrBaseIndex addr,
9159 AnyRegister dst) {
9160 FaultingCodeOffset fco;
9161 switch (type) {
9162 case MIRType::Int32:
9163 switch (wideningOp) {
9164 case MWideningOp::None:
9165 fco = masm.load32(addr, dst.gpr());
9166 EmitSignalNullCheckTrapSite(masm, ins, fco,
9167 wasm::TrapMachineInsn::Load32);
9168 break;
9169 case MWideningOp::FromU16:
9170 fco = masm.load16ZeroExtend(addr, dst.gpr());
9171 EmitSignalNullCheckTrapSite(masm, ins, fco,
9172 wasm::TrapMachineInsn::Load16);
9173 break;
9174 case MWideningOp::FromS16:
9175 fco = masm.load16SignExtend(addr, dst.gpr());
9176 EmitSignalNullCheckTrapSite(masm, ins, fco,
9177 wasm::TrapMachineInsn::Load16);
9178 break;
9179 case MWideningOp::FromU8:
9180 fco = masm.load8ZeroExtend(addr, dst.gpr());
9181 EmitSignalNullCheckTrapSite(masm, ins, fco,
9182 wasm::TrapMachineInsn::Load8);
9183 break;
9184 case MWideningOp::FromS8:
9185 fco = masm.load8SignExtend(addr, dst.gpr());
9186 EmitSignalNullCheckTrapSite(masm, ins, fco,
9187 wasm::TrapMachineInsn::Load8);
9188 break;
9189 default:
9190 MOZ_CRASH("unexpected widening op in ::visitWasmLoadElement");
9192 break;
9193 case MIRType::Float32:
9194 MOZ_ASSERT(wideningOp == MWideningOp::None);
9195 fco = masm.loadFloat32(addr, dst.fpu());
9196 EmitSignalNullCheckTrapSite(masm, ins, fco,
9197 wasm::TrapMachineInsn::Load32);
9198 break;
9199 case MIRType::Double:
9200 MOZ_ASSERT(wideningOp == MWideningOp::None);
9201 fco = masm.loadDouble(addr, dst.fpu());
9202 EmitSignalNullCheckTrapSite(masm, ins, fco,
9203 wasm::TrapMachineInsn::Load64);
9204 break;
9205 case MIRType::Pointer:
9206 case MIRType::WasmAnyRef:
9207 case MIRType::WasmArrayData:
9208 MOZ_ASSERT(wideningOp == MWideningOp::None);
9209 fco = masm.loadPtr(addr, dst.gpr());
9210 EmitSignalNullCheckTrapSite(masm, ins, fco,
9211 wasm::TrapMachineInsnForLoadWord());
9212 break;
9213 default:
9214 MOZ_CRASH("unexpected type in ::emitWasmValueLoad");
// Stores a wasm value of the given MIRType from |src| to |addr|, applying
// the requested narrowing for sub-word integer stores, and records a
// null-check trap site for each emitted memory instruction. Ref-typed
// stores are rejected here; they must use LWasmStoreElementRef.
9218 template <typename InstructionWithMaybeTrapSite, class AddressOrBaseIndex>
9219 void CodeGenerator::emitWasmValueStore(InstructionWithMaybeTrapSite* ins,
9220 MIRType type, MNarrowingOp narrowingOp,
9221 AnyRegister src,
9222 AddressOrBaseIndex addr) {
9223 FaultingCodeOffset fco;
9224 switch (type) {
9225 case MIRType::Int32:
9226 switch (narrowingOp) {
9227 case MNarrowingOp::None:
9228 fco = masm.store32(src.gpr(), addr);
9229 EmitSignalNullCheckTrapSite(masm, ins, fco,
9230 wasm::TrapMachineInsn::Store32);
9231 break;
9232 case MNarrowingOp::To16:
9233 fco = masm.store16(src.gpr(), addr);
9234 EmitSignalNullCheckTrapSite(masm, ins, fco,
9235 wasm::TrapMachineInsn::Store16);
9236 break;
9237 case MNarrowingOp::To8:
9238 fco = masm.store8(src.gpr(), addr);
9239 EmitSignalNullCheckTrapSite(masm, ins, fco,
9240 wasm::TrapMachineInsn::Store8);
9241 break;
9242 default:
9243 MOZ_CRASH();
9245 break;
9246 case MIRType::Float32:
9247 fco = masm.storeFloat32(src.fpu(), addr);
9248 EmitSignalNullCheckTrapSite(masm, ins, fco,
9249 wasm::TrapMachineInsn::Store32);
9250 break;
9251 case MIRType::Double:
9252 fco = masm.storeDouble(src.fpu(), addr);
9253 EmitSignalNullCheckTrapSite(masm, ins, fco,
9254 wasm::TrapMachineInsn::Store64);
9255 break;
9256 case MIRType::Pointer:
9257 // This could be correct, but it would be a new usage, so check carefully.
9258 MOZ_CRASH("Unexpected type in ::emitWasmValueStore.");
9259 case MIRType::WasmAnyRef:
9260 MOZ_CRASH("Bad type in ::emitWasmValueStore. Use LWasmStoreElementRef.");
9261 default:
9262 MOZ_CRASH("unexpected type in ::emitWasmValueStore");
// Loads a value from a fixed offset inside a wasm container object. SIMD
// values are handled separately (unaligned 128-bit load); everything else
// goes through emitWasmValueLoad.
9266 void CodeGenerator::visitWasmLoadSlot(LWasmLoadSlot* ins) {
9267 MIRType type = ins->type();
9268 MWideningOp wideningOp = ins->wideningOp();
9269 Register container = ToRegister(ins->containerRef());
9270 Address addr(container, ins->offset());
9271 AnyRegister dst = ToAnyRegister(ins->output());
9273 #ifdef ENABLE_WASM_SIMD
9274 if (type == MIRType::Simd128) {
9275 MOZ_ASSERT(wideningOp == MWideningOp::None);
9276 FaultingCodeOffset fco = masm.loadUnalignedSimd128(addr, dst.fpu());
9277 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load128);
9278 return;
9280 #endif
9281 emitWasmValueLoad(ins, type, wideningOp, addr, dst);
// Loads an element at |base + index << scale|. SIMD elements are 16 bytes,
// which exceeds the maximum BaseIndex scale, so the byte offset
// (index * 16) is computed in a temp register first.
9284 void CodeGenerator::visitWasmLoadElement(LWasmLoadElement* ins) {
9285 MIRType type = ins->type();
9286 MWideningOp wideningOp = ins->wideningOp();
9287 Scale scale = ins->scale();
9288 Register base = ToRegister(ins->base());
9289 Register index = ToRegister(ins->index());
9290 AnyRegister dst = ToAnyRegister(ins->output());
9292 #ifdef ENABLE_WASM_SIMD
9293 if (type == MIRType::Simd128) {
9294 MOZ_ASSERT(wideningOp == MWideningOp::None);
9295 FaultingCodeOffset fco;
9296 Register temp = ToRegister(ins->temp0());
9297 masm.movePtr(index, temp);
9298 masm.lshiftPtr(Imm32(4), temp);
9299 fco = masm.loadUnalignedSimd128(BaseIndex(base, temp, Scale::TimesOne),
9300 dst.fpu());
9301 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load128);
9302 return;
9304 #endif
9305 emitWasmValueLoad(ins, type, wideningOp, BaseIndex(base, index, scale), dst);
// Stores a value at a fixed offset inside a wasm container object. Only
// Int32 stores may narrow; SIMD values use an unaligned 128-bit store.
9308 void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
9309 MIRType type = ins->type();
9310 MNarrowingOp narrowingOp = ins->narrowingOp();
9311 Register container = ToRegister(ins->containerRef());
9312 Address addr(container, ins->offset());
9313 AnyRegister src = ToAnyRegister(ins->value());
9314 if (type != MIRType::Int32) {
9315 MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
9318 #ifdef ENABLE_WASM_SIMD
9319 if (type == MIRType::Simd128) {
9320 FaultingCodeOffset fco = masm.storeUnalignedSimd128(src.fpu(), addr);
9321 EmitSignalNullCheckTrapSite(masm, ins, fco,
9322 wasm::TrapMachineInsn::Store128);
9323 return;
9325 #endif
9326 emitWasmValueStore(ins, type, narrowingOp, src, addr);
// Stores an element at |base + index << scale|. As with loads, 16-byte SIMD
// elements compute the byte offset (index * 16) in a temp register because
// it exceeds the maximum BaseIndex scale.
9329 void CodeGenerator::visitWasmStoreElement(LWasmStoreElement* ins) {
9330 MIRType type = ins->type();
9331 MNarrowingOp narrowingOp = ins->narrowingOp();
9332 Scale scale = ins->scale();
9333 Register base = ToRegister(ins->base());
9334 Register index = ToRegister(ins->index());
9335 AnyRegister src = ToAnyRegister(ins->value());
9336 if (type != MIRType::Int32) {
9337 MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
9340 #ifdef ENABLE_WASM_SIMD
9341 if (type == MIRType::Simd128) {
9342 Register temp = ToRegister(ins->temp0());
9343 masm.movePtr(index, temp);
9344 masm.lshiftPtr(Imm32(4), temp);
9345 FaultingCodeOffset fco = masm.storeUnalignedSimd128(
9346 src.fpu(), BaseIndex(base, temp, Scale::TimesOne));
9347 EmitSignalNullCheckTrapSite(masm, ins, fco,
9348 wasm::TrapMachineInsn::Store128);
9349 return;
9351 #endif
9352 emitWasmValueStore(ins, type, narrowingOp, src,
9353 BaseIndex(base, index, scale));
9356 void CodeGenerator::visitWasmLoadTableElement(LWasmLoadTableElement* ins) {
9357 Register elements = ToRegister(ins->elements());
9358 Register index = ToRegister(ins->index());
9359 Register output = ToRegister(ins->output());
9360 masm.loadPtr(BaseIndex(elements, index, ScalePointer), output);
9363 void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
9364 masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
9365 masm.addPtr(Imm32(int32_t(ins->offset())), ToRegister(ins->output()));
9368 void CodeGenerator::visitWasmDerivedIndexPointer(
9369 LWasmDerivedIndexPointer* ins) {
9370 Register base = ToRegister(ins->base());
9371 Register index = ToRegister(ins->index());
9372 Register output = ToRegister(ins->output());
9373 masm.computeEffectiveAddress(BaseIndex(base, index, ins->scale()), output);
// Stores a GC reference to |valueBase + offset|, emitting the wasm
// pre-barrier (guarded, so it only runs when required) before the store and
// recording a null-check trap site on the store itself. The post-barrier is
// emitted by a separate LIR instruction.
9376 void CodeGenerator::visitWasmStoreRef(LWasmStoreRef* ins) {
9377 Register instance = ToRegister(ins->instance());
9378 Register valueBase = ToRegister(ins->valueBase());
9379 size_t offset = ins->offset();
9380 Register value = ToRegister(ins->value());
9381 Register temp = ToRegister(ins->temp0());
9383 if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
9384 Label skipPreBarrier;
9385 wasm::EmitWasmPreBarrierGuard(
9386 masm, instance, temp, Address(valueBase, offset), &skipPreBarrier,
9387 ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
9388 wasm::EmitWasmPreBarrierCallImmediate(masm, instance, temp, valueBase,
9389 offset);
9390 masm.bind(&skipPreBarrier);
9393 FaultingCodeOffset fco = masm.storePtr(value, Address(valueBase, offset));
9394 EmitSignalNullCheckTrapSite(masm, ins, fco,
9395 wasm::TrapMachineInsnForStoreWord());
9396 // The postbarrier is handled separately.
// Stores a GC reference to |base + index * sizeof(void*)|, with the same
// pre-barrier/trap-site structure as visitWasmStoreRef but using an indexed
// address. The post-barrier is emitted by a separate LIR instruction.
9399 void CodeGenerator::visitWasmStoreElementRef(LWasmStoreElementRef* ins) {
9400 Register instance = ToRegister(ins->instance());
9401 Register base = ToRegister(ins->base());
9402 Register index = ToRegister(ins->index());
9403 Register value = ToRegister(ins->value());
9404 Register temp0 = ToTempRegisterOrInvalid(ins->temp0());
9405 Register temp1 = ToTempRegisterOrInvalid(ins->temp1());
9407 BaseIndex addr(base, index, ScalePointer);
9409 if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
9410 Label skipPreBarrier;
9411 wasm::EmitWasmPreBarrierGuard(
9412 masm, instance, temp0, addr, &skipPreBarrier,
9413 ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
9414 wasm::EmitWasmPreBarrierCallIndex(masm, instance, temp0, temp1, addr);
9415 masm.bind(&skipPreBarrier);
9418 FaultingCodeOffset fco = masm.storePtr(value, addr);
9419 EmitSignalNullCheckTrapSite(masm, ins, fco,
9420 wasm::TrapMachineInsnForStoreWord());
9421 // The postbarrier is handled separately.
9424 // Out-of-line path to update the store buffer for wasm references.
// Captures the base register and constant offset of the stored-to location
// so the OOL code can compute |valueBase + valueOffset| and call the
// post-barrier VM function.
9425 class OutOfLineWasmCallPostWriteBarrierImmediate
9426 : public OutOfLineCodeBase<CodeGenerator> {
9427 LInstruction* lir_;
9428 Register valueBase_;
9429 Register temp_;
9430 uint32_t valueOffset_;
9432 public:
9433 OutOfLineWasmCallPostWriteBarrierImmediate(LInstruction* lir,
9434 Register valueBase, Register temp,
9435 uint32_t valueOffset)
9436 : lir_(lir),
9437 valueBase_(valueBase),
9438 temp_(temp),
9439 valueOffset_(valueOffset) {}
9441 void accept(CodeGenerator* codegen) override {
9442 codegen->visitOutOfLineWasmCallPostWriteBarrierImmediate(this);
9445 LInstruction* lir() const { return lir_; }
9446 Register valueBase() const { return valueBase_; }
9447 Register temp() const { return temp_; }
9448 uint32_t valueOffset() const { return valueOffset_; }
// Out-of-line slow path: saves live volatile registers, computes the
// address of the stored-to slot, and calls Instance::postBarrier through
// the wasm ABI to record the edge in the store buffer.
9451 void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrierImmediate(
9452 OutOfLineWasmCallPostWriteBarrierImmediate* ool) {
9453 saveLiveVolatile(ool->lir());
9454 masm.Push(InstanceReg);
9455 int32_t framePushedAfterInstance = masm.framePushed();
9457 // Fold the value offset into the value base
9458 Register valueAddr = ool->valueBase();
9459 Register temp = ool->temp();
9460 masm.computeEffectiveAddress(Address(valueAddr, ool->valueOffset()), temp);
9462 // Call Instance::postBarrier
9463 masm.setupWasmABICall();
9464 masm.passABIArg(InstanceReg);
9465 masm.passABIArg(temp);
9466 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
9467 masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
9468 mozilla::Some(instanceOffset), ABIType::General);
9470 masm.Pop(InstanceReg);
9471 restoreLiveVolatile(ool->lir());
9473 masm.jump(ool->rejoin());
// Emits the inline post-write-barrier guard for a ref stored at a constant
// offset; jumps to the out-of-line path above only when the barrier is
// actually needed.
9476 void CodeGenerator::visitWasmPostWriteBarrierImmediate(
9477 LWasmPostWriteBarrierImmediate* lir) {
9478 Register object = ToRegister(lir->object());
9479 Register value = ToRegister(lir->value());
9480 Register valueBase = ToRegister(lir->valueBase());
9481 Register temp = ToRegister(lir->temp0());
9482 MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
9483 auto* ool = new (alloc()) OutOfLineWasmCallPostWriteBarrierImmediate(
9484 lir, valueBase, temp, lir->valueOffset());
9485 addOutOfLineCode(ool, lir->mir());
9487 wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
9488 ool->rejoin());
9489 masm.jump(ool->entry());
9490 masm.bind(ool->rejoin());
9493 // Out-of-line path to update the store buffer for wasm references.
9494 class OutOfLineWasmCallPostWriteBarrierIndex
9495 : public OutOfLineCodeBase<CodeGenerator> {
9496 LInstruction* lir_;
9497 Register valueBase_;
9498 Register index_;
9499 Register temp_;
9500 uint32_t elemSize_;
9502 public:
9503 OutOfLineWasmCallPostWriteBarrierIndex(LInstruction* lir, Register valueBase,
9504 Register index, Register temp,
9505 uint32_t elemSize)
9506 : lir_(lir),
9507 valueBase_(valueBase),
9508 index_(index),
9509 temp_(temp),
9510 elemSize_(elemSize) {
9511 MOZ_ASSERT(elemSize == 1 || elemSize == 2 || elemSize == 4 ||
9512 elemSize == 8 || elemSize == 16);
9515 void accept(CodeGenerator* codegen) override {
9516 codegen->visitOutOfLineWasmCallPostWriteBarrierIndex(this);
9519 LInstruction* lir() const { return lir_; }
9520 Register valueBase() const { return valueBase_; }
9521 Register index() const { return index_; }
9522 Register temp() const { return temp_; }
9523 uint32_t elemSize() const { return elemSize_; }
9526 void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrierIndex(
9527 OutOfLineWasmCallPostWriteBarrierIndex* ool) {
9528 saveLiveVolatile(ool->lir());
9529 masm.Push(InstanceReg);
9530 int32_t framePushedAfterInstance = masm.framePushed();
9532 // Fold the value offset into the value base
9533 Register temp = ool->temp();
9534 if (ool->elemSize() == 16) {
9535 masm.movePtr(ool->index(), temp);
9536 masm.lshiftPtr(Imm32(4), temp);
9537 masm.addPtr(ool->valueBase(), temp);
9538 } else {
9539 masm.computeEffectiveAddress(BaseIndex(ool->valueBase(), ool->index(),
9540 ScaleFromElemWidth(ool->elemSize())),
9541 temp);
9544 // Call Instance::postBarrier
9545 masm.setupWasmABICall();
9546 masm.passABIArg(InstanceReg);
9547 masm.passABIArg(temp);
9548 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
9549 masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
9550 mozilla::Some(instanceOffset), ABIType::General);
9552 masm.Pop(InstanceReg);
9553 restoreLiveVolatile(ool->lir());
9555 masm.jump(ool->rejoin());
9558 void CodeGenerator::visitWasmPostWriteBarrierIndex(
9559 LWasmPostWriteBarrierIndex* lir) {
9560 Register object = ToRegister(lir->object());
9561 Register value = ToRegister(lir->value());
9562 Register valueBase = ToRegister(lir->valueBase());
9563 Register index = ToRegister(lir->index());
9564 Register temp = ToRegister(lir->temp0());
9565 MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
9566 auto* ool = new (alloc()) OutOfLineWasmCallPostWriteBarrierIndex(
9567 lir, valueBase, index, temp, lir->elemSize());
9568 addOutOfLineCode(ool, lir->mir());
9570 wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
9571 ool->rejoin());
9572 masm.jump(ool->entry());
9573 masm.bind(ool->rejoin());
9576 void CodeGenerator::visitWasmLoadSlotI64(LWasmLoadSlotI64* ins) {
9577 Register container = ToRegister(ins->containerRef());
9578 Address addr(container, ins->offset());
9579 Register64 output = ToOutRegister64(ins);
9580 // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
9581 // transaction will always trap before the other, so it seems safest to
9582 // register both of them as potentially trapping.
9583 #ifdef JS_64BIT
9584 FaultingCodeOffset fco = masm.load64(addr, output);
9585 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
9586 #else
9587 FaultingCodeOffsetPair fcop = masm.load64(addr, output);
9588 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9589 wasm::TrapMachineInsn::Load32);
9590 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9591 wasm::TrapMachineInsn::Load32);
9592 #endif
9595 void CodeGenerator::visitWasmLoadElementI64(LWasmLoadElementI64* ins) {
9596 Register base = ToRegister(ins->base());
9597 Register index = ToRegister(ins->index());
9598 BaseIndex addr(base, index, Scale::TimesEight);
9599 Register64 output = ToOutRegister64(ins);
9600 // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
9601 // transaction will always trap before the other, so it seems safest to
9602 // register both of them as potentially trapping.
9603 #ifdef JS_64BIT
9604 FaultingCodeOffset fco = masm.load64(addr, output);
9605 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
9606 #else
9607 FaultingCodeOffsetPair fcop = masm.load64(addr, output);
9608 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9609 wasm::TrapMachineInsn::Load32);
9610 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9611 wasm::TrapMachineInsn::Load32);
9612 #endif
9615 void CodeGenerator::visitWasmStoreSlotI64(LWasmStoreSlotI64* ins) {
9616 Register container = ToRegister(ins->containerRef());
9617 Address addr(container, ins->offset());
9618 Register64 value = ToRegister64(ins->value());
9619 // Either 1 or 2 words. As above we register both transactions in the
9620 // 2-word case.
9621 #ifdef JS_64BIT
9622 FaultingCodeOffset fco = masm.store64(value, addr);
9623 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
9624 #else
9625 FaultingCodeOffsetPair fcop = masm.store64(value, addr);
9626 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9627 wasm::TrapMachineInsn::Store32);
9628 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9629 wasm::TrapMachineInsn::Store32);
9630 #endif
9633 void CodeGenerator::visitWasmStoreElementI64(LWasmStoreElementI64* ins) {
9634 Register base = ToRegister(ins->base());
9635 Register index = ToRegister(ins->index());
9636 BaseIndex addr(base, index, Scale::TimesEight);
9637 Register64 value = ToRegister64(ins->value());
9638 // Either 1 or 2 words. As above we register both transactions in the
9639 // 2-word case.
9640 #ifdef JS_64BIT
9641 FaultingCodeOffset fco = masm.store64(value, addr);
9642 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
9643 #else
9644 FaultingCodeOffsetPair fcop = masm.store64(value, addr);
9645 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9646 wasm::TrapMachineInsn::Store32);
9647 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9648 wasm::TrapMachineInsn::Store32);
9649 #endif
9652 void CodeGenerator::visitArrayBufferByteLength(LArrayBufferByteLength* lir) {
9653 Register obj = ToRegister(lir->object());
9654 Register out = ToRegister(lir->output());
9655 masm.loadArrayBufferByteLengthIntPtr(obj, out);
9658 void CodeGenerator::visitArrayBufferViewLength(LArrayBufferViewLength* lir) {
9659 Register obj = ToRegister(lir->object());
9660 Register out = ToRegister(lir->output());
9661 masm.loadArrayBufferViewLengthIntPtr(obj, out);
9664 void CodeGenerator::visitArrayBufferViewByteOffset(
9665 LArrayBufferViewByteOffset* lir) {
9666 Register obj = ToRegister(lir->object());
9667 Register out = ToRegister(lir->output());
9668 masm.loadArrayBufferViewByteOffsetIntPtr(obj, out);
9671 void CodeGenerator::visitArrayBufferViewElements(
9672 LArrayBufferViewElements* lir) {
9673 Register obj = ToRegister(lir->object());
9674 Register out = ToRegister(lir->output());
9675 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), out);
9678 void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
9679 Register obj = ToRegister(lir->object());
9680 Register out = ToRegister(lir->output());
9682 masm.typedArrayElementSize(obj, out);
9685 void CodeGenerator::visitResizableTypedArrayByteOffsetMaybeOutOfBounds(
9686 LResizableTypedArrayByteOffsetMaybeOutOfBounds* lir) {
9687 Register obj = ToRegister(lir->object());
9688 Register out = ToRegister(lir->output());
9689 Register temp = ToRegister(lir->temp0());
9691 masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, out, temp);
9694 void CodeGenerator::visitResizableTypedArrayLength(
9695 LResizableTypedArrayLength* lir) {
9696 Register obj = ToRegister(lir->object());
9697 Register out = ToRegister(lir->output());
9698 Register temp = ToRegister(lir->temp0());
9700 masm.loadResizableTypedArrayLengthIntPtr(lir->synchronization(), obj, out,
9701 temp);
9704 void CodeGenerator::visitResizableDataViewByteLength(
9705 LResizableDataViewByteLength* lir) {
9706 Register obj = ToRegister(lir->object());
9707 Register out = ToRegister(lir->output());
9708 Register temp = ToRegister(lir->temp0());
9710 masm.loadResizableDataViewByteLengthIntPtr(lir->synchronization(), obj, out,
9711 temp);
9714 void CodeGenerator::visitGrowableSharedArrayBufferByteLength(
9715 LGrowableSharedArrayBufferByteLength* lir) {
9716 Register obj = ToRegister(lir->object());
9717 Register out = ToRegister(lir->output());
9719 // Explicit |byteLength| accesses are seq-consistent atomic loads.
9720 auto sync = Synchronization::Load();
9722 masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, out);
9725 void CodeGenerator::visitGuardResizableArrayBufferViewInBounds(
9726 LGuardResizableArrayBufferViewInBounds* lir) {
9727 Register obj = ToRegister(lir->object());
9728 Register temp = ToRegister(lir->temp0());
9730 Label bail;
9731 masm.branchIfResizableArrayBufferViewOutOfBounds(obj, temp, &bail);
9732 bailoutFrom(&bail, lir->snapshot());
9735 void CodeGenerator::visitGuardResizableArrayBufferViewInBoundsOrDetached(
9736 LGuardResizableArrayBufferViewInBoundsOrDetached* lir) {
9737 Register obj = ToRegister(lir->object());
9738 Register temp = ToRegister(lir->temp0());
9740 Label done, bail;
9741 masm.branchIfResizableArrayBufferViewInBounds(obj, temp, &done);
9742 masm.branchIfHasAttachedArrayBuffer(obj, temp, &bail);
9743 masm.bind(&done);
9744 bailoutFrom(&bail, lir->snapshot());
9747 void CodeGenerator::visitGuardHasAttachedArrayBuffer(
9748 LGuardHasAttachedArrayBuffer* lir) {
9749 Register obj = ToRegister(lir->object());
9750 Register temp = ToRegister(lir->temp0());
9752 Label bail;
9753 masm.branchIfHasDetachedArrayBuffer(obj, temp, &bail);
9754 bailoutFrom(&bail, lir->snapshot());
9757 class OutOfLineGuardNumberToIntPtrIndex
9758 : public OutOfLineCodeBase<CodeGenerator> {
9759 LGuardNumberToIntPtrIndex* lir_;
9761 public:
9762 explicit OutOfLineGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex* lir)
9763 : lir_(lir) {}
9765 void accept(CodeGenerator* codegen) override {
9766 codegen->visitOutOfLineGuardNumberToIntPtrIndex(this);
9768 LGuardNumberToIntPtrIndex* lir() const { return lir_; }
9771 void CodeGenerator::visitGuardNumberToIntPtrIndex(
9772 LGuardNumberToIntPtrIndex* lir) {
9773 FloatRegister input = ToFloatRegister(lir->input());
9774 Register output = ToRegister(lir->output());
9776 if (!lir->mir()->supportOOB()) {
9777 Label bail;
9778 masm.convertDoubleToPtr(input, output, &bail, false);
9779 bailoutFrom(&bail, lir->snapshot());
9780 return;
9783 auto* ool = new (alloc()) OutOfLineGuardNumberToIntPtrIndex(lir);
9784 addOutOfLineCode(ool, lir->mir());
9786 masm.convertDoubleToPtr(input, output, ool->entry(), false);
9787 masm.bind(ool->rejoin());
9790 void CodeGenerator::visitOutOfLineGuardNumberToIntPtrIndex(
9791 OutOfLineGuardNumberToIntPtrIndex* ool) {
9792 // Substitute the invalid index with an arbitrary out-of-bounds index.
9793 masm.movePtr(ImmWord(-1), ToRegister(ool->lir()->output()));
9794 masm.jump(ool->rejoin());
9797 void CodeGenerator::visitStringLength(LStringLength* lir) {
9798 Register input = ToRegister(lir->string());
9799 Register output = ToRegister(lir->output());
9801 masm.loadStringLength(input, output);
9804 void CodeGenerator::visitMinMaxI(LMinMaxI* ins) {
9805 Register first = ToRegister(ins->first());
9806 Register output = ToRegister(ins->output());
9808 MOZ_ASSERT(first == output);
9810 Assembler::Condition cond =
9811 ins->mir()->isMax() ? Assembler::GreaterThan : Assembler::LessThan;
9813 if (ins->second()->isConstant()) {
9814 Label done;
9815 masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done);
9816 masm.move32(Imm32(ToInt32(ins->second())), output);
9817 masm.bind(&done);
9818 } else {
9819 Register second = ToRegister(ins->second());
9820 masm.cmp32Move32(cond, second, first, second, output);
9824 void CodeGenerator::visitMinMaxArrayI(LMinMaxArrayI* ins) {
9825 Register array = ToRegister(ins->array());
9826 Register output = ToRegister(ins->output());
9827 Register temp1 = ToRegister(ins->temp1());
9828 Register temp2 = ToRegister(ins->temp2());
9829 Register temp3 = ToRegister(ins->temp3());
9830 bool isMax = ins->isMax();
9832 Label bail;
9833 masm.minMaxArrayInt32(array, output, temp1, temp2, temp3, isMax, &bail);
9834 bailoutFrom(&bail, ins->snapshot());
9837 void CodeGenerator::visitMinMaxArrayD(LMinMaxArrayD* ins) {
9838 Register array = ToRegister(ins->array());
9839 FloatRegister output = ToFloatRegister(ins->output());
9840 Register temp1 = ToRegister(ins->temp1());
9841 Register temp2 = ToRegister(ins->temp2());
9842 FloatRegister floatTemp = ToFloatRegister(ins->floatTemp());
9843 bool isMax = ins->isMax();
9845 Label bail;
9846 masm.minMaxArrayNumber(array, output, floatTemp, temp1, temp2, isMax, &bail);
9847 bailoutFrom(&bail, ins->snapshot());
9850 // For Abs*, lowering will have tied input to output on platforms where that is
9851 // sensible, and otherwise left them untied.
9853 void CodeGenerator::visitAbsI(LAbsI* ins) {
9854 Register input = ToRegister(ins->input());
9855 Register output = ToRegister(ins->output());
9857 if (ins->mir()->fallible()) {
9858 Label positive;
9859 if (input != output) {
9860 masm.move32(input, output);
9862 masm.branchTest32(Assembler::NotSigned, output, output, &positive);
9863 Label bail;
9864 masm.branchNeg32(Assembler::Overflow, output, &bail);
9865 bailoutFrom(&bail, ins->snapshot());
9866 masm.bind(&positive);
9867 } else {
9868 masm.abs32(input, output);
9872 void CodeGenerator::visitAbsD(LAbsD* ins) {
9873 masm.absDouble(ToFloatRegister(ins->input()), ToFloatRegister(ins->output()));
9876 void CodeGenerator::visitAbsF(LAbsF* ins) {
9877 masm.absFloat32(ToFloatRegister(ins->input()),
9878 ToFloatRegister(ins->output()));
9881 void CodeGenerator::visitPowII(LPowII* ins) {
9882 Register value = ToRegister(ins->value());
9883 Register power = ToRegister(ins->power());
9884 Register output = ToRegister(ins->output());
9885 Register temp0 = ToRegister(ins->temp0());
9886 Register temp1 = ToRegister(ins->temp1());
9888 Label bailout;
9889 masm.pow32(value, power, output, temp0, temp1, &bailout);
9890 bailoutFrom(&bailout, ins->snapshot());
9893 void CodeGenerator::visitPowI(LPowI* ins) {
9894 FloatRegister value = ToFloatRegister(ins->value());
9895 Register power = ToRegister(ins->power());
9897 using Fn = double (*)(double x, int32_t y);
9898 masm.setupAlignedABICall();
9899 masm.passABIArg(value, ABIType::Float64);
9900 masm.passABIArg(power);
9902 masm.callWithABI<Fn, js::powi>(ABIType::Float64);
9903 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9906 void CodeGenerator::visitPowD(LPowD* ins) {
9907 FloatRegister value = ToFloatRegister(ins->value());
9908 FloatRegister power = ToFloatRegister(ins->power());
9910 using Fn = double (*)(double x, double y);
9911 masm.setupAlignedABICall();
9912 masm.passABIArg(value, ABIType::Float64);
9913 masm.passABIArg(power, ABIType::Float64);
9914 masm.callWithABI<Fn, ecmaPow>(ABIType::Float64);
9916 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9919 void CodeGenerator::visitPowOfTwoI(LPowOfTwoI* ins) {
9920 Register power = ToRegister(ins->power());
9921 Register output = ToRegister(ins->output());
9923 uint32_t base = ins->base();
9924 MOZ_ASSERT(mozilla::IsPowerOfTwo(base));
9926 uint32_t n = mozilla::FloorLog2(base);
9927 MOZ_ASSERT(n != 0);
9929 // Hacker's Delight, 2nd edition, theorem D2.
9930 auto ceilingDiv = [](uint32_t x, uint32_t y) { return (x + y - 1) / y; };
9932 // Take bailout if |power| is greater-or-equals |log_y(2^31)| or is negative.
9933 // |2^(n*y) < 2^31| must hold, hence |n*y < 31| resp. |y < 31/n|.
9935 // Note: it's important for this condition to match the code in CacheIR.cpp
9936 // (CanAttachInt32Pow) to prevent failure loops.
9937 bailoutCmp32(Assembler::AboveOrEqual, power, Imm32(ceilingDiv(31, n)),
9938 ins->snapshot());
9940 // Compute (2^n)^y as 2^(n*y) using repeated shifts. We could directly scale
9941 // |power| and perform a single shift, but due to the lack of necessary
9942 // MacroAssembler functionality, like multiplying a register with an
9943 // immediate, we restrict the number of generated shift instructions when
9944 // lowering this operation.
9945 masm.move32(Imm32(1), output);
9946 do {
9947 masm.lshift32(power, output);
9948 n--;
9949 } while (n > 0);
9952 void CodeGenerator::visitSqrtD(LSqrtD* ins) {
9953 FloatRegister input = ToFloatRegister(ins->input());
9954 FloatRegister output = ToFloatRegister(ins->output());
9955 masm.sqrtDouble(input, output);
9958 void CodeGenerator::visitSqrtF(LSqrtF* ins) {
9959 FloatRegister input = ToFloatRegister(ins->input());
9960 FloatRegister output = ToFloatRegister(ins->output());
9961 masm.sqrtFloat32(input, output);
9964 void CodeGenerator::visitSignI(LSignI* ins) {
9965 Register input = ToRegister(ins->input());
9966 Register output = ToRegister(ins->output());
9967 masm.signInt32(input, output);
9970 void CodeGenerator::visitSignD(LSignD* ins) {
9971 FloatRegister input = ToFloatRegister(ins->input());
9972 FloatRegister output = ToFloatRegister(ins->output());
9973 masm.signDouble(input, output);
9976 void CodeGenerator::visitSignDI(LSignDI* ins) {
9977 FloatRegister input = ToFloatRegister(ins->input());
9978 FloatRegister temp = ToFloatRegister(ins->temp0());
9979 Register output = ToRegister(ins->output());
9981 Label bail;
9982 masm.signDoubleToInt32(input, output, temp, &bail);
9983 bailoutFrom(&bail, ins->snapshot());
9986 void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) {
9987 FloatRegister input = ToFloatRegister(ins->input());
9988 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9990 UnaryMathFunction fun = ins->mir()->function();
9991 UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);
9993 masm.setupAlignedABICall();
9995 masm.passABIArg(input, ABIType::Float64);
9996 masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
9997 ABIType::Float64);
10000 void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) {
10001 FloatRegister input = ToFloatRegister(ins->input());
10002 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);
10004 masm.setupAlignedABICall();
10005 masm.passABIArg(input, ABIType::Float32);
10007 using Fn = float (*)(float x);
10008 Fn funptr = nullptr;
10009 CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check;
10010 switch (ins->mir()->function()) {
10011 case UnaryMathFunction::Floor:
10012 funptr = floorf;
10013 check = CheckUnsafeCallWithABI::DontCheckOther;
10014 break;
10015 case UnaryMathFunction::Round:
10016 funptr = math_roundf_impl;
10017 break;
10018 case UnaryMathFunction::Trunc:
10019 funptr = math_truncf_impl;
10020 break;
10021 case UnaryMathFunction::Ceil:
10022 funptr = ceilf;
10023 check = CheckUnsafeCallWithABI::DontCheckOther;
10024 break;
10025 default:
10026 MOZ_CRASH("Unknown or unsupported float32 math function");
10029 masm.callWithABI(DynamicFunction<Fn>(funptr), ABIType::Float32, check);
10032 void CodeGenerator::visitModD(LModD* ins) {
10033 MOZ_ASSERT(!gen->compilingWasm());
10035 FloatRegister lhs = ToFloatRegister(ins->lhs());
10036 FloatRegister rhs = ToFloatRegister(ins->rhs());
10038 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
10040 using Fn = double (*)(double a, double b);
10041 masm.setupAlignedABICall();
10042 masm.passABIArg(lhs, ABIType::Float64);
10043 masm.passABIArg(rhs, ABIType::Float64);
10044 masm.callWithABI<Fn, NumberMod>(ABIType::Float64);
10047 void CodeGenerator::visitModPowTwoD(LModPowTwoD* ins) {
10048 FloatRegister lhs = ToFloatRegister(ins->lhs());
10049 uint32_t divisor = ins->divisor();
10050 MOZ_ASSERT(mozilla::IsPowerOfTwo(divisor));
10052 FloatRegister output = ToFloatRegister(ins->output());
10054 // Compute |n % d| using |copysign(n - (d * trunc(n / d)), n)|.
10056 // This doesn't work if |d| isn't a power of two, because we may lose too much
10057 // precision. For example |Number.MAX_VALUE % 3 == 2|, but
10058 // |3 * trunc(Number.MAX_VALUE / 3) == Infinity|.
10060 Label done;
10062 ScratchDoubleScope scratch(masm);
10064 // Subnormals can lead to performance degradation, which can make calling
10065 // |fmod| faster than this inline implementation. Work around this issue by
10066 // directly returning the input for any value in the interval ]-1, +1[.
10067 Label notSubnormal;
10068 masm.loadConstantDouble(1.0, scratch);
10069 masm.loadConstantDouble(-1.0, output);
10070 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, lhs, scratch,
10071 &notSubnormal);
10072 masm.branchDouble(Assembler::DoubleLessThanOrEqual, lhs, output,
10073 &notSubnormal);
10075 masm.moveDouble(lhs, output);
10076 masm.jump(&done);
10078 masm.bind(&notSubnormal);
10080 if (divisor == 1) {
10081 // The pattern |n % 1 == 0| is used to detect integer numbers. We can skip
10082 // the multiplication by one in this case.
10083 masm.moveDouble(lhs, output);
10084 masm.nearbyIntDouble(RoundingMode::TowardsZero, output, scratch);
10085 masm.subDouble(scratch, output);
10086 } else {
10087 masm.loadConstantDouble(1.0 / double(divisor), scratch);
10088 masm.loadConstantDouble(double(divisor), output);
10090 masm.mulDouble(lhs, scratch);
10091 masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
10092 masm.mulDouble(output, scratch);
10094 masm.moveDouble(lhs, output);
10095 masm.subDouble(scratch, output);
10099 masm.copySignDouble(output, lhs, output);
10100 masm.bind(&done);
10103 void CodeGenerator::visitWasmBuiltinModD(LWasmBuiltinModD* ins) {
10104 masm.Push(InstanceReg);
10105 int32_t framePushedAfterInstance = masm.framePushed();
10107 FloatRegister lhs = ToFloatRegister(ins->lhs());
10108 FloatRegister rhs = ToFloatRegister(ins->rhs());
10110 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
10112 masm.setupWasmABICall();
10113 masm.passABIArg(lhs, ABIType::Float64);
10114 masm.passABIArg(rhs, ABIType::Float64);
10116 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
10117 masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD,
10118 mozilla::Some(instanceOffset), ABIType::Float64);
10120 masm.Pop(InstanceReg);
10123 void CodeGenerator::visitBigIntAdd(LBigIntAdd* ins) {
10124 Register lhs = ToRegister(ins->lhs());
10125 Register rhs = ToRegister(ins->rhs());
10126 Register temp1 = ToRegister(ins->temp1());
10127 Register temp2 = ToRegister(ins->temp2());
10128 Register output = ToRegister(ins->output());
10130 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10131 auto* ool = oolCallVM<Fn, BigInt::add>(ins, ArgList(lhs, rhs),
10132 StoreRegisterTo(output));
10134 // 0n + x == x
10135 Label lhsNonZero;
10136 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10137 masm.movePtr(rhs, output);
10138 masm.jump(ool->rejoin());
10139 masm.bind(&lhsNonZero);
10141 // x + 0n == x
10142 Label rhsNonZero;
10143 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10144 masm.movePtr(lhs, output);
10145 masm.jump(ool->rejoin());
10146 masm.bind(&rhsNonZero);
10148 // Call into the VM when either operand can't be loaded into a pointer-sized
10149 // register.
10150 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10151 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10153 masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10155 // Create and return the result.
10156 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10157 masm.initializeBigInt(output, temp1);
10159 masm.bind(ool->rejoin());
10162 void CodeGenerator::visitBigIntSub(LBigIntSub* ins) {
10163 Register lhs = ToRegister(ins->lhs());
10164 Register rhs = ToRegister(ins->rhs());
10165 Register temp1 = ToRegister(ins->temp1());
10166 Register temp2 = ToRegister(ins->temp2());
10167 Register output = ToRegister(ins->output());
10169 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10170 auto* ool = oolCallVM<Fn, BigInt::sub>(ins, ArgList(lhs, rhs),
10171 StoreRegisterTo(output));
10173 // x - 0n == x
10174 Label rhsNonZero;
10175 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10176 masm.movePtr(lhs, output);
10177 masm.jump(ool->rejoin());
10178 masm.bind(&rhsNonZero);
10180 // Call into the VM when either operand can't be loaded into a pointer-sized
10181 // register.
10182 masm.loadBigInt(lhs, temp1, ool->entry());
10183 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10185 masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10187 // Create and return the result.
10188 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10189 masm.initializeBigInt(output, temp1);
10191 masm.bind(ool->rejoin());
10194 void CodeGenerator::visitBigIntMul(LBigIntMul* ins) {
10195 Register lhs = ToRegister(ins->lhs());
10196 Register rhs = ToRegister(ins->rhs());
10197 Register temp1 = ToRegister(ins->temp1());
10198 Register temp2 = ToRegister(ins->temp2());
10199 Register output = ToRegister(ins->output());
10201 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10202 auto* ool = oolCallVM<Fn, BigInt::mul>(ins, ArgList(lhs, rhs),
10203 StoreRegisterTo(output));
10205 // 0n * x == 0n
10206 Label lhsNonZero;
10207 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10208 masm.movePtr(lhs, output);
10209 masm.jump(ool->rejoin());
10210 masm.bind(&lhsNonZero);
10212 // x * 0n == 0n
10213 Label rhsNonZero;
10214 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10215 masm.movePtr(rhs, output);
10216 masm.jump(ool->rejoin());
10217 masm.bind(&rhsNonZero);
10219 // Call into the VM when either operand can't be loaded into a pointer-sized
10220 // register.
10221 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10222 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10224 masm.branchMulPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10226 // Create and return the result.
10227 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10228 masm.initializeBigInt(output, temp1);
10230 masm.bind(ool->rejoin());
10233 void CodeGenerator::visitBigIntDiv(LBigIntDiv* ins) {
10234 Register lhs = ToRegister(ins->lhs());
10235 Register rhs = ToRegister(ins->rhs());
10236 Register temp1 = ToRegister(ins->temp1());
10237 Register temp2 = ToRegister(ins->temp2());
10238 Register output = ToRegister(ins->output());
10240 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10241 auto* ool = oolCallVM<Fn, BigInt::div>(ins, ArgList(lhs, rhs),
10242 StoreRegisterTo(output));
10244 // x / 0 throws an error.
10245 if (ins->mir()->canBeDivideByZero()) {
10246 masm.branchIfBigIntIsZero(rhs, ool->entry());
10249 // 0n / x == 0n
10250 Label lhsNonZero;
10251 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10252 masm.movePtr(lhs, output);
10253 masm.jump(ool->rejoin());
10254 masm.bind(&lhsNonZero);
10256 // Call into the VM when either operand can't be loaded into a pointer-sized
10257 // register.
10258 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10259 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10261 // |BigInt::div()| returns |lhs| for |lhs / 1n|, which means there's no
10262 // allocation which might trigger a minor GC to free up nursery space. This
10263 // requires us to apply the same optimization here, otherwise we'd end up with
10264 // always entering the OOL call, because the nursery is never evicted.
10265 Label notOne;
10266 masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(1), &notOne);
10267 masm.movePtr(lhs, output);
10268 masm.jump(ool->rejoin());
10269 masm.bind(&notOne);
10271 static constexpr auto DigitMin = std::numeric_limits<
10272 mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
10274 // Handle an integer overflow from INT{32,64}_MIN / -1.
10275 Label notOverflow;
10276 masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
10277 masm.branchPtr(Assembler::Equal, temp2, ImmWord(-1), ool->entry());
10278 masm.bind(&notOverflow);
10280 emitBigIntDiv(ins, temp1, temp2, output, ool->entry());
10282 masm.bind(ool->rejoin());
10285 void CodeGenerator::visitBigIntMod(LBigIntMod* ins) {
10286 Register lhs = ToRegister(ins->lhs());
10287 Register rhs = ToRegister(ins->rhs());
10288 Register temp1 = ToRegister(ins->temp1());
10289 Register temp2 = ToRegister(ins->temp2());
10290 Register output = ToRegister(ins->output());
10292 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10293 auto* ool = oolCallVM<Fn, BigInt::mod>(ins, ArgList(lhs, rhs),
10294 StoreRegisterTo(output));
10296 // x % 0 throws an error.
10297 if (ins->mir()->canBeDivideByZero()) {
10298 masm.branchIfBigIntIsZero(rhs, ool->entry());
10301 // 0n % x == 0n
10302 Label lhsNonZero;
10303 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10304 masm.movePtr(lhs, output);
10305 masm.jump(ool->rejoin());
10306 masm.bind(&lhsNonZero);
10308 // Call into the VM when either operand can't be loaded into a pointer-sized
10309 // register.
10310 masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
10311 masm.loadBigIntAbsolute(rhs, temp2, ool->entry());
10313 // Similar to the case for BigInt division, we must apply the same allocation
10314 // optimizations as performed in |BigInt::mod()|.
10315 Label notBelow;
10316 masm.branchPtr(Assembler::AboveOrEqual, temp1, temp2, &notBelow);
10317 masm.movePtr(lhs, output);
10318 masm.jump(ool->rejoin());
10319 masm.bind(&notBelow);
10321 // Convert both digits to signed pointer-sized values.
10322 masm.bigIntDigitToSignedPtr(lhs, temp1, ool->entry());
10323 masm.bigIntDigitToSignedPtr(rhs, temp2, ool->entry());
10325 static constexpr auto DigitMin = std::numeric_limits<
10326 mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
10328 // Handle an integer overflow from INT{32,64}_MIN / -1.
10329 Label notOverflow;
10330 masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
10331 masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(-1), &notOverflow);
10332 masm.movePtr(ImmWord(0), temp1);
10333 masm.bind(&notOverflow);
10335 emitBigIntMod(ins, temp1, temp2, output, ool->entry());
10337 masm.bind(ool->rejoin());
void CodeGenerator::visitBigIntPow(LBigIntPow* ins) {
  // Emit code for BigInt exponentiation |lhs ** rhs|. The inline path only
  // handles results which fit into a single pointer-sized digit; everything
  // else — including a negative exponent, which must throw — is handled by
  // the BigInt::pow VM call installed as the OOL path.
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::pow>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x ** -y throws an error.
  if (ins->mir()->canBeNegativeExponent()) {
    masm.branchIfBigIntIsNegative(rhs, ool->entry());
  }

  // Register roles for the square-and-multiply loop below.
  Register dest = temp1;
  Register base = temp2;
  Register exponent = output;

  Label done;
  masm.movePtr(ImmWord(1), dest);  // p = 1

  // 1n ** y == 1n
  // -1n ** y == 1n when y is even
  // -1n ** y == -1n when y is odd
  Label lhsNotOne;
  masm.branch32(Assembler::Above, Address(lhs, BigInt::offsetOfLength()),
                Imm32(1), &lhsNotOne);
  masm.loadFirstBigIntDigitOrZero(lhs, base);
  masm.branchPtr(Assembler::NotEqual, base, Imm32(1), &lhsNotOne);
  {
    masm.loadFirstBigIntDigitOrZero(rhs, exponent);

    // For -1n, the result's sign depends on the exponent's parity; an odd
    // exponent (low bit set) falls through to return |lhs| itself.
    Label lhsNonNegative;
    masm.branchIfBigIntIsNonNegative(lhs, &lhsNonNegative);
    masm.branchTestPtr(Assembler::Zero, exponent, Imm32(1), &done);
    masm.bind(&lhsNonNegative);
    masm.movePtr(lhs, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&lhsNotOne);

  // x ** 0n == 1n
  masm.branchIfBigIntIsZero(rhs, &done);

  // 0n ** y == 0n with y != 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  {
    masm.movePtr(lhs, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&lhsNonZero);

  // Call into the VM when the exponent can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntAbsolute(rhs, exponent, ool->entry());

  // x ** y with x > 1 and y >= DigitBits can't be pointer-sized.
  masm.branchPtr(Assembler::AboveOrEqual, exponent, Imm32(BigInt::DigitBits),
                 ool->entry());

  // x ** 1n == x
  Label rhsNotOne;
  masm.branch32(Assembler::NotEqual, exponent, Imm32(1), &rhsNotOne);
  {
    masm.movePtr(lhs, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&rhsNotOne);

  // Call into the VM when the base operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, base, ool->entry());

  // MacroAssembler::pow32() adjusted to work on pointer-sized registers.
  {
    // m = base
    // n = exponent

    Label start, loop;
    masm.jump(&start);
    masm.bind(&loop);

    // m *= m
    masm.branchMulPtr(Assembler::Overflow, base, base, ool->entry());

    masm.bind(&start);

    // if ((n & 1) != 0) p *= m
    Label even;
    masm.branchTest32(Assembler::Zero, exponent, Imm32(1), &even);
    masm.branchMulPtr(Assembler::Overflow, base, dest, ool->entry());
    masm.bind(&even);

    // n >>= 1
    // if (n == 0) return p
    masm.branchRshift32(Assembler::NonZero, Imm32(1), exponent, &loop);
  }

  MOZ_ASSERT(temp1 == dest);

  // Create and return the result.
  masm.bind(&done);
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
void CodeGenerator::visitBigIntBitAnd(LBigIntBitAnd* ins) {
  // Emit code for |lhs & rhs| on BigInts. The inline path requires both
  // operands to fit in a single pointer-sized digit; otherwise the
  // BigInt::bitAnd VM call is taken.
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitAnd>(ins, ArgList(lhs, rhs),
                                            StoreRegisterTo(output));

  // 0n & x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x & 0n == 0n
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.andPtr(temp2, temp1);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
void CodeGenerator::visitBigIntBitOr(LBigIntBitOr* ins) {
  // Emit code for |lhs | rhs| on BigInts. Same structure as visitBigIntBitAnd,
  // except the zero fast paths return the *other* operand (0n | x == x).
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitOr>(ins, ArgList(lhs, rhs),
                                           StoreRegisterTo(output));

  // 0n | x == x
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x | 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.orPtr(temp2, temp1);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
void CodeGenerator::visitBigIntBitXor(LBigIntBitXor* ins) {
  // Emit code for |lhs ^ rhs| on BigInts. Mirrors visitBigIntBitOr: a zero
  // operand short-circuits to the other operand (0n ^ x == x).
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitXor>(ins, ArgList(lhs, rhs),
                                            StoreRegisterTo(output));

  // 0n ^ x == x
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x ^ 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.xorPtr(temp2, temp1);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
void CodeGenerator::visitBigIntLsh(LBigIntLsh* ins) {
  // Emit code for |lhs << rhs| on BigInts, inlining the single-digit case of
  // |BigInt::lsh|. A negative shift count is treated as a right shift (with
  // round-toward-negative-infinity for negative |lhs|). The VM call handles
  // every case where the magnitude doesn't fit in one pointer-sized digit.
  //
  // Register roles on the inline path:
  //   temp1 - |lhs| magnitude (and later the shifted result)
  //   temp2 - |rhs| magnitude (the shift amount)
  //   temp3 - scratch for the rounding / overflow checks
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::lsh>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n << x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x << 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Inline |BigInt::lsh| for the case when |lhs| contains a single digit.

  Label rhsTooLarge;
  masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);

  // Call into the VM when the left-hand side operand can't be loaded into a
  // pointer-sized register.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());

  // Handle shifts exceeding |BigInt::DigitBits| first.
  Label shift, create;
  masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);

  masm.bind(&rhsTooLarge);
  {
    // x << DigitBits with x != 0n always exceeds pointer-sized storage.
    masm.branchIfBigIntIsNonNegative(rhs, ool->entry());

    // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
    masm.move32(Imm32(0), temp1);
    masm.branchIfBigIntIsNonNegative(lhs, &create);
    masm.move32(Imm32(1), temp1);
    masm.jump(&create);
  }
  masm.bind(&shift);

  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
  {
    // Negative shift count: keep the original magnitude in temp3 for the
    // rounding check below.
    masm.movePtr(temp1, temp3);

    // |x << -y| is computed as |x >> y|.
    masm.rshiftPtr(temp2, temp1);

    // For negative numbers, round down if any bit was shifted out.
    masm.branchIfBigIntIsNonNegative(lhs, &create);

    // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
    masm.movePtr(ImmWord(-1), output);
    masm.lshiftPtr(temp2, output);
    masm.notPtr(output);

    // Add plus one when |(lhs.digit(0) & mask) != 0|.
    masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
    masm.addPtr(ImmWord(1), temp1);
    masm.jump(&create);
  }
  masm.bind(&nonNegative);
  {
    // Save the shift amount; temp2 is repurposed for the overflow check.
    masm.movePtr(temp2, temp3);

    // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
    masm.negPtr(temp2);
    masm.addPtr(Imm32(BigInt::DigitBits), temp2);
    masm.movePtr(temp1, output);
    masm.rshiftPtr(temp2, output);

    // Call into the VM when any bit will be shifted out.
    masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());

    masm.movePtr(temp3, temp2);
    masm.lshiftPtr(temp2, temp1);
  }
  masm.bind(&create);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the left-hand side is negative.
  masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
void CodeGenerator::visitBigIntRsh(LBigIntRsh* ins) {
  // Emit code for |lhs >> rhs| on BigInts, inlining the single-digit case of
  // |BigInt::rsh|. This is the mirror image of visitBigIntLsh: a negative
  // shift count becomes a left shift, and negative |lhs| rounds toward
  // negative infinity when bits are shifted out.
  //
  // Register roles on the inline path:
  //   temp1 - |lhs| magnitude (and later the shifted result)
  //   temp2 - |rhs| magnitude (the shift amount)
  //   temp3 - scratch for the rounding / overflow checks
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::rsh>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n >> x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x >> 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Inline |BigInt::rsh| for the case when |lhs| contains a single digit.

  Label rhsTooLarge;
  masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);

  // Call into the VM when the left-hand side operand can't be loaded into a
  // pointer-sized register.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());

  // Handle shifts exceeding |BigInt::DigitBits| first.
  Label shift, create;
  masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);

  masm.bind(&rhsTooLarge);
  {
    // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
    masm.branchIfBigIntIsNegative(rhs, ool->entry());

    // x >> DigitBits is either 0n or -1n.
    masm.move32(Imm32(0), temp1);
    masm.branchIfBigIntIsNonNegative(lhs, &create);
    masm.move32(Imm32(1), temp1);
    masm.jump(&create);
  }
  masm.bind(&shift);

  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
  {
    // Negative shift count: save it; temp2 is repurposed below.
    masm.movePtr(temp2, temp3);

    // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
    masm.negPtr(temp2);
    masm.addPtr(Imm32(BigInt::DigitBits), temp2);
    masm.movePtr(temp1, output);
    masm.rshiftPtr(temp2, output);

    // Call into the VM when any bit will be shifted out.
    masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());

    // |x >> -y| is computed as |x << y|.
    masm.movePtr(temp3, temp2);
    masm.lshiftPtr(temp2, temp1);
    masm.jump(&create);
  }
  masm.bind(&nonNegative);
  {
    // Keep the original magnitude for the rounding check below.
    masm.movePtr(temp1, temp3);

    masm.rshiftPtr(temp2, temp1);

    // For negative numbers, round down if any bit was shifted out.
    masm.branchIfBigIntIsNonNegative(lhs, &create);

    // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
    masm.movePtr(ImmWord(-1), output);
    masm.lshiftPtr(temp2, output);
    masm.notPtr(output);

    // Add plus one when |(lhs.digit(0) & mask) != 0|.
    masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
    masm.addPtr(ImmWord(1), temp1);
  }
  masm.bind(&create);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the left-hand side is negative.
  masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
10774 void CodeGenerator::visitBigIntIncrement(LBigIntIncrement* ins) {
10775 Register input = ToRegister(ins->input());
10776 Register temp1 = ToRegister(ins->temp1());
10777 Register temp2 = ToRegister(ins->temp2());
10778 Register output = ToRegister(ins->output());
10780 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10781 auto* ool =
10782 oolCallVM<Fn, BigInt::inc>(ins, ArgList(input), StoreRegisterTo(output));
10784 // Call into the VM when the input can't be loaded into a pointer-sized
10785 // register.
10786 masm.loadBigInt(input, temp1, ool->entry());
10787 masm.movePtr(ImmWord(1), temp2);
10789 masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10791 // Create and return the result.
10792 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10793 masm.initializeBigInt(output, temp1);
10795 masm.bind(ool->rejoin());
10798 void CodeGenerator::visitBigIntDecrement(LBigIntDecrement* ins) {
10799 Register input = ToRegister(ins->input());
10800 Register temp1 = ToRegister(ins->temp1());
10801 Register temp2 = ToRegister(ins->temp2());
10802 Register output = ToRegister(ins->output());
10804 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10805 auto* ool =
10806 oolCallVM<Fn, BigInt::dec>(ins, ArgList(input), StoreRegisterTo(output));
10808 // Call into the VM when the input can't be loaded into a pointer-sized
10809 // register.
10810 masm.loadBigInt(input, temp1, ool->entry());
10811 masm.movePtr(ImmWord(1), temp2);
10813 masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10815 // Create and return the result.
10816 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10817 masm.initializeBigInt(output, temp1);
10819 masm.bind(ool->rejoin());
10822 void CodeGenerator::visitBigIntNegate(LBigIntNegate* ins) {
10823 Register input = ToRegister(ins->input());
10824 Register temp = ToRegister(ins->temp());
10825 Register output = ToRegister(ins->output());
10827 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10828 auto* ool =
10829 oolCallVM<Fn, BigInt::neg>(ins, ArgList(input), StoreRegisterTo(output));
10831 // -0n == 0n
10832 Label lhsNonZero;
10833 masm.branchIfBigIntIsNonZero(input, &lhsNonZero);
10834 masm.movePtr(input, output);
10835 masm.jump(ool->rejoin());
10836 masm.bind(&lhsNonZero);
10838 // Call into the VM when the input uses heap digits.
10839 masm.copyBigIntWithInlineDigits(input, output, temp, initialBigIntHeap(),
10840 ool->entry());
10842 // Flip the sign bit.
10843 masm.xor32(Imm32(BigInt::signBitMask()),
10844 Address(output, BigInt::offsetOfFlags()));
10846 masm.bind(ool->rejoin());
void CodeGenerator::visitBigIntBitNot(LBigIntBitNot* ins) {
  // Emit code for |~input| on a BigInt. Works on the absolute value plus the
  // sign bit (|temp1| holds the magnitude), identically to the C++
  // implementation in BigInt::bitNot.
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitNot>(ins, ArgList(input),
                                            StoreRegisterTo(output));

  masm.loadBigIntAbsolute(input, temp1, ool->entry());

  // This follows the C++ implementation because it lets us support the full
  // range [-2^64, 2^64 - 1] on 64-bit resp. [-2^32, 2^32 - 1] on 32-bit.
  Label nonNegative, done;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  {
    // ~(-x) == ~(~(x-1)) == x-1
    masm.subPtr(Imm32(1), temp1);
    masm.jump(&done);
  }
  masm.bind(&nonNegative);
  {
    // ~x == -x-1 == -(x+1); a carry means x+1 doesn't fit in a digit, so
    // divert to the VM call.
    masm.movePtr(ImmWord(1), temp2);
    masm.branchAddPtr(Assembler::CarrySet, temp2, temp1, ool->entry());
  }
  masm.bind(&done);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the input is positive.
  masm.branchIfBigIntIsNegative(input, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
void CodeGenerator::visitInt32ToStringWithBase(LInt32ToStringWithBase* lir) {
  // Convert an int32 to a string in a given base. The base is either a
  // runtime register or a compile-time constant; each form has a dedicated
  // MacroAssembler helper, with js::Int32ToStringWithBase as the VM fallback.
  Register input = ToRegister(lir->input());
  RegisterOrInt32 base = ToRegisterOrInt32(lir->base());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  bool lowerCase = lir->mir()->lowerCase();

  using Fn = JSString* (*)(JSContext*, int32_t, int32_t, bool);
  if (base.is<Register>()) {
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, base.as<Register>(), Imm32(lowerCase)),
        StoreRegisterTo(output));

    // The register-base helper needs the live volatile set, presumably
    // because it can clobber registers internally — confirm against
    // MacroAssembler::loadInt32ToStringWithBase.
    LiveRegisterSet liveRegs = liveVolatileRegs(lir);
    masm.loadInt32ToStringWithBase(input, base.as<Register>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   liveRegs, lowerCase, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, Imm32(base.as<int32_t>()), Imm32(lowerCase)),
        StoreRegisterTo(output));

    masm.loadInt32ToStringWithBase(input, base.as<int32_t>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   lowerCase, ool->entry());
    masm.bind(ool->rejoin());
  }
}
void CodeGenerator::visitNumberParseInt(LNumberParseInt* lir) {
  // Emit code for |parseInt(string, radix)| where the radix is known to be
  // 0 or 10: the string's cached index value (if present) is the result;
  // otherwise fall back to the js::NumberParseInt VM call.
  Register string = ToRegister(lir->string());
  Register radix = ToRegister(lir->radix());
  ValueOperand output = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
  masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
  masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
  masm.bind(&ok);
#endif

  // Use indexed value as fast path if possible.
  Label vmCall, done;
  masm.loadStringIndexValue(string, temp, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, temp, output);
  masm.jump(&done);
  {
    masm.bind(&vmCall);

    // Arguments are pushed in reverse order of the Fn signature.
    pushArg(radix);
    pushArg(string);

    using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
    callVM<Fn, js::NumberParseInt>(lir);
  }
  masm.bind(&done);
}
void CodeGenerator::visitDoubleParseInt(LDoubleParseInt* lir) {
  // Emit code for |parseInt(double)| by truncating to int32. Bails out of
  // Ion for NaN, for values branchTruncateDoubleToInt32 can't convert, and
  // for non-zero inputs in (-1, 1.0e-6) — presumably because parseInt's
  // string conversion would use exponential notation there and yield a
  // different result; confirm against js::NumberParseInt.
  FloatRegister number = ToFloatRegister(lir->number());
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->temp0());

  Label bail;
  // An unordered self-comparison detects NaN.
  masm.branchDouble(Assembler::DoubleUnordered, number, number, &bail);
  masm.branchTruncateDoubleToInt32(number, output, &bail);

  Label ok;
  masm.branch32(Assembler::NotEqual, output, Imm32(0), &ok);
  {
    // Accept both +0 and -0 and return 0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleEqual, number, temp, &ok);

    // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
    masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, temp);
    masm.branchDouble(Assembler::DoubleLessThan, number, temp, &bail);
  }
  masm.bind(&ok);

  bailoutFrom(&bail, lir->snapshot());
}
10978 void CodeGenerator::visitFloor(LFloor* lir) {
10979 FloatRegister input = ToFloatRegister(lir->input());
10980 Register output = ToRegister(lir->output());
10982 Label bail;
10983 masm.floorDoubleToInt32(input, output, &bail);
10984 bailoutFrom(&bail, lir->snapshot());
10987 void CodeGenerator::visitFloorF(LFloorF* lir) {
10988 FloatRegister input = ToFloatRegister(lir->input());
10989 Register output = ToRegister(lir->output());
10991 Label bail;
10992 masm.floorFloat32ToInt32(input, output, &bail);
10993 bailoutFrom(&bail, lir->snapshot());
10996 void CodeGenerator::visitCeil(LCeil* lir) {
10997 FloatRegister input = ToFloatRegister(lir->input());
10998 Register output = ToRegister(lir->output());
11000 Label bail;
11001 masm.ceilDoubleToInt32(input, output, &bail);
11002 bailoutFrom(&bail, lir->snapshot());
11005 void CodeGenerator::visitCeilF(LCeilF* lir) {
11006 FloatRegister input = ToFloatRegister(lir->input());
11007 Register output = ToRegister(lir->output());
11009 Label bail;
11010 masm.ceilFloat32ToInt32(input, output, &bail);
11011 bailoutFrom(&bail, lir->snapshot());
11014 void CodeGenerator::visitRound(LRound* lir) {
11015 FloatRegister input = ToFloatRegister(lir->input());
11016 FloatRegister temp = ToFloatRegister(lir->temp0());
11017 Register output = ToRegister(lir->output());
11019 Label bail;
11020 masm.roundDoubleToInt32(input, output, temp, &bail);
11021 bailoutFrom(&bail, lir->snapshot());
11024 void CodeGenerator::visitRoundF(LRoundF* lir) {
11025 FloatRegister input = ToFloatRegister(lir->input());
11026 FloatRegister temp = ToFloatRegister(lir->temp0());
11027 Register output = ToRegister(lir->output());
11029 Label bail;
11030 masm.roundFloat32ToInt32(input, output, temp, &bail);
11031 bailoutFrom(&bail, lir->snapshot());
11034 void CodeGenerator::visitTrunc(LTrunc* lir) {
11035 FloatRegister input = ToFloatRegister(lir->input());
11036 Register output = ToRegister(lir->output());
11038 Label bail;
11039 masm.truncDoubleToInt32(input, output, &bail);
11040 bailoutFrom(&bail, lir->snapshot());
11043 void CodeGenerator::visitTruncF(LTruncF* lir) {
11044 FloatRegister input = ToFloatRegister(lir->input());
11045 Register output = ToRegister(lir->output());
11047 Label bail;
11048 masm.truncFloat32ToInt32(input, output, &bail);
11049 bailoutFrom(&bail, lir->snapshot());
void CodeGenerator::visitCompareS(LCompareS* lir) {
  // Emit a string comparison for |left <op> right|. The inline
  // MacroAssembler::compareStrings path handles what it can; the OOL path
  // selects the matching VM helper. Note that only Equal/NotEqual/LessThan/
  // GreaterThanOrEqual helpers exist, so Le and Gt are expressed by swapping
  // the operands.
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Lt) {
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Le) {
    // Push the operands in reverse order for JSOp::Le:
    // - |left <= right| is implemented as |right >= left|.
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(right, left), StoreRegisterTo(output));
  } else if (op == JSOp::Gt) {
    // Push the operands in reverse order for JSOp::Gt:
    // - |left > right| is implemented as |right < left|.
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(right, left), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ge);
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(left, right), StoreRegisterTo(output));
  }

  masm.compareStrings(op, left, right, output, ool->entry());

  masm.bind(ool->rejoin());
}
void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
  // Emit an equality comparison of a string operand against a known,
  // non-empty constant linear string. Cheap structural checks (pointer
  // identity, atom-ness, character width, length) are tried before the
  // character-by-character comparison; the VM call covers the rest.
  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsEqualityOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() > 0);

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ne || op == JSOp::StrictNe);
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  }

  Label compareChars;
  {
    Label notPointerEqual;

    // If operands point to the same instance, the strings are trivially equal.
    masm.branchPtr(Assembler::NotEqual, input, ImmGCPtr(str),
                   &notPointerEqual);
    masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
    masm.jump(ool->rejoin());

    masm.bind(&notPointerEqual);

    Label setNotEqualResult;
    if (str->isAtom()) {
      // Atoms cannot be equal to each other if they point to different
      // strings.
      Imm32 atomBit(JSString::ATOM_BIT);
      masm.branchTest32(Assembler::NonZero,
                        Address(input, JSString::offsetOfFlags()), atomBit,
                        &setNotEqualResult);
    }

    if (str->hasTwoByteChars()) {
      // Pure two-byte strings can't be equal to Latin-1 strings.
      JS::AutoCheckCannotGC nogc;
      if (!mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
        masm.branchLatin1String(input, &setNotEqualResult);
      }
    }

    // Strings of different length can never be equal.
    masm.branch32(Assembler::Equal, Address(input, JSString::offsetOfLength()),
                  Imm32(str->length()), &compareChars);

    masm.bind(&setNotEqualResult);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&compareChars);

  // Load the input string's characters.
  Register stringChars = output;
  masm.loadStringCharsForCompare(input, str, stringChars, ool->entry());

  // Start comparing character by character.
  masm.compareStringChars(op, stringChars, str, output);

  masm.bind(ool->rejoin());
}
void CodeGenerator::visitCompareSSingle(LCompareSSingle* lir) {
  // Emit a relational comparison of a string operand against a constant
  // single-character string. The comparison is decided by the input's first
  // character; when the first characters are equal, the input's length
  // (compared against 1) decides the result. Ropes are unwound to reach the
  // left-most (first-character) child.
  JSOp op = lir->jsop();
  MOZ_ASSERT(IsRelationalOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() == 1);

  char16_t ch = str->latin1OrTwoByteChar(0);

  masm.movePtr(input, temp);

  // Check if the string is empty.
  Label compareLength;
  masm.branch32(Assembler::Equal, Address(temp, JSString::offsetOfLength()),
                Imm32(0), &compareLength);

  // The first character is in the left-most rope child.
  Label notRope;
  masm.branchIfNotRope(temp, &notRope);
  {
    // Unwind ropes at the start if possible.
    Label unwindRope;
    masm.bind(&unwindRope);
    masm.loadRopeLeftChild(temp, output);
    masm.movePtr(output, temp);

#ifdef DEBUG
    Label notEmpty;
    masm.branch32(Assembler::NotEqual,
                  Address(temp, JSString::offsetOfLength()), Imm32(0),
                  &notEmpty);
    masm.assumeUnreachable("rope children are non-empty");
    masm.bind(&notEmpty);
#endif

    // Otherwise keep unwinding ropes.
    masm.branchIfRope(temp, &unwindRope);
  }
  masm.bind(&notRope);

  // Load the first character into |output|.
  auto loadFirstChar = [&](auto encoding) {
    masm.loadStringChars(temp, output, encoding);
    masm.loadChar(Address(output, 0), output, encoding);
  };

  Label done;
  if (ch <= JSString::MAX_LATIN1_CHAR) {
    // Handle both encodings when the search character is Latin-1.
    Label twoByte, compare;
    masm.branchTwoByteString(temp, &twoByte);

    loadFirstChar(CharEncoding::Latin1);
    masm.jump(&compare);

    masm.bind(&twoByte);
    loadFirstChar(CharEncoding::TwoByte);

    masm.bind(&compare);
  } else {
    // The search character is a two-byte character, so it can't be equal to
    // any character of a Latin-1 string.
    masm.move32(Imm32(int32_t(op == JSOp::Lt || op == JSOp::Le)), output);
    masm.branchLatin1String(temp, &done);

    loadFirstChar(CharEncoding::TwoByte);
  }

  // Compare the string length when the search character is equal to the
  // input's first character.
  masm.branch32(Assembler::Equal, output, Imm32(ch), &compareLength);

  // Otherwise compute the result and jump to the end.
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false), output, Imm32(ch),
                output);
  masm.jump(&done);

  // Compare the string length to compute the overall result.
  masm.bind(&compareLength);
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                Address(temp, JSString::offsetOfLength()), Imm32(1), output);

  masm.bind(&done);
}
void CodeGenerator::visitCompareBigInt(LCompareBigInt* lir) {
  // Emit a comparison of two BigInts. MacroAssembler::equalBigInts walks the
  // operands and jumps to one of three "not same" targets on the first
  // difference; for equality ops all three collapse to a single label, while
  // relational ops resolve the difference (sign, digit length, or digit
  // value) into a boolean below.
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label notSame;
  Label compareSign;
  Label compareLength;
  Label compareDigit;

  Label* notSameSign;
  Label* notSameLength;
  Label* notSameDigit;
  if (IsEqualityOp(op)) {
    notSameSign = &notSame;
    notSameLength = &notSame;
    notSameDigit = &notSame;
  } else {
    notSameSign = &compareSign;
    notSameLength = &compareLength;
    notSameDigit = &compareDigit;
  }

  masm.equalBigInts(left, right, temp0, temp1, temp2, output, notSameSign,
                    notSameLength, notSameDigit);

  // Fall-through: the operands are equal, so Eq/Le/Ge are true.
  Label done;
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
                    op == JSOp::Ge),
              output);
  masm.jump(&done);

  if (IsEqualityOp(op)) {
    masm.bind(&notSame);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  } else {
    Label invertWhenNegative;

    // There are two cases when sign(left) != sign(right):
    // 1. sign(left) = positive and sign(right) = negative,
    // 2. or the dual case with reversed signs.
    //
    // For case 1, |left| <cmp> |right| is true for cmp=Gt or cmp=Ge and false
    // for cmp=Lt or cmp=Le. Initialize the result for case 1 and handle case 2
    // with |invertWhenNegative|.
    masm.bind(&compareSign);
    masm.move32(Imm32(op == JSOp::Gt || op == JSOp::Ge), output);
    masm.jump(&invertWhenNegative);

    // For sign(left) = sign(right) and len(digits(left)) != len(digits(right)),
    // we have to consider the two cases:
    // 1. len(digits(left)) < len(digits(right))
    // 2. len(digits(left)) > len(digits(right))
    //
    // For |left| <cmp> |right| with cmp=Lt:
    // Assume both BigInts are positive, then |left < right| is true for case 1
    // and false for case 2. When both are negative, the result is reversed.
    //
    // The other comparison operators can be handled similarly.
    //
    // |temp0| holds the digits length of the right-hand side operand.
    masm.bind(&compareLength);
    masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                  Address(left, BigInt::offsetOfLength()), temp0, output);
    masm.jump(&invertWhenNegative);

    // Similar to the case above, compare the current digit to determine the
    // overall comparison result.
    //
    // |temp1| points to the current digit of the left-hand side operand.
    // |output| holds the current digit of the right-hand side operand.
    masm.bind(&compareDigit);
    masm.cmpPtrSet(JSOpToCondition(op, /* isSigned = */ false),
                   Address(temp1, 0), output, output);

    // Both operands have the same sign here, so flipping on |left|'s sign
    // covers both.
    Label nonNegative;
    masm.bind(&invertWhenNegative);
    masm.branchIfBigIntIsNonNegative(left, &nonNegative);
    masm.xor32(Imm32(1), output);
    masm.bind(&nonNegative);
  }

  masm.bind(&done);
}
// Compare a BigInt operand with an Int32 operand and materialize the boolean
// result (0 or 1) in |output|. The heavy lifting — digit inspection and the
// actual comparison — is done by MacroAssembler::compareBigIntAndInt32, which
// branches to |ifTrue| or |ifFalse|.
void CodeGenerator::visitCompareBigIntInt32(LCompareBigIntInt32* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register output = ToRegister(lir->output());

  Label ifTrue, ifFalse;
  masm.compareBigIntAndInt32(op, left, right, temp0, temp1, &ifTrue, &ifFalse);

  // Turn the two branch targets into a 0/1 value in |output|.
  Label done;
  masm.bind(&ifFalse);
  masm.move32(Imm32(0), output);
  masm.jump(&done);
  masm.bind(&ifTrue);
  masm.move32(Imm32(1), output);
  masm.bind(&done);
}
// Compare a BigInt operand with a double operand by calling out to a C++
// helper via the ABI. The helper returns a bool, which is stored in |output|.
void CodeGenerator::visitCompareBigIntDouble(LCompareBigIntDouble* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  FloatRegister right = ToFloatRegister(lir->right());
  Register output = ToRegister(lir->output());

  masm.setupAlignedABICall();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(right, ABIType::Float64);
    masm.passABIArg(left);
  } else {
    masm.passABIArg(left);
    masm.passABIArg(right, ABIType::Float64);
  }

  // Select the helper matching the op. Note that JSOp::Gt and JSOp::Le use
  // the Number-first helpers (FnNumberBigInt) to match the swapped argument
  // order pushed above.
  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(output);
}
// Compare a BigInt operand with a String operand through a VM call (the
// string has to be parsed as a number, which can GC and report errors).
void CodeGenerator::visitCompareBigIntString(LCompareBigIntString* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  // (VM call arguments are pushed last-to-first.)
  if (op == JSOp::Le || op == JSOp::Gt) {
    pushArg(left);
    pushArg(right);
  } else {
    pushArg(right);
    pushArg(left);
  }

  // As in visitCompareBigIntDouble, JSOp::Gt and JSOp::Le dispatch to the
  // String-first VM functions to match the swapped argument order.
  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);

  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callVM<FnBigIntString, BigIntStringEqual<Equal>>(lir);
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callVM<FnBigIntString, BigIntStringEqual<NotEqual>>(lir);
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnBigIntString, BigIntStringCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Gt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnStringBigInt, StringBigIntCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Le: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    default:
      MOZ_CRASH("Unexpected compare op");
  }
}
// Loose equality of a boxed Value against null/undefined, producing a boolean
// in |output|. Values loosely equal to null/undefined are: null, undefined,
// and objects that "emulate undefined" (e.g. document.all). When the
// emulates-undefined fuse is intact, no such object exists, so the object
// check can be reduced to a debug-only assertion.
void CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::ValueIndex);
  Register output = ToRegister(lir->output());

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  if (!intact) {
    // Slow path: objects may emulate undefined, so test them out-of-line.
    auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
    addOutOfLineCode(ool, lir->mir());

    Label* nullOrLikeUndefined = ool->label1();
    Label* notNullOrLikeUndefined = ool->label2();

    {
      ScratchTagScope tag(masm, value);
      masm.splitTagForTest(value, tag);

      masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
      masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);

      // Check whether it's a truthy object or a falsy object that emulates
      // undefined.
      masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
    }

    Register objreg =
        masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
    branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined,
                                      notNullOrLikeUndefined, output, ool);
    // fall through

    Label done;

    // It's not null or undefined, and if it's an object it doesn't
    // emulate undefined, so it's not like undefined.
    masm.move32(Imm32(op == JSOp::Ne), output);
    masm.jump(&done);

    masm.bind(nullOrLikeUndefined);
    masm.move32(Imm32(op == JSOp::Eq), output);

    // Both branches meet here.
    masm.bind(&done);
  } else {
    // Fast path: fuse is intact, so only null/undefined tags can match.
    Label nullOrUndefined, notNullOrLikeUndefined;
#if defined(DEBUG) || defined(FUZZING)
    Register objreg = Register::Invalid();
#endif
    {
      ScratchTagScope tag(masm, value);
      masm.splitTagForTest(value, tag);

      masm.branchTestNull(Assembler::Equal, tag, &nullOrUndefined);
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);

#if defined(DEBUG) || defined(FUZZING)
      // Check whether it's a truthy object or a falsy object that emulates
      // undefined.
      masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);
      objreg = masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
#endif
    }

#if defined(DEBUG) || defined(FUZZING)
    // In debug/fuzzing builds, verify the fuse's guarantee actually holds.
    assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
    masm.bind(&notNullOrLikeUndefined);
#endif

    Label done;

    // It's not null or undefined, and if it's an object it doesn't
    // emulate undefined.
    masm.move32(Imm32(op == JSOp::Ne), output);
    masm.jump(&done);

    masm.bind(&nullOrUndefined);
    masm.move32(Imm32(op == JSOp::Eq), output);

    // Both branches meet here.
    masm.bind(&done);
  }
}
// Branch form of visitIsNullOrLikeUndefinedV: instead of materializing a
// boolean, jump straight to the true/false basic blocks.
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(
    LIsNullOrLikeUndefinedAndBranchV* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  const ValueOperand value =
      ToValue(lir, LIsNullOrLikeUndefinedAndBranchV::Value);

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();

  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
    masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);

    // Non-objects other than null/undefined are never loosely equal to them.
    masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
  }

  bool extractObject = !intact;
#if defined(DEBUG) || defined(FUZZING)
  // always extract objreg if we're in debug and
  // assertObjectDoesNotEmulateUndefined;
  extractObject = true;
#endif

  Register objreg = Register::Invalid();
  Register scratch = ToRegister(lir->temp());
  if (extractObject) {
    objreg = masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
  }
  if (!intact) {
    // Objects that emulate undefined are loosely equal to null/undefined.
    OutOfLineTestObject* ool = new (alloc()) OutOfLineTestObject();
    addOutOfLineCode(ool, lir->cmpMir());
    testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch,
                                ool);
  } else {
    assertObjectDoesNotEmulateUndefined(objreg, scratch, lir->cmpMir());
    // Bug 1874905. This would be nice to optimize out at the MIR level.
    masm.jump(ifFalseLabel);
  }
}
// Loose equality of a known-Object operand against null/undefined. An object
// is only loosely equal to null/undefined if it emulates undefined, so when
// the fuse is intact the answer is statically |op == JSOp::Ne|.
void CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);
  MOZ_ASSERT(lir->mir()->lhs()->type() == MIRType::Object);

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  JSOp op = lir->mir()->jsop();
  Register output = ToRegister(lir->output());
  Register objreg = ToRegister(lir->input());
  if (!intact) {
    MOZ_ASSERT(IsLooseEqualityOp(op),
               "Strict equality should have been folded");

    auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
    addOutOfLineCode(ool, lir->mir());

    Label* emulatesUndefined = ool->label1();
    Label* doesntEmulateUndefined = ool->label2();

    branchTestObjectEmulatesUndefined(objreg, emulatesUndefined,
                                      doesntEmulateUndefined, output, ool);

    // Materialize the result: ordinary object first, emulates-undefined after.
    Label done;

    masm.move32(Imm32(op == JSOp::Ne), output);
    masm.jump(&done);

    masm.bind(emulatesUndefined);
    masm.move32(Imm32(op == JSOp::Eq), output);
    masm.bind(&done);
  } else {
    // Fuse intact: no object emulates undefined, so the comparison is
    // constant-folded to false for Eq / true for Ne.
    assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
    masm.move32(Imm32(op == JSOp::Ne), output);
  }
}
// Branch form of visitIsNullOrLikeUndefinedT: jump directly to the true/false
// blocks instead of producing a boolean.
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(
    LIsNullOrLikeUndefinedAndBranchT* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);
  MOZ_ASSERT(lir->cmpMir()->lhs()->type() == MIRType::Object);

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  Register input = ToRegister(lir->getOperand(0));
  Register scratch = ToRegister(lir->temp());
  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  if (intact) {
    // Bug 1874905. Ideally branches like this would be optimized out.
    assertObjectDoesNotEmulateUndefined(input, scratch, lir->mir());
    masm.jump(ifFalseLabel);
  } else {
    auto* ool = new (alloc()) OutOfLineTestObject();
    addOutOfLineCode(ool, lir->cmpMir());

    // Objects that emulate undefined are loosely equal to null/undefined.
    testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
  }
}
// Strict equality of a boxed Value against null: a simple tag test.
void CodeGenerator::visitIsNull(LIsNull* lir) {
  MCompare::CompareType compareType = lir->mir()->compareType();
  MOZ_ASSERT(compareType == MCompare::Compare_Null);

  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsStrictEqualityOp(op));

  const ValueOperand value = ToValue(lir, LIsNull::ValueIndex);
  Register output = ToRegister(lir->output());

  Assembler::Condition cond = JSOpToCondition(compareType, op);
  masm.testNullSet(cond, value, output);
}
// Strict equality of a boxed Value against undefined: a simple tag test.
void CodeGenerator::visitIsUndefined(LIsUndefined* lir) {
  MCompare::CompareType compareType = lir->mir()->compareType();
  MOZ_ASSERT(compareType == MCompare::Compare_Undefined);

  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsStrictEqualityOp(op));

  const ValueOperand value = ToValue(lir, LIsUndefined::ValueIndex);
  Register output = ToRegister(lir->output());

  Assembler::Condition cond = JSOpToCondition(compareType, op);
  masm.testUndefinedSet(cond, value, output);
}
// Branch form of visitIsNull: test the tag and jump to the target blocks.
void CodeGenerator::visitIsNullAndBranch(LIsNullAndBranch* lir) {
  MCompare::CompareType compareType = lir->cmpMir()->compareType();
  MOZ_ASSERT(compareType == MCompare::Compare_Null);

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsStrictEqualityOp(op));

  const ValueOperand value = ToValue(lir, LIsNullAndBranch::Value);

  Assembler::Condition cond = JSOpToCondition(compareType, op);
  testNullEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
}
// Branch form of visitIsUndefined: test the tag and jump to the target blocks.
void CodeGenerator::visitIsUndefinedAndBranch(LIsUndefinedAndBranch* lir) {
  MCompare::CompareType compareType = lir->cmpMir()->compareType();
  MOZ_ASSERT(compareType == MCompare::Compare_Undefined);

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsStrictEqualityOp(op));

  const ValueOperand value = ToValue(lir, LIsUndefinedAndBranch::Value);

  Assembler::Condition cond = JSOpToCondition(compareType, op);
  testUndefinedEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
}
// SameValue on two doubles (distinguishes 0 from -0 and treats NaN equal to
// NaN); delegated entirely to the masm helper.
void CodeGenerator::visitSameValueDouble(LSameValueDouble* lir) {
  FloatRegister left = ToFloatRegister(lir->left());
  FloatRegister right = ToFloatRegister(lir->right());
  FloatRegister temp = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  masm.sameValueDouble(left, right, temp, output);
}
// SameValue on two boxed Values: fast path compares the raw 64-bit value
// bits (punbox layout), falling back to the SameValue VM call when the bits
// differ.
void CodeGenerator::visitSameValue(LSameValue* lir) {
  ValueOperand lhs = ToValue(lir, LSameValue::LhsIndex);
  ValueOperand rhs = ToValue(lir, LSameValue::RhsIndex);
  Register output = ToRegister(lir->output());

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
  OutOfLineCode* ool =
      oolCallVM<Fn, SameValue>(lir, ArgList(lhs, rhs), StoreRegisterTo(output));

  // First check to see if the values have identical bits.
  // This is correct for SameValue because SameValue(NaN,NaN) is true,
  // and SameValue(0,-0) is false.
  masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
                ool->entry());
  masm.move32(Imm32(1), output);

  // If this fails, call SameValue.
  masm.bind(ool->rejoin());
}
// Emit a string concatenation: call the shared per-zone string-concat stub,
// and fall back to the ConcatStrings VM function if the stub returns null
// (e.g. on allocation failure or unsupported inputs — see the stub for the
// exact bail conditions).
void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs,
                               Register output) {
  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
  OutOfLineCode* ool = oolCallVM<Fn, ConcatStrings<CanGC>>(
      lir, ArgList(lhs, rhs, static_cast<Imm32>(int32_t(gc::Heap::Default))),
      StoreRegisterTo(output));

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* stringConcatStub =
      jitZone->stringConcatStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(stringConcatStub);
  // A null result from the stub means "take the slow path".
  masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());

  masm.bind(ool->rejoin());
}
// LConcat uses a fixed register assignment (CallTempReg0..5) shared with the
// string-concat stub; assert it before delegating to emitConcat.
void CodeGenerator::visitConcat(LConcat* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());

  Register output = ToRegister(lir->output());

  MOZ_ASSERT(lhs == CallTempReg0);
  MOZ_ASSERT(rhs == CallTempReg1);
  MOZ_ASSERT(ToRegister(lir->temp0()) == CallTempReg0);
  MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg1);
  MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg2);
  MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg3);
  MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg4);
  MOZ_ASSERT(output == CallTempReg5);

  emitConcat(lir, lhs, rhs, output);
}
// Emit a character-copy loop from |from| to |to|, advancing both pointers.
// When source and destination encodings match, copies word-at-a-time with an
// alignment prologue and (for short maximum lengths) an unrolled loop;
// otherwise copies one char at a time, inflating Latin-1 to two-byte.
// |len| and the pointer registers are clobbered. |maximumLength| is a static
// upper bound on |len| used to prune unnecessary loop iterations.
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding fromEncoding, CharEncoding toEncoding,
                            size_t maximumLength = SIZE_MAX) {
  // Copy |len| char16_t code units from |from| to |to|. Assumes len > 0
  // (checked below in debug builds), and when done |to| must point to the
  // next available char.

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
  masm.assumeUnreachable("Length should be greater than 0.");
  masm.bind(&ok);

  if (maximumLength != SIZE_MAX) {
    MOZ_ASSERT(maximumLength <= INT32_MAX, "maximum length fits into int32");

    Label ok;
    masm.branchPtr(Assembler::BelowOrEqual, len, Imm32(maximumLength), &ok);
    masm.assumeUnreachable("Length should not exceed maximum length.");
    masm.bind(&ok);
  }
#endif

  // Deflating two-byte to Latin-1 is not supported here.
  MOZ_ASSERT_IF(toEncoding == CharEncoding::Latin1,
                fromEncoding == CharEncoding::Latin1);

  size_t fromWidth =
      fromEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
  size_t toWidth =
      toEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);

  // Try to copy multiple characters at once when both encoding are equal.
  if (fromEncoding == toEncoding) {
    constexpr size_t ptrWidth = sizeof(uintptr_t);

    // Copy |width| bytes and then adjust |from| and |to|.
    auto copyCharacters = [&](size_t width) {
      static_assert(ptrWidth <= 8, "switch handles only up to eight bytes");

      switch (width) {
        case 1:
          masm.load8ZeroExtend(Address(from, 0), byteOpScratch);
          masm.store8(byteOpScratch, Address(to, 0));
          break;
        case 2:
          masm.load16ZeroExtend(Address(from, 0), byteOpScratch);
          masm.store16(byteOpScratch, Address(to, 0));
          break;
        case 4:
          masm.load32(Address(from, 0), byteOpScratch);
          masm.store32(byteOpScratch, Address(to, 0));
          break;
        case 8:
          MOZ_ASSERT(width == ptrWidth);
          masm.loadPtr(Address(from, 0), byteOpScratch);
          masm.storePtr(byteOpScratch, Address(to, 0));
          break;
      }

      masm.addPtr(Imm32(width), from);
      masm.addPtr(Imm32(width), to);
    };

    // First align |len| to pointer width: peel off 1-, 2-, (4-)byte copies
    // while the corresponding low bit of |len| is set.
    Label done;
    for (size_t width = fromWidth; width < ptrWidth; width *= 2) {
      // Number of characters which fit into |width| bytes.
      size_t charsPerWidth = width / fromWidth;

      if (charsPerWidth < maximumLength) {
        Label next;
        masm.branchTest32(Assembler::Zero, len, Imm32(charsPerWidth), &next);

        copyCharacters(width);

        masm.branchSub32(Assembler::Zero, Imm32(charsPerWidth), len, &done);
        masm.bind(&next);
      } else if (charsPerWidth == maximumLength) {
        // |len| can't exceed |maximumLength|, so no branch is needed.
        copyCharacters(width);
        masm.sub32(Imm32(charsPerWidth), len);
      }
    }

    size_t maxInlineLength;
    if (fromEncoding == CharEncoding::Latin1) {
      maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
    } else {
      maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
    }

    // Number of characters which fit into a single register.
    size_t charsPerPtr = ptrWidth / fromWidth;

    // Unroll small loops.
    constexpr size_t unrollLoopLimit = 3;
    size_t loopCount = std::min(maxInlineLength, maximumLength) / charsPerPtr;

#ifdef JS_64BIT
    static constexpr size_t latin1MaxInlineByteLength =
        JSFatInlineString::MAX_LENGTH_LATIN1 * sizeof(char);
    static constexpr size_t twoByteMaxInlineByteLength =
        JSFatInlineString::MAX_LENGTH_TWO_BYTE * sizeof(char16_t);

    // |unrollLoopLimit| should be large enough to allow loop unrolling on
    // 64-bit targets.
    static_assert(latin1MaxInlineByteLength / ptrWidth == unrollLoopLimit,
                  "Latin-1 loops are unrolled on 64-bit");
    static_assert(twoByteMaxInlineByteLength / ptrWidth == unrollLoopLimit,
                  "Two-byte loops are unrolled on 64-bit");
#endif

    if (loopCount <= unrollLoopLimit) {
      Label labels[unrollLoopLimit];

      // Check up front how many characters can be copied.
      for (size_t i = 1; i < loopCount; i++) {
        masm.branch32(Assembler::Below, len, Imm32((i + 1) * charsPerPtr),
                      &labels[i]);
      }

      // Generate the unrolled loop body.
      for (size_t i = loopCount; i > 0; i--) {
        copyCharacters(ptrWidth);
        masm.sub32(Imm32(charsPerPtr), len);

        // Jump target for the previous length check.
        if (i != 1) {
          masm.bind(&labels[i - 1]);
        }
      }
    } else {
      // General word-copy loop for longer strings.
      Label start;
      masm.bind(&start);
      copyCharacters(ptrWidth);
      masm.branchSub32(Assembler::NonZero, Imm32(charsPerPtr), len, &start);
    }

    masm.bind(&done);
  } else {
    // Encodings differ (Latin-1 -> two-byte): copy char by char, letting
    // loadChar/storeChar perform the widening.
    Label start;
    masm.bind(&start);
    masm.loadChar(Address(from, 0), byteOpScratch, fromEncoding);
    masm.storeChar(byteOpScratch, Address(to, 0), toEncoding);
    masm.addPtr(Imm32(fromWidth), from);
    masm.addPtr(Imm32(toWidth), to);
    masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
  }
}
// Convenience overload for same-encoding copies with a known maximum length.
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding encoding, size_t maximumLength) {
  CopyStringChars(masm, to, from, len, byteOpScratch, encoding, encoding,
                  maximumLength);
}
// Copy all chars of |input| (a linear string) into the two-byte buffer
// |destChars|, inflating if the source is Latin-1. Clobbers |input|, |temp1|
// and |temp2|; |destChars| is advanced past the copied chars.
static void CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input,
                                        Register destChars, Register temp1,
                                        Register temp2) {
  // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
  // have to inflate.

  Label isLatin1, done;
  masm.loadStringLength(input, temp1);
  masm.branchLatin1String(input, &isLatin1);
  {
    // Two-byte source: straight copy.
    masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
    masm.movePtr(temp2, input);
    CopyStringChars(masm, destChars, input, temp1, temp2,
                    CharEncoding::TwoByte);
    masm.jump(&done);
  }
  masm.bind(&isLatin1);
  {
    // Latin-1 source: copy with inflation to two-byte.
    masm.loadStringChars(input, temp2, CharEncoding::Latin1);
    masm.movePtr(temp2, input);
    CopyStringChars(masm, destChars, input, temp1, temp2, CharEncoding::Latin1,
                    CharEncoding::TwoByte);
  }
  masm.bind(&done);
}
// Allocate a JSThinInlineString or JSFatInlineString (picked by |length|
// against the thin-inline limit for |encoding|), initialize its flags and
// length, and leave the new string in |output|. Jumps to |failure| if GC
// allocation fails. |length| must already fit an inline string (debug-only
// assert below).
static void AllocateThinOrFatInlineString(MacroAssembler& masm, Register output,
                                          Register length, Register temp,
                                          gc::Heap initialStringHeap,
                                          Label* failure,
                                          CharEncoding encoding) {
#ifdef DEBUG
  size_t maxInlineLength;
  if (encoding == CharEncoding::Latin1) {
    maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
  } else {
    maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
  }

  Label ok;
  masm.branch32(Assembler::BelowOrEqual, length, Imm32(maxInlineLength), &ok);
  masm.assumeUnreachable("string length too large to be allocated as inline");
  masm.bind(&ok);
#endif

  size_t maxThinInlineLength;
  if (encoding == CharEncoding::Latin1) {
    maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
  } else {
    maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;
  }

  Label isFat, allocDone;
  masm.branch32(Assembler::Above, length, Imm32(maxThinInlineLength), &isFat);
  {
    uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
    if (encoding == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }
    masm.newGCString(output, temp, initialStringHeap, failure);
    masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
    masm.jump(&allocDone);
  }
  masm.bind(&isFat);
  {
    uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
    if (encoding == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }
    masm.newGCFatInlineString(output, temp, initialStringHeap, failure);
    masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
  }
  masm.bind(&allocDone);

  // Store length.
  masm.store32(length, Address(output, JSString::offsetOfLength()));
}
// Emit the inline-string path of string concatenation: allocate a thin/fat
// inline string of the combined length (already in |temp2|) and copy the
// chars of both operands into it. Ropes bail to |failure|. Clobbers |lhs|,
// |rhs| and all temps.
static void ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs,
                               Register output, Register temp1, Register temp2,
                               Register temp3, gc::Heap initialStringHeap,
                               Label* failure, CharEncoding encoding) {
  JitSpew(JitSpew_Codegen, "# Emitting ConcatInlineString (encoding=%s)",
          (encoding == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // State: result length in temp2.

  // Ensure both strings are linear.
  masm.branchIfRope(lhs, failure);
  masm.branchIfRope(rhs, failure);

  // Allocate a JSThinInlineString or JSFatInlineString.
  AllocateThinOrFatInlineString(masm, output, temp2, temp1, initialStringHeap,
                                failure, encoding);

  // Load chars pointer in temp2.
  masm.loadInlineStringCharsForStore(output, temp2);

  auto copyChars = [&](Register src) {
    if (encoding == CharEncoding::TwoByte) {
      // Destination is two-byte; the source may still be Latin-1.
      CopyStringCharsMaybeInflate(masm, src, temp2, temp1, temp3);
    } else {
      // Latin-1 destination implies both sources are Latin-1.
      masm.loadStringLength(src, temp3);
      masm.loadStringChars(src, temp1, CharEncoding::Latin1);
      masm.movePtr(temp1, src);
      CopyStringChars(masm, temp2, src, temp3, temp1, CharEncoding::Latin1);
    }
  };

  // Copy lhs chars. Note that this advances temp2 to point to the next
  // char. This also clobbers the lhs register.
  copyChars(lhs);

  // Copy rhs chars. Clobbers the rhs register.
  copyChars(rhs);
}
12092 void CodeGenerator::visitSubstr(LSubstr* lir) {
12093 Register string = ToRegister(lir->string());
12094 Register begin = ToRegister(lir->begin());
12095 Register length = ToRegister(lir->length());
12096 Register output = ToRegister(lir->output());
12097 Register temp0 = ToRegister(lir->temp0());
12098 Register temp2 = ToRegister(lir->temp2());
12100 // On x86 there are not enough registers. In that case reuse the string
12101 // register as temporary.
12102 Register temp1 =
12103 lir->temp1()->isBogusTemp() ? string : ToRegister(lir->temp1());
12105 size_t maximumLength = SIZE_MAX;
12107 Range* range = lir->mir()->length()->range();
12108 if (range && range->hasInt32UpperBound()) {
12109 MOZ_ASSERT(range->upper() >= 0);
12110 maximumLength = size_t(range->upper());
12113 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <=
12114 JSThinInlineString::MAX_LENGTH_LATIN1);
12116 static_assert(JSFatInlineString::MAX_LENGTH_TWO_BYTE <=
12117 JSFatInlineString::MAX_LENGTH_LATIN1);
12119 bool tryFatInlineOrDependent =
12120 maximumLength > JSThinInlineString::MAX_LENGTH_TWO_BYTE;
12121 bool tryDependent = maximumLength > JSFatInlineString::MAX_LENGTH_TWO_BYTE;
12123 #ifdef DEBUG
12124 if (maximumLength != SIZE_MAX) {
12125 Label ok;
12126 masm.branch32(Assembler::BelowOrEqual, length, Imm32(maximumLength), &ok);
12127 masm.assumeUnreachable("length should not exceed maximum length");
12128 masm.bind(&ok);
12130 #endif
12132 Label nonZero, nonInput;
12134 // For every edge case use the C++ variant.
12135 // Note: we also use this upon allocation failure in newGCString and
12136 // newGCFatInlineString. To squeeze out even more performance those failures
12137 // can be handled by allocate in ool code and returning to jit code to fill
12138 // in all data.
12139 using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
12140 int32_t len);
12141 OutOfLineCode* ool = oolCallVM<Fn, SubstringKernel>(
12142 lir, ArgList(string, begin, length), StoreRegisterTo(output));
12143 Label* slowPath = ool->entry();
12144 Label* done = ool->rejoin();
12146 // Zero length, return emptystring.
12147 masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
12148 const JSAtomState& names = gen->runtime->names();
12149 masm.movePtr(ImmGCPtr(names.empty_), output);
12150 masm.jump(done);
12152 // Substring from 0..|str.length|, return str.
12153 masm.bind(&nonZero);
12154 masm.branch32(Assembler::NotEqual,
12155 Address(string, JSString::offsetOfLength()), length, &nonInput);
12156 #ifdef DEBUG
12158 Label ok;
12159 masm.branchTest32(Assembler::Zero, begin, begin, &ok);
12160 masm.assumeUnreachable("length == str.length implies begin == 0");
12161 masm.bind(&ok);
12163 #endif
12164 masm.movePtr(string, output);
12165 masm.jump(done);
12167 // Use slow path for ropes.
12168 masm.bind(&nonInput);
12169 masm.branchIfRope(string, slowPath);
12171 // Optimize one and two character strings.
12172 Label nonStatic;
12173 masm.branch32(Assembler::Above, length, Imm32(2), &nonStatic);
12175 Label loadLengthOne, loadLengthTwo;
12177 auto loadChars = [&](CharEncoding encoding, bool fallthru) {
12178 size_t size = encoding == CharEncoding::Latin1 ? sizeof(JS::Latin1Char)
12179 : sizeof(char16_t);
12181 masm.loadStringChars(string, temp0, encoding);
12182 masm.loadChar(temp0, begin, temp2, encoding);
12183 masm.branch32(Assembler::Equal, length, Imm32(1), &loadLengthOne);
12184 masm.loadChar(temp0, begin, temp0, encoding, int32_t(size));
12185 if (!fallthru) {
12186 masm.jump(&loadLengthTwo);
12190 Label isLatin1;
12191 masm.branchLatin1String(string, &isLatin1);
12192 loadChars(CharEncoding::TwoByte, /* fallthru = */ false);
12194 masm.bind(&isLatin1);
12195 loadChars(CharEncoding::Latin1, /* fallthru = */ true);
12197 // Try to load a length-two static string.
12198 masm.bind(&loadLengthTwo);
12199 masm.lookupStaticString(temp2, temp0, output, gen->runtime->staticStrings(),
12200 &nonStatic);
12201 masm.jump(done);
12203 // Try to load a length-one static string.
12204 masm.bind(&loadLengthOne);
12205 masm.lookupStaticString(temp2, output, gen->runtime->staticStrings(),
12206 &nonStatic);
12207 masm.jump(done);
12209 masm.bind(&nonStatic);
12211 // Allocate either a JSThinInlineString or JSFatInlineString, or jump to
12212 // notInline if we need a dependent string.
12213 Label notInline;
12215 static_assert(JSThinInlineString::MAX_LENGTH_LATIN1 <
12216 JSFatInlineString::MAX_LENGTH_LATIN1);
12217 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <
12218 JSFatInlineString::MAX_LENGTH_TWO_BYTE);
12220 // Use temp2 to store the JS(Thin|Fat)InlineString flags. This avoids having
12221 // duplicate newGCString/newGCFatInlineString codegen for Latin1 vs TwoByte
12222 // strings.
12224 Label allocFat, allocDone;
12225 if (tryFatInlineOrDependent) {
12226 Label isLatin1, allocThin;
12227 masm.branchLatin1String(string, &isLatin1);
12229 if (tryDependent) {
12230 masm.branch32(Assembler::Above, length,
12231 Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
12232 &notInline);
12234 masm.move32(Imm32(0), temp2);
12235 masm.branch32(Assembler::Above, length,
12236 Imm32(JSThinInlineString::MAX_LENGTH_TWO_BYTE),
12237 &allocFat);
12238 masm.jump(&allocThin);
12241 masm.bind(&isLatin1);
12243 if (tryDependent) {
12244 masm.branch32(Assembler::Above, length,
12245 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1),
12246 &notInline);
12248 masm.move32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
12249 masm.branch32(Assembler::Above, length,
12250 Imm32(JSThinInlineString::MAX_LENGTH_LATIN1), &allocFat);
12253 masm.bind(&allocThin);
12254 } else {
12255 masm.load32(Address(string, JSString::offsetOfFlags()), temp2);
12256 masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
12260 masm.newGCString(output, temp0, initialStringHeap(), slowPath);
12261 masm.or32(Imm32(JSString::INIT_THIN_INLINE_FLAGS), temp2);
12264 if (tryFatInlineOrDependent) {
12265 masm.jump(&allocDone);
12267 masm.bind(&allocFat);
12269 masm.newGCFatInlineString(output, temp0, initialStringHeap(), slowPath);
12270 masm.or32(Imm32(JSString::INIT_FAT_INLINE_FLAGS), temp2);
12273 masm.bind(&allocDone);
12276 masm.store32(temp2, Address(output, JSString::offsetOfFlags()));
12277 masm.store32(length, Address(output, JSString::offsetOfLength()));
12279 auto initializeInlineString = [&](CharEncoding encoding) {
12280 masm.loadStringChars(string, temp0, encoding);
12281 masm.addToCharPtr(temp0, begin, encoding);
12282 if (temp1 == string) {
12283 masm.push(string);
12285 masm.loadInlineStringCharsForStore(output, temp1);
12286 CopyStringChars(masm, temp1, temp0, length, temp2, encoding,
12287 maximumLength);
12288 masm.loadStringLength(output, length);
12289 if (temp1 == string) {
12290 masm.pop(string);
12294 Label isInlineLatin1;
12295 masm.branchTest32(Assembler::NonZero, temp2,
12296 Imm32(JSString::LATIN1_CHARS_BIT), &isInlineLatin1);
12297 initializeInlineString(CharEncoding::TwoByte);
12298 masm.jump(done);
12300 masm.bind(&isInlineLatin1);
12301 initializeInlineString(CharEncoding::Latin1);
12304 // Handle other cases with a DependentString.
12305 if (tryDependent) {
12306 masm.jump(done);
12308 masm.bind(&notInline);
12309 masm.newGCString(output, temp0, gen->initialStringHeap(), slowPath);
12310 masm.store32(length, Address(output, JSString::offsetOfLength()));
12311 masm.storeDependentStringBase(string, output);
12313 auto initializeDependentString = [&](CharEncoding encoding) {
12314 uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
12315 if (encoding == CharEncoding::Latin1) {
12316 flags |= JSString::LATIN1_CHARS_BIT;
12319 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
12320 masm.loadNonInlineStringChars(string, temp0, encoding);
12321 masm.addToCharPtr(temp0, begin, encoding);
12322 masm.storeNonInlineStringChars(temp0, output);
12325 Label isLatin1;
12326 masm.branchLatin1String(string, &isLatin1);
12327 initializeDependentString(CharEncoding::TwoByte);
12328 masm.jump(done);
12330 masm.bind(&isLatin1);
12331 initializeDependentString(CharEncoding::Latin1);
12334 masm.bind(done);
// Build the shared trampoline used to concatenate two strings.
//
// Calling convention (see the CallTempReg assignments below): |lhs| arrives
// in CallTempReg0, |rhs| in CallTempReg1; the result string — or nullptr on
// failure — is returned in CallTempReg5. Fast paths handled inline:
//   - either operand empty: return the other operand unchanged,
//   - combined length fits a JSInlineString: copy chars inline,
//   - otherwise: allocate a rope with lhs/rhs as children.
JitCode* JitZone::generateStringConcatStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting StringConcat stub");

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateStringConcatStub");

  Register lhs = CallTempReg0;
  Register rhs = CallTempReg1;
  Register temp1 = CallTempReg2;
  Register temp2 = CallTempReg3;
  Register temp3 = CallTempReg4;
  Register output = CallTempReg5;

  Label failure;
#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // If lhs is empty, return rhs.
  Label leftEmpty;
  masm.loadStringLength(lhs, temp1);
  masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);

  // If rhs is empty, return lhs.
  Label rightEmpty;
  masm.loadStringLength(rhs, temp2);
  masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);

  // temp2 = lhs.length + rhs.length, the length of the result.
  masm.add32(temp1, temp2);

  // Check if we can use a JSInlineString. The result is a Latin1 string if
  // lhs and rhs are both Latin1, so we AND the flags.
  Label isInlineTwoByte, isInlineLatin1;
  masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
  masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);

  Label isLatin1, notInline;
  masm.branchTest32(Assembler::NonZero, temp1,
                    Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
  {
    masm.branch32(Assembler::BelowOrEqual, temp2,
                  Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
                  &isInlineTwoByte);
    masm.jump(&notInline);
  }
  masm.bind(&isLatin1);
  {
    masm.branch32(Assembler::BelowOrEqual, temp2,
                  Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &isInlineLatin1);
  }
  masm.bind(&notInline);

  // Keep AND'ed flags in temp1.

  // Ensure result length <= JSString::MAX_LENGTH.
  masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);

  // Allocate a new rope, guaranteed to be in the nursery if initialStringHeap
  // == gc::Heap::Default. (As a result, no post barriers are needed below.)
  masm.newGCString(output, temp3, initialStringHeap, &failure);

  // Store rope length and flags. temp1 still holds the result of AND'ing the
  // lhs and rhs flags, so we just have to clear the other flags to get our rope
  // flags (Latin1 if both lhs and rhs are Latin1).
  static_assert(JSString::INIT_ROPE_FLAGS == 0,
                "Rope type flags must have no bits set");
  masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
  masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
  masm.store32(temp2, Address(output, JSString::offsetOfLength()));

  // Store left and right nodes.
  masm.storeRopeChildren(lhs, rhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&leftEmpty);
  masm.mov(rhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&rightEmpty);
  masm.mov(lhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&isInlineTwoByte);
  ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
                     initialStringHeap, &failure, CharEncoding::TwoByte);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&isInlineLatin1);
  ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
                     initialStringHeap, &failure, CharEncoding::Latin1);
  masm.pop(FramePointer);
  masm.ret();

  // Failure path: signal to the caller by returning nullptr in |output|.
  masm.bind(&failure);
  masm.movePtr(ImmPtr(nullptr), output);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);

  CollectPerfSpewerJitCodeProfile(code, "StringConcatStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "StringConcatStub");
#endif

  return code;
}
12454 void JitRuntime::generateFreeStub(MacroAssembler& masm) {
12455 AutoCreatedBy acb(masm, "JitRuntime::generateFreeStub");
12457 const Register regSlots = CallTempReg0;
12459 freeStubOffset_ = startTrampolineCode(masm);
12461 #ifdef JS_USE_LINK_REGISTER
12462 masm.pushReturnAddress();
12463 #endif
12464 AllocatableRegisterSet regs(RegisterSet::Volatile());
12465 regs.takeUnchecked(regSlots);
12466 LiveRegisterSet save(regs.asLiveSet());
12467 masm.PushRegsInMask(save);
12469 const Register regTemp = regs.takeAnyGeneral();
12470 MOZ_ASSERT(regTemp != regSlots);
12472 using Fn = void (*)(void* p);
12473 masm.setupUnalignedABICall(regTemp);
12474 masm.passABIArg(regSlots);
12475 masm.callWithABI<Fn, js_free>(ABIType::General,
12476 CheckUnsafeCallWithABI::DontCheckOther);
12478 masm.PopRegsInMask(save);
12480 masm.ret();
12483 void JitRuntime::generateLazyLinkStub(MacroAssembler& masm) {
12484 AutoCreatedBy acb(masm, "JitRuntime::generateLazyLinkStub");
12486 lazyLinkStubOffset_ = startTrampolineCode(masm);
12488 #ifdef JS_USE_LINK_REGISTER
12489 masm.pushReturnAddress();
12490 #endif
12491 masm.Push(FramePointer);
12492 masm.moveStackPtrTo(FramePointer);
12494 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
12495 Register temp0 = regs.takeAny();
12496 Register temp1 = regs.takeAny();
12497 Register temp2 = regs.takeAny();
12499 masm.loadJSContext(temp0);
12500 masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::LazyLink);
12501 masm.moveStackPtrTo(temp1);
12503 using Fn = uint8_t* (*)(JSContext* cx, LazyLinkExitFrameLayout* frame);
12504 masm.setupUnalignedABICall(temp2);
12505 masm.passABIArg(temp0);
12506 masm.passABIArg(temp1);
12507 masm.callWithABI<Fn, LazyLinkTopActivation>(
12508 ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
12510 // Discard exit frame and restore frame pointer.
12511 masm.leaveExitFrame(0);
12512 masm.pop(FramePointer);
12514 #ifdef JS_USE_LINK_REGISTER
12515 // Restore the return address such that the emitPrologue function of the
12516 // CodeGenerator can push it back on the stack with pushReturnAddress.
12517 masm.popReturnAddress();
12518 #endif
12519 masm.jump(ReturnReg);
12522 void JitRuntime::generateInterpreterStub(MacroAssembler& masm) {
12523 AutoCreatedBy acb(masm, "JitRuntime::generateInterpreterStub");
12525 interpreterStubOffset_ = startTrampolineCode(masm);
12527 #ifdef JS_USE_LINK_REGISTER
12528 masm.pushReturnAddress();
12529 #endif
12530 masm.Push(FramePointer);
12531 masm.moveStackPtrTo(FramePointer);
12533 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
12534 Register temp0 = regs.takeAny();
12535 Register temp1 = regs.takeAny();
12536 Register temp2 = regs.takeAny();
12538 masm.loadJSContext(temp0);
12539 masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::InterpreterStub);
12540 masm.moveStackPtrTo(temp1);
12542 using Fn = bool (*)(JSContext* cx, InterpreterStubExitFrameLayout* frame);
12543 masm.setupUnalignedABICall(temp2);
12544 masm.passABIArg(temp0);
12545 masm.passABIArg(temp1);
12546 masm.callWithABI<Fn, InvokeFromInterpreterStub>(
12547 ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
12549 masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
12551 // Discard exit frame and restore frame pointer.
12552 masm.leaveExitFrame(0);
12553 masm.pop(FramePointer);
12555 // InvokeFromInterpreterStub stores the return value in argv[0], where the
12556 // caller stored |this|. Subtract |sizeof(void*)| for the frame pointer we
12557 // just popped.
12558 masm.loadValue(Address(masm.getStackPointer(),
12559 JitFrameLayout::offsetOfThis() - sizeof(void*)),
12560 JSReturnOperand);
12561 masm.ret();
12564 void JitRuntime::generateDoubleToInt32ValueStub(MacroAssembler& masm) {
12565 AutoCreatedBy acb(masm, "JitRuntime::generateDoubleToInt32ValueStub");
12566 doubleToInt32ValueStubOffset_ = startTrampolineCode(masm);
12568 Label done;
12569 masm.branchTestDouble(Assembler::NotEqual, R0, &done);
12571 masm.unboxDouble(R0, FloatReg0);
12572 masm.convertDoubleToInt32(FloatReg0, R1.scratchReg(), &done,
12573 /* negativeZeroCheck = */ false);
12574 masm.tagValue(JSVAL_TYPE_INT32, R1.scratchReg(), R0);
12576 masm.bind(&done);
12577 masm.abiret();
12580 void CodeGenerator::visitLinearizeString(LLinearizeString* lir) {
12581 Register str = ToRegister(lir->str());
12582 Register output = ToRegister(lir->output());
12584 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12585 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12586 lir, ArgList(str), StoreRegisterTo(output));
12588 masm.branchIfRope(str, ool->entry());
12590 masm.movePtr(str, output);
12591 masm.bind(ool->rejoin());
12594 void CodeGenerator::visitLinearizeForCharAccess(LLinearizeForCharAccess* lir) {
12595 Register str = ToRegister(lir->str());
12596 Register index = ToRegister(lir->index());
12597 Register output = ToRegister(lir->output());
12599 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12600 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12601 lir, ArgList(str), StoreRegisterTo(output));
12603 masm.branchIfNotCanLoadStringChar(str, index, output, ool->entry());
12605 masm.movePtr(str, output);
12606 masm.bind(ool->rejoin());
12609 void CodeGenerator::visitLinearizeForCodePointAccess(
12610 LLinearizeForCodePointAccess* lir) {
12611 Register str = ToRegister(lir->str());
12612 Register index = ToRegister(lir->index());
12613 Register output = ToRegister(lir->output());
12614 Register temp = ToRegister(lir->temp0());
12616 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12617 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12618 lir, ArgList(str), StoreRegisterTo(output));
12620 masm.branchIfNotCanLoadStringCodePoint(str, index, output, temp,
12621 ool->entry());
12623 masm.movePtr(str, output);
12624 masm.bind(ool->rejoin());
12627 void CodeGenerator::visitToRelativeStringIndex(LToRelativeStringIndex* lir) {
12628 Register index = ToRegister(lir->index());
12629 Register length = ToRegister(lir->length());
12630 Register output = ToRegister(lir->output());
12632 masm.move32(Imm32(0), output);
12633 masm.cmp32Move32(Assembler::LessThan, index, Imm32(0), length, output);
12634 masm.add32(index, output);
12637 void CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) {
12638 Register str = ToRegister(lir->str());
12639 Register output = ToRegister(lir->output());
12640 Register temp0 = ToRegister(lir->temp0());
12641 Register temp1 = ToRegister(lir->temp1());
12643 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12645 if (lir->index()->isBogus()) {
12646 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
12647 StoreRegisterTo(output));
12648 masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
12649 masm.bind(ool->rejoin());
12650 } else {
12651 Register index = ToRegister(lir->index());
12653 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
12654 StoreRegisterTo(output));
12655 masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
12656 masm.bind(ool->rejoin());
// Like visitCharCodeAt, but an out-of-bounds index yields -1 in the output
// instead of calling into the VM.
void CodeGenerator::visitCharCodeAtOrNegative(LCharCodeAtOrNegative* lir) {
  Register str = ToRegister(lir->str());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);

  // Return -1 for out-of-bounds access. Preloaded here so the bounds-check
  // branches below can jump straight to ool->rejoin() with -1 in place.
  masm.move32(Imm32(-1), output);

  if (lir->index()->isBogus()) {
    // Index is statically zero; only the empty string is out of bounds.
    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
                                               StoreRegisterTo(output));

    masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
                  Imm32(0), ool->rejoin());
    masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    Register index = ToRegister(lir->index());

    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
                                               StoreRegisterTo(output));

    // Spectre-safe bounds check: out-of-range indices skip the load and keep
    // the -1 already in |output|.
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              temp0, ool->rejoin());
    masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
    masm.bind(ool->rejoin());
  }
}
12692 void CodeGenerator::visitCodePointAt(LCodePointAt* lir) {
12693 Register str = ToRegister(lir->str());
12694 Register index = ToRegister(lir->index());
12695 Register output = ToRegister(lir->output());
12696 Register temp0 = ToRegister(lir->temp0());
12697 Register temp1 = ToRegister(lir->temp1());
12699 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12700 auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
12701 StoreRegisterTo(output));
12703 masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
12704 masm.bind(ool->rejoin());
12707 void CodeGenerator::visitCodePointAtOrNegative(LCodePointAtOrNegative* lir) {
12708 Register str = ToRegister(lir->str());
12709 Register index = ToRegister(lir->index());
12710 Register output = ToRegister(lir->output());
12711 Register temp0 = ToRegister(lir->temp0());
12712 Register temp1 = ToRegister(lir->temp1());
12714 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12715 auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
12716 StoreRegisterTo(output));
12718 // Return -1 for out-of-bounds access.
12719 masm.move32(Imm32(-1), output);
12721 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
12722 temp0, ool->rejoin());
12723 masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
12724 masm.bind(ool->rejoin());
12727 void CodeGenerator::visitNegativeToNaN(LNegativeToNaN* lir) {
12728 Register input = ToRegister(lir->input());
12729 ValueOperand output = ToOutValue(lir);
12731 masm.tagValue(JSVAL_TYPE_INT32, input, output);
12733 Label done;
12734 masm.branchTest32(Assembler::NotSigned, input, input, &done);
12735 masm.moveValue(JS::NaNValue(), output);
12736 masm.bind(&done);
12739 void CodeGenerator::visitNegativeToUndefined(LNegativeToUndefined* lir) {
12740 Register input = ToRegister(lir->input());
12741 ValueOperand output = ToOutValue(lir);
12743 masm.tagValue(JSVAL_TYPE_INT32, input, output);
12745 Label done;
12746 masm.branchTest32(Assembler::NotSigned, input, input, &done);
12747 masm.moveValue(JS::UndefinedValue(), output);
12748 masm.bind(&done);
12751 void CodeGenerator::visitFromCharCode(LFromCharCode* lir) {
12752 Register code = ToRegister(lir->code());
12753 Register output = ToRegister(lir->output());
12755 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12756 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
12757 StoreRegisterTo(output));
12759 // OOL path if code >= UNIT_STATIC_LIMIT.
12760 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
12761 ool->entry());
12763 masm.bind(ool->rejoin());
12766 void CodeGenerator::visitFromCharCodeEmptyIfNegative(
12767 LFromCharCodeEmptyIfNegative* lir) {
12768 Register code = ToRegister(lir->code());
12769 Register output = ToRegister(lir->output());
12771 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12772 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
12773 StoreRegisterTo(output));
12775 // Return the empty string for negative inputs.
12776 const JSAtomState& names = gen->runtime->names();
12777 masm.movePtr(ImmGCPtr(names.empty_), output);
12778 masm.branchTest32(Assembler::Signed, code, code, ool->rejoin());
12780 // OOL path if code >= UNIT_STATIC_LIMIT.
12781 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
12782 ool->entry());
12784 masm.bind(ool->rejoin());
// String.fromCharCode where a negative input produces |undefined| rather
// than a string.
void CodeGenerator::visitFromCharCodeUndefinedIfNegative(
    LFromCharCodeUndefinedIfNegative* lir) {
  Register code = ToRegister(lir->code());
  ValueOperand output = ToOutValue(lir);
  // The output Value's scratch register temporarily holds the string pointer
  // before it's tagged into |output| below.
  Register temp = output.scratchReg();

  using Fn = JSLinearString* (*)(JSContext*, int32_t);
  auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
                                                    StoreRegisterTo(temp));

  // Return |undefined| for negative inputs.
  Label done;
  masm.moveValue(UndefinedValue(), output);
  masm.branchTest32(Assembler::Signed, code, code, &done);

  // OOL path if code >= UNIT_STATIC_LIMIT.
  masm.lookupStaticString(code, temp, gen->runtime->staticStrings(),
                          ool->entry());

  // Both the fast path and the OOL call leave the string in |temp|; tag it
  // as a string Value here.
  masm.bind(ool->rejoin());
  masm.tagValue(JSVAL_TYPE_STRING, temp, output);

  masm.bind(&done);
}
// String.fromCodePoint for a single code point: static-strings fast path for
// Latin-1 units, inline allocation of a JSThinInlineString for BMP and
// supplementary (surrogate-pair) code points, VM call only on allocation
// failure, and a bailout for invalid (> NonBMPMax) inputs.
void CodeGenerator::visitFromCodePoint(LFromCodePoint* lir) {
  Register codePoint = ToRegister(lir->codePoint());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  LSnapshot* snapshot = lir->snapshot();

  // The OOL path is only taken when we can't allocate the inline string.
  using Fn = JSLinearString* (*)(JSContext*, char32_t);
  auto* ool = oolCallVM<Fn, js::StringFromCodePoint>(lir, ArgList(codePoint),
                                                     StoreRegisterTo(output));

  Label isTwoByte;
  Label* done = ool->rejoin();

  static_assert(
      StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
      "Latin-1 strings can be loaded from static strings");

  {
    masm.lookupStaticString(codePoint, output, gen->runtime->staticStrings(),
                            &isTwoByte);
    masm.jump(done);
  }
  masm.bind(&isTwoByte);
  {
    // Use a bailout if the input is not a valid code point, because
    // MFromCodePoint is movable and it'd be observable when a moved
    // fromCodePoint throws an exception before its actual call site.
    bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax),
                 snapshot);

    // Allocate a JSThinInlineString.
    {
      static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE >= 2,
                    "JSThinInlineString can hold a supplementary code point");

      uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
      masm.newGCString(output, temp0, gen->initialStringHeap(), ool->entry());
      masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
    }

    // Code points below NonBMPMin fit in one char16_t; larger ones need a
    // lead/trail surrogate pair.
    Label isSupplementary;
    masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(unicode::NonBMPMin),
                  &isSupplementary);
    {
      // Store length.
      masm.store32(Imm32(1), Address(output, JSString::offsetOfLength()));

      // Load chars pointer in temp0.
      masm.loadInlineStringCharsForStore(output, temp0);

      masm.store16(codePoint, Address(temp0, 0));

      masm.jump(done);
    }
    masm.bind(&isSupplementary);
    {
      // Store length.
      masm.store32(Imm32(2), Address(output, JSString::offsetOfLength()));

      // Load chars pointer in temp0.
      masm.loadInlineStringCharsForStore(output, temp0);

      // Inlined unicode::LeadSurrogate(uint32_t).
      masm.move32(codePoint, temp1);
      masm.rshift32(Imm32(10), temp1);
      masm.add32(Imm32(unicode::LeadSurrogateMin - (unicode::NonBMPMin >> 10)),
                 temp1);

      masm.store16(temp1, Address(temp0, 0));

      // Inlined unicode::TrailSurrogate(uint32_t).
      masm.move32(codePoint, temp1);
      masm.and32(Imm32(0x3FF), temp1);
      masm.or32(Imm32(unicode::TrailSurrogateMin), temp1);

      masm.store16(temp1, Address(temp0, sizeof(char16_t)));
    }
  }

  masm.bind(done);
}
12896 void CodeGenerator::visitStringIncludes(LStringIncludes* lir) {
12897 pushArg(ToRegister(lir->searchString()));
12898 pushArg(ToRegister(lir->string()));
12900 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12901 callVM<Fn, js::StringIncludes>(lir);
12904 template <typename LIns>
12905 static void CallStringMatch(MacroAssembler& masm, LIns* lir, OutOfLineCode* ool,
12906 LiveRegisterSet volatileRegs) {
12907 Register string = ToRegister(lir->string());
12908 Register output = ToRegister(lir->output());
12909 Register tempLength = ToRegister(lir->temp0());
12910 Register tempChars = ToRegister(lir->temp1());
12911 Register maybeTempPat = ToTempRegisterOrInvalid(lir->temp2());
12913 const JSLinearString* searchString = lir->searchString();
12914 size_t length = searchString->length();
12915 MOZ_ASSERT(length == 1 || length == 2);
12917 // The additional temp register is only needed when searching for two
12918 // pattern characters.
12919 MOZ_ASSERT_IF(length == 2, maybeTempPat != InvalidReg);
12921 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
12922 masm.move32(Imm32(0), output);
12923 } else {
12924 masm.move32(Imm32(-1), output);
12927 masm.loadStringLength(string, tempLength);
12929 // Can't be a substring when the string is smaller than the search string.
12930 Label done;
12931 masm.branch32(Assembler::Below, tempLength, Imm32(length), ool->rejoin());
12933 bool searchStringIsPureTwoByte = false;
12934 if (searchString->hasTwoByteChars()) {
12935 JS::AutoCheckCannotGC nogc;
12936 searchStringIsPureTwoByte =
12937 !mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc));
12940 // Pure two-byte strings can't occur in a Latin-1 string.
12941 if (searchStringIsPureTwoByte) {
12942 masm.branchLatin1String(string, ool->rejoin());
12945 // Slow path when we need to linearize the string.
12946 masm.branchIfRope(string, ool->entry());
12948 Label restoreVolatile;
12950 auto callMatcher = [&](CharEncoding encoding) {
12951 masm.loadStringChars(string, tempChars, encoding);
12953 LiveGeneralRegisterSet liveRegs;
12954 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
12955 // Save |tempChars| to compute the result index.
12956 liveRegs.add(tempChars);
12958 #ifdef DEBUG
12959 // Save |tempLength| in debug-mode for assertions.
12960 liveRegs.add(tempLength);
12961 #endif
12963 // Exclude non-volatile registers.
12964 liveRegs.set() = GeneralRegisterSet::Intersect(
12965 liveRegs.set(), GeneralRegisterSet::Volatile());
12967 masm.PushRegsInMask(liveRegs);
12970 if (length == 1) {
12971 char16_t pat = searchString->latin1OrTwoByteChar(0);
12972 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
12973 pat <= JSString::MAX_LATIN1_CHAR);
12975 masm.move32(Imm32(pat), output);
12977 masm.setupAlignedABICall();
12978 masm.passABIArg(tempChars);
12979 masm.passABIArg(output);
12980 masm.passABIArg(tempLength);
12981 if (encoding == CharEncoding::Latin1) {
12982 using Fn = const char* (*)(const char*, char, size_t);
12983 masm.callWithABI<Fn, mozilla::SIMD::memchr8>(
12984 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
12985 } else {
12986 using Fn = const char16_t* (*)(const char16_t*, char16_t, size_t);
12987 masm.callWithABI<Fn, mozilla::SIMD::memchr16>(
12988 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
12990 } else {
12991 char16_t pat0 = searchString->latin1OrTwoByteChar(0);
12992 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
12993 pat0 <= JSString::MAX_LATIN1_CHAR);
12995 char16_t pat1 = searchString->latin1OrTwoByteChar(1);
12996 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
12997 pat1 <= JSString::MAX_LATIN1_CHAR);
12999 masm.move32(Imm32(pat0), output);
13000 masm.move32(Imm32(pat1), maybeTempPat);
13002 masm.setupAlignedABICall();
13003 masm.passABIArg(tempChars);
13004 masm.passABIArg(output);
13005 masm.passABIArg(maybeTempPat);
13006 masm.passABIArg(tempLength);
13007 if (encoding == CharEncoding::Latin1) {
13008 using Fn = const char* (*)(const char*, char, char, size_t);
13009 masm.callWithABI<Fn, mozilla::SIMD::memchr2x8>(
13010 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13011 } else {
13012 using Fn =
13013 const char16_t* (*)(const char16_t*, char16_t, char16_t, size_t);
13014 masm.callWithABI<Fn, mozilla::SIMD::memchr2x16>(
13015 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13019 masm.storeCallPointerResult(output);
13021 // Convert to string index for `indexOf`.
13022 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
13023 // Restore |tempChars|. (And in debug mode |tempLength|.)
13024 masm.PopRegsInMask(liveRegs);
13026 Label found;
13027 masm.branchPtr(Assembler::NotEqual, output, ImmPtr(nullptr), &found);
13029 masm.move32(Imm32(-1), output);
13030 masm.jump(&restoreVolatile);
13032 masm.bind(&found);
13034 #ifdef DEBUG
13035 // Check lower bound.
13036 Label lower;
13037 masm.branchPtr(Assembler::AboveOrEqual, output, tempChars, &lower);
13038 masm.assumeUnreachable("result pointer below string chars");
13039 masm.bind(&lower);
13041 // Compute the end position of the characters.
13042 auto scale = encoding == CharEncoding::Latin1 ? TimesOne : TimesTwo;
13043 masm.computeEffectiveAddress(BaseIndex(tempChars, tempLength, scale),
13044 tempLength);
13046 // Check upper bound.
13047 Label upper;
13048 masm.branchPtr(Assembler::Below, output, tempLength, &upper);
13049 masm.assumeUnreachable("result pointer above string chars");
13050 masm.bind(&upper);
13051 #endif
13053 masm.subPtr(tempChars, output);
13055 if (encoding == CharEncoding::TwoByte) {
13056 masm.rshiftPtr(Imm32(1), output);
13061 volatileRegs.takeUnchecked(output);
13062 volatileRegs.takeUnchecked(tempLength);
13063 volatileRegs.takeUnchecked(tempChars);
13064 if (maybeTempPat != InvalidReg) {
13065 volatileRegs.takeUnchecked(maybeTempPat);
13067 masm.PushRegsInMask(volatileRegs);
13069 // Handle the case when the input is a Latin-1 string.
13070 if (!searchStringIsPureTwoByte) {
13071 Label twoByte;
13072 masm.branchTwoByteString(string, &twoByte);
13074 callMatcher(CharEncoding::Latin1);
13075 masm.jump(&restoreVolatile);
13077 masm.bind(&twoByte);
13080 // Handle the case when the input is a two-byte string.
13081 callMatcher(CharEncoding::TwoByte);
13083 masm.bind(&restoreVolatile);
13084 masm.PopRegsInMask(volatileRegs);
13086 // Convert to bool for `includes`.
13087 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
13088 masm.cmpPtrSet(Assembler::NotEqual, output, ImmPtr(nullptr), output);
13091 masm.bind(ool->rejoin());
13094 void CodeGenerator::visitStringIncludesSIMD(LStringIncludesSIMD* lir) {
13095 Register string = ToRegister(lir->string());
13096 Register output = ToRegister(lir->output());
13097 const JSLinearString* searchString = lir->searchString();
13099 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13100 auto* ool = oolCallVM<Fn, js::StringIncludes>(
13101 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
13103 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
13106 void CodeGenerator::visitStringIndexOf(LStringIndexOf* lir) {
13107 pushArg(ToRegister(lir->searchString()));
13108 pushArg(ToRegister(lir->string()));
13110 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
13111 callVM<Fn, js::StringIndexOf>(lir);
13114 void CodeGenerator::visitStringIndexOfSIMD(LStringIndexOfSIMD* lir) {
13115 Register string = ToRegister(lir->string());
13116 Register output = ToRegister(lir->output());
13117 const JSLinearString* searchString = lir->searchString();
13119 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
13120 auto* ool = oolCallVM<Fn, js::StringIndexOf>(
13121 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
13123 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
13126 void CodeGenerator::visitStringLastIndexOf(LStringLastIndexOf* lir) {
13127 pushArg(ToRegister(lir->searchString()));
13128 pushArg(ToRegister(lir->string()));
13130 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
13131 callVM<Fn, js::StringLastIndexOf>(lir);
13134 void CodeGenerator::visitStringStartsWith(LStringStartsWith* lir) {
13135 pushArg(ToRegister(lir->searchString()));
13136 pushArg(ToRegister(lir->string()));
13138 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13139 callVM<Fn, js::StringStartsWith>(lir);
// String.prototype.startsWith with a constant, non-empty search string.
// Handles length checks, rope unwinding along the left spine, pointer
// equality, and an encoding check inline; character comparison is delegated
// to compareStringChars. Falls back to the VM for unlinearizable inputs.
void CodeGenerator::visitStringStartsWithInline(LStringStartsWithInline* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* searchString = lir->searchString();

  size_t length = searchString->length();
  MOZ_ASSERT(length > 0);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  auto* ool = oolCallVM<Fn, js::StringStartsWith>(
      lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));

  // Preload |false| so every early exit can jump to ool->rejoin() directly.
  masm.move32(Imm32(0), output);

  // Can't be a prefix when the string is smaller than the search string.
  masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
                Imm32(length), ool->rejoin());

  // Unwind ropes at the start if possible.
  Label compare;
  masm.movePtr(string, temp);
  masm.branchIfNotRope(temp, &compare);

  Label unwindRope;
  masm.bind(&unwindRope);
  masm.loadRopeLeftChild(temp, output);
  masm.movePtr(output, temp);

  // If the left child is smaller than the search string, jump into the VM to
  // linearize the string.
  masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
                Imm32(length), ool->entry());

  // Otherwise keep unwinding ropes.
  masm.branchIfRope(temp, &unwindRope);

  masm.bind(&compare);

  // If operands point to the same instance, it's trivially a prefix.
  Label notPointerEqual;
  masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
                 &notPointerEqual);
  masm.move32(Imm32(1), output);
  masm.jump(ool->rejoin());
  masm.bind(&notPointerEqual);

  if (searchString->hasTwoByteChars()) {
    // Pure two-byte strings can't be a prefix of Latin-1 strings.
    JS::AutoCheckCannotGC nogc;
    if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
      Label compareChars;
      masm.branchTwoByteString(temp, &compareChars);
      masm.move32(Imm32(0), output);
      masm.jump(ool->rejoin());
      masm.bind(&compareChars);
    }
  }

  // Load the input string's characters. |output| doubles as a scratch for
  // the chars pointer until the final comparison overwrites it.
  Register stringChars = output;
  masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());

  // Start comparing character by character.
  masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);

  masm.bind(ool->rejoin());
}
13212 void CodeGenerator::visitStringEndsWith(LStringEndsWith* lir) {
13213 pushArg(ToRegister(lir->searchString()));
13214 pushArg(ToRegister(lir->string()));
13216 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13217 callVM<Fn, js::StringEndsWith>(lir);
// Inline fast path for String.prototype.endsWith when the search string is a
// known non-empty linear string. Falls back to the js::StringEndsWith VM call
// (the |ool| path) whenever linearization or generic handling is required.
void CodeGenerator::visitStringEndsWithInline(LStringEndsWithInline* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* searchString = lir->searchString();

  size_t length = searchString->length();
  MOZ_ASSERT(length > 0);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  auto* ool = oolCallVM<Fn, js::StringEndsWith>(
      lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));

  // Default result is |false|; paths that jump straight to the rejoin point
  // rely on this.
  masm.move32(Imm32(0), output);

  // Can't be a suffix when the string is smaller than the search string.
  masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
                Imm32(length), ool->rejoin());

  // Unwind ropes at the end if possible.
  Label compare;
  masm.movePtr(string, temp);
  masm.branchIfNotRope(temp, &compare);

  // Walk down the chain of right children: the suffix of a rope lives in its
  // rightmost descendant.
  Label unwindRope;
  masm.bind(&unwindRope);
  masm.loadRopeRightChild(temp, output);
  masm.movePtr(output, temp);

  // If the right child is smaller than the search string, jump into the VM to
  // linearize the string.
  masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
                Imm32(length), ool->entry());

  // Otherwise keep unwinding ropes.
  masm.branchIfRope(temp, &unwindRope);

  masm.bind(&compare);

  // If operands point to the same instance, it's trivially a suffix.
  Label notPointerEqual;
  masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
                 &notPointerEqual);
  masm.move32(Imm32(1), output);
  masm.jump(ool->rejoin());
  masm.bind(&notPointerEqual);

  CharEncoding encoding = searchString->hasLatin1Chars()
                              ? CharEncoding::Latin1
                              : CharEncoding::TwoByte;
  if (encoding == CharEncoding::TwoByte) {
    // Pure two-byte strings can't be a suffix of Latin-1 strings.
    JS::AutoCheckCannotGC nogc;
    if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
      Label compareChars;
      masm.branchTwoByteString(temp, &compareChars);
      // Latin-1 input, non-Latin-1 search string: result stays |false|.
      masm.move32(Imm32(0), output);
      masm.jump(ool->rejoin());
      masm.bind(&compareChars);
    }
  }

  // Load the input string's characters.
  Register stringChars = output;
  masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());

  // Move string-char pointer to the suffix string.
  masm.loadStringLength(temp, temp);
  masm.sub32(Imm32(length), temp);
  masm.addToCharPtr(stringChars, temp, encoding);

  // Start comparing character by character.
  masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);

  masm.bind(ool->rejoin());
}
// Inline fast path for String.prototype.toLowerCase on linear Latin-1 strings
// of bounded length. Everything else (ropes, two-byte strings, long strings,
// allocation failure) goes through the js::StringToLowerCase OOL call.
void CodeGenerator::visitStringToLowerCase(LStringToLowerCase* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());

  // On x86 there are not enough registers. In that case reuse the string
  // register as a temporary.
  Register temp3 =
      lir->temp3()->isBogusTemp() ? string : ToRegister(lir->temp3());
  Register temp4 = ToRegister(lir->temp4());

  using Fn = JSString* (*)(JSContext*, HandleString);
  OutOfLineCode* ool = oolCallVM<Fn, js::StringToLowerCase>(
      lir, ArgList(string), StoreRegisterTo(output));

  // Take the slow path if the string isn't a linear Latin-1 string.
  Imm32 linearLatin1Bits(JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT);
  Register flags = temp0;
  masm.load32(Address(string, JSString::offsetOfFlags()), flags);
  masm.and32(linearLatin1Bits, flags);
  masm.branch32(Assembler::NotEqual, flags, linearLatin1Bits, ool->entry());

  // |temp0| is reused: it held the flags above, now the length.
  Register length = temp0;
  masm.loadStringLength(string, length);

  // Return the input if it's the empty string.
  Label notEmptyString;
  masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmptyString);
  {
    masm.movePtr(string, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&notEmptyString);

  Register inputChars = temp1;
  masm.loadStringChars(string, inputChars, CharEncoding::Latin1);

  Register toLowerCaseTable = temp2;
  masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), toLowerCaseTable);

  // Single element strings can be directly retrieved from static strings cache.
  Label notSingleElementString;
  masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleElementString);
  {
    Register current = temp4;

    masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
    masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
                         current);
    masm.lookupStaticString(current, output, gen->runtime->staticStrings());

    masm.jump(ool->rejoin());
  }
  masm.bind(&notSingleElementString);

  // Use the OOL-path when the string is too long. This prevents scanning long
  // strings which have upper case characters only near the end a second time in
  // the VM.
  constexpr int32_t MaxInlineLength = 64;
  masm.branch32(Assembler::Above, length, Imm32(MaxInlineLength), ool->entry());

  {
    // Check if there are any characters which need to be converted.
    //
    // This extra loop gives a small performance improvement for strings which
    // are already lower cased and lets us avoid calling into the runtime for
    // non-inline, all lower case strings. But more importantly it avoids
    // repeated inline allocation failures:
    // |AllocateThinOrFatInlineString| below takes the OOL-path and calls the
    // |js::StringToLowerCase| runtime function when the result string can't be
    // allocated inline. And |js::StringToLowerCase| directly returns the input
    // string when no characters need to be converted. That means it won't
    // trigger GC to clear up the free nursery space, so the next toLowerCase()
    // call will again fail to inline allocate the result string.
    Label hasUpper;
    {
      Register checkInputChars = output;
      masm.movePtr(inputChars, checkInputChars);

      Register current = temp4;

      // Scan until a character maps to something different in the table.
      Label start;
      masm.bind(&start);
      masm.loadChar(Address(checkInputChars, 0), current, CharEncoding::Latin1);
      masm.branch8(Assembler::NotEqual,
                   BaseIndex(toLowerCaseTable, current, TimesOne), current,
                   &hasUpper);
      masm.addPtr(Imm32(sizeof(Latin1Char)), checkInputChars);
      masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);

      // Input is already in lower case.
      masm.movePtr(string, output);
      masm.jump(ool->rejoin());
    }
    masm.bind(&hasUpper);

    // |length| was clobbered above, reload.
    masm.loadStringLength(string, length);

    // Call into the runtime when we can't create an inline string.
    masm.branch32(Assembler::Above, length,
                  Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), ool->entry());

    AllocateThinOrFatInlineString(masm, output, length, temp4,
                                  initialStringHeap(), ool->entry(),
                                  CharEncoding::Latin1);

    // On x86 |temp3| aliases |string|; preserve the original string pointer
    // across the copy loop below.
    if (temp3 == string) {
      masm.push(string);
    }

    Register outputChars = temp3;
    masm.loadInlineStringCharsForStore(output, outputChars);

    {
      Register current = temp4;

      // Copy loop: translate each Latin-1 char through the table.
      Label start;
      masm.bind(&start);
      masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
      masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
                           current);
      masm.storeChar(current, Address(outputChars, 0), CharEncoding::Latin1);
      masm.addPtr(Imm32(sizeof(Latin1Char)), inputChars);
      masm.addPtr(Imm32(sizeof(Latin1Char)), outputChars);
      masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
    }

    if (temp3 == string) {
      masm.pop(string);
    }
  }

  masm.bind(ool->rejoin());
}
13436 void CodeGenerator::visitStringToUpperCase(LStringToUpperCase* lir) {
13437 pushArg(ToRegister(lir->string()));
13439 using Fn = JSString* (*)(JSContext*, HandleString);
13440 callVM<Fn, js::StringToUpperCase>(lir);
13443 void CodeGenerator::visitCharCodeToLowerCase(LCharCodeToLowerCase* lir) {
13444 Register code = ToRegister(lir->code());
13445 Register output = ToRegister(lir->output());
13446 Register temp = ToRegister(lir->temp0());
13448 using Fn = JSString* (*)(JSContext*, int32_t);
13449 auto* ool = oolCallVM<Fn, jit::CharCodeToLowerCase>(lir, ArgList(code),
13450 StoreRegisterTo(output));
13452 constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;
13454 // OOL path if code >= NonLatin1Min.
13455 masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());
13457 // Convert to lower case.
13458 masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), temp);
13459 masm.load8ZeroExtend(BaseIndex(temp, code, TimesOne), temp);
13461 // Load static string for lower case character.
13462 masm.lookupStaticString(temp, output, gen->runtime->staticStrings());
13464 masm.bind(ool->rejoin());
13467 void CodeGenerator::visitCharCodeToUpperCase(LCharCodeToUpperCase* lir) {
13468 Register code = ToRegister(lir->code());
13469 Register output = ToRegister(lir->output());
13470 Register temp = ToRegister(lir->temp0());
13472 using Fn = JSString* (*)(JSContext*, int32_t);
13473 auto* ool = oolCallVM<Fn, jit::CharCodeToUpperCase>(lir, ArgList(code),
13474 StoreRegisterTo(output));
13476 constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;
13478 // OOL path if code >= NonLatin1Min.
13479 masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());
13481 // Most one element Latin-1 strings can be directly retrieved from the
13482 // static strings cache, except the following three characters:
13484 // 1. ToUpper(U+00B5) = 0+039C
13485 // 2. ToUpper(U+00FF) = 0+0178
13486 // 3. ToUpper(U+00DF) = 0+0053 0+0053
13487 masm.branch32(Assembler::Equal, code, Imm32(unicode::MICRO_SIGN),
13488 ool->entry());
13489 masm.branch32(Assembler::Equal, code,
13490 Imm32(unicode::LATIN_SMALL_LETTER_Y_WITH_DIAERESIS),
13491 ool->entry());
13492 masm.branch32(Assembler::Equal, code,
13493 Imm32(unicode::LATIN_SMALL_LETTER_SHARP_S), ool->entry());
13495 // Inline unicode::ToUpperCase (without the special case for ASCII characters)
13497 constexpr size_t shift = unicode::CharInfoShift;
13499 // code >> shift
13500 masm.move32(code, temp);
13501 masm.rshift32(Imm32(shift), temp);
13503 // index = index1[code >> shift];
13504 masm.movePtr(ImmPtr(unicode::index1), output);
13505 masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);
13507 // (code & ((1 << shift) - 1)
13508 masm.move32(code, output);
13509 masm.and32(Imm32((1 << shift) - 1), output);
13511 // (index << shift) + (code & ((1 << shift) - 1))
13512 masm.lshift32(Imm32(shift), temp);
13513 masm.add32(output, temp);
13515 // index = index2[(index << shift) + (code & ((1 << shift) - 1))]
13516 masm.movePtr(ImmPtr(unicode::index2), output);
13517 masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);
13519 // Compute |index * 6| through |(index * 3) * TimesTwo|.
13520 static_assert(sizeof(unicode::CharacterInfo) == 6);
13521 masm.mulBy3(temp, temp);
13523 // upperCase = js_charinfo[index].upperCase
13524 masm.movePtr(ImmPtr(unicode::js_charinfo), output);
13525 masm.load16ZeroExtend(BaseIndex(output, temp, TimesTwo,
13526 offsetof(unicode::CharacterInfo, upperCase)),
13527 temp);
13529 // uint16_t(ch) + upperCase
13530 masm.add32(code, temp);
13532 // Clear any high bits added when performing the unsigned 16-bit addition
13533 // through a signed 32-bit addition.
13534 masm.move8ZeroExtend(temp, temp);
13536 // Load static string for upper case character.
13537 masm.lookupStaticString(temp, output, gen->runtime->staticStrings());
13539 masm.bind(ool->rejoin());
13542 void CodeGenerator::visitStringTrimStartIndex(LStringTrimStartIndex* lir) {
13543 Register string = ToRegister(lir->string());
13544 Register output = ToRegister(lir->output());
13546 auto volatileRegs = liveVolatileRegs(lir);
13547 volatileRegs.takeUnchecked(output);
13549 masm.PushRegsInMask(volatileRegs);
13551 using Fn = int32_t (*)(const JSString*);
13552 masm.setupAlignedABICall();
13553 masm.passABIArg(string);
13554 masm.callWithABI<Fn, jit::StringTrimStartIndex>();
13555 masm.storeCallInt32Result(output);
13557 masm.PopRegsInMask(volatileRegs);
13560 void CodeGenerator::visitStringTrimEndIndex(LStringTrimEndIndex* lir) {
13561 Register string = ToRegister(lir->string());
13562 Register start = ToRegister(lir->start());
13563 Register output = ToRegister(lir->output());
13565 auto volatileRegs = liveVolatileRegs(lir);
13566 volatileRegs.takeUnchecked(output);
13568 masm.PushRegsInMask(volatileRegs);
13570 using Fn = int32_t (*)(const JSString*, int32_t);
13571 masm.setupAlignedABICall();
13572 masm.passABIArg(string);
13573 masm.passABIArg(start);
13574 masm.callWithABI<Fn, jit::StringTrimEndIndex>();
13575 masm.storeCallInt32Result(output);
13577 masm.PopRegsInMask(volatileRegs);
13580 void CodeGenerator::visitStringSplit(LStringSplit* lir) {
13581 pushArg(Imm32(INT32_MAX));
13582 pushArg(ToRegister(lir->separator()));
13583 pushArg(ToRegister(lir->string()));
13585 using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
13586 callVM<Fn, js::StringSplitString>(lir);
13589 void CodeGenerator::visitInitializedLength(LInitializedLength* lir) {
13590 Address initLength(ToRegister(lir->elements()),
13591 ObjectElements::offsetOfInitializedLength());
13592 masm.load32(initLength, ToRegister(lir->output()));
13595 void CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir) {
13596 Address initLength(ToRegister(lir->elements()),
13597 ObjectElements::offsetOfInitializedLength());
13598 SetLengthFromIndex(masm, lir->index(), initLength);
13601 void CodeGenerator::visitNotBI(LNotBI* lir) {
13602 Register input = ToRegister(lir->input());
13603 Register output = ToRegister(lir->output());
13605 masm.cmp32Set(Assembler::Equal, Address(input, BigInt::offsetOfLength()),
13606 Imm32(0), output);
// !object: normally false (objects are truthy), unless the object emulates
// undefined. When the "no object emulates undefined" fuse is intact we can
// emit a constant false; otherwise we branch through the OOL test.
void CodeGenerator::visitNotO(LNotO* lir) {
  Register objreg = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  if (intact) {
    // Bug 1874905: It would be fantastic if this could be optimized out.
    assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
    masm.move32(Imm32(0), output);
  } else {
    auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
    addOutOfLineCode(ool, lir->mir());

    Label* ifEmulatesUndefined = ool->label1();
    Label* ifDoesntEmulateUndefined = ool->label2();

    branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
                                      ifDoesntEmulateUndefined, output, ool);
    // fall through

    Label join;

    // Doesn't emulate undefined: !object is false.
    masm.move32(Imm32(0), output);
    masm.jump(&join);

    // Emulates undefined: !object is true.
    masm.bind(ifEmulatesUndefined);
    masm.move32(Imm32(1), output);

    masm.bind(&join);
  }
}
// !value for a boxed Value: test truthiness (possibly via the OOL object
// test), then materialize the negated boolean in |output|.
void CodeGenerator::visitNotV(LNotV* lir) {
  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* ifTruthy = ool->label1();
  Label* ifFalsy = ool->label2();

  ValueOperand input = ToValue(lir, LNotV::InputIndex);
  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
  FloatRegister floatTemp = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());
  const TypeDataList& observedTypes = lir->mir()->observedTypes();

  testValueTruthy(input, tempToUnbox, output, floatTemp, observedTypes,
                  ifTruthy, ifFalsy, ool);

  Label join;

  // Note that the testValueTruthy call above may choose to fall through
  // to ifTruthy instead of branching there.
  masm.bind(ifTruthy);
  masm.move32(Imm32(0), output);
  masm.jump(&join);

  masm.bind(ifFalsy);
  masm.move32(Imm32(1), output);

  // both branches meet here.
  masm.bind(&join);
}
// Emit an unsigned |index < length| bounds check that bails out of Ion on
// failure. Handles every combination of constant/register/memory operands for
// both Int32 and IntPtr index types.
void CodeGenerator::visitBoundsCheck(LBoundsCheck* lir) {
  const LAllocation* index = lir->index();
  const LAllocation* length = lir->length();
  LSnapshot* snapshot = lir->snapshot();

  MIRType type = lir->mir()->type();

  // Bail out when |cond| holds for (lhs, rhs), using the width matching the
  // index type.
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  // Same, but the right-hand side is a constant wrapped as Imm32/ImmWord.
  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (index->isConstant()) {
    // Use uint32 so that the comparison is unsigned.
    uint32_t idx = ToInt32(index);
    if (length->isConstant()) {
      uint32_t len = ToInt32(lir->length());
      if (idx < len) {
        // Statically in bounds: no code needed.
        return;
      }
      // Statically out of bounds: unconditional bailout.
      bailout(snapshot);
      return;
    }

    // Constant index vs. dynamic length: bail when length <= idx.
    if (length->isRegister()) {
      bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), idx);
    } else {
      bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), idx);
    }
    return;
  }

  // Dynamic index: bail when index >= length (unsigned).
  Register indexReg = ToRegister(index);
  if (length->isConstant()) {
    bailoutCmpConstant(Assembler::AboveOrEqual, indexReg, ToInt32(length));
  } else if (length->isRegister()) {
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), indexReg);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), indexReg);
  }
}
// Bounds check for an index plus a compile-time [minimum, maximum] offset
// range: bail out unless index+min >= 0 and index+max < length, with explicit
// overflow handling on the additions.
void CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir) {
  int32_t min = lir->mir()->minimum();
  int32_t max = lir->mir()->maximum();
  MOZ_ASSERT(max >= min);

  LSnapshot* snapshot = lir->snapshot();
  MIRType type = lir->mir()->type();

  const LAllocation* length = lir->length();
  Register temp = ToRegister(lir->getTemp(0));

  // Bail out when |cond| holds, at the width matching the index type.
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  // Same, with a constant right-hand side.
  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (lir->index()->isConstant()) {
    int32_t nmin, nmax;
    int32_t index = ToInt32(lir->index());
    if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
      // Both endpoints computed without overflow and the lower endpoint is
      // nonnegative: a single unsigned compare against the upper endpoint
      // suffices.
      if (length->isRegister()) {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), nmax);
      } else {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), nmax);
      }
      return;
    }
    masm.mov(ImmWord(index), temp);
  } else {
    masm.mov(ToRegister(lir->index()), temp);
  }

  // If the minimum and maximum differ then do an underflow check first.
  // If the two are the same then doing an unsigned comparison on the
  // length will also catch a negative index.
  if (min != max) {
    if (min != 0) {
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(min), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    }

    // index + min must be nonnegative.
    bailoutCmpConstant(Assembler::LessThan, temp, 0);

    if (min != 0) {
      int32_t diff;
      if (SafeSub(max, min, &diff)) {
        // Fold the -min back into the constant added below.
        max = diff;
      } else {
        if (type == MIRType::Int32) {
          masm.sub32(Imm32(min), temp);
        } else {
          masm.subPtr(Imm32(min), temp);
        }
      }
    }
  }

  // Compute the maximum possible index. No overflow check is needed when
  // max > 0. We can only wraparound to a negative number, which will test as
  // larger than all nonnegative numbers in the unsigned comparison, and the
  // length is required to be nonnegative (else testing a negative length
  // would succeed on any nonnegative index).
  if (max != 0) {
    if (max < 0) {
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(max), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    } else {
      if (type == MIRType::Int32) {
        masm.add32(Imm32(max), temp);
      } else {
        masm.addPtr(Imm32(max), temp);
      }
    }
  }

  // Finally: bail when length <= index + max (unsigned).
  if (length->isRegister()) {
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), temp);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), temp);
  }
}
13834 void CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir) {
13835 int32_t min = lir->mir()->minimum();
13836 bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
13837 lir->snapshot());
13840 void CodeGenerator::visitSpectreMaskIndex(LSpectreMaskIndex* lir) {
13841 MOZ_ASSERT(JitOptions.spectreIndexMasking);
13843 const LAllocation* length = lir->length();
13844 Register index = ToRegister(lir->index());
13845 Register output = ToRegister(lir->output());
13847 if (lir->mir()->type() == MIRType::Int32) {
13848 if (length->isRegister()) {
13849 masm.spectreMaskIndex32(index, ToRegister(length), output);
13850 } else {
13851 masm.spectreMaskIndex32(index, ToAddress(length), output);
13853 } else {
13854 MOZ_ASSERT(lir->mir()->type() == MIRType::IntPtr);
13855 if (length->isRegister()) {
13856 masm.spectreMaskIndexPtr(index, ToRegister(length), output);
13857 } else {
13858 masm.spectreMaskIndexPtr(index, ToAddress(length), output);
// Shared out-of-line path for LStoreElementHoleV/LStoreElementHoleT; jumped to
// when the index is not below the initialized length (see
// visitOutOfLineStoreElementHole).
class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator> {
  // The owning LIR instruction; always one of the two hole-store variants.
  LInstruction* ins_;

 public:
  explicit OutOfLineStoreElementHole(LInstruction* ins) : ins_(ins) {
    MOZ_ASSERT(ins->isStoreElementHoleV() || ins->isStoreElementHoleT());
  }

  // Dispatch back into the code generator's OOL visitor.
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineStoreElementHole(this);
  }

  // The MIR node, retrieved through whichever LIR variant we wrap.
  MStoreElementHole* mir() const {
    return ins_->isStoreElementHoleV() ? ins_->toStoreElementHoleV()->mir()
                                       : ins_->toStoreElementHoleT()->mir();
  }
  LInstruction* ins() const { return ins_; }
};
13882 void CodeGenerator::emitStoreHoleCheck(Register elements,
13883 const LAllocation* index,
13884 LSnapshot* snapshot) {
13885 Label bail;
13886 if (index->isConstant()) {
13887 Address dest(elements, ToInt32(index) * sizeof(js::Value));
13888 masm.branchTestMagic(Assembler::Equal, dest, &bail);
13889 } else {
13890 BaseObjectElementIndex dest(elements, ToRegister(index));
13891 masm.branchTestMagic(Assembler::Equal, dest, &bail);
13893 bailoutFrom(&bail, snapshot);
13896 void CodeGenerator::emitStoreElementTyped(const LAllocation* value,
13897 MIRType valueType, Register elements,
13898 const LAllocation* index) {
13899 MOZ_ASSERT(valueType != MIRType::MagicHole);
13900 ConstantOrRegister v = ToConstantOrRegister(value, valueType);
13901 if (index->isConstant()) {
13902 Address dest(elements, ToInt32(index) * sizeof(js::Value));
13903 masm.storeUnboxedValue(v, valueType, dest);
13904 } else {
13905 BaseObjectElementIndex dest(elements, ToRegister(index));
13906 masm.storeUnboxedValue(v, valueType, dest);
13910 void CodeGenerator::visitStoreElementT(LStoreElementT* store) {
13911 Register elements = ToRegister(store->elements());
13912 const LAllocation* index = store->index();
13914 if (store->mir()->needsBarrier()) {
13915 emitPreBarrier(elements, index);
13918 if (store->mir()->needsHoleCheck()) {
13919 emitStoreHoleCheck(elements, index, store->snapshot());
13922 emitStoreElementTyped(store->value(), store->mir()->value()->type(), elements,
13923 index);
13926 void CodeGenerator::visitStoreElementV(LStoreElementV* lir) {
13927 const ValueOperand value = ToValue(lir, LStoreElementV::Value);
13928 Register elements = ToRegister(lir->elements());
13929 const LAllocation* index = lir->index();
13931 if (lir->mir()->needsBarrier()) {
13932 emitPreBarrier(elements, index);
13935 if (lir->mir()->needsHoleCheck()) {
13936 emitStoreHoleCheck(elements, index, lir->snapshot());
13939 if (lir->index()->isConstant()) {
13940 Address dest(elements, ToInt32(lir->index()) * sizeof(js::Value));
13941 masm.storeValue(value, dest);
13942 } else {
13943 BaseObjectElementIndex dest(elements, ToRegister(lir->index()));
13944 masm.storeValue(value, dest);
13948 void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
13949 Register elements = ToRegister(lir->elements());
13950 Register index = ToRegister(lir->index());
13952 Address elementsFlags(elements, ObjectElements::offsetOfFlags());
13953 masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);
13955 BaseObjectElementIndex element(elements, index);
13956 masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
// Store a typed value into a dense element, growing the initialized length
// (via the OOL path) when index == initializedLength.
void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());

  // In-bounds (index < initializedLength) falls through; otherwise take the
  // OOL path, which bails out or extends the initialized length.
  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // The OOL path rejoins here (after the pre-barrier — newly added slots are
  // uninitialized, see visitOutOfLineStoreElementHole).
  masm.bind(ool->rejoin());
  emitStoreElementTyped(lir->value(), lir->mir()->value()->type(), elements,
                        lir->index());

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    ConstantOrRegister val =
        ToConstantOrRegister(lir->value(), lir->mir()->value()->type());
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp, val);
  }
}
// Store a boxed Value into a dense element, growing the initialized length
// (via the OOL path) when index == initializedLength.
void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  const ValueOperand value = ToValue(lir, LStoreElementHoleV::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  // In-bounds (index < initializedLength) falls through; otherwise take the
  // OOL path, which bails out or extends the initialized length.
  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // The OOL path rejoins here (after the pre-barrier — newly added slots are
  // uninitialized, see visitOutOfLineStoreElementHole).
  masm.bind(ool->rejoin());
  masm.storeValue(value, BaseObjectElementIndex(elements, index));

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp,
                                ConstantOrRegister(value));
  }
}
// OOL path for the hole-store instructions: handles the index ==
// initializedLength case by growing capacity if needed and bumping the
// initialized length (and length), then rejoins the inline store.
void CodeGenerator::visitOutOfLineStoreElementHole(
    OutOfLineStoreElementHole* ool) {
  Register object, elements, index;
  LInstruction* ins = ool->ins();
  mozilla::Maybe<ConstantOrRegister> value;
  Register temp;

  // Unpack the operands from whichever LIR variant we were created for.
  if (ins->isStoreElementHoleV()) {
    LStoreElementHoleV* store = ins->toStoreElementHoleV();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    value.emplace(
        TypedOrValueRegister(ToValue(store, LStoreElementHoleV::ValueIndex)));
    temp = ToRegister(store->temp0());
  } else {
    LStoreElementHoleT* store = ins->toStoreElementHoleT();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    if (store->value()->isConstant()) {
      value.emplace(
          ConstantOrRegister(store->value()->toConstant()->toJSValue()));
    } else {
      MIRType valueType = store->mir()->value()->type();
      value.emplace(
          TypedOrValueRegister(valueType, ToAnyRegister(store->value())));
    }
    temp = ToRegister(store->temp0());
  }

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());

  // We're out-of-bounds. We only handle the index == initlength case.
  // If index > initializedLength, bail out. Note that this relies on the
  // condition flags sticking from the incoming branch.
  // Also note: this branch does not need Spectre mitigations, doing that for
  // the capacity check below is sufficient.
  Label allocElement, addNewElement;
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  // Had to reimplement for MIPS because there are no flags.
  bailoutCmp32(Assembler::NotEqual, initLength, index, ins->snapshot());
#else
  bailoutIf(Assembler::NotEqual, ins->snapshot());
#endif

  // If index < capacity, we can add a dense element inline. If not, we need
  // to allocate more elements first.
  masm.spectreBoundsCheck32(
      index, Address(elements, ObjectElements::offsetOfCapacity()), temp,
      &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  // Save all live volatile registers, except |temp|.
  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  liveRegs.takeUnchecked(temp);
  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject*);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  // A false return means the element couldn't be added; bail out.
  bailoutIfFalseBool(temp, ins->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);

  masm.bind(&addNewElement);

  // Increment initLength
  masm.add32(Imm32(1), initLength);

  // If length is now <= index, increment length too.
  Label skipIncrementLength;
  Address length(elements, ObjectElements::offsetOfLength());
  masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
  masm.add32(Imm32(1), length);
  masm.bind(&skipIncrementLength);

  // Jump to the inline path where we will store the value.
  // We rejoin after the prebarrier, because the memory is uninitialized.
  masm.jump(ool->rejoin());
}
14103 void CodeGenerator::visitArrayPopShift(LArrayPopShift* lir) {
14104 Register obj = ToRegister(lir->object());
14105 Register temp1 = ToRegister(lir->temp0());
14106 Register temp2 = ToRegister(lir->temp1());
14107 ValueOperand out = ToOutValue(lir);
14109 Label bail;
14110 if (lir->mir()->mode() == MArrayPopShift::Pop) {
14111 masm.packedArrayPop(obj, out, temp1, temp2, &bail);
14112 } else {
14113 MOZ_ASSERT(lir->mir()->mode() == MArrayPopShift::Shift);
14114 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
14115 masm.packedArrayShift(obj, out, temp1, temp2, volatileRegs, &bail);
14117 bailoutFrom(&bail, lir->snapshot());
// Out-of-line path for LArrayPush, entered when the inline capacity check in
// visitArrayPush fails (see visitOutOfLineArrayPush).
class OutOfLineArrayPush : public OutOfLineCodeBase<CodeGenerator> {
  // The owning LIR instruction.
  LArrayPush* ins_;

 public:
  explicit OutOfLineArrayPush(LArrayPush* ins) : ins_(ins) {}

  // Dispatch back into the code generator's OOL visitor.
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineArrayPush(this);
  }

  LArrayPush* ins() const { return ins_; }
};
// Emit Array.prototype.push of a single value onto a dense array.
// Fast path: length == initializedLength and length < capacity; otherwise an
// OOL path grows the elements buffer. The output register holds the new
// length and doubles as scratch during the sequence.
14133 void CodeGenerator::visitArrayPush(LArrayPush* lir) {
14134 Register obj = ToRegister(lir->object());
14135 Register elementsTemp = ToRegister(lir->temp0());
14136 Register length = ToRegister(lir->output());
14137 ValueOperand value = ToValue(lir, LArrayPush::ValueIndex);
14138 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
14140 auto* ool = new (alloc()) OutOfLineArrayPush(lir);
14141 addOutOfLineCode(ool, lir->mir());
14143 // Load obj->elements in elementsTemp.
14144 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);
14146 Address initLengthAddr(elementsTemp,
14147 ObjectElements::offsetOfInitializedLength());
14148 Address lengthAddr(elementsTemp, ObjectElements::offsetOfLength());
14149 Address capacityAddr(elementsTemp, ObjectElements::offsetOfCapacity());
14151 // Bail out if length != initLength.
14152 masm.load32(lengthAddr, length);
14153 bailoutCmp32(Assembler::NotEqual, initLengthAddr, length, lir->snapshot());
14155 // If length < capacity, we can add a dense element inline. If not, we
14156 // need to allocate more elements.
14157 masm.spectreBoundsCheck32(length, capacityAddr, spectreTemp, ool->entry());
14158 masm.bind(ool->rejoin());
// Store the value at the old length, then bump length. The OOL path rejoins
// here with |elementsTemp| reloaded (the buffer may have been reallocated).
14160 // Store the value.
14161 masm.storeValue(value, BaseObjectElementIndex(elementsTemp, length));
14163 // Update length and initialized length.
14164 masm.add32(Imm32(1), length);
14165 masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
14166 masm.store32(length, Address(elementsTemp,
14167 ObjectElements::offsetOfInitializedLength()));
14169 if (ValueNeedsPostBarrier(lir->mir()->value())) {
14170 LiveRegisterSet regs = liveVolatileRegs(lir);
14171 regs.addUnchecked(length);
// indexDiff = -1: |length| was already incremented above, so the element
// just written lives at index length - 1.
14172 emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->output()->output(),
14173 elementsTemp, ConstantOrRegister(value),
14174 /* indexDiff = */ -1);
// Out-of-line continuation of visitArrayPush: grow the dense elements by
// calling NativeObject::addDenseElementPure through a raw ABI call, bailing
// out if it fails. The output register and the pushed value are explicitly
// kept live across the call; |temp| is excluded because it receives the
// call's boolean result.
14178 void CodeGenerator::visitOutOfLineArrayPush(OutOfLineArrayPush* ool) {
14179 LArrayPush* ins = ool->ins();
14181 Register object = ToRegister(ins->object());
14182 Register temp = ToRegister(ins->temp0());
14184 LiveRegisterSet liveRegs = liveVolatileRegs(ins);
14185 liveRegs.takeUnchecked(temp);
14186 liveRegs.addUnchecked(ToRegister(ins->output()));
14187 liveRegs.addUnchecked(ToValue(ins, LArrayPush::ValueIndex));
14189 masm.PushRegsInMask(liveRegs);
14191 masm.setupAlignedABICall();
14192 masm.loadJSContext(temp);
14193 masm.passABIArg(temp);
14194 masm.passABIArg(object);
14196 using Fn = bool (*)(JSContext*, NativeObject* obj);
14197 masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
14198 masm.storeCallPointerResult(temp);
14200 masm.PopRegsInMask(liveRegs);
// A false result means the pure path couldn't grow the elements; bail out.
14201 bailoutIfFalseBool(temp, ins->snapshot());
14203 // Load the reallocated elements pointer.
14204 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
// NOTE(review): |temp| here is ins->temp0(), the same register the inline
// path uses as elementsTemp — the rejoin relies on that aliasing.
14206 masm.jump(ool->rejoin());
// Emit Array.prototype.slice on a packed array. Bails out if the array is
// not packed; otherwise tries to pre-allocate the result object inline and
// then always calls the ArraySliceDense VM function, passing either the
// pre-allocated object or nullptr (in which case the VM allocates).
14209 void CodeGenerator::visitArraySlice(LArraySlice* lir) {
14210 Register object = ToRegister(lir->object());
14211 Register begin = ToRegister(lir->begin());
14212 Register end = ToRegister(lir->end());
14213 Register temp0 = ToRegister(lir->temp0());
14214 Register temp1 = ToRegister(lir->temp1());
14216 Label call, fail;
14218 Label bail;
14219 masm.branchArrayIsNotPacked(object, temp0, temp1, &bail);
14220 bailoutFrom(&bail, lir->snapshot());
14222 // Try to allocate an object.
14223 TemplateObject templateObject(lir->mir()->templateObj());
14224 masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
14225 &fail);
14227 masm.jump(&call);
// Allocation failed: pass nullptr so the VM function allocates the result.
14229 masm.bind(&fail);
14230 masm.movePtr(ImmPtr(nullptr), temp0);
14232 masm.bind(&call);
// Arguments are pushed in reverse order of the VM function's signature.
14234 pushArg(temp0);
14235 pushArg(end);
14236 pushArg(begin);
14237 pushArg(object);
14239 using Fn =
14240 JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
14241 callVM<Fn, ArraySliceDense>(lir);
// Emit slice() on an arguments object. Mirrors visitArraySlice (minus the
// packed-array check): optimistically allocate the result inline and call
// ArgumentsSliceDense with either the new object or nullptr on failure.
14244 void CodeGenerator::visitArgumentsSlice(LArgumentsSlice* lir) {
14245 Register object = ToRegister(lir->object());
14246 Register begin = ToRegister(lir->begin());
14247 Register end = ToRegister(lir->end());
14248 Register temp0 = ToRegister(lir->temp0());
14249 Register temp1 = ToRegister(lir->temp1());
14251 Label call, fail;
14253 // Try to allocate an object.
14254 TemplateObject templateObject(lir->mir()->templateObj());
14255 masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
14256 &fail);
14258 masm.jump(&call);
// Allocation failed: pass nullptr so the VM function allocates the result.
14260 masm.bind(&fail);
14261 masm.movePtr(ImmPtr(nullptr), temp0);
14263 masm.bind(&call);
// Arguments are pushed in reverse order of the VM function's signature.
14265 pushArg(temp0);
14266 pushArg(end);
14267 pushArg(begin);
14268 pushArg(object);
14270 using Fn =
14271 JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
14272 callVM<Fn, ArgumentsSliceDense>(lir);
14275 #ifdef DEBUG
// Debug-only runtime assertions that an arguments-slice request is in
// bounds: 0 <= begin, 0 <= count, begin <= numActualArgs,
// count <= numActualArgs, and begin + count <= numActualArgs. Each operand
// may be a register or a compile-time constant; constants are checked with
// MOZ_ASSERT where possible. |numActualArgs| is clobbered (see below).
//
// NOTE(review): the assumeUnreachable messages are inconsistent — "begin < 0"
// and "count < 0" state the condition that *failed*, while the later
// "begin <= numActualArgs" style messages state the condition that was
// *expected* to hold. Consider unifying the wording.
14276 void CodeGenerator::emitAssertArgumentsSliceBounds(const RegisterOrInt32& begin,
14277 const RegisterOrInt32& count,
14278 Register numActualArgs) {
14279 // |begin| must be positive or zero.
14280 if (begin.is<Register>()) {
14281 Label beginOk;
14282 masm.branch32(Assembler::GreaterThanOrEqual, begin.as<Register>(), Imm32(0),
14283 &beginOk);
14284 masm.assumeUnreachable("begin < 0");
14285 masm.bind(&beginOk);
14286 } else {
14287 MOZ_ASSERT(begin.as<int32_t>() >= 0);
14290 // |count| must be positive or zero.
14291 if (count.is<Register>()) {
14292 Label countOk;
14293 masm.branch32(Assembler::GreaterThanOrEqual, count.as<Register>(), Imm32(0),
14294 &countOk);
14295 masm.assumeUnreachable("count < 0");
14296 masm.bind(&countOk);
14297 } else {
14298 MOZ_ASSERT(count.as<int32_t>() >= 0);
14301 // |begin| must be less-or-equal to |numActualArgs|.
14302 Label argsBeginOk;
14303 if (begin.is<Register>()) {
14304 masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
14305 &argsBeginOk);
14306 } else {
14307 masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
14308 Imm32(begin.as<int32_t>()), &argsBeginOk);
14310 masm.assumeUnreachable("begin <= numActualArgs");
14311 masm.bind(&argsBeginOk);
14313 // |count| must be less-or-equal to |numActualArgs|.
14314 Label argsCountOk;
14315 if (count.is<Register>()) {
14316 masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, count.as<Register>(),
14317 &argsCountOk);
14318 } else {
14319 masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
14320 Imm32(count.as<int32_t>()), &argsCountOk);
14322 masm.assumeUnreachable("count <= numActualArgs");
14323 masm.bind(&argsCountOk);
14325 // |begin| and |count| must be preserved, but |numActualArgs| can be changed.
14327 // Pre-condition: |count| <= |numActualArgs|
14328 // Condition to test: |begin + count| <= |numActualArgs|
14329 // Transform to: |begin| <= |numActualArgs - count|
// Rewriting the check this way avoids computing |begin + count|, which
// could overflow; |numActualArgs - count| can't underflow per the
// pre-condition just asserted.
14330 if (count.is<Register>()) {
14331 masm.subPtr(count.as<Register>(), numActualArgs);
14332 } else {
14333 masm.subPtr(Imm32(count.as<int32_t>()), numActualArgs);
14336 // |begin + count| must be less-or-equal to |numActualArgs|.
14337 Label argsBeginCountOk;
14338 if (begin.is<Register>()) {
14339 masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
14340 &argsBeginCountOk);
14341 } else {
14342 masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
14343 Imm32(begin.as<int32_t>()), &argsBeginCountOk);
14345 masm.assumeUnreachable("begin + count <= numActualArgs");
14346 masm.bind(&argsBeginCountOk);
14348 #endif
// Allocate the result array for a frame/inline arguments slice into
// |output|. If |count| fits in the template object's fixed-element capacity,
// the array is created inline with createGCObject and its length/initialized
// length stores are emitted; otherwise (or on allocation failure) an OOL
// call to NewArrayObjectEnsureDenseInitLength allocates it. |count| may be a
// register or a compile-time constant (the RegisterOrInt32 variant is
// dispatched with match()).
14350 template <class ArgumentsSlice>
14351 void CodeGenerator::emitNewArray(ArgumentsSlice* lir,
14352 const RegisterOrInt32& count, Register output,
14353 Register temp) {
14354 using Fn = ArrayObject* (*)(JSContext*, int32_t);
14355 auto* ool = count.match(
14356 [&](Register count) {
14357 return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
14358 lir, ArgList(count), StoreRegisterTo(output));
14360 [&](int32_t count) {
14361 return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
14362 lir, ArgList(Imm32(count)), StoreRegisterTo(output));
// The template must be an empty array with inline (fixed) elements, so the
// only state to initialize after cloning is length/initialized length.
14365 TemplateObject templateObject(lir->mir()->templateObj());
14366 MOZ_ASSERT(templateObject.isArrayObject());
14368 auto templateNativeObj = templateObject.asTemplateNativeObject();
14369 MOZ_ASSERT(templateNativeObj.getArrayLength() == 0);
14370 MOZ_ASSERT(templateNativeObj.getDenseInitializedLength() == 0);
14371 MOZ_ASSERT(!templateNativeObj.hasDynamicElements());
14373 // Check array capacity. Call into the VM if the template object's capacity
14374 // is too small.
// For a constant count the capacity check resolves at compile time: either
// emit an unconditional jump to the OOL path (and skip inline allocation
// entirely, tryAllocate == false) or no check at all.
14375 bool tryAllocate = count.match(
14376 [&](Register count) {
14377 masm.branch32(Assembler::Above, count,
14378 Imm32(templateNativeObj.getDenseCapacity()),
14379 ool->entry());
14380 return true;
14382 [&](int32_t count) {
14383 MOZ_ASSERT(count >= 0);
14384 if (uint32_t(count) > templateNativeObj.getDenseCapacity()) {
14385 masm.jump(ool->entry());
14386 return false;
14388 return true;
14391 if (tryAllocate) {
14392 // Try to allocate an object.
14393 masm.createGCObject(output, temp, templateObject, lir->mir()->initialHeap(),
14394 ool->entry());
14396 auto setInitializedLengthAndLength = [&](auto count) {
14397 const int elementsOffset = NativeObject::offsetOfFixedElements();
14399 // Update initialized length.
14400 Address initLength(
14401 output, elementsOffset + ObjectElements::offsetOfInitializedLength());
14402 masm.store32(count, initLength);
14404 // Update length.
14405 Address length(output, elementsOffset + ObjectElements::offsetOfLength());
14406 masm.store32(count, length);
14409 // The array object was successfully created. Set the length and initialized
14410 // length and then proceed to fill the elements.
// A constant count of zero needs no stores — the template is already empty.
14411 count.match([&](Register count) { setInitializedLengthAndLength(count); },
14412 [&](int32_t count) {
14413 if (count > 0) {
14414 setInitializedLengthAndLength(Imm32(count));
14419 masm.bind(ool->rejoin());
// Materialize a slice of the current frame's actual arguments as a new
// array: allocate the result via emitNewArray, then copy |count| Values
// starting at argument |begin| from the frame into the array's elements in
// an inline loop, and finally emit a post-write barrier if the result is
// tenured.
14422 void CodeGenerator::visitFrameArgumentsSlice(LFrameArgumentsSlice* lir) {
14423 Register begin = ToRegister(lir->begin());
14424 Register count = ToRegister(lir->count());
14425 Register temp = ToRegister(lir->temp0());
14426 Register output = ToRegister(lir->output());
14428 #ifdef DEBUG
14429 masm.loadNumActualArgs(FramePointer, temp);
14430 emitAssertArgumentsSliceBounds(RegisterOrInt32(begin), RegisterOrInt32(count),
14431 temp);
14432 #endif
14434 emitNewArray(lir, RegisterOrInt32(count), output, temp);
14436 Label done;
14437 masm.branch32(Assembler::Equal, count, Imm32(0), &done);
// The copy loop below clobbers |output| (reused as the elements pointer)
// and |begin| (reused as the running argument index), so save them — plus a
// scratch Value register — around the loop.
14439 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
14440 allRegs.take(begin);
14441 allRegs.take(count);
14442 allRegs.take(temp);
14443 allRegs.take(output);
14445 ValueOperand value = allRegs.takeAnyValue();
14447 LiveRegisterSet liveRegs;
14448 liveRegs.add(output);
14449 liveRegs.add(begin);
14450 liveRegs.add(value);
14452 masm.PushRegsInMask(liveRegs);
14454 // Initialize all elements.
14456 Register elements = output;
14457 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
14459 Register argIndex = begin;
14461 Register index = temp;
14462 masm.move32(Imm32(0), index);
14464 size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
14465 BaseValueIndex argPtr(FramePointer, argIndex, argvOffset);
14467 Label loop;
14468 masm.bind(&loop);
14470 masm.loadValue(argPtr, value);
14472 // We don't need a pre-barrier, because the element at |index| is guaranteed
14473 // to be a non-GC thing (either uninitialized memory or the magic hole
14474 // value).
14475 masm.storeValue(value, BaseObjectElementIndex(elements, index));
14477 masm.add32(Imm32(1), index);
14478 masm.add32(Imm32(1), argIndex);
14480 masm.branch32(Assembler::LessThan, index, count, &loop);
14482 masm.PopRegsInMask(liveRegs);
14484 // Emit a post-write barrier if |output| is tenured.
14486 // We expect that |output| is nursery allocated, so it isn't worth the
14487 // trouble to check if no frame argument is a nursery thing, which would
14488 // allow omitting the post-write barrier.
14489 masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);
14491 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
14492 volatileRegs.takeUnchecked(temp);
14493 if (output.volatile_()) {
14494 volatileRegs.addUnchecked(output);
14497 masm.PushRegsInMask(volatileRegs);
14498 emitPostWriteBarrier(output);
14499 masm.PopRegsInMask(volatileRegs);
14501 masm.bind(&done);
// Wrap an LAllocation as a RegisterOrInt32 variant: a constant allocation
// becomes its int32 payload, anything else becomes the allocated register.
14504 CodeGenerator::RegisterOrInt32 CodeGenerator::ToRegisterOrInt32(
14505 const LAllocation* allocation) {
14506 if (allocation->isConstant()) {
14507 return RegisterOrInt32(allocation->toConstant()->toInt32());
14509 return RegisterOrInt32(ToRegister(allocation));
// Materialize a slice of an inlined call's arguments as a new array. Unlike
// visitFrameArgumentsSlice, the arguments are LIR operands (one per actual),
// so the copy is emitted as unrolled per-argument code. |begin| and |count|
// may each be a register or a compile-time constant, giving four codegen
// shapes; a post-write barrier is emitted only if some copied argument could
// be a nursery thing.
14512 void CodeGenerator::visitInlineArgumentsSlice(LInlineArgumentsSlice* lir) {
14513 RegisterOrInt32 begin = ToRegisterOrInt32(lir->begin());
14514 RegisterOrInt32 count = ToRegisterOrInt32(lir->count());
14515 Register temp = ToRegister(lir->temp());
14516 Register output = ToRegister(lir->output());
14518 uint32_t numActuals = lir->mir()->numActuals();
14520 #ifdef DEBUG
14521 masm.move32(Imm32(numActuals), temp);
14523 emitAssertArgumentsSliceBounds(begin, count, temp);
14524 #endif
14526 emitNewArray(lir, count, output, temp);
14528 // We're done if there are no actual arguments.
14529 if (numActuals == 0) {
14530 return;
14533 // Check if any arguments have to be copied.
14534 Label done;
14535 if (count.is<Register>()) {
14536 masm.branch32(Assembler::Equal, count.as<Register>(), Imm32(0), &done);
14537 } else if (count.as<int32_t>() == 0) {
14538 return;
// Fetch argument |i| as a ConstantOrRegister from the LIR operands.
14541 auto getArg = [&](uint32_t i) {
14542 return toConstantOrRegister(lir, LInlineArgumentsSlice::ArgIndex(i),
14543 lir->mir()->getArg(i)->type());
14546 auto storeArg = [&](uint32_t i, auto dest) {
14547 // We don't need a pre-barrier because the element at |index| is guaranteed
14548 // to be a non-GC thing (either uninitialized memory or the magic hole
14549 // value).
14550 masm.storeConstantOrRegister(getArg(i), dest);
14553 // Initialize all elements.
14554 if (numActuals == 1) {
14555 // There's exactly one argument. We've checked that |count| is non-zero,
14556 // which implies that |begin| must be zero.
14557 MOZ_ASSERT_IF(begin.is<int32_t>(), begin.as<int32_t>() == 0);
14559 Register elements = temp;
14560 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
14562 storeArg(0, Address(elements, 0));
14563 } else if (begin.is<Register>()) {
14564 // There is more than one argument and |begin| isn't a compile-time
14565 // constant. Iterate through 0..numActuals to search for |begin| and then
14566 // start copying |count| arguments from that index.
// |output| is reused as the elements pointer and |begin| as the running
// argument index inside the unrolled loop, so save/restore them.
14568 LiveGeneralRegisterSet liveRegs;
14569 liveRegs.add(output);
14570 liveRegs.add(begin.as<Register>());
14572 masm.PushRegsInMask(liveRegs);
14574 Register elements = output;
14575 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
14577 Register argIndex = begin.as<Register>();
14579 Register index = temp;
14580 masm.move32(Imm32(0), index);
// Unrolled "loop": at step i, copy argument i only if argIndex == i; stop
// once |index| reaches |count|.
14582 Label doneLoop;
14583 for (uint32_t i = 0; i < numActuals; ++i) {
14584 Label next;
14585 masm.branch32(Assembler::NotEqual, argIndex, Imm32(i), &next);
14587 storeArg(i, BaseObjectElementIndex(elements, index));
14589 masm.add32(Imm32(1), index);
14590 masm.add32(Imm32(1), argIndex);
14592 if (count.is<Register>()) {
14593 masm.branch32(Assembler::GreaterThanOrEqual, index,
14594 count.as<Register>(), &doneLoop);
14595 } else {
14596 masm.branch32(Assembler::GreaterThanOrEqual, index,
14597 Imm32(count.as<int32_t>()), &doneLoop);
14600 masm.bind(&next);
14602 masm.bind(&doneLoop);
14604 masm.PopRegsInMask(liveRegs);
14605 } else {
14606 // There is more than one argument and |begin| is a compile-time constant.
14608 Register elements = temp;
14609 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
14611 int32_t argIndex = begin.as<int32_t>();
14613 int32_t index = 0;
14615 Label doneLoop;
14616 for (uint32_t i = argIndex; i < numActuals; ++i) {
14617 storeArg(i, Address(elements, index * sizeof(Value)));
14619 index += 1;
// A constant count lets us stop unrolling at compile time; a register
// count needs an emitted branch per copied argument.
14621 if (count.is<Register>()) {
14622 masm.branch32(Assembler::LessThanOrEqual, count.as<Register>(),
14623 Imm32(index), &doneLoop);
14624 } else {
14625 if (index >= count.as<int32_t>()) {
14626 break;
14630 masm.bind(&doneLoop);
14633 // Determine if we have to emit post-write barrier.
14635 // If either |begin| or |count| is a constant, use their value directly.
14636 // Otherwise assume we copy all inline arguments from 0..numActuals.
14637 bool postWriteBarrier = false;
14638 uint32_t actualBegin = begin.match([](Register) { return 0; },
14639 [](int32_t value) { return value; });
14640 uint32_t actualCount =
14641 count.match([=](Register) { return numActuals; },
14642 [](int32_t value) -> uint32_t { return value; });
14643 for (uint32_t i = 0; i < actualCount; ++i) {
14644 ConstantOrRegister arg = getArg(actualBegin + i);
14645 if (arg.constant()) {
14646 Value v = arg.value();
14647 if (v.isGCThing() && IsInsideNursery(v.toGCThing())) {
14648 postWriteBarrier = true;
14650 } else {
// A non-constant arg of Value or GC-pointer type may hold a nursery
// thing at runtime, so we must be conservative.
14651 MIRType type = arg.reg().type();
14652 if (type == MIRType::Value || NeedsPostBarrier(type)) {
14653 postWriteBarrier = true;
14658 // Emit a post-write barrier if |output| is tenured and we couldn't
14659 // determine at compile-time that no barrier is needed.
14660 if (postWriteBarrier) {
14661 masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);
14663 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
14664 volatileRegs.takeUnchecked(temp);
14665 if (output.volatile_()) {
14666 volatileRegs.addUnchecked(output);
14669 masm.PushRegsInMask(volatileRegs);
14670 emitPostWriteBarrier(output);
14671 masm.PopRegsInMask(volatileRegs);
14674 masm.bind(&done);
// Normalize a slice() begin/end term against |length|, following the spec's
// relative-index rules: a negative value is offset by length and clamped
// below at 0; a non-negative value is clamped above at length.
14677 void CodeGenerator::visitNormalizeSliceTerm(LNormalizeSliceTerm* lir) {
14678 Register value = ToRegister(lir->value());
14679 Register length = ToRegister(lir->length());
14680 Register output = ToRegister(lir->output());
14682 masm.move32(value, output);
14684 Label positive;
14685 masm.branch32(Assembler::GreaterThanOrEqual, value, Imm32(0), &positive);
// Negative term: output = max(value + length, 0).
14687 Label done;
14688 masm.add32(length, output);
14689 masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(0), &done);
14690 masm.move32(Imm32(0), output);
14691 masm.jump(&done);
// Non-negative term: output = min(value, length).
14693 masm.bind(&positive);
14694 masm.cmp32Move32(Assembler::LessThan, length, value, length, output);
14696 masm.bind(&done);
// Emit Array.prototype.join. Inline fast paths: an empty array yields the
// empty-string atom, and a length-1 array whose first element is already a
// string yields that string directly. Everything else calls the ArrayJoin
// VM function.
14699 void CodeGenerator::visitArrayJoin(LArrayJoin* lir) {
14700 Label skipCall;
14702 Register output = ToRegister(lir->output());
14703 Register sep = ToRegister(lir->separator());
14704 Register array = ToRegister(lir->array());
14705 Register temp = ToRegister(lir->temp0());
14707 // Fast path for simple length <= 1 cases.
// |temp| holds the elements pointer for the header loads below.
14709 masm.loadPtr(Address(array, NativeObject::offsetOfElements()), temp);
14710 Address length(temp, ObjectElements::offsetOfLength());
14711 Address initLength(temp, ObjectElements::offsetOfInitializedLength());
14713 // Check for length == 0
14714 Label notEmpty;
14715 masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmpty);
14716 const JSAtomState& names = gen->runtime->names();
14717 masm.movePtr(ImmGCPtr(names.empty_), output);
14718 masm.jump(&skipCall);
14720 masm.bind(&notEmpty);
14721 Label notSingleString;
14722 // Check for length == 1, initializedLength >= 1, arr[0].isString()
14723 masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleString);
14724 masm.branch32(Assembler::LessThan, initLength, Imm32(1), &notSingleString);
// elem0 is the first element: the elements pointer at offset 0.
14726 Address elem0(temp, 0);
14727 masm.branchTestString(Assembler::NotEqual, elem0, &notSingleString);
14729 // At this point, 'output' can be used as a scratch register, since we're
14730 // guaranteed to succeed.
14731 masm.unboxString(elem0, output);
14732 masm.jump(&skipCall);
14733 masm.bind(&notSingleString);
// Slow path: arguments pushed in reverse order of the VM signature.
14736 pushArg(sep);
14737 pushArg(array);
14739 using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
14740 callVM<Fn, jit::ArrayJoin>(lir);
14741 masm.bind(&skipCall);
// Emit Object.keys(obj) as a plain VM call — no inline fast path.
14744 void CodeGenerator::visitObjectKeys(LObjectKeys* lir) {
14745 Register object = ToRegister(lir->object());
14747 pushArg(object);
14749 using Fn = JSObject* (*)(JSContext*, HandleObject);
14750 callVM<Fn, jit::ObjectKeys>(lir);
// Emit Object.keys(obj).length as a VM call that returns only the count,
// avoiding materializing the keys array.
14753 void CodeGenerator::visitObjectKeysLength(LObjectKeysLength* lir) {
14754 Register object = ToRegister(lir->object());
14756 pushArg(object);
14758 using Fn = bool (*)(JSContext*, HandleObject, int32_t*);
14759 callVM<Fn, jit::ObjectKeysLength>(lir);
// Emit a GetIterator IC site: the actual lookup logic lives in the attached
// IonGetIteratorIC stubs, generated/updated at runtime.
14762 void CodeGenerator::visitGetIteratorCache(LGetIteratorCache* lir) {
14763 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14764 TypedOrValueRegister val =
14765 toConstantOrRegister(lir, LGetIteratorCache::ValueIndex,
14766 lir->mir()->value()->type())
14767 .reg();
14768 Register output = ToRegister(lir->output());
14769 Register temp0 = ToRegister(lir->temp0());
14770 Register temp1 = ToRegister(lir->temp1());
14772 IonGetIteratorIC ic(liveRegs, val, output, temp0, temp1);
14773 addIC(lir, allocateIC(ic));
// Emit an OptimizeSpreadCall IC site; the stub decides whether a spread
// call's iterable can be optimized and produces the result Value.
14776 void CodeGenerator::visitOptimizeSpreadCallCache(
14777 LOptimizeSpreadCallCache* lir) {
14778 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14779 ValueOperand val = ToValue(lir, LOptimizeSpreadCallCache::ValueIndex);
14780 ValueOperand output = ToOutValue(lir);
14781 Register temp = ToRegister(lir->temp0());
14783 IonOptimizeSpreadCallIC ic(liveRegs, val, output, temp);
14784 addIC(lir, allocateIC(ic));
// Emit a CloseIter IC site; |kind| (normal/throw/return) comes from the MIR
// node's completion kind and parameterizes the stub.
14787 void CodeGenerator::visitCloseIterCache(LCloseIterCache* lir) {
14788 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14789 Register iter = ToRegister(lir->iter());
14790 Register temp = ToRegister(lir->temp0());
14791 CompletionKind kind = CompletionKind(lir->mir()->completionKind());
14793 IonCloseIterIC ic(liveRegs, iter, temp, kind);
14794 addIC(lir, allocateIC(ic));
// Emit an OptimizeGetIterator IC site; the stub produces a boolean in
// |output| indicating whether the iterator protocol can be optimized.
14797 void CodeGenerator::visitOptimizeGetIteratorCache(
14798 LOptimizeGetIteratorCache* lir) {
14799 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14800 ValueOperand val = ToValue(lir, LOptimizeGetIteratorCache::ValueIndex);
14801 Register output = ToRegister(lir->output());
14802 Register temp = ToRegister(lir->temp0());
14804 IonOptimizeGetIteratorIC ic(liveRegs, val, output, temp);
14805 addIC(lir, allocateIC(ic));
// Advance a native for-in iterator: delegates entirely to
// MacroAssembler::iteratorMore, which writes the next value (or the
// end-of-iteration sentinel) into |output|.
14808 void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
14809 const Register obj = ToRegister(lir->iterator());
14810 const ValueOperand output = ToOutValue(lir);
14811 const Register temp = ToRegister(lir->temp0());
14813 masm.iteratorMore(obj, output, temp);
// Branch on whether |input| is the magic "no more iteration" value: a magic
// tag means iteration is done (ifTrue), anything else continues (ifFalse).
14816 void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
14817 ValueOperand input = ToValue(lir, LIsNoIterAndBranch::Input);
14818 Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
14819 Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
14821 masm.branchTestMagic(Assembler::Equal, input, ifTrue);
// Fall through when the false target is the next block; otherwise jump.
14823 if (!isNextBlock(lir->ifFalse()->lir())) {
14824 masm.jump(ifFalse);
// Close a native for-in iterator; all work happens in
// MacroAssembler::iteratorClose using the three temps.
14828 void CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) {
14829 const Register obj = ToRegister(lir->object());
14830 const Register temp0 = ToRegister(lir->temp0());
14831 const Register temp1 = ToRegister(lir->temp1());
14832 const Register temp2 = ToRegister(lir->temp2());
14834 masm.iteratorClose(obj, temp0, temp1, temp2);
// Load arguments.length: the number of actual arguments stored in the JS
// jit frame, read via the frame pointer.
14837 void CodeGenerator::visitArgumentsLength(LArgumentsLength* lir) {
14838 // read number of actual arguments from the JS frame.
14839 Register argc = ToRegister(lir->output());
14840 masm.loadNumActualArgs(FramePointer, argc);
// Load argument |index| (constant or register) from the current jit frame
// into the output Value. Debug builds assert the index is within
// max(numFormals, numActuals); release builds emit just the load.
14843 void CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir) {
14844 ValueOperand result = ToOutValue(lir);
14845 const LAllocation* index = lir->index();
14846 size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
14848 // This instruction is used to access actual arguments and formal arguments.
14849 // The number of Values on the stack is |max(numFormals, numActuals)|, so we
14850 // assert |index < numFormals || index < numActuals| in debug builds.
14851 DebugOnly<size_t> numFormals = gen->outerInfo().script()->function()->nargs();
14853 if (index->isConstant()) {
14854 int32_t i = index->toConstant()->toInt32();
14855 #ifdef DEBUG
// Only need the runtime numActuals check when the constant index is not
// already covered by the formal count.
14856 if (uint32_t(i) >= numFormals) {
14857 Label ok;
14858 Register argc = result.scratchReg();
14859 masm.loadNumActualArgs(FramePointer, argc);
14860 masm.branch32(Assembler::Above, argc, Imm32(i), &ok);
14861 masm.assumeUnreachable("Invalid argument index");
14862 masm.bind(&ok);
14864 #endif
14865 Address argPtr(FramePointer, sizeof(Value) * i + argvOffset);
14866 masm.loadValue(argPtr, result);
14867 } else {
14868 Register i = ToRegister(index);
14869 #ifdef DEBUG
14870 Label ok;
14871 Register argc = result.scratchReg();
14872 masm.branch32(Assembler::Below, i, Imm32(numFormals), &ok);
14873 masm.loadNumActualArgs(FramePointer, argc);
14874 masm.branch32(Assembler::Above, argc, i, &ok);
14875 masm.assumeUnreachable("Invalid argument index");
14876 masm.bind(&ok);
14877 #endif
14878 BaseValueIndex argPtr(FramePointer, i, argvOffset);
14879 masm.loadValue(argPtr, result);
// Load argument |index| from the frame, yielding |undefined| when the index
// is >= |length| (like arguments[i] beyond arguments.length). A negative
// index bails out instead — the Spectre-safe bounds check routes both the
// negative and the too-large case to |outOfBounds|, where the sign is
// disambiguated.
14883 void CodeGenerator::visitGetFrameArgumentHole(LGetFrameArgumentHole* lir) {
14884 ValueOperand result = ToOutValue(lir);
14885 Register index = ToRegister(lir->index());
14886 Register length = ToRegister(lir->length());
14887 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
14888 size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
14890 Label outOfBounds, done;
14891 masm.spectreBoundsCheck32(index, length, spectreTemp, &outOfBounds);
14893 BaseValueIndex argPtr(FramePointer, index, argvOffset);
14894 masm.loadValue(argPtr, result);
14895 masm.jump(&done);
14897 masm.bind(&outOfBounds);
// Negative index: bail out. Non-negative but >= length: return undefined.
14898 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
14899 masm.moveValue(UndefinedValue(), result);
14901 masm.bind(&done);
// Build the rest-parameter array (function(...rest)). Fast path: when the
// MIR provides a shape, allocate a nursery array with a small fixed-element
// capacity (2) inline and fill 0..2 elements directly; otherwise, or when
// the rest length exceeds that capacity or allocation fails, fall back to
// the InitRestParameter VM call.
14904 void CodeGenerator::visitRest(LRest* lir) {
14905 Register numActuals = ToRegister(lir->numActuals());
14906 Register temp0 = ToRegister(lir->temp0());
14907 Register temp1 = ToRegister(lir->temp1());
14908 Register temp2 = ToRegister(lir->temp2());
14909 Register temp3 = ToRegister(lir->temp3());
14910 unsigned numFormals = lir->mir()->numFormals();
// Inline path only handles up to 2 rest elements; larger counts go to the
// VM call below.
14912 constexpr uint32_t arrayCapacity = 2;
14914 if (Shape* shape = lir->mir()->shape()) {
14915 uint32_t arrayLength = 0;
14916 gc::AllocKind allocKind = GuessArrayGCKind(arrayCapacity);
14917 MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
14918 allocKind = ForegroundToBackgroundAllocKind(allocKind);
14919 MOZ_ASSERT(GetGCKindSlots(allocKind) ==
14920 arrayCapacity + ObjectElements::VALUES_PER_HEADER);
14922 Label joinAlloc, failAlloc;
14923 masm.movePtr(ImmGCPtr(shape), temp0);
14924 masm.createArrayWithFixedElements(temp2, temp0, temp1, InvalidReg,
14925 arrayLength, arrayCapacity, 0, 0,
14926 allocKind, gc::Heap::Default, &failAlloc);
14927 masm.jump(&joinAlloc);
// On allocation failure temp2 is nulled; the VM call below checks for this.
14929 masm.bind(&failAlloc);
14930 masm.movePtr(ImmPtr(nullptr), temp2);
14932 masm.bind(&joinAlloc);
14933 } else {
14934 masm.movePtr(ImmPtr(nullptr), temp2);
14937 // Set temp1 to the address of the first actual argument.
14938 size_t actualsOffset = JitFrameLayout::offsetOfActualArgs();
14939 masm.computeEffectiveAddress(Address(FramePointer, actualsOffset), temp1);
14941 // Compute array length: max(numActuals - numFormals, 0).
14942 Register lengthReg;
14943 if (numFormals) {
14944 lengthReg = temp0;
14945 Label emptyLength, joinLength;
14946 masm.branch32(Assembler::LessThanOrEqual, numActuals, Imm32(numFormals),
14947 &emptyLength);
14949 masm.move32(numActuals, lengthReg);
14950 masm.sub32(Imm32(numFormals), lengthReg);
14952 // Skip formal arguments.
14953 masm.addPtr(Imm32(sizeof(Value) * numFormals), temp1);
14955 masm.jump(&joinLength);
14957 masm.bind(&emptyLength);
14959 masm.move32(Imm32(0), lengthReg);
14961 // Leave temp1 pointed to the start of actuals() when the rest-array
14962 // length is zero. We don't use |actuals() + numFormals| because
14963 // |numFormals| can be any non-negative int32 value when this MRest was
14964 // created from scalar replacement optimizations. And it seems
14965 // questionable to compute a Value* pointer which points to who knows
14966 // where.
14968 masm.bind(&joinLength);
14969 } else {
14970 // Use numActuals directly when there are no formals.
14971 lengthReg = numActuals;
14974 // Try to initialize the array elements.
14975 Label vmCall, done;
14976 if (lir->mir()->shape()) {
14977 // Call into C++ if we failed to allocate an array or there are more than
14978 // |arrayCapacity| elements.
14979 masm.branchTestPtr(Assembler::Zero, temp2, temp2, &vmCall);
14980 masm.branch32(Assembler::Above, lengthReg, Imm32(arrayCapacity), &vmCall);
14982 // The array must be nursery allocated so no post barrier is needed.
14983 #ifdef DEBUG
14984 Label ok;
14985 masm.branchPtrInNurseryChunk(Assembler::Equal, temp2, temp3, &ok);
14986 masm.assumeUnreachable("Unexpected tenured object for LRest");
14987 masm.bind(&ok);
14988 #endif
14990 Label initialized;
14991 masm.branch32(Assembler::Equal, lengthReg, Imm32(0), &initialized);
14993 // Store length and initializedLength.
14994 Register elements = temp3;
14995 masm.loadPtr(Address(temp2, NativeObject::offsetOfElements()), elements);
14996 Address lengthAddr(elements, ObjectElements::offsetOfLength());
14997 Address initLengthAddr(elements,
14998 ObjectElements::offsetOfInitializedLength());
14999 masm.store32(lengthReg, lengthAddr);
15000 masm.store32(lengthReg, initLengthAddr);
15002 // Store either one or two elements. This may clobber lengthReg (temp0).
15003 static_assert(arrayCapacity == 2, "code handles 1 or 2 elements");
// Element 1 is stored first and skipped when length == 1; element 0 is
// stored unconditionally.
15004 Label storeFirst;
15005 masm.branch32(Assembler::Equal, lengthReg, Imm32(1), &storeFirst);
15006 masm.storeValue(Address(temp1, sizeof(Value)),
15007 Address(elements, sizeof(Value)), temp0);
15008 masm.bind(&storeFirst);
15009 masm.storeValue(Address(temp1, 0), Address(elements, 0), temp0);
15011 // Done.
15012 masm.bind(&initialized);
15013 masm.movePtr(temp2, ReturnReg);
15014 masm.jump(&done);
// Slow path: InitRestParameter(cx, length, rest-args pointer, array-or-null).
15017 masm.bind(&vmCall);
15019 pushArg(temp2);
15020 pushArg(temp1);
15021 pushArg(lengthReg);
15023 using Fn =
15024 ArrayObject* (*)(JSContext*, uint32_t, Value*, Handle<ArrayObject*>);
15025 callVM<Fn, InitRestParameter>(lir);
15027 masm.bind(&done);
// Create a stackmap from the given safepoint, with the structure:
//
//   <reg dump, if any>
//   |       ++ <body (general spill)>
//   |       |         ++ <space for Frame>
//   |       |         |        ++ <inbound args>
//   |       |         |        |
//   Lowest Addr                Highest Addr
//           |
//           framePushedAtStackMapBase
//
// The caller owns the resulting stackmap. This assumes a grow-down stack.
//
// For non-debug builds, if the stackmap would contain no pointers, no
// stackmap is created, and nullptr is returned. For a debug build, a
// stackmap is always created and returned.
//
// Depending on the type of safepoint, the stackmap may need to account for
// spilled registers. WasmSafepointKind::LirCall corresponds to LIR nodes where
// isCall() == true, for which the register allocator will spill/restore all
// live registers at the LIR level - in this case, the LSafepoint sees only live
// values on the stack, never in registers. WasmSafepointKind::CodegenCall, on
// the other hand, is for LIR nodes which may manually spill/restore live
// registers in codegen, in which case the stackmap must account for this. Traps
// also require tracking of live registers, but spilling is handled by the trap
// mechanism.
static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
                                         const RegisterOffsets& trapExitLayout,
                                         size_t trapExitLayoutNumWords,
                                         size_t nInboundStackArgBytes,
                                         wasm::StackMap** result) {
  // Ensure this is defined on all return paths.
  *result = nullptr;

  // The size of the wasm::Frame itself.
  const size_t nFrameBytes = sizeof(wasm::Frame);

  // This is the number of bytes spilled for live registers, outside of a trap.
  // For traps, trapExitLayout and trapExitLayoutNumWords will be used.
  const size_t nRegisterDumpBytes =
      MacroAssembler::PushRegsInMaskSizeInBytes(safepoint.liveRegs());

  // As mentioned above, for WasmSafepointKind::LirCall, register spills and
  // restores are handled at the LIR level and there should therefore be no
  // live registers to handle here.
  MOZ_ASSERT_IF(safepoint.wasmSafepointKind() == WasmSafepointKind::LirCall,
                nRegisterDumpBytes == 0);
  MOZ_ASSERT(nRegisterDumpBytes % sizeof(void*) == 0);

  // This is the number of bytes in the general spill area, below the Frame.
  const size_t nBodyBytes = safepoint.framePushedAtStackMapBase();

  // The stack map owns any alignment padding around inbound stack args.
  const size_t nInboundStackArgBytesAligned =
      wasm::AlignStackArgAreaSize(nInboundStackArgBytes);

  // This is the number of bytes in the general spill area, the Frame, and the
  // incoming args, but not including any register dump area.
  const size_t nNonRegisterBytes =
      nBodyBytes + nFrameBytes + nInboundStackArgBytesAligned;
  MOZ_ASSERT(nNonRegisterBytes % sizeof(void*) == 0);

  // This is the number of bytes in the register dump area, if any, below the
  // general spill area.
  const size_t nRegisterBytes =
      (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap)
          ? (trapExitLayoutNumWords * sizeof(void*))
          : nRegisterDumpBytes;

  // This is the total number of bytes covered by the map.
  const size_t nTotalBytes = nNonRegisterBytes + nRegisterBytes;

#ifndef DEBUG
  bool needStackMap = !(safepoint.wasmAnyRefRegs().empty() &&
                        safepoint.wasmAnyRefSlots().empty() &&
                        safepoint.slotsOrElementsSlots().empty());

  // There are no references, and this is a non-debug build, so don't bother
  // building the stackmap.
  if (!needStackMap) {
    return true;
  }
#endif

  wasm::StackMap* stackMap =
      wasm::StackMap::create(nTotalBytes / sizeof(void*));
  if (!stackMap) {
    return false;
  }
  if (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap) {
    stackMap->setExitStubWords(trapExitLayoutNumWords);
  }

  // REG DUMP AREA, if any.  Map indices count words upward from the lowest
  // address covered by the map, so register-dump words come first.
  size_t regDumpWords = 0;
  const LiveGeneralRegisterSet wasmAnyRefRegs = safepoint.wasmAnyRefRegs();
  const LiveGeneralRegisterSet slotsOrElementsRegs =
      safepoint.slotsOrElementsRegs();
  const LiveGeneralRegisterSet refRegs(GeneralRegisterSet::Union(
      wasmAnyRefRegs.set(), slotsOrElementsRegs.set()));
  GeneralRegisterForwardIterator refRegsIter(refRegs);
  switch (safepoint.wasmSafepointKind()) {
    case WasmSafepointKind::LirCall:
    case WasmSafepointKind::CodegenCall: {
      size_t spilledNumWords = nRegisterDumpBytes / sizeof(void*);
      regDumpWords += spilledNumWords;

      for (; refRegsIter.more(); ++refRegsIter) {
        Register reg = *refRegsIter;
        size_t offsetFromSpillBase =
            safepoint.liveRegs().gprs().offsetOfPushedRegister(reg) /
            sizeof(void*);
        MOZ_ASSERT(0 < offsetFromSpillBase &&
                   offsetFromSpillBase <= spilledNumWords);
        size_t index = spilledNumWords - offsetFromSpillBase;

        if (wasmAnyRefRegs.has(reg)) {
          stackMap->set(index, wasm::StackMap::AnyRef);
        } else {
          MOZ_ASSERT(slotsOrElementsRegs.has(reg));
          stackMap->set(index, wasm::StackMap::ArrayDataPointer);
        }
      }
      // Float and vector registers do not have to be handled; they cannot
      // contain wasm anyrefs, and they are spilled after general-purpose
      // registers. Gprs are therefore closest to the spill base and thus their
      // offset calculation does not need to account for other spills.
    } break;
    case WasmSafepointKind::Trap: {
      regDumpWords += trapExitLayoutNumWords;

      for (; refRegsIter.more(); ++refRegsIter) {
        Register reg = *refRegsIter;
        size_t offsetFromTop = trapExitLayout.getOffset(reg);

        // If this doesn't hold, the associated register wasn't saved by
        // the trap exit stub. Better to crash now than much later, in
        // some obscure place, and possibly with security consequences.
        MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);

        // offsetFromTop is an offset in words down from the highest
        // address in the exit stub save area. Switch it around to be an
        // offset up from the bottom of the (integer register) save area.
        size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;

        if (wasmAnyRefRegs.has(reg)) {
          stackMap->set(offsetFromBottom, wasm::StackMap::AnyRef);
        } else {
          MOZ_ASSERT(slotsOrElementsRegs.has(reg));
          stackMap->set(offsetFromBottom, wasm::StackMap::ArrayDataPointer);
        }
      }
    } break;
    default:
      MOZ_CRASH("unreachable");
  }

  // BODY (GENERAL SPILL) AREA and FRAME and INCOMING ARGS
  // Deal with roots on the stack.
  const LSafepoint::SlotList& wasmAnyRefSlots = safepoint.wasmAnyRefSlots();
  for (SafepointSlotEntry wasmAnyRefSlot : wasmAnyRefSlots) {
    // The following needs to correspond with JitFrameLayout::slotRef;
    // wasmAnyRefSlot.stack == 0 means the slot is in the args area.
    if (wasmAnyRefSlot.stack) {
      // It's a slot in the body allocation, so .slot is interpreted
      // as an index downwards from the Frame*.
      MOZ_ASSERT(wasmAnyRefSlot.slot <= nBodyBytes);
      uint32_t offsetInBytes = nBodyBytes - wasmAnyRefSlot.slot;
      MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
      stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                    wasm::StackMap::AnyRef);
    } else {
      // It's an argument slot.
      MOZ_ASSERT(wasmAnyRefSlot.slot < nInboundStackArgBytes);
      uint32_t offsetInBytes = nBodyBytes + nFrameBytes + wasmAnyRefSlot.slot;
      MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
      stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                    wasm::StackMap::AnyRef);
    }
  }

  // Track array data pointers on the stack.
  const LSafepoint::SlotList& slots = safepoint.slotsOrElementsSlots();
  for (SafepointSlotEntry slot : slots) {
    MOZ_ASSERT(slot.stack);

    // It's a slot in the body allocation, so .slot is interpreted
    // as an index downwards from the Frame*.
    MOZ_ASSERT(slot.slot <= nBodyBytes);
    uint32_t offsetInBytes = nBodyBytes - slot.slot;
    MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
    stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                  wasm::StackMap::Kind::ArrayDataPointer);
  }

  // Record in the map, how far down from the highest address the Frame* is.
  // Take the opportunity to check that we haven't marked any part of the
  // Frame itself as a pointer.
  stackMap->setFrameOffsetFromTop((nInboundStackArgBytesAligned + nFrameBytes) /
                                  sizeof(void*));
#ifdef DEBUG
  for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
    MOZ_ASSERT(stackMap->get(stackMap->header.numMappedWords -
                             stackMap->header.frameOffsetFromTop + i) ==
               wasm::StackMap::Kind::POD);
  }
#endif

  *result = stackMap;
  return true;
}
// Emit the complete machine code for one wasm function: prologue (with
// optional stack-overflow check), body, epilogue, and out-of-line code, then
// convert every recorded safepoint into a wasm::StackMap registered in
// |stackMaps|. Returns false on OOM or if the frame is implausibly large.
bool CodeGenerator::generateWasm(
    wasm::CallIndirectId callIndirectId, wasm::BytecodeOffset trapOffset,
    const wasm::ArgTypeVector& argTypes, const RegisterOffsets& trapExitLayout,
    size_t trapExitLayoutNumWords, wasm::FuncOffsets* offsets,
    wasm::StackMaps* stackMaps, wasm::Decoder* decoder) {
  AutoCreatedBy acb(masm, "CodeGenerator::generateWasm");

  JitSpew(JitSpew_Codegen, "# Emitting wasm code");

  size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(argTypes);
  inboundStackArgBytes_ = nInboundStackArgBytes;

  wasm::GenerateFunctionPrologue(masm, callIndirectId, mozilla::Nothing(),
                                 offsets);

  MOZ_ASSERT(masm.framePushed() == 0);

  // Very large frames are implausible, probably an attack.
  if (frameSize() > wasm::MaxFrameSize) {
    return decoder->fail(decoder->beginOffset(), "stack frame is too large");
  }

  if (omitOverRecursedCheck()) {
    masm.reserveStack(frameSize());
  } else {
    // Reserve the frame with an explicit stack check that can trap; the trap
    // instruction's offset gets its own entry stackmap.
    std::pair<CodeOffset, uint32_t> pair =
        masm.wasmReserveStackChecked(frameSize(), trapOffset);
    CodeOffset trapInsnOffset = pair.first;
    size_t nBytesReservedBeforeTrap = pair.second;

    wasm::StackMap* functionEntryStackMap = nullptr;
    if (!CreateStackMapForFunctionEntryTrap(
            argTypes, trapExitLayout, trapExitLayoutNumWords,
            nBytesReservedBeforeTrap, nInboundStackArgBytes,
            &functionEntryStackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map, even if there are no
    // refs to track.
    MOZ_ASSERT(functionEntryStackMap);

    if (functionEntryStackMap &&
        !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
                        functionEntryStackMap)) {
      functionEntryStackMap->destroy();
      return false;
    }
  }

  MOZ_ASSERT(masm.framePushed() == frameSize());

  if (!generateBody()) {
    return false;
  }

  masm.bind(&returnLabel_);
  wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);

  if (!generateOutOfLineCode()) {
    return false;
  }

  masm.flush();
  if (masm.oom()) {
    return false;
  }

  offsets->end = masm.currentOffset();

  // Wasm compilation must not have produced any of the JS-only side tables
  // (snapshots, recovers, ICs, OSI indices, script counts).
  MOZ_ASSERT(!masm.failureLabel()->used());
  MOZ_ASSERT(snapshots_.listSize() == 0);
  MOZ_ASSERT(snapshots_.RVATableSize() == 0);
  MOZ_ASSERT(recovers_.size() == 0);
  MOZ_ASSERT(graph.numConstants() == 0);
  MOZ_ASSERT(osiIndices_.empty());
  MOZ_ASSERT(icList_.empty());
  MOZ_ASSERT(safepoints_.size() == 0);
  MOZ_ASSERT(!scriptCounts_);

  // Convert the safepoints to stackmaps and add them to our running
  // collection thereof.
  for (CodegenSafepointIndex& index : safepointIndices_) {
    wasm::StackMap* stackMap = nullptr;
    if (!CreateStackMapFromLSafepoint(*index.safepoint(), trapExitLayout,
                                      trapExitLayoutNumWords,
                                      nInboundStackArgBytes, &stackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map.
    MOZ_ASSERT(stackMap);
    if (!stackMap) {
      continue;
    }

    if (!stackMaps->add((uint8_t*)(uintptr_t)index.displacement(), stackMap)) {
      stackMap->destroy();
      return false;
    }
  }

  return true;
}
15347 bool CodeGenerator::generate() {
15348 AutoCreatedBy acb(masm, "CodeGenerator::generate");
15350 JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
15351 gen->outerInfo().script()->filename(),
15352 gen->outerInfo().script()->lineno(),
15353 gen->outerInfo().script()->column().oneOriginValue());
15355 // Initialize native code table with an entry to the start of
15356 // top-level script.
15357 InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
15358 jsbytecode* startPC = tree->script()->code();
15359 BytecodeSite* startSite = new (gen->alloc()) BytecodeSite(tree, startPC);
15360 if (!addNativeToBytecodeEntry(startSite)) {
15361 return false;
15364 if (!safepoints_.init(gen->alloc())) {
15365 return false;
15368 perfSpewer_.recordOffset(masm, "Prologue");
15369 if (!generatePrologue()) {
15370 return false;
15373 // Reset native => bytecode map table with top-level script and startPc.
15374 if (!addNativeToBytecodeEntry(startSite)) {
15375 return false;
15378 if (!generateBody()) {
15379 return false;
15382 // Reset native => bytecode map table with top-level script and startPc.
15383 if (!addNativeToBytecodeEntry(startSite)) {
15384 return false;
15387 perfSpewer_.recordOffset(masm, "Epilogue");
15388 if (!generateEpilogue()) {
15389 return false;
15392 // Reset native => bytecode map table with top-level script and startPc.
15393 if (!addNativeToBytecodeEntry(startSite)) {
15394 return false;
15397 perfSpewer_.recordOffset(masm, "InvalidateEpilogue");
15398 generateInvalidateEpilogue();
15400 // native => bytecode entries for OOL code will be added
15401 // by CodeGeneratorShared::generateOutOfLineCode
15402 perfSpewer_.recordOffset(masm, "OOLCode");
15403 if (!generateOutOfLineCode()) {
15404 return false;
15407 // Add terminal entry.
15408 if (!addNativeToBytecodeEntry(startSite)) {
15409 return false;
15412 // Dump Native to bytecode entries to spew.
15413 dumpNativeToBytecodeEntries();
15415 // We encode safepoints after the OSI-point offsets have been determined.
15416 if (!encodeSafepoints()) {
15417 return false;
15420 return !masm.oom();
15423 static bool AddInlinedCompilations(JSContext* cx, HandleScript script,
15424 IonCompilationId compilationId,
15425 const WarpSnapshot* snapshot,
15426 bool* isValid) {
15427 MOZ_ASSERT(!*isValid);
15428 RecompileInfo recompileInfo(script, compilationId);
15430 JitZone* jitZone = cx->zone()->jitZone();
15432 for (const auto* scriptSnapshot : snapshot->scripts()) {
15433 JSScript* inlinedScript = scriptSnapshot->script();
15434 if (inlinedScript == script) {
15435 continue;
15438 // TODO(post-Warp): This matches FinishCompilation and is necessary to
15439 // ensure in-progress compilations are canceled when an inlined functon
15440 // becomes a debuggee. See the breakpoint-14.js jit-test.
15441 // When TI is gone, try to clean this up by moving AddInlinedCompilations to
15442 // WarpOracle so that we can handle this as part of addPendingRecompile
15443 // instead of requiring this separate check.
15444 if (inlinedScript->isDebuggee()) {
15445 *isValid = false;
15446 return true;
15449 if (!jitZone->addInlinedCompilation(recompileInfo, inlinedScript)) {
15450 return false;
15454 *isValid = true;
15455 return true;
// For every fuse this compilation depends on, check the fuse is still intact
// and register |script| as a dependency so a later fuse-pop invalidates it.
// Sets *isValid to false (tossing the compilation) if a fuse has already
// popped or if registering a dependency fails.
void CodeGenerator::validateAndRegisterFuseDependencies(JSContext* cx,
                                                        HandleScript script,
                                                        bool* isValid) {
  // No need to validate as we will toss this compilation anyhow.
  if (!*isValid) {
    return;
  }

  for (auto dependency : fuseDependencies) {
    switch (dependency) {
      case FuseDependencyKind::HasSeenObjectEmulateUndefinedFuse: {
        auto& hasSeenObjectEmulateUndefinedFuse =
            cx->runtime()->hasSeenObjectEmulateUndefinedFuse.ref();

        if (!hasSeenObjectEmulateUndefinedFuse.intact()) {
          JitSpew(JitSpew_Codegen,
                  "tossing compilation; hasSeenObjectEmulateUndefinedFuse fuse "
                  "dependency no longer valid\n");
          *isValid = false;
          return;
        }

        if (!hasSeenObjectEmulateUndefinedFuse.addFuseDependency(cx, script)) {
          JitSpew(JitSpew_Codegen,
                  "tossing compilation; failed to register "
                  "hasSeenObjectEmulateUndefinedFuse script dependency\n");
          *isValid = false;
          return;
        }
        break;
      }

      case FuseDependencyKind::OptimizeGetIteratorFuse: {
        auto& optimizeGetIteratorFuse =
            cx->realm()->realmFuses.optimizeGetIteratorFuse;
        if (!optimizeGetIteratorFuse.intact()) {
          JitSpew(JitSpew_Codegen,
                  "tossing compilation; optimizeGetIteratorFuse fuse "
                  "dependency no longer valid\n");
          *isValid = false;
          return;
        }

        if (!optimizeGetIteratorFuse.addFuseDependency(cx, script)) {
          JitSpew(JitSpew_Codegen,
                  "tossing compilation; failed to register "
                  "optimizeGetIteratorFuse script dependency\n");
          *isValid = false;
          return;
        }
        break;
      }

      default:
        MOZ_CRASH("Unknown Dependency Kind");
    }
  }
}
// Link the generated code into a runnable IonScript attached to |script|:
// validate the compilation (inlined scripts, fuses), allocate the IonScript
// and JitCode, register profiling/jitcode-global-table entries, patch all
// deferred data (ICs, nursery objects, invalidation epilogue), and copy the
// side tables (safepoints, snapshots, recovers, constants). Returns false
// only on error; returns true both on success and when the compilation is
// tossed as no-longer-valid.
bool CodeGenerator::link(JSContext* cx, const WarpSnapshot* snapshot) {
  AutoCreatedBy acb(masm, "CodeGenerator::link");

  // We cancel off-thread Ion compilations in a few places during GC, but if
  // this compilation was performed off-thread it will already have been
  // removed from the relevant lists by this point. Don't allow GC here.
  JS::AutoAssertNoGC nogc(cx);

  RootedScript script(cx, gen->outerInfo().script());
  MOZ_ASSERT(!script->hasIonScript());

  // Perform any read barriers which were skipped while compiling the
  // script, which may have happened off-thread.
  JitZone* jitZone = cx->zone()->jitZone();
  jitZone->performStubReadBarriers(zoneStubsToReadBarrier_);

  if (scriptCounts_ && !script->hasScriptCounts() &&
      !script->initScriptCounts(cx)) {
    return false;
  }

  IonCompilationId compilationId =
      cx->runtime()->jitRuntime()->nextCompilationId();
  jitZone->currentCompilationIdRef().emplace(compilationId);
  auto resetCurrentId = mozilla::MakeScopeExit(
      [jitZone] { jitZone->currentCompilationIdRef().reset(); });

  // Record constraints. If an error occured, returns false and potentially
  // prevent future compilations. Otherwise, if an invalidation occured, then
  // skip the current compilation.
  bool isValid = false;

  // If an inlined script is invalidated (for example, by attaching
  // a debugger), we must also invalidate the parent IonScript.
  if (!AddInlinedCompilations(cx, script, compilationId, snapshot, &isValid)) {
    return false;
  }

  // Validate fuse dependencies here; if a fuse has popped since we registered
  // a dependency then we need to toss this compilation as it assumes things
  // which are not valid.
  //
  // Eagerly register a fuse dependency here too; this way if we OOM we can
  // instead simply remove the compilation and move on with our lives.
  validateAndRegisterFuseDependencies(cx, script, &isValid);

  // This compilation is no longer valid; don't proceed, but return true as
  // this isn't an error case either.
  if (!isValid) {
    return true;
  }

  uint32_t argumentSlots = (gen->outerInfo().nargs() + 1) * sizeof(Value);

  size_t numNurseryObjects = snapshot->nurseryObjects().length();

  IonScript* ionScript = IonScript::New(
      cx, compilationId, graph.localSlotsSize(), argumentSlots, frameDepth_,
      snapshots_.listSize(), snapshots_.RVATableSize(), recovers_.size(),
      graph.numConstants(), numNurseryObjects, safepointIndices_.length(),
      osiIndices_.length(), icList_.length(), runtimeData_.length(),
      safepoints_.size());
  if (!ionScript) {
    return false;
  }
#ifdef DEBUG
  ionScript->setICHash(snapshot->icHash());
#endif

  auto freeIonScript = mozilla::MakeScopeExit([&ionScript] {
    // Use js_free instead of IonScript::Destroy: the cache list is still
    // uninitialized.
    js_free(ionScript);
  });

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Ion);
  if (!code) {
    return false;
  }

  // Encode native to bytecode map if profiling is enabled.
  if (isProfilerInstrumentationEnabled()) {
    // Generate native-to-bytecode main table.
    IonEntry::ScriptList scriptList;
    if (!generateCompactNativeToBytecodeMap(cx, code, scriptList)) {
      return false;
    }

    uint8_t* ionTableAddr =
        ((uint8_t*)nativeToBytecodeMap_.get()) + nativeToBytecodeTableOffset_;
    JitcodeIonTable* ionTable = (JitcodeIonTable*)ionTableAddr;

    // Construct the IonEntry that will go into the global table.
    auto entry = MakeJitcodeGlobalEntry<IonEntry>(
        cx, code, code->raw(), code->rawEnd(), std::move(scriptList), ionTable);
    if (!entry) {
      return false;
    }
    (void)nativeToBytecodeMap_.release();  // Table is now owned by |entry|.

    // Add entry to the global table.
    JitcodeGlobalTable* globalTable =
        cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    if (!globalTable->addEntry(std::move(entry))) {
      return false;
    }

    // Mark the jitcode as having a bytecode map.
    code->setHasBytecodeMap();
  } else {
    // Add a dumy jitcodeGlobalTable entry.
    auto entry = MakeJitcodeGlobalEntry<DummyEntry>(cx, code, code->raw(),
                                                    code->rawEnd());
    if (!entry) {
      return false;
    }

    // Add entry to the global table.
    JitcodeGlobalTable* globalTable =
        cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    if (!globalTable->addEntry(std::move(entry))) {
      return false;
    }

    // Mark the jitcode as having a bytecode map.
    code->setHasBytecodeMap();
  }

  ionScript->setMethod(code);

  // If the Gecko Profiler is enabled, mark IonScript as having been
  // instrumented accordingly.
  if (isProfilerInstrumentationEnabled()) {
    ionScript->setHasProfilingInstrumentation();
  }

  // Patch the invalidation-epilogue data word to point at the IonScript.
  Assembler::PatchDataWithValueCheck(
      CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript),
      ImmPtr((void*)-1));

  for (CodeOffset offset : ionScriptLabels_) {
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, offset),
                                       ImmPtr(ionScript), ImmPtr((void*)-1));
  }

  for (NurseryObjectLabel label : ionNurseryObjectLabels_) {
    void* entry = ionScript->addressOfNurseryObject(label.nurseryIndex);
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
                                       ImmPtr(entry), ImmPtr((void*)-1));
  }

  // For generating inline caches during the execution.
  if (runtimeData_.length()) {
    ionScript->copyRuntimeData(&runtimeData_[0]);
  }
  if (icList_.length()) {
    ionScript->copyICEntries(&icList_[0]);
  }

  // Patch every IC site with its stub entry point and IC pointer.
  for (size_t i = 0; i < icInfo_.length(); i++) {
    IonIC& ic = ionScript->getICFromIndex(i);
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(code, icInfo_[i].icOffsetForJump),
        ImmPtr(ic.codeRawPtr()), ImmPtr((void*)-1));
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(code, icInfo_[i].icOffsetForPush), ImmPtr(&ic),
        ImmPtr((void*)-1));
  }

  JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)", (void*)ionScript,
          (void*)code->raw());

  ionScript->setInvalidationEpilogueDataOffset(
      invalidateEpilogueData_.offset());
  if (jsbytecode* osrPc = gen->outerInfo().osrPc()) {
    ionScript->setOsrPc(osrPc);
    ionScript->setOsrEntryOffset(getOsrEntryOffset());
  }
  ionScript->setInvalidationEpilogueOffset(invalidate_.offset());

  perfSpewer_.saveProfile(cx, script, code);

#ifdef MOZ_VTUNE
  vtune::MarkScript(code, script, "ion");
#endif

  // Set a Ion counter hint for this script.
  if (cx->runtime()->jitRuntime()->hasJitHintsMap()) {
    JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
    jitHints->recordIonCompilation(script);
  }

  // For marking during GC.
  if (safepointIndices_.length()) {
    ionScript->copySafepointIndices(&safepointIndices_[0]);
  }
  if (safepoints_.size()) {
    ionScript->copySafepoints(&safepoints_);
  }

  // For recovering from an Ion Frame.
  if (osiIndices_.length()) {
    ionScript->copyOsiIndices(&osiIndices_[0]);
  }
  if (snapshots_.listSize()) {
    ionScript->copySnapshots(&snapshots_);
  }
  MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
  if (recovers_.size()) {
    ionScript->copyRecovers(&recovers_);
  }
  if (graph.numConstants()) {
    const Value* vp = graph.constantPool();
    ionScript->copyConstants(vp);
    // If any constant is a nursery-allocated GC thing, register the script
    // in the store buffer; one whole-cell entry covers them all.
    for (size_t i = 0; i < graph.numConstants(); i++) {
      const Value& v = vp[i];
      if (v.isGCThing()) {
        if (gc::StoreBuffer* sb = v.toGCThing()->storeBuffer()) {
          sb->putWholeCell(script);
          break;
        }
      }
    }
  }

  // Attach any generated script counts to the script.
  if (IonScriptCounts* counts = extractScriptCounts()) {
    script->addIonCounts(counts);
  }
  // WARNING: Code after this point must be infallible!

  // Copy the list of nursery objects. Note that the store buffer can add
  // HeapPtr edges that must be cleared in IonScript::Destroy. See the
  // infallibility warning above.
  const auto& nurseryObjects = snapshot->nurseryObjects();
  for (size_t i = 0; i < nurseryObjects.length(); i++) {
    ionScript->nurseryObjects()[i].init(nurseryObjects[i]);
  }

  // Transfer ownership of the IonScript to the JitScript. At this point
  // enough of the IonScript must be initialized for IonScript::Destroy to
  // work.
  freeIonScript.release();
  script->jitScript()->setIonScript(script, ionScript);

  return true;
}
// An out-of-line path to convert a boxed int32 to either a float or double.
class OutOfLineUnboxFloatingPoint : public OutOfLineCodeBase<CodeGenerator> {
  // The LIR instruction this OOL path belongs to (not owned).
  LUnboxFloatingPoint* unboxFloatingPoint_;

 public:
  explicit OutOfLineUnboxFloatingPoint(LUnboxFloatingPoint* unboxFloatingPoint)
      : unboxFloatingPoint_(unboxFloatingPoint) {}

  // Dispatch back into CodeGenerator to emit the OOL code.
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineUnboxFloatingPoint(this);
  }

  LUnboxFloatingPoint* unboxFloatingPoint() const {
    return unboxFloatingPoint_;
  }
};
// Unbox a Value known to hold a number into a float/double register. The
// fast path handles doubles inline; non-double tags take the OOL path, which
// converts int32 (or bails if fallible).
void CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir) {
  const ValueOperand box = ToValue(lir, LUnboxFloatingPoint::Input);
  const LDefinition* result = lir->output();

  // Out-of-line path to convert int32 to double or bailout
  // if this instruction is fallible.
  OutOfLineUnboxFloatingPoint* ool =
      new (alloc()) OutOfLineUnboxFloatingPoint(lir);
  addOutOfLineCode(ool, lir->mir());

  FloatRegister resultReg = ToFloatRegister(result);
  masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
  masm.unboxDouble(box, resultReg);
  // Narrow to float32 when that is the requested result type.
  if (lir->type() == MIRType::Float32) {
    masm.convertDoubleToFloat32(resultReg, resultReg);
  }
  masm.bind(ool->rejoin());
}
// OOL continuation of visitUnboxFloatingPoint: the value was not a double,
// so it must be an int32 (otherwise bail out when fallible) and is converted
// to the requested floating-point type.
void CodeGenerator::visitOutOfLineUnboxFloatingPoint(
    OutOfLineUnboxFloatingPoint* ool) {
  LUnboxFloatingPoint* ins = ool->unboxFloatingPoint();
  const ValueOperand value = ToValue(ins, LUnboxFloatingPoint::Input);

  if (ins->mir()->fallible()) {
    // Anything that is neither double (checked inline) nor int32 bails.
    Label bail;
    masm.branchTestInt32(Assembler::NotEqual, value, &bail);
    bailoutFrom(&bail, ins->snapshot());
  }
  masm.int32ValueToFloatingPoint(value, ToFloatRegister(ins->output()),
                                 ins->type());
  masm.jump(ool->rejoin());
}
15816 void CodeGenerator::visitCallBindVar(LCallBindVar* lir) {
15817 pushArg(ToRegister(lir->environmentChain()));
15819 using Fn = JSObject* (*)(JSContext*, JSObject*);
15820 callVM<Fn, BindVarOperation>(lir);
// Megamorphic obj[id] = value: try the inline megamorphic set-slot cache
// first; on miss fall back to a VM call. After a cache hit, emit a post
// write barrier if a nursery value was stored into a tenured object.
void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
  Register obj = ToRegister(lir->getOperand(0));
  ValueOperand idVal = ToValue(lir, LMegamorphicSetElement::IndexIndex);
  ValueOperand value = ToValue(lir, LMegamorphicSetElement::ValueIndex);

  Register temp0 = ToRegister(lir->temp0());
  // See comment in LIROps.yaml (x86 is short on registers)
#ifndef JS_CODEGEN_X86
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  Label cacheHit, done;
#ifdef JS_CODEGEN_X86
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#else
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, temp1, temp2, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#endif

  // Cache miss: fall back to the VM.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(ToValue(lir, LMegamorphicSetElement::ValueIndex));
  pushArg(ToValue(lir, LMegamorphicSetElement::IndexIndex));
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, js::jit::SetElementMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // No barrier needed if the object is in the nursery or the value is not.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  saveVolatile(temp0);
  emitPostWriteBarrier(obj);
  restoreVolatile(temp0);

  masm.bind(&done);
}
// Load the handler object of a scripted proxy from its reserved slots,
// bailing out if the HANDLER_EXTRA slot does not hold an object (e.g. the
// proxy was revoked).
void CodeGenerator::visitLoadScriptedProxyHandler(
    LLoadScriptedProxyHandler* ins) {
  Register obj = ToRegister(ins->getOperand(0));
  Register output = ToRegister(ins->output());

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);

  Label bail;
  Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
                                  ScriptedProxyHandler::HANDLER_EXTRA));
  masm.fallibleUnboxObject(handlerAddr, output, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
#ifdef JS_PUNBOX64
// Validate the result of a scripted-proxy [[Get]]: if the target object needs
// proxy result validation, call CheckProxyGetByValueResult out-of-line;
// otherwise fall through with |value| unchanged.
void CodeGenerator::visitCheckScriptedProxyGetResult(
    LCheckScriptedProxyGetResult* ins) {
  ValueOperand target = ToValue(ins, LCheckScriptedProxyGetResult::TargetIndex);
  ValueOperand value = ToValue(ins, LCheckScriptedProxyGetResult::ValueIndex);
  ValueOperand id = ToValue(ins, LCheckScriptedProxyGetResult::IdIndex);
  Register scratch = ToRegister(ins->temp0());
  Register scratch2 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckProxyGetByValueResult>(
      ins, ArgList(scratch, id, value), StoreValueTo(value));

  masm.unboxObject(target, scratch);
  masm.branchTestObjectNeedsProxyResultValidation(Assembler::NonZero, scratch,
                                                  scratch2, ool->entry());
  masm.bind(ool->rejoin());
}
#endif
15906 void CodeGenerator::visitIdToStringOrSymbol(LIdToStringOrSymbol* ins) {
15907 ValueOperand id = ToValue(ins, LIdToStringOrSymbol::IdIndex);
15908 ValueOperand output = ToOutValue(ins);
15909 Register scratch = ToRegister(ins->temp0());
15911 masm.moveValue(id, output);
15913 Label done, callVM;
15914 Label bail;
15916 ScratchTagScope tag(masm, output);
15917 masm.splitTagForTest(output, tag);
15918 masm.branchTestString(Assembler::Equal, tag, &done);
15919 masm.branchTestSymbol(Assembler::Equal, tag, &done);
15920 masm.branchTestInt32(Assembler::NotEqual, tag, &bail);
15923 masm.unboxInt32(output, scratch);
15925 using Fn = JSLinearString* (*)(JSContext*, int);
15926 OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
15927 ins, ArgList(scratch), StoreRegisterTo(output.scratchReg()));
15929 masm.lookupStaticIntString(scratch, output.scratchReg(),
15930 gen->runtime->staticStrings(), ool->entry());
15932 masm.bind(ool->rejoin());
15933 masm.tagValue(JSVAL_TYPE_STRING, output.scratchReg(), output);
15934 masm.bind(&done);
15936 bailoutFrom(&bail, ins->snapshot());
15939 void CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins) {
15940 const Register obj = ToRegister(ins->getOperand(0));
15941 size_t slot = ins->mir()->slot();
15942 ValueOperand result = ToOutValue(ins);
15944 masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
15947 void CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins) {
15948 const Register obj = ToRegister(ins->getOperand(0));
15949 size_t slot = ins->mir()->slot();
15950 AnyRegister result = ToAnyRegister(ins->getDef(0));
15951 MIRType type = ins->mir()->type();
15953 masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)),
15954 type, result);
// Load a Value from |src| and unbox it into |dest| according to |type|.
// Doubles go through ensureDouble (which also accepts int32 payloads). When
// |fallible| is true, a tag mismatch jumps to |fail|; otherwise the value is
// assumed (by MIR type analysis) to already have the expected type.
template <typename T>
static void EmitLoadAndUnbox(MacroAssembler& masm, const T& src, MIRType type,
                             bool fallible, AnyRegister dest, Label* fail) {
  if (type == MIRType::Double) {
    MOZ_ASSERT(dest.isFloat());
    masm.ensureDouble(src, dest.fpu(), fail);
    return;
  }
  if (fallible) {
    switch (type) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(src, dest.gpr(), fail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(src, dest.gpr(), fail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(src, dest.gpr(), fail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(src, dest.gpr(), fail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(src, dest.gpr(), fail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(src, dest.gpr(), fail);
        break;
      default:
        MOZ_CRASH("Unexpected MIRType");
    }
    return;
  }
  // Infallible: unbox without a type check.
  masm.loadUnboxedValue(src, type, dest);
}
15993 void CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins) {
15994 const MLoadFixedSlotAndUnbox* mir = ins->mir();
15995 MIRType type = mir->type();
15996 Register input = ToRegister(ins->object());
15997 AnyRegister result = ToAnyRegister(ins->output());
15998 size_t slot = mir->slot();
16000 Address address(input, NativeObject::getFixedSlotOffset(slot));
16002 Label bail;
16003 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
16004 if (mir->fallible()) {
16005 bailoutFrom(&bail, ins->snapshot());
16009 void CodeGenerator::visitLoadDynamicSlotAndUnbox(
16010 LLoadDynamicSlotAndUnbox* ins) {
16011 const MLoadDynamicSlotAndUnbox* mir = ins->mir();
16012 MIRType type = mir->type();
16013 Register input = ToRegister(ins->slots());
16014 AnyRegister result = ToAnyRegister(ins->output());
16015 size_t slot = mir->slot();
16017 Address address(input, slot * sizeof(JS::Value));
16019 Label bail;
16020 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
16021 if (mir->fallible()) {
16022 bailoutFrom(&bail, ins->snapshot());
16026 void CodeGenerator::visitLoadElementAndUnbox(LLoadElementAndUnbox* ins) {
16027 const MLoadElementAndUnbox* mir = ins->mir();
16028 MIRType type = mir->type();
16029 Register elements = ToRegister(ins->elements());
16030 AnyRegister result = ToAnyRegister(ins->output());
16032 Label bail;
16033 if (ins->index()->isConstant()) {
16034 NativeObject::elementsSizeMustNotOverflow();
16035 int32_t offset = ToInt32(ins->index()) * sizeof(Value);
16036 Address address(elements, offset);
16037 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
16038 } else {
16039 BaseObjectElementIndex address(elements, ToRegister(ins->index()));
16040 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
16043 if (mir->fallible()) {
16044 bailoutFrom(&bail, ins->snapshot());
16048 class OutOfLineAtomizeSlot : public OutOfLineCodeBase<CodeGenerator> {
16049 LInstruction* lir_;
16050 Register stringReg_;
16051 Address slotAddr_;
16052 TypedOrValueRegister dest_;
16054 public:
16055 OutOfLineAtomizeSlot(LInstruction* lir, Register stringReg, Address slotAddr,
16056 TypedOrValueRegister dest)
16057 : lir_(lir), stringReg_(stringReg), slotAddr_(slotAddr), dest_(dest) {}
16059 void accept(CodeGenerator* codegen) final {
16060 codegen->visitOutOfLineAtomizeSlot(this);
16062 LInstruction* lir() const { return lir_; }
16063 Register stringReg() const { return stringReg_; }
16064 Address slotAddr() const { return slotAddr_; }
16065 TypedOrValueRegister dest() const { return dest_; }
16068 void CodeGenerator::visitOutOfLineAtomizeSlot(OutOfLineAtomizeSlot* ool) {
16069 LInstruction* lir = ool->lir();
16070 Register stringReg = ool->stringReg();
16071 Address slotAddr = ool->slotAddr();
16072 TypedOrValueRegister dest = ool->dest();
16074 // This code is called with a non-atomic string in |stringReg|.
16075 // When it returns, |stringReg| contains an unboxed pointer to an
16076 // atomized version of that string, and |slotAddr| contains a
16077 // StringValue pointing to that atom. If |dest| is a ValueOperand,
16078 // it contains the same StringValue; otherwise we assert that |dest|
16079 // is |stringReg|.
16081 saveLive(lir);
16082 pushArg(stringReg);
16084 using Fn = JSAtom* (*)(JSContext*, JSString*);
16085 callVM<Fn, js::AtomizeString>(lir);
16086 StoreRegisterTo(stringReg).generate(this);
16087 restoreLiveIgnore(lir, StoreRegisterTo(stringReg).clobbered());
16089 if (dest.hasValue()) {
16090 masm.moveValue(
16091 TypedOrValueRegister(MIRType::String, AnyRegister(stringReg)),
16092 dest.valueReg());
16093 } else {
16094 MOZ_ASSERT(dest.typedReg().gpr() == stringReg);
16097 emitPreBarrier(slotAddr);
16098 masm.storeTypedOrValue(dest, slotAddr);
16100 // We don't need a post-barrier because atoms aren't nursery-allocated.
16101 #ifdef DEBUG
16102 // We need a temp register for the nursery check. Spill something.
16103 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
16104 allRegs.take(stringReg);
16105 Register temp = allRegs.takeAny();
16106 masm.push(temp);
16108 Label tenured;
16109 masm.branchPtrInNurseryChunk(Assembler::NotEqual, stringReg, temp, &tenured);
16110 masm.assumeUnreachable("AtomizeString returned a nursery pointer");
16111 masm.bind(&tenured);
16113 masm.pop(temp);
16114 #endif
16116 masm.jump(ool->rejoin());
16119 void CodeGenerator::emitMaybeAtomizeSlot(LInstruction* ins, Register stringReg,
16120 Address slotAddr,
16121 TypedOrValueRegister dest) {
16122 OutOfLineAtomizeSlot* ool =
16123 new (alloc()) OutOfLineAtomizeSlot(ins, stringReg, slotAddr, dest);
16124 addOutOfLineCode(ool, ins->mirRaw()->toInstruction());
16125 masm.branchTest32(Assembler::Zero,
16126 Address(stringReg, JSString::offsetOfFlags()),
16127 Imm32(JSString::ATOM_BIT), ool->entry());
16128 masm.bind(ool->rejoin());
16131 void CodeGenerator::visitLoadFixedSlotAndAtomize(
16132 LLoadFixedSlotAndAtomize* ins) {
16133 Register obj = ToRegister(ins->getOperand(0));
16134 Register temp = ToRegister(ins->temp0());
16135 size_t slot = ins->mir()->slot();
16136 ValueOperand result = ToOutValue(ins);
16138 Address slotAddr(obj, NativeObject::getFixedSlotOffset(slot));
16139 masm.loadValue(slotAddr, result);
16141 Label notString;
16142 masm.branchTestString(Assembler::NotEqual, result, &notString);
16143 masm.unboxString(result, temp);
16144 emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
16145 masm.bind(&notString);
16148 void CodeGenerator::visitLoadDynamicSlotAndAtomize(
16149 LLoadDynamicSlotAndAtomize* ins) {
16150 ValueOperand result = ToOutValue(ins);
16151 Register temp = ToRegister(ins->temp0());
16152 Register base = ToRegister(ins->input());
16153 int32_t offset = ins->mir()->slot() * sizeof(js::Value);
16155 Address slotAddr(base, offset);
16156 masm.loadValue(slotAddr, result);
16158 Label notString;
16159 masm.branchTestString(Assembler::NotEqual, result, &notString);
16160 masm.unboxString(result, temp);
16161 emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
16162 masm.bind(&notString);
16165 void CodeGenerator::visitLoadFixedSlotUnboxAndAtomize(
16166 LLoadFixedSlotUnboxAndAtomize* ins) {
16167 const MLoadFixedSlotAndUnbox* mir = ins->mir();
16168 MOZ_ASSERT(mir->type() == MIRType::String);
16169 Register input = ToRegister(ins->object());
16170 AnyRegister result = ToAnyRegister(ins->output());
16171 size_t slot = mir->slot();
16173 Address slotAddr(input, NativeObject::getFixedSlotOffset(slot));
16175 Label bail;
16176 EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
16177 &bail);
16178 emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
16179 TypedOrValueRegister(MIRType::String, result));
16181 if (mir->fallible()) {
16182 bailoutFrom(&bail, ins->snapshot());
16186 void CodeGenerator::visitLoadDynamicSlotUnboxAndAtomize(
16187 LLoadDynamicSlotUnboxAndAtomize* ins) {
16188 const MLoadDynamicSlotAndUnbox* mir = ins->mir();
16189 MOZ_ASSERT(mir->type() == MIRType::String);
16190 Register input = ToRegister(ins->slots());
16191 AnyRegister result = ToAnyRegister(ins->output());
16192 size_t slot = mir->slot();
16194 Address slotAddr(input, slot * sizeof(JS::Value));
16196 Label bail;
16197 EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
16198 &bail);
16199 emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
16200 TypedOrValueRegister(MIRType::String, result));
16202 if (mir->fallible()) {
16203 bailoutFrom(&bail, ins->snapshot());
16207 void CodeGenerator::visitAddAndStoreSlot(LAddAndStoreSlot* ins) {
16208 const Register obj = ToRegister(ins->getOperand(0));
16209 const ValueOperand value = ToValue(ins, LAddAndStoreSlot::ValueIndex);
16210 const Register maybeTemp = ToTempRegisterOrInvalid(ins->temp0());
16212 Shape* shape = ins->mir()->shape();
16213 masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
16214 EmitPreBarrier(masm, addr, MIRType::Shape);
16217 // Perform the store. No pre-barrier required since this is a new
16218 // initialization.
16220 uint32_t offset = ins->mir()->slotOffset();
16221 if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
16222 Address slot(obj, offset);
16223 masm.storeValue(value, slot);
16224 } else {
16225 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), maybeTemp);
16226 Address slot(maybeTemp, offset);
16227 masm.storeValue(value, slot);
16231 void CodeGenerator::visitAllocateAndStoreSlot(LAllocateAndStoreSlot* ins) {
16232 const Register obj = ToRegister(ins->getOperand(0));
16233 const ValueOperand value = ToValue(ins, LAllocateAndStoreSlot::ValueIndex);
16234 const Register temp0 = ToRegister(ins->temp0());
16235 const Register temp1 = ToRegister(ins->temp1());
16237 masm.Push(obj);
16238 masm.Push(value);
16240 using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
16241 masm.setupAlignedABICall();
16242 masm.loadJSContext(temp0);
16243 masm.passABIArg(temp0);
16244 masm.passABIArg(obj);
16245 masm.move32(Imm32(ins->mir()->numNewSlots()), temp1);
16246 masm.passABIArg(temp1);
16247 masm.callWithABI<Fn, NativeObject::growSlotsPure>();
16248 masm.storeCallPointerResult(temp0);
16250 masm.Pop(value);
16251 masm.Pop(obj);
16253 bailoutIfFalseBool(temp0, ins->snapshot());
16255 masm.storeObjShape(ins->mir()->shape(), obj,
16256 [](MacroAssembler& masm, const Address& addr) {
16257 EmitPreBarrier(masm, addr, MIRType::Shape);
16260 // Perform the store. No pre-barrier required since this is a new
16261 // initialization.
16262 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp0);
16263 Address slot(temp0, ins->mir()->slotOffset());
16264 masm.storeValue(value, slot);
16267 void CodeGenerator::visitAddSlotAndCallAddPropHook(
16268 LAddSlotAndCallAddPropHook* ins) {
16269 const Register obj = ToRegister(ins->object());
16270 const ValueOperand value =
16271 ToValue(ins, LAddSlotAndCallAddPropHook::ValueIndex);
16273 pushArg(ImmGCPtr(ins->mir()->shape()));
16274 pushArg(value);
16275 pushArg(obj);
16277 using Fn =
16278 bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
16279 callVM<Fn, AddSlotAndCallAddPropHook>(ins);
16282 void CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins) {
16283 const Register obj = ToRegister(ins->getOperand(0));
16284 size_t slot = ins->mir()->slot();
16286 const ValueOperand value = ToValue(ins, LStoreFixedSlotV::ValueIndex);
16288 Address address(obj, NativeObject::getFixedSlotOffset(slot));
16289 if (ins->mir()->needsBarrier()) {
16290 emitPreBarrier(address);
16293 masm.storeValue(value, address);
16296 void CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins) {
16297 const Register obj = ToRegister(ins->getOperand(0));
16298 size_t slot = ins->mir()->slot();
16300 const LAllocation* value = ins->value();
16301 MIRType valueType = ins->mir()->value()->type();
16303 Address address(obj, NativeObject::getFixedSlotOffset(slot));
16304 if (ins->mir()->needsBarrier()) {
16305 emitPreBarrier(address);
16308 ConstantOrRegister nvalue =
16309 value->isConstant()
16310 ? ConstantOrRegister(value->toConstant()->toJSValue())
16311 : TypedOrValueRegister(valueType, ToAnyRegister(value));
16312 masm.storeConstantOrRegister(nvalue, address);
16315 void CodeGenerator::visitGetNameCache(LGetNameCache* ins) {
16316 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16317 Register envChain = ToRegister(ins->envObj());
16318 ValueOperand output = ToOutValue(ins);
16319 Register temp = ToRegister(ins->temp0());
16321 IonGetNameIC ic(liveRegs, envChain, output, temp);
16322 addIC(ins, allocateIC(ic));
16325 void CodeGenerator::addGetPropertyCache(LInstruction* ins,
16326 LiveRegisterSet liveRegs,
16327 TypedOrValueRegister value,
16328 const ConstantOrRegister& id,
16329 ValueOperand output) {
16330 CacheKind kind = CacheKind::GetElem;
16331 if (id.constant() && id.value().isString()) {
16332 JSString* idString = id.value().toString();
16333 if (idString->isAtom() && !idString->asAtom().isIndex()) {
16334 kind = CacheKind::GetProp;
16337 IonGetPropertyIC cache(kind, liveRegs, value, id, output);
16338 addIC(ins, allocateIC(cache));
16341 void CodeGenerator::addSetPropertyCache(LInstruction* ins,
16342 LiveRegisterSet liveRegs,
16343 Register objReg, Register temp,
16344 const ConstantOrRegister& id,
16345 const ConstantOrRegister& value,
16346 bool strict) {
16347 CacheKind kind = CacheKind::SetElem;
16348 if (id.constant() && id.value().isString()) {
16349 JSString* idString = id.value().toString();
16350 if (idString->isAtom() && !idString->asAtom().isIndex()) {
16351 kind = CacheKind::SetProp;
16354 IonSetPropertyIC cache(kind, liveRegs, objReg, temp, id, value, strict);
16355 addIC(ins, allocateIC(cache));
16358 ConstantOrRegister CodeGenerator::toConstantOrRegister(LInstruction* lir,
16359 size_t n, MIRType type) {
16360 if (type == MIRType::Value) {
16361 return TypedOrValueRegister(ToValue(lir, n));
16364 const LAllocation* value = lir->getOperand(n);
16365 if (value->isConstant()) {
16366 return ConstantOrRegister(value->toConstant()->toJSValue());
16369 return TypedOrValueRegister(type, ToAnyRegister(value));
16372 void CodeGenerator::visitGetPropertyCache(LGetPropertyCache* ins) {
16373 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16374 TypedOrValueRegister value =
16375 toConstantOrRegister(ins, LGetPropertyCache::ValueIndex,
16376 ins->mir()->value()->type())
16377 .reg();
16378 ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCache::IdIndex,
16379 ins->mir()->idval()->type());
16380 ValueOperand output = ToOutValue(ins);
16381 addGetPropertyCache(ins, liveRegs, value, id, output);
16384 void CodeGenerator::visitGetPropSuperCache(LGetPropSuperCache* ins) {
16385 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16386 Register obj = ToRegister(ins->obj());
16387 TypedOrValueRegister receiver =
16388 toConstantOrRegister(ins, LGetPropSuperCache::ReceiverIndex,
16389 ins->mir()->receiver()->type())
16390 .reg();
16391 ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCache::IdIndex,
16392 ins->mir()->idval()->type());
16393 ValueOperand output = ToOutValue(ins);
16395 CacheKind kind = CacheKind::GetElemSuper;
16396 if (id.constant() && id.value().isString()) {
16397 JSString* idString = id.value().toString();
16398 if (idString->isAtom() && !idString->asAtom().isIndex()) {
16399 kind = CacheKind::GetPropSuper;
16403 IonGetPropSuperIC cache(kind, liveRegs, obj, receiver, id, output);
16404 addIC(ins, allocateIC(cache));
16407 void CodeGenerator::visitBindNameCache(LBindNameCache* ins) {
16408 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16409 Register envChain = ToRegister(ins->environmentChain());
16410 Register output = ToRegister(ins->output());
16411 Register temp = ToRegister(ins->temp0());
16413 IonBindNameIC ic(liveRegs, envChain, output, temp);
16414 addIC(ins, allocateIC(ic));
16417 void CodeGenerator::visitHasOwnCache(LHasOwnCache* ins) {
16418 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16419 TypedOrValueRegister value =
16420 toConstantOrRegister(ins, LHasOwnCache::ValueIndex,
16421 ins->mir()->value()->type())
16422 .reg();
16423 TypedOrValueRegister id = toConstantOrRegister(ins, LHasOwnCache::IdIndex,
16424 ins->mir()->idval()->type())
16425 .reg();
16426 Register output = ToRegister(ins->output());
16428 IonHasOwnIC cache(liveRegs, value, id, output);
16429 addIC(ins, allocateIC(cache));
16432 void CodeGenerator::visitCheckPrivateFieldCache(LCheckPrivateFieldCache* ins) {
16433 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16434 TypedOrValueRegister value =
16435 toConstantOrRegister(ins, LCheckPrivateFieldCache::ValueIndex,
16436 ins->mir()->value()->type())
16437 .reg();
16438 TypedOrValueRegister id =
16439 toConstantOrRegister(ins, LCheckPrivateFieldCache::IdIndex,
16440 ins->mir()->idval()->type())
16441 .reg();
16442 Register output = ToRegister(ins->output());
16444 IonCheckPrivateFieldIC cache(liveRegs, value, id, output);
16445 addIC(ins, allocateIC(cache));
16448 void CodeGenerator::visitNewPrivateName(LNewPrivateName* ins) {
16449 pushArg(ImmGCPtr(ins->mir()->name()));
16451 using Fn = JS::Symbol* (*)(JSContext*, Handle<JSAtom*>);
16452 callVM<Fn, NewPrivateName>(ins);
16455 void CodeGenerator::visitCallDeleteProperty(LCallDeleteProperty* lir) {
16456 pushArg(ImmGCPtr(lir->mir()->name()));
16457 pushArg(ToValue(lir, LCallDeleteProperty::ValueIndex));
16459 using Fn = bool (*)(JSContext*, HandleValue, Handle<PropertyName*>, bool*);
16460 if (lir->mir()->strict()) {
16461 callVM<Fn, DelPropOperation<true>>(lir);
16462 } else {
16463 callVM<Fn, DelPropOperation<false>>(lir);
16467 void CodeGenerator::visitCallDeleteElement(LCallDeleteElement* lir) {
16468 pushArg(ToValue(lir, LCallDeleteElement::IndexIndex));
16469 pushArg(ToValue(lir, LCallDeleteElement::ValueIndex));
16471 using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
16472 if (lir->mir()->strict()) {
16473 callVM<Fn, DelElemOperation<true>>(lir);
16474 } else {
16475 callVM<Fn, DelElemOperation<false>>(lir);
16479 void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
16480 Register obj = ToRegister(lir->object());
16481 Register iterObj = ToRegister(lir->output());
16482 Register temp = ToRegister(lir->temp0());
16483 Register temp2 = ToRegister(lir->temp1());
16484 Register temp3 = ToRegister(lir->temp2());
16486 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
16487 OutOfLineCode* ool = (lir->mir()->wantsIndices())
16488 ? oolCallVM<Fn, GetIteratorWithIndices>(
16489 lir, ArgList(obj), StoreRegisterTo(iterObj))
16490 : oolCallVM<Fn, GetIterator>(
16491 lir, ArgList(obj), StoreRegisterTo(iterObj));
16493 masm.maybeLoadIteratorFromShape(obj, iterObj, temp, temp2, temp3,
16494 ool->entry());
16496 Register nativeIter = temp;
16497 masm.loadPrivate(
16498 Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
16499 nativeIter);
16501 if (lir->mir()->wantsIndices()) {
16502 // At least one consumer of the output of this iterator has been optimized
16503 // to use iterator indices. If the cached iterator doesn't include indices,
16504 // but it was marked to indicate that we can create them if needed, then we
16505 // do a VM call to replace the cached iterator with a fresh iterator
16506 // including indices.
16507 masm.branchNativeIteratorIndices(Assembler::Equal, nativeIter, temp2,
16508 NativeIteratorIndices::AvailableOnRequest,
16509 ool->entry());
16512 Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
16513 masm.storePtr(
16514 obj, Address(nativeIter, NativeIterator::offsetOfObjectBeingIterated()));
16515 masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
16517 Register enumeratorsAddr = temp2;
16518 masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
16519 masm.registerIterator(enumeratorsAddr, nativeIter, temp3);
16521 // Generate post-write barrier for storing to |iterObj->objectBeingIterated_|.
16522 // We already know that |iterObj| is tenured, so we only have to check |obj|.
16523 Label skipBarrier;
16524 masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);
16526 LiveRegisterSet save = liveVolatileRegs(lir);
16527 save.takeUnchecked(temp);
16528 save.takeUnchecked(temp2);
16529 save.takeUnchecked(temp3);
16530 if (iterObj.volatile_()) {
16531 save.addUnchecked(iterObj);
16534 masm.PushRegsInMask(save);
16535 emitPostWriteBarrier(iterObj);
16536 masm.PopRegsInMask(save);
16538 masm.bind(&skipBarrier);
16540 masm.bind(ool->rejoin());
16543 void CodeGenerator::visitValueToIterator(LValueToIterator* lir) {
16544 pushArg(ToValue(lir, LValueToIterator::ValueIndex));
16546 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
16547 callVM<Fn, ValueToIterator>(lir);
16550 void CodeGenerator::visitIteratorHasIndicesAndBranch(
16551 LIteratorHasIndicesAndBranch* lir) {
16552 Register iterator = ToRegister(lir->iterator());
16553 Register object = ToRegister(lir->object());
16554 Register temp = ToRegister(lir->temp());
16555 Register temp2 = ToRegister(lir->temp2());
16556 Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
16557 Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
16559 // Check that the iterator has indices available.
16560 Address nativeIterAddr(iterator,
16561 PropertyIteratorObject::offsetOfIteratorSlot());
16562 masm.loadPrivate(nativeIterAddr, temp);
16563 masm.branchNativeIteratorIndices(Assembler::NotEqual, temp, temp2,
16564 NativeIteratorIndices::Valid, ifFalse);
16566 // Guard that the first shape stored in the iterator matches the current
16567 // shape of the iterated object.
16568 Address firstShapeAddr(temp, NativeIterator::offsetOfFirstShape());
16569 masm.loadPtr(firstShapeAddr, temp);
16570 masm.branchTestObjShape(Assembler::NotEqual, object, temp, temp2, object,
16571 ifFalse);
16573 if (!isNextBlock(lir->ifTrue()->lir())) {
16574 masm.jump(ifTrue);
16578 void CodeGenerator::visitLoadSlotByIteratorIndex(
16579 LLoadSlotByIteratorIndex* lir) {
16580 Register object = ToRegister(lir->object());
16581 Register iterator = ToRegister(lir->iterator());
16582 Register temp = ToRegister(lir->temp0());
16583 Register temp2 = ToRegister(lir->temp1());
16584 ValueOperand result = ToOutValue(lir);
16586 masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);
16588 Label notDynamicSlot, notFixedSlot, done;
16589 masm.branch32(Assembler::NotEqual, temp2,
16590 Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
16591 &notDynamicSlot);
16592 masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
16593 masm.loadValue(BaseValueIndex(temp2, temp), result);
16594 masm.jump(&done);
16596 masm.bind(&notDynamicSlot);
16597 masm.branch32(Assembler::NotEqual, temp2,
16598 Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
16599 // Fixed slot
16600 masm.loadValue(BaseValueIndex(object, temp, sizeof(NativeObject)), result);
16601 masm.jump(&done);
16602 masm.bind(&notFixedSlot);
16604 #ifdef DEBUG
16605 Label kindOkay;
16606 masm.branch32(Assembler::Equal, temp2,
16607 Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
16608 masm.assumeUnreachable("Invalid PropertyIndex::Kind");
16609 masm.bind(&kindOkay);
16610 #endif
16612 // Dense element
16613 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
16614 Label indexOkay;
16615 Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
16616 masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
16617 masm.assumeUnreachable("Dense element out of bounds");
16618 masm.bind(&indexOkay);
16620 masm.loadValue(BaseObjectElementIndex(temp2, temp), result);
16621 masm.bind(&done);
16624 void CodeGenerator::visitStoreSlotByIteratorIndex(
16625 LStoreSlotByIteratorIndex* lir) {
16626 Register object = ToRegister(lir->object());
16627 Register iterator = ToRegister(lir->iterator());
16628 ValueOperand value = ToValue(lir, LStoreSlotByIteratorIndex::ValueIndex);
16629 Register temp = ToRegister(lir->temp0());
16630 Register temp2 = ToRegister(lir->temp1());
16632 masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);
16634 Label notDynamicSlot, notFixedSlot, done, doStore;
16635 masm.branch32(Assembler::NotEqual, temp2,
16636 Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
16637 &notDynamicSlot);
16638 masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
16639 masm.computeEffectiveAddress(BaseValueIndex(temp2, temp), temp);
16640 masm.jump(&doStore);
16642 masm.bind(&notDynamicSlot);
16643 masm.branch32(Assembler::NotEqual, temp2,
16644 Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
16645 // Fixed slot
16646 masm.computeEffectiveAddress(
16647 BaseValueIndex(object, temp, sizeof(NativeObject)), temp);
16648 masm.jump(&doStore);
16649 masm.bind(&notFixedSlot);
16651 #ifdef DEBUG
16652 Label kindOkay;
16653 masm.branch32(Assembler::Equal, temp2,
16654 Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
16655 masm.assumeUnreachable("Invalid PropertyIndex::Kind");
16656 masm.bind(&kindOkay);
16657 #endif
16659 // Dense element
16660 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
16661 Label indexOkay;
16662 Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
16663 masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
16664 masm.assumeUnreachable("Dense element out of bounds");
16665 masm.bind(&indexOkay);
16667 BaseObjectElementIndex elementAddress(temp2, temp);
16668 masm.computeEffectiveAddress(elementAddress, temp);
16670 masm.bind(&doStore);
16671 Address storeAddress(temp, 0);
16672 emitPreBarrier(storeAddress);
16673 masm.storeValue(value, storeAddress);
16675 masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp2, &done);
16676 masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp2, &done);
16678 saveVolatile(temp2);
16679 emitPostWriteBarrier(object);
16680 restoreVolatile(temp2);
16682 masm.bind(&done);
16685 void CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins) {
16686 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16687 Register objReg = ToRegister(ins->object());
16688 Register temp = ToRegister(ins->temp0());
16690 ConstantOrRegister id = toConstantOrRegister(ins, LSetPropertyCache::IdIndex,
16691 ins->mir()->idval()->type());
16692 ConstantOrRegister value = toConstantOrRegister(
16693 ins, LSetPropertyCache::ValueIndex, ins->mir()->value()->type());
16695 addSetPropertyCache(ins, liveRegs, objReg, temp, id, value,
16696 ins->mir()->strict());
16699 void CodeGenerator::visitThrow(LThrow* lir) {
16700 pushArg(ToValue(lir, LThrow::ValueIndex));
16702 using Fn = bool (*)(JSContext*, HandleValue);
16703 callVM<Fn, js::ThrowOperation>(lir);
16706 void CodeGenerator::visitThrowWithStack(LThrowWithStack* lir) {
16707 pushArg(ToValue(lir, LThrowWithStack::StackIndex));
16708 pushArg(ToValue(lir, LThrowWithStack::ValueIndex));
16710 using Fn = bool (*)(JSContext*, HandleValue, HandleValue);
16711 callVM<Fn, js::ThrowWithStackOperation>(lir);
16714 class OutOfLineTypeOfV : public OutOfLineCodeBase<CodeGenerator> {
16715 LTypeOfV* ins_;
16717 public:
16718 explicit OutOfLineTypeOfV(LTypeOfV* ins) : ins_(ins) {}
16720 void accept(CodeGenerator* codegen) override {
16721 codegen->visitOutOfLineTypeOfV(this);
16723 LTypeOfV* ins() const { return ins_; }
16726 void CodeGenerator::emitTypeOfJSType(JSValueType type, Register output) {
16727 switch (type) {
16728 case JSVAL_TYPE_OBJECT:
16729 masm.move32(Imm32(JSTYPE_OBJECT), output);
16730 break;
16731 case JSVAL_TYPE_DOUBLE:
16732 case JSVAL_TYPE_INT32:
16733 masm.move32(Imm32(JSTYPE_NUMBER), output);
16734 break;
16735 case JSVAL_TYPE_BOOLEAN:
16736 masm.move32(Imm32(JSTYPE_BOOLEAN), output);
16737 break;
16738 case JSVAL_TYPE_UNDEFINED:
16739 masm.move32(Imm32(JSTYPE_UNDEFINED), output);
16740 break;
16741 case JSVAL_TYPE_NULL:
16742 masm.move32(Imm32(JSTYPE_OBJECT), output);
16743 break;
16744 case JSVAL_TYPE_STRING:
16745 masm.move32(Imm32(JSTYPE_STRING), output);
16746 break;
16747 case JSVAL_TYPE_SYMBOL:
16748 masm.move32(Imm32(JSTYPE_SYMBOL), output);
16749 break;
16750 case JSVAL_TYPE_BIGINT:
16751 masm.move32(Imm32(JSTYPE_BIGINT), output);
16752 break;
16753 default:
16754 MOZ_CRASH("Unsupported JSValueType");
16758 void CodeGenerator::emitTypeOfCheck(JSValueType type, Register tag,
16759 Register output, Label* done,
16760 Label* oolObject) {
16761 Label notMatch;
16762 switch (type) {
16763 case JSVAL_TYPE_OBJECT:
16764 // The input may be a callable object (result is "function") or
16765 // may emulate undefined (result is "undefined"). Use an OOL path.
16766 masm.branchTestObject(Assembler::Equal, tag, oolObject);
16767 return;
16768 case JSVAL_TYPE_DOUBLE:
16769 case JSVAL_TYPE_INT32:
16770 masm.branchTestNumber(Assembler::NotEqual, tag, &notMatch);
16771 break;
16772 default:
16773 masm.branchTestType(Assembler::NotEqual, tag, type, &notMatch);
16774 break;
16777 emitTypeOfJSType(type, output);
16778 masm.jump(done);
16779 masm.bind(&notMatch);
16782 void CodeGenerator::visitTypeOfV(LTypeOfV* lir) {
16783 const ValueOperand value = ToValue(lir, LTypeOfV::InputIndex);
16784 Register output = ToRegister(lir->output());
16785 Register tag = masm.extractTag(value, output);
16787 Label done;
16789 auto* ool = new (alloc()) OutOfLineTypeOfV(lir);
16790 addOutOfLineCode(ool, lir->mir());
16792 const std::initializer_list<JSValueType> defaultOrder = {
16793 JSVAL_TYPE_OBJECT, JSVAL_TYPE_DOUBLE, JSVAL_TYPE_UNDEFINED,
16794 JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN, JSVAL_TYPE_STRING,
16795 JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
16797 mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);
16799 // Generate checks for previously observed types first.
16800 // The TypeDataList is sorted by descending frequency.
16801 for (auto& observed : lir->mir()->observedTypes()) {
16802 JSValueType type = observed.type();
16804 // Unify number types.
16805 if (type == JSVAL_TYPE_INT32) {
16806 type = JSVAL_TYPE_DOUBLE;
16809 remaining -= type;
16811 emitTypeOfCheck(type, tag, output, &done, ool->entry());
16814 // Generate checks for remaining types.
16815 for (auto type : defaultOrder) {
16816 if (!remaining.contains(type)) {
16817 continue;
16819 remaining -= type;
16821 if (remaining.isEmpty() && type != JSVAL_TYPE_OBJECT) {
16822 // We can skip the check for the last remaining type, unless the type is
16823 // JSVAL_TYPE_OBJECT, which may have to go through the OOL path.
16824 #ifdef DEBUG
16825 emitTypeOfCheck(type, tag, output, &done, ool->entry());
16826 masm.assumeUnreachable("Unexpected Value type in visitTypeOfV");
16827 #else
16828 emitTypeOfJSType(type, output);
16829 #endif
16830 } else {
16831 emitTypeOfCheck(type, tag, output, &done, ool->entry());
16834 MOZ_ASSERT(remaining.isEmpty());
16836 masm.bind(&done);
16837 masm.bind(ool->rejoin());
16840 void CodeGenerator::emitTypeOfObject(Register obj, Register output,
16841 Label* done) {
16842 Label slowCheck, isObject, isCallable, isUndefined;
16843 masm.typeOfObject(obj, output, &slowCheck, &isObject, &isCallable,
16844 &isUndefined);
16846 masm.bind(&isCallable);
16847 masm.move32(Imm32(JSTYPE_FUNCTION), output);
16848 masm.jump(done);
16850 masm.bind(&isUndefined);
16851 masm.move32(Imm32(JSTYPE_UNDEFINED), output);
16852 masm.jump(done);
16854 masm.bind(&isObject);
16855 masm.move32(Imm32(JSTYPE_OBJECT), output);
16856 masm.jump(done);
16858 masm.bind(&slowCheck);
16860 saveVolatile(output);
16861 using Fn = JSType (*)(JSObject*);
16862 masm.setupAlignedABICall();
16863 masm.passABIArg(obj);
16864 masm.callWithABI<Fn, js::TypeOfObject>();
16865 masm.storeCallInt32Result(output);
16866 restoreVolatile(output);
16869 void CodeGenerator::visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool) {
16870 LTypeOfV* ins = ool->ins();
16872 ValueOperand input = ToValue(ins, LTypeOfV::InputIndex);
16873 Register temp = ToTempUnboxRegister(ins->temp0());
16874 Register output = ToRegister(ins->output());
16876 Register obj = masm.extractObject(input, temp);
16877 emitTypeOfObject(obj, output, ool->rejoin());
16878 masm.jump(ool->rejoin());
16881 void CodeGenerator::visitTypeOfO(LTypeOfO* lir) {
16882 Register obj = ToRegister(lir->object());
16883 Register output = ToRegister(lir->output());
16885 Label done;
16886 emitTypeOfObject(obj, output, &done);
16887 masm.bind(&done);
// Map a JSType index (in |input|) to the corresponding interned type-name
// string. Relies on the runtime's name table being laid out contiguously
// starting at names().undefined, with JSTYPE_UNDEFINED == 0 so the JSType
// value can be used directly as a pointer-scaled index.
16890 void CodeGenerator::visitTypeOfName(LTypeOfName* lir) {
16891 Register input = ToRegister(lir->input());
16892 Register output = ToRegister(lir->output());
16894 #ifdef DEBUG
// Debug-only guard: the input must be a valid JSType (< JSTYPE_LIMIT).
16895 Label ok;
16896 masm.branch32(Assembler::Below, input, Imm32(JSTYPE_LIMIT), &ok);
16897 masm.assumeUnreachable("bad JSType");
16898 masm.bind(&ok);
16899 #endif
16901 static_assert(JSTYPE_UNDEFINED == 0);
16903 masm.movePtr(ImmPtr(&gen->runtime->names().undefined), output);
16904 masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
// Out-of-line code holder for LTypeOfIsNonPrimitiveV; dispatches back into
// CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV.
16907 class OutOfLineTypeOfIsNonPrimitiveV : public OutOfLineCodeBase<CodeGenerator> {
16908 LTypeOfIsNonPrimitiveV* ins_;
16910 public:
16911 explicit OutOfLineTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* ins)
16912 : ins_(ins) {}
16914 void accept(CodeGenerator* codegen) override {
16915 codegen->visitOutOfLineTypeOfIsNonPrimitiveV(this);
// Accessor for the LIR instruction this OOL path belongs to.
16917 auto* ins() const { return ins_; }
// Out-of-line code holder for LTypeOfIsNonPrimitiveO; dispatches back into
// CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO.
16920 class OutOfLineTypeOfIsNonPrimitiveO : public OutOfLineCodeBase<CodeGenerator> {
16921 LTypeOfIsNonPrimitiveO* ins_;
16923 public:
16924 explicit OutOfLineTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* ins)
16925 : ins_(ins) {}
16927 void accept(CodeGenerator* codegen) override {
16928 codegen->visitOutOfLineTypeOfIsNonPrimitiveO(this);
// Accessor for the LIR instruction this OOL path belongs to.
16930 auto* ins() const { return ins_; }
// Slow path shared by the TypeOfIsNonPrimitive OOL visitors: call
// js::TypeOfObject via the ABI, then compare the returned JSType against the
// type being tested (mir->jstype()) with the comparison condition derived
// from mir->jsop(), setting |output| to the boolean result.
16933 void CodeGenerator::emitTypeOfIsObjectOOL(MTypeOfIs* mir, Register obj,
16934 Register output) {
16935 saveVolatile(output);
16936 using Fn = JSType (*)(JSObject*);
16937 masm.setupAlignedABICall();
16938 masm.passABIArg(obj);
16939 masm.callWithABI<Fn, js::TypeOfObject>();
16940 masm.storeCallInt32Result(output);
16941 restoreVolatile(output);
16943 auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
16944 masm.cmp32Set(cond, output, Imm32(mir->jstype()), output);
// OOL path for the boxed-Value flavor: unbox the object payload and run the
// shared slow-path comparison, then rejoin inline code.
16947 void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV(
16948 OutOfLineTypeOfIsNonPrimitiveV* ool) {
16949 auto* ins = ool->ins();
16950 ValueOperand input = ToValue(ins, LTypeOfIsNonPrimitiveV::InputIndex);
16951 Register output = ToRegister(ins->output());
16952 Register temp = ToTempUnboxRegister(ins->temp0());
16954 Register obj = masm.extractObject(input, temp);
16956 emitTypeOfIsObjectOOL(ins->mir(), obj, output);
16958 masm.jump(ool->rejoin());
// OOL path for the known-object flavor: run the shared slow-path comparison
// directly on the input register, then rejoin inline code.
16961 void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO(
16962 OutOfLineTypeOfIsNonPrimitiveO* ool) {
16963 auto* ins = ool->ins();
16964 Register input = ToRegister(ins->input());
16965 Register output = ToRegister(ins->output());
16967 emitTypeOfIsObjectOOL(ins->mir(), input, output);
16969 masm.jump(ool->rejoin());
// Emit the inline fast path for |typeof obj ==/!= "type"| on an object.
// masm.typeOfObject classifies |obj| and jumps to one of three labels; the
// label matching mir->jstype() is wired to |success| and the others to
// |fail|. The |slowCheck| label is taken for objects the inline classifier
// cannot decide. After classification, |output| is set to the boolean that
// the comparison operator (Eq/Ne, strict or not) dictates for each outcome.
16972 void CodeGenerator::emitTypeOfIsObject(MTypeOfIs* mir, Register obj,
16973 Register output, Label* success,
16974 Label* fail, Label* slowCheck) {
16975 Label* isObject = fail;
16976 Label* isFunction = fail;
16977 Label* isUndefined = fail;
16979 switch (mir->jstype()) {
16980 case JSTYPE_UNDEFINED:
16981 isUndefined = success;
16982 break;
16984 case JSTYPE_OBJECT:
16985 isObject = success;
16986 break;
16988 case JSTYPE_FUNCTION:
16989 isFunction = success;
16990 break;
// Primitive types never reach this helper; MTypeOfIs on primitives is
// handled by visitTypeOfIsPrimitive.
16992 case JSTYPE_STRING:
16993 case JSTYPE_NUMBER:
16994 case JSTYPE_BOOLEAN:
16995 case JSTYPE_SYMBOL:
16996 case JSTYPE_BIGINT:
16997 #ifdef ENABLE_RECORD_TUPLE
16998 case JSTYPE_RECORD:
16999 case JSTYPE_TUPLE:
17000 #endif
17001 case JSTYPE_LIMIT:
17002 MOZ_CRASH("Primitive type");
17005 masm.typeOfObject(obj, output, slowCheck, isObject, isFunction, isUndefined);
17007 auto op = mir->jsop();
// Materialize the boolean: a failed match yields true only for Ne/StrictNe,
// a successful match yields true only for Eq/StrictEq.
17009 Label done;
17010 masm.bind(fail);
17011 masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
17012 masm.jump(&done);
17013 masm.bind(success);
17014 masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
17015 masm.bind(&done);
// typeof-is comparison on a boxed Value against "undefined", "object" or
// "function". Tag tests decide the trivial cases without unboxing; only a
// value that is actually an object needs the object classifier (and possibly
// the OOL slow path).
17018 void CodeGenerator::visitTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* lir) {
17019 ValueOperand input = ToValue(lir, LTypeOfIsNonPrimitiveV::InputIndex);
17020 Register output = ToRegister(lir->output());
17021 Register temp = ToTempUnboxRegister(lir->temp0());
17023 auto* mir = lir->mir();
17025 auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveV(lir);
17026 addOutOfLineCode(ool, mir);
17028 Label success, fail;
17030 switch (mir->jstype()) {
17031 case JSTYPE_UNDEFINED: {
// The undefined tag matches directly; any non-object tag fails. Objects
// fall through (they may emulate undefined) to the classifier below.
17032 ScratchTagScope tag(masm, input);
17033 masm.splitTagForTest(input, tag);
17035 masm.branchTestUndefined(Assembler::Equal, tag, &success);
17036 masm.branchTestObject(Assembler::NotEqual, tag, &fail);
17037 break;
17040 case JSTYPE_OBJECT: {
// typeof null is "object", so the null tag matches; any other non-object
// tag fails. Actual objects fall through to the classifier.
17041 ScratchTagScope tag(masm, input);
17042 masm.splitTagForTest(input, tag);
17044 masm.branchTestNull(Assembler::Equal, tag, &success);
17045 masm.branchTestObject(Assembler::NotEqual, tag, &fail);
17046 break;
17049 case JSTYPE_FUNCTION: {
// Only objects can be functions; every other tag fails immediately.
17050 masm.branchTestObject(Assembler::NotEqual, input, &fail);
17051 break;
17054 case JSTYPE_STRING:
17055 case JSTYPE_NUMBER:
17056 case JSTYPE_BOOLEAN:
17057 case JSTYPE_SYMBOL:
17058 case JSTYPE_BIGINT:
17059 #ifdef ENABLE_RECORD_TUPLE
17060 case JSTYPE_RECORD:
17061 case JSTYPE_TUPLE:
17062 #endif
17063 case JSTYPE_LIMIT:
17064 MOZ_CRASH("Primitive type");
17067 Register obj = masm.extractObject(input, temp);
17069 emitTypeOfIsObject(mir, obj, output, &success, &fail, ool->entry());
17071 masm.bind(ool->rejoin());
// typeof-is comparison on a known-object input: classify inline, with an OOL
// slow path for objects the classifier cannot decide.
17074 void CodeGenerator::visitTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* lir) {
17075 Register input = ToRegister(lir->input());
17076 Register output = ToRegister(lir->output());
17078 auto* mir = lir->mir();
17080 auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveO(lir);
17081 addOutOfLineCode(ool, mir);
17083 Label success, fail;
17084 emitTypeOfIsObject(mir, input, output, &success, &fail, ool->entry());
17086 masm.bind(ool->rejoin());
// typeof-is comparison against a primitive type name: a single tag test on
// the boxed Value decides the result, with the condition taken from the
// comparison operator (Eq/Ne, strict or not).
17089 void CodeGenerator::visitTypeOfIsPrimitive(LTypeOfIsPrimitive* lir) {
17090 ValueOperand input = ToValue(lir, LTypeOfIsPrimitive::InputIndex);
17091 Register output = ToRegister(lir->output());
17093 auto* mir = lir->mir();
17094 auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
17096 switch (mir->jstype()) {
17097 case JSTYPE_STRING:
17098 masm.testStringSet(cond, input, output);
17099 break;
17100 case JSTYPE_NUMBER:
17101 masm.testNumberSet(cond, input, output);
17102 break;
17103 case JSTYPE_BOOLEAN:
17104 masm.testBooleanSet(cond, input, output);
17105 break;
17106 case JSTYPE_SYMBOL:
17107 masm.testSymbolSet(cond, input, output);
17108 break;
17109 case JSTYPE_BIGINT:
17110 masm.testBigIntSet(cond, input, output);
17111 break;
// Non-primitive types are handled by the TypeOfIsNonPrimitive variants.
17113 case JSTYPE_UNDEFINED:
17114 case JSTYPE_OBJECT:
17115 case JSTYPE_FUNCTION:
17116 #ifdef ENABLE_RECORD_TUPLE
17117 case JSTYPE_RECORD:
17118 case JSTYPE_TUPLE:
17119 #endif
17120 case JSTYPE_LIMIT:
17121 MOZ_CRASH("Non-primitive type");
// Wrap a sync iterator into an async-from-sync iterator via a VM call.
// Arguments are pushed in reverse order (next-method value, then iterator).
17125 void CodeGenerator::visitToAsyncIter(LToAsyncIter* lir) {
17126 pushArg(ToValue(lir, LToAsyncIter::NextMethodIndex));
17127 pushArg(ToRegister(lir->iterator()));
17129 using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
17130 callVM<Fn, js::CreateAsyncFromSyncIterator>(lir);
// ToPropertyKey implemented via an IC: attach an IonToPropertyKeyIC that
// converts the boxed input value to a property key in the output value.
17133 void CodeGenerator::visitToPropertyKeyCache(LToPropertyKeyCache* lir) {
17134 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
17135 ValueOperand input = ToValue(lir, LToPropertyKeyCache::InputIndex);
17136 ValueOperand output = ToOutValue(lir);
17138 IonToPropertyKeyIC ic(liveRegs, input, output);
17139 addIC(lir, allocateIC(ic));
// Load a boxed Value from a native object's elements at a constant or
// register index, bailing out if the slot holds the magic hole value.
17142 void CodeGenerator::visitLoadElementV(LLoadElementV* load) {
17143 Register elements = ToRegister(load->elements());
17144 const ValueOperand out = ToOutValue(load);
17146 if (load->index()->isConstant()) {
// Constant index: the byte offset fits in an int32 because the elements
// size cannot overflow (asserted below).
17147 NativeObject::elementsSizeMustNotOverflow();
17148 int32_t offset = ToInt32(load->index()) * sizeof(Value);
17149 masm.loadValue(Address(elements, offset), out);
17150 } else {
17151 masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index())),
17152 out);
// A magic value marks a hole; deoptimize rather than expose it.
17155 Label testMagic;
17156 masm.branchTestMagic(Assembler::Equal, out, &testMagic);
17157 bailoutFrom(&testMagic, load->snapshot());
// Load an element that may be out of bounds or a hole, producing |undefined|
// in those cases instead of bailing. Negative indices still bail out when
// the MIR requires the negative-int check.
17160 void CodeGenerator::visitLoadElementHole(LLoadElementHole* lir) {
17161 Register elements = ToRegister(lir->elements());
17162 Register index = ToRegister(lir->index());
17163 Register initLength = ToRegister(lir->initLength());
17164 const ValueOperand out = ToOutValue(lir);
17166 const MLoadElementHole* mir = lir->mir();
17168 // If the index is out of bounds, load |undefined|. Otherwise, load the
17169 // value.
17170 Label outOfBounds, done;
17171 masm.spectreBoundsCheck32(index, initLength, out.scratchReg(), &outOfBounds);
17173 masm.loadValue(BaseObjectElementIndex(elements, index), out);
17175 // If the value wasn't a hole, we're done. Otherwise, we'll load undefined.
17176 masm.branchTestMagic(Assembler::NotEqual, out, &done);
17178 if (mir->needsNegativeIntCheck()) {
// Out-of-bounds with a negative index must deoptimize; only non-negative
// out-of-bounds indices produce undefined.
17179 Label loadUndefined;
17180 masm.jump(&loadUndefined);
17182 masm.bind(&outOfBounds);
17184 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
17186 masm.bind(&loadUndefined);
17187 } else {
17188 masm.bind(&outOfBounds);
17190 masm.moveValue(UndefinedValue(), out);
17192 masm.bind(&done);
// Load a scalar from typed-array-style storage into an unboxed register.
// loadFromTypedArray may emit a failure branch (e.g. when the loaded value
// cannot be represented in the output type); that branch becomes a bailout.
17195 void CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) {
17196 Register elements = ToRegister(lir->elements());
17197 Register temp = ToTempRegisterOrInvalid(lir->temp0());
17198 AnyRegister out = ToAnyRegister(lir->output());
17200 const MLoadUnboxedScalar* mir = lir->mir();
17202 Scalar::Type storageType = mir->storageType();
17204 Label fail;
17205 if (lir->index()->isConstant()) {
17206 Address source =
17207 ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
17208 masm.loadFromTypedArray(storageType, source, out, temp, &fail);
17209 } else {
17210 BaseIndex source(elements, ToRegister(lir->index()),
17211 ScaleFromScalarType(storageType), mir->offsetAdjustment());
17212 masm.loadFromTypedArray(storageType, source, out, temp, &fail);
// Only bind a bailout if the load actually used the failure label.
17215 if (fail.used()) {
17216 bailoutFrom(&fail, lir->snapshot());
// Load a 64-bit value from BigInt64/BigUint64 storage into a temp register
// pair, then allocate/initialize a BigInt object from it.
17220 void CodeGenerator::visitLoadUnboxedBigInt(LLoadUnboxedBigInt* lir) {
17221 Register elements = ToRegister(lir->elements());
17222 Register temp = ToRegister(lir->temp());
17223 Register64 temp64 = ToRegister64(lir->temp64());
17224 Register out = ToRegister(lir->output());
17226 const MLoadUnboxedScalar* mir = lir->mir();
17228 Scalar::Type storageType = mir->storageType();
17230 if (lir->index()->isConstant()) {
17231 Address source =
17232 ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
17233 masm.load64(source, temp64);
17234 } else {
17235 BaseIndex source(elements, ToRegister(lir->index()),
17236 ScaleFromScalarType(storageType), mir->offsetAdjustment());
17237 masm.load64(source, temp64);
17240 emitCreateBigInt(lir, storageType, temp64, out, temp);
// DataView element load: unlike typed arrays, the access is byte-addressed
// (TimesOne scale), may be unaligned, and may need a byte swap depending on
// the littleEndian argument vs. the host endianness. Fast path: when no swap
// is needed and unaligned FP loads are supported, load directly. Otherwise
// load into a GPR (or GPR pair), conditionally byte-swap, then move into the
// typed output register.
17243 void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
17244 Register elements = ToRegister(lir->elements());
17245 const LAllocation* littleEndian = lir->littleEndian();
17246 Register temp = ToTempRegisterOrInvalid(lir->temp());
17247 Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
17248 AnyRegister out = ToAnyRegister(lir->output());
17250 const MLoadDataViewElement* mir = lir->mir();
17251 Scalar::Type storageType = mir->storageType();
17253 BaseIndex source(elements, ToRegister(lir->index()), TimesOne);
// No swap needed when the requested endianness is a constant matching the
// host's endianness.
17255 bool noSwap = littleEndian->isConstant() &&
17256 ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();
17258 // Directly load if no byte swap is needed and the platform supports unaligned
17259 // accesses for the access. (Such support is assumed for integer types.)
17260 if (noSwap && (!Scalar::isFloatingType(storageType) ||
17261 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
17262 if (!Scalar::isBigIntType(storageType)) {
17263 Label fail;
17264 masm.loadFromTypedArray(storageType, source, out, temp, &fail);
17266 if (fail.used()) {
17267 bailoutFrom(&fail, lir->snapshot());
17269 } else {
17270 masm.load64(source, temp64);
17272 emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
17274 return;
17277 // Load the value into a gpr register.
17278 switch (storageType) {
17279 case Scalar::Int16:
17280 masm.load16UnalignedSignExtend(source, out.gpr());
17281 break;
17282 case Scalar::Uint16:
17283 masm.load16UnalignedZeroExtend(source, out.gpr());
17284 break;
17285 case Scalar::Int32:
17286 masm.load32Unaligned(source, out.gpr());
17287 break;
17288 case Scalar::Uint32:
// Uint32 with a float output goes through |temp| so it can be converted
// to double after the (possible) swap.
17289 masm.load32Unaligned(source, out.isFloat() ? temp : out.gpr());
17290 break;
17291 case Scalar::Float32:
17292 masm.load32Unaligned(source, temp);
17293 break;
17294 case Scalar::Float64:
17295 case Scalar::BigInt64:
17296 case Scalar::BigUint64:
17297 masm.load64Unaligned(source, temp64);
17298 break;
17299 case Scalar::Int8:
17300 case Scalar::Uint8:
17301 case Scalar::Uint8Clamped:
17302 default:
17303 MOZ_CRASH("Invalid typed array type");
17306 if (!noSwap) {
17307 // Swap the bytes in the loaded value.
17308 Label skip;
// Runtime endianness flag: skip the swap when the requested byte order
// already matches the host.
17309 if (!littleEndian->isConstant()) {
17310 masm.branch32(
17311 MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
17312 ToRegister(littleEndian), Imm32(0), &skip);
17315 switch (storageType) {
17316 case Scalar::Int16:
17317 masm.byteSwap16SignExtend(out.gpr());
17318 break;
17319 case Scalar::Uint16:
17320 masm.byteSwap16ZeroExtend(out.gpr());
17321 break;
17322 case Scalar::Int32:
17323 masm.byteSwap32(out.gpr());
17324 break;
17325 case Scalar::Uint32:
17326 masm.byteSwap32(out.isFloat() ? temp : out.gpr());
17327 break;
17328 case Scalar::Float32:
17329 masm.byteSwap32(temp);
17330 break;
17331 case Scalar::Float64:
17332 case Scalar::BigInt64:
17333 case Scalar::BigUint64:
17334 masm.byteSwap64(temp64);
17335 break;
17336 case Scalar::Int8:
17337 case Scalar::Uint8:
17338 case Scalar::Uint8Clamped:
17339 default:
17340 MOZ_CRASH("Invalid typed array type");
17343 if (skip.used()) {
17344 masm.bind(&skip);
17348 // Move the value into the output register.
17349 switch (storageType) {
17350 case Scalar::Int16:
17351 case Scalar::Uint16:
17352 case Scalar::Int32:
17353 break;
17354 case Scalar::Uint32:
17355 if (out.isFloat()) {
17356 masm.convertUInt32ToDouble(temp, out.fpu());
17357 } else {
17358 // Bail out if the value doesn't fit into a signed int32 value. This
17359 // is what allows MLoadDataViewElement to have a type() of
17360 // MIRType::Int32 for UInt32 array loads.
17361 bailoutTest32(Assembler::Signed, out.gpr(), out.gpr(), lir->snapshot());
17363 break;
17364 case Scalar::Float32:
17365 masm.moveGPRToFloat32(temp, out.fpu());
17366 masm.canonicalizeFloat(out.fpu());
17367 break;
17368 case Scalar::Float64:
17369 masm.moveGPR64ToDouble(temp64, out.fpu());
17370 masm.canonicalizeDouble(out.fpu());
17371 break;
17372 case Scalar::BigInt64:
17373 case Scalar::BigUint64:
17374 emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
17375 break;
17376 case Scalar::Int8:
17377 case Scalar::Uint8:
17378 case Scalar::Uint8Clamped:
17379 default:
17380 MOZ_CRASH("Invalid typed array type");
// Typed-array element load that yields |undefined| for out-of-bounds
// indices (instead of bailing). Uint32 values that don't fit an int32 either
// force a double result or fail, per the MIR's forceDouble() flag.
17384 void CodeGenerator::visitLoadTypedArrayElementHole(
17385 LLoadTypedArrayElementHole* lir) {
17386 Register elements = ToRegister(lir->elements());
17387 Register index = ToRegister(lir->index());
17388 Register length = ToRegister(lir->length());
17389 const ValueOperand out = ToOutValue(lir);
17391 Register scratch = out.scratchReg();
17393 // Load undefined if index >= length.
17394 Label outOfBounds, done;
17395 masm.spectreBoundsCheckPtr(index, length, scratch, &outOfBounds);
17397 Scalar::Type arrayType = lir->mir()->arrayType();
17398 Label fail;
17399 BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
17400 MacroAssembler::Uint32Mode uint32Mode =
17401 lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
17402 : MacroAssembler::Uint32Mode::FailOnDouble;
17403 masm.loadFromTypedArray(arrayType, source, out, uint32Mode, out.scratchReg(),
17404 &fail);
17405 masm.jump(&done);
17407 masm.bind(&outOfBounds);
17408 masm.moveValue(UndefinedValue(), out);
17410 if (fail.used()) {
17411 bailoutFrom(&fail, lir->snapshot());
17414 masm.bind(&done);
// BigInt64/BigUint64 flavor of the hole-tolerant typed-array load: returns
// |undefined| for out-of-bounds indices, otherwise boxes a freshly created
// BigInt. x86 is register-starved, so there the output Value's registers
// double as the 64-bit temp and the BigInt result register.
17417 void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
17418 LLoadTypedArrayElementHoleBigInt* lir) {
17419 Register elements = ToRegister(lir->elements());
17420 Register index = ToRegister(lir->index());
17421 Register length = ToRegister(lir->length());
17422 const ValueOperand out = ToOutValue(lir);
17424 Register temp = ToRegister(lir->temp());
17426 // On x86 there are not enough registers. In that case reuse the output
17427 // registers as temporaries.
17428 #ifdef JS_CODEGEN_X86
17429 MOZ_ASSERT(lir->temp64().isBogusTemp());
17430 Register64 temp64 = out.toRegister64();
17431 #else
17432 Register64 temp64 = ToRegister64(lir->temp64());
17433 #endif
17435 // Load undefined if index >= length.
17436 Label outOfBounds, done;
17437 masm.spectreBoundsCheckPtr(index, length, temp, &outOfBounds);
17439 Scalar::Type arrayType = lir->mir()->arrayType();
17440 BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
17441 masm.load64(source, temp64);
17443 #ifdef JS_CODEGEN_X86
17444 Register bigInt = temp;
17445 Register maybeTemp = InvalidReg;
17446 #else
17447 Register bigInt = out.scratchReg();
17448 Register maybeTemp = temp;
17449 #endif
17450 emitCreateBigInt(lir, arrayType, temp64, bigInt, maybeTemp);
17452 masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
17453 masm.jump(&done);
17455 masm.bind(&outOfBounds);
17456 masm.moveValue(UndefinedValue(), out);
17458 masm.bind(&done);
// Out-of-line holder for a jump table used to implement switches. The table
// is a sequence of absolute code pointers (CodeLabels); |labels_| records the
// code entry points the table slots must be patched to point at. Depending
// on |tableType| the table either lives inline in the instruction stream
// (ARM) or out of line (other targets).
17461 template <SwitchTableType tableType>
17462 class OutOfLineSwitch : public OutOfLineCodeBase<CodeGenerator> {
17463 using LabelsVector = Vector<Label, 0, JitAllocPolicy>;
17464 using CodeLabelsVector = Vector<CodeLabel, 0, JitAllocPolicy>;
// Entry points of the switch cases, bound via addCodeEntry().
17465 LabelsVector labels_;
// Reserved table slots, patched to the labels after codegen.
17466 CodeLabelsVector codeLabels_;
// Address of the start of the jump table itself.
17467 CodeLabel start_;
17468 bool isOutOfLine_;
17470 void accept(CodeGenerator* codegen) override {
17471 codegen->visitOutOfLineSwitch(this);
17474 public:
17475 explicit OutOfLineSwitch(TempAllocator& alloc)
17476 : labels_(alloc), codeLabels_(alloc), isOutOfLine_(false) {}
17478 CodeLabel* start() { return &start_; }
17480 CodeLabelsVector& codeLabels() { return codeLabels_; }
17481 LabelsVector& labels() { return labels_; }
// Emit an indirect branch through the table slot selected by |index|.
17483 void jumpToCodeEntries(MacroAssembler& masm, Register index, Register temp) {
17484 Register base;
17485 if (tableType == SwitchTableType::Inline) {
17486 #if defined(JS_CODEGEN_ARM)
// ARM inline tables are addressed relative to the current pc.
17487 base = ::js::jit::pc;
17488 #else
17489 MOZ_CRASH("NYI: SwitchTableType::Inline");
17490 #endif
17491 } else {
17492 #if defined(JS_CODEGEN_ARM)
17493 MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
17494 #else
17495 masm.mov(start(), temp);
17496 base = temp;
17497 #endif
17499 BaseIndex jumpTarget(base, index, ScalePointer);
17500 masm.branchToComputedAddress(jumpTarget);
17503 // Register an entry in the switch table.
17504 void addTableEntry(MacroAssembler& masm) {
// Only emit the pointer when we are in the phase that matches the table
// placement (inline tables are written during mainline codegen,
// out-of-line tables during OOL codegen).
17505 if ((!isOutOfLine_ && tableType == SwitchTableType::Inline) ||
17506 (isOutOfLine_ && tableType == SwitchTableType::OutOfLine)) {
17507 CodeLabel cl;
17508 masm.writeCodePointer(&cl);
17509 masm.propagateOOM(codeLabels_.append(std::move(cl)));
17512 // Register the code, to which the table will jump to.
17513 void addCodeEntry(MacroAssembler& masm) {
17514 Label entry;
17515 masm.bind(&entry);
17516 masm.propagateOOM(labels_.append(std::move(entry)));
17519 void setOutOfLine() { isOutOfLine_ = true; }
// Emit the out-of-line portion of a switch jump table: reserve pointer-sized
// slots for each case (out-of-line tables only), then patch every reserved
// slot to the offset of its bound case label.
17522 template <SwitchTableType tableType>
17523 void CodeGenerator::visitOutOfLineSwitch(
17524 OutOfLineSwitch<tableType>* jumpTable) {
17525 jumpTable->setOutOfLine();
17526 auto& labels = jumpTable->labels();
17528 if (tableType == SwitchTableType::OutOfLine) {
17529 #if defined(JS_CODEGEN_ARM)
17530 MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
17531 #elif defined(JS_CODEGEN_NONE)
17532 MOZ_CRASH();
17533 #else
// On ARM64, keep the assembler from inserting pools/nops inside the
// table: its entries must be contiguous pointer-sized slots.
17535 # if defined(JS_CODEGEN_ARM64)
17536 AutoForbidPoolsAndNops afp(
17537 &masm,
17538 (labels.length() + 1) * (sizeof(void*) / vixl::kInstructionSize));
17539 # endif
17541 masm.haltingAlign(sizeof(void*));
17543 // Bind the address of the jump table and reserve the space for code
17544 // pointers to jump in the newly generated code.
17545 masm.bind(jumpTable->start());
17546 masm.addCodeLabel(*jumpTable->start());
17547 for (size_t i = 0, e = labels.length(); i < e; i++) {
17548 jumpTable->addTableEntry(masm);
17550 #endif
17553 // Register all reserved pointers of the jump table to target labels. The
17554 // entries of the jump table need to be absolute addresses and thus must be
17555 // patched after codegen is finished.
17556 auto& codeLabels = jumpTable->codeLabels();
17557 for (size_t i = 0, e = codeLabels.length(); i < e; i++) {
17558 auto& cl = codeLabels[i];
17559 cl.target()->bind(labels[i].offset());
17560 masm.addCodeLabel(cl);
// Explicit instantiations for both switch-table placements.
17564 template void CodeGenerator::visitOutOfLineSwitch(
17565 OutOfLineSwitch<SwitchTableType::Inline>* jumpTable);
17566 template void CodeGenerator::visitOutOfLineSwitch(
17567 OutOfLineSwitch<SwitchTableType::OutOfLine>* jumpTable);
// Store |value| (float register, GPR, or int32 constant, chosen by
// |writeType|) into typed-array storage at |dest| (Address or BaseIndex).
17569 template <typename T>
17570 static inline void StoreToTypedArray(MacroAssembler& masm,
17571 Scalar::Type writeType,
17572 const LAllocation* value, const T& dest) {
17573 if (writeType == Scalar::Float32 || writeType == Scalar::Float64) {
17574 masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
17575 } else {
17576 if (value->isConstant()) {
17577 masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
17578 } else {
17579 masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
// Store a scalar into typed-array-style storage at a constant or register
// index; the conversion to the storage type is handled by StoreToTypedArray.
17584 void CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) {
17585 Register elements = ToRegister(lir->elements());
17586 const LAllocation* value = lir->value();
17588 const MStoreUnboxedScalar* mir = lir->mir();
17590 Scalar::Type writeType = mir->writeType();
17592 if (lir->index()->isConstant()) {
17593 Address dest = ToAddress(elements, lir->index(), writeType);
17594 StoreToTypedArray(masm, writeType, value, dest);
17595 } else {
17596 BaseIndex dest(elements, ToRegister(lir->index()),
17597 ScaleFromScalarType(writeType));
17598 StoreToTypedArray(masm, writeType, value, dest);
// Store a BigInt into BigInt64/BigUint64 storage: extract its 64-bit digit
// value into |temp| first, then write it at the constant or register index.
17602 void CodeGenerator::visitStoreUnboxedBigInt(LStoreUnboxedBigInt* lir) {
17603 Register elements = ToRegister(lir->elements());
17604 Register value = ToRegister(lir->value());
17605 Register64 temp = ToRegister64(lir->temp());
17607 Scalar::Type writeType = lir->mir()->writeType();
17609 masm.loadBigInt64(value, temp);
17611 if (lir->index()->isConstant()) {
17612 Address dest = ToAddress(elements, lir->index(), writeType);
17613 masm.storeToTypedBigIntArray(writeType, temp, dest);
17614 } else {
17615 BaseIndex dest(elements, ToRegister(lir->index()),
17616 ScaleFromScalarType(writeType));
17617 masm.storeToTypedBigIntArray(writeType, temp, dest);
// DataView element store: byte-addressed (TimesOne), possibly unaligned,
// possibly byte-swapped to honor the littleEndian argument. Mirrors
// visitLoadDataViewElement: fast path stores directly when no swap is needed
// and unaligned FP stores are supported; otherwise the value is moved into a
// GPR (or GPR pair), conditionally swapped, and stored unaligned.
17621 void CodeGenerator::visitStoreDataViewElement(LStoreDataViewElement* lir) {
17622 Register elements = ToRegister(lir->elements());
17623 const LAllocation* value = lir->value();
17624 const LAllocation* littleEndian = lir->littleEndian();
17625 Register temp = ToTempRegisterOrInvalid(lir->temp());
17626 Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
17628 const MStoreDataViewElement* mir = lir->mir();
17629 Scalar::Type writeType = mir->writeType();
17631 BaseIndex dest(elements, ToRegister(lir->index()), TimesOne);
// No swap needed when the requested endianness is a constant matching the
// host's endianness.
17633 bool noSwap = littleEndian->isConstant() &&
17634 ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();
17636 // Directly store if no byte swap is needed and the platform supports
17637 // unaligned accesses for the access. (Such support is assumed for integer
17638 // types.)
17639 if (noSwap && (!Scalar::isFloatingType(writeType) ||
17640 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
17641 if (!Scalar::isBigIntType(writeType)) {
17642 StoreToTypedArray(masm, writeType, value, dest);
17643 } else {
17644 masm.loadBigInt64(ToRegister(value), temp64);
17645 masm.storeToTypedBigIntArray(writeType, temp64, dest);
17647 return;
17650 // Load the value into a gpr register.
17651 switch (writeType) {
17652 case Scalar::Int16:
17653 case Scalar::Uint16:
17654 case Scalar::Int32:
17655 case Scalar::Uint32:
17656 if (value->isConstant()) {
17657 masm.move32(Imm32(ToInt32(value)), temp);
17658 } else {
17659 masm.move32(ToRegister(value), temp);
17661 break;
17662 case Scalar::Float32: {
// Canonicalize NaNs before storing when deterministic mode requires it,
// then move the bits into a GPR for the (possible) swap.
17663 FloatRegister fvalue = ToFloatRegister(value);
17664 masm.canonicalizeFloatIfDeterministic(fvalue);
17665 masm.moveFloat32ToGPR(fvalue, temp);
17666 break;
17668 case Scalar::Float64: {
17669 FloatRegister fvalue = ToFloatRegister(value);
17670 masm.canonicalizeDoubleIfDeterministic(fvalue);
17671 masm.moveDoubleToGPR64(fvalue, temp64);
17672 break;
17674 case Scalar::BigInt64:
17675 case Scalar::BigUint64:
17676 masm.loadBigInt64(ToRegister(value), temp64);
17677 break;
17678 case Scalar::Int8:
17679 case Scalar::Uint8:
17680 case Scalar::Uint8Clamped:
17681 default:
17682 MOZ_CRASH("Invalid typed array type");
17685 if (!noSwap) {
17686 // Swap the bytes in the loaded value.
17687 Label skip;
// Runtime endianness flag: skip the swap when the requested byte order
// already matches the host.
17688 if (!littleEndian->isConstant()) {
17689 masm.branch32(
17690 MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
17691 ToRegister(littleEndian), Imm32(0), &skip);
17694 switch (writeType) {
17695 case Scalar::Int16:
17696 masm.byteSwap16SignExtend(temp);
17697 break;
17698 case Scalar::Uint16:
17699 masm.byteSwap16ZeroExtend(temp);
17700 break;
17701 case Scalar::Int32:
17702 case Scalar::Uint32:
17703 case Scalar::Float32:
17704 masm.byteSwap32(temp);
17705 break;
17706 case Scalar::Float64:
17707 case Scalar::BigInt64:
17708 case Scalar::BigUint64:
17709 masm.byteSwap64(temp64);
17710 break;
17711 case Scalar::Int8:
17712 case Scalar::Uint8:
17713 case Scalar::Uint8Clamped:
17714 default:
17715 MOZ_CRASH("Invalid typed array type");
17718 if (skip.used()) {
17719 masm.bind(&skip);
17723 // Store the value into the destination.
17724 switch (writeType) {
17725 case Scalar::Int16:
17726 case Scalar::Uint16:
17727 masm.store16Unaligned(temp, dest);
17728 break;
17729 case Scalar::Int32:
17730 case Scalar::Uint32:
17731 case Scalar::Float32:
17732 masm.store32Unaligned(temp, dest);
17733 break;
17734 case Scalar::Float64:
17735 case Scalar::BigInt64:
17736 case Scalar::BigUint64:
17737 masm.store64Unaligned(temp64, dest);
17738 break;
17739 case Scalar::Int8:
17740 case Scalar::Uint8:
17741 case Scalar::Uint8Clamped:
17742 default:
17743 MOZ_CRASH("Invalid typed array type");
// Typed-array store that silently skips the write when the index is out of
// bounds (per the spec for out-of-bounds typed array writes).
17747 void CodeGenerator::visitStoreTypedArrayElementHole(
17748 LStoreTypedArrayElementHole* lir) {
17749 Register elements = ToRegister(lir->elements());
17750 const LAllocation* value = lir->value();
17752 Scalar::Type arrayType = lir->mir()->arrayType();
17754 Register index = ToRegister(lir->index());
17755 const LAllocation* length = lir->length();
17756 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
17758 Label skip;
17759 if (length->isRegister()) {
17760 masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
17761 } else {
17762 masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
17765 BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
17766 StoreToTypedArray(masm, arrayType, value, dest);
17768 masm.bind(&skip);
// BigInt64/BigUint64 flavor of the hole-tolerant typed-array store: skip the
// write on out-of-bounds indices; otherwise extract the BigInt's 64-bit
// digit value and store it. The 64-bit temp's scratch register doubles as
// the Spectre bounds-check temp.
17771 void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
17772 LStoreTypedArrayElementHoleBigInt* lir) {
17773 Register elements = ToRegister(lir->elements());
17774 Register value = ToRegister(lir->value());
17775 Register64 temp = ToRegister64(lir->temp());
17777 Scalar::Type arrayType = lir->mir()->arrayType();
17779 Register index = ToRegister(lir->index());
17780 const LAllocation* length = lir->length();
17781 Register spectreTemp = temp.scratchReg();
17783 Label skip;
17784 if (length->isRegister()) {
17785 masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
17786 } else {
17787 masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
17790 masm.loadBigInt64(value, temp);
17792 BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
17793 masm.storeToTypedBigIntArray(arrayType, temp, dest);
17795 masm.bind(&skip);
// Emit a memory barrier of the kind requested by the LIR node.
17798 void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
17799 masm.memoryBarrier(ins->type());
// Atomics.isLockFree(value): compute the boolean result into |output|.
17802 void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
17803 Register value = ToRegister(lir->value());
17804 Register output = ToRegister(lir->output());
17806 masm.atomicIsLockFreeJS(value, output);
// Clamp an int32 to the uint8 range in place (input and output share a
// register, as asserted).
17809 void CodeGenerator::visitClampIToUint8(LClampIToUint8* lir) {
17810 Register output = ToRegister(lir->output());
17811 MOZ_ASSERT(output == ToRegister(lir->input()));
17812 masm.clampIntToUint8(output);
// Clamp a double to the uint8 range (Uint8Clamped semantics).
17815 void CodeGenerator::visitClampDToUint8(LClampDToUint8* lir) {
17816 FloatRegister input = ToFloatRegister(lir->input());
17817 Register output = ToRegister(lir->output());
17818 masm.clampDoubleToUint8(input, output);
// Clamp an arbitrary boxed Value to the uint8 range. String inputs go
// through an out-of-line VM call (StringToNumber); inputs that cannot be
// clamped at all bail out.
17821 void CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) {
17822 ValueOperand operand = ToValue(lir, LClampVToUint8::InputIndex);
17823 FloatRegister tempFloat = ToFloatRegister(lir->temp0());
17824 Register output = ToRegister(lir->output());
17826 using Fn = bool (*)(JSContext*, JSString*, double*);
17827 OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
17828 lir, ArgList(output), StoreFloatRegisterTo(tempFloat));
17829 Label* stringEntry = oolString->entry();
17830 Label* stringRejoin = oolString->rejoin();
17832 Label fails;
17833 masm.clampValueToUint8(operand, stringEntry, stringRejoin, output, tempFloat,
17834 output, &fails);
17836 bailoutFrom(&fails, lir->snapshot());
// |key in object| implemented via an IC: attach an IonInIC with the key as a
// constant-or-register operand.
17839 void CodeGenerator::visitInCache(LInCache* ins) {
17840 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
17842 ConstantOrRegister key =
17843 toConstantOrRegister(ins, LInCache::LhsIndex, ins->mir()->key()->type());
17844 Register object = ToRegister(ins->rhs());
17845 Register output = ToRegister(ins->output());
17846 Register temp = ToRegister(ins->temp0());
17848 IonInIC cache(liveRegs, key, object, output, temp);
17849 addIC(ins, allocateIC(cache));
// |index in array| for dense elements: true iff the index is within the
// initialized length and the slot is not a hole. Negative indices bail out
// when the MIR requires the negative-int check.
17852 void CodeGenerator::visitInArray(LInArray* lir) {
17853 const MInArray* mir = lir->mir();
17854 Register elements = ToRegister(lir->elements());
17855 Register initLength = ToRegister(lir->initLength());
17856 Register output = ToRegister(lir->output());
17858 Label falseBranch, done, trueBranch;
17860 if (lir->index()->isConstant()) {
17861 int32_t index = ToInt32(lir->index());
// A constant negative index always deoptimizes: the MIR must have
// requested the negative-int check in that case.
17863 if (index < 0) {
17864 MOZ_ASSERT(mir->needsNegativeIntCheck());
17865 bailout(lir->snapshot());
17866 return;
17869 masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index),
17870 &falseBranch);
17872 NativeObject::elementsSizeMustNotOverflow();
17873 Address address = Address(elements, index * sizeof(Value));
17874 masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
17875 } else {
17876 Register index = ToRegister(lir->index());
// When a negative-int check is needed, a failed bounds check first goes
// through the bailout-on-negative test before answering false.
17878 Label negativeIntCheck;
17879 Label* failedInitLength = &falseBranch;
17880 if (mir->needsNegativeIntCheck()) {
17881 failedInitLength = &negativeIntCheck;
17884 masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);
17886 BaseObjectElementIndex address(elements, index);
17887 masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
17889 if (mir->needsNegativeIntCheck()) {
17890 masm.jump(&trueBranch);
17891 masm.bind(&negativeIntCheck);
17893 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
17895 masm.jump(&falseBranch);
17899 masm.bind(&trueBranch);
17900 masm.move32(Imm32(1), output);
17901 masm.jump(&done);
17903 masm.bind(&falseBranch);
17904 masm.move32(Imm32(0), output);
17905 masm.bind(&done);
// Guard that a dense element slot does not contain the magic hole value;
// deoptimize if it does.
17908 void CodeGenerator::visitGuardElementNotHole(LGuardElementNotHole* lir) {
17909 Register elements = ToRegister(lir->elements());
17910 const LAllocation* index = lir->index();
17912 Label testMagic;
17913 if (index->isConstant()) {
17914 Address address(elements, ToInt32(index) * sizeof(js::Value));
17915 masm.branchTestMagic(Assembler::Equal, address, &testMagic);
17916 } else {
17917 BaseObjectElementIndex address(elements, ToRegister(index));
17918 masm.branchTestMagic(Assembler::Equal, address, &testMagic);
17920 bailoutFrom(&testMagic, lir->snapshot());
// instanceof with a known-object lhs; shares emitInstanceOf with the boxed
// variant below.
17923 void CodeGenerator::visitInstanceOfO(LInstanceOfO* ins) {
17924 Register protoReg = ToRegister(ins->rhs());
17925 emitInstanceOf(ins, protoReg);
17928 void CodeGenerator::visitInstanceOfV(LInstanceOfV* ins) {
17929 Register protoReg = ToRegister(ins->rhs());
17930 emitInstanceOf(ins, protoReg);
17933 void CodeGenerator::emitInstanceOf(LInstruction* ins, Register protoReg) {
17934 // This path implements fun_hasInstance when the function's prototype is
17935 // known to be the object in protoReg
17937 Label done;
17938 Register output = ToRegister(ins->getDef(0));
17940 // If the lhs is a primitive, the result is false.
17941 Register objReg;
17942 if (ins->isInstanceOfV()) {
17943 Label isObject;
17944 ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
17945 masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
17946 masm.mov(ImmWord(0), output);
17947 masm.jump(&done);
17948 masm.bind(&isObject);
17949 objReg = masm.extractObject(lhsValue, output);
17950 } else {
17951 objReg = ToRegister(ins->toInstanceOfO()->lhs());
17954 // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
17955 // This follows the main loop of js::IsPrototypeOf, though additionally breaks
17956 // out of the loop on Proxy::LazyProto.
17958 // Load the lhs's prototype.
17959 masm.loadObjProto(objReg, output);
17961 Label testLazy;
17963 Label loopPrototypeChain;
17964 masm.bind(&loopPrototypeChain);
17966 // Test for the target prototype object.
17967 Label notPrototypeObject;
17968 masm.branchPtr(Assembler::NotEqual, output, protoReg, &notPrototypeObject);
17969 masm.mov(ImmWord(1), output);
17970 masm.jump(&done);
17971 masm.bind(&notPrototypeObject);
17973 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
17975 // Test for nullptr or Proxy::LazyProto
17976 masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);
17978 // Load the current object's prototype.
17979 masm.loadObjProto(output, output);
17981 masm.jump(&loopPrototypeChain);
17984 // Make a VM call if an object with a lazy proto was found on the prototype
17985 // chain. This currently occurs only for cross compartment wrappers, which
17986 // we do not expect to be compared with non-wrapper functions from this
17987 // compartment. Otherwise, we stopped on a nullptr prototype and the output
17988 // register is already correct.
17990 using Fn = bool (*)(JSContext*, HandleObject, JSObject*, bool*);
17991 auto* ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(protoReg, objReg),
17992 StoreRegisterTo(output));
17994 // Regenerate the original lhs object for the VM call.
17995 Label regenerate, *lazyEntry;
17996 if (objReg != output) {
17997 lazyEntry = ool->entry();
17998 } else {
17999 masm.bind(&regenerate);
18000 lazyEntry = &regenerate;
18001 if (ins->isInstanceOfV()) {
18002 ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
18003 objReg = masm.extractObject(lhsValue, output);
18004 } else {
18005 objReg = ToRegister(ins->toInstanceOfO()->lhs());
18007 MOZ_ASSERT(objReg == output);
18008 masm.jump(ool->entry());
18011 masm.bind(&testLazy);
18012 masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);
18014 masm.bind(&done);
18015 masm.bind(ool->rejoin());
18018 void CodeGenerator::visitInstanceOfCache(LInstanceOfCache* ins) {
18019 // The Lowering ensures that RHS is an object, and that LHS is a value.
18020 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
18021 TypedOrValueRegister lhs =
18022 TypedOrValueRegister(ToValue(ins, LInstanceOfCache::LHS));
18023 Register rhs = ToRegister(ins->rhs());
18024 Register output = ToRegister(ins->output());
18026 IonInstanceOfIC ic(liveRegs, lhs, rhs, output);
18027 addIC(ins, allocateIC(ic));
18030 void CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins) {
18031 const Register JSContextReg = ToRegister(ins->getJSContextReg());
18032 const Register ObjectReg = ToRegister(ins->getObjectReg());
18033 const Register PrivateReg = ToRegister(ins->getPrivReg());
18034 const Register ValueReg = ToRegister(ins->getValueReg());
18036 Label haveValue;
18037 if (ins->mir()->valueMayBeInSlot()) {
18038 size_t slot = ins->mir()->domMemberSlotIndex();
18039 // It's a bit annoying to redo these slot calculations, which duplcate
18040 // LSlots and a few other things like that, but I'm not sure there's a
18041 // way to reuse those here.
18043 // If this ever gets fixed to work with proxies (by not assuming that
18044 // reserved slot indices, which is what domMemberSlotIndex() returns,
18045 // match fixed slot indices), we can reenable MGetDOMProperty for
18046 // proxies in IonBuilder.
18047 if (slot < NativeObject::MAX_FIXED_SLOTS) {
18048 masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
18049 JSReturnOperand);
18050 } else {
18051 // It's a dynamic slot.
18052 slot -= NativeObject::MAX_FIXED_SLOTS;
18053 // Use PrivateReg as a scratch register for the slots pointer.
18054 masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
18055 PrivateReg);
18056 masm.loadValue(Address(PrivateReg, slot * sizeof(js::Value)),
18057 JSReturnOperand);
18059 masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
18062 DebugOnly<uint32_t> initialStack = masm.framePushed();
18064 masm.checkStackAlignment();
18066 // Make space for the outparam. Pre-initialize it to UndefinedValue so we
18067 // can trace it at GC time.
18068 masm.Push(UndefinedValue());
18069 // We pass the pointer to our out param as an instance of
18070 // JSJitGetterCallArgs, since on the binary level it's the same thing.
18071 static_assert(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
18072 masm.moveStackPtrTo(ValueReg);
18074 masm.Push(ObjectReg);
18076 LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());
18078 // Rooting will happen at GC time.
18079 masm.moveStackPtrTo(ObjectReg);
18081 Realm* getterRealm = ins->mir()->getterRealm();
18082 if (gen->realm->realmPtr() != getterRealm) {
18083 // We use JSContextReg as scratch register here.
18084 masm.switchToRealm(getterRealm, JSContextReg);
18087 uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
18088 masm.loadJSContext(JSContextReg);
18089 masm.enterFakeExitFrame(JSContextReg, JSContextReg,
18090 ExitFrameType::IonDOMGetter);
18092 markSafepointAt(safepointOffset, ins);
18094 masm.setupAlignedABICall();
18095 masm.loadJSContext(JSContextReg);
18096 masm.passABIArg(JSContextReg);
18097 masm.passABIArg(ObjectReg);
18098 masm.passABIArg(PrivateReg);
18099 masm.passABIArg(ValueReg);
18100 ensureOsiSpace();
18101 masm.callWithABI(DynamicFunction<JSJitGetterOp>(ins->mir()->fun()),
18102 ABIType::General,
18103 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
18105 if (ins->mir()->isInfallible()) {
18106 masm.loadValue(Address(masm.getStackPointer(),
18107 IonDOMExitFrameLayout::offsetOfResult()),
18108 JSReturnOperand);
18109 } else {
18110 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
18112 masm.loadValue(Address(masm.getStackPointer(),
18113 IonDOMExitFrameLayout::offsetOfResult()),
18114 JSReturnOperand);
18117 // Switch back to the current realm if needed. Note: if the getter threw an
18118 // exception, the exception handler will do this.
18119 if (gen->realm->realmPtr() != getterRealm) {
18120 static_assert(!JSReturnOperand.aliases(ReturnReg),
18121 "Clobbering ReturnReg should not affect the return value");
18122 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
18125 // Until C++ code is instrumented against Spectre, prevent speculative
18126 // execution from returning any private data.
18127 if (JitOptions.spectreJitToCxxCalls && ins->mir()->hasLiveDefUses()) {
18128 masm.speculationBarrier();
18131 masm.adjustStack(IonDOMExitFrameLayout::Size());
18133 masm.bind(&haveValue);
18135 MOZ_ASSERT(masm.framePushed() == initialStack);
18138 void CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins) {
18139 // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
18140 // use an LLoadFixedSlotV or some subclass of it for this case: that would
18141 // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
18142 // we'd have to duplicate a bunch of stuff we now get for free from
18143 // MGetDOMProperty.
18145 // If this ever gets fixed to work with proxies (by not assuming that
18146 // reserved slot indices, which is what domMemberSlotIndex() returns,
18147 // match fixed slot indices), we can reenable MGetDOMMember for
18148 // proxies in IonBuilder.
18149 Register object = ToRegister(ins->object());
18150 size_t slot = ins->mir()->domMemberSlotIndex();
18151 ValueOperand result = ToOutValue(ins);
18153 masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
18154 result);
18157 void CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins) {
18158 // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
18159 // use an LLoadFixedSlotT or some subclass of it for this case: that would
18160 // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
18161 // we'd have to duplicate a bunch of stuff we now get for free from
18162 // MGetDOMProperty.
18164 // If this ever gets fixed to work with proxies (by not assuming that
18165 // reserved slot indices, which is what domMemberSlotIndex() returns,
18166 // match fixed slot indices), we can reenable MGetDOMMember for
18167 // proxies in IonBuilder.
18168 Register object = ToRegister(ins->object());
18169 size_t slot = ins->mir()->domMemberSlotIndex();
18170 AnyRegister result = ToAnyRegister(ins->getDef(0));
18171 MIRType type = ins->mir()->type();
18173 masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
18174 type, result);
18177 void CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins) {
18178 const Register JSContextReg = ToRegister(ins->getJSContextReg());
18179 const Register ObjectReg = ToRegister(ins->getObjectReg());
18180 const Register PrivateReg = ToRegister(ins->getPrivReg());
18181 const Register ValueReg = ToRegister(ins->getValueReg());
18183 DebugOnly<uint32_t> initialStack = masm.framePushed();
18185 masm.checkStackAlignment();
18187 // Push the argument. Rooting will happen at GC time.
18188 ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
18189 masm.Push(argVal);
18190 // We pass the pointer to our out param as an instance of
18191 // JSJitGetterCallArgs, since on the binary level it's the same thing.
18192 static_assert(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
18193 masm.moveStackPtrTo(ValueReg);
18195 masm.Push(ObjectReg);
18197 LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());
18199 // Rooting will happen at GC time.
18200 masm.moveStackPtrTo(ObjectReg);
18202 Realm* setterRealm = ins->mir()->setterRealm();
18203 if (gen->realm->realmPtr() != setterRealm) {
18204 // We use JSContextReg as scratch register here.
18205 masm.switchToRealm(setterRealm, JSContextReg);
18208 uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
18209 masm.loadJSContext(JSContextReg);
18210 masm.enterFakeExitFrame(JSContextReg, JSContextReg,
18211 ExitFrameType::IonDOMSetter);
18213 markSafepointAt(safepointOffset, ins);
18215 masm.setupAlignedABICall();
18216 masm.loadJSContext(JSContextReg);
18217 masm.passABIArg(JSContextReg);
18218 masm.passABIArg(ObjectReg);
18219 masm.passABIArg(PrivateReg);
18220 masm.passABIArg(ValueReg);
18221 ensureOsiSpace();
18222 masm.callWithABI(DynamicFunction<JSJitSetterOp>(ins->mir()->fun()),
18223 ABIType::General,
18224 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
18226 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
18228 // Switch back to the current realm if needed. Note: if the setter threw an
18229 // exception, the exception handler will do this.
18230 if (gen->realm->realmPtr() != setterRealm) {
18231 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
18234 masm.adjustStack(IonDOMExitFrameLayout::Size());
18236 MOZ_ASSERT(masm.framePushed() == initialStack);
18239 void CodeGenerator::visitLoadDOMExpandoValue(LLoadDOMExpandoValue* ins) {
18240 Register proxy = ToRegister(ins->proxy());
18241 ValueOperand out = ToOutValue(ins);
18243 masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
18244 out.scratchReg());
18245 masm.loadValue(Address(out.scratchReg(),
18246 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
18247 out);
18250 void CodeGenerator::visitLoadDOMExpandoValueGuardGeneration(
18251 LLoadDOMExpandoValueGuardGeneration* ins) {
18252 Register proxy = ToRegister(ins->proxy());
18253 ValueOperand out = ToOutValue(ins);
18255 Label bail;
18256 masm.loadDOMExpandoValueGuardGeneration(proxy, out,
18257 ins->mir()->expandoAndGeneration(),
18258 ins->mir()->generation(), &bail);
18259 bailoutFrom(&bail, ins->snapshot());
18262 void CodeGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
18263 LLoadDOMExpandoValueIgnoreGeneration* ins) {
18264 Register proxy = ToRegister(ins->proxy());
18265 ValueOperand out = ToOutValue(ins);
18267 masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
18268 out.scratchReg());
18270 // Load the ExpandoAndGeneration* from the PrivateValue.
18271 masm.loadPrivate(
18272 Address(out.scratchReg(),
18273 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
18274 out.scratchReg());
18276 // Load expandoAndGeneration->expando into the output Value register.
18277 masm.loadValue(
18278 Address(out.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), out);
18281 void CodeGenerator::visitGuardDOMExpandoMissingOrGuardShape(
18282 LGuardDOMExpandoMissingOrGuardShape* ins) {
18283 Register temp = ToRegister(ins->temp0());
18284 ValueOperand input =
18285 ToValue(ins, LGuardDOMExpandoMissingOrGuardShape::InputIndex);
18287 Label done;
18288 masm.branchTestUndefined(Assembler::Equal, input, &done);
18290 masm.debugAssertIsObject(input);
18291 masm.unboxObject(input, temp);
18292 // The expando object is not used in this case, so we don't need Spectre
18293 // mitigations.
18294 Label bail;
18295 masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, temp,
18296 ins->mir()->shape(), &bail);
18297 bailoutFrom(&bail, ins->snapshot());
18299 masm.bind(&done);
18302 class OutOfLineIsCallable : public OutOfLineCodeBase<CodeGenerator> {
18303 Register object_;
18304 Register output_;
18306 public:
18307 OutOfLineIsCallable(Register object, Register output)
18308 : object_(object), output_(output) {}
18310 void accept(CodeGenerator* codegen) override {
18311 codegen->visitOutOfLineIsCallable(this);
18313 Register object() const { return object_; }
18314 Register output() const { return output_; }
18317 void CodeGenerator::visitIsCallableO(LIsCallableO* ins) {
18318 Register object = ToRegister(ins->object());
18319 Register output = ToRegister(ins->output());
18321 OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(object, output);
18322 addOutOfLineCode(ool, ins->mir());
18324 masm.isCallable(object, output, ool->entry());
18326 masm.bind(ool->rejoin());
18329 void CodeGenerator::visitIsCallableV(LIsCallableV* ins) {
18330 ValueOperand val = ToValue(ins, LIsCallableV::ObjectIndex);
18331 Register output = ToRegister(ins->output());
18332 Register temp = ToRegister(ins->temp0());
18334 Label notObject;
18335 masm.fallibleUnboxObject(val, temp, &notObject);
18337 OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(temp, output);
18338 addOutOfLineCode(ool, ins->mir());
18340 masm.isCallable(temp, output, ool->entry());
18341 masm.jump(ool->rejoin());
18343 masm.bind(&notObject);
18344 masm.move32(Imm32(0), output);
18346 masm.bind(ool->rejoin());
18349 void CodeGenerator::visitOutOfLineIsCallable(OutOfLineIsCallable* ool) {
18350 Register object = ool->object();
18351 Register output = ool->output();
18353 saveVolatile(output);
18354 using Fn = bool (*)(JSObject* obj);
18355 masm.setupAlignedABICall();
18356 masm.passABIArg(object);
18357 masm.callWithABI<Fn, ObjectIsCallable>();
18358 masm.storeCallBoolResult(output);
18359 restoreVolatile(output);
18360 masm.jump(ool->rejoin());
18363 class OutOfLineIsConstructor : public OutOfLineCodeBase<CodeGenerator> {
18364 LIsConstructor* ins_;
18366 public:
18367 explicit OutOfLineIsConstructor(LIsConstructor* ins) : ins_(ins) {}
18369 void accept(CodeGenerator* codegen) override {
18370 codegen->visitOutOfLineIsConstructor(this);
18372 LIsConstructor* ins() const { return ins_; }
18375 void CodeGenerator::visitIsConstructor(LIsConstructor* ins) {
18376 Register object = ToRegister(ins->object());
18377 Register output = ToRegister(ins->output());
18379 OutOfLineIsConstructor* ool = new (alloc()) OutOfLineIsConstructor(ins);
18380 addOutOfLineCode(ool, ins->mir());
18382 masm.isConstructor(object, output, ool->entry());
18384 masm.bind(ool->rejoin());
18387 void CodeGenerator::visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool) {
18388 LIsConstructor* ins = ool->ins();
18389 Register object = ToRegister(ins->object());
18390 Register output = ToRegister(ins->output());
18392 saveVolatile(output);
18393 using Fn = bool (*)(JSObject* obj);
18394 masm.setupAlignedABICall();
18395 masm.passABIArg(object);
18396 masm.callWithABI<Fn, ObjectIsConstructor>();
18397 masm.storeCallBoolResult(output);
18398 restoreVolatile(output);
18399 masm.jump(ool->rejoin());
18402 void CodeGenerator::visitIsCrossRealmArrayConstructor(
18403 LIsCrossRealmArrayConstructor* ins) {
18404 Register object = ToRegister(ins->object());
18405 Register output = ToRegister(ins->output());
18407 masm.setIsCrossRealmArrayConstructor(object, output);
18410 static void EmitObjectIsArray(MacroAssembler& masm, OutOfLineCode* ool,
18411 Register obj, Register output,
18412 Label* notArray = nullptr) {
18413 masm.loadObjClassUnsafe(obj, output);
18415 Label isArray;
18416 masm.branchPtr(Assembler::Equal, output, ImmPtr(&ArrayObject::class_),
18417 &isArray);
18419 // Branch to OOL path if it's a proxy.
18420 masm.branchTestClassIsProxy(true, output, ool->entry());
18422 if (notArray) {
18423 masm.bind(notArray);
18425 masm.move32(Imm32(0), output);
18426 masm.jump(ool->rejoin());
18428 masm.bind(&isArray);
18429 masm.move32(Imm32(1), output);
18431 masm.bind(ool->rejoin());
18434 void CodeGenerator::visitIsArrayO(LIsArrayO* lir) {
18435 Register object = ToRegister(lir->object());
18436 Register output = ToRegister(lir->output());
18438 using Fn = bool (*)(JSContext*, HandleObject, bool*);
18439 OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
18440 lir, ArgList(object), StoreRegisterTo(output));
18441 EmitObjectIsArray(masm, ool, object, output);
18444 void CodeGenerator::visitIsArrayV(LIsArrayV* lir) {
18445 ValueOperand val = ToValue(lir, LIsArrayV::ValueIndex);
18446 Register output = ToRegister(lir->output());
18447 Register temp = ToRegister(lir->temp0());
18449 Label notArray;
18450 masm.fallibleUnboxObject(val, temp, &notArray);
18452 using Fn = bool (*)(JSContext*, HandleObject, bool*);
18453 OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
18454 lir, ArgList(temp), StoreRegisterTo(output));
18455 EmitObjectIsArray(masm, ool, temp, output, &notArray);
18458 void CodeGenerator::visitIsTypedArray(LIsTypedArray* lir) {
18459 Register object = ToRegister(lir->object());
18460 Register output = ToRegister(lir->output());
18462 OutOfLineCode* ool = nullptr;
18463 if (lir->mir()->isPossiblyWrapped()) {
18464 using Fn = bool (*)(JSContext*, JSObject*, bool*);
18465 ool = oolCallVM<Fn, jit::IsPossiblyWrappedTypedArray>(
18466 lir, ArgList(object), StoreRegisterTo(output));
18469 Label notTypedArray;
18470 Label done;
18472 masm.loadObjClassUnsafe(object, output);
18473 masm.branchIfClassIsNotTypedArray(output, &notTypedArray);
18475 masm.move32(Imm32(1), output);
18476 masm.jump(&done);
18477 masm.bind(&notTypedArray);
18478 if (ool) {
18479 masm.branchTestClassIsProxy(true, output, ool->entry());
18481 masm.move32(Imm32(0), output);
18482 masm.bind(&done);
18483 if (ool) {
18484 masm.bind(ool->rejoin());
18488 void CodeGenerator::visitIsObject(LIsObject* ins) {
18489 Register output = ToRegister(ins->output());
18490 ValueOperand value = ToValue(ins, LIsObject::ObjectIndex);
18491 masm.testObjectSet(Assembler::Equal, value, output);
18494 void CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins) {
18495 ValueOperand value = ToValue(ins, LIsObjectAndBranch::Input);
18496 testObjectEmitBranch(Assembler::Equal, value, ins->ifTrue(), ins->ifFalse());
18499 void CodeGenerator::visitIsNullOrUndefined(LIsNullOrUndefined* ins) {
18500 Register output = ToRegister(ins->output());
18501 ValueOperand value = ToValue(ins, LIsNullOrUndefined::InputIndex);
18503 Label isNotNull, done;
18504 masm.branchTestNull(Assembler::NotEqual, value, &isNotNull);
18506 masm.move32(Imm32(1), output);
18507 masm.jump(&done);
18509 masm.bind(&isNotNull);
18510 masm.testUndefinedSet(Assembler::Equal, value, output);
18512 masm.bind(&done);
18515 void CodeGenerator::visitIsNullOrUndefinedAndBranch(
18516 LIsNullOrUndefinedAndBranch* ins) {
18517 Label* ifTrue = getJumpLabelForBranch(ins->ifTrue());
18518 Label* ifFalse = getJumpLabelForBranch(ins->ifFalse());
18519 ValueOperand value = ToValue(ins, LIsNullOrUndefinedAndBranch::Input);
18521 ScratchTagScope tag(masm, value);
18522 masm.splitTagForTest(value, tag);
18524 masm.branchTestNull(Assembler::Equal, tag, ifTrue);
18525 masm.branchTestUndefined(Assembler::Equal, tag, ifTrue);
18527 if (!isNextBlock(ins->ifFalse()->lir())) {
18528 masm.jump(ifFalse);
18532 void CodeGenerator::loadOutermostJSScript(Register reg) {
18533 // The "outermost" JSScript means the script that we are compiling
18534 // basically; this is not always the script associated with the
18535 // current basic block, which might be an inlined script.
18537 MIRGraph& graph = current->mir()->graph();
18538 MBasicBlock* entryBlock = graph.entryBlock();
18539 masm.movePtr(ImmGCPtr(entryBlock->info().script()), reg);
18542 void CodeGenerator::loadJSScriptForBlock(MBasicBlock* block, Register reg) {
18543 // The current JSScript means the script for the current
18544 // basic block. This may be an inlined script.
18546 JSScript* script = block->info().script();
18547 masm.movePtr(ImmGCPtr(script), reg);
18550 void CodeGenerator::visitHasClass(LHasClass* ins) {
18551 Register lhs = ToRegister(ins->lhs());
18552 Register output = ToRegister(ins->output());
18554 masm.loadObjClassUnsafe(lhs, output);
18555 masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()),
18556 output);
18559 void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
18560 Register lhs = ToRegister(ins->lhs());
18561 Register temp = ToRegister(ins->temp0());
18563 // branchTestObjClass may zero the object register on speculative paths
18564 // (we should have a defineReuseInput allocation in this case).
18565 Register spectreRegToZero = lhs;
18567 Label notEqual;
18569 masm.branchTestObjClass(Assembler::NotEqual, lhs, ins->mir()->getClass(),
18570 temp, spectreRegToZero, &notEqual);
18572 // Can't return null-return here, so bail.
18573 bailoutFrom(&notEqual, ins->snapshot());
18576 void CodeGenerator::visitGuardToEitherClass(LGuardToEitherClass* ins) {
18577 Register lhs = ToRegister(ins->lhs());
18578 Register temp = ToRegister(ins->temp0());
18580 // branchTestObjClass may zero the object register on speculative paths
18581 // (we should have a defineReuseInput allocation in this case).
18582 Register spectreRegToZero = lhs;
18584 Label notEqual;
18586 masm.branchTestObjClass(Assembler::NotEqual, lhs,
18587 {ins->mir()->getClass1(), ins->mir()->getClass2()},
18588 temp, spectreRegToZero, &notEqual);
18590 // Can't return null-return here, so bail.
18591 bailoutFrom(&notEqual, ins->snapshot());
18594 void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
18595 Register lhs = ToRegister(ins->lhs());
18596 Register temp = ToRegister(ins->temp0());
18598 // branchTestObjClass may zero the object register on speculative paths
18599 // (we should have a defineReuseInput allocation in this case).
18600 Register spectreRegToZero = lhs;
18602 Label notEqual;
18604 masm.branchTestObjIsFunction(Assembler::NotEqual, lhs, temp, spectreRegToZero,
18605 &notEqual);
18607 // Can't return null-return here, so bail.
18608 bailoutFrom(&notEqual, ins->snapshot());
18611 void CodeGenerator::visitObjectClassToString(LObjectClassToString* lir) {
18612 Register obj = ToRegister(lir->lhs());
18613 Register temp = ToRegister(lir->temp0());
18615 using Fn = JSString* (*)(JSContext*, JSObject*);
18616 masm.setupAlignedABICall();
18617 masm.loadJSContext(temp);
18618 masm.passABIArg(temp);
18619 masm.passABIArg(obj);
18620 masm.callWithABI<Fn, js::ObjectClassToString>();
18622 bailoutCmpPtr(Assembler::Equal, ReturnReg, ImmWord(0), lir->snapshot());
18625 void CodeGenerator::visitWasmParameter(LWasmParameter* lir) {}
18627 void CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir) {}
18629 void CodeGenerator::visitWasmReturn(LWasmReturn* lir) {
18630 // Don't emit a jump to the return label if this is the last block.
18631 if (current->mir() != *gen->graph().poBegin()) {
18632 masm.jump(&returnLabel_);
18636 void CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir) {
18637 // Don't emit a jump to the return label if this is the last block.
18638 if (current->mir() != *gen->graph().poBegin()) {
18639 masm.jump(&returnLabel_);
18643 void CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir) {
18644 // Don't emit a jump to the return label if this is the last block.
18645 if (current->mir() != *gen->graph().poBegin()) {
18646 masm.jump(&returnLabel_);
18650 void CodeGenerator::emitAssertRangeI(MIRType type, const Range* r,
18651 Register input) {
18652 // Check the lower bound.
18653 if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
18654 Label success;
18655 if (type == MIRType::Int32 || type == MIRType::Boolean) {
18656 masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
18657 &success);
18658 } else {
18659 MOZ_ASSERT(type == MIRType::IntPtr);
18660 masm.branchPtr(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
18661 &success);
18663 masm.assumeUnreachable(
18664 "Integer input should be equal or higher than Lowerbound.");
18665 masm.bind(&success);
18668 // Check the upper bound.
18669 if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
18670 Label success;
18671 if (type == MIRType::Int32 || type == MIRType::Boolean) {
18672 masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
18673 &success);
18674 } else {
18675 MOZ_ASSERT(type == MIRType::IntPtr);
18676 masm.branchPtr(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
18677 &success);
18679 masm.assumeUnreachable(
18680 "Integer input should be lower or equal than Upperbound.");
18681 masm.bind(&success);
18684 // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
18685 // r->exponent(), there's nothing to check, because if we ended up in the
18686 // integer range checking code, the value is already in an integer register
18687 // in the integer range.
18690 void CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input,
18691 FloatRegister temp) {
18692 // Check the lower bound.
18693 if (r->hasInt32LowerBound()) {
18694 Label success;
18695 masm.loadConstantDouble(r->lower(), temp);
18696 if (r->canBeNaN()) {
18697 masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
18699 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
18700 &success);
18701 masm.assumeUnreachable(
18702 "Double input should be equal or higher than Lowerbound.");
18703 masm.bind(&success);
18705 // Check the upper bound.
18706 if (r->hasInt32UpperBound()) {
18707 Label success;
18708 masm.loadConstantDouble(r->upper(), temp);
18709 if (r->canBeNaN()) {
18710 masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
18712 masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
18713 masm.assumeUnreachable(
18714 "Double input should be lower or equal than Upperbound.");
18715 masm.bind(&success);
18718 // This code does not yet check r->canHaveFractionalPart(). This would require
18719 // new assembler interfaces to make rounding instructions available.
18721 if (!r->canBeNegativeZero()) {
18722 Label success;
18724 // First, test for being equal to 0.0, which also includes -0.0.
18725 masm.loadConstantDouble(0.0, temp);
18726 masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp,
18727 &success);
18729 // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
18730 // -Infinity instead of Infinity.
18731 masm.loadConstantDouble(1.0, temp);
18732 masm.divDouble(input, temp);
18733 masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);
18735 masm.assumeUnreachable("Input shouldn't be negative zero.");
18737 masm.bind(&success);
18740 if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
18741 r->exponent() < FloatingPoint<double>::kExponentBias) {
18742 // Check the bounds implied by the maximum exponent.
18743 Label exponentLoOk;
18744 masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
18745 masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
18746 masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp,
18747 &exponentLoOk);
18748 masm.assumeUnreachable("Check for exponent failed.");
18749 masm.bind(&exponentLoOk);
18751 Label exponentHiOk;
18752 masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
18753 masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
18754 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
18755 &exponentHiOk);
18756 masm.assumeUnreachable("Check for exponent failed.");
18757 masm.bind(&exponentHiOk);
18758 } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
18759 // If we think the value can't be NaN, check that it isn't.
18760 Label notnan;
18761 masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
18762 masm.assumeUnreachable("Input shouldn't be NaN.");
18763 masm.bind(&notnan);
18765 // If we think the value also can't be an infinity, check that it isn't.
18766 if (!r->canBeInfiniteOrNaN()) {
18767 Label notposinf;
18768 masm.loadConstantDouble(PositiveInfinity<double>(), temp);
18769 masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
18770 masm.assumeUnreachable("Input shouldn't be +Inf.");
18771 masm.bind(&notposinf);
18773 Label notneginf;
18774 masm.loadConstantDouble(NegativeInfinity<double>(), temp);
18775 masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
18776 masm.assumeUnreachable("Input shouldn't be -Inf.");
18777 masm.bind(&notneginf);
18782 void CodeGenerator::visitAssertClass(LAssertClass* ins) {
18783 Register obj = ToRegister(ins->input());
18784 Register temp = ToRegister(ins->getTemp(0));
18786 Label success;
18787 if (ins->mir()->getClass() == &FunctionClass) {
18788 // Allow both possible function classes here.
18789 masm.branchTestObjIsFunctionNoSpectreMitigations(Assembler::Equal, obj,
18790 temp, &success);
18791 } else {
18792 masm.branchTestObjClassNoSpectreMitigations(
18793 Assembler::Equal, obj, ins->mir()->getClass(), temp, &success);
18795 masm.assumeUnreachable("Wrong KnownClass during run-time");
18796 masm.bind(&success);
18799 void CodeGenerator::visitAssertShape(LAssertShape* ins) {
18800 Register obj = ToRegister(ins->input());
18802 Label success;
18803 masm.branchTestObjShapeNoSpectreMitigations(Assembler::Equal, obj,
18804 ins->mir()->shape(), &success);
18805 masm.assumeUnreachable("Wrong Shape during run-time");
18806 masm.bind(&success);
18809 void CodeGenerator::visitAssertRangeI(LAssertRangeI* ins) {
18810 Register input = ToRegister(ins->input());
18811 const Range* r = ins->range();
18813 emitAssertRangeI(ins->mir()->input()->type(), r, input);
18816 void CodeGenerator::visitAssertRangeD(LAssertRangeD* ins) {
18817 FloatRegister input = ToFloatRegister(ins->input());
18818 FloatRegister temp = ToFloatRegister(ins->temp());
18819 const Range* r = ins->range();
18821 emitAssertRangeD(r, input, temp);
18824 void CodeGenerator::visitAssertRangeF(LAssertRangeF* ins) {
18825 FloatRegister input = ToFloatRegister(ins->input());
18826 FloatRegister temp = ToFloatRegister(ins->temp());
18827 FloatRegister temp2 = ToFloatRegister(ins->temp2());
18829 const Range* r = ins->range();
18831 masm.convertFloat32ToDouble(input, temp);
18832 emitAssertRangeD(r, temp, temp2);
18835 void CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) {
18836 const Range* r = ins->range();
18837 const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
18838 Label done;
18841 ScratchTagScope tag(masm, value);
18842 masm.splitTagForTest(value, tag);
18845 Label isNotInt32;
18846 masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
18848 ScratchTagScopeRelease _(&tag);
18849 Register unboxInt32 = ToTempUnboxRegister(ins->temp());
18850 Register input = masm.extractInt32(value, unboxInt32);
18851 emitAssertRangeI(MIRType::Int32, r, input);
18852 masm.jump(&done);
18854 masm.bind(&isNotInt32);
18858 Label isNotDouble;
18859 masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
18861 ScratchTagScopeRelease _(&tag);
18862 FloatRegister input = ToFloatRegister(ins->floatTemp1());
18863 FloatRegister temp = ToFloatRegister(ins->floatTemp2());
18864 masm.unboxDouble(value, input);
18865 emitAssertRangeD(r, input, temp);
18866 masm.jump(&done);
18868 masm.bind(&isNotDouble);
18872 masm.assumeUnreachable("Incorrect range for Value.");
18873 masm.bind(&done);
// Poll the runtime's interrupt bits; when non-zero, call InterruptCheck in the
// VM via an out-of-line path.
void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) {
  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, InterruptCheck>(lir, ArgList(), StoreNothing());

  const void* interruptAddr = gen->runtime->addressOfInterruptBits();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0),
                ool->entry());
  masm.bind(ool->rejoin());
}
// Out-of-line path for a resumable wasm trap (e.g. interrupt checks): emit the
// trap, record a safepoint so a stack map can be built, then rejoin the
// in-line code.
void CodeGenerator::visitOutOfLineResumableWasmTrap(
    OutOfLineResumableWasmTrap* ool) {
  LInstruction* lir = ool->lir();
  masm.wasmTrap(ool->trap(), ool->bytecodeOffset());

  markSafepointAt(masm.currentOffset(), lir);

  // Note that masm.framePushed() doesn't include the register dump area.
  // That will be taken into account when the StackMap is created from the
  // LSafepoint.
  lir->safepoint()->setFramePushedAtStackMapBase(ool->framePushed());
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::Trap);

  masm.jump(ool->rejoin());
}
// Out-of-line path for a non-resumable wasm trap: just emit the trap; control
// does not return here, so no safepoint or rejoin jump is needed.
void CodeGenerator::visitOutOfLineAbortingWasmTrap(
    OutOfLineAbortingWasmTrap* ool) {
  masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
}
// Poll the wasm Instance's interrupt flag; when set, take a resumable
// CheckInterrupt trap out-of-line.
void CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  OutOfLineResumableWasmTrap* ool = new (alloc()) OutOfLineResumableWasmTrap(
      lir, masm.framePushed(), lir->mir()->bytecodeOffset(),
      wasm::Trap::CheckInterrupt);
  addOutOfLineCode(ool, lir->mir());
  masm.branch32(
      Assembler::NotEqual,
      Address(ToRegister(lir->instance()), wasm::Instance::offsetOfInterrupt()),
      Imm32(0), ool->entry());
  masm.bind(ool->rejoin());
}
18922 void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
18923 MOZ_ASSERT(gen->compilingWasm());
18924 const MWasmTrap* mir = lir->mir();
18926 masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
// Trap if |ref| is a null wasm anyref; otherwise fall through.
void CodeGenerator::visitWasmTrapIfNull(LWasmTrapIfNull* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  const MWasmTrapIfNull* mir = lir->mir();
  Label nonNull;
  Register ref = ToRegister(lir->ref());

  masm.branchWasmAnyRefIsNull(false, ref, &nonNull);
  masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
  masm.bind(&nonNull);
}
// Produce a 0/1 result for "ref is a subtype of an abstract (non-concrete)
// heap type". Abstract checks need no super type vector, so superSTV and
// scratch2 are passed as invalid registers.
void CodeGenerator::visitWasmRefIsSubtypeOfAbstract(
    LWasmRefIsSubtypeOfAbstract* ins) {
  MOZ_ASSERT(gen->compilingWasm());

  const MWasmRefIsSubtypeOfAbstract* mir = ins->mir();
  MOZ_ASSERT(!mir->destType().isTypeRef());

  Register ref = ToRegister(ins->ref());
  Register superSTV = Register::Invalid();
  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  Register scratch2 = Register::Invalid();
  Register result = ToRegister(ins->output());
  Label onSuccess;
  Label onFail;
  Label join;
  masm.branchWasmRefIsSubtype(ref, mir->sourceType(), mir->destType(),
                              &onSuccess, /*onSuccess=*/true, superSTV,
                              scratch1, scratch2);
  masm.bind(&onFail);
  // xor with itself zeroes the result register (failure case).
  masm.xor32(result, result);
  masm.jump(&join);
  masm.bind(&onSuccess);
  masm.move32(Imm32(1), result);
  masm.bind(&join);
}
// Produce a 0/1 result for "ref is a subtype of a concrete type". Concrete
// checks consult the destination type's super type vector (superSTV).
void CodeGenerator::visitWasmRefIsSubtypeOfConcrete(
    LWasmRefIsSubtypeOfConcrete* ins) {
  MOZ_ASSERT(gen->compilingWasm());

  const MWasmRefIsSubtypeOfConcrete* mir = ins->mir();
  MOZ_ASSERT(mir->destType().isTypeRef());

  Register ref = ToRegister(ins->ref());
  Register superSTV = ToRegister(ins->superSTV());
  Register scratch1 = ToRegister(ins->temp0());
  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  Register result = ToRegister(ins->output());
  Label onSuccess;
  Label join;
  masm.branchWasmRefIsSubtype(ref, mir->sourceType(), mir->destType(),
                              &onSuccess, /*onSuccess=*/true, superSTV,
                              scratch1, scratch2);
  masm.move32(Imm32(0), result);
  masm.jump(&join);
  masm.bind(&onSuccess);
  masm.move32(Imm32(1), result);
  masm.bind(&join);
}
// Fused test-and-branch form of the abstract-type subtype check: jump directly
// to the true/false successors instead of materializing a boolean.
void CodeGenerator::visitWasmRefIsSubtypeOfAbstractAndBranch(
    LWasmRefIsSubtypeOfAbstractAndBranch* ins) {
  MOZ_ASSERT(gen->compilingWasm());
  Register ref = ToRegister(ins->ref());
  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  masm.branchWasmRefIsSubtype(
      ref, ins->sourceType(), ins->destType(), onSuccess, /*onSuccess=*/true,
      Register::Invalid(), scratch1, Register::Invalid());
  masm.jump(onFail);
}
// Fused test-and-branch form of the concrete-type subtype check (uses the
// super type vector), jumping directly to the true/false successors.
void CodeGenerator::visitWasmRefIsSubtypeOfConcreteAndBranch(
    LWasmRefIsSubtypeOfConcreteAndBranch* ins) {
  MOZ_ASSERT(gen->compilingWasm());
  Register ref = ToRegister(ins->ref());
  Register superSTV = ToRegister(ins->superSTV());
  Register scratch1 = ToRegister(ins->temp0());
  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  masm.branchWasmRefIsSubtype(ref, ins->sourceType(), ins->destType(),
                              onSuccess, /*onSuccess=*/true, superSTV, scratch1,
                              scratch2);
  masm.jump(onFail);
}
// Call a wasm struct-allocation instance function |fun|, preserving
// InstanceReg and all live registers, and record a safepoint so a stack map
// covers the call. The allocated pointer is left in |output|.
void CodeGenerator::callWasmStructAllocFun(LInstruction* lir,
                                           wasm::SymbolicAddress fun,
                                           Register typeDefData,
                                           Register output) {
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();
  saveLive(lir);

  masm.setupWasmABICall();
  masm.passABIArg(InstanceReg);
  masm.passABIArg(typeDefData);
  // Offset of the saved InstanceReg relative to the current frame depth.
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  CodeOffset offset =
      masm.callWithABI(wasm::BytecodeOffset(0), fun,
                       mozilla::Some(instanceOffset), ABIType::General);
  masm.storeCallPointerResult(output);

  markSafepointAt(offset.offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);

  restoreLive(lir);
  masm.Pop(InstanceReg);
#if JS_CODEGEN_ARM64
  // Keep the ARM64 pseudo stack pointer in sync after the pop.
  masm.syncStackPtr();
#endif
}
// Out-of-line path to allocate wasm GC structs
class OutOfLineWasmNewStruct : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;          // instruction requesting the allocation
  wasm::SymbolicAddress fun_;  // instance function that performs it
  Register typeDefData_;       // register holding the type-definition data
  Register output_;            // register receiving the new struct pointer

 public:
  OutOfLineWasmNewStruct(LInstruction* lir, wasm::SymbolicAddress fun,
                         Register typeDefData, Register output)
      : lir_(lir), fun_(fun), typeDefData_(typeDefData), output_(output) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineWasmNewStruct(this);
  }

  LInstruction* lir() const { return lir_; }
  wasm::SymbolicAddress fun() const { return fun_; }
  Register typeDefData() const { return typeDefData_; }
  Register output() const { return output_; }
};
19068 void CodeGenerator::visitOutOfLineWasmNewStruct(OutOfLineWasmNewStruct* ool) {
19069 callWasmStructAllocFun(ool->lir(), ool->fun(), ool->typeDefData(),
19070 ool->output());
19071 masm.jump(ool->rejoin());
// Allocate a wasm GC struct. Outline-storage structs always call into the
// instance; inline-storage structs attempt a nursery allocation first and
// fall back to an out-of-line instance call.
void CodeGenerator::visitWasmNewStructObject(LWasmNewStructObject* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  MWasmNewStructObject* mir = lir->mir();

  Register typeDefData = ToRegister(lir->typeDefData());
  Register output = ToRegister(lir->output());

  if (mir->isOutline()) {
    // Pick the variant that zero-initializes fields when required.
    wasm::SymbolicAddress fun = mir->zeroFields()
                                    ? wasm::SymbolicAddress::StructNewOOL_true
                                    : wasm::SymbolicAddress::StructNewOOL_false;
    callWasmStructAllocFun(lir, fun, typeDefData, output);
  } else {
    wasm::SymbolicAddress fun = mir->zeroFields()
                                    ? wasm::SymbolicAddress::StructNewIL_true
                                    : wasm::SymbolicAddress::StructNewIL_false;

    Register instance = ToRegister(lir->instance());
    MOZ_ASSERT(instance == InstanceReg);

    auto ool =
        new (alloc()) OutOfLineWasmNewStruct(lir, fun, typeDefData, output);
    addOutOfLineCode(ool, lir->mir());

    Register temp1 = ToRegister(lir->temp0());
    Register temp2 = ToRegister(lir->temp1());
    masm.wasmNewStructObject(instance, output, typeDefData, temp1, temp2,
                             ool->entry(), mir->allocKind(), mir->zeroFields());

    masm.bind(ool->rejoin());
  }
}
// Call a wasm array-allocation instance function |fun|, preserving
// InstanceReg and live registers, recording a safepoint for the call. A null
// result means allocation failed with an already-reported exception, so we
// trap with ThrowReported to unwind.
void CodeGenerator::callWasmArrayAllocFun(LInstruction* lir,
                                          wasm::SymbolicAddress fun,
                                          Register numElements,
                                          Register typeDefData, Register output,
                                          wasm::BytecodeOffset bytecodeOffset) {
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();
  saveLive(lir);

  masm.setupWasmABICall();
  masm.passABIArg(InstanceReg);
  masm.passABIArg(numElements);
  masm.passABIArg(typeDefData);
  // Offset of the saved InstanceReg relative to the current frame depth.
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  CodeOffset offset = masm.callWithABI(
      bytecodeOffset, fun, mozilla::Some(instanceOffset), ABIType::General);
  masm.storeCallPointerResult(output);

  markSafepointAt(offset.offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);

  restoreLive(lir);
  masm.Pop(InstanceReg);
#if JS_CODEGEN_ARM64
  // Keep the ARM64 pseudo stack pointer in sync after the pop.
  masm.syncStackPtr();
#endif

  // Null result => allocation failed; the exception was already reported.
  Label ok;
  masm.branchPtr(Assembler::NonZero, output, ImmWord(0), &ok);
  masm.wasmTrap(wasm::Trap::ThrowReported, bytecodeOffset);
  masm.bind(&ok);
}
// Out-of-line path to allocate wasm GC arrays
class OutOfLineWasmNewArray : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;          // instruction requesting the allocation
  wasm::SymbolicAddress fun_;  // instance function that performs it
  Register numElementsReg_;    // register carrying the element count
  // When set, the constant element count that must be materialized into
  // numElementsReg_ before the call (the inline fast path clobbered it).
  mozilla::Maybe<uint32_t> numElements_;
  Register typeDefData_;       // register holding the type-definition data
  Register output_;            // register receiving the new array pointer
  wasm::BytecodeOffset bytecodeOffset_;  // trap/call attribution offset

 public:
  OutOfLineWasmNewArray(LInstruction* lir, wasm::SymbolicAddress fun,
                        Register numElementsReg,
                        mozilla::Maybe<uint32_t> numElements,
                        Register typeDefData, Register output,
                        wasm::BytecodeOffset bytecodeOffset)
      : lir_(lir),
        fun_(fun),
        numElementsReg_(numElementsReg),
        numElements_(numElements),
        typeDefData_(typeDefData),
        output_(output),
        bytecodeOffset_(bytecodeOffset) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineWasmNewArray(this);
  }

  LInstruction* lir() const { return lir_; }
  wasm::SymbolicAddress fun() const { return fun_; }
  Register numElementsReg() const { return numElementsReg_; }
  mozilla::Maybe<uint32_t> numElements() const { return numElements_; }
  Register typeDefData() const { return typeDefData_; }
  Register output() const { return output_; }
  wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
};
// Out-of-line fallback for wasm array allocation: re-materialize a constant
// element count if one was recorded, then perform the instance call.
void CodeGenerator::visitOutOfLineWasmNewArray(OutOfLineWasmNewArray* ool) {
  if (ool->numElements().isSome()) {
    masm.move32(Imm32(ool->numElements().value()), ool->numElementsReg());
  }
  callWasmArrayAllocFun(ool->lir(), ool->fun(), ool->numElementsReg(),
                        ool->typeDefData(), ool->output(),
                        ool->bytecodeOffset());
  masm.jump(ool->rejoin());
}
// Allocate a wasm GC array. Small constant-size arrays attempt an inline
// nursery allocation (with an out-of-line instance-call fallback); large or
// dynamically-sized arrays go through the instance call.
void CodeGenerator::visitWasmNewArrayObject(LWasmNewArrayObject* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  MWasmNewArrayObject* mir = lir->mir();

  Register typeDefData = ToRegister(lir->typeDefData());
  Register output = ToRegister(lir->output());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // Pick the variant that zero-initializes elements when required.
  wasm::SymbolicAddress fun = mir->zeroFields()
                                  ? wasm::SymbolicAddress::ArrayNew_true
                                  : wasm::SymbolicAddress::ArrayNew_false;

  if (lir->numElements()->isConstant()) {
    // numElements is constant, so we can do optimized code generation.
    uint32_t numElements = lir->numElements()->toConstant()->toInt32();
    CheckedUint32 storageBytes =
        WasmArrayObject::calcStorageBytesChecked(mir->elemSize(), numElements);
    if (!storageBytes.isValid() ||
        storageBytes.value() > WasmArrayObject_MaxInlineBytes) {
      // Too much array data to store inline. Immediately perform an instance
      // call to handle the out-of-line storage.
      masm.move32(Imm32(numElements), temp1);
      callWasmArrayAllocFun(lir, fun, temp1, typeDefData, output,
                            mir->bytecodeOffset());
    } else {
      // storageBytes is small enough to be stored inline in WasmArrayObject.
      // Attempt a nursery allocation and fall back to an instance call if it
      // fails.
      Register instance = ToRegister(lir->instance());
      MOZ_ASSERT(instance == InstanceReg);

      auto ool = new (alloc())
          OutOfLineWasmNewArray(lir, fun, temp1, mozilla::Some(numElements),
                                typeDefData, output, mir->bytecodeOffset());
      addOutOfLineCode(ool, lir->mir());

      masm.wasmNewArrayObjectFixed(instance, output, typeDefData, temp1, temp2,
                                   ool->entry(), numElements,
                                   storageBytes.value(), mir->zeroFields());

      masm.bind(ool->rejoin());
    }
  } else {
    // numElements is dynamic. Attempt a dynamic inline-storage nursery
    // allocation and fall back to an instance call if it fails.
    Register instance = ToRegister(lir->instance());
    MOZ_ASSERT(instance == InstanceReg);
    Register numElements = ToRegister(lir->numElements());

    auto ool = new (alloc())
        OutOfLineWasmNewArray(lir, fun, numElements, mozilla::Nothing(),
                              typeDefData, output, mir->bytecodeOffset());
    addOutOfLineCode(ool, lir->mir());

    masm.wasmNewArrayObject(instance, output, numElements, typeDefData, temp1,
                            ool->entry(), mir->elemSize(), mir->zeroFields());

    masm.bind(ool->rejoin());
  }
}
// Move the pinned wasm heap base register into the output. Only valid on
// platforms that reserve a heap register.
void CodeGenerator::visitWasmHeapReg(LWasmHeapReg* ins) {
#ifdef WASM_HAS_HEAPREG
  masm.movePtr(HeapReg, ToRegister(ins->output()));
#else
  MOZ_CRASH();
#endif
}
// 32-bit wasm memory bounds check: trap with OutOfBounds when
// ptr >= boundsCheckLimit. The trap is emitted inline under Spectre index
// masking and out-of-line otherwise.
void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
  // When there are no spectre mitigations in place, branching out-of-line to
  // the trap is a big performance win, but with mitigations it's trickier. See
  // bug 1680243.
  if (JitOptions.spectreIndexMasking) {
    Label ok;
    masm.wasmBoundsCheck32(Assembler::Below, ptr, boundsCheckLimit, &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
    masm.bind(&ok);
  } else {
    OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
        mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
                           ool->entry());
  }
}
// 64-bit wasm memory bounds check; same inline/out-of-line trade-off as the
// 32-bit variant above.
void CodeGenerator::visitWasmBoundsCheck64(LWasmBoundsCheck64* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
  Register64 boundsCheckLimit = ToRegister64(ins->boundsCheckLimit());
  // See above.
  if (JitOptions.spectreIndexMasking) {
    Label ok;
    masm.wasmBoundsCheck64(Assembler::Below, ptr, boundsCheckLimit, &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
    masm.bind(&ok);
  } else {
    OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
        mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck64(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
                           ool->entry());
  }
}
19300 void CodeGenerator::visitWasmBoundsCheckRange32(LWasmBoundsCheckRange32* ins) {
19301 const MWasmBoundsCheckRange32* mir = ins->mir();
19302 Register index = ToRegister(ins->index());
19303 Register length = ToRegister(ins->length());
19304 Register limit = ToRegister(ins->limit());
19305 Register tmp = ToRegister(ins->temp0());
19307 masm.wasmBoundsCheckRange32(index, length, limit, tmp, mir->bytecodeOffset());
// Trap (UnalignedAccess, out-of-line) if |ptr| is not aligned to the access's
// byte size; byteSize is a power of two, so the low-bit mask test suffices.
void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
      mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
  addOutOfLineCode(ool, mir);
  masm.branchTest32(Assembler::NonZero, ptr, Imm32(mir->byteSize() - 1),
                    ool->entry());
}
// 64-bit variant of the alignment check. Only the low word of the pointer is
// relevant for the alignment mask test on 32-bit targets.
void CodeGenerator::visitWasmAlignmentCheck64(LWasmAlignmentCheck64* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
#ifdef JS_64BIT
  Register r = ptr.reg;
#else
  Register r = ptr.low;
#endif
  OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
      mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
  addOutOfLineCode(ool, mir);
  masm.branchTestPtr(Assembler::NonZero, r, Imm32(mir->byteSize() - 1),
                     ool->entry());
}
// Load a pointer-sized or int32 field from the wasm Instance at the MIR-
// recorded offset.
void CodeGenerator::visitWasmLoadInstance(LWasmLoadInstance* ins) {
  switch (ins->mir()->type()) {
    case MIRType::WasmAnyRef:
    case MIRType::Pointer:
      masm.loadPtr(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                   ToRegister(ins->output()));
      break;
    case MIRType::Int32:
      masm.load32(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                  ToRegister(ins->output()));
      break;
    default:
      MOZ_CRASH("MIRType not supported in WasmLoadInstance");
  }
}
19351 void CodeGenerator::visitWasmLoadInstance64(LWasmLoadInstance64* ins) {
19352 MOZ_ASSERT(ins->mir()->type() == MIRType::Int64);
19353 masm.load64(Address(ToRegister(ins->instance()), ins->mir()->offset()),
19354 ToOutRegister64(ins));
// Emit a load/add/store to bump the warm-up counter at |warmUpCount| (the
// counter slot inside |script|'s JitScript). |tmp| is clobbered.
void CodeGenerator::incrementWarmUpCounter(AbsoluteAddress warmUpCount,
                                           JSScript* script, Register tmp) {
  // The code depends on the JitScript* not being discarded without also
  // invalidating Ion code. Assert this.
#ifdef DEBUG
  Label ok;
  masm.movePtr(ImmGCPtr(script), tmp);
  masm.loadJitScript(tmp, tmp);
  masm.branchPtr(Assembler::Equal, tmp, ImmPtr(script->jitScript()), &ok);
  masm.assumeUnreachable("Didn't find JitScript?");
  masm.bind(&ok);
#endif

  masm.load32(warmUpCount, tmp);
  masm.add32(Imm32(1), tmp);
  masm.store32(tmp, warmUpCount);
}
19375 void CodeGenerator::visitIncrementWarmUpCounter(LIncrementWarmUpCounter* ins) {
19376 Register tmp = ToRegister(ins->temp0());
19378 AbsoluteAddress warmUpCount =
19379 AbsoluteAddress(ins->mir()->script()->jitScript())
19380 .offset(JitScript::offsetOfWarmUpCount());
19381 incrementWarmUpCounter(warmUpCount, ins->mir()->script(), tmp);
19384 void CodeGenerator::visitLexicalCheck(LLexicalCheck* ins) {
19385 ValueOperand inputValue = ToValue(ins, LLexicalCheck::InputIndex);
19386 Label bail;
19387 masm.branchTestMagicValue(Assembler::Equal, inputValue,
19388 JS_UNINITIALIZED_LEXICAL, &bail);
19389 bailoutFrom(&bail, ins->snapshot());
// Throw a run-time lexical error (e.g. a TDZ violation) with the MIR-recorded
// error number, via a VM call.
void CodeGenerator::visitThrowRuntimeLexicalError(
    LThrowRuntimeLexicalError* ins) {
  pushArg(Imm32(ins->mir()->errorNumber()));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, jit::ThrowRuntimeLexicalError>(ins);
}
// Throw a statically-known message kind via a VM call.
void CodeGenerator::visitThrowMsg(LThrowMsg* ins) {
  pushArg(Imm32(static_cast<int32_t>(ins->mir()->throwMsgKind())));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, js::ThrowMsgOperation>(ins);
}
// Instantiate this script's global declarations via a VM call, passing the
// script and the bytecode pc from the resume point.
void CodeGenerator::visitGlobalDeclInstantiation(
    LGlobalDeclInstantiation* ins) {
  pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
  pushArg(ImmGCPtr(ins->mir()->block()->info().script()));

  using Fn = bool (*)(JSContext*, HandleScript, const jsbytecode*);
  callVM<Fn, GlobalDeclInstantiationFromIon>(ins);
}
// Implement the |debugger| statement: ask the runtime (via an ABI call)
// whether a live onDebuggerStatement hook exists, and bail out of Ion if so.
void CodeGenerator::visitDebugger(LDebugger* ins) {
  Register cx = ToRegister(ins->temp0());

  masm.loadJSContext(cx);
  using Fn = bool (*)(JSContext* cx);
  masm.setupAlignedABICall();
  masm.passABIArg(cx);
  masm.callWithABI<Fn, GlobalHasLiveOnDebuggerStatement>();

  // A true return means a debugger hook is active: bail to handle it.
  Label bail;
  masm.branchIfTrueBool(ReturnReg, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
19430 void CodeGenerator::visitNewTarget(LNewTarget* ins) {
19431 ValueOperand output = ToOutValue(ins);
19433 // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
19434 Label notConstructing, done;
19435 Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
19436 masm.branchTestPtr(Assembler::Zero, calleeToken,
19437 Imm32(CalleeToken_FunctionConstructing), &notConstructing);
19439 Register argvLen = output.scratchReg();
19440 masm.loadNumActualArgs(FramePointer, argvLen);
19442 Label useNFormals;
19444 size_t numFormalArgs = ins->mirRaw()->block()->info().nargs();
19445 masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs), &useNFormals);
19447 size_t argsOffset = JitFrameLayout::offsetOfActualArgs();
19449 BaseValueIndex newTarget(FramePointer, argvLen, argsOffset);
19450 masm.loadValue(newTarget, output);
19451 masm.jump(&done);
19454 masm.bind(&useNFormals);
19457 Address newTarget(FramePointer,
19458 argsOffset + (numFormalArgs * sizeof(Value)));
19459 masm.loadValue(newTarget, output);
19460 masm.jump(&done);
19463 // else output = undefined
19464 masm.bind(&notConstructing);
19465 masm.moveValue(UndefinedValue(), output);
19466 masm.bind(&done);
// Derived-class constructor return check: an object return is used as-is;
// |undefined| falls back to |this| (which must not be the uninitialized-this
// magic value); any other return value throws via the VM.
void CodeGenerator::visitCheckReturn(LCheckReturn* ins) {
  ValueOperand returnValue = ToValue(ins, LCheckReturn::ReturnValueIndex);
  ValueOperand thisValue = ToValue(ins, LCheckReturn::ThisValueIndex);
  ValueOperand output = ToOutValue(ins);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>(
      ins, ArgList(returnValue), StoreNothing());

  Label noChecks;
  masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
  masm.branchTestUndefined(Assembler::NotEqual, returnValue, ool->entry());
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.moveValue(thisValue, output);
  masm.jump(ool->rejoin());
  masm.bind(&noChecks);
  masm.moveValue(returnValue, output);
  masm.bind(ool->rejoin());
}
19489 void CodeGenerator::visitCheckIsObj(LCheckIsObj* ins) {
19490 ValueOperand value = ToValue(ins, LCheckIsObj::ValueIndex);
19491 Register output = ToRegister(ins->output());
19493 using Fn = bool (*)(JSContext*, CheckIsObjectKind);
19494 OutOfLineCode* ool = oolCallVM<Fn, ThrowCheckIsObject>(
19495 ins, ArgList(Imm32(ins->mir()->checkKind())), StoreNothing());
19497 masm.fallibleUnboxObject(value, output, ool->entry());
19498 masm.bind(ool->rejoin());
// Throw (via VM call) when the value is null or undefined, i.e. not
// object-coercible.
void CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins) {
  ValueOperand checkValue = ToValue(ins, LCheckObjCoercible::ValueIndex);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowObjectCoercible>(
      ins, ArgList(checkValue), StoreNothing());
  masm.branchTestNull(Assembler::Equal, checkValue, ool->entry());
  masm.branchTestUndefined(Assembler::Equal, checkValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Check a class |extends| clause: null is allowed as-is; otherwise the value
// must be a constructor object. Anything else goes to the VM, which throws
// the appropriate error.
void CodeGenerator::visitCheckClassHeritage(LCheckClassHeritage* ins) {
  ValueOperand heritage = ToValue(ins, LCheckClassHeritage::HeritageIndex);
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckClassHeritageOperation>(
      ins, ArgList(heritage), StoreNothing());

  // null heritage is valid: skip straight past the checks.
  masm.branchTestNull(Assembler::Equal, heritage, ool->rejoin());
  masm.fallibleUnboxObject(heritage, temp0, ool->entry());

  masm.isConstructor(temp0, temp1, ool->entry());
  masm.branchTest32(Assembler::Zero, temp1, temp1, ool->entry());

  masm.bind(ool->rejoin());
}
// Throw (via VM call) when |this| is still the magic uninitialized value —
// i.e. used in a derived-class constructor before super() ran.
void CodeGenerator::visitCheckThis(LCheckThis* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThis::ValueIndex);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowUninitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Throw (via VM call) when |this| is NOT the magic uninitialized value —
// i.e. super() is being called a second time.
void CodeGenerator::visitCheckThisReinit(LCheckThisReinit* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThisReinit::ThisValueIndex);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowInitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::NotEqual, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Create the generator object for the current frame via a VM call
// (CreateGenerator). Arguments are pushed in reverse order of the Fn
// signature, as callVM expects.
void CodeGenerator::visitGenerator(LGenerator* lir) {
  Register callee = ToRegister(lir->callee());
  Register environmentChain = ToRegister(lir->environmentChain());
  Register argsObject = ToRegister(lir->argsObject());

  pushArg(argsObject);
  pushArg(environmentChain);
  pushArg(ImmGCPtr(current->mir()->info().script()));
  pushArg(callee);

  using Fn = JSObject* (*)(JSContext* cx, HandleFunction, HandleScript,
                           HandleObject, HandleObject);
  callVM<Fn, CreateGenerator>(lir);
}
// Resolve an async function's result promise with |value| via a VM call.
void CodeGenerator::visitAsyncResolve(LAsyncResolve* lir) {
  Register generator = ToRegister(lir->generator());
  ValueOperand value = ToValue(lir, LAsyncResolve::ValueIndex);

  pushArg(value);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
                           HandleValue);
  callVM<Fn, js::AsyncFunctionResolve>(lir);
}
// Reject an async function's result promise with |reason| (and the captured
// |stack|) via a VM call.
void CodeGenerator::visitAsyncReject(LAsyncReject* lir) {
  Register generator = ToRegister(lir->generator());
  ValueOperand reason = ToValue(lir, LAsyncReject::ReasonIndex);
  ValueOperand stack = ToValue(lir, LAsyncReject::StackIndex);

  pushArg(stack);
  pushArg(reason);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
                           HandleValue, HandleValue);
  callVM<Fn, js::AsyncFunctionReject>(lir);
}
// Await |value| on the async function's generator via a VM call.
void CodeGenerator::visitAsyncAwait(LAsyncAwait* lir) {
  ValueOperand value = ToValue(lir, LAsyncAwait::ValueIndex);
  Register generator = ToRegister(lir->generator());

  pushArg(value);
  pushArg(generator);

  using Fn =
      JSObject* (*)(JSContext* cx, Handle<AsyncFunctionGeneratorObject*> genObj,
                    HandleValue value);
  callVM<Fn, js::AsyncFunctionAwait>(lir);
}
// Ask the VM whether awaiting |value| can skip the full await machinery
// (result is the |canSkip| out-param of the VM function).
void CodeGenerator::visitCanSkipAwait(LCanSkipAwait* lir) {
  ValueOperand value = ToValue(lir, LCanSkipAwait::ValueIndex);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
  callVM<Fn, js::CanSkipAwait>(lir);
}
// When |canSkip| is true, extract the awaited value through a VM call;
// otherwise pass the input value through unchanged.
void CodeGenerator::visitMaybeExtractAwaitValue(LMaybeExtractAwaitValue* lir) {
  ValueOperand value = ToValue(lir, LMaybeExtractAwaitValue::ValueIndex);
  ValueOperand output = ToOutValue(lir);
  Register canSkip = ToRegister(lir->canSkip());

  Label cantExtract, finished;
  masm.branchIfFalseBool(canSkip, &cantExtract);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
  callVM<Fn, js::ExtractAwaitValue>(lir);
  masm.jump(&finished);
  masm.bind(&cantExtract);

  // Pass-through path: no extraction needed.
  masm.moveValue(value, output);

  masm.bind(&finished);
}
// Debug-build check on a value used by self-hosted code; delegates entirely
// to the VM.
void CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins) {
  ValueOperand checkValue = ToValue(ins, LDebugCheckSelfHosted::ValueIndex);
  pushArg(checkValue);
  using Fn = bool (*)(JSContext*, HandleValue);
  callVM<Fn, js::Debug_CheckSelfHosted>(ins);
}
// Math.random(): produce a double from the realm's XorShift128+ RNG state.
void CodeGenerator::visitRandom(LRandom* ins) {
  using mozilla::non_crypto::XorShift128PlusRNG;

  FloatRegister output = ToFloatRegister(ins->output());
  Register rngReg = ToRegister(ins->temp0());

  Register64 temp1 = ToRegister64(ins->temp1());
  Register64 temp2 = ToRegister64(ins->temp2());

  const XorShift128PlusRNG* rng = gen->realm->addressOfRandomNumberGenerator();
  masm.movePtr(ImmPtr(rng), rngReg);

  masm.randomDouble(rngReg, output, temp1, temp2);
  // Under differential testing, force a deterministic 0.0 result (the RNG
  // state was still advanced above).
  if (js::SupportDifferentialTesting()) {
    masm.loadConstantDouble(0.0, output);
  }
}
// Sign-extend the low 8 or 16 bits of the input register to a full int32.
void CodeGenerator::visitSignExtendInt32(LSignExtendInt32* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  switch (ins->mode()) {
    case MSignExtendInt32::Byte:
      masm.move8SignExtend(input, output);
      break;
    case MSignExtendInt32::Half:
      masm.move16SignExtend(input, output);
      break;
  }
}
// Emit a 32-bit rotate (left or right); a constant count is reduced mod 32.
void CodeGenerator::visitRotate(LRotate* ins) {
  MRotate* mir = ins->mir();
  Register input = ToRegister(ins->input());
  Register dest = ToRegister(ins->output());

  const LAllocation* count = ins->count();
  if (count->isConstant()) {
    int32_t c = ToInt32(count) & 0x1F;  // rotate count is taken mod 32
    if (mir->isLeftRotate()) {
      masm.rotateLeft(Imm32(c), input, dest);
    } else {
      masm.rotateRight(Imm32(c), input, dest);
    }
  } else {
    Register creg = ToRegister(count);
    if (mir->isLeftRotate()) {
      masm.rotateLeft(creg, input, dest);
    } else {
      masm.rotateRight(creg, input, dest);
    }
  }
}
// Out-of-line path for LNaNToZero: taken when the input needs replacing with
// +0.0 (see visitNaNToZero).
class OutOfLineNaNToZero : public OutOfLineCodeBase<CodeGenerator> {
  LNaNToZero* lir_;

 public:
  explicit OutOfLineNaNToZero(LNaNToZero* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineNaNToZero(this);
  }
  LNaNToZero* lir() const { return lir_; }
};
19707 void CodeGenerator::visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool) {
19708 FloatRegister output = ToFloatRegister(ool->lir()->output());
19709 masm.loadConstantDouble(0.0, output);
19710 masm.jump(ool->rejoin());
// Canonicalize a double: map NaN (and, unless range analysis proved it
// impossible, negative zero) to +0.0 via the out-of-line path.
void CodeGenerator::visitNaNToZero(LNaNToZero* lir) {
  FloatRegister input = ToFloatRegister(lir->input());

  OutOfLineNaNToZero* ool = new (alloc()) OutOfLineNaNToZero(lir);
  addOutOfLineCode(ool, lir->mir());

  if (lir->mir()->operandIsNeverNegativeZero()) {
    // Only NaN needs handling; NaN compares unordered with itself.
    masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
  } else {
    // Catch NaN (unordered) and ±0 (equal to 0.0) in one comparison; +0 is
    // mapped to itself, so sending it out-of-line is still correct.
    FloatRegister scratch = ToFloatRegister(lir->temp0());
    masm.loadConstantDouble(0.0, scratch);
    masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch,
                      ool->entry());
  }
  masm.bind(ool->rejoin());
}
// Set |output| to whether |obj| is a packed array (no holes), using one
// temp register; the actual test lives in MacroAssembler::setIsPackedArray.
19730 void CodeGenerator::visitIsPackedArray(LIsPackedArray* lir) {
19731 Register obj = ToRegister(lir->object());
19732 Register output = ToRegister(lir->output());
19733 Register temp = ToRegister(lir->temp0());
19735 masm.setIsPackedArray(obj, output, temp);
// Guard that |array| is packed; bail out of Ion (deoptimize) otherwise.
19738 void CodeGenerator::visitGuardArrayIsPacked(LGuardArrayIsPacked* lir) {
19739 Register array = ToRegister(lir->array());
19740 Register temp0 = ToRegister(lir->temp0());
19741 Register temp1 = ToRegister(lir->temp1());
19743 Label bail;
19744 masm.branchArrayIsNotPacked(array, temp0, temp1, &bail);
19745 bailoutFrom(&bail, lir->snapshot());
// Load the prototype of |target| as a Value. Fast path reads the proto from
// the object; a lazy proto (tagged pointer value 1) falls back to a VM call,
// and a null proto (0) yields the null value.
19748 void CodeGenerator::visitGetPrototypeOf(LGetPrototypeOf* lir) {
19749 Register target = ToRegister(lir->target());
19750 ValueOperand out = ToOutValue(lir);
19751 Register scratch = out.scratchReg();
19753 using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
19754 OutOfLineCode* ool = oolCallVM<Fn, jit::GetPrototypeOf>(lir, ArgList(target),
19755 StoreValueTo(out));
// The comparisons below rely on LazyProto being the tagged value 1.
19757 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
19759 masm.loadObjProto(target, scratch);
// Values above 1 are real object pointers; 0 is null, 1 is lazy.
19761 Label hasProto;
19762 masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);
19764 // Call into the VM for lazy prototypes.
19765 masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), ool->entry());
// Remaining case is a null proto.
19767 masm.moveValue(NullValue(), out);
19768 masm.jump(ool->rejoin());
19770 masm.bind(&hasProto);
19771 masm.tagValue(JSVAL_TYPE_OBJECT, scratch, out);
19773 masm.bind(ool->rejoin());
// Create a plain object with the given prototype via a VM call.
19776 void CodeGenerator::visitObjectWithProto(LObjectWithProto* lir) {
19777 pushArg(ToValue(lir, LObjectWithProto::PrototypeIndex));
19779 using Fn = PlainObject* (*)(JSContext*, HandleValue);
19780 callVM<Fn, js::ObjectWithProtoOperation>(lir);
// Load the (statically known to exist) prototype of |obj| into |output|.
// MIR guarantees the proto is neither null nor lazy; DEBUG builds verify.
19783 void CodeGenerator::visitObjectStaticProto(LObjectStaticProto* lir) {
19784 Register obj = ToRegister(lir->input());
19785 Register output = ToRegister(lir->output());
19787 masm.loadObjProto(obj, output);
19789 #ifdef DEBUG
19790 // We shouldn't encounter a null or lazy proto.
19791 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
19793 Label done;
19794 masm.branchPtr(Assembler::Above, output, ImmWord(1), &done);
19795 masm.assumeUnreachable("Unexpected null or lazy proto in MObjectStaticProto");
19796 masm.bind(&done);
19797 #endif
// Fetch a well-known builtin object (by BuiltinObjectKind) via a VM call.
19800 void CodeGenerator::visitBuiltinObject(LBuiltinObject* lir) {
19801 pushArg(Imm32(static_cast<int32_t>(lir->mir()->builtinObjectKind())));
19803 using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
19804 callVM<Fn, js::BuiltinObjectOperation>(lir);
// Implement JSOp::SuperFun: the "super" function is the prototype of the
// current callee. Returns the proto boxed as an object Value, or null if the
// callee has a null prototype.
19807 void CodeGenerator::visitSuperFunction(LSuperFunction* lir) {
19808 Register callee = ToRegister(lir->callee());
19809 ValueOperand out = ToOutValue(lir);
19810 Register temp = ToRegister(lir->temp0());
19812 #ifdef DEBUG
19813 Label classCheckDone;
19814 masm.branchTestObjIsFunction(Assembler::Equal, callee, temp, callee,
19815 &classCheckDone);
19816 masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
19817 masm.bind(&classCheckDone);
19818 #endif
19820 // Load prototype of callee
19821 masm.loadObjProto(callee, temp);
19823 #ifdef DEBUG
19824 // We won't encounter a lazy proto, because |callee| is guaranteed to be a
19825 // JSFunction and only proxy objects can have a lazy proto.
19826 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
19828 Label proxyCheckDone;
19829 masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
19830 masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
19831 masm.bind(&proxyCheckDone);
19832 #endif
// A zero proto pointer means the prototype is null.
19834 Label nullProto, done;
19835 masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
19837 // Box prototype and return
19838 masm.tagValue(JSVAL_TYPE_OBJECT, temp, out);
19839 masm.jump(&done);
19841 masm.bind(&nullProto);
19842 masm.moveValue(NullValue(), out);
19844 masm.bind(&done);
// Store |homeObject| into the method-home-object slot of the extended
// function |func|. The pre-barrier covers the previous slot value for the
// incremental GC.
19847 void CodeGenerator::visitInitHomeObject(LInitHomeObject* lir) {
19848 Register func = ToRegister(lir->function());
19849 ValueOperand homeObject = ToValue(lir, LInitHomeObject::HomeObjectIndex);
19851 masm.assertFunctionIsExtended(func);
19853 Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
19855 emitPreBarrier(addr);
19856 masm.storeValue(homeObject, addr);
// Set |output| to whether |object| is definitely one of the TypedArray
// constructors (test implemented in the MacroAssembler).
19859 void CodeGenerator::visitIsTypedArrayConstructor(
19860 LIsTypedArrayConstructor* lir) {
19861 Register object = ToRegister(lir->object());
19862 Register output = ToRegister(lir->output());
19864 masm.setIsDefinitelyTypedArrayConstructor(object, output);
// Extract the type tag of a Value into |output|. extractTag may return a
// register other than |output| on some platforms, so copy if needed.
19867 void CodeGenerator::visitLoadValueTag(LLoadValueTag* lir) {
19868 ValueOperand value = ToValue(lir, LLoadValueTag::ValueIndex);
19869 Register output = ToRegister(lir->output());
19871 Register tag = masm.extractTag(value, output);
19872 if (tag != output) {
19873 masm.mov(tag, output);
// Guard that two Value tags differ, bailing out when they are equal.
// Additionally bails when both tags are number tags, since int32 and double
// tags differ even though the values may be numerically equal.
19877 void CodeGenerator::visitGuardTagNotEqual(LGuardTagNotEqual* lir) {
19878 Register lhs = ToRegister(lir->lhs());
19879 Register rhs = ToRegister(lir->rhs());
19881 bailoutCmp32(Assembler::Equal, lhs, rhs, lir->snapshot());
19883 // If both lhs and rhs are numbers, can't use tag comparison to do inequality
19884 // comparison
19885 Label done;
19886 masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
19887 masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
19888 bailout(lir->snapshot());
19890 masm.bind(&done);
// Load the target object of a proxy wrapper from its private reserved slot.
// In the fallible case (possibly revoked proxy) the slot may not hold an
// object, and we bail out instead.
19893 void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
19894 Register object = ToRegister(lir->object());
19895 Register output = ToRegister(lir->output());
19897 masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);
19899 // Bail for revoked proxies.
19900 Label bail;
19901 Address targetAddr(output,
19902 js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
19903 if (lir->mir()->fallible()) {
19904 masm.fallibleUnboxObject(targetAddr, output, &bail);
19905 bailoutFrom(&bail, lir->snapshot());
19906 } else {
19907 masm.unboxObject(targetAddr, output);
// Guard that |object| still has the expected GetterSetter for the property
// id baked into the MIR node. Calls the pure (non-GC) C++ helper via the
// ABI and bails out if it returns false.
19911 void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
19912 Register object = ToRegister(lir->object());
19913 Register temp0 = ToRegister(lir->temp0());
19914 Register temp1 = ToRegister(lir->temp1());
19915 Register temp2 = ToRegister(lir->temp2());
19917 masm.movePropertyKey(lir->mir()->propId(), temp1);
19918 masm.movePtr(ImmGCPtr(lir->mir()->getterSetter()), temp2);
19920 using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
19921 GetterSetter* getterSetter);
19922 masm.setupAlignedABICall();
19923 masm.loadJSContext(temp0);
19924 masm.passABIArg(temp0);
19925 masm.passABIArg(object);
19926 masm.passABIArg(temp1);
19927 masm.passABIArg(temp2);
19928 masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
19930 bailoutIfFalseBool(ReturnReg, lir->snapshot());
// Guard that |object| is extensible; bail out otherwise.
19933 void CodeGenerator::visitGuardIsExtensible(LGuardIsExtensible* lir) {
19934 Register object = ToRegister(lir->object());
19935 Register temp = ToRegister(lir->temp0());
19937 Label bail;
19938 masm.branchIfObjectNotExtensible(object, temp, &bail);
19939 bailoutFrom(&bail, lir->snapshot());
// Guard that the int32 |index| is >= 0; bail out if it is negative.
19942 void CodeGenerator::visitGuardInt32IsNonNegative(
19943 LGuardInt32IsNonNegative* lir) {
19944 Register index = ToRegister(lir->index());
19946 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
// Guard that |input| lies in the inclusive range [minimum, maximum] taken
// from the MIR node; bail out if it is outside either bound.
19949 void CodeGenerator::visitGuardInt32Range(LGuardInt32Range* lir) {
19950 Register input = ToRegister(lir->input());
19952 bailoutCmp32(Assembler::LessThan, input, Imm32(lir->mir()->minimum()),
19953 lir->snapshot());
19954 bailoutCmp32(Assembler::GreaterThan, input, Imm32(lir->mir()->maximum()),
19955 lir->snapshot());
// Guard that |index| does NOT refer to a present dense element of |object|:
// it passes when the index is >= the initialized length or the element slot
// holds the magic hole value; otherwise bail out.
19958 void CodeGenerator::visitGuardIndexIsNotDenseElement(
19959 LGuardIndexIsNotDenseElement* lir) {
19960 Register object = ToRegister(lir->object());
19961 Register index = ToRegister(lir->index());
19962 Register temp = ToRegister(lir->temp0());
19963 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
19965 // Load obj->elements.
19966 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
19968 // Ensure index >= initLength or the element is a hole.
19969 Label notDense;
19970 Address capacity(temp, ObjectElements::offsetOfInitializedLength());
19971 masm.spectreBoundsCheck32(index, capacity, spectreTemp, &notDense);
19973 BaseValueIndex element(temp, index);
19974 masm.branchTestMagic(Assembler::Equal, element, &notDense);
// Index is a live dense element: deoptimize.
19976 bailout(lir->snapshot());
19978 masm.bind(&notDense);
// Guard that storing at |index| is a valid dense-element update or append:
// succeeds immediately if the array length is writable, otherwise requires
// the index to be within the current length. Bails out on failure.
19981 void CodeGenerator::visitGuardIndexIsValidUpdateOrAdd(
19982 LGuardIndexIsValidUpdateOrAdd* lir) {
19983 Register object = ToRegister(lir->object());
19984 Register index = ToRegister(lir->index());
19985 Register temp = ToRegister(lir->temp0());
19986 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
19988 // Load obj->elements.
19989 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
19991 Label success;
19993 // If length is writable, branch to &success. All indices are writable.
19994 Address flags(temp, ObjectElements::offsetOfFlags());
19995 masm.branchTest32(Assembler::Zero, flags,
19996 Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
19997 &success);
19999 // Otherwise, ensure index is in bounds.
20000 Label bail;
20001 Address length(temp, ObjectElements::offsetOfLength());
20002 masm.spectreBoundsCheck32(index, length, spectreTemp, &bail);
20003 masm.bind(&success);
20005 bailoutFrom(&bail, lir->snapshot());
// VM call to add or update a sparse element on a native object.
// Arguments are pushed in reverse order (strict flag last pushed first).
20008 void CodeGenerator::visitCallAddOrUpdateSparseElement(
20009 LCallAddOrUpdateSparseElement* lir) {
20010 Register object = ToRegister(lir->object());
20011 Register index = ToRegister(lir->index());
20012 ValueOperand value = ToValue(lir, LCallAddOrUpdateSparseElement::ValueIndex);
20014 pushArg(Imm32(lir->mir()->strict()));
20015 pushArg(value);
20016 pushArg(index);
20017 pushArg(object);
20019 using Fn =
20020 bool (*)(JSContext*, Handle<NativeObject*>, int32_t, HandleValue, bool);
20021 callVM<Fn, js::AddOrUpdateSparseElementHelper>(lir);
// VM call to read a sparse element from a native object.
20024 void CodeGenerator::visitCallGetSparseElement(LCallGetSparseElement* lir) {
20025 Register object = ToRegister(lir->object());
20026 Register index = ToRegister(lir->index());
20028 pushArg(index);
20029 pushArg(object);
20031 using Fn =
20032 bool (*)(JSContext*, Handle<NativeObject*>, int32_t, MutableHandleValue);
20033 callVM<Fn, js::GetSparseElementHelper>(lir);
// VM call implementing a native [[Get]] on an element, with the object
// itself as the receiver (passed boxed as a Value).
20036 void CodeGenerator::visitCallNativeGetElement(LCallNativeGetElement* lir) {
20037 Register object = ToRegister(lir->object());
20038 Register index = ToRegister(lir->index());
20040 pushArg(index);
// Receiver is the same object, boxed into a Value for the call.
20041 pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
20042 pushArg(object);
20044 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
20045 MutableHandleValue);
20046 callVM<Fn, js::NativeGetElement>(lir);
// Same as visitCallNativeGetElement, but with an explicit |receiver| Value
// (the super.x[i] case, where receiver != object).
20049 void CodeGenerator::visitCallNativeGetElementSuper(
20050 LCallNativeGetElementSuper* lir) {
20051 Register object = ToRegister(lir->object());
20052 Register index = ToRegister(lir->index());
20053 ValueOperand receiver =
20054 ToValue(lir, LCallNativeGetElementSuper::ReceiverIndex);
20056 pushArg(index);
20057 pushArg(receiver);
20058 pushArg(object);
20060 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
20061 MutableHandleValue);
20062 callVM<Fn, js::NativeGetElement>(lir);
// Test whether |object| has a sparse element at |index| by calling the pure
// C++ helper with a stack-allocated Value out-param. If the helper fails we
// bail out (after popping the reserved stack slot on that path).
20065 void CodeGenerator::visitCallObjectHasSparseElement(
20066 LCallObjectHasSparseElement* lir) {
20067 Register object = ToRegister(lir->object());
20068 Register index = ToRegister(lir->index());
20069 Register temp0 = ToRegister(lir->temp0());
20070 Register temp1 = ToRegister(lir->temp1());
20071 Register output = ToRegister(lir->output());
// Reserve stack space for the result Value and pass its address (temp1).
20073 masm.reserveStack(sizeof(Value));
20074 masm.moveStackPtrTo(temp1);
20076 using Fn = bool (*)(JSContext*, NativeObject*, int32_t, Value*);
20077 masm.setupAlignedABICall();
20078 masm.loadJSContext(temp0);
20079 masm.passABIArg(temp0);
20080 masm.passABIArg(object);
20081 masm.passABIArg(index);
20082 masm.passABIArg(temp1);
20083 masm.callWithABI<Fn, HasNativeElementPure>();
20084 masm.storeCallPointerResult(temp0);
// On failure pop the slot before jumping to the bailout, and keep the
// framePushed bookkeeping consistent on the success path.
20086 Label bail, ok;
20087 uint32_t framePushed = masm.framePushed();
20088 masm.branchIfTrueBool(temp0, &ok);
20089 masm.adjustStack(sizeof(Value));
20090 masm.jump(&bail);
20092 masm.bind(&ok);
20093 masm.setFramePushed(framePushed);
// Read the boolean result out of the reserved Value slot, then pop it.
20094 masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
20095 masm.adjustStack(sizeof(Value));
20097 bailoutFrom(&bail, lir->snapshot());
// BigInt.asIntN with a dynamic bit count: always a VM call.
20100 void CodeGenerator::visitBigIntAsIntN(LBigIntAsIntN* ins) {
20101 Register bits = ToRegister(ins->bits());
20102 Register input = ToRegister(ins->input());
20104 pushArg(bits);
20105 pushArg(input);
20107 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
20108 callVM<Fn, jit::BigIntAsIntN>(ins);
// BigInt.asIntN(64, x) specialization: if the input already fits in an
// int64 with a matching sign, reuse it unchanged; otherwise allocate a new
// BigInt from the truncated int64 value.
20111 void CodeGenerator::visitBigIntAsIntN64(LBigIntAsIntN64* ins) {
20112 Register input = ToRegister(ins->input());
20113 Register temp = ToRegister(ins->temp());
20114 Register64 temp64 = ToRegister64(ins->temp64());
20115 Register output = ToRegister(ins->output());
20117 Label done, create;
// Optimistically return the input BigInt itself.
20119 masm.movePtr(input, output);
20121 // Load the BigInt value as an int64.
20122 masm.loadBigInt64(input, temp64);
20124 // Create a new BigInt when the input exceeds the int64 range.
20125 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
20126 Imm32(64 / BigInt::DigitBits), &create);
20128 // And create a new BigInt when the value and the BigInt have different signs.
20129 Label nonNegative;
20130 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
20131 masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &create);
20132 masm.jump(&done);
20134 masm.bind(&nonNegative);
20135 masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &done);
20137 masm.bind(&create);
20138 emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
20140 masm.bind(&done);
// BigInt.asIntN(32, x) specialization: reuse the input when its single
// digit already fits int32; otherwise sign-adjust the digit, sign-extend to
// 64 bits and allocate a new BigInt.
20143 void CodeGenerator::visitBigIntAsIntN32(LBigIntAsIntN32* ins) {
20144 Register input = ToRegister(ins->input());
20145 Register temp = ToRegister(ins->temp());
20146 Register64 temp64 = ToRegister64(ins->temp64());
20147 Register output = ToRegister(ins->output());
20149 Label done, create;
// Optimistically return the input BigInt itself.
20151 masm.movePtr(input, output);
20153 // Load the absolute value of the first digit.
20154 masm.loadFirstBigIntDigitOrZero(input, temp);
20156 // If the absolute value exceeds the int32 range, create a new BigInt.
20157 masm.branchPtr(Assembler::Above, temp, Imm32(INT32_MAX), &create);
20159 // Also create a new BigInt if we have more than one digit.
20160 masm.branch32(Assembler::BelowOrEqual,
20161 Address(input, BigInt::offsetOfLength()), Imm32(1), &done);
20163 masm.bind(&create);
20165 // |temp| stores the absolute value, negate it when the sign flag is set.
20166 Label nonNegative;
20167 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
20168 masm.negPtr(temp);
20169 masm.bind(&nonNegative);
// Sign-extend the 32-bit result into the 64-bit temp for allocation.
20171 masm.move32To64SignExtend(temp, temp64);
20172 emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
20174 masm.bind(&done);
// BigInt.asUintN with a dynamic bit count: always a VM call.
20177 void CodeGenerator::visitBigIntAsUintN(LBigIntAsUintN* ins) {
20178 Register bits = ToRegister(ins->bits());
20179 Register input = ToRegister(ins->input());
20181 pushArg(bits);
20182 pushArg(input);
20184 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
20185 callVM<Fn, jit::BigIntAsUintN>(ins);
// BigInt.asUintN(64, x) specialization: reuse the input when it already is
// a non-negative value fitting in a uint64; otherwise allocate a new BigInt
// from the truncated uint64 value.
20188 void CodeGenerator::visitBigIntAsUintN64(LBigIntAsUintN64* ins) {
20189 Register input = ToRegister(ins->input());
20190 Register temp = ToRegister(ins->temp());
20191 Register64 temp64 = ToRegister64(ins->temp64());
20192 Register output = ToRegister(ins->output());
20194 Label done, create;
// Optimistically return the input BigInt itself.
20196 masm.movePtr(input, output);
20198 // Load the BigInt value as an uint64.
20199 masm.loadBigInt64(input, temp64);
20201 // Create a new BigInt when the input exceeds the uint64 range.
20202 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
20203 Imm32(64 / BigInt::DigitBits), &create);
20205 // And create a new BigInt when the input has the sign flag set.
20206 masm.branchIfBigIntIsNonNegative(input, &done);
20208 masm.bind(&create);
20209 emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
20211 masm.bind(&done);
// BigInt.asUintN(32, x) specialization: reuse the input when it is a
// non-negative single-digit value within uint32 range; otherwise compute
// the signed value, zero-extend its low 32 bits and allocate a new BigInt.
20214 void CodeGenerator::visitBigIntAsUintN32(LBigIntAsUintN32* ins) {
20215 Register input = ToRegister(ins->input());
20216 Register temp = ToRegister(ins->temp());
20217 Register64 temp64 = ToRegister64(ins->temp64());
20218 Register output = ToRegister(ins->output());
20220 Label done, create;
// Optimistically return the input BigInt itself.
20222 masm.movePtr(input, output);
20224 // Load the absolute value of the first digit.
20225 masm.loadFirstBigIntDigitOrZero(input, temp);
20227 // If the absolute value exceeds the uint32 range, create a new BigInt.
// (Only possible on 64-bit targets, where a digit is wider than 32 bits.)
20228 #if JS_PUNBOX64
20229 masm.branchPtr(Assembler::Above, temp, ImmWord(UINT32_MAX), &create);
20230 #endif
20232 // Also create a new BigInt if we have more than one digit.
20233 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
20234 Imm32(1), &create);
20236 // And create a new BigInt when the input has the sign flag set.
20237 masm.branchIfBigIntIsNonNegative(input, &done);
20239 masm.bind(&create);
20241 // |temp| stores the absolute value, negate it when the sign flag is set.
20242 Label nonNegative;
20243 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
20244 masm.negPtr(temp);
20245 masm.bind(&nonNegative);
// Zero-extend the (possibly wrapped) low 32 bits for the new BigInt.
20247 masm.move32To64ZeroExtend(temp, temp64);
20248 emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
20250 masm.bind(&done);
// Guard that the input Value is not a GC thing (string, object, etc.);
// bail out when it is.
20253 void CodeGenerator::visitGuardNonGCThing(LGuardNonGCThing* ins) {
20254 ValueOperand input = ToValue(ins, LGuardNonGCThing::InputIndex);
20256 Label bail;
20257 masm.branchTestGCThing(Assembler::Equal, input, &bail);
20258 bailoutFrom(&bail, ins->snapshot());
// Canonicalize a non-GC-thing Value for Map/Set hashing (implemented in the
// MacroAssembler; uses a float temp for number normalization).
20261 void CodeGenerator::visitToHashableNonGCThing(LToHashableNonGCThing* ins) {
20262 ValueOperand input = ToValue(ins, LToHashableNonGCThing::InputIndex);
20263 FloatRegister tempFloat = ToFloatRegister(ins->temp0());
20264 ValueOperand output = ToOutValue(ins);
20266 masm.toHashableNonGCThing(input, output, tempFloat);
// Atomize a string for hashing: already-atomized strings pass through, a
// recent-atom cache lookup is tried next, and the VM AtomizeString call is
// the out-of-line fallback.
20269 void CodeGenerator::visitToHashableString(LToHashableString* ins) {
20270 Register input = ToRegister(ins->input());
20271 Register output = ToRegister(ins->output());
20273 using Fn = JSAtom* (*)(JSContext*, JSString*);
20274 auto* ool = oolCallVM<Fn, js::AtomizeString>(ins, ArgList(input),
20275 StoreRegisterTo(output));
20277 Label isAtom;
20278 masm.branchTest32(Assembler::NonZero,
20279 Address(input, JSString::offsetOfFlags()),
20280 Imm32(JSString::ATOM_BIT), &isAtom);
// Not an atom: try the last-lookups atom cache, else go to the VM call.
20282 masm.lookupStringInAtomCacheLastLookups(input, output, ool->entry());
20283 masm.bind(&isAtom);
20284 masm.movePtr(input, output);
20285 masm.bind(ool->rejoin());
// Canonicalize an arbitrary Value for hashing. String inputs may need
// atomizing; the OOL VM call operates on the output's scratch register,
// which masm.toHashableValue fills before branching to the OOL entry.
20288 void CodeGenerator::visitToHashableValue(LToHashableValue* ins) {
20289 ValueOperand input = ToValue(ins, LToHashableValue::InputIndex);
20290 FloatRegister tempFloat = ToFloatRegister(ins->temp0());
20291 ValueOperand output = ToOutValue(ins);
20293 Register str = output.scratchReg();
20295 using Fn = JSAtom* (*)(JSContext*, JSString*);
20296 auto* ool =
20297 oolCallVM<Fn, js::AtomizeString>(ins, ArgList(str), StoreRegisterTo(str));
20299 masm.toHashableValue(input, output, tempFloat, ool->entry(), ool->rejoin());
// Compute the Map/Set hash of a non-GC-thing Value into |output|.
20302 void CodeGenerator::visitHashNonGCThing(LHashNonGCThing* ins) {
20303 ValueOperand input = ToValue(ins, LHashNonGCThing::InputIndex);
20304 Register temp = ToRegister(ins->temp0());
20305 Register output = ToRegister(ins->output());
20307 masm.prepareHashNonGCThing(input, output, temp);
// Compute the Map/Set hash of a string into |output|.
20310 void CodeGenerator::visitHashString(LHashString* ins) {
20311 Register input = ToRegister(ins->input());
20312 Register temp = ToRegister(ins->temp0());
20313 Register output = ToRegister(ins->output());
20315 masm.prepareHashString(input, output, temp);
// Compute the Map/Set hash of a symbol into |output|.
20318 void CodeGenerator::visitHashSymbol(LHashSymbol* ins) {
20319 Register input = ToRegister(ins->input());
20320 Register output = ToRegister(ins->output());
20322 masm.prepareHashSymbol(input, output);
// Compute the Map/Set hash of a BigInt into |output| (needs three temps).
20325 void CodeGenerator::visitHashBigInt(LHashBigInt* ins) {
20326 Register input = ToRegister(ins->input());
20327 Register temp0 = ToRegister(ins->temp0());
20328 Register temp1 = ToRegister(ins->temp1());
20329 Register temp2 = ToRegister(ins->temp2());
20330 Register output = ToRegister(ins->output());
20332 masm.prepareHashBigInt(input, output, temp0, temp1, temp2);
// Compute the hash of an object Value for lookups in the given Set object
// (the set is needed because object hashing is table-specific).
20335 void CodeGenerator::visitHashObject(LHashObject* ins) {
20336 Register setObj = ToRegister(ins->setObject());
20337 ValueOperand input = ToValue(ins, LHashObject::InputIndex);
20338 Register temp0 = ToRegister(ins->temp0());
20339 Register temp1 = ToRegister(ins->temp1());
20340 Register temp2 = ToRegister(ins->temp2());
20341 Register temp3 = ToRegister(ins->temp3());
20342 Register output = ToRegister(ins->output());
20344 masm.prepareHashObject(setObj, input, output, temp0, temp1, temp2, temp3);
// Compute the hash of an arbitrary Value for lookups in the given Set
// object.
20347 void CodeGenerator::visitHashValue(LHashValue* ins) {
20348 Register setObj = ToRegister(ins->setObject());
20349 ValueOperand input = ToValue(ins, LHashValue::InputIndex);
20350 Register temp0 = ToRegister(ins->temp0());
20351 Register temp1 = ToRegister(ins->temp1());
20352 Register temp2 = ToRegister(ins->temp2());
20353 Register temp3 = ToRegister(ins->temp3());
20354 Register output = ToRegister(ins->output());
20356 masm.prepareHashValue(setObj, input, output, temp0, temp1, temp2, temp3);
// Inline Set.prototype.has for non-BigInt keys, using the precomputed
// |hash|; result boolean goes in |output|.
20359 void CodeGenerator::visitSetObjectHasNonBigInt(LSetObjectHasNonBigInt* ins) {
20360 Register setObj = ToRegister(ins->setObject());
20361 ValueOperand input = ToValue(ins, LSetObjectHasNonBigInt::InputIndex);
20362 Register hash = ToRegister(ins->hash());
20363 Register temp0 = ToRegister(ins->temp0());
20364 Register temp1 = ToRegister(ins->temp1());
20365 Register output = ToRegister(ins->output());
20367 masm.setObjectHasNonBigInt(setObj, input, hash, output, temp0, temp1);
// Inline Set.prototype.has for BigInt keys (needs extra temps for the
// BigInt equality comparison).
20370 void CodeGenerator::visitSetObjectHasBigInt(LSetObjectHasBigInt* ins) {
20371 Register setObj = ToRegister(ins->setObject());
20372 ValueOperand input = ToValue(ins, LSetObjectHasBigInt::InputIndex);
20373 Register hash = ToRegister(ins->hash());
20374 Register temp0 = ToRegister(ins->temp0());
20375 Register temp1 = ToRegister(ins->temp1());
20376 Register temp2 = ToRegister(ins->temp2());
20377 Register temp3 = ToRegister(ins->temp3());
20378 Register output = ToRegister(ins->output());
20380 masm.setObjectHasBigInt(setObj, input, hash, output, temp0, temp1, temp2,
20381 temp3);
// Inline Set.prototype.has for arbitrary Value keys.
20384 void CodeGenerator::visitSetObjectHasValue(LSetObjectHasValue* ins) {
20385 Register setObj = ToRegister(ins->setObject());
20386 ValueOperand input = ToValue(ins, LSetObjectHasValue::InputIndex);
20387 Register hash = ToRegister(ins->hash());
20388 Register temp0 = ToRegister(ins->temp0());
20389 Register temp1 = ToRegister(ins->temp1());
20390 Register temp2 = ToRegister(ins->temp2());
20391 Register temp3 = ToRegister(ins->temp3());
20392 Register output = ToRegister(ins->output());
20394 masm.setObjectHasValue(setObj, input, hash, output, temp0, temp1, temp2,
20395 temp3);
// Set.prototype.has fallback: go through the VM instead of inline code.
20398 void CodeGenerator::visitSetObjectHasValueVMCall(
20399 LSetObjectHasValueVMCall* ins) {
20400 pushArg(ToValue(ins, LSetObjectHasValueVMCall::InputIndex));
20401 pushArg(ToRegister(ins->setObject()));
20403 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
20404 callVM<Fn, jit::SetObjectHas>(ins);
// Load the size of a Set object into |output|.
20407 void CodeGenerator::visitSetObjectSize(LSetObjectSize* ins) {
20408 Register setObj = ToRegister(ins->setObject());
20409 Register output = ToRegister(ins->output());
20411 masm.loadSetObjectSize(setObj, output);
// Inline Map.prototype.has for non-BigInt keys.
20414 void CodeGenerator::visitMapObjectHasNonBigInt(LMapObjectHasNonBigInt* ins) {
20415 Register mapObj = ToRegister(ins->mapObject());
20416 ValueOperand input = ToValue(ins, LMapObjectHasNonBigInt::InputIndex);
20417 Register hash = ToRegister(ins->hash());
20418 Register temp0 = ToRegister(ins->temp0());
20419 Register temp1 = ToRegister(ins->temp1());
20420 Register output = ToRegister(ins->output());
20422 masm.mapObjectHasNonBigInt(mapObj, input, hash, output, temp0, temp1);
// Inline Map.prototype.has for BigInt keys.
20425 void CodeGenerator::visitMapObjectHasBigInt(LMapObjectHasBigInt* ins) {
20426 Register mapObj = ToRegister(ins->mapObject());
20427 ValueOperand input = ToValue(ins, LMapObjectHasBigInt::InputIndex);
20428 Register hash = ToRegister(ins->hash());
20429 Register temp0 = ToRegister(ins->temp0());
20430 Register temp1 = ToRegister(ins->temp1());
20431 Register temp2 = ToRegister(ins->temp2());
20432 Register temp3 = ToRegister(ins->temp3());
20433 Register output = ToRegister(ins->output());
20435 masm.mapObjectHasBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
20436 temp3);
// Inline Map.prototype.has for arbitrary Value keys.
20439 void CodeGenerator::visitMapObjectHasValue(LMapObjectHasValue* ins) {
20440 Register mapObj = ToRegister(ins->mapObject());
20441 ValueOperand input = ToValue(ins, LMapObjectHasValue::InputIndex);
20442 Register hash = ToRegister(ins->hash());
20443 Register temp0 = ToRegister(ins->temp0());
20444 Register temp1 = ToRegister(ins->temp1());
20445 Register temp2 = ToRegister(ins->temp2());
20446 Register temp3 = ToRegister(ins->temp3());
20447 Register output = ToRegister(ins->output());
20449 masm.mapObjectHasValue(mapObj, input, hash, output, temp0, temp1, temp2,
20450 temp3);
// Map.prototype.has fallback: go through the VM instead of inline code.
20453 void CodeGenerator::visitMapObjectHasValueVMCall(
20454 LMapObjectHasValueVMCall* ins) {
20455 pushArg(ToValue(ins, LMapObjectHasValueVMCall::InputIndex));
20456 pushArg(ToRegister(ins->mapObject()));
20458 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
20459 callVM<Fn, jit::MapObjectHas>(ins);
// Inline Map.prototype.get for non-BigInt keys; the boxed result is written
// to the output ValueOperand (its scratch register doubles as a temp).
20462 void CodeGenerator::visitMapObjectGetNonBigInt(LMapObjectGetNonBigInt* ins) {
20463 Register mapObj = ToRegister(ins->mapObject());
20464 ValueOperand input = ToValue(ins, LMapObjectGetNonBigInt::InputIndex);
20465 Register hash = ToRegister(ins->hash());
20466 Register temp0 = ToRegister(ins->temp0());
20467 Register temp1 = ToRegister(ins->temp1());
20468 ValueOperand output = ToOutValue(ins);
20470 masm.mapObjectGetNonBigInt(mapObj, input, hash, output, temp0, temp1,
20471 output.scratchReg());
// Inline Map.prototype.get for BigInt keys.
20474 void CodeGenerator::visitMapObjectGetBigInt(LMapObjectGetBigInt* ins) {
20475 Register mapObj = ToRegister(ins->mapObject());
20476 ValueOperand input = ToValue(ins, LMapObjectGetBigInt::InputIndex);
20477 Register hash = ToRegister(ins->hash());
20478 Register temp0 = ToRegister(ins->temp0());
20479 Register temp1 = ToRegister(ins->temp1());
20480 Register temp2 = ToRegister(ins->temp2());
20481 Register temp3 = ToRegister(ins->temp3());
20482 ValueOperand output = ToOutValue(ins);
20484 masm.mapObjectGetBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
20485 temp3, output.scratchReg());
// Inline Map.prototype.get for arbitrary Value keys.
20488 void CodeGenerator::visitMapObjectGetValue(LMapObjectGetValue* ins) {
20489 Register mapObj = ToRegister(ins->mapObject());
20490 ValueOperand input = ToValue(ins, LMapObjectGetValue::InputIndex);
20491 Register hash = ToRegister(ins->hash());
20492 Register temp0 = ToRegister(ins->temp0());
20493 Register temp1 = ToRegister(ins->temp1());
20494 Register temp2 = ToRegister(ins->temp2());
20495 Register temp3 = ToRegister(ins->temp3());
20496 ValueOperand output = ToOutValue(ins);
20498 masm.mapObjectGetValue(mapObj, input, hash, output, temp0, temp1, temp2,
20499 temp3, output.scratchReg());
// Map.prototype.get fallback: go through the VM instead of inline code.
20502 void CodeGenerator::visitMapObjectGetValueVMCall(
20503 LMapObjectGetValueVMCall* ins) {
20504 pushArg(ToValue(ins, LMapObjectGetValueVMCall::InputIndex));
20505 pushArg(ToRegister(ins->mapObject()));
20507 using Fn =
20508 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
20509 callVM<Fn, jit::MapObjectGet>(ins);
// Load the size of a Map object into |output|.
20512 void CodeGenerator::visitMapObjectSize(LMapObjectSize* ins) {
20513 Register mapObj = ToRegister(ins->mapObject());
20514 Register output = ToRegister(ins->output());
20516 masm.loadMapObjectSize(mapObj, output);
// Emit a direct Ion-to-wasm call for LIonToWasmCall{,V,I64}.
//
// Walks the wasm signature with a WasmABIArgGenerator, records where each
// LIR operand already lives (register args are asserted to be in place;
// stack args are described for GenerateDirectCallFromJit), DEBUG-checks the
// result register against the signature, then emits the call and a
// safepoint. The instance object is added to the IonScript constant pool so
// it stays alive and traced as long as the compiled code does.
20519 template <size_t NumDefs>
20520 void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
20521 wasm::JitCallStackArgVector stackArgs;
20522 masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
20523 if (masm.oom()) {
20524 return;
20527 MIonToWasmCall* mir = lir->mir();
20528 const wasm::FuncExport& funcExport = mir->funcExport();
20529 const wasm::FuncType& sig =
20530 mir->instance()->metadata().getFuncExportType(funcExport);
// Classify each operand according to the wasm ABI.
20532 WasmABIArgGenerator abi;
20533 for (size_t i = 0; i < lir->numOperands(); i++) {
20534 MIRType argMir;
20535 switch (sig.args()[i].kind()) {
20536 case wasm::ValType::I32:
20537 case wasm::ValType::I64:
20538 case wasm::ValType::F32:
20539 case wasm::ValType::F64:
20540 argMir = sig.args()[i].toMIRType();
20541 break;
20542 case wasm::ValType::V128:
20543 MOZ_CRASH("unexpected argument type when calling from ion to wasm");
20544 case wasm::ValType::Ref:
20545 // temporarilyUnsupportedReftypeForEntry() restricts args to externref
20546 MOZ_RELEASE_ASSERT(sig.args()[i].refType().isExtern());
20547 // Argument is boxed on the JS side to an anyref, so passed as a
20548 // pointer here.
20549 argMir = sig.args()[i].toMIRType();
20550 break;
20553 ABIArg arg = abi.next(argMir);
20554 switch (arg.kind()) {
20555 case ABIArg::GPR:
20556 case ABIArg::FPU: {
// Register arguments were already placed by regalloc; record a no-op.
20557 MOZ_ASSERT(ToAnyRegister(lir->getOperand(i)) == arg.reg());
20558 stackArgs.infallibleEmplaceBack(wasm::JitCallStackArg());
20559 break;
20561 case ABIArg::Stack: {
20562 const LAllocation* larg = lir->getOperand(i);
20563 if (larg->isConstant()) {
20564 stackArgs.infallibleEmplaceBack(ToInt32(larg));
20565 } else if (larg->isGeneralReg()) {
20566 stackArgs.infallibleEmplaceBack(ToRegister(larg));
20567 } else if (larg->isFloatReg()) {
20568 stackArgs.infallibleEmplaceBack(ToFloatRegister(larg));
20569 } else {
20570 // Always use the stack pointer here because GenerateDirectCallFromJit
20571 // depends on this.
20572 Address addr = ToAddress<BaseRegForAddress::SP>(larg);
20573 stackArgs.infallibleEmplaceBack(addr);
20575 break;
20577 #ifdef JS_CODEGEN_REGISTER_PAIR
20578 case ABIArg::GPR_PAIR: {
20579 MOZ_CRASH(
20580 "no way to pass i64, and wasm uses hardfp for function calls");
20582 #endif
20583 case ABIArg::Uninitialized: {
20584 MOZ_CRASH("Uninitialized ABIArg kind");
// DEBUG-only checks that the LIR output matches the signature's result.
20589 const wasm::ValTypeVector& results = sig.results();
20590 if (results.length() == 0) {
20591 MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
20592 } else {
20593 MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
20594 switch (results[0].kind()) {
20595 case wasm::ValType::I32:
20596 MOZ_ASSERT(lir->mir()->type() == MIRType::Int32);
20597 MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
20598 break;
20599 case wasm::ValType::I64:
20600 MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
20601 MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
20602 break;
20603 case wasm::ValType::F32:
20604 MOZ_ASSERT(lir->mir()->type() == MIRType::Float32);
20605 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnFloat32Reg);
20606 break;
20607 case wasm::ValType::F64:
20608 MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
20609 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
20610 break;
20611 case wasm::ValType::V128:
20612 MOZ_CRASH("unexpected return type when calling from ion to wasm");
20613 case wasm::ValType::Ref:
20614 // The wasm stubs layer unboxes anything that needs to be unboxed
20615 // and leaves it in a Value. A FuncRef/EqRef we could in principle
20616 // leave it as a raw object pointer but for now it complicates the
20617 // API to do so.
20618 MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
20619 break;
20623 WasmInstanceObject* instObj = lir->mir()->instanceObject();
20625 Register scratch = ToRegister(lir->temp());
20627 uint32_t callOffset;
// Make sure there is room for an OSI point before emitting the call.
20628 ensureOsiSpace();
20629 GenerateDirectCallFromJit(masm, funcExport, instObj->instance(), stackArgs,
20630 scratch, &callOffset);
20632 // Add the instance object to the constant pool, so it is transferred to
20633 // the owning IonScript and so that it gets traced as long as the IonScript
20634 // lives.
20636 uint32_t unused;
20637 masm.propagateOOM(graph.addConstantToPool(ObjectValue(*instObj), &unused));
20639 markSafepointAt(callOffset, lir);
// Ion -> wasm call whose wasm result (if any) fits a plain typed register.
// All of the argument marshalling and call emission is shared with the other
// IonToWasmCall variants in emitIonToWasmCallBase.
void CodeGenerator::visitIonToWasmCall(LIonToWasmCall* lir) {
  emitIonToWasmCallBase(lir);
}
// Ion -> wasm call producing a boxed Value result (void results and ref
// results land here per the asserts in emitIonToWasmCallBase).
void CodeGenerator::visitIonToWasmCallV(LIonToWasmCallV* lir) {
  emitIonToWasmCallBase(lir);
}
// Ion -> wasm call producing an int64 result (returned in ReturnReg64, see
// the I64 case in emitIonToWasmCallBase).
void CodeGenerator::visitIonToWasmCallI64(LIonToWasmCallI64* lir) {
  emitIonToWasmCallBase(lir);
}
20652 void CodeGenerator::visitWasmNullConstant(LWasmNullConstant* lir) {
20653 masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
// Emit a full memory barrier for a wasm fence instruction.
void CodeGenerator::visitWasmFence(LWasmFence* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  masm.memoryBarrier(MembarFull);
}
// Convert a JS Value into a wasm anyref. The masm helper handles the cases it
// can inline and branches to the OOL VM call (wasm::AnyRef::boxValue) for
// anything else; the result ends up in |output| either way.
void CodeGenerator::visitWasmAnyRefFromJSValue(LWasmAnyRefFromJSValue* lir) {
  ValueOperand input = ToValue(lir, LWasmAnyRefFromJSValue::InputIndex);
  Register output = ToRegister(lir->output());
  FloatRegister tempFloat = ToFloatRegister(lir->temp0());

  using Fn = JSObject* (*)(JSContext* cx, HandleValue value);
  OutOfLineCode* oolBoxValue = oolCallVM<Fn, wasm::AnyRef::boxValue>(
      lir, ArgList(input), StoreRegisterTo(output));
  masm.convertValueToWasmAnyRef(input, output, tempFloat, oolBoxValue->entry());
  masm.bind(oolBoxValue->rejoin());
}
20673 void CodeGenerator::visitWasmAnyRefFromJSObject(LWasmAnyRefFromJSObject* lir) {
20674 Register input = ToRegister(lir->input());
20675 Register output = ToRegister(lir->output());
20676 masm.convertObjectToWasmAnyRef(input, output);
20679 void CodeGenerator::visitWasmAnyRefFromJSString(LWasmAnyRefFromJSString* lir) {
20680 Register input = ToRegister(lir->input());
20681 Register output = ToRegister(lir->output());
20682 masm.convertStringToWasmAnyRef(input, output);
20685 void CodeGenerator::visitWasmNewI31Ref(LWasmNewI31Ref* lir) {
20686 if (lir->value()->isConstant()) {
20687 // i31ref are often created with constants. If that's the case we will
20688 // do the operation statically here. This is similar to what is done
20689 // in masm.truncate32ToWasmI31Ref.
20690 Register output = ToRegister(lir->output());
20691 uint32_t value =
20692 static_cast<uint32_t>(lir->value()->toConstant()->toInt32());
20693 uintptr_t ptr = wasm::AnyRef::fromUint32Truncate(value).rawValue();
20694 masm.movePtr(ImmWord(ptr), output);
20695 } else {
20696 Register value = ToRegister(lir->value());
20697 Register output = ToRegister(lir->output());
20698 masm.truncate32ToWasmI31Ref(value, output);
20702 void CodeGenerator::visitWasmI31RefGet(LWasmI31RefGet* lir) {
20703 Register value = ToRegister(lir->value());
20704 Register output = ToRegister(lir->output());
20705 if (lir->mir()->wideningOp() == wasm::FieldWideningOp::Signed) {
20706 masm.convertWasmI31RefTo32Signed(value, output);
20707 } else {
20708 masm.convertWasmI31RefTo32Unsigned(value, output);
20712 #ifdef FUZZING_JS_FUZZILLI
// Hash the raw bits of a double into |output| as the 32-bit sum of the bit
// pattern's low and high halves. Clobbers |scratch|.
void CodeGenerator::emitFuzzilliHashDouble(FloatRegister floatDouble,
                                           Register scratch, Register output) {
# ifdef JS_PUNBOX64
  Register64 reg64_1(scratch);
  Register64 reg64_2(output);
  masm.moveDoubleToGPR64(floatDouble, reg64_1);
  masm.move64(reg64_1, reg64_2);
  // High 32 bits into |output|; |scratch| still holds the full bit pattern,
  // of which add32 consumes the low 32 bits.
  masm.rshift64(Imm32(32), reg64_2);
  masm.add32(scratch, output);
# else
  // On 32-bit targets the two halves of the double land directly in the
  // register pair, so a single add32 combines them.
  Register64 reg64(scratch, output);
  masm.moveDoubleToGPR64(floatDouble, reg64);
  masm.add32(scratch, output);
# endif
}
// Hash a JSObject* into |output| via a VM call (FuzzilliHashObjectInl).
// There is no inline fast path: we jump straight to the OOL call and rejoin.
void CodeGenerator::emitFuzzilliHashObject(LInstruction* lir, Register obj,
                                           Register output) {
  using Fn = void (*)(JSContext* cx, JSObject* obj, uint32_t* out);
  OutOfLineCode* ool = oolCallVM<Fn, FuzzilliHashObjectInl>(
      lir, ArgList(obj), StoreRegisterTo(output));

  masm.jump(ool->entry());
  masm.bind(ool->rejoin());
}
// Hash a BigInt* into |output| by calling js::FuzzilliHashBigInt through the
// ABI. All registers except |output| are preserved around the call.
void CodeGenerator::emitFuzzilliHashBigInt(Register bigInt, Register output) {
  LiveRegisterSet volatileRegs(GeneralRegisterSet::All(),
                               FloatRegisterSet::All());
  volatileRegs.takeUnchecked(output);
  masm.PushRegsInMask(volatileRegs);

  using Fn = uint32_t (*)(BigInt* bigInt);
  // |output| doubles as the scratch register for the unaligned ABI call.
  masm.setupUnalignedABICall(output);
  masm.passABIArg(bigInt);
  masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
  masm.storeCallInt32Result(output);

  masm.PopRegsInMask(volatileRegs);
}
// Hash a boxed Value of unknown type into |output|. Dispatches on the value's
// tag at run time: BigInts and objects take dedicated hash paths; numeric and
// numeric-like values (int32, null, undefined, boolean, double) are funneled
// into a double and hashed by emitFuzzilliHashDouble; anything else hashes
// to 0.
void CodeGenerator::visitFuzzilliHashV(LFuzzilliHashV* ins) {
  MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Value);

  ValueOperand value = ToValue(ins, 0);

  Label isDouble, isObject, isBigInt, done;

  FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
  Register scratch = ToRegister(ins->getTemp(0));
  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

# ifdef JS_PUNBOX64
  // On 64-bit, extract the tag into a register (reuses the scratch temp; the
  // tag is fully consumed by the branches below before scratch is clobbered).
  Register tagReg = ToRegister(ins->getTemp(0));
  masm.splitTag(value, tagReg);
# else
  // On 32-bit, the type tag already lives in its own register.
  Register tagReg = value.typeReg();
# endif

  Label noBigInt;
  masm.branchTestBigInt(Assembler::NotEqual, tagReg, &noBigInt);
  masm.unboxBigInt(value, scratch);
  masm.jump(&isBigInt);
  masm.bind(&noBigInt);

  Label noObject;
  masm.branchTestObject(Assembler::NotEqual, tagReg, &noObject);
  masm.unboxObject(value, scratch);
  masm.jump(&isObject);
  masm.bind(&noObject);

  Label noInt32;
  masm.branchTestInt32(Assembler::NotEqual, tagReg, &noInt32);
  masm.unboxInt32(value, scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noInt32);

  // null hashes as the double 1.0 (distinct constant per singleton type;
  // must stay in sync with visitFuzzilliHashT).
  Label noNull;
  masm.branchTestNull(Assembler::NotEqual, tagReg, &noNull);
  masm.move32(Imm32(1), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noNull);

  // undefined hashes as the double 2.0.
  Label noUndefined;
  masm.branchTestUndefined(Assembler::NotEqual, tagReg, &noUndefined);
  masm.move32(Imm32(2), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noUndefined);

  // Booleans hash as 3.0/4.0 (payload 0/1 plus 3), keeping them distinct
  // from the null/undefined constants above.
  Label noBoolean;
  masm.branchTestBoolean(Assembler::NotEqual, tagReg, &noBoolean);
  masm.unboxBoolean(value, scratch);
  masm.add32(Imm32(3), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noBoolean);

  Label noDouble;
  masm.branchTestDouble(Assembler::NotEqual, tagReg, &noDouble);
  masm.unboxDouble(value, scratchFloat);
  // Canonicalize NaNs so the hash is deterministic across runs.
  masm.canonicalizeDoubleIfDeterministic(scratchFloat);

  masm.jump(&isDouble);
  masm.bind(&noDouble);
  // Any remaining tag (e.g. strings/symbols) contributes a constant 0.
  masm.move32(Imm32(0), output);
  masm.jump(&done);

  masm.bind(&isBigInt);
  emitFuzzilliHashBigInt(scratch, output);
  masm.jump(&done);

  masm.bind(&isObject);
  emitFuzzilliHashObject(ins, scratch, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  emitFuzzilliHashDouble(scratchFloat, scratch, output);

  masm.bind(&done);
}
// Typed counterpart of visitFuzzilliHashV: the operand's MIRType is known
// statically, so the dispatch happens at compile time instead of branching on
// the boxed tag. The per-type hash constants (null -> 1, undefined -> 2,
// boolean payload + 3) must match the Value-typed path above.
void CodeGenerator::visitFuzzilliHashT(LFuzzilliHashT* ins) {
  const LAllocation* value = ins->value();
  MIRType mirType = ins->mir()->getOperand(0)->type();

  FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
  Register scratch = ToRegister(ins->getTemp(0));
  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

  if (mirType == MIRType::Object) {
    MOZ_ASSERT(value->isGeneralReg());
    masm.mov(value->toGeneralReg()->reg(), scratch);
    emitFuzzilliHashObject(ins, scratch, output);
  } else if (mirType == MIRType::BigInt) {
    MOZ_ASSERT(value->isGeneralReg());
    masm.mov(value->toGeneralReg()->reg(), scratch);
    emitFuzzilliHashBigInt(scratch, output);
  } else if (mirType == MIRType::Double) {
    MOZ_ASSERT(value->isFloatReg());
    masm.moveDouble(value->toFloatReg()->reg(), scratchFloat);
    // Canonicalize NaNs so the hash is deterministic across runs.
    masm.canonicalizeDoubleIfDeterministic(scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Float32) {
    MOZ_ASSERT(value->isFloatReg());
    // Widen to double so all floating-point inputs hash identically.
    masm.convertFloat32ToDouble(value->toFloatReg()->reg(), scratchFloat);
    masm.canonicalizeDoubleIfDeterministic(scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Int32) {
    MOZ_ASSERT(value->isGeneralReg());
    masm.mov(value->toGeneralReg()->reg(), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Null) {
    // Singleton types carry no payload (bogus allocation); hash a constant.
    MOZ_ASSERT(value->isBogus());
    masm.move32(Imm32(1), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Undefined) {
    MOZ_ASSERT(value->isBogus());
    masm.move32(Imm32(2), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Boolean) {
    MOZ_ASSERT(value->isGeneralReg());
    masm.mov(value->toGeneralReg()->reg(), scratch);
    masm.add32(Imm32(3), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else {
    MOZ_CRASH("unexpected type");
  }
}
// Fold a 32-bit hash contribution into the per-context execution hash:
//   cx->executionHash = rotl32(cx->executionHash + value, 1)
// and bump cx->executionHashInputs as a statistic of how many inputs were
// mixed in.
void CodeGenerator::visitFuzzilliHashStore(LFuzzilliHashStore* ins) {
  const LAllocation* value = ins->value();
  MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Int32);
  MOZ_ASSERT(value->isGeneralReg());

  Register scratchJSContext = ToRegister(ins->getTemp(0));
  Register scratch = ToRegister(ins->getTemp(1));

  masm.loadJSContext(scratchJSContext);

  // stats: executionHashInputs += 1
  Address addrExecHashInputs(scratchJSContext,
                             offsetof(JSContext, executionHashInputs));
  masm.load32(addrExecHashInputs, scratch);
  masm.add32(Imm32(1), scratch);
  masm.store32(scratch, addrExecHashInputs);

  // executionHash = rotl(executionHash + value, 1)
  Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
  masm.load32(addrExecHash, scratch);
  masm.add32(value->toGeneralReg()->reg(), scratch);
  masm.rotateLeft(Imm32(1), scratch, scratch);
  masm.store32(scratch, addrExecHash);
}
20914 #endif
// Compile-time guarantee that CodeGenerator carries no vtable; adding a
// virtual method anywhere in its hierarchy will fail this assertion.
static_assert(!std::is_polymorphic_v<CodeGenerator>,
              "CodeGenerator should not have any virtual methods");
20919 } // namespace jit
20920 } // namespace js