Bug 1889091 - Part 1: Add infallible MacroAssembler::loadArgumentsObjectLength. r...
[gecko.git] / js / src / jit / CodeGenerator.cpp
blobaa8e6e03aca536b9bc3f55f57fc821799a8ce4d8
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/CodeGenerator.h"
9 #include "mozilla/Assertions.h"
10 #include "mozilla/Casting.h"
11 #include "mozilla/DebugOnly.h"
12 #include "mozilla/EndianUtils.h"
13 #include "mozilla/EnumeratedArray.h"
14 #include "mozilla/EnumeratedRange.h"
15 #include "mozilla/EnumSet.h"
16 #include "mozilla/IntegerTypeTraits.h"
17 #include "mozilla/Latin1.h"
18 #include "mozilla/MathAlgorithms.h"
19 #include "mozilla/ScopeExit.h"
20 #include "mozilla/SIMD.h"
22 #include <limits>
23 #include <type_traits>
24 #include <utility>
26 #include "jslibmath.h"
27 #include "jsmath.h"
28 #include "jsnum.h"
30 #include "builtin/MapObject.h"
31 #include "builtin/RegExp.h"
32 #include "builtin/String.h"
33 #include "irregexp/RegExpTypes.h"
34 #include "jit/ABIArgGenerator.h"
35 #include "jit/CompileInfo.h"
36 #include "jit/InlineScriptTree.h"
37 #include "jit/Invalidation.h"
38 #include "jit/IonGenericCallStub.h"
39 #include "jit/IonIC.h"
40 #include "jit/IonScript.h"
41 #include "jit/JitcodeMap.h"
42 #include "jit/JitFrames.h"
43 #include "jit/JitRuntime.h"
44 #include "jit/JitSpewer.h"
45 #include "jit/JitZone.h"
46 #include "jit/Linker.h"
47 #include "jit/MIRGenerator.h"
48 #include "jit/MoveEmitter.h"
49 #include "jit/RangeAnalysis.h"
50 #include "jit/RegExpStubConstants.h"
51 #include "jit/SafepointIndex.h"
52 #include "jit/SharedICHelpers.h"
53 #include "jit/SharedICRegisters.h"
54 #include "jit/VMFunctions.h"
55 #include "jit/WarpSnapshot.h"
56 #include "js/ColumnNumber.h" // JS::LimitedColumnNumberOneOrigin
57 #include "js/experimental/JitInfo.h" // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
58 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
59 #include "js/RegExpFlags.h" // JS::RegExpFlag
60 #include "js/ScalarType.h" // js::Scalar::Type
61 #include "proxy/DOMProxy.h"
62 #include "proxy/ScriptedProxyHandler.h"
63 #include "util/CheckedArithmetic.h"
64 #include "util/Unicode.h"
65 #include "vm/ArrayBufferViewObject.h"
66 #include "vm/AsyncFunction.h"
67 #include "vm/AsyncIteration.h"
68 #include "vm/BuiltinObjectKind.h"
69 #include "vm/FunctionFlags.h" // js::FunctionFlags
70 #include "vm/Interpreter.h"
71 #include "vm/JSAtomUtils.h" // AtomizeString
72 #include "vm/MatchPairs.h"
73 #include "vm/RegExpObject.h"
74 #include "vm/RegExpStatics.h"
75 #include "vm/StaticStrings.h"
76 #include "vm/StringObject.h"
77 #include "vm/StringType.h"
78 #include "vm/TypedArrayObject.h"
79 #include "wasm/WasmCodegenConstants.h"
80 #include "wasm/WasmValType.h"
81 #ifdef MOZ_VTUNE
82 # include "vtune/VTuneWrapper.h"
83 #endif
84 #include "wasm/WasmBinary.h"
85 #include "wasm/WasmGC.h"
86 #include "wasm/WasmGcObject.h"
87 #include "wasm/WasmStubs.h"
89 #include "builtin/Boolean-inl.h"
90 #include "jit/MacroAssembler-inl.h"
91 #include "jit/shared/CodeGenerator-shared-inl.h"
92 #include "jit/TemplateObject-inl.h"
93 #include "jit/VMFunctionList-inl.h"
94 #include "vm/JSScript-inl.h"
95 #include "wasm/WasmInstance-inl.h"
97 using namespace js;
98 using namespace js::jit;
100 using JS::GenericNaN;
101 using mozilla::AssertedCast;
102 using mozilla::CheckedUint32;
103 using mozilla::DebugOnly;
104 using mozilla::FloatingPoint;
105 using mozilla::Maybe;
106 using mozilla::NegativeInfinity;
107 using mozilla::PositiveInfinity;
109 using JS::ExpandoAndGeneration;
111 namespace js {
112 namespace jit {
114 #ifdef CHECK_OSIPOINT_REGISTERS
115 template <class Op>
116 static void HandleRegisterDump(Op op, MacroAssembler& masm,
117 LiveRegisterSet liveRegs, Register activation,
118 Register scratch) {
119 const size_t baseOffset = JitActivation::offsetOfRegs();
121 // Handle live GPRs.
122 for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
123 Register reg = *iter;
124 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
126 if (reg == activation) {
127 // To use the original value of the activation register (that's
128 // now on top of the stack), we need the scratch register.
129 masm.push(scratch);
130 masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
131 op(scratch, dump);
132 masm.pop(scratch);
133 } else {
134 op(reg, dump);
138 // Handle live FPRs.
139 for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
140 FloatRegister reg = *iter;
141 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
142 op(reg, dump);
146 class StoreOp {
147 MacroAssembler& masm;
149 public:
150 explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
152 void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
153 void operator()(FloatRegister reg, Address dump) {
154 if (reg.isDouble()) {
155 masm.storeDouble(reg, dump);
156 } else if (reg.isSingle()) {
157 masm.storeFloat32(reg, dump);
158 } else if (reg.isSimd128()) {
159 MOZ_CRASH("Unexpected case for SIMD");
160 } else {
161 MOZ_CRASH("Unexpected register type.");
166 class VerifyOp {
167 MacroAssembler& masm;
168 Label* failure_;
170 public:
171 VerifyOp(MacroAssembler& masm, Label* failure)
172 : masm(masm), failure_(failure) {}
174 void operator()(Register reg, Address dump) {
175 masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
177 void operator()(FloatRegister reg, Address dump) {
178 if (reg.isDouble()) {
179 ScratchDoubleScope scratch(masm);
180 masm.loadDouble(dump, scratch);
181 masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
182 } else if (reg.isSingle()) {
183 ScratchFloat32Scope scratch(masm);
184 masm.loadFloat32(dump, scratch);
185 masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
186 } else if (reg.isSimd128()) {
187 MOZ_CRASH("Unexpected case for SIMD");
188 } else {
189 MOZ_CRASH("Unexpected register type.");
194 void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
195 // Ensure the live registers stored by callVM did not change between
196 // the call and this OsiPoint. Try-catch relies on this invariant.
198 // Load pointer to the JitActivation in a scratch register.
199 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
200 Register scratch = allRegs.takeAny();
201 masm.push(scratch);
202 masm.loadJitActivation(scratch);
204 // If we should not check registers (because the instruction did not call
205 // into the VM, or a GC happened), we're done.
206 Label failure, done;
207 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
208 masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
210 // Having more than one VM function call made in one visit function at
211 // runtime is a sec-ciritcal error, because if we conservatively assume that
212 // one of the function call can re-enter Ion, then the invalidation process
213 // will potentially add a call at a random location, by patching the code
214 // before the return address.
215 masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
217 // Set checkRegs to 0, so that we don't try to verify registers after we
218 // return from this script to the caller.
219 masm.store32(Imm32(0), checkRegs);
221 // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
222 // temps after calling into the VM. This is fine because no other
223 // instructions (including this OsiPoint) will depend on them. Also
224 // backtracking can also use the same register for an input and an output.
225 // These are marked as clobbered and shouldn't get checked.
226 LiveRegisterSet liveRegs;
227 liveRegs.set() = RegisterSet::Intersect(
228 safepoint->liveRegs().set(),
229 RegisterSet::Not(safepoint->clobberedRegs().set()));
231 VerifyOp op(masm, &failure);
232 HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
234 masm.jump(&done);
236 // Do not profile the callWithABI that occurs below. This is to avoid a
237 // rare corner case that occurs when profiling interacts with itself:
239 // When slow profiling assertions are turned on, FunctionBoundary ops
240 // (which update the profiler pseudo-stack) may emit a callVM, which
241 // forces them to have an osi point associated with them. The
242 // FunctionBoundary for inline function entry is added to the caller's
243 // graph with a PC from the caller's code, but during codegen it modifies
244 // Gecko Profiler instrumentation to add the callee as the current top-most
245 // script. When codegen gets to the OSIPoint, and the callWithABI below is
246 // emitted, the codegen thinks that the current frame is the callee, but
247 // the PC it's using from the OSIPoint refers to the caller. This causes
248 // the profiler instrumentation of the callWithABI below to ASSERT, since
249 // the script and pc are mismatched. To avoid this, we simply omit
250 // instrumentation for these callWithABIs.
252 // Any live register captured by a safepoint (other than temp registers)
253 // must remain unchanged between the call and the OsiPoint instruction.
254 masm.bind(&failure);
255 masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
257 masm.bind(&done);
258 masm.pop(scratch);
261 bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
262 if (!checkOsiPointRegisters) {
263 return false;
266 if (safepoint->liveRegs().emptyGeneral() &&
267 safepoint->liveRegs().emptyFloat()) {
268 return false; // No registers to check.
271 return true;
274 void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
275 if (!shouldVerifyOsiPointRegs(safepoint)) {
276 return;
279 // Set checkRegs to 0. If we perform a VM call, the instruction
280 // will set it to 1.
281 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
282 Register scratch = allRegs.takeAny();
283 masm.push(scratch);
284 masm.loadJitActivation(scratch);
285 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
286 masm.store32(Imm32(0), checkRegs);
287 masm.pop(scratch);
290 static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
291 // Store a copy of all live registers before performing the call.
292 // When we reach the OsiPoint, we can use this to check nothing
293 // modified them in the meantime.
295 // Load pointer to the JitActivation in a scratch register.
296 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
297 Register scratch = allRegs.takeAny();
298 masm.push(scratch);
299 masm.loadJitActivation(scratch);
301 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
302 masm.add32(Imm32(1), checkRegs);
304 StoreOp op(masm);
305 HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
307 masm.pop(scratch);
309 #endif // CHECK_OSIPOINT_REGISTERS
311 // Before doing any call to Cpp, you should ensure that volatile
312 // registers are evicted by the register allocator.
313 void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins) {
314 TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
315 const VMFunctionData& fun = GetVMFunction(id);
317 // Stack is:
318 // ... frame ...
319 // [args]
320 #ifdef DEBUG
321 MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
322 pushedArgs_ = 0;
323 #endif
325 #ifdef CHECK_OSIPOINT_REGISTERS
326 if (shouldVerifyOsiPointRegs(ins->safepoint())) {
327 StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
329 #endif
331 #ifdef DEBUG
332 if (ins->mirRaw()) {
333 MOZ_ASSERT(ins->mirRaw()->isInstruction());
334 MInstruction* mir = ins->mirRaw()->toInstruction();
335 MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
337 // If this MIR instruction has an overridden AliasSet, set the JitRuntime's
338 // disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
339 // RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
340 // interrupt callbacks can call JS (chrome JS or shell testing functions).
341 bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
342 if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
343 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
344 masm.move32(Imm32(1), ReturnReg);
345 masm.store32(ReturnReg, AbsoluteAddress(addr));
348 #endif
350 // Push an exit frame descriptor.
351 masm.PushFrameDescriptor(FrameType::IonJS);
353 // Call the wrapper function. The wrapper is in charge to unwind the stack
354 // when returning from the call. Failures are handled with exceptions based
355 // on the return value of the C functions. To guard the outcome of the
356 // returned value, use another LIR instruction.
357 ensureOsiSpace();
358 uint32_t callOffset = masm.callJit(code);
359 markSafepointAt(callOffset, ins);
361 #ifdef DEBUG
362 // Reset the disallowArbitraryCode flag after the call.
364 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
365 masm.push(ReturnReg);
366 masm.move32(Imm32(0), ReturnReg);
367 masm.store32(ReturnReg, AbsoluteAddress(addr));
368 masm.pop(ReturnReg);
370 #endif
372 // Pop rest of the exit frame and the arguments left on the stack.
373 int framePop =
374 sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
375 masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
377 // Stack is:
378 // ... frame ...
381 template <typename Fn, Fn fn>
382 void CodeGenerator::callVM(LInstruction* ins) {
383 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
384 callVMInternal(id, ins);
387 // ArgSeq store arguments for OutOfLineCallVM.
389 // OutOfLineCallVM are created with "oolCallVM" function. The third argument of
390 // this function is an instance of a class which provides a "generate" in charge
391 // of pushing the argument, with "pushArg", for a VMFunction.
393 // Such list of arguments can be created by using the "ArgList" function which
394 // creates one instance of "ArgSeq", where the type of the arguments are
395 // inferred from the type of the arguments.
397 // The list of arguments must be written in the same order as if you were
398 // calling the function in C++.
400 // Example:
401 // ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
403 template <typename... ArgTypes>
404 class ArgSeq {
405 std::tuple<std::remove_reference_t<ArgTypes>...> args_;
407 template <std::size_t... ISeq>
408 inline void generate(CodeGenerator* codegen,
409 std::index_sequence<ISeq...>) const {
410 // Arguments are pushed in reverse order, from last argument to first
411 // argument.
412 (codegen->pushArg(std::get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
415 public:
416 explicit ArgSeq(ArgTypes&&... args)
417 : args_(std::forward<ArgTypes>(args)...) {}
419 inline void generate(CodeGenerator* codegen) const {
420 generate(codegen, std::index_sequence_for<ArgTypes...>{});
423 #ifdef DEBUG
424 static constexpr size_t numArgs = sizeof...(ArgTypes);
425 #endif
428 template <typename... ArgTypes>
429 inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
430 return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
433 // Store wrappers, to generate the right move of data after the VM call.
435 struct StoreNothing {
436 inline void generate(CodeGenerator* codegen) const {}
437 inline LiveRegisterSet clobbered() const {
438 return LiveRegisterSet(); // No register gets clobbered
442 class StoreRegisterTo {
443 private:
444 Register out_;
446 public:
447 explicit StoreRegisterTo(Register out) : out_(out) {}
449 inline void generate(CodeGenerator* codegen) const {
450 // It's okay to use storePointerResultTo here - the VMFunction wrapper
451 // ensures the upper bytes are zero for bool/int32 return values.
452 codegen->storePointerResultTo(out_);
454 inline LiveRegisterSet clobbered() const {
455 LiveRegisterSet set;
456 set.add(out_);
457 return set;
461 class StoreFloatRegisterTo {
462 private:
463 FloatRegister out_;
465 public:
466 explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
468 inline void generate(CodeGenerator* codegen) const {
469 codegen->storeFloatResultTo(out_);
471 inline LiveRegisterSet clobbered() const {
472 LiveRegisterSet set;
473 set.add(out_);
474 return set;
478 template <typename Output>
479 class StoreValueTo_ {
480 private:
481 Output out_;
483 public:
484 explicit StoreValueTo_(const Output& out) : out_(out) {}
486 inline void generate(CodeGenerator* codegen) const {
487 codegen->storeResultValueTo(out_);
489 inline LiveRegisterSet clobbered() const {
490 LiveRegisterSet set;
491 set.add(out_);
492 return set;
496 template <typename Output>
497 StoreValueTo_<Output> StoreValueTo(const Output& out) {
498 return StoreValueTo_<Output>(out);
501 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
502 class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
503 private:
504 LInstruction* lir_;
505 ArgSeq args_;
506 StoreOutputTo out_;
508 public:
509 OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
510 const StoreOutputTo& out)
511 : lir_(lir), args_(args), out_(out) {}
513 void accept(CodeGenerator* codegen) override {
514 codegen->visitOutOfLineCallVM(this);
517 LInstruction* lir() const { return lir_; }
518 const ArgSeq& args() const { return args_; }
519 const StoreOutputTo& out() const { return out_; }
522 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
523 OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
524 const StoreOutputTo& out) {
525 MOZ_ASSERT(lir->mirRaw());
526 MOZ_ASSERT(lir->mirRaw()->isInstruction());
528 #ifdef DEBUG
529 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
530 const VMFunctionData& fun = GetVMFunction(id);
531 MOZ_ASSERT(fun.explicitArgs == args.numArgs);
532 MOZ_ASSERT(fun.returnsData() !=
533 (std::is_same_v<StoreOutputTo, StoreNothing>));
534 #endif
536 OutOfLineCode* ool = new (alloc())
537 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
538 addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
539 return ool;
542 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
543 void CodeGenerator::visitOutOfLineCallVM(
544 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
545 LInstruction* lir = ool->lir();
547 #ifdef JS_JITSPEW
548 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
549 lir->opName());
550 if (const char* extra = lir->getExtraName()) {
551 JitSpewCont(JitSpew_Codegen, ":%s", extra);
553 JitSpewFin(JitSpew_Codegen);
554 #endif
555 perfSpewer_.recordInstruction(masm, lir);
556 saveLive(lir);
557 ool->args().generate(this);
558 callVM<Fn, fn>(lir);
559 ool->out().generate(this);
560 restoreLiveIgnore(lir, ool->out().clobbered());
561 masm.jump(ool->rejoin());
564 class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
565 private:
566 LInstruction* lir_;
567 size_t cacheIndex_;
568 size_t cacheInfoIndex_;
570 public:
571 OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
572 size_t cacheInfoIndex)
573 : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
575 void bind(MacroAssembler* masm) override {
576 // The binding of the initial jump is done in
577 // CodeGenerator::visitOutOfLineICFallback.
580 size_t cacheIndex() const { return cacheIndex_; }
581 size_t cacheInfoIndex() const { return cacheInfoIndex_; }
582 LInstruction* lir() const { return lir_; }
584 void accept(CodeGenerator* codegen) override {
585 codegen->visitOutOfLineICFallback(this);
589 void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
590 if (cacheIndex == SIZE_MAX) {
591 masm.setOOM();
592 return;
595 DataPtr<IonIC> cache(this, cacheIndex);
596 MInstruction* mir = lir->mirRaw()->toInstruction();
597 cache->setScriptedLocation(mir->block()->info().script(),
598 mir->resumePoint()->pc());
600 Register temp = cache->scratchRegisterForEntryJump();
601 icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
602 masm.jump(Address(temp, 0));
604 MOZ_ASSERT(!icInfo_.empty());
606 OutOfLineICFallback* ool =
607 new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
608 addOutOfLineCode(ool, mir);
610 masm.bind(ool->rejoin());
611 cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
614 void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
615 LInstruction* lir = ool->lir();
616 size_t cacheIndex = ool->cacheIndex();
617 size_t cacheInfoIndex = ool->cacheInfoIndex();
619 DataPtr<IonIC> ic(this, cacheIndex);
621 // Register the location of the OOL path in the IC.
622 ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
624 switch (ic->kind()) {
625 case CacheKind::GetProp:
626 case CacheKind::GetElem: {
627 IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
629 saveLive(lir);
631 pushArg(getPropIC->id());
632 pushArg(getPropIC->value());
633 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
634 pushArg(ImmGCPtr(gen->outerInfo().script()));
636 using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
637 HandleValue, HandleValue, MutableHandleValue);
638 callVM<Fn, IonGetPropertyIC::update>(lir);
640 StoreValueTo(getPropIC->output()).generate(this);
641 restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
643 masm.jump(ool->rejoin());
644 return;
646 case CacheKind::GetPropSuper:
647 case CacheKind::GetElemSuper: {
648 IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
650 saveLive(lir);
652 pushArg(getPropSuperIC->id());
653 pushArg(getPropSuperIC->receiver());
654 pushArg(getPropSuperIC->object());
655 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
656 pushArg(ImmGCPtr(gen->outerInfo().script()));
658 using Fn =
659 bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
660 HandleValue, HandleValue, MutableHandleValue);
661 callVM<Fn, IonGetPropSuperIC::update>(lir);
663 StoreValueTo(getPropSuperIC->output()).generate(this);
664 restoreLiveIgnore(lir,
665 StoreValueTo(getPropSuperIC->output()).clobbered());
667 masm.jump(ool->rejoin());
668 return;
670 case CacheKind::SetProp:
671 case CacheKind::SetElem: {
672 IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
674 saveLive(lir);
676 pushArg(setPropIC->rhs());
677 pushArg(setPropIC->id());
678 pushArg(setPropIC->object());
679 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
680 pushArg(ImmGCPtr(gen->outerInfo().script()));
682 using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
683 HandleObject, HandleValue, HandleValue);
684 callVM<Fn, IonSetPropertyIC::update>(lir);
686 restoreLive(lir);
688 masm.jump(ool->rejoin());
689 return;
691 case CacheKind::GetName: {
692 IonGetNameIC* getNameIC = ic->asGetNameIC();
694 saveLive(lir);
696 pushArg(getNameIC->environment());
697 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
698 pushArg(ImmGCPtr(gen->outerInfo().script()));
700 using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
701 MutableHandleValue);
702 callVM<Fn, IonGetNameIC::update>(lir);
704 StoreValueTo(getNameIC->output()).generate(this);
705 restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
707 masm.jump(ool->rejoin());
708 return;
710 case CacheKind::BindName: {
711 IonBindNameIC* bindNameIC = ic->asBindNameIC();
713 saveLive(lir);
715 pushArg(bindNameIC->environment());
716 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
717 pushArg(ImmGCPtr(gen->outerInfo().script()));
719 using Fn =
720 JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
721 callVM<Fn, IonBindNameIC::update>(lir);
723 StoreRegisterTo(bindNameIC->output()).generate(this);
724 restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
726 masm.jump(ool->rejoin());
727 return;
729 case CacheKind::GetIterator: {
730 IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
732 saveLive(lir);
734 pushArg(getIteratorIC->value());
735 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
736 pushArg(ImmGCPtr(gen->outerInfo().script()));
738 using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
739 HandleValue);
740 callVM<Fn, IonGetIteratorIC::update>(lir);
742 StoreRegisterTo(getIteratorIC->output()).generate(this);
743 restoreLiveIgnore(lir,
744 StoreRegisterTo(getIteratorIC->output()).clobbered());
746 masm.jump(ool->rejoin());
747 return;
749 case CacheKind::OptimizeSpreadCall: {
750 auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();
752 saveLive(lir);
754 pushArg(optimizeSpreadCallIC->value());
755 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
756 pushArg(ImmGCPtr(gen->outerInfo().script()));
758 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
759 HandleValue, MutableHandleValue);
760 callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);
762 StoreValueTo(optimizeSpreadCallIC->output()).generate(this);
763 restoreLiveIgnore(
764 lir, StoreValueTo(optimizeSpreadCallIC->output()).clobbered());
766 masm.jump(ool->rejoin());
767 return;
769 case CacheKind::In: {
770 IonInIC* inIC = ic->asInIC();
772 saveLive(lir);
774 pushArg(inIC->object());
775 pushArg(inIC->key());
776 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
777 pushArg(ImmGCPtr(gen->outerInfo().script()));
779 using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
780 HandleObject, bool*);
781 callVM<Fn, IonInIC::update>(lir);
783 StoreRegisterTo(inIC->output()).generate(this);
784 restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
786 masm.jump(ool->rejoin());
787 return;
789 case CacheKind::HasOwn: {
790 IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
792 saveLive(lir);
794 pushArg(hasOwnIC->id());
795 pushArg(hasOwnIC->value());
796 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
797 pushArg(ImmGCPtr(gen->outerInfo().script()));
799 using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
800 HandleValue, int32_t*);
801 callVM<Fn, IonHasOwnIC::update>(lir);
803 StoreRegisterTo(hasOwnIC->output()).generate(this);
804 restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
806 masm.jump(ool->rejoin());
807 return;
809 case CacheKind::CheckPrivateField: {
810 IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
812 saveLive(lir);
814 pushArg(checkPrivateFieldIC->id());
815 pushArg(checkPrivateFieldIC->value());
817 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
818 pushArg(ImmGCPtr(gen->outerInfo().script()));
820 using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
821 HandleValue, HandleValue, bool*);
822 callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
824 StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
825 restoreLiveIgnore(
826 lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
828 masm.jump(ool->rejoin());
829 return;
831 case CacheKind::InstanceOf: {
832 IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
834 saveLive(lir);
836 pushArg(hasInstanceOfIC->rhs());
837 pushArg(hasInstanceOfIC->lhs());
838 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
839 pushArg(ImmGCPtr(gen->outerInfo().script()));
841 using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
842 HandleValue lhs, HandleObject rhs, bool* res);
843 callVM<Fn, IonInstanceOfIC::update>(lir);
845 StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
846 restoreLiveIgnore(lir,
847 StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
849 masm.jump(ool->rejoin());
850 return;
852 case CacheKind::UnaryArith: {
853 IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
855 saveLive(lir);
857 pushArg(unaryArithIC->input());
858 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
859 pushArg(ImmGCPtr(gen->outerInfo().script()));
861 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
862 IonUnaryArithIC* stub, HandleValue val,
863 MutableHandleValue res);
864 callVM<Fn, IonUnaryArithIC::update>(lir);
866 StoreValueTo(unaryArithIC->output()).generate(this);
867 restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
869 masm.jump(ool->rejoin());
870 return;
872 case CacheKind::ToPropertyKey: {
873 IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
875 saveLive(lir);
877 pushArg(toPropertyKeyIC->input());
878 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
879 pushArg(ImmGCPtr(gen->outerInfo().script()));
881 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
882 IonToPropertyKeyIC* ic, HandleValue val,
883 MutableHandleValue res);
884 callVM<Fn, IonToPropertyKeyIC::update>(lir);
886 StoreValueTo(toPropertyKeyIC->output()).generate(this);
887 restoreLiveIgnore(lir,
888 StoreValueTo(toPropertyKeyIC->output()).clobbered());
890 masm.jump(ool->rejoin());
891 return;
893 case CacheKind::BinaryArith: {
894 IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
896 saveLive(lir);
898 pushArg(binaryArithIC->rhs());
899 pushArg(binaryArithIC->lhs());
900 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
901 pushArg(ImmGCPtr(gen->outerInfo().script()));
903 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
904 IonBinaryArithIC* stub, HandleValue lhs,
905 HandleValue rhs, MutableHandleValue res);
906 callVM<Fn, IonBinaryArithIC::update>(lir);
908 StoreValueTo(binaryArithIC->output()).generate(this);
909 restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
911 masm.jump(ool->rejoin());
912 return;
914 case CacheKind::Compare: {
915 IonCompareIC* compareIC = ic->asCompareIC();
917 saveLive(lir);
919 pushArg(compareIC->rhs());
920 pushArg(compareIC->lhs());
921 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
922 pushArg(ImmGCPtr(gen->outerInfo().script()));
924 using Fn =
925 bool (*)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub,
926 HandleValue lhs, HandleValue rhs, bool* res);
927 callVM<Fn, IonCompareIC::update>(lir);
929 StoreRegisterTo(compareIC->output()).generate(this);
930 restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
932 masm.jump(ool->rejoin());
933 return;
935 case CacheKind::CloseIter: {
936 IonCloseIterIC* closeIterIC = ic->asCloseIterIC();
938 saveLive(lir);
940 pushArg(closeIterIC->iter());
941 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
942 pushArg(ImmGCPtr(gen->outerInfo().script()));
944 using Fn =
945 bool (*)(JSContext*, HandleScript, IonCloseIterIC*, HandleObject);
946 callVM<Fn, IonCloseIterIC::update>(lir);
948 restoreLive(lir);
950 masm.jump(ool->rejoin());
951 return;
953 case CacheKind::OptimizeGetIterator: {
954 auto* optimizeGetIteratorIC = ic->asOptimizeGetIteratorIC();
956 saveLive(lir);
958 pushArg(optimizeGetIteratorIC->value());
959 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
960 pushArg(ImmGCPtr(gen->outerInfo().script()));
962 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeGetIteratorIC*,
963 HandleValue, bool* res);
964 callVM<Fn, IonOptimizeGetIteratorIC::update>(lir);
966 StoreRegisterTo(optimizeGetIteratorIC->output()).generate(this);
967 restoreLiveIgnore(
968 lir, StoreRegisterTo(optimizeGetIteratorIC->output()).clobbered());
970 masm.jump(ool->rejoin());
971 return;
973 case CacheKind::Call:
974 case CacheKind::TypeOf:
975 case CacheKind::ToBool:
976 case CacheKind::GetIntrinsic:
977 case CacheKind::NewArray:
978 case CacheKind::NewObject:
979 MOZ_CRASH("Unsupported IC");
981 MOZ_CRASH();
984 StringObject* MNewStringObject::templateObj() const {
985 return &templateObj_->as<StringObject>();
988 CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
989 MacroAssembler* masm)
990 : CodeGeneratorSpecific(gen, graph, masm),
991 ionScriptLabels_(gen->alloc()),
992 ionNurseryObjectLabels_(gen->alloc()),
993 scriptCounts_(nullptr),
994 zoneStubsToReadBarrier_(0) {}
996 CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
998 void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
999 ValueOperand operand = ToValue(lir, LValueToInt32::Input);
1000 Register output = ToRegister(lir->output());
1001 FloatRegister temp = ToFloatRegister(lir->tempFloat());
1003 Label fails;
1004 if (lir->mode() == LValueToInt32::TRUNCATE) {
1005 OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());
1007 // We can only handle strings in truncation contexts, like bitwise
1008 // operations.
1009 Register stringReg = ToRegister(lir->temp());
1010 using Fn = bool (*)(JSContext*, JSString*, double*);
1011 auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
1012 StoreFloatRegisterTo(temp));
1013 Label* stringEntry = oolString->entry();
1014 Label* stringRejoin = oolString->rejoin();
1016 masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
1017 oolDouble->entry(), stringReg, temp, output,
1018 &fails);
1019 masm.bind(oolDouble->rejoin());
1020 } else {
1021 MOZ_ASSERT(lir->mode() == LValueToInt32::NORMAL);
1022 masm.convertValueToInt32(operand, temp, output, &fails,
1023 lir->mirNormal()->needsNegativeZeroCheck(),
1024 lir->mirNormal()->conversion());
1027 bailoutFrom(&fails, lir->snapshot());
1030 void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
1031 ValueOperand operand = ToValue(lir, LValueToDouble::InputIndex);
1032 FloatRegister output = ToFloatRegister(lir->output());
1034 // Set if we can handle other primitives beside strings, as long as they're
1035 // guaranteed to never throw. This rules out symbols and BigInts, but allows
1036 // booleans, undefined, and null.
1037 bool hasNonStringPrimitives =
1038 lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;
1040 Label isDouble, isInt32, isBool, isNull, isUndefined, done;
1043 ScratchTagScope tag(masm, operand);
1044 masm.splitTagForTest(operand, tag);
1046 masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
1047 masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
1049 if (hasNonStringPrimitives) {
1050 masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
1051 masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
1052 masm.branchTestNull(Assembler::Equal, tag, &isNull);
1056 bailout(lir->snapshot());
1058 if (hasNonStringPrimitives) {
1059 masm.bind(&isNull);
1060 masm.loadConstantDouble(0.0, output);
1061 masm.jump(&done);
1064 if (hasNonStringPrimitives) {
1065 masm.bind(&isUndefined);
1066 masm.loadConstantDouble(GenericNaN(), output);
1067 masm.jump(&done);
1070 if (hasNonStringPrimitives) {
1071 masm.bind(&isBool);
1072 masm.boolValueToDouble(operand, output);
1073 masm.jump(&done);
1076 masm.bind(&isInt32);
1077 masm.int32ValueToDouble(operand, output);
1078 masm.jump(&done);
1080 masm.bind(&isDouble);
1081 masm.unboxDouble(operand, output);
1082 masm.bind(&done);
1085 void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
1086 ValueOperand operand = ToValue(lir, LValueToFloat32::InputIndex);
1087 FloatRegister output = ToFloatRegister(lir->output());
1089 // Set if we can handle other primitives beside strings, as long as they're
1090 // guaranteed to never throw. This rules out symbols and BigInts, but allows
1091 // booleans, undefined, and null.
1092 bool hasNonStringPrimitives =
1093 lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;
1095 Label isDouble, isInt32, isBool, isNull, isUndefined, done;
1098 ScratchTagScope tag(masm, operand);
1099 masm.splitTagForTest(operand, tag);
1101 masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
1102 masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
1104 if (hasNonStringPrimitives) {
1105 masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
1106 masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
1107 masm.branchTestNull(Assembler::Equal, tag, &isNull);
1111 bailout(lir->snapshot());
1113 if (hasNonStringPrimitives) {
1114 masm.bind(&isNull);
1115 masm.loadConstantFloat32(0.0f, output);
1116 masm.jump(&done);
1119 if (hasNonStringPrimitives) {
1120 masm.bind(&isUndefined);
1121 masm.loadConstantFloat32(float(GenericNaN()), output);
1122 masm.jump(&done);
1125 if (hasNonStringPrimitives) {
1126 masm.bind(&isBool);
1127 masm.boolValueToFloat32(operand, output);
1128 masm.jump(&done);
1131 masm.bind(&isInt32);
1132 masm.int32ValueToFloat32(operand, output);
1133 masm.jump(&done);
1135 masm.bind(&isDouble);
1136 // ARM and MIPS may not have a double register available if we've
1137 // allocated output as a float32.
1138 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
1139 ScratchDoubleScope fpscratch(masm);
1140 masm.unboxDouble(operand, fpscratch);
1141 masm.convertDoubleToFloat32(fpscratch, output);
1142 #else
1143 masm.unboxDouble(operand, output);
1144 masm.convertDoubleToFloat32(output, output);
1145 #endif
1146 masm.bind(&done);
1149 void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
1150 ValueOperand operand = ToValue(lir, LValueToBigInt::InputIndex);
1151 Register output = ToRegister(lir->output());
1153 using Fn = BigInt* (*)(JSContext*, HandleValue);
1154 auto* ool =
1155 oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));
1157 Register tag = masm.extractTag(operand, output);
1159 Label notBigInt, done;
1160 masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
1161 masm.unboxBigInt(operand, output);
1162 masm.jump(&done);
1163 masm.bind(&notBigInt);
1165 masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
1166 masm.branchTestString(Assembler::Equal, tag, ool->entry());
1168 // ToBigInt(object) can have side-effects; all other types throw a TypeError.
1169 bailout(lir->snapshot());
1171 masm.bind(ool->rejoin());
1172 masm.bind(&done);
1175 void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
1176 masm.convertInt32ToDouble(ToRegister(lir->input()),
1177 ToFloatRegister(lir->output()));
1180 void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
1181 masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
1182 ToFloatRegister(lir->output()));
1185 void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
1186 masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
1187 ToFloatRegister(lir->output()));
1190 void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
1191 masm.convertInt32ToFloat32(ToRegister(lir->input()),
1192 ToFloatRegister(lir->output()));
1195 void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
1196 Label fail;
1197 FloatRegister input = ToFloatRegister(lir->input());
1198 Register output = ToRegister(lir->output());
1199 masm.convertDoubleToInt32(input, output, &fail,
1200 lir->mir()->needsNegativeZeroCheck());
1201 bailoutFrom(&fail, lir->snapshot());
1204 void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
1205 Label fail;
1206 FloatRegister input = ToFloatRegister(lir->input());
1207 Register output = ToRegister(lir->output());
1208 masm.convertFloat32ToInt32(input, output, &fail,
1209 lir->mir()->needsNegativeZeroCheck());
1210 bailoutFrom(&fail, lir->snapshot());
1213 void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
1214 #ifdef JS_64BIT
1215 // This LIR instruction is only used if the input can be negative.
1216 MOZ_ASSERT(lir->mir()->canBeNegative());
1218 Register output = ToRegister(lir->output());
1219 const LAllocation* input = lir->input();
1220 if (input->isRegister()) {
1221 masm.move32SignExtendToPtr(ToRegister(input), output);
1222 } else {
1223 masm.load32SignExtendToPtr(ToAddress(input), output);
1225 #else
1226 MOZ_CRASH("Not used on 32-bit platforms");
1227 #endif
1230 void CodeGenerator::visitNonNegativeIntPtrToInt32(
1231 LNonNegativeIntPtrToInt32* lir) {
1232 #ifdef JS_64BIT
1233 Register output = ToRegister(lir->output());
1234 MOZ_ASSERT(ToRegister(lir->input()) == output);
1236 Label bail;
1237 masm.guardNonNegativeIntPtrToInt32(output, &bail);
1238 bailoutFrom(&bail, lir->snapshot());
1239 #else
1240 MOZ_CRASH("Not used on 32-bit platforms");
1241 #endif
1244 void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
1245 Register input = ToRegister(lir->input());
1246 FloatRegister output = ToFloatRegister(lir->output());
1247 masm.convertIntPtrToDouble(input, output);
1250 void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
1251 Register output = ToRegister(lir->output());
1252 MOZ_ASSERT(ToRegister(lir->input()) == output);
1254 uint32_t byteSize = lir->mir()->byteSize();
1256 #ifdef DEBUG
1257 Label ok;
1258 masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
1259 masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
1260 masm.bind(&ok);
1261 #endif
1263 Label bail;
1264 masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
1265 bailoutFrom(&bail, lir->snapshot());
1268 void CodeGenerator::emitOOLTestObject(Register objreg,
1269 Label* ifEmulatesUndefined,
1270 Label* ifDoesntEmulateUndefined,
1271 Register scratch) {
1272 saveVolatile(scratch);
1273 #if defined(DEBUG) || defined(FUZZING)
1274 masm.loadPtr(AbsoluteAddress(
1275 gen->runtime->addressOfHasSeenObjectEmulateUndefinedFuse()),
1276 scratch);
1277 using Fn = bool (*)(JSObject* obj, size_t fuseValue);
1278 masm.setupAlignedABICall();
1279 masm.passABIArg(objreg);
1280 masm.passABIArg(scratch);
1281 masm.callWithABI<Fn, js::EmulatesUndefinedCheckFuse>();
1282 #else
1283 using Fn = bool (*)(JSObject* obj);
1284 masm.setupAlignedABICall();
1285 masm.passABIArg(objreg);
1286 masm.callWithABI<Fn, js::EmulatesUndefined>();
1287 #endif
1288 masm.storeCallPointerResult(scratch);
1289 restoreVolatile(scratch);
1291 masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
1292 masm.jump(ifDoesntEmulateUndefined);
1295 // Base out-of-line code generator for all tests of the truthiness of an
1296 // object, where the object might not be truthy. (Recall that per spec all
1297 // objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
1298 // flag to permit objects to look like |undefined| in certain contexts,
1299 // including in object truthiness testing.) We check truthiness inline except
1300 // when we're testing it on a proxy, in which case out-of-line code will call
1301 // EmulatesUndefined for a conclusive answer.
1302 class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
1303 Register objreg_;
1304 Register scratch_;
1306 Label* ifEmulatesUndefined_;
1307 Label* ifDoesntEmulateUndefined_;
1309 #ifdef DEBUG
1310 bool initialized() { return ifEmulatesUndefined_ != nullptr; }
1311 #endif
1313 public:
1314 OutOfLineTestObject()
1315 : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}
1317 void accept(CodeGenerator* codegen) final {
1318 MOZ_ASSERT(initialized());
1319 codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
1320 ifDoesntEmulateUndefined_, scratch_);
1323 // Specify the register where the object to be tested is found, labels to
1324 // jump to if the object is truthy or falsy, and a scratch register for
1325 // use in the out-of-line path.
1326 void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
1327 Label* ifDoesntEmulateUndefined, Register scratch) {
1328 MOZ_ASSERT(!initialized());
1329 MOZ_ASSERT(ifEmulatesUndefined);
1330 objreg_ = objreg;
1331 scratch_ = scratch;
1332 ifEmulatesUndefined_ = ifEmulatesUndefined;
1333 ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
1337 // A subclass of OutOfLineTestObject containing two extra labels, for use when
1338 // the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
1339 // code. The user should bind these labels in inline code, and specify them as
1340 // targets via setInputAndTargets, as appropriate.
1341 class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
1342 Label label1_;
1343 Label label2_;
1345 public:
1346 OutOfLineTestObjectWithLabels() = default;
1348 Label* label1() { return &label1_; }
1349 Label* label2() { return &label2_; }
1352 void CodeGenerator::testObjectEmulatesUndefinedKernel(
1353 Register objreg, Label* ifEmulatesUndefined,
1354 Label* ifDoesntEmulateUndefined, Register scratch,
1355 OutOfLineTestObject* ool) {
1356 ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
1357 scratch);
1359 // Perform a fast-path check of the object's class flags if the object's
1360 // not a proxy. Let out-of-line code handle the slow cases that require
1361 // saving registers, making a function call, and restoring registers.
1362 masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
1363 ifEmulatesUndefined);
1366 void CodeGenerator::branchTestObjectEmulatesUndefined(
1367 Register objreg, Label* ifEmulatesUndefined,
1368 Label* ifDoesntEmulateUndefined, Register scratch,
1369 OutOfLineTestObject* ool) {
1370 MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
1371 "ifDoesntEmulateUndefined will be bound to the fallthrough path");
1373 testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
1374 ifDoesntEmulateUndefined, scratch, ool);
1375 masm.bind(ifDoesntEmulateUndefined);
1378 void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
1379 Label* ifEmulatesUndefined,
1380 Label* ifDoesntEmulateUndefined,
1381 Register scratch,
1382 OutOfLineTestObject* ool) {
1383 testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
1384 ifDoesntEmulateUndefined, scratch, ool);
1385 masm.jump(ifDoesntEmulateUndefined);
1388 void CodeGenerator::testValueTruthyForType(
1389 JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
1390 Register tempToUnbox, Register temp, FloatRegister floatTemp,
1391 Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool,
1392 bool skipTypeTest) {
1393 #ifdef DEBUG
1394 if (skipTypeTest) {
1395 Label expected;
1396 masm.branchTestType(Assembler::Equal, tag, type, &expected);
1397 masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
1398 masm.bind(&expected);
1400 #endif
1402 // Handle irregular types first.
1403 switch (type) {
1404 case JSVAL_TYPE_UNDEFINED:
1405 case JSVAL_TYPE_NULL:
1406 // Undefined and null are falsy.
1407 if (!skipTypeTest) {
1408 masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
1409 } else {
1410 masm.jump(ifFalsy);
1412 return;
1413 case JSVAL_TYPE_SYMBOL:
1414 // Symbols are truthy.
1415 if (!skipTypeTest) {
1416 masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
1417 } else {
1418 masm.jump(ifTruthy);
1420 return;
1421 case JSVAL_TYPE_OBJECT: {
1422 Label notObject;
1423 if (!skipTypeTest) {
1424 masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
1426 ScratchTagScopeRelease _(&tag);
1427 Register objreg = masm.extractObject(value, tempToUnbox);
1428 testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, temp, ool);
1429 masm.bind(&notObject);
1430 return;
1432 default:
1433 break;
1436 // Check the type of the value (unless this is the last possible type).
1437 Label differentType;
1438 if (!skipTypeTest) {
1439 masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
1442 // Branch if the value is falsy.
1443 ScratchTagScopeRelease _(&tag);
1444 switch (type) {
1445 case JSVAL_TYPE_BOOLEAN: {
1446 masm.branchTestBooleanTruthy(false, value, ifFalsy);
1447 break;
1449 case JSVAL_TYPE_INT32: {
1450 masm.branchTestInt32Truthy(false, value, ifFalsy);
1451 break;
1453 case JSVAL_TYPE_STRING: {
1454 masm.branchTestStringTruthy(false, value, ifFalsy);
1455 break;
1457 case JSVAL_TYPE_BIGINT: {
1458 masm.branchTestBigIntTruthy(false, value, ifFalsy);
1459 break;
1461 case JSVAL_TYPE_DOUBLE: {
1462 masm.unboxDouble(value, floatTemp);
1463 masm.branchTestDoubleTruthy(false, floatTemp, ifFalsy);
1464 break;
1466 default:
1467 MOZ_CRASH("Unexpected value type");
1470 // If we reach this point, the value is truthy. We fall through for
1471 // truthy on the last test; otherwise, branch.
1472 if (!skipTypeTest) {
1473 masm.jump(ifTruthy);
1476 masm.bind(&differentType);
1479 void CodeGenerator::testValueTruthy(const ValueOperand& value,
1480 Register tempToUnbox, Register temp,
1481 FloatRegister floatTemp,
1482 const TypeDataList& observedTypes,
1483 Label* ifTruthy, Label* ifFalsy,
1484 OutOfLineTestObject* ool) {
1485 ScratchTagScope tag(masm, value);
1486 masm.splitTagForTest(value, tag);
1488 const std::initializer_list<JSValueType> defaultOrder = {
1489 JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN,
1490 JSVAL_TYPE_INT32, JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
1491 JSVAL_TYPE_DOUBLE, JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
1493 mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);
1495 // Generate tests for previously observed types first.
1496 // The TypeDataList is sorted by descending frequency.
1497 for (auto& observed : observedTypes) {
1498 JSValueType type = observed.type();
1499 remaining -= type;
1501 testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
1502 ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
1505 // Generate tests for remaining types.
1506 for (auto type : defaultOrder) {
1507 if (!remaining.contains(type)) {
1508 continue;
1510 remaining -= type;
1512 // We don't need a type test for the last possible type.
1513 bool skipTypeTest = remaining.isEmpty();
1514 testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
1515 ifTruthy, ifFalsy, ool, skipTypeTest);
1517 MOZ_ASSERT(remaining.isEmpty());
1519 // We fall through if the final test is truthy.
1522 void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
1523 Label* ifTrueLabel = getJumpLabelForBranch(lir->ifTrue());
1524 Label* ifFalseLabel = getJumpLabelForBranch(lir->ifFalse());
1525 Register input = ToRegister(lir->input());
1527 if (isNextBlock(lir->ifFalse()->lir())) {
1528 masm.branchIfBigIntIsNonZero(input, ifTrueLabel);
1529 } else if (isNextBlock(lir->ifTrue()->lir())) {
1530 masm.branchIfBigIntIsZero(input, ifFalseLabel);
1531 } else {
1532 masm.branchIfBigIntIsZero(input, ifFalseLabel);
1533 jumpToBlock(lir->ifTrue());
1537 void CodeGenerator::assertObjectDoesNotEmulateUndefined(
1538 Register input, Register temp, const MInstruction* mir) {
1539 #if defined(DEBUG) || defined(FUZZING)
1540 // Validate that the object indeed doesn't have the emulates undefined flag.
1541 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
1542 addOutOfLineCode(ool, mir);
1544 Label* doesNotEmulateUndefined = ool->label1();
1545 Label* emulatesUndefined = ool->label2();
1547 testObjectEmulatesUndefined(input, emulatesUndefined, doesNotEmulateUndefined,
1548 temp, ool);
1549 masm.bind(emulatesUndefined);
1550 masm.assumeUnreachable(
1551 "Found an object emulating undefined while the fuse is intact");
1552 masm.bind(doesNotEmulateUndefined);
1553 #endif
1556 void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
1557 Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
1558 Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
1559 Register input = ToRegister(lir->input());
1561 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
1562 if (intact) {
1563 assertObjectDoesNotEmulateUndefined(input, ToRegister(lir->temp()),
1564 lir->mir());
1565 // Bug 1874905: It would be fantastic if this could be optimized out
1566 masm.jump(truthy);
1567 } else {
1568 auto* ool = new (alloc()) OutOfLineTestObject();
1569 addOutOfLineCode(ool, lir->mir());
1571 testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
1572 ool);
1576 void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
1577 auto* ool = new (alloc()) OutOfLineTestObject();
1578 addOutOfLineCode(ool, lir->mir());
1580 Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
1581 Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
1583 ValueOperand input = ToValue(lir, LTestVAndBranch::Input);
1584 Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
1585 Register temp = ToRegister(lir->temp2());
1586 FloatRegister floatTemp = ToFloatRegister(lir->tempFloat());
1587 const TypeDataList& observedTypes = lir->mir()->observedTypes();
1589 testValueTruthy(input, tempToUnbox, temp, floatTemp, observedTypes, truthy,
1590 falsy, ool);
1591 masm.jump(truthy);
1594 void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
1595 Register input = ToRegister(lir->input());
1596 Register output = ToRegister(lir->output());
1597 const JSAtomState& names = gen->runtime->names();
1598 Label true_, done;
1600 masm.branchTest32(Assembler::NonZero, input, input, &true_);
1601 masm.movePtr(ImmGCPtr(names.false_), output);
1602 masm.jump(&done);
1604 masm.bind(&true_);
1605 masm.movePtr(ImmGCPtr(names.true_), output);
1607 masm.bind(&done);
1610 void CodeGenerator::visitIntToString(LIntToString* lir) {
1611 Register input = ToRegister(lir->input());
1612 Register output = ToRegister(lir->output());
1614 using Fn = JSLinearString* (*)(JSContext*, int);
1615 OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
1616 lir, ArgList(input), StoreRegisterTo(output));
1618 masm.lookupStaticIntString(input, output, gen->runtime->staticStrings(),
1619 ool->entry());
1621 masm.bind(ool->rejoin());
1624 void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
1625 FloatRegister input = ToFloatRegister(lir->input());
1626 Register temp = ToRegister(lir->temp0());
1627 Register output = ToRegister(lir->output());
1629 using Fn = JSString* (*)(JSContext*, double);
1630 OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
1631 lir, ArgList(input), StoreRegisterTo(output));
1633 // Try double to integer conversion and run integer to string code.
1634 masm.convertDoubleToInt32(input, temp, ool->entry(), false);
1635 masm.lookupStaticIntString(temp, output, gen->runtime->staticStrings(),
1636 ool->entry());
1638 masm.bind(ool->rejoin());
1641 void CodeGenerator::visitValueToString(LValueToString* lir) {
1642 ValueOperand input = ToValue(lir, LValueToString::InputIndex);
1643 Register output = ToRegister(lir->output());
1645 using Fn = JSString* (*)(JSContext*, HandleValue);
1646 OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
1647 lir, ArgList(input), StoreRegisterTo(output));
1649 Label done;
1650 Register tag = masm.extractTag(input, output);
1651 const JSAtomState& names = gen->runtime->names();
1653 // String
1655 Label notString;
1656 masm.branchTestString(Assembler::NotEqual, tag, &notString);
1657 masm.unboxString(input, output);
1658 masm.jump(&done);
1659 masm.bind(&notString);
1662 // Integer
1664 Label notInteger;
1665 masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
1666 Register unboxed = ToTempUnboxRegister(lir->temp0());
1667 unboxed = masm.extractInt32(input, unboxed);
1668 masm.lookupStaticIntString(unboxed, output, gen->runtime->staticStrings(),
1669 ool->entry());
1670 masm.jump(&done);
1671 masm.bind(&notInteger);
1674 // Double
1676 // Note: no fastpath. Need two extra registers and can only convert doubles
1677 // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
1678 masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
1681 // Undefined
1683 Label notUndefined;
1684 masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
1685 masm.movePtr(ImmGCPtr(names.undefined), output);
1686 masm.jump(&done);
1687 masm.bind(&notUndefined);
1690 // Null
1692 Label notNull;
1693 masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
1694 masm.movePtr(ImmGCPtr(names.null), output);
1695 masm.jump(&done);
1696 masm.bind(&notNull);
1699 // Boolean
1701 Label notBoolean, true_;
1702 masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
1703 masm.branchTestBooleanTruthy(true, input, &true_);
1704 masm.movePtr(ImmGCPtr(names.false_), output);
1705 masm.jump(&done);
1706 masm.bind(&true_);
1707 masm.movePtr(ImmGCPtr(names.true_), output);
1708 masm.jump(&done);
1709 masm.bind(&notBoolean);
1712 // Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
1713 if (lir->mir()->mightHaveSideEffects()) {
1714 // Object
1715 if (lir->mir()->supportSideEffects()) {
1716 masm.branchTestObject(Assembler::Equal, tag, ool->entry());
1717 } else {
1718 // Bail.
1719 MOZ_ASSERT(lir->mir()->needsSnapshot());
1720 Label bail;
1721 masm.branchTestObject(Assembler::Equal, tag, &bail);
1722 bailoutFrom(&bail, lir->snapshot());
1725 // Symbol
1726 if (lir->mir()->supportSideEffects()) {
1727 masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
1728 } else {
1729 // Bail.
1730 MOZ_ASSERT(lir->mir()->needsSnapshot());
1731 Label bail;
1732 masm.branchTestSymbol(Assembler::Equal, tag, &bail);
1733 bailoutFrom(&bail, lir->snapshot());
1737 // BigInt
1739 // No fastpath currently implemented.
1740 masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
1743 masm.assumeUnreachable("Unexpected type for LValueToString.");
1745 masm.bind(&done);
1746 masm.bind(ool->rejoin());
1749 using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);
1751 static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
1752 size_t offset, Register buffer,
1753 LiveGeneralRegisterSet& liveVolatiles,
1754 StoreBufferMutationFn fun) {
1755 Label callVM;
1756 Label exit;
1758 // Call into the VM to barrier the write. The only registers that need to
1759 // be preserved are those in liveVolatiles, so once they are saved on the
1760 // stack all volatile registers are available for use.
1761 masm.bind(&callVM);
1762 masm.PushRegsInMask(liveVolatiles);
1764 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
1765 regs.takeUnchecked(buffer);
1766 regs.takeUnchecked(holder);
1767 Register addrReg = regs.takeAny();
1769 masm.computeEffectiveAddress(Address(holder, offset), addrReg);
1771 bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
1772 if (needExtraReg) {
1773 masm.push(holder);
1774 masm.setupUnalignedABICall(holder);
1775 } else {
1776 masm.setupUnalignedABICall(regs.takeAny());
1778 masm.passABIArg(buffer);
1779 masm.passABIArg(addrReg);
1780 masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun),
1781 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
1783 if (needExtraReg) {
1784 masm.pop(holder);
1786 masm.PopRegsInMask(liveVolatiles);
1787 masm.bind(&exit);
1790 // Warning: this function modifies prev and next.
1791 static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
1792 size_t offset, Register prev, Register next,
1793 LiveGeneralRegisterSet& liveVolatiles) {
1794 Label exit;
1795 Label checkRemove, putCell;
1797 // if (next && (buffer = next->storeBuffer()))
1798 // but we never pass in nullptr for next.
1799 Register storebuffer = next;
1800 masm.loadStoreBuffer(next, storebuffer);
1801 masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);
1803 // if (prev && prev->storeBuffer())
1804 masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
1805 masm.loadStoreBuffer(prev, prev);
1806 masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);
1808 // buffer->putCell(cellp)
1809 masm.bind(&putCell);
1810 EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
1811 JSString::addCellAddressToStoreBuffer);
1812 masm.jump(&exit);
1814 // if (prev && (buffer = prev->storeBuffer()))
1815 masm.bind(&checkRemove);
1816 masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
1817 masm.loadStoreBuffer(prev, storebuffer);
1818 masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
1819 EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
1820 JSString::removeCellAddressFromStoreBuffer);
1822 masm.bind(&exit);
1825 void CodeGenerator::visitRegExp(LRegExp* lir) {
1826 Register output = ToRegister(lir->output());
1827 Register temp = ToRegister(lir->temp0());
1828 JSObject* source = lir->mir()->source();
1830 using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
1831 OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
1832 lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
1833 if (lir->mir()->hasShared()) {
1834 TemplateObject templateObject(source);
1835 masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
1836 ool->entry());
1837 } else {
1838 masm.jump(ool->entry());
1840 masm.bind(ool->rejoin());
1843 static constexpr int32_t RegExpPairsVectorStartOffset(
1844 int32_t inputOutputDataStartOffset) {
1845 return inputOutputDataStartOffset + int32_t(InputOutputDataSize) +
1846 int32_t(sizeof(MatchPairs));
1849 static Address RegExpPairCountAddress(MacroAssembler& masm,
1850 int32_t inputOutputDataStartOffset) {
1851 return Address(FramePointer, inputOutputDataStartOffset +
1852 int32_t(InputOutputDataSize) +
1853 MatchPairs::offsetOfPairCount());
1856 static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
1857 Register input, Register lastIndex,
1858 Register staticsReg, Register temp1,
1859 Register temp2, gc::Heap initialStringHeap,
1860 LiveGeneralRegisterSet& volatileRegs) {
1861 Address pendingInputAddress(staticsReg,
1862 RegExpStatics::offsetOfPendingInput());
1863 Address matchesInputAddress(staticsReg,
1864 RegExpStatics::offsetOfMatchesInput());
1865 Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
1866 Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());
1868 masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
1869 masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
1870 masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);
1872 if (initialStringHeap == gc::Heap::Default) {
1873 // Writing into RegExpStatics tenured memory; must post-barrier.
1874 if (staticsReg.volatile_()) {
1875 volatileRegs.add(staticsReg);
1878 masm.loadPtr(pendingInputAddress, temp1);
1879 masm.storePtr(input, pendingInputAddress);
1880 masm.movePtr(input, temp2);
1881 EmitPostWriteBarrierS(masm, staticsReg,
1882 RegExpStatics::offsetOfPendingInput(),
1883 temp1 /* prev */, temp2 /* next */, volatileRegs);
1885 masm.loadPtr(matchesInputAddress, temp1);
1886 masm.storePtr(input, matchesInputAddress);
1887 masm.movePtr(input, temp2);
1888 EmitPostWriteBarrierS(masm, staticsReg,
1889 RegExpStatics::offsetOfMatchesInput(),
1890 temp1 /* prev */, temp2 /* next */, volatileRegs);
1891 } else {
1892 masm.debugAssertGCThingIsTenured(input, temp1);
1893 masm.storePtr(input, pendingInputAddress);
1894 masm.storePtr(input, matchesInputAddress);
1897 masm.storePtr(lastIndex,
1898 Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
1899 masm.store32(
1900 Imm32(1),
1901 Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));
1903 masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
1904 RegExpObject::SHARED_SLOT)),
1905 temp1, JSVAL_TYPE_PRIVATE_GCTHING);
1906 masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
1907 masm.storePtr(temp2, lazySourceAddress);
1908 static_assert(sizeof(JS::RegExpFlags) == 1, "load size must match flag size");
1909 masm.load8ZeroExtend(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
1910 masm.store8(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
1913 // Prepare an InputOutputData and an optional MatchPairs, for which space has
1914 // been allocated on the stack, and try to execute a RegExp on a string input.
1915 // If the RegExp was successfully executed and matched the input, fall through.
1916 // Otherwise, jump to notFound or failure.
1918 // inputOutputDataStartOffset is the offset relative to the frame pointer
1919 // register. This offset is negative for the RegExpExecTest stub.
//
// Returns false only on a (compile-time) failure to emit the code; all
// runtime failure modes are expressed as jumps to |notFound| or |failure|.
1920 static bool PrepareAndExecuteRegExp(MacroAssembler& masm, Register regexp,
1921 Register input, Register lastIndex,
1922 Register temp1, Register temp2,
1923 Register temp3,
1924 int32_t inputOutputDataStartOffset,
1925 gc::Heap initialStringHeap, Label* notFound,
1926 Label* failure) {
1927 JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");
1929 using irregexp::InputOutputData;
1932 * [SMDOC] Stack layout for PrepareAndExecuteRegExp
1934 * Before this function is called, the caller is responsible for
1935 * allocating enough stack space for the following data:
1937 * inputOutputDataStartOffset +-----> +---------------+
1938 * |InputOutputData|
1939 * inputStartAddress +----------> inputStart|
1940 * inputEndAddress +----------> inputEnd|
1941 * startIndexAddress +----------> startIndex|
1942 * matchesAddress +----------> matches|-----+
1943 * +---------------+ |
1944 * matchPairs(Address|Offset) +-----> +---------------+ <--+
1945 * | MatchPairs |
1946 * pairCountAddress +----------> count |
1947 * pairsPointerAddress +----------> pairs |-----+
1948 * +---------------+ |
1949 * pairsArray(Address|Offset) +-----> +---------------+ <--+
1950 * | MatchPair |
1951 * firstMatchStartAddress +----------> start | <--+
1952 * | limit | |
1953 * +---------------+ |
1954 * . |
1955 * . Reserved space for
1956 * . RegExpObject::MaxPairCount
1957 * . MatchPair objects
1958 * . |
1959 * +---------------+ |
1960 * | MatchPair | |
1961 * | start | |
1962 * | limit | <--+
1963 * +---------------+
// Frame-pointer-relative offsets of the three stack regions pictured above:
// the InputOutputData, the MatchPairs header, and the MatchPair array.
1966 int32_t ioOffset = inputOutputDataStartOffset;
1967 int32_t matchPairsOffset = ioOffset + int32_t(sizeof(InputOutputData));
1968 int32_t pairsArrayOffset = matchPairsOffset + int32_t(sizeof(MatchPairs));
1970 Address inputStartAddress(FramePointer,
1971 ioOffset + InputOutputData::offsetOfInputStart());
1972 Address inputEndAddress(FramePointer,
1973 ioOffset + InputOutputData::offsetOfInputEnd());
1974 Address startIndexAddress(FramePointer,
1975 ioOffset + InputOutputData::offsetOfStartIndex());
1976 Address matchesAddress(FramePointer,
1977 ioOffset + InputOutputData::offsetOfMatches());
1979 Address matchPairsAddress(FramePointer, matchPairsOffset);
1980 Address pairCountAddress(FramePointer,
1981 matchPairsOffset + MatchPairs::offsetOfPairCount());
1982 Address pairsPointerAddress(FramePointer,
1983 matchPairsOffset + MatchPairs::offsetOfPairs());
1985 Address pairsArrayAddress(FramePointer, pairsArrayOffset);
1986 Address firstMatchStartAddress(FramePointer,
1987 pairsArrayOffset + MatchPair::offsetOfStart());
1989 // First, fill in a skeletal MatchPairs instance on the stack. This will be
1990 // passed to the OOL stub in the caller if we aren't able to execute the
1991 // RegExp inline, and that stub needs to be able to determine whether the
1992 // execution finished successfully.
1994 // Initialize MatchPairs::pairCount to 1. The correct value can only
1995 // be determined after loading the RegExpShared. If the RegExpShared
1996 // has Kind::Atom, this is the correct pairCount.
1997 masm.store32(Imm32(1), pairCountAddress);
1999 // Initialize MatchPairs::pairs pointer
2000 masm.computeEffectiveAddress(pairsArrayAddress, temp1);
2001 masm.storePtr(temp1, pairsPointerAddress);
2003 // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch
2004 masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);
2006 // Determine the set of volatile inputs to save when calling into C++ or
2007 // regexp code.
2008 LiveGeneralRegisterSet volatileRegs;
2009 if (lastIndex.volatile_()) {
2010 volatileRegs.add(lastIndex);
2012 if (input.volatile_()) {
2013 volatileRegs.add(input);
2015 if (regexp.volatile_()) {
2016 volatileRegs.add(regexp);
2019 // Ensure the input string is not a rope.
2020 Label isLinear;
2021 masm.branchIfNotRope(input, &isLinear);
2023 masm.PushRegsInMask(volatileRegs);
2025 using Fn = JSLinearString* (*)(JSString*);
2026 masm.setupUnalignedABICall(temp1);
2027 masm.passABIArg(input);
2028 masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
2030 MOZ_ASSERT(!volatileRegs.has(temp1));
2031 masm.storeCallPointerResult(temp1);
2032 masm.PopRegsInMask(volatileRegs);
// A null result means linearization failed; bail out to the caller's
// failure path.
2034 masm.branchTestPtr(Assembler::Zero, temp1, temp1, failure);
2036 masm.bind(&isLinear);
2038 // Load the RegExpShared.
2039 Register regexpReg = temp1;
2040 Address sharedSlot = Address(
2041 regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
2042 masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
2043 masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);
2045 // Handle Atom matches
2046 Label notAtom, checkSuccess;
2047 masm.branchPtr(Assembler::Equal,
2048 Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
2049 ImmWord(0), &notAtom);
2051 masm.computeEffectiveAddress(matchPairsAddress, temp3);
2053 masm.PushRegsInMask(volatileRegs);
2054 using Fn = RegExpRunStatus (*)(RegExpShared* re, JSLinearString* input,
2055 size_t start, MatchPairs* matchPairs);
2056 masm.setupUnalignedABICall(temp2);
2057 masm.passABIArg(regexpReg);
2058 masm.passABIArg(input);
2059 masm.passABIArg(lastIndex);
2060 masm.passABIArg(temp3);
2061 masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();
2063 MOZ_ASSERT(!volatileRegs.has(temp1));
2064 masm.storeCallInt32Result(temp1);
2065 masm.PopRegsInMask(volatileRegs);
2067 masm.jump(&checkSuccess);
2069 masm.bind(&notAtom);
2071 // Don't handle regexps with too many capture pairs.
2072 masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
2073 masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
2074 failure);
2076 // Fill in the pair count in the MatchPairs on the stack.
2077 masm.store32(temp2, pairCountAddress);
2079 // Load code pointer and length of input (in bytes).
2080 // Store the input start in the InputOutputData.
2081 Register codePointer = temp1; // Note: temp1 was previously regexpReg.
2082 Register byteLength = temp3;
2084 Label isLatin1, done;
2085 masm.loadStringLength(input, byteLength);
2087 masm.branchLatin1String(input, &isLatin1);
2089 // Two-byte input
2090 masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
2091 masm.storePtr(temp2, inputStartAddress);
2092 masm.loadPtr(
2093 Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
2094 codePointer);
// Two-byte characters: byte length is twice the string length.
2095 masm.lshiftPtr(Imm32(1), byteLength);
2096 masm.jump(&done);
2098 // Latin1 input
2099 masm.bind(&isLatin1);
2100 masm.loadStringChars(input, temp2, CharEncoding::Latin1);
2101 masm.storePtr(temp2, inputStartAddress);
2102 masm.loadPtr(
2103 Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
2104 codePointer);
2106 masm.bind(&done);
2108 // Store end pointer
2109 masm.addPtr(byteLength, temp2);
2110 masm.storePtr(temp2, inputEndAddress);
2113 // Guard that the RegExpShared has been compiled for this type of input.
2114 // If it has not been compiled, we fall back to the OOL case, which will
2115 // do a VM call into the interpreter.
2116 // TODO: add an interpreter trampoline?
2117 masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
2118 masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);
2120 // Finish filling in the InputOutputData instance on the stack
2121 masm.computeEffectiveAddress(matchPairsAddress, temp2);
2122 masm.storePtr(temp2, matchesAddress);
2123 masm.storePtr(lastIndex, startIndexAddress);
2125 // Execute the RegExp.
2126 masm.computeEffectiveAddress(
2127 Address(FramePointer, inputOutputDataStartOffset), temp2);
2128 masm.PushRegsInMask(volatileRegs);
2129 masm.setupUnalignedABICall(temp3);
2130 masm.passABIArg(temp2);
2131 masm.callWithABI(codePointer);
2132 masm.storeCallInt32Result(temp1);
2133 masm.PopRegsInMask(volatileRegs);
// temp1 now holds the RegExpRunStatus from either the atom fast path above
// or the compiled regexp code.
2135 masm.bind(&checkSuccess);
2136 masm.branch32(Assembler::Equal, temp1,
2137 Imm32(int32_t(RegExpRunStatus::Success_NotFound)), notFound);
2138 masm.branch32(Assembler::Equal, temp1, Imm32(int32_t(RegExpRunStatus::Error)),
2139 failure);
2141 // Lazily update the RegExpStatics.
2142 size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
2143 RegExpRealm::offsetOfRegExpStatics();
2144 masm.loadGlobalObjectData(temp1);
2145 masm.loadPtr(Address(temp1, offset), temp1);
2146 UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
2147 initialStringHeap, volatileRegs);
2149 return true;
// Forward declaration. Copies |len| characters (interpreted per |encoding|)
// from |from| to |to|, using |byteOpScratch| as a scratch register; used below
// by CreateDependentString::generate. Definition appears later in this file.
2152 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
2153 Register len, Register byteOpScratch,
2154 CharEncoding encoding,
2155 size_t maximumLength = SIZE_MAX);
// Helper that emits the code materializing one match capture's string:
// depending on the capture's length and position, this is the empty atom, the
// base string itself, a static string, a (thin/fat) inline string, or a
// dependent string sharing the base string's characters.
2157 class CreateDependentString {
  // Character encoding (Latin-1 or two-byte) of the strings being created.
2158 CharEncoding encoding_;
  // Output register: receives the resulting JSString*.
2159 Register string_;
2160 Register temp1_;
2161 Register temp2_;
  // Jump target when even the fallback allocation fails.
2162 Label* failure_;
  // Which allocation fast path fell back to the VM (see generateFallback).
2164 enum class FallbackKind : uint8_t {
2165 InlineString,
2166 FatInlineString,
2167 NotInlineString,
2168 Count
  // Per-kind out-of-line allocation entry labels (bound in generateFallback)
  // and the rejoin points back into the inline path (bound in generate).
2170 mozilla::EnumeratedArray<FallbackKind, Label, size_t(FallbackKind::Count)>
2171 fallbacks_, joins_;
2173 public:
2174 CreateDependentString(CharEncoding encoding, Register string, Register temp1,
2175 Register temp2, Label* failure)
2176 : encoding_(encoding),
2177 string_(string),
2178 temp1_(temp1),
2179 temp2_(temp2),
2180 failure_(failure) {}
2182 Register string() const { return string_; }
2183 CharEncoding encoding() const { return encoding_; }
2185 // Generate code that creates DependentString.
2186 // Caller should call generateFallback after masm.ret(), to generate
2187 // fallback path.
2188 void generate(MacroAssembler& masm, const JSAtomState& names,
2189 CompileRuntime* runtime, Register base,
2190 BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
2191 gc::Heap initialStringHeap);
2193 // Generate fallback path for creating DependentString.
2194 void generateFallback(MacroAssembler& masm);
// Emit the inline path. |startIndexAddress|/|limitIndexAddress| address the
// MatchPair (start/limit offsets into |base|) for this capture; on completion
// the resulting string is in |string_|.
2197 void CreateDependentString::generate(MacroAssembler& masm,
2198 const JSAtomState& names,
2199 CompileRuntime* runtime, Register base,
2200 BaseIndex startIndexAddress,
2201 BaseIndex limitIndexAddress,
2202 gc::Heap initialStringHeap) {
2203 JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
2204 (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
  // Allocate a string of the given kind into |string_|, jumping to the
  // per-kind fallback label on allocation failure, and initialize its flags.
2206 auto newGCString = [&](FallbackKind kind) {
2207 uint32_t flags = kind == FallbackKind::InlineString
2208 ? JSString::INIT_THIN_INLINE_FLAGS
2209 : kind == FallbackKind::FatInlineString
2210 ? JSString::INIT_FAT_INLINE_FLAGS
2211 : JSString::INIT_DEPENDENT_FLAGS;
2212 if (encoding_ == CharEncoding::Latin1) {
2213 flags |= JSString::LATIN1_CHARS_BIT;
2216 if (kind != FallbackKind::FatInlineString) {
2217 masm.newGCString(string_, temp2_, initialStringHeap, &fallbacks_[kind]);
2218 } else {
2219 masm.newGCFatInlineString(string_, temp2_, initialStringHeap,
2220 &fallbacks_[kind]);
2222 masm.bind(&joins_[kind]);
2223 masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
  // Compute the string length: temp1_ = limit - start (start kept in temp2_).
2226 // Compute the string length.
2227 masm.load32(startIndexAddress, temp2_);
2228 masm.load32(limitIndexAddress, temp1_);
2229 masm.sub32(temp2_, temp1_);
2231 Label done, nonEmpty;
2233 // Zero length matches use the empty string.
2234 masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
2235 masm.movePtr(ImmGCPtr(names.empty_), string_);
2236 masm.jump(&done);
2238 masm.bind(&nonEmpty);
2240 // Complete matches use the base string.
2241 Label nonBaseStringMatch;
2242 masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
2243 masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
2244 temp1_, &nonBaseStringMatch);
2245 masm.movePtr(base, string_);
2246 masm.jump(&done);
2248 masm.bind(&nonBaseStringMatch);
2250 Label notInline;
2252 int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
2253 ? JSFatInlineString::MAX_LENGTH_LATIN1
2254 : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
2255 masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
2257 // Make a thin or fat inline string.
2258 Label stringAllocated, fatInline;
2260 int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
2261 ? JSThinInlineString::MAX_LENGTH_LATIN1
2262 : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
2263 masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
2264 &fatInline);
2265 if (encoding_ == CharEncoding::Latin1) {
2266 // One character Latin-1 strings can be loaded directly from the
2267 // static strings table.
2268 Label thinInline;
2269 masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
2271 static_assert(
2272 StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
2273 "Latin-1 strings can be loaded from static strings");
2275 masm.loadStringChars(base, temp1_, encoding_);
2276 masm.loadChar(temp1_, temp2_, temp1_, encoding_);
2278 masm.lookupStaticString(temp1_, string_, runtime->staticStrings());
2280 masm.jump(&done);
2282 masm.bind(&thinInline);
2285 newGCString(FallbackKind::InlineString);
2286 masm.jump(&stringAllocated);
2288 masm.bind(&fatInline);
2289 { newGCString(FallbackKind::FatInlineString); }
2290 masm.bind(&stringAllocated);
2292 masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
  // Save |string_| and |base|: both registers are reused as scratch while
  // copying the characters below, and restored afterwards.
2294 masm.push(string_);
2295 masm.push(base);
2297 MOZ_ASSERT(startIndexAddress.base == FramePointer,
2298 "startIndexAddress is still valid after stack pushes");
2300 // Load chars pointer for the new string.
2301 masm.loadInlineStringCharsForStore(string_, string_);
2303 // Load the source characters pointer.
2304 masm.loadStringChars(base, temp2_, encoding_);
2305 masm.load32(startIndexAddress, base);
2306 masm.addToCharPtr(temp2_, base, encoding_);
2308 CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);
2310 masm.pop(base);
2311 masm.pop(string_);
2313 masm.jump(&done);
2316 masm.bind(&notInline);
2319 // Make a dependent string.
2320 // Warning: string may be tenured (if the fallback case is hit), so
2321 // stores into it must be post barriered.
2322 newGCString(FallbackKind::NotInlineString);
2324 masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
2326 masm.loadNonInlineStringChars(base, temp1_, encoding_);
2327 masm.load32(startIndexAddress, temp2_);
2328 masm.addToCharPtr(temp1_, temp2_, encoding_);
2329 masm.storeNonInlineStringChars(temp1_, string_);
2330 masm.storeDependentStringBase(base, string_);
2331 masm.movePtr(base, temp1_);
2333 // Follow any base pointer if the input is itself a dependent string.
2334 // Watch for undepended strings, which have a base pointer but don't
2335 // actually share their characters with it.
2336 Label noBase;
2337 masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
2338 masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
2339 masm.branchTest32(Assembler::Zero, temp2_, Imm32(JSString::DEPENDENT_BIT),
2340 &noBase);
2341 masm.loadDependentStringBase(base, temp1_);
2342 masm.storeDependentStringBase(temp1_, string_);
2343 masm.bind(&noBase);
2345 // Post-barrier the base store, whether it was the direct or indirect
2346 // base (both will end up in temp1 here).
  // Barrier is only needed when |string_| is tenured and the base is in the
  // nursery; otherwise skip straight to |done|.
2347 masm.branchPtrInNurseryChunk(Assembler::Equal, string_, temp2_, &done);
2348 masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp1_, temp2_, &done);
2350 LiveRegisterSet regsToSave(RegisterSet::Volatile());
2351 regsToSave.takeUnchecked(temp1_);
2352 regsToSave.takeUnchecked(temp2_);
2354 masm.PushRegsInMask(regsToSave);
2356 masm.mov(ImmPtr(runtime), temp1_);
2358 using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
2359 masm.setupUnalignedABICall(temp2_);
2360 masm.passABIArg(temp1_);
2361 masm.passABIArg(string_);
2362 masm.callWithABI<Fn, PostWriteBarrier>();
2364 masm.PopRegsInMask(regsToSave);
2367 masm.bind(&done);
// Emit the out-of-line allocation paths: for each FallbackKind, call the
// matching VM allocator, then rejoin the inline path at joins_[kind]. A null
// allocation result jumps to |failure_|.
2370 void CreateDependentString::generateFallback(MacroAssembler& masm) {
2371 JitSpew(JitSpew_Codegen,
2372 "# Emitting CreateDependentString fallback (encoding=%s)",
2373 (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
2375 LiveRegisterSet regsToSave(RegisterSet::Volatile());
2376 regsToSave.takeUnchecked(string_);
2377 regsToSave.takeUnchecked(temp2_);
2379 for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
2380 masm.bind(&fallbacks_[kind]);
2382 masm.PushRegsInMask(regsToSave);
2384 using Fn = void* (*)(JSContext* cx);
    // |string_| doubles as the scratch/cx/result register for the ABI call.
2385 masm.setupUnalignedABICall(string_);
2386 masm.loadJSContext(string_);
2387 masm.passABIArg(string_);
2388 if (kind == FallbackKind::FatInlineString) {
2389 masm.callWithABI<Fn, AllocateFatInlineString>();
2390 } else {
2391 masm.callWithABI<Fn, AllocateDependentString>();
2393 masm.storeCallPointerResult(string_);
2395 masm.PopRegsInMask(regsToSave);
2397 masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);
2399 masm.jump(&joins_[kind]);
2403 // Generate the RegExpMatcher and RegExpExecMatch stubs. These are very similar,
2404 // but RegExpExecMatch also has to load and update .lastIndex for global/sticky
2405 // regular expressions.
//
// |initialStringHeap| selects the heap used for match strings created by
// CreateDependentString. Returns nullptr on failure (e.g. code allocation
// failure); a successfully built stub reports an undefined result Value at
// runtime to tell the caller to take the OOL VM-call path instead.
2406 static JitCode* GenerateRegExpMatchStubShared(JSContext* cx,
2407 gc::Heap initialStringHeap,
2408 bool isExecMatch) {
2409 if (isExecMatch) {
2410 JitSpew(JitSpew_Codegen, "# Emitting RegExpExecMatch stub");
2411 } else {
2412 JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
2415 // |initialStringHeap| could be stale after a GC.
2416 JS::AutoCheckCannotGC nogc(cx);
2418 Register regexp = RegExpMatcherRegExpReg;
2419 Register input = RegExpMatcherStringReg;
2420 Register lastIndex = RegExpMatcherLastIndexReg;
2421 ValueOperand result = JSReturnOperand;
2423 // We are free to clobber all registers, as LRegExpMatcher is a call
2424 // instruction.
2425 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2426 regs.take(input);
2427 regs.take(regexp);
2428 regs.take(lastIndex);
2430 Register temp1 = regs.takeAny();
2431 Register temp2 = regs.takeAny();
2432 Register temp3 = regs.takeAny();
2433 Register maybeTemp4 = InvalidReg;
2434 if (!regs.empty()) {
2435 // There are not enough registers on x86.
2436 maybeTemp4 = regs.takeAny();
2438 Register maybeTemp5 = InvalidReg;
2439 if (!regs.empty()) {
2440 // There are not enough registers on x86.
2441 maybeTemp5 = regs.takeAny();
2444 Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
2445 Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
2447 TempAllocator temp(&cx->tempLifoAlloc());
2448 JitContext jcx(cx);
2449 StackMacroAssembler masm(cx, temp);
2450 AutoCreatedBy acb(masm, "GenerateRegExpMatchStubShared");
2452 #ifdef JS_USE_LINK_REGISTER
2453 masm.pushReturnAddress();
2454 #endif
2455 masm.push(FramePointer);
2456 masm.moveStackPtrTo(FramePointer);
2458 Label notFoundZeroLastIndex;
2459 if (isExecMatch) {
2460 masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
2463 // The InputOutputData is placed above the frame pointer and return address on
2464 // the stack.
2465 int32_t inputOutputDataStartOffset = 2 * sizeof(void*);
2467 Label notFound, oolEntry;
2468 if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
2469 temp3, inputOutputDataStartOffset,
2470 initialStringHeap, &notFound, &oolEntry)) {
2471 return nullptr;
2474 // If a regexp has named captures, fall back to the OOL stub, which
2475 // will end up calling CreateRegExpMatchResults.
2476 Register shared = temp2;
2477 masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
2478 RegExpObject::SHARED_SLOT)),
2479 shared, JSVAL_TYPE_PRIVATE_GCTHING);
2480 masm.branchPtr(Assembler::NotEqual,
2481 Address(shared, RegExpShared::offsetOfGroupsTemplate()),
2482 ImmWord(0), &oolEntry);
2484 // Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
2485 masm.branchTest32(Assembler::NonZero,
2486 Address(shared, RegExpShared::offsetOfFlags()),
2487 Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);
2489 Address pairCountAddress =
2490 RegExpPairCountAddress(masm, inputOutputDataStartOffset);
2492 // Construct the result.
2493 Register object = temp1;
2495 // In most cases, the array will have just 1-2 elements, so we optimize for
2496 // that by emitting separate code paths for capacity 2/6/14 (= 4/8/16 slots
2497 // because two slots are used for the elements header).
2499 // Load the array length in temp2 and the shape in temp3.
2500 Label allocated;
2501 masm.load32(pairCountAddress, temp2);
2502 size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
2503 RegExpRealm::offsetOfNormalMatchResultShape();
2504 masm.loadGlobalObjectData(temp3);
2505 masm.loadPtr(Address(temp3, offset), temp3);
  // Allocate the result array with the given fixed-element capacity; any
  // allocation failure jumps to |oolEntry|.
2507 auto emitAllocObject = [&](size_t elementCapacity) {
2508 gc::AllocKind kind = GuessArrayGCKind(elementCapacity);
2509 MOZ_ASSERT(CanChangeToBackgroundAllocKind(kind, &ArrayObject::class_));
2510 kind = ForegroundToBackgroundAllocKind(kind);
2512 #ifdef DEBUG
2513 // Assert all of the available slots are used for |elementCapacity|
2514 // elements.
2515 size_t usedSlots = ObjectElements::VALUES_PER_HEADER + elementCapacity;
2516 MOZ_ASSERT(usedSlots == GetGCKindSlots(kind));
2517 #endif
2519 constexpr size_t numUsedDynamicSlots =
2520 RegExpRealm::MatchResultObjectSlotSpan;
2521 constexpr size_t numDynamicSlots =
2522 RegExpRealm::MatchResultObjectNumDynamicSlots;
2523 constexpr size_t arrayLength = 1;
2524 masm.createArrayWithFixedElements(object, temp3, temp2, temp3,
2525 arrayLength, elementCapacity,
2526 numUsedDynamicSlots, numDynamicSlots,
2527 kind, gc::Heap::Default, &oolEntry);
2530 Label moreThan2;
2531 masm.branch32(Assembler::Above, temp2, Imm32(2), &moreThan2);
2532 emitAllocObject(2);
2533 masm.jump(&allocated);
2535 Label moreThan6;
2536 masm.bind(&moreThan2);
2537 masm.branch32(Assembler::Above, temp2, Imm32(6), &moreThan6);
2538 emitAllocObject(6);
2539 masm.jump(&allocated);
2541 masm.bind(&moreThan6);
2542 static_assert(RegExpObject::MaxPairCount == 14);
2543 emitAllocObject(RegExpObject::MaxPairCount);
2545 masm.bind(&allocated);
2548 // clang-format off
2550 * [SMDOC] Stack layout for the RegExpMatcher stub
2552 * +---------------+
2553 * FramePointer +-----> |Caller-FramePtr|
2554 * +---------------+
2555 * |Return-Address |
2556 * +---------------+
2557 * inputOutputDataStartOffset +-----> +---------------+
2558 * |InputOutputData|
2559 * +---------------+
2560 * +---------------+
2561 * | MatchPairs |
2562 * pairsCountAddress +-----------> count |
2563 * | pairs |
2564 * | |
2565 * +---------------+
2566 * pairsVectorStartOffset +-----> +---------------+
2567 * | MatchPair |
2568 * matchPairStart +------------> start | <-------+
2569 * matchPairLimit +------------> limit | | Reserved space for
2570 * +---------------+ | `RegExpObject::MaxPairCount`
2571 * . | MatchPair objects.
2572 * . |
2573 * . | `count` objects will be
2574 * +---------------+ | initialized and can be
2575 * | MatchPair | | accessed below.
2576 * | start | <-------+
2577 * | limit |
2578 * +---------------+
2580 // clang-format on
2582 static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
2583 "MatchPair consists of two int32 values representing the start"
2584 "and the end offset of the match");
2586 int32_t pairsVectorStartOffset =
2587 RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
2589 // Incremented by one below for each match pair.
2590 Register matchIndex = temp2;
2591 masm.move32(Imm32(0), matchIndex);
2593 // The element in which to store the result of the current match.
2594 size_t elementsOffset = NativeObject::offsetOfFixedElements();
2595 BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);
2597 // The current match pair's "start" and "limit" member.
2598 BaseIndex matchPairStart(FramePointer, matchIndex, TimesEight,
2599 pairsVectorStartOffset + MatchPair::offsetOfStart());
2600 BaseIndex matchPairLimit(FramePointer, matchIndex, TimesEight,
2601 pairsVectorStartOffset + MatchPair::offsetOfLimit());
2603 Label* depStrFailure = &oolEntry;
2604 Label restoreRegExpAndLastIndex;
2606 Register temp4;
2607 if (maybeTemp4 == InvalidReg) {
2608 depStrFailure = &restoreRegExpAndLastIndex;
2610 // We don't have enough registers for a fourth temporary. Reuse |regexp|
2611 // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
2612 masm.push(regexp);
2613 temp4 = regexp;
2614 } else {
2615 temp4 = maybeTemp4;
2618 Register temp5;
2619 if (maybeTemp5 == InvalidReg) {
2620 depStrFailure = &restoreRegExpAndLastIndex;
2622 // We don't have enough registers for a fifth temporary. Reuse |lastIndex|
2623 // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
2624 masm.push(lastIndex);
2625 temp5 = lastIndex;
2626 } else {
2627 temp5 = maybeTemp5;
  // Undo the pushes above (in reverse order) when |regexp|/|lastIndex| were
  // reused as temporaries.
2630 auto maybeRestoreRegExpAndLastIndex = [&]() {
2631 if (maybeTemp5 == InvalidReg) {
2632 masm.pop(lastIndex);
2634 if (maybeTemp4 == InvalidReg) {
2635 masm.pop(regexp);
2639 // Loop to construct the match strings. There are two different loops,
2640 // depending on whether the input is a Two-Byte or a Latin-1 string.
2641 CreateDependentString depStrs[]{
2642 {CharEncoding::TwoByte, temp3, temp4, temp5, depStrFailure},
2643 {CharEncoding::Latin1, temp3, temp4, temp5, depStrFailure},
2647 Label isLatin1, done;
2648 masm.branchLatin1String(input, &isLatin1);
2650 for (auto& depStr : depStrs) {
2651 if (depStr.encoding() == CharEncoding::Latin1) {
2652 masm.bind(&isLatin1);
2655 Label matchLoop;
2656 masm.bind(&matchLoop);
2658 static_assert(MatchPair::NoMatch == -1,
2659 "MatchPair::start is negative if no match was found");
2661 Label isUndefined, storeDone;
2662 masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
2663 &isUndefined);
2665 depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
2666 input, matchPairStart, matchPairLimit,
2667 initialStringHeap);
2669 // Storing into nursery-allocated results object's elements; no post
2670 // barrier.
2671 masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
2672 masm.jump(&storeDone);
2674 masm.bind(&isUndefined);
2675 { masm.storeValue(UndefinedValue(), objectMatchElement); }
2676 masm.bind(&storeDone);
2678 masm.add32(Imm32(1), matchIndex);
2679 masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
2680 &done);
2681 masm.jump(&matchLoop);
2684 #ifdef DEBUG
2685 masm.assumeUnreachable("The match string loop doesn't fall through.");
2686 #endif
2688 masm.bind(&done);
2691 maybeRestoreRegExpAndLastIndex();
2693 // Fill in the rest of the output object.
2694 masm.store32(
2695 matchIndex,
2696 Address(object,
2697 elementsOffset + ObjectElements::offsetOfInitializedLength()));
2698 masm.store32(
2699 matchIndex,
2700 Address(object, elementsOffset + ObjectElements::offsetOfLength()));
2702 Address firstMatchPairStartAddress(
2703 FramePointer, pairsVectorStartOffset + MatchPair::offsetOfStart());
2704 Address firstMatchPairLimitAddress(
2705 FramePointer, pairsVectorStartOffset + MatchPair::offsetOfLimit());
2707 static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
2708 "First slot holds the 'index' property");
2709 static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
2710 "Second slot holds the 'input' property");
2712 masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
2714 masm.load32(firstMatchPairStartAddress, temp3);
2715 masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));
2717 // No post barrier needed (address is within nursery object.)
2718 masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));
2720 // For the ExecMatch stub, if the regular expression is global or sticky, we
2721 // have to update its .lastIndex slot.
2722 if (isExecMatch) {
2723 MOZ_ASSERT(object != lastIndex);
2724 Label notGlobalOrSticky;
2725 masm.branchTest32(Assembler::Zero, flagsSlot,
2726 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
2727 &notGlobalOrSticky);
2728 masm.load32(firstMatchPairLimitAddress, lastIndex);
2729 masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
2730 masm.bind(&notGlobalOrSticky);
2733 // All done!
2734 masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
2735 masm.pop(FramePointer);
2736 masm.ret();
  // No-match path: ExecMatch must also zero .lastIndex for global/sticky
  // regexps, then both flavors return null.
2738 masm.bind(&notFound);
2739 if (isExecMatch) {
2740 Label notGlobalOrSticky;
2741 masm.branchTest32(Assembler::Zero, flagsSlot,
2742 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
2743 &notGlobalOrSticky);
2744 masm.bind(&notFoundZeroLastIndex);
2745 masm.storeValue(Int32Value(0), lastIndexSlot);
2746 masm.bind(&notGlobalOrSticky);
2748 masm.moveValue(NullValue(), result);
2749 masm.pop(FramePointer);
2750 masm.ret();
2752 // Fallback paths for CreateDependentString.
2753 for (auto& depStr : depStrs) {
2754 depStr.generateFallback(masm);
2757 // Fall-through to the ool entry after restoring the registers.
2758 masm.bind(&restoreRegExpAndLastIndex);
2759 maybeRestoreRegExpAndLastIndex();
2761 // Use an undefined value to signal to the caller that the OOL stub needs to
2762 // be called.
2763 masm.bind(&oolEntry);
2764 masm.moveValue(UndefinedValue(), result);
2765 masm.pop(FramePointer);
2766 masm.ret();
2768 Linker linker(masm);
2769 JitCode* code = linker.newCode(cx, CodeKind::Other);
2770 if (!code) {
2771 return nullptr;
2774 const char* name = isExecMatch ? "RegExpExecMatchStub" : "RegExpMatcherStub";
2775 CollectPerfSpewerJitCodeProfile(code, name);
2776 #ifdef MOZ_VTUNE
2777 vtune::MarkStub(code, name);
2778 #endif
2780 return code;
// Build this zone's RegExpMatcher stub (the non-exec-match flavor of the
// shared generator). Returns nullptr on failure.
2783 JitCode* JitZone::generateRegExpMatcherStub(JSContext* cx) {
2784 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2785 /* isExecMatch = */ false);
// Build this zone's RegExpExecMatch stub (the exec-match flavor, which also
// maintains .lastIndex). Returns nullptr on failure.
2788 JitCode* JitZone::generateRegExpExecMatchStub(JSContext* cx) {
2789 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2790 /* isExecMatch = */ true);
// Out-of-line path for LRegExpMatcher: dispatches back to
// CodeGenerator::visitOutOfLineRegExpMatcher, which performs the match via a
// VM call when the inline stub signals failure.
2793 class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator> {
2794 LRegExpMatcher* lir_;
2796 public:
2797 explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) {}
2799 void accept(CodeGenerator* codegen) override {
2800 codegen->visitOutOfLineRegExpMatcher(this);
2803 LRegExpMatcher* lir() const { return lir_; }
// OOL fallback for LRegExpMatcher: calls RegExpMatcherRaw in the VM with the
// stack-allocated MatchPairs prepared by the inline path.
2806 void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) {
2807 LRegExpMatcher* lir = ool->lir();
2808 Register lastIndex = ToRegister(lir->lastIndex());
2809 Register input = ToRegister(lir->string());
2810 Register regexp = ToRegister(lir->regexp());
2812 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2813 regs.take(lastIndex);
2814 regs.take(input);
2815 regs.take(regexp);
2816 Register temp = regs.takeAny();
  // The MatchPairs live just above the InputOutputData on the stack.
2818 masm.computeEffectiveAddress(
2819 Address(masm.getStackPointer(), InputOutputDataSize), temp);
2821 pushArg(temp);
2822 pushArg(lastIndex);
2823 pushArg(input);
2824 pushArg(regexp);
2826 // We are not using oolCallVM because we are in a Call and live registers
2827 // have already been saved by the register allocator.
2828 using Fn =
2829 bool (*)(JSContext*, HandleObject regexp, HandleString input,
2830 int32_t lastIndex, MatchPairs* pairs, MutableHandleValue output);
2831 callVM<Fn, RegExpMatcherRaw>(lir);
2833 masm.jump(ool->rejoin());
// Inline path for LRegExpMatcher: reserves the stub's stack space, calls the
// zone's RegExpMatcher stub, and takes the OOL VM-call path when the stub
// reports an undefined result.
2836 void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
2837 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
2838 MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
2839 MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
2840 MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
  // The fixed stub input registers must not alias the return Value registers.
2842 #if defined(JS_NUNBOX32)
2843 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
2844 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
2845 static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
2846 static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
2847 static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
2848 static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
2849 #elif defined(JS_PUNBOX64)
2850 static_assert(RegExpMatcherRegExpReg != JSReturnReg);
2851 static_assert(RegExpMatcherStringReg != JSReturnReg);
2852 static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
2853 #endif
2855 masm.reserveStack(RegExpReservedStack);
2857 OutOfLineRegExpMatcher* ool = new (alloc()) OutOfLineRegExpMatcher(lir);
2858 addOutOfLineCode(ool, lir->mir());
2860 const JitZone* jitZone = gen->realm->zone()->jitZone();
2861 JitCode* regExpMatcherStub =
2862 jitZone->regExpMatcherStubNoBarrier(&zoneStubsToReadBarrier_);
2863 masm.call(regExpMatcherStub);
  // An undefined result is the stub's signal to fall back to the VM call.
2864 masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
2865 masm.bind(ool->rejoin());
2867 masm.freeStack(RegExpReservedStack);
// Out-of-line VM fallback for LRegExpExecMatch, taken when the inline
// RegExpExecMatch stub fails.
class OutOfLineRegExpExecMatch : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpExecMatch* lir_;

 public:
  explicit OutOfLineRegExpExecMatch(LRegExpExecMatch* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpExecMatch(this);
  }

  LRegExpExecMatch* lir() const { return lir_; }
};
// VM fallback for LRegExpExecMatch. The stack space for the InputOutputData
// was reserved by visitRegExpExecMatch; the MatchPairs argument is the address
// just above the InputOutputData.
void CodeGenerator::visitOutOfLineRegExpExecMatch(
    OutOfLineRegExpExecMatch* ool) {
  LRegExpExecMatch* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Pick any scratch register that doesn't alias the operands.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  // Arguments are pushed in reverse of the Fn signature order.
  pushArg(temp);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
               MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpBuiltinExecMatchFromJit>(lir);
  masm.jump(ool->rejoin());
}
// Inline path for LRegExpExecMatch: call the per-Zone RegExpExecMatch stub
// and fall back to the VM (OutOfLineRegExpExecMatch) when the stub returns
// |undefined|.
void CodeGenerator::visitRegExpExecMatch(LRegExpExecMatch* lir) {
  // The stub expects its operands in these fixed ABI registers.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // The operand registers must not alias the stub's boxed return registers.
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
#endif

  // Reserve stack space for the InputOutputData/MatchPairs the stub uses.
  masm.reserveStack(RegExpReservedStack);

  auto* ool = new (alloc()) OutOfLineRegExpExecMatch(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpExecMatchStub =
      jitZone->regExpExecMatchStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpExecMatchStub);
  // |undefined| result means the stub couldn't handle it; retry in C++.
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());

  masm.bind(ool->rejoin());
  masm.freeStack(RegExpReservedStack);
}
// Generates the Zone-shared RegExpSearcher stub. On success it returns the
// match's start index in ReturnReg and stores the match limit into
// cx->regExpSearcherLastLimit; otherwise it returns
// RegExpSearcherResultNotFound (no match) or RegExpSearcherResultFailed
// (retry in C++). Returns nullptr on OOM while assembling.
JitCode* JitZone::generateRegExpSearcherStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");

  Register regexp = RegExpSearcherRegExpReg;
  Register input = RegExpSearcherStringReg;
  Register lastIndex = RegExpSearcherLastIndexReg;
  Register result = ReturnReg;

  // We are free to clobber all registers, as LRegExpSearcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpSearcherStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

#ifdef DEBUG
  // Store sentinel value to cx->regExpSearcherLastLimit.
  // See comment in RegExpSearcherImpl.
  masm.loadJSContext(temp1);
  masm.store32(Imm32(RegExpSearcherLastLimitSentinel),
               Address(temp1, JSContext::offsetOfRegExpSearcherLastLimit()));
#endif

  // The InputOutputData is placed above the frame pointer and return address on
  // the stack.
  int32_t inputOutputDataStartOffset = 2 * sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // clang-format off
    /*
     * [SMDOC] Stack layout for the RegExpSearcher stub
     *
     *                                    +---------------+
     *               FramePointer +-----> |Caller-FramePtr|
     *                                    +---------------+
     *                                    |Return-Address |
     *                                    +---------------+
     * inputOutputDataStartOffset +-----> +---------------+
     *                                    |InputOutputData|
     *                                    +---------------+
     *                                    +---------------+
     *                                    |  MatchPairs   |
     *                                    |       count   |
     *                                    |       pairs   |
     *                                    |               |
     *                                    +---------------+
     *     pairsVectorStartOffset +-----> +---------------+
     *                                    |   MatchPair   |
     *             matchPairStart +------------>  start   |  <-------+
     *             matchPairLimit +------------>  limit   |          | Reserved space for
     *                                    +---------------+          | `RegExpObject::MaxPairCount`
     *                                           .                   | MatchPair objects.
     *                                           .                   |
     *                                           .                   | Only a single object will
     *                                    +---------------+          | be initialized and can be
     *                                    |   MatchPair   |          | accessed below.
     *                                    |      start    |  <-------+
     *                                    |      limit    |
     *                                    +---------------+
     */
  // clang-format on

  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairStart(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfStart());
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Store match limit to cx->regExpSearcherLastLimit and return the index.
  // |input| is no longer needed at this point, so it can be clobbered.
  masm.load32(matchPairLimit, result);
  masm.loadJSContext(input);
  masm.store32(result,
               Address(input, JSContext::offsetOfRegExpSearcherLastLimit()));
  masm.load32(matchPairStart, result);
  masm.pop(FramePointer);
  masm.ret();

  // No match found.
  masm.bind(&notFound);
  masm.move32(Imm32(RegExpSearcherResultNotFound), result);
  masm.pop(FramePointer);
  masm.ret();

  // Could not handle inline; the caller retries in C++.
  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpSearcherResultFailed), result);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpSearcherStub");
#endif

  return code;
}
// Out-of-line VM fallback for LRegExpSearcher, taken when the inline
// RegExpSearcher stub returns RegExpSearcherResultFailed.
class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpSearcher* lir_;

 public:
  explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpSearcher(this);
  }

  LRegExpSearcher* lir() const { return lir_; }
};
// VM fallback for LRegExpSearcher. The stack space for the InputOutputData
// was reserved by visitRegExpSearcher; the MatchPairs argument is the address
// just above the InputOutputData.
void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) {
  LRegExpSearcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Pick any scratch register that doesn't alias the operands.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  // Arguments are pushed in reverse of the Fn signature order.
  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call, and the live
  // registers are already saved by the register allocator.
  using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
                      int32_t lastIndex, MatchPairs* pairs, int32_t* result);
  callVM<Fn, RegExpSearcherRaw>(lir);

  masm.jump(ool->rejoin());
}
// Inline path for LRegExpSearcher: call the per-Zone RegExpSearcher stub and
// fall back to the VM (OutOfLineRegExpSearcher) when the stub returns
// RegExpSearcherResultFailed.
void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
  // The stub expects its operands in these fixed ABI registers.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpSearcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpSearcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpSearcherLastIndexReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // The operand registers must not alias the stub's result register.
  static_assert(RegExpSearcherRegExpReg != ReturnReg);
  static_assert(RegExpSearcherStringReg != ReturnReg);
  static_assert(RegExpSearcherLastIndexReg != ReturnReg);

  // Reserve stack space for the InputOutputData/MatchPairs the stub uses.
  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpSearcher* ool = new (alloc()) OutOfLineRegExpSearcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpSearcherStub =
      jitZone->regExpSearcherStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpSearcherStub);
  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
                ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
// Loads cx->regExpSearcherLastLimit into the output register and resets the
// stored value (see loadAndClearRegExpSearcherLastLimit).
void CodeGenerator::visitRegExpSearcherLastLimit(
    LRegExpSearcherLastLimit* lir) {
  Register result = ToRegister(lir->output());
  Register scratch = ToRegister(lir->temp0());

  masm.loadAndClearRegExpSearcherLastLimit(result, scratch);
}
// Generates the Zone-shared RegExpExecTest stub. Returns 1/0 in ReturnReg for
// found/not-found, or RegExpExecTestResultFailed when the operation must be
// retried in C++. For global/sticky regexps the .lastIndex slot is updated as
// a side effect. Returns nullptr on OOM while assembling.
JitCode* JitZone::generateRegExpExecTestStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpExecTest stub");

  Register regexp = RegExpExecTestRegExpReg;
  Register input = RegExpExecTestStringReg;
  Register result = ReturnReg;

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpExecTestStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // We are free to clobber all registers, as LRegExpExecTest is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);

  // Ensure lastIndex != result.
  regs.take(result);
  Register lastIndex = regs.takeAny();
  regs.add(result);
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());

  masm.reserveStack(RegExpReservedStack);

  // Load lastIndex and skip RegExp execution if needed.
  Label notFoundZeroLastIndex;
  masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);

  // In visitRegExpMatcher and visitRegExpSearcher, we reserve stack space
  // before calling the stub. For RegExpExecTest we call the stub before
  // reserving stack space, so the offset of the InputOutputData relative to the
  // frame pointer is negative.
  constexpr int32_t inputOutputDataStartOffset = -int32_t(RegExpReservedStack);

  // On ARM64, load/store instructions can encode an immediate offset in the
  // range [-256, 4095]. If we ever fail this assertion, it would be more
  // efficient to store the data above the frame pointer similar to
  // RegExpMatcher and RegExpSearcher.
  static_assert(inputOutputDataStartOffset >= -256);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // Set `result` to true/false to indicate found/not-found, or to
  // RegExpExecTestResultFailed if we have to retry in C++. If the regular
  // expression is global or sticky, we also have to update its .lastIndex slot.

  Label done;
  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Match found: result = 1; global/sticky regexps store the match limit
  // into .lastIndex.
  masm.move32(Imm32(1), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.load32(matchPairLimit, lastIndex);
  masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
  masm.jump(&done);

  // No match: result = 0; global/sticky regexps reset .lastIndex to 0.
  masm.bind(&notFound);
  masm.move32(Imm32(0), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  // Bail-out path from loadRegExpLastIndex above: report no match and
  // unconditionally zero .lastIndex.
  masm.bind(&notFoundZeroLastIndex);
  masm.move32(Imm32(0), result);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  // Could not handle inline; the caller retries in C++.
  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpExecTestResultFailed), result);

  masm.bind(&done);
  masm.freeStack(RegExpReservedStack);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpExecTestStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpExecTestStub");
#endif

  return code;
}
// Out-of-line VM fallback for LRegExpExecTest, taken when the inline
// RegExpExecTest stub returns RegExpExecTestResultFailed.
class OutOfLineRegExpExecTest : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpExecTest* lir_;

 public:
  explicit OutOfLineRegExpExecTest(LRegExpExecTest* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpExecTest(this);
  }

  LRegExpExecTest* lir() const { return lir_; }
};
// VM fallback for LRegExpExecTest: run the builtin exec-test path in C++.
void CodeGenerator::visitOutOfLineRegExpExecTest(OutOfLineRegExpExecTest* ool) {
  LRegExpExecTest* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Arguments are pushed in reverse of the Fn signature order.
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn = bool (*)(JSContext* cx, Handle<RegExpObject*> regexp,
                      HandleString input, bool* result);
  callVM<Fn, RegExpBuiltinExecTestFromJit>(lir);

  masm.jump(ool->rejoin());
}
// Inline path for LRegExpExecTest: call the per-Zone RegExpExecTest stub and
// fall back to the VM (OutOfLineRegExpExecTest) when the stub returns
// RegExpExecTestResultFailed. Unlike RegExpMatcher/RegExpSearcher, the stub
// reserves its own stack space, so none is reserved here.
void CodeGenerator::visitRegExpExecTest(LRegExpExecTest* lir) {
  // The stub expects its operands in these fixed ABI registers.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpExecTestRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpExecTestStringReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // The operand registers must not alias the stub's result register.
  static_assert(RegExpExecTestRegExpReg != ReturnReg);
  static_assert(RegExpExecTestStringReg != ReturnReg);

  auto* ool = new (alloc()) OutOfLineRegExpExecTest(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpExecTestStub =
      jitZone->regExpExecTestStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpExecTestStub);

  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpExecTestResultFailed),
                ool->entry());

  masm.bind(ool->rejoin());
}
3304 void CodeGenerator::visitRegExpHasCaptureGroups(LRegExpHasCaptureGroups* ins) {
3305 Register regexp = ToRegister(ins->regexp());
3306 Register input = ToRegister(ins->input());
3307 Register output = ToRegister(ins->output());
3309 using Fn =
3310 bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
3311 auto* ool = oolCallVM<Fn, js::RegExpHasCaptureGroups>(
3312 ins, ArgList(regexp, input), StoreRegisterTo(output));
3314 // Load RegExpShared in |output|.
3315 Label vmCall;
3316 masm.loadParsedRegExpShared(regexp, output, ool->entry());
3318 // Return true iff pairCount > 1.
3319 Label returnTrue;
3320 masm.branch32(Assembler::Above,
3321 Address(output, RegExpShared::offsetOfPairCount()), Imm32(1),
3322 &returnTrue);
3323 masm.move32(Imm32(0), output);
3324 masm.jump(ool->rejoin());
3326 masm.bind(&returnTrue);
3327 masm.move32(Imm32(1), output);
3329 masm.bind(ool->rejoin());
// Out-of-line VM fallback for LRegExpPrototypeOptimizable, taken when the
// inline check cannot prove the prototype is in its optimizable shape.
class OutOfLineRegExpPrototypeOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpPrototypeOptimizable* ins_;

 public:
  explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
  }

  LRegExpPrototypeOptimizable* ins() const { return ins_; }
};
// Inline fast path: set output to 1 when the realm's RegExp prototype passes
// the optimizable-shape check, else fall through to the VM via the OOL path
// (which computes the result in C++ and stores it to |output|).
void CodeGenerator::visitRegExpPrototypeOptimizable(
    LRegExpPrototypeOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  OutOfLineRegExpPrototypeOptimizable* ool =
      new (alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  const GlobalObject* global = gen->realm->maybeGlobal();
  MOZ_ASSERT(global);
  masm.branchIfNotRegExpPrototypeOptimizable(object, temp, global,
                                             ool->entry());
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}
// OOL path for LRegExpPrototypeOptimizable: ABI call into C++ to compute the
// boolean result. |output| doubles as the JSContext scratch before the call.
void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(
    OutOfLineRegExpPrototypeOptimizable* ool) {
  LRegExpPrototypeOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  // Plain ABI call (not a VM call), so volatile registers must be saved here.
  saveVolatile(output);

  using Fn = bool (*)(JSContext* cx, JSObject* proto);
  masm.setupAlignedABICall();
  masm.loadJSContext(output);
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}
// Out-of-line VM fallback for LRegExpInstanceOptimizable, taken when the
// inline check cannot prove the instance is in its optimizable shape.
class OutOfLineRegExpInstanceOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpInstanceOptimizable* ins_;

 public:
  explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpInstanceOptimizable(this);
  }

  LRegExpInstanceOptimizable* ins() const { return ins_; }
};
// Inline fast path: set output to 1 when the regexp instance passes the
// optimizable-shape check, else fall through to the VM via the OOL path
// (which computes the result in C++ and stores it to |output|).
void CodeGenerator::visitRegExpInstanceOptimizable(
    LRegExpInstanceOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  OutOfLineRegExpInstanceOptimizable* ool =
      new (alloc()) OutOfLineRegExpInstanceOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  const GlobalObject* global = gen->realm->maybeGlobal();
  MOZ_ASSERT(global);
  masm.branchIfNotRegExpInstanceOptimizable(object, temp, global, ool->entry());
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}
// OOL path for LRegExpInstanceOptimizable: ABI call into C++ to compute the
// boolean result. |output| doubles as the JSContext scratch before the call.
void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(
    OutOfLineRegExpInstanceOptimizable* ool) {
  LRegExpInstanceOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register proto = ToRegister(ins->proto());
  Register output = ToRegister(ins->output());

  // Plain ABI call (not a VM call), so volatile registers must be saved here.
  saveVolatile(output);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
  masm.setupAlignedABICall();
  masm.loadJSContext(output);
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.passABIArg(proto);
  masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}
// Emits a linear scan over |str|'s characters (length |len| > 0, encoding as
// given) that sets |output| to the index of the first '$' character, or -1
// when none is found. Clobbers temp0 (chars pointer) and temp1 (current char).
static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
                                 Register len, Register temp0, Register temp1,
                                 Register output, CharEncoding encoding) {
#ifdef DEBUG
  // The loop below reads at least one character, so the caller must
  // guarantee a non-empty string.
  Label ok;
  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
  masm.assumeUnreachable("Length should be greater than 0.");
  masm.bind(&ok);
#endif

  Register chars = temp0;
  masm.loadStringChars(str, chars, encoding);

  // |output| is both the loop counter and the result index.
  masm.move32(Imm32(0), output);

  Label start, done;
  masm.bind(&start);

  Register currentChar = temp1;
  masm.loadChar(chars, output, currentChar, encoding);
  masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);

  masm.add32(Imm32(1), output);
  masm.branch32(Assembler::NotEqual, output, len, &start);

  // Scanned the whole string without finding '$'.
  masm.move32(Imm32(-1), output);

  masm.bind(&done);
}
// Computes the index of the first '$' in a (non-rope) string, dispatching on
// the string's character encoding; ropes are handled in the VM fallback.
void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
  Register str = ToRegister(ins->str());
  Register output = ToRegister(ins->output());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register len = ToRegister(ins->temp2());

  using Fn = bool (*)(JSContext*, JSString*, int32_t*);
  OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
      ins, ArgList(str), StoreRegisterTo(output));

  // Ropes don't have linear chars; take the VM path for them.
  masm.branchIfRope(str, ool->entry());
  masm.loadStringLength(str, len);

  Label isLatin1, done;
  masm.branchLatin1String(str, &isLatin1);
  {
    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                         CharEncoding::TwoByte);
    masm.jump(&done);
  }
  masm.bind(&isLatin1);
  {
    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                         CharEncoding::Latin1);
  }
  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// String.prototype.replace VM call. Arguments are pushed in reverse of the
// Fn signature order: replacement, then pattern, then string. Constant
// operands are passed as GC pointers to avoid needing a register.
void CodeGenerator::visitStringReplace(LStringReplace* lir) {
  if (lir->replacement()->isConstant()) {
    pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->replacement()));
  }

  if (lir->pattern()->isConstant()) {
    pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->pattern()));
  }

  if (lir->string()->isConstant()) {
    pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->string()));
  }

  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
  if (lir->mir()->isFlatReplacement()) {
    callVM<Fn, StringFlatReplaceString>(lir);
  } else {
    callVM<Fn, StringReplace>(lir);
  }
}
// Attaches an IonBinaryArithIC for a value-typed binary operation. The JSOp is
// recovered from the bytecode pc of the instruction's resume point.
void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::LhsIndex));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::RhsIndex));
  ValueOperand output = ToOutValue(lir);

  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOp::Add:
    case JSOp::Sub:
    case JSOp::Mul:
    case JSOp::Div:
    case JSOp::Mod:
    case JSOp::Pow:
    case JSOp::BitAnd:
    case JSOp::BitOr:
    case JSOp::BitXor:
    case JSOp::Lsh:
    case JSOp::Rsh:
    case JSOp::Ursh: {
      IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }
    default:
      MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
  }
}
// Attaches an IonCompareIC for a boolean-producing comparison. The JSOp is
// recovered from the bytecode pc of the instruction's resume point.
void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::LhsIndex));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::RhsIndex));
  Register output = ToRegister(lir->output());

  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOp::Lt:
    case JSOp::Le:
    case JSOp::Gt:
    case JSOp::Ge:
    case JSOp::Eq:
    case JSOp::Ne:
    case JSOp::StrictEq:
    case JSOp::StrictNe: {
      IonCompareIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }
    default:
      MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
  }
}
// Attaches an IonUnaryArithIC for a unary value operation.
void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister input =
      TypedOrValueRegister(ToValue(lir, LUnaryCache::InputIndex));
  ValueOperand output = ToOutValue(lir);

  IonUnaryArithIC ic(liveRegs, input, output);
  addIC(lir, allocateIC(ic));
}
// import.meta: get or lazily create the module's meta object in the VM.
void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
  pushArg(ImmPtr(lir->mir()->module()));

  using Fn = JSObject* (*)(JSContext*, HandleObject);
  callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
}
// Dynamic import(): kick off the module import in the VM. Arguments are
// pushed in reverse of the Fn signature order.
void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
  pushArg(ToValue(lir, LDynamicImport::OptionsIndex));
  pushArg(ToValue(lir, LDynamicImport::SpecifierIndex));
  pushArg(ImmGCPtr(current->mir()->info().script()));

  using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
  callVM<Fn, js::StartDynamicModuleImport>(lir);
}
// Lambda creation: clone the template function inline in the nursery and set
// its environment; fall back to js::Lambda in the VM when allocation fails.
void CodeGenerator::visitLambda(LLambda* lir) {
  Register envChain = ToRegister(lir->environmentChain());
  Register output = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  JSFunction* fun = lir->mir()->templateFunction();

  using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
  OutOfLineCode* ool = oolCallVM<Fn, js::Lambda>(
      lir, ArgList(ImmGCPtr(fun), envChain), StoreRegisterTo(output));

  TemplateObject templateObject(fun);
  masm.createGCObject(output, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.storeValue(JSVAL_TYPE_OBJECT, envChain,
                  Address(output, JSFunction::offsetOfEnvironment()));
  // No post barrier needed because output is guaranteed to be allocated in
  // the nursery.

  masm.bind(ool->rejoin());
}
// Creates a function clone with an explicit prototype via the VM. Arguments
// are pushed in reverse of the Fn signature order.
void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
  Register envChain = ToRegister(lir->envChain());
  Register prototype = ToRegister(lir->prototype());

  pushArg(prototype);
  pushArg(envChain);
  pushArg(ImmGCPtr(lir->mir()->function()));

  using Fn =
      JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
  callVM<Fn, js::FunWithProtoOperation>(lir);
}
// Sets a function's computed name via the VM. Arguments are pushed in reverse
// of the Fn signature order.
void CodeGenerator::visitSetFunName(LSetFunName* lir) {
  pushArg(Imm32(lir->mir()->prefixKind()));
  pushArg(ToValue(lir, LSetFunName::NameIndex));
  pushArg(ToRegister(lir->fun()));

  using Fn =
      bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
  callVM<Fn, js::SetFunctionName>(lir);
}
// Records the OSI (on-stack invalidation) call point for the preceding
// instruction's safepoint.
void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
  // Note: markOsiPoint ensures enough space exists between the last
  // LOsiPoint and this one to patch adjacent call instructions.

  MOZ_ASSERT(masm.framePushed() == frameSize());

  uint32_t osiCallPointOffset = markOsiPoint(lir);

  LSafepoint* safepoint = lir->associatedSafepoint();
  MOZ_ASSERT(!safepoint->osiCallPointOffset());
  safepoint->setOsiCallPointOffset(osiCallPointOffset);

#ifdef DEBUG
  // There should be no movegroups or other instructions between
  // an instruction and its OsiPoint. This is necessary because
  // we use the OsiPoint's snapshot from within VM calls.
  for (LInstructionReverseIterator iter(current->rbegin(lir));
       iter != current->rend(); iter++) {
    if (*iter == lir) {
      continue;
    }
    MOZ_ASSERT(!iter->isMoveGroup());
    MOZ_ASSERT(iter->safepoint() == safepoint);
    break;
  }
#endif

#ifdef CHECK_OSIPOINT_REGISTERS
  if (shouldVerifyOsiPointRegs(safepoint)) {
    verifyOsiPointRegs(safepoint);
  }
#endif
}
// Phis are resolved by the register allocator; none should survive to codegen.
void CodeGenerator::visitPhi(LPhi* lir) {
  MOZ_CRASH("Unexpected LPhi in CodeGenerator");
}
// Unconditional branch to the target block (elided when it falls through).
void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }
// Table switch on an int32 (or a double coerced to int32) index; non-integral
// doubles take the default case.
void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
  const LAllocation* temp;

  if (mir->getOperand(0)->type() != MIRType::Int32) {
    temp = ins->tempInt()->output();

    // The input is a double, so try and convert it to an integer.
    // If it does not fit in an integer, take the default case.
    masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp),
                              defaultcase, false);
  } else {
    temp = ins->index();
  }

  emitTableSwitchDispatch(mir, ToRegister(temp),
                          ToRegisterOrInvalid(ins->tempPointer()));
}
// Table switch on a boxed Value: non-numbers take the default case; doubles
// are converted to int32 (default case when not exactly representable).
void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  Register index = ToRegister(ins->tempInt());
  ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
  Register tag = masm.extractTag(value, index);
  masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);

  Label unboxInt, isInt;
  masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
  {
    // Double path: unbox and convert; bail to default on inexact conversion.
    FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
    masm.unboxDouble(value, floatIndex);
    masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
    masm.jump(&isInt);
  }

  masm.bind(&unboxInt);
  masm.unboxInt32(value, index);

  masm.bind(&isInt);

  emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
}
// Parameters live in the caller-constructed frame; nothing to emit.
void CodeGenerator::visitParameter(LParameter* lir) {}
// Loads the callee JSFunction out of the frame's callee token.
void CodeGenerator::visitCallee(LCallee* lir) {
  Register callee = ToRegister(lir->output());
  Address ptr(FramePointer, JitFrameLayout::offsetOfCalleeToken());

  masm.loadFunctionFromCalleeToken(ptr, callee);
}
// Sets output to 1 when the current frame was entered as a constructor call,
// by isolating the constructing bit of the callee token.
void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
  Register output = ToRegister(lir->output());
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.loadPtr(calleeToken, output);

  // We must be inside a function.
  MOZ_ASSERT(current->mir()->info().script()->function());

  // The low bit indicates whether this call is constructing, just clear the
  // other bits.
  static_assert(CalleeToken_Function == 0x0,
                "CalleeTokenTag value should match");
  static_assert(CalleeToken_FunctionConstructing == 0x1,
                "CalleeTokenTag value should match");
  masm.andPtr(Imm32(0x1), output);
}
// Jumps to the shared return label with the result already in the return
// register(s); the jump is elided when the epilogue follows directly.
void CodeGenerator::visitReturn(LReturn* lir) {
#if defined(JS_NUNBOX32)
  DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
  DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
  MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
  MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  DebugOnly<LAllocation*> result = lir->getOperand(0);
  MOZ_ASSERT(ToRegister(result) == JSReturnReg);
#endif
  // Don't emit a jump to the return label if this is the last block, as
  // it'll fall through to the epilogue.
  //
  // This is -not- true however for a Generator-return, which may appear in the
  // middle of the last block, so we should always emit the jump there.
  if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
    masm.jump(&returnLabel_);
  }
}
// On-stack-replacement entry point: records the OSR offset and sets up the
// Ion frame on top of the Baseline-provided JitFrameLayout.
void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
  Register temp = ToRegister(lir->temp());

  // Remember the OSR entry offset into the code buffer.
  masm.flushBuffer();
  setOsrEntryOffset(masm.size());

  // Allocate the full frame for this function
  // Note we have a new entry here. So we reset MacroAssembler::framePushed()
  // to 0, before reserving the stack.
  MOZ_ASSERT(masm.framePushed() == frameSize());
  masm.setFramePushed(0);

  // The Baseline code ensured both the frame pointer and stack pointer point to
  // the JitFrameLayout on the stack.

  // If profiling, save the current frame pointer to a per-thread global field.
  if (isProfilerInstrumentationEnabled()) {
    masm.profilerEnterFrame(FramePointer, temp);
  }

  masm.reserveStack(frameSize());
  MOZ_ASSERT(masm.framePushed() == frameSize());

  // Ensure that the Ion frame is properly aligned.
  masm.assertStackAlignment(JitStackAlignment, 0);
}
// Loads the environment chain from the BaselineFrame being OSR'd from.
void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
  const LAllocation* frame = lir->getOperand(0);
  const LDefinition* object = lir->getDef(0);

  // BaselineFrame slots are addressed at negative offsets from the frame
  // pointer, hence "reverse offset".
  const ptrdiff_t frameOffset =
      BaselineFrame::reverseOffsetOfEnvironmentChain();

  masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
}
3831 void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
3832 const LAllocation* frame = lir->getOperand(0);
3833 const LDefinition* object = lir->getDef(0);
3835 const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();
3837 masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
3840 void CodeGenerator::visitOsrValue(LOsrValue* value) {
3841 const LAllocation* frame = value->getOperand(0);
3842 const ValueOperand out = ToOutValue(value);
3844 const ptrdiff_t frameOffset = value->mir()->frameOffset();
3846 masm.loadValue(Address(ToRegister(frame), frameOffset), out);
// Produce the BaselineFrame's return value: |undefined| unless the frame's
// HAS_RVAL flag is set, in which case the stored return value is loaded.
void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
  const LAllocation* frame = lir->getOperand(0);
  const ValueOperand out = ToOutValue(lir);

  Address flags =
      Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
  Address retval =
      Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());

  // Default to |undefined| when no return value has been set.
  masm.moveValue(UndefinedValue(), out);

  Label done;
  masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
                    &done);
  masm.loadValue(retval, out);
  masm.bind(&done);
}
// Store a typed call argument into its outgoing argument stack slot. The
// source may be a float register (boxed as a double), a general-purpose
// register (boxed with the statically-known type tag), or a constant.
void CodeGenerator::visitStackArgT(LStackArgT* lir) {
  const LAllocation* arg = lir->arg();
  MIRType argType = lir->type();
  uint32_t argslot = lir->argslot();
  MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());

  Address dest = AddressOfPassedArg(argslot);

  if (arg->isFloatReg()) {
    masm.boxDouble(ToFloatRegister(arg), dest);
  } else if (arg->isRegister()) {
    masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
  } else {
    masm.storeValue(arg->toConstant()->toJSValue(), dest);
  }
}
// Store a boxed Value call argument into its outgoing argument stack slot.
void CodeGenerator::visitStackArgV(LStackArgV* lir) {
  ValueOperand val = ToValue(lir, 0);
  uint32_t argslot = lir->argslot();
  MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());

  masm.storeValue(val, AddressOfPassedArg(argslot));
}
// Resolve and emit a group of parallel moves produced by register
// allocation. Each LIR move is translated to a MoveOp type, handed to the
// MoveResolver (which orders them and breaks cycles), and finally emitted by
// the platform MoveEmitter. Allocation failures are funneled through
// masm.propagateOOM.
void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
  if (!group->numMoves()) {
    return;
  }

  MoveResolver& resolver = masm.moveResolver();

  for (size_t i = 0; i < group->numMoves(); i++) {
    const LMove& move = group->getMove(i);

    LAllocation from = move.from();
    LAllocation to = move.to();
    LDefinition::Type type = move.type();

    // No bogus moves.
    MOZ_ASSERT(from != to);
    MOZ_ASSERT(!from.isConstant());
    MoveOp::Type moveType;
    switch (type) {
      case LDefinition::OBJECT:
      case LDefinition::SLOTS:
      case LDefinition::WASM_ANYREF:
#ifdef JS_NUNBOX32
      case LDefinition::TYPE:
      case LDefinition::PAYLOAD:
#else
      case LDefinition::BOX:
#endif
      case LDefinition::GENERAL:
      case LDefinition::STACKRESULTS:
        moveType = MoveOp::GENERAL;
        break;
      case LDefinition::INT32:
        moveType = MoveOp::INT32;
        break;
      case LDefinition::FLOAT32:
        moveType = MoveOp::FLOAT32;
        break;
      case LDefinition::DOUBLE:
        moveType = MoveOp::DOUBLE;
        break;
      case LDefinition::SIMD128:
        moveType = MoveOp::SIMD128;
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }

    masm.propagateOOM(
        resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
  }

  masm.propagateOOM(resolver.resolve());
  if (masm.oom()) {
    return;
  }

  MoveEmitter emitter(masm);

#ifdef JS_CODEGEN_X86
  if (group->maybeScratchRegister().isGeneralReg()) {
    emitter.setScratchRegister(
        group->maybeScratchRegister().toGeneralReg()->reg());
  } else {
    resolver.sortMemoryToMemoryMoves();
  }
#endif

  emitter.emit(resolver);
  emitter.finish();
}
3964 void CodeGenerator::visitInteger(LInteger* lir) {
3965 masm.move32(Imm32(lir->i32()), ToRegister(lir->output()));
3968 void CodeGenerator::visitInteger64(LInteger64* lir) {
3969 masm.move64(Imm64(lir->i64()), ToOutRegister64(lir));
3972 void CodeGenerator::visitPointer(LPointer* lir) {
3973 masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
// Load a nursery-allocated object through the IonScript's nursery objects
// list. The list-entry address is emitted as a patchable move and recorded
// (with its index) in ionNurseryObjectLabels_ for later patching.
void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
  Register output = ToRegister(lir->output());
  uint32_t nurseryIndex = lir->mir()->nurseryIndex();

  // Load a pointer to the entry in IonScript's nursery objects list.
  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
  masm.propagateOOM(ionNurseryObjectLabels_.emplaceBack(label, nurseryIndex));

  // Load the JSObject*.
  masm.loadPtr(Address(output, 0), output);
}
void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
  // No-op: no code is emitted. The LIR instruction's use of the object
  // operand is what matters — it extends the object's live range.
}
// Debug-only: increment the JSContext's GC-unsafe-region counter and crash
// if the counter did not become positive (unbalanced enter/leave pairs).
void CodeGenerator::visitDebugEnterGCUnsafeRegion(
    LDebugEnterGCUnsafeRegion* lir) {
  Register temp = ToRegister(lir->temp0());

  masm.loadJSContext(temp);

  Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
  masm.add32(Imm32(1), inUnsafeRegion);

  Label ok;
  masm.branch32(Assembler::GreaterThan, inUnsafeRegion, Imm32(0), &ok);
  masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
  masm.bind(&ok);
}
// Debug-only: decrement the JSContext's GC-unsafe-region counter and crash
// if the counter went negative (unbalanced enter/leave pairs).
void CodeGenerator::visitDebugLeaveGCUnsafeRegion(
    LDebugLeaveGCUnsafeRegion* lir) {
  Register temp = ToRegister(lir->temp0());

  masm.loadJSContext(temp);

  Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
  masm.add32(Imm32(-1), inUnsafeRegion);

  Label ok;
  masm.branch32(Assembler::GreaterThanOrEqual, inUnsafeRegion, Imm32(0), &ok);
  masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
  masm.bind(&ok);
}
4022 void CodeGenerator::visitSlots(LSlots* lir) {
4023 Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
4024 masm.loadPtr(slots, ToRegister(lir->output()));
4027 void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
4028 ValueOperand dest = ToOutValue(lir);
4029 Register base = ToRegister(lir->input());
4030 int32_t offset = lir->mir()->slot() * sizeof(js::Value);
4032 masm.loadValue(Address(base, offset), dest);
4035 static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
4036 MIRType valueType) {
4037 if (value->isConstant()) {
4038 return ConstantOrRegister(value->toConstant()->toJSValue());
4040 return TypedOrValueRegister(valueType, ToAnyRegister(value));
// Store a typed value into a dynamic slot, emitting a pre-write barrier
// first when the MIR indicated one is needed.
void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(js::Value);
  Address dest(base, offset);

  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(dest);
  }

  MIRType valueType = lir->mir()->value()->type();
  ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
  masm.storeUnboxedValue(value, valueType, dest);
}
// Store a boxed Value into a dynamic slot, emitting a pre-write barrier
// first when the MIR indicated one is needed.
void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(Value);

  const ValueOperand value = ToValue(lir, LStoreDynamicSlotV::ValueIndex);

  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(Address(base, offset));
  }

  masm.storeValue(value, Address(base, offset));
}
4070 void CodeGenerator::visitElements(LElements* lir) {
4071 Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
4072 masm.loadPtr(elements, ToRegister(lir->output()));
4075 void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
4076 Address environment(ToRegister(lir->function()),
4077 JSFunction::offsetOfEnvironment());
4078 masm.unboxObject(environment, ToRegister(lir->output()));
// Load the method's [[HomeObject]] from its extended-function slot. Debug
// builds assert that the slot actually holds an object.
void CodeGenerator::visitHomeObject(LHomeObject* lir) {
  Register func = ToRegister(lir->function());
  Address homeObject(func, FunctionExtended::offsetOfMethodHomeObjectSlot());

  masm.assertFunctionIsExtended(func);
#ifdef DEBUG
  Label isObject;
  masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
  masm.assumeUnreachable("[[HomeObject]] must be Object");
  masm.bind(&isObject);
#endif

  masm.unboxObject(homeObject, ToRegister(lir->output()));
}
// Compute the super base: the prototype of the home object, boxed as an
// object Value, or |null| if the prototype is null.
void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
  Register homeObject = ToRegister(lir->homeObject());
  ValueOperand output = ToOutValue(lir);
  Register temp = output.scratchReg();

  masm.loadObjProto(homeObject, temp);

#ifdef DEBUG
  // We won't encounter a lazy proto, because the prototype is guaranteed to
  // either be a JSFunction or a PlainObject, and only proxy objects can have a
  // lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label proxyCheckDone;
  masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
  masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
  masm.bind(&proxyCheckDone);
#endif

  Label nullProto, done;
  masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);

  // Box prototype and return
  masm.tagValue(JSVAL_TYPE_OBJECT, temp, output);
  masm.jump(&done);

  masm.bind(&nullProto);
  masm.moveValue(NullValue(), output);

  masm.bind(&done);
}
4128 template <class T>
4129 static T* ToConstantObject(MDefinition* def) {
4130 MOZ_ASSERT(def->isConstant());
4131 return &def->toConstant()->toObject().as<T>();
// Inline-allocate a BlockLexicalEnvironmentObject from its template object,
// falling back to an out-of-line VM call if GC allocation fails.
void CodeGenerator::visitNewLexicalEnvironmentObject(
    LNewLexicalEnvironmentObject* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj = ToConstantObject<BlockLexicalEnvironmentObject>(
      lir->mir()->templateObj());
  auto* scope = &templateObj->scope();
  gc::Heap initialHeap = gc::Heap::Default;

  using Fn =
      BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>);
  auto* ool =
      oolCallVM<Fn, BlockLexicalEnvironmentObject::createWithoutEnclosing>(
          lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
// Inline-allocate a ClassBodyLexicalEnvironmentObject from its template
// object, falling back to an out-of-line VM call if GC allocation fails.
void CodeGenerator::visitNewClassBodyEnvironmentObject(
    LNewClassBodyEnvironmentObject* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj = ToConstantObject<ClassBodyLexicalEnvironmentObject>(
      lir->mir()->templateObj());
  auto* scope = &templateObj->scope();
  gc::Heap initialHeap = gc::Heap::Default;

  using Fn = ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
                                                    Handle<ClassBodyScope*>);
  auto* ool =
      oolCallVM<Fn, ClassBodyLexicalEnvironmentObject::createWithoutEnclosing>(
          lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
// Inline-allocate a VarEnvironmentObject from its template object, falling
// back to an out-of-line VM call if GC allocation fails.
void CodeGenerator::visitNewVarEnvironmentObject(
    LNewVarEnvironmentObject* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj =
      ToConstantObject<VarEnvironmentObject>(lir->mir()->templateObj());
  auto* scope = &templateObj->scope().as<VarScope>();
  gc::Heap initialHeap = gc::Heap::Default;

  using Fn = VarEnvironmentObject* (*)(JSContext*, Handle<VarScope*>);
  auto* ool = oolCallVM<Fn, VarEnvironmentObject::createWithoutEnclosing>(
      lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
// Bail out of Ion unless the object has the expected shape.
void CodeGenerator::visitGuardShape(LGuardShape* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToTempRegisterOrInvalid(guard->temp0());
  Label bail;
  masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
                          obj, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Guard on a realm fuse. For fuse indexes with dependency tracking, register
// a compilation dependency instead of emitting code; for all others, load
// the fuse and bail out if its reference is no longer null.
void CodeGenerator::visitGuardFuse(LGuardFuse* guard) {
  auto fuseIndex = guard->mir()->fuseIndex();
  switch (fuseIndex) {
    case RealmFuses::FuseIndex::OptimizeGetIteratorFuse:
      addOptimizeGetIteratorFuseDependency();
      return;
    default:
      // validateAndRegisterFuseDependencies doesn't have
      // handling for this yet, actively check fuse instead.
      break;
  }

  Register temp = ToRegister(guard->temp0());
  Label bail;

  // Bake specific fuse address for Ion code, because we won't share this code
  // across realms.
  GuardFuse* fuse = mirGen().realm->realmFuses().getFuseByIndex(fuseIndex);
  masm.loadPtr(AbsoluteAddress(fuse->fuseRef()), temp);
  masm.branchPtr(Assembler::NotEqual, temp, ImmPtr(nullptr), &bail);

  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion unless the object's shape is one of the shapes stored in
// the shape-list object's elements.
void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {
  Register obj = ToRegister(guard->object());
  Register shapeList = ToRegister(guard->shapeList());
  Register temp = ToRegister(guard->temp0());
  Register temp2 = ToRegister(guard->temp1());
  Register temp3 = ToRegister(guard->temp2());
  Register spectre = ToTempRegisterOrInvalid(guard->temp3());

  Label bail;
  masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
  masm.branchTestObjShapeList(Assembler::NotEqual, obj, temp, temp2, temp3,
                              spectre, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion unless the object's prototype is the expected object.
void CodeGenerator::visitGuardProto(LGuardProto* guard) {
  Register obj = ToRegister(guard->object());
  Register expected = ToRegister(guard->expected());
  Register temp = ToRegister(guard->temp0());

  masm.loadObjProto(obj, temp);

  Label bail;
  masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion unless the object's prototype is null.
void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  masm.loadObjProto(obj, temp);

  Label bail;
  masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion unless the object is a native object.
void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchIfNonNativeObj(obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion unless the generation counter at the baked-in address
// still holds the expected value.
void CodeGenerator::visitGuardGlobalGeneration(LGuardGlobalGeneration* guard) {
  Register temp = ToRegister(guard->temp0());
  Label bail;

  masm.load32(AbsoluteAddress(guard->mir()->generationAddr()), temp);
  masm.branch32(Assembler::NotEqual, temp, Imm32(guard->mir()->expected()),
                &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion unless the object is a proxy.
void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestObjectIsProxy(false, obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion if the object is a proxy.
void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestObjectIsProxy(true, obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion if the proxy's handler belongs to the DOM proxy family.
void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
  Register proxy = ToRegister(guard->proxy());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
                                    GetDOMProxyHandlerFamily(), &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// VM call: ProxyGetProperty(cx, proxy, id, vp) with a statically-known id.
void CodeGenerator::visitProxyGet(LProxyGet* lir) {
  Register proxy = ToRegister(lir->proxy());
  Register temp = ToRegister(lir->temp0());

  pushArg(lir->mir()->id(), temp);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
  callVM<Fn, ProxyGetProperty>(lir);
}
// VM call: ProxyGetPropertyByValue(cx, proxy, idVal, vp) with a dynamic id.
void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir, LProxyGetByValue::IdIndex);

  pushArg(idVal);
  pushArg(proxy);

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  callVM<Fn, ProxyGetPropertyByValue>(lir);
}
// VM call for a proxy property-existence check: ProxyHasOwn or ProxyHas,
// selected by the MIR's hasOwn flag.
void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir, LProxyHasProp::IdIndex);

  pushArg(idVal);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  if (lir->mir()->hasOwn()) {
    callVM<Fn, ProxyHasOwn>(lir);
  } else {
    callVM<Fn, ProxyHas>(lir);
  }
}
// VM call: ProxySetProperty(cx, proxy, id, rhs, strict) with a
// statically-known id.
void CodeGenerator::visitProxySet(LProxySet* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand rhs = ToValue(lir, LProxySet::RhsIndex);
  Register temp = ToRegister(lir->temp0());

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(lir->mir()->id(), temp);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, ProxySetProperty>(lir);
}
// VM call: ProxySetPropertyByValue(cx, proxy, idVal, rhs, strict) with a
// dynamic id.
void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir, LProxySetByValue::IdIndex);
  ValueOperand rhs = ToValue(lir, LProxySetByValue::RhsIndex);

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(idVal);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, ProxySetPropertyByValue>(lir);
}
// VM call: jit::SetArrayLength(cx, obj, rhs, strict) to set an array's
// .length property.
void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
  Register obj = ToRegister(lir->obj());
  ValueOperand rhs = ToValue(lir, LCallSetArrayLength::RhsIndex);

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
  callVM<Fn, jit::SetArrayLength>(lir);
}
// Megamorphic property get with a constant property name. Probe the
// megamorphic cache inline first; on a cache miss, call the pure C++ lookup
// helper, and bail out of Ion if the helper fails (or the object is not
// native).
void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
  Register obj = ToRegister(lir->object());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register temp3 = ToRegister(lir->temp3());
  ValueOperand output = ToOutValue(lir);

  Label bail, cacheHit;
  masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
                                  output, &cacheHit);

  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // Reserve a stack Value to receive the result; temp3 points at it.
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(temp3);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(obj);
  masm.movePropertyKey(lir->mir()->name(), temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(temp2);
  masm.passABIArg(temp3);

  masm.callWithABI<Fn, GetNativeDataPropertyPure>();

  MOZ_ASSERT(!output.aliases(ReturnReg));
  masm.Pop(output);

  masm.branchIfFalseBool(ReturnReg, &bail);

  masm.bind(&cacheHit);
  bailoutFrom(&bail, lir->snapshot());
}
// Megamorphic property get with a dynamic (Value) id. Probe the megamorphic
// cache inline first; on a cache miss, call the pure C++ lookup helper
// through a stack-passed in/out Value pair, and bail out of Ion if the
// helper fails.
void CodeGenerator::visitMegamorphicLoadSlotByValue(
    LMegamorphicLoadSlotByValue* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand idVal = ToValue(lir, LMegamorphicLoadSlotByValue::IdIndex);
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  ValueOperand output = ToOutValue(lir);

  Label bail, cacheHit;
  masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
                                         output, &cacheHit);

  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(temp0);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(obj);
  masm.passABIArg(temp2);
  masm.passABIArg(temp0);
  masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();

  MOZ_ASSERT(!idVal.aliases(temp0));
  masm.storeCallPointerResult(temp0);
  masm.Pop(idVal);

  // On failure, discard the result slot before bailing so the stack height
  // tracking stays consistent on the success path.
  uint32_t framePushed = masm.framePushed();
  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  masm.freeStack(sizeof(Value));  // Discard result Value.
  masm.jump(&bail);

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.Pop(output);

  masm.bind(&cacheHit);
  bailoutFrom(&bail, lir->snapshot());
}
// Megamorphic property set with a constant name. Try the inline cached-set
// fast path first; on a miss perform a VM call. After a fast-path store,
// emit a post-write barrier when a nursery cell may have been stored into a
// tenured object.
void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand value = ToValue(lir, LMegamorphicStoreSlot::RhsIndex);

  Register temp0 = ToRegister(lir->temp0());
#ifndef JS_CODEGEN_X86
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  // x86 has too few registers, so it uses a variant of the fast path with
  // fewer temps.
  Label cacheHit, done;
#ifdef JS_CODEGEN_X86
  masm.emitMegamorphicCachedSetSlot(
      lir->mir()->name(), obj, temp0, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#else
  masm.emitMegamorphicCachedSetSlot(
      lir->mir()->name(), obj, temp0, temp1, temp2, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#endif

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(value);
  pushArg(lir->mir()->name(), temp0);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, SetPropertyMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // Skip the post-write barrier if the object is itself in the nursery or
  // the stored value is not a nursery cell.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  saveVolatile(temp0);
  emitPostWriteBarrier(obj);
  restoreVolatile(temp0);

  masm.bind(&done);
}
// Megamorphic |in| / hasOwnProperty check. Probe the megamorphic cache
// inline first; on a miss call the pure C++ helper (hasOwn variant selected
// by the MIR flag) through a stack-passed in/out Value pair, and bail out of
// Ion if the helper fails.
void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand idVal = ToValue(lir, LMegamorphicHasProp::IdIndex);
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label bail, cacheHit;
  masm.emitMegamorphicCacheLookupExists(idVal, obj, temp0, temp1, temp2, output,
                                        &cacheHit, lir->mir()->hasOwn());

  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(temp0);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(obj);
  masm.passABIArg(temp2);
  masm.passABIArg(temp0);
  if (lir->mir()->hasOwn()) {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
  } else {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
  }

  MOZ_ASSERT(!idVal.aliases(temp0));
  masm.storeCallPointerResult(temp0);
  masm.Pop(idVal);

  // On failure, discard the result slot before bailing so the stack height
  // tracking stays consistent on the success path.
  uint32_t framePushed = masm.framePushed();
  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  masm.freeStack(sizeof(Value));  // Discard result Value.
  masm.jump(&bail);

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
  masm.freeStack(sizeof(Value));
  masm.bind(&cacheHit);

  bailoutFrom(&bail, lir->snapshot());
}
// Determine whether the atom |id| is one of the property keys of a known
// shape, producing 1 (present) or 0 (absent) in the output register. The
// input is asserted (debug builds) to be an atom, so identity comparison
// against each key atom suffices.
void CodeGenerator::visitSmallObjectVariableKeyHasProp(
    LSmallObjectVariableKeyHasProp* lir) {
  Register id = ToRegister(lir->id());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  Label isAtom;
  masm.branchTest32(Assembler::NonZero, Address(id, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), &isAtom);
  masm.assumeUnreachable("Expected atom input");
  masm.bind(&isAtom);
#endif

  SharedShape* shape = &lir->mir()->shape()->asShared();

  // Emit one pointer comparison per property key of the shape.
  Label done, success;
  for (SharedShapePropertyIter<NoGC> iter(shape); !iter.done(); iter++) {
    masm.branchPtr(Assembler::Equal, id, ImmGCPtr(iter->key().toAtom()),
                   &success);
  }
  masm.move32(Imm32(0), output);
  masm.jump(&done);
  masm.bind(&success);
  masm.move32(Imm32(1), output);
  masm.bind(&done);
}
// Bail out of Ion if the object's class is any of the four (shared or
// unshared, fixed-length or growable/resizable) array buffer classes.
void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
    LGuardIsNotArrayBufferMaybeShared* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&FixedLengthArrayBufferObject::class_), &bail);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&FixedLengthSharedArrayBufferObject::class_), &bail);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&ResizableArrayBufferObject::class_), &bail);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&GrowableSharedArrayBufferObject::class_), &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion unless the object's class is a typed-array class.
void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion unless the object's class is a fixed-length typed-array
// class.
void CodeGenerator::visitGuardIsFixedLengthTypedArray(
    LGuardIsFixedLengthTypedArray* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotFixedLengthTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion unless the object's class is a resizable typed-array
// class.
void CodeGenerator::visitGuardIsResizableTypedArray(
    LGuardIsResizableTypedArray* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotResizableTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion unless the proxy's handler is the expected handler
// instance.
void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
  Register obj = ToRegister(guard->input());

  Label bail;

  Address handlerAddr(obj, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::NotEqual, handlerAddr,
                 ImmPtr(guard->mir()->handler()), &bail);

  bailoutFrom(&bail, guard->snapshot());
}
4668 void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
4669 Register input = ToRegister(guard->input());
4670 Register expected = ToRegister(guard->expected());
4672 Assembler::Condition cond =
4673 guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
4674 bailoutCmpPtr(cond, input, expected, guard->snapshot());
4677 void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
4678 Register input = ToRegister(guard->input());
4679 Register expected = ToRegister(guard->expected());
4681 bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
// Bail out of Ion unless the input string equals the expected atom. The live
// volatile registers (minus the scratch) are passed because guardSpecificAtom
// takes a volatile-register set it may need to preserve.
void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
  Register str = ToRegister(guard->str());
  Register scratch = ToRegister(guard->temp0());

  LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
  volatileRegs.takeUnchecked(scratch);

  Label bail;
  masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
                         &bail);
  bailoutFrom(&bail, guard->snapshot());
}
4697 void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
4698 Register symbol = ToRegister(guard->symbol());
4700 bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
4701 guard->snapshot());
4704 void CodeGenerator::visitGuardSpecificInt32(LGuardSpecificInt32* guard) {
4705 Register num = ToRegister(guard->num());
4707 bailoutCmp32(Assembler::NotEqual, num, Imm32(guard->mir()->expected()),
4708 guard->snapshot());
// Convert a string to an array index, bailing out if it is not an index.
// Strings with a cached index value take the inline fast path; all others
// call GetIndexFromString, which returns a negative value on failure.
void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
  Register str = ToRegister(lir->string());
  Register output = ToRegister(lir->output());

  Label vmCall, done;
  masm.loadStringIndexValue(str, output, &vmCall);
  masm.jump(&done);

  {
    masm.bind(&vmCall);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = int32_t (*)(JSString* str);
    masm.setupAlignedABICall();
    masm.passABIArg(str);
    masm.callWithABI<Fn, GetIndexFromString>();
    masm.storeCallInt32Result(output);

    masm.PopRegsInMask(volatileRegs);

    // GetIndexFromString returns a negative value on failure.
    bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
  }

  masm.bind(&done);
}
// Convert a string to an int32 via MacroAssembler::guardStringToInt32,
// bailing out of Ion if the conversion fails.
void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
  Register str = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  LiveRegisterSet volatileRegs = liveVolatileRegs(lir);

  Label bail;
  masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Convert a string to a double, bailing out of Ion if conversion fails.
// Strings with a cached index value take the inline int32->double fast path;
// all others call StringToNumberPure with a stack slot for the result.
void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
  Register str = ToRegister(lir->string());
  FloatRegister output = ToFloatRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label vmCall, done;
  // Use indexed value as fast path if possible.
  masm.loadStringIndexValue(str, temp0, &vmCall);
  masm.convertInt32ToDouble(temp0, output);
  masm.jump(&done);

  masm.bind(&vmCall);

  // Reserve stack for holding the result value of the call.
  masm.reserveStack(sizeof(double));
  masm.moveStackPtrTo(temp0);

  LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
  volatileRegs.takeUnchecked(temp0);
  volatileRegs.takeUnchecked(temp1);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(str);
  masm.passABIArg(temp0);
  masm.callWithABI<Fn, StringToNumberPure>();
  masm.storeCallPointerResult(temp0);

  masm.PopRegsInMask(volatileRegs);

  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  {
    // OOM path, recovered by StringToNumberPure.
    //
    // Use addToStackPtr instead of freeStack as freeStack tracks stack height
    // flow-insensitively, and using it here would confuse the stack height
    // tracking.
    masm.addToStackPtr(Imm32(sizeof(double)));
    bailout(lir->snapshot());
  }
  masm.bind(&ok);
  masm.Pop(output);

  masm.bind(&done);
}
// Bail out of Ion unless the object has zero initialized dense elements.
void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);

  // Make sure there are no dense elements.
  Address initLength(temp, ObjectElements::offsetOfInitializedLength());
  bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
}
4816 void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
4817 Register input = ToRegister(lir->input());
4818 Register64 output = ToOutRegister64(lir);
4820 masm.move32To64ZeroExtend(input, output);
// Convert the string in |input| to an int64 in |output| by calling
// DoStringToInt64 in the VM. The VM call returns the result through a
// uint64_t out-parameter, so a stack slot is reserved for it and its address
// is passed as the last argument.
void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
                                      Register64 output) {
  Register temp = output.scratchReg();

  saveLive(lir);

  // Reserve a stack slot for the out-parameter and pass its address.
  masm.reserveStack(sizeof(uint64_t));
  masm.moveStackPtrTo(temp);
  pushArg(temp);
  pushArg(input);

  using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
  callVM<Fn, DoStringToInt64>(lir);

  // Read the result back out of the reserved slot and release it.
  masm.load64(Address(masm.getStackPointer(), 0), output);
  masm.freeStack(sizeof(uint64_t));

  restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
}
4843 void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
4844 Register input = ToRegister(lir->input());
4845 Register64 output = ToOutRegister64(lir);
4847 emitStringToInt64(lir, input, output);
// Convert a boxed Value to int64. The input may be a BigInt, boolean, or
// string; any other type bails out. Checks are chained so that each failed
// test falls through to the next, and the final failed test goes straight to
// the bailout.
void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
  ValueOperand input = ToValue(lir, LValueToInt64::InputIndex);
  Register temp = ToRegister(lir->temp0());
  Register64 output = ToOutRegister64(lir);

  // Number of type checks emitted below; used to route the last check's
  // failure edge directly to |fail|.
  int checks = 3;

  Label fail, done;
  // Jump to fail if this is the last check and we fail it,
  // otherwise to the next test.
  auto emitTestAndUnbox = [&](auto testAndUnbox) {
    MOZ_ASSERT(checks > 0);

    checks--;
    Label notType;
    Label* target = checks ? &notType : &fail;

    testAndUnbox(target);

    if (checks) {
      masm.jump(&done);
      masm.bind(&notType);
    }
  };

  Register tag = masm.extractTag(input, temp);

  // BigInt.
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestBigInt(Assembler::NotEqual, tag, target);
    masm.unboxBigInt(input, temp);
    masm.loadBigInt64(temp, output);
  });

  // Boolean
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestBoolean(Assembler::NotEqual, tag, target);
    masm.unboxBoolean(input, temp);
    masm.move32To64ZeroExtend(temp, output);
  });

  // String
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestString(Assembler::NotEqual, tag, target);
    masm.unboxString(input, temp);
    emitStringToInt64(lir, temp, output);
  });

  MOZ_ASSERT(checks == 0);

  bailoutFrom(&fail, lir->snapshot());
  masm.bind(&done);
}
4904 void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
4905 Register operand = ToRegister(lir->input());
4906 Register64 output = ToOutRegister64(lir);
4908 masm.loadBigInt64(operand, output);
// Create the out-of-line VM-call path used when a BigInt cannot be allocated
// inline. On 32-bit platforms the int64 input is passed to the VM function as
// two 32-bit halves.
OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
                                                    Scalar::Type type,
                                                    Register64 input,
                                                    Register output) {
#if JS_BITS_PER_WORD == 32
  using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
  auto args = ArgList(input.low, input.high);
#else
  using Fn = BigInt* (*)(JSContext*, uint64_t);
  auto args = ArgList(input);
#endif

  // Pick the signed or unsigned VM entry point based on the element type.
  if (type == Scalar::BigInt64) {
    return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
                                                     StoreRegisterTo(output));
  }
  MOZ_ASSERT(type == Scalar::BigUint64);
  return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
                                                    StoreRegisterTo(output));
}
// Allocate a BigInt in |output| and initialize it with the int64 |input|.
// Tries an inline GC allocation first and falls back to the out-of-line VM
// call on failure. |maybeTemp| may be InvalidReg, in which case a register is
// borrowed and preserved around the allocation.
void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
                                     Register64 input, Register output,
                                     Register maybeTemp) {
  OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);

  if (maybeTemp != InvalidReg) {
    masm.newGCBigInt(output, maybeTemp, initialBigIntHeap(), ool->entry());
  } else {
    // No temp available: borrow one and preserve its value across the
    // allocation attempt (restored on both success and failure paths).
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(input);
    regs.take(output);

    Register temp = regs.takeAny();

    masm.push(temp);

    Label fail, ok;
    masm.newGCBigInt(output, temp, initialBigIntHeap(), &fail);
    masm.pop(temp);
    masm.jump(&ok);
    masm.bind(&fail);
    masm.pop(temp);
    masm.jump(ool->entry());
    masm.bind(&ok);
  }
  masm.initializeBigInt64(type, output, input);
  masm.bind(ool->rejoin());
}
4961 void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
4962 Register64 input = ToRegister64(lir->input());
4963 Register temp = ToRegister(lir->temp0());
4964 Register output = ToRegister(lir->output());
4966 emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp);
4969 void CodeGenerator::visitGuardValue(LGuardValue* lir) {
4970 ValueOperand input = ToValue(lir, LGuardValue::InputIndex);
4971 Value expected = lir->mir()->expected();
4972 Label bail;
4973 masm.branchTestValue(Assembler::NotEqual, input, expected, &bail);
4974 bailoutFrom(&bail, lir->snapshot());
// Bail out unless |input| is null or undefined.
void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
  ValueOperand input = ToValue(lir, LGuardNullOrUndefined::InputIndex);

  ScratchTagScope tag(masm, input);
  masm.splitTagForTest(input, tag);

  Label done;
  masm.branchTestNull(Assembler::Equal, tag, &done);

  // Not null, so the tag must be undefined; otherwise bail.
  Label bail;
  masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
  bailoutFrom(&bail, lir->snapshot());

  masm.bind(&done);
}
4993 void CodeGenerator::visitGuardIsNotObject(LGuardIsNotObject* lir) {
4994 ValueOperand input = ToValue(lir, LGuardIsNotObject::InputIndex);
4996 Label bail;
4997 masm.branchTestObject(Assembler::Equal, input, &bail);
4998 bailoutFrom(&bail, lir->snapshot());
// Guard on a function's flag word: bail if none of the expected flag bits are
// set, or if any of the unexpected flag bits are set.
void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
  Register function = ToRegister(lir->function());

  Label bail;
  if (uint16_t flags = lir->mir()->expectedFlags()) {
    masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
  }
  if (uint16_t flags = lir->mir()->unexpectedFlags()) {
    masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
  }
  bailoutFrom(&bail, lir->snapshot());
}
5014 void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
5015 LGuardFunctionIsNonBuiltinCtor* lir) {
5016 Register function = ToRegister(lir->function());
5017 Register temp = ToRegister(lir->temp0());
5019 Label bail;
5020 masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
5021 bailoutFrom(&bail, lir->snapshot());
// Guard on a function's kind. Depending on bailOnEquality(), bail either when
// the kind equals the expected kind or when it differs.
void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
  Register function = ToRegister(lir->function());
  Register temp = ToRegister(lir->temp0());

  Assembler::Condition cond =
      lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;

  Label bail;
  masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Bail out unless |function|'s script slot holds the expected script.
void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
  Register function = ToRegister(lir->function());

  // The jit-info/script union slot is compared directly against the expected
  // GC pointer.
  Address scriptAddr(function, JSFunction::offsetOfJitInfoOrScript());
  bailoutCmpPtr(Assembler::NotEqual, scriptAddr,
                ImmGCPtr(lir->mir()->expected()), lir->snapshot());
}
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;          // Instruction the barrier belongs to.
  const LAllocation* object_;  // Object that was written to.

 public:
  OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
      : lir_(lir), object_(object) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteBarrier(this);
  }

  LInstruction* lir() const { return lir_; }
  const LAllocation* object() const { return object_; }
};
// Post-write barrier fast path for a known tenured cell: test the arena's
// buffered-cells bitmap inline and only fall back to |callVM| when the arena
// still has the sentinel cell set and needs a real one allocated.
static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
                                            const gc::TenuredCell* cell,
                                            AllocatableGeneralRegisterSet& regs,
                                            Label* exit, Label* callVM) {
  Register temp = regs.takeAny();

  gc::Arena* arena = cell->arena();

  // Load the arena's set of cells already recorded in the store buffer.
  Register cells = temp;
  masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);

  // Compute the bit position of |cell| within the set at compile time.
  size_t index = gc::ArenaCellSet::getCellIndex(cell);
  size_t word;
  uint32_t mask;
  gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
  size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);

  // Already in the set: nothing to do.
  masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
                    exit);

  // Check whether this is the sentinel set and if so call the VM to allocate
  // one for this arena.
  masm.branchPtr(Assembler::Equal,
                 Address(cells, gc::ArenaCellSet::offsetOfArena()),
                 ImmPtr(nullptr), callVM);

  // Add the cell to the set.
  masm.or32(Imm32(mask), Address(cells, offset));
  masm.jump(exit);

  // Return the borrowed register to the caller's pool.
  regs.add(temp);
}
// Emit a post-write barrier for a write into the object in |objreg|. For a
// known constant object the store-buffer bitmap is checked inline; otherwise
// a one-element whole-cell cache check avoids the VM call when possible.
static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
                                 Register objreg, JSObject* maybeConstant,
                                 bool isGlobal,
                                 AllocatableGeneralRegisterSet& regs) {
  MOZ_ASSERT_IF(isGlobal, maybeConstant);

  Label callVM;
  Label exit;

  Register temp = regs.takeAny();

  // We already have a fast path to check whether a global is in the store
  // buffer.
  if (!isGlobal) {
    if (maybeConstant) {
      // Check store buffer bitmap directly for known object.
      EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
                                      &exit, &callVM);
    } else {
      // Check one element cache to avoid VM call.
      masm.branchPtr(Assembler::Equal,
                     AbsoluteAddress(runtime->addressOfLastBufferedWholeCell()),
                     objreg, &exit);
    }
  }

  // Call into the VM to barrier the write.
  masm.bind(&callVM);

  Register runtimereg = temp;
  masm.mov(ImmPtr(runtime), runtimereg);

  masm.setupAlignedABICall();
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  // Globals use a dedicated barrier entry point.
  if (isGlobal) {
    using Fn = void (*)(JSRuntime* rt, GlobalObject* obj);
    masm.callWithABI<Fn, PostGlobalWriteBarrier>();
  } else {
    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* obj);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }

  masm.bind(&exit);
}
// Emit a post-write barrier for |obj|, which may be either a constant object
// or a register allocation.
void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());

  Register objreg;
  JSObject* object = nullptr;
  bool isGlobal = false;
  if (obj->isConstant()) {
    // Materialize the constant object in a free volatile register.
    object = &obj->toConstant()->toObject();
    isGlobal = isGlobalObject(object);
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(object), objreg);
  } else {
    objreg = ToRegister(obj);
    regs.takeUnchecked(objreg);
  }

  EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
}
5159 // Returns true if `def` might be allocated in the nursery.
5160 static bool ValueNeedsPostBarrier(MDefinition* def) {
5161 if (def->isBox()) {
5162 def = def->toBox()->input();
5164 if (def->type() == MIRType::Value) {
5165 return true;
5167 return NeedsPostBarrier(def->type());
// Out-of-line path that calls the VM to post-barrier a write to an element,
// capturing everything needed to recompute the element index.
class OutOfLineElementPostWriteBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  LiveRegisterSet liveVolatileRegs_;  // Registers preserved across the call.
  const LAllocation* index_;          // Element index (constant or register).
  int32_t indexDiff_;                 // Adjustment added to the index.
  Register obj_;
  Register scratch_;

 public:
  OutOfLineElementPostWriteBarrier(const LiveRegisterSet& liveVolatileRegs,
                                   Register obj, const LAllocation* index,
                                   Register scratch, int32_t indexDiff)
      : liveVolatileRegs_(liveVolatileRegs),
        index_(index),
        indexDiff_(indexDiff),
        obj_(obj),
        scratch_(scratch) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineElementPostWriteBarrier(this);
  }

  const LiveRegisterSet& liveVolatileRegs() const { return liveVolatileRegs_; }
  const LAllocation* index() const { return index_; }
  int32_t indexDiff() const { return indexDiff_; }

  Register object() const { return obj_; }
  Register scratch() const { return scratch_; }
};
// Emit a post-write barrier for storing |val| into an element of |obj|.
// The barrier is skipped entirely when the stored value can never be a
// nursery cell; otherwise the out-of-line VM path is taken only when the
// object is tenured and the value is a nursery cell.
void CodeGenerator::emitElementPostWriteBarrier(
    MInstruction* mir, const LiveRegisterSet& liveVolatileRegs, Register obj,
    const LAllocation* index, Register scratch, const ConstantOrRegister& val,
    int32_t indexDiff) {
  if (val.constant()) {
    // Constant GC things must be tenured, so no barrier is needed.
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  // Typed values whose type can never be nursery-allocated need no barrier.
  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    return;
  }

  auto* ool = new (alloc()) OutOfLineElementPostWriteBarrier(
      liveVolatileRegs, obj, index, scratch, indexDiff);
  addOutOfLineCode(ool, mir);

  // Writes into nursery objects never need a barrier.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, ool->rejoin());

  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::Equal, reg.valueReg(), scratch,
                                  ool->entry());
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, reg.typedReg().gpr(),
                                 scratch, ool->entry());
  }

  masm.bind(ool->rejoin());
}
// Out-of-line: call PostWriteElementBarrier in the VM, preserving all live
// volatile registers across the ABI call.
void CodeGenerator::visitOutOfLineElementPostWriteBarrier(
    OutOfLineElementPostWriteBarrier* ool) {
  Register obj = ool->object();
  Register scratch = ool->scratch();
  const LAllocation* index = ool->index();
  int32_t indexDiff = ool->indexDiff();

  masm.PushRegsInMask(ool->liveVolatileRegs());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(obj);
  regs.takeUnchecked(scratch);

  // Materialize the (possibly adjusted) element index in a register.
  Register indexReg;
  if (index->isConstant()) {
    indexReg = regs.takeAny();
    masm.move32(Imm32(ToInt32(index) + indexDiff), indexReg);
  } else {
    indexReg = ToRegister(index);
    regs.takeUnchecked(indexReg);
    if (indexDiff != 0) {
      masm.add32(Imm32(indexDiff), indexReg);
    }
  }

  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(gen->runtime), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.passABIArg(indexReg);
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  // We don't need a sub32 here because indexReg must be in liveVolatileRegs
  // if indexDiff is not zero, so it will be restored below.
  MOZ_ASSERT_IF(indexDiff != 0, ool->liveVolatileRegs().has(indexReg));

  masm.PopRegsInMask(ool->liveVolatileRegs());

  masm.jump(ool->rejoin());
}
5274 void CodeGenerator::emitPostWriteBarrier(Register objreg) {
5275 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5276 regs.takeUnchecked(objreg);
5277 EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
5280 void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
5281 OutOfLineCallPostWriteBarrier* ool) {
5282 saveLiveVolatile(ool->lir());
5283 const LAllocation* obj = ool->object();
5284 emitPostWriteBarrier(obj);
5285 restoreLiveVolatile(ool->lir());
5287 masm.jump(ool->rejoin());
void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
                                                OutOfLineCode* ool) {
  // Check whether an object is a global that we have already barriered before
  // calling into the VM.
  //
  // We only check for the script's global, not other globals within the same
  // compartment, because we bake in a pointer to realm->globalWriteBarriered
  // and doing that would be invalid for other realms because they could be
  // collected before the Ion code is discarded.

  if (!maybeGlobal->isConstant()) {
    return;
  }

  JSObject* obj = &maybeGlobal->toConstant()->toObject();
  if (gen->realm->maybeGlobal() != obj) {
    return;
  }

  // If the global is already flagged as barriered, skip the VM call.
  const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
                ool->rejoin());
}
// Shared code for typed (Object/String/BigInt) post-write barriers: skip the
// barrier when the written-to object is itself in the nursery, and take the
// out-of-line path only when the stored value is a nursery cell.
template <class LPostBarrierType, MIRType nurseryType>
void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
                                                OutOfLineCode* ool) {
  static_assert(NeedsPostBarrier(nurseryType));

  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  Register value = ToRegister(lir->value());
  // Sanity-check that the MIR value type matches the template parameter.
  if constexpr (nurseryType == MIRType::Object) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
  } else if constexpr (nurseryType == MIRType::String) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
  } else {
    static_assert(nurseryType == MIRType::BigInt);
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
// Shared code for Value-typed post-write barriers: like
// visitPostWriteBarrierCommon, but the stored value is a boxed Value.
template <class LPostBarrierType>
void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
                                                 OutOfLineCode* ool) {
  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  ValueOperand value = ToValue(lir, LPostBarrierType::ValueIndex);
  masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
// Post-write barrier for a stored Object value.
void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
}
// Post-write barrier for a stored String value.
void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
}
// Post-write barrier for a stored BigInt value.
void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
}
// Post-write barrier for a stored boxed Value.
void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommonV(lir, ool);
}
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteElementBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;          // Instruction the barrier belongs to.
  const LAllocation* object_;  // Object written to.
  const LAllocation* index_;   // Element index that was written.

 public:
  OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
                                       const LAllocation* object,
                                       const LAllocation* index)
      : lir_(lir), object_(object), index_(index) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteElementBarrier(this);
  }

  LInstruction* lir() const { return lir_; }

  const LAllocation* object() const { return object_; }

  const LAllocation* index() const { return index_; }
};
// Out-of-line: call PostWriteElementBarrier in the VM, saving and restoring
// live volatile registers around the ABI call.
void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
    OutOfLineCallPostWriteElementBarrier* ool) {
  saveLiveVolatile(ool->lir());

  const LAllocation* obj = ool->object();
  const LAllocation* index = ool->index();

  Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
  Register indexreg = ToRegister(index);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(indexreg);

  if (obj->isConstant()) {
    // Constant object: materialize it in a free volatile register.
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
  } else {
    regs.takeUnchecked(objreg);
  }

  Register runtimereg = regs.takeAny();
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.setupAlignedABICall();
  masm.mov(ImmPtr(gen->runtime), runtimereg);
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  masm.passABIArg(indexreg);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
// Post-write element barrier for a stored Object value.
void CodeGenerator::visitPostWriteElementBarrierO(
    LPostWriteElementBarrierO* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
                                                                          ool);
}
// Post-write element barrier for a stored String value.
void CodeGenerator::visitPostWriteElementBarrierS(
    LPostWriteElementBarrierS* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
                                                                          ool);
}
// Post-write element barrier for a stored BigInt value.
void CodeGenerator::visitPostWriteElementBarrierBI(
    LPostWriteElementBarrierBI* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
                                                                           ool);
}
// Post-write element barrier for a stored boxed Value.
void CodeGenerator::visitPostWriteElementBarrierV(
    LPostWriteElementBarrierV* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommonV(lir, ool);
}
// Debug assertion: crash if an elided post-write barrier would actually have
// been needed, i.e. a tenured object is storing a nursery cell.
void CodeGenerator::visitAssertCanElidePostWriteBarrier(
    LAssertCanElidePostWriteBarrier* lir) {
  Register object = ToRegister(lir->object());
  ValueOperand value =
      ToValue(lir, LAssertCanElidePostWriteBarrier::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  Label ok;
  // Nursery object, or non-nursery value: the barrier is safely elidable.
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, &ok);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp, &ok);

  masm.assumeUnreachable("Unexpected missing post write barrier");

  masm.bind(&ok);
}
// Shared code to call a JSNative: builds the vp array on the stack, enters a
// fake native exit frame, performs the ABI call, and loads the result Value.
// Used for both LCallNative (known single target) and LCallClassHook
// (callee object in a register).
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
  MCallBase* mir = call->mir();

  uint32_t unusedStack = UnusedStackBytesForCall(mir->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argContextReg = ToRegister(call->getArgContextReg());
  const Register argUintNReg = ToRegister(call->getArgUintNReg());
  const Register argVpReg = ToRegister(call->getArgVpReg());

  // Misc. temporary registers.
  const Register tempReg = ToRegister(call->getTempReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Allocate space for the outparam, moving the StackPointer to what will be
  // &vp[1].
  masm.adjustStack(unusedStack);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. The StackPointer is moved
  // to &vp[0].
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    Register calleeReg = ToRegister(call->getCallee());
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));

    if (call->mir()->maybeCrossRealm()) {
      masm.switchToObjectRealm(calleeReg, tempReg);
    }
  } else {
    WrappedFunction* target = call->getSingleTarget();
    masm.Push(ObjectValue(*target->rawNativeJSFunction()));

    if (call->mir()->maybeCrossRealm()) {
      masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
      masm.switchToObjectRealm(tempReg, tempReg);
    }
  }

  // Preload arguments into registers.
  masm.loadJSContext(argContextReg);
  masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
  masm.moveStackPtrTo(argVpReg);

  masm.Push(argUintNReg);

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
  masm.enterFakeExitFrameForNative(argContextReg, tempReg,
                                   call->mir()->isConstructing());

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.passABIArg(argContextReg);
  masm.passABIArg(argUintNReg);
  masm.passABIArg(argVpReg);

  ensureOsiSpace();
  // If we're using a simulator build, `native` will already point to the
  // simulator's call-redirection code for LCallClassHook. Load the address in
  // a register first so that we don't try to redirect it a second time.
  bool emittedCall = false;
#ifdef JS_SIMULATOR
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    masm.movePtr(ImmPtr(native), tempReg);
    masm.callWithABI(tempReg);
    emittedCall = true;
  }
#endif
  if (!emittedCall) {
    masm.callWithABI(DynamicFunction<JSNative>(native), ABIType::General,
                     CheckUnsafeCallWithABI::DontCheckHasExitFrame);
  }

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

  if (call->mir()->maybeCrossRealm()) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  masm.loadValue(
      Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
      JSReturnOperand);

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
      mir->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Call a native function with a known single target. If the call's return
// value is ignored and the target provides a dedicated ignores-return-value
// entry point in its JitInfo, use that instead.
void CodeGenerator::visitCallNative(LCallNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithoutJitEntry());

  JSNative native = target->native();
  if (call->ignoresReturnValue() && target->hasJitInfo()) {
    const JSJitInfo* jitInfo = target->jitInfo();
    if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
      native = jitInfo->ignoresReturnValueMethod;
    }
  }

  emitCallNative(call, native);
}
5624 void CodeGenerator::visitCallClassHook(LCallClassHook* call) {
5625 emitCallNative(call, call->mir()->target());
static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
                           DOMObjectKind kind) {
  // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
  // will be in the first slot but may be fixed or non-fixed.
  MOZ_ASSERT(obj != priv);

  switch (kind) {
    case DOMObjectKind::Native:
      // If it's a native object, the value must be in a fixed slot.
      // See CanAttachDOMCall in CacheIR.cpp.
      masm.debugAssertObjHasFixedSlots(obj, priv);
      masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
      break;
    case DOMObjectKind::Proxy: {
#ifdef DEBUG
      // Sanity check: it must be a DOM proxy.
      Label isDOMProxy;
      masm.branchTestProxyHandlerFamily(
          Assembler::Equal, obj, priv, GetDOMProxyHandlerFamily(), &isDOMProxy);
      masm.assumeUnreachable("Expected a DOM proxy");
      masm.bind(&isDOMProxy);
#endif
      // Proxy reserved slots live out-of-line; load the private from slot 0.
      masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
      masm.loadPrivate(
          Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
      break;
    }
  }
}
// Call a DOM native method using the optimized DOM calling convention: the
// JSJitInfo method is invoked directly with (cx, obj, private, args) rather
// than going through the generic JSNative path.
void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithoutJitEntry());
  MOZ_ASSERT(target->hasJitInfo());
  MOZ_ASSERT(call->mir()->isCallDOMNative());

  int unusedStack = UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argJSContext = ToRegister(call->getArgJSContext());
  const Register argObj = ToRegister(call->getArgObj());
  const Register argPrivate = ToRegister(call->getArgPrivate());
  const Register argArgs = ToRegister(call->getArgArgs());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // DOM methods have the signature:
  //  bool (*)(JSContext*, HandleObject, void* private, const
  //  JSJitMethodCallArgs& args)
  // Where args is initialized from an argc and a vp, vp[0] is space for an
  // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
  // function arguments. Note that args stores the argv, not the vp, and
  // argv == vp + 2.

  // Nestle the stack up against the pushed arguments, leaving StackPointer at
  // &vp[1]
  masm.adjustStack(unusedStack);
  // argObj is filled with the extracted object, then returned.
  Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
  MOZ_ASSERT(obj == argObj);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. After this the StackPointer
  // points to &vp[0].
  masm.Push(ObjectValue(*target->rawNativeJSFunction()));

  // Now compute the argv value. Since StackPointer is pointing to &vp[0] and
  // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
  // StackPointer.
  static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
  static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
                IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);

  LoadDOMPrivate(masm, obj, argPrivate,
                 static_cast<MCallDOMNative*>(call->mir())->objectKind());

  // Push argc from the call instruction into what will become the IonExitFrame
  masm.Push(Imm32(call->numActualArgs()));

  // Push our argv onto the stack
  masm.Push(argArgs);
  // And store our JSJitMethodCallArgs* in argArgs.
  masm.moveStackPtrTo(argArgs);

  // Push |this| object for passing HandleObject. We push after argc to
  // maintain the same sp-relative location of the object pointer with other
  // DOMExitFrames.
  masm.Push(argObj);
  masm.moveStackPtrTo(argObj);

  if (call->mir()->maybeCrossRealm()) {
    // We use argJSContext as scratch register here.
    masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
    masm.switchToObjectRealm(argJSContext, argJSContext);
  }

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
  masm.loadJSContext(argJSContext);
  masm.enterFakeExitFrame(argJSContext, argJSContext,
                          ExitFrameType::IonDOMMethod);

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.loadJSContext(argJSContext);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argObj);
  masm.passABIArg(argPrivate);
  masm.passABIArg(argArgs);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (target->jitInfo()->isInfallible) {
    // Infallible methods cannot fail, so skip the ReturnReg check.
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    // Test for failure.
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    // Load the outparam vp[0] into output register(s).
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  // Switch back to the current realm if needed. Note: if the DOM method threw
  // an exception, the exception handler will do this.
  if (call->mir()->maybeCrossRealm()) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "Clobbering ReturnReg should not affect the return value");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Fetch a self-hosted intrinsic value by name via a VM call.
5786 void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
5787 pushArg(ImmGCPtr(lir->mir()->name()));
// GetIntrinsicValue(cx, name, rval) resolves the intrinsic at runtime and
// stores it in the VM call's out-value.
5789 using Fn = bool (*)(JSContext* cx, Handle<PropertyName*>, MutableHandleValue);
5790 callVM<Fn, GetIntrinsicValue>(lir);
// Slow path: perform the call through the VM's InvokeFunction instead of a
// direct jit call. The stack arguments are already in place, separated from
// the stack pointer by |unusedStack| bytes of slack that we temporarily free.
5793 void CodeGenerator::emitCallInvokeFunction(
5794 LInstruction* call, Register calleereg, bool constructing,
5795 bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
5796 // Nestle %esp up to the argument vector.
5797 // Each path must account for framePushed_ separately, for callVM to be valid.
5798 masm.freeStack(unusedStack);
5800 pushArg(masm.getStackPointer()); // argv.
5801 pushArg(Imm32(argc)); // argc.
5802 pushArg(Imm32(ignoresReturnValue)); // ignoresReturnValue.
5803 pushArg(Imm32(constructing)); // constructing.
5804 pushArg(calleereg); // JSFunction*.
5806 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5807 MutableHandleValue);
5808 callVM<Fn, jit::InvokeFunction>(call);
5810 // Un-nestle %esp from the argument vector. No prefix was pushed.
5811 masm.reserveStack(unusedStack);
// Emit a call whose target is unknown at compile time by dispatching through
// the shared Ion generic-call trampoline (see generateIonGenericCallStub).
5814 void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
5815 // The callee is passed straight through to the trampoline.
5816 MOZ_ASSERT(ToRegister(call->getCallee()) == IonGenericCallCalleeReg);
5818 Register argcReg = ToRegister(call->getArgc());
5819 uint32_t unusedStack =
5820 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5822 // Known-target case is handled by LCallKnown.
5823 MOZ_ASSERT(!call->hasSingleTarget());
5825 masm.checkStackAlignment();
// The trampoline expects the actual argument count in argcReg.
5827 masm.move32(Imm32(call->numActualArgs()), argcReg);
5829 // Nestle the StackPointer up to the argument vector.
5830 masm.freeStack(unusedStack);
5831 ensureOsiSpace();
5833 auto kind = call->mir()->isConstructing() ? IonGenericCallKind::Construct
5834 : IonGenericCallKind::Call;
5836 TrampolinePtr genericCallStub =
5837 gen->jitRuntime()->getIonGenericCallStub(kind);
5838 uint32_t callOffset = masm.callJit(genericCallStub);
5839 markSafepointAt(callOffset, call);
// The trampoline switched into the callee's realm; switch back here. (If an
// exception was thrown, the exception handler restores the realm instead.)
5841 if (call->mir()->maybeCrossRealm()) {
5842 static_assert(!JSReturnOperand.aliases(ReturnReg),
5843 "ReturnReg available as scratch after scripted calls")
5844 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5847 // Restore stack pointer.
5848 masm.setFramePushed(frameSize());
5849 emitRestoreStackPointerFromFP();
5851 // If the return value of the constructing function is Primitive,
5852 // replace the return value with the Object from CreateThis.
5853 if (call->mir()->isConstructing()) {
5854 Label notPrimitive;
5855 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5856 &notPrimitive);
// |this| from CreateThis sits just above the unused-stack slack.
5857 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
5858 JSReturnOperand);
5859 #ifdef DEBUG
5860 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5861 &notPrimitive);
5862 masm.assumeUnreachable("CreateThis creates an object");
5863 #endif
5864 masm.bind(&notPrimitive);
// Shift |argc| stack Values down by one slot (8 bytes), overwriting the
// bottom-most value. Used by the generic-call trampoline to drop fun_call's
// callee slot and to re-align bound-function calls. Clobbers curr, end and
// scratch; jumps to |done| when the copy is finished.
5868 void JitRuntime::generateIonGenericCallArgumentsShift(
5869 MacroAssembler& masm, Register argc, Register curr, Register end,
5870 Register scratch, Label* done) {
5871 static_assert(sizeof(Value) == 8);
5872 // There are |argc| Values on the stack. Shift them all down by 8 bytes,
5873 // overwriting the first value.
5875 // Initialize `curr` to the destination of the first copy, and `end` to the
5876 // final value of curr.
5877 masm.moveStackPtrTo(curr);
5878 masm.computeEffectiveAddress(BaseValueIndex(curr, argc), end);
5880 Label loop;
5881 masm.bind(&loop);
5882 masm.branchPtr(Assembler::Equal, curr, end, done);
// Copy one word at a time; on 64-bit a word is a whole Value. NOTE(review):
// this only copies sizeof(uintptr_t) per iteration — presumably this
// trampoline is 64-bit-pointer only given the static_assert above; confirm.
5883 masm.loadPtr(Address(curr, 8), scratch);
5884 masm.storePtr(scratch, Address(curr, 0));
5885 masm.addPtr(Imm32(sizeof(uintptr_t)), curr);
5886 masm.jump(&loop);
// Generate the shared trampoline used by LCallGeneric for calls with an
// unknown target. Handles jitted functions (with optional argument
// rectification), native functions, fun_call, non-constructing bound
// functions, and falls back to a VM InvokeFunction call for everything else.
5889 void JitRuntime::generateIonGenericCallStub(MacroAssembler& masm,
5890 IonGenericCallKind kind) {
5891 AutoCreatedBy acb(masm, "JitRuntime::generateIonGenericCallStub");
5892 ionGenericCallStubOffset_[kind] = startTrampolineCode(masm);
5894 // This code is tightly coupled with visitCallGeneric.
5896 // Upon entry:
5897 // IonGenericCallCalleeReg contains a pointer to the callee object.
5898 // IonGenericCallArgcReg contains the number of actual args.
5899 // The arguments have been pushed onto the stack:
5900 // [newTarget] (iff isConstructing)
5901 // [argN]
5902 // ...
5903 // [arg1]
5904 // [arg0]
5905 // [this]
5906 // <return address> (if not JS_USE_LINK_REGISTER)
5908 // This trampoline is responsible for entering the callee's realm,
5909 // massaging the stack into the right shape, and then performing a
5910 // tail call. We will return directly to the Ion code from the
5911 // callee.
5913 // To do a tail call, we keep the return address in a register, even
5914 // on platforms that don't normally use a link register, and push it
5915 // just before jumping to the callee, after we are done setting up
5916 // the stack.
5918 // The caller is responsible for switching back to the caller's
5919 // realm and cleaning up the stack.
5921 Register calleeReg = IonGenericCallCalleeReg;
5922 Register argcReg = IonGenericCallArgcReg;
5923 Register scratch = IonGenericCallScratch;
5924 Register scratch2 = IonGenericCallScratch2;
5926 #ifndef JS_USE_LINK_REGISTER
// Keep the return address in a register so we can tail-call later.
5927 Register returnAddrReg = IonGenericCallReturnAddrReg;
5928 masm.pop(returnAddrReg);
5929 #endif
5931 #ifdef JS_CODEGEN_ARM
5932 // The default second scratch register on arm is lr, which we need
5933 // preserved for tail calls.
5934 AutoNonDefaultSecondScratchRegister andssr(masm, IonGenericSecondScratchReg);
5935 #endif
5937 bool isConstructing = kind == IonGenericCallKind::Construct;
// |entry| is also a re-entry point: fun_call and bound-function handling
// rewrite the callee/arguments and jump back here for the inner call.
5939 Label entry, notFunction, noJitEntry, vmCall;
5940 masm.bind(&entry);
5942 // Guard that the callee is actually a function.
5943 masm.branchTestObjIsFunction(Assembler::NotEqual, calleeReg, scratch,
5944 calleeReg, &notFunction);
5946 // Guard that the callee supports the [[Call]] or [[Construct]] operation.
5947 // If these tests fail, we will call into the VM to throw an exception.
5948 if (isConstructing) {
5949 masm.branchTestFunctionFlags(calleeReg, FunctionFlags::CONSTRUCTOR,
5950 Assembler::Zero, &vmCall);
5951 } else {
5952 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
5953 calleeReg, scratch, &vmCall);
5956 if (isConstructing) {
5957 // Use the slow path if CreateThis was unable to create the |this| object.
5958 Address thisAddr(masm.getStackPointer(), 0);
5959 masm.branchTestNull(Assembler::Equal, thisAddr, &vmCall);
5962 masm.switchToObjectRealm(calleeReg, scratch);
5964 // Load jitCodeRaw for callee if it exists.
5965 masm.branchIfFunctionHasNoJitEntry(calleeReg, isConstructing, &noJitEntry);
5967 // ****************************
5968 // * Functions with jit entry *
5969 // ****************************
5970 masm.loadJitCodeRaw(calleeReg, scratch2);
5972 // Construct the JitFrameLayout.
5973 masm.PushCalleeToken(calleeReg, isConstructing);
5974 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcReg, scratch);
5975 #ifndef JS_USE_LINK_REGISTER
5976 masm.push(returnAddrReg);
5977 #endif
5979 // Check whether we need a rectifier frame.
// A rectifier is needed when fewer actual args were passed than the
// callee's formal parameter count.
5980 Label noRectifier;
5981 masm.loadFunctionArgCount(calleeReg, scratch);
5982 masm.branch32(Assembler::BelowOrEqual, scratch, argcReg, &noRectifier);
5984 // Tail-call the arguments rectifier.
5985 // Because all trampolines are created at the same time,
5986 // we can't create a TrampolinePtr for the arguments rectifier,
5987 // because it hasn't been linked yet. We can, however, directly
5988 // encode its offset.
5989 Label rectifier;
5990 bindLabelToOffset(&rectifier, argumentsRectifierOffset_);
5992 masm.jump(&rectifier);
5995 // Tail call the jit entry.
5996 masm.bind(&noRectifier);
5997 masm.jump(scratch2);
5999 // ********************
6000 // * Native functions *
6001 // ********************
6002 masm.bind(&noJitEntry);
6003 if (!isConstructing) {
// fun_call can be handled inline; falls through to the generic native
// call path if the callee is not fun_call.
6004 generateIonGenericCallFunCall(masm, &entry, &vmCall);
6006 generateIonGenericCallNativeFunction(masm, isConstructing);
6008 // *******************
6009 // * Bound functions *
6010 // *******************
6011 // TODO: support class hooks?
6012 masm.bind(&notFunction);
6013 if (!isConstructing) {
6014 // TODO: support generic bound constructors?
6015 generateIonGenericCallBoundFunction(masm, &entry, &vmCall);
6018 // ********************
6019 // * Fallback VM call *
6020 // ********************
6021 masm.bind(&vmCall);
// Same argument layout as CodeGenerator::emitCallInvokeFunction, but pushed
// manually because we are in a trampoline, not compiled Ion code.
6023 masm.push(masm.getStackPointer()); // argv
6024 masm.push(argcReg); // argc
6025 masm.push(Imm32(false)); // ignores return value
6026 masm.push(Imm32(isConstructing)); // constructing
6027 masm.push(calleeReg); // callee
6029 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6030 MutableHandleValue);
6031 VMFunctionId id = VMFunctionToId<Fn, jit::InvokeFunction>::id;
6032 uint32_t invokeFunctionOffset = functionWrapperOffsets_[size_t(id)];
6033 Label invokeFunctionVMEntry;
6034 bindLabelToOffset(&invokeFunctionVMEntry, invokeFunctionOffset);
6036 masm.pushFrameDescriptor(FrameType::IonJS);
6037 #ifndef JS_USE_LINK_REGISTER
6038 masm.push(returnAddrReg);
6039 #endif
6040 masm.jump(&invokeFunctionVMEntry);
// Generic-call trampoline path for native (C++) functions: build a native
// exit frame, call the native through the ABI, and return the result in
// JSReturnOperand. Unlike the jit-entry paths, this is a real call, not a
// tail call.
6043 void JitRuntime::generateIonGenericCallNativeFunction(MacroAssembler& masm,
6044 bool isConstructing) {
6045 Register calleeReg = IonGenericCallCalleeReg;
6046 Register argcReg = IonGenericCallArgcReg;
6047 Register scratch = IonGenericCallScratch;
6048 Register scratch2 = IonGenericCallScratch2;
6049 Register contextReg = IonGenericCallScratch3;
6050 #ifndef JS_USE_LINK_REGISTER
6051 Register returnAddrReg = IonGenericCallReturnAddrReg;
6052 #endif
6054 // Push a value containing the callee, which will become argv[0].
6055 masm.pushValue(JSVAL_TYPE_OBJECT, calleeReg);
6057 // Load the callee address into calleeReg.
6058 #ifdef JS_SIMULATOR
// Under the simulator, natives must be called through a redirection.
6059 masm.movePtr(ImmPtr(RedirectedCallAnyNative()), calleeReg);
6060 #else
6061 masm.loadPrivate(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6062 calleeReg);
6063 #endif
6065 // Load argv into scratch2.
6066 masm.moveStackPtrTo(scratch2);
6068 // Push argc.
6069 masm.push(argcReg);
6071 masm.loadJSContext(contextReg);
6073 // Construct native exit frame. Note that unlike other cases in this
6074 // trampoline, this code does not use a tail call.
6075 masm.pushFrameDescriptor(FrameType::IonJS);
6076 #ifdef JS_USE_LINK_REGISTER
6077 masm.pushReturnAddress();
6078 #else
6079 masm.push(returnAddrReg);
6080 #endif
6082 masm.push(FramePointer);
6083 masm.moveStackPtrTo(FramePointer);
6084 masm.enterFakeExitFrameForNative(contextReg, scratch, isConstructing);
// Call the native: JSNative(cx, argc, vp).
6086 masm.setupUnalignedABICall(scratch);
6087 masm.passABIArg(contextReg); // cx
6088 masm.passABIArg(argcReg); // argc
6089 masm.passABIArg(scratch2); // argv
6091 masm.callWithABI(calleeReg);
6093 // Test for failure.
6094 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
// On success the native stored its result in vp[0]; load it.
6096 masm.loadValue(
6097 Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
6098 JSReturnOperand);
6100 // Leave the exit frame.
6101 masm.moveToStackPtr(FramePointer);
6102 masm.pop(FramePointer);
6104 // Return.
6105 masm.ret();
// Generic-call trampoline fast path for Function.prototype.call: rewrite the
// stack so the call becomes a direct call of |this| with the remaining
// arguments, then jump back to |entry|. Falls through (at &notFunCall) when
// the callee's native is not fun_call; bails to |vmCall| if |this| is not an
// object.
6108 void JitRuntime::generateIonGenericCallFunCall(MacroAssembler& masm,
6109 Label* entry, Label* vmCall) {
6110 Register calleeReg = IonGenericCallCalleeReg;
6111 Register argcReg = IonGenericCallArgcReg;
6112 Register scratch = IonGenericCallScratch;
6113 Register scratch2 = IonGenericCallScratch2;
6114 Register scratch3 = IonGenericCallScratch3;
6116 Label notFunCall;
6117 masm.branchPtr(Assembler::NotEqual,
6118 Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6119 ImmPtr(js::fun_call), &notFunCall);
6121 // In general, we can implement fun_call by replacing calleeReg with
6122 // |this|, sliding all the other arguments down, and decrementing argc.
6124 // *BEFORE* *AFTER*
6125 // [argN] argc = N+1 <padding>
6126 // ... [argN] argc = N
6127 // [arg1] ...
6128 // [arg0] [arg1] <- now arg0
6129 // [this] <- top of stack (aligned) [arg0] <- now this
6131 // The only exception is when argc is already 0, in which case instead
6132 // of shifting arguments down we replace [this] with UndefinedValue():
6134 // *BEFORE* *AFTER*
6135 // [this] argc = 0 [undef] argc = 0
6137 // After making this transformation, we can jump back to the beginning
6138 // of this trampoline to handle the inner call.
6140 // Guard that |this| is an object. If it is, replace calleeReg.
6141 masm.fallibleUnboxObject(Address(masm.getStackPointer(), 0), scratch, vmCall);
6142 masm.movePtr(scratch, calleeReg);
6144 Label hasArgs;
6145 masm.branch32(Assembler::NotEqual, argcReg, Imm32(0), &hasArgs);
6147 // No arguments. Replace |this| with |undefined| and start from the top.
6148 masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), 0));
6149 masm.jump(entry);
6151 masm.bind(&hasArgs);
6153 Label doneSliding;
6154 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6155 scratch3, &doneSliding);
6156 masm.bind(&doneSliding);
// One fewer argument now that the old |this| consumed a slot.
6157 masm.sub32(Imm32(1), argcReg);
6159 masm.jump(entry);
6161 masm.bind(&notFunCall);
// Generic-call trampoline path for BoundFunctionObject (non-constructing):
// push the bound arguments and bound |this|, load the bound target into the
// callee register, and jump back to |entry| to call the target. Bails to
// |vmCall| for non-bound-function objects or when too many arguments would
// be pushed.
6164 void JitRuntime::generateIonGenericCallBoundFunction(MacroAssembler& masm,
6165 Label* entry,
6166 Label* vmCall) {
6167 Register calleeReg = IonGenericCallCalleeReg;
6168 Register argcReg = IonGenericCallArgcReg;
6169 Register scratch = IonGenericCallScratch;
6170 Register scratch2 = IonGenericCallScratch2;
6171 Register scratch3 = IonGenericCallScratch3;
6173 masm.branchTestObjClass(Assembler::NotEqual, calleeReg,
6174 &BoundFunctionObject::class_, scratch, calleeReg,
6175 vmCall);
6177 Address targetSlot(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
6178 Address flagsSlot(calleeReg, BoundFunctionObject::offsetOfFlagsSlot());
6179 Address thisSlot(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
6180 Address firstInlineArgSlot(
6181 calleeReg, BoundFunctionObject::offsetOfFirstInlineBoundArg());
6183 // Check that we won't be pushing too many arguments.
// The number of bound args is stored in the upper bits of the flags slot.
6184 masm.load32(flagsSlot, scratch);
6185 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6186 masm.add32(argcReg, scratch);
6187 masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX), vmCall);
6189 // The stack is currently correctly aligned for a jit call. We will
6190 // be updating the `this` value and potentially adding additional
6191 // arguments. On platforms with 16-byte alignment, if the number of
6192 // bound arguments is odd, we have to move the arguments that are
6193 // currently on the stack. For example, with one bound argument:
6195 // *BEFORE* *AFTER*
6196 // [argN] <padding>
6197 // ... [argN] |
6198 // [arg1] ... | These arguments have been
6199 // [arg0] [arg1] | shifted down 8 bytes.
6200 // [this] <- top of stack (aligned) [arg0] v
6201 // [bound0] <- one bound argument (odd)
6202 // [boundThis] <- top of stack (aligned)
6204 Label poppedThis;
6205 if (JitStackValueAlignment > 1) {
6206 Label alreadyAligned;
// Testing the lowest bit of the bound-arg count checks odd/even.
6207 masm.branchTest32(Assembler::Zero, flagsSlot,
6208 Imm32(1 << BoundFunctionObject::NumBoundArgsShift),
6209 &alreadyAligned);
6211 // We have an odd number of bound arguments. Shift the existing arguments
6212 // down by 8 bytes.
6213 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6214 scratch3, &poppedThis);
6215 masm.bind(&alreadyAligned);
6218 // Pop the current `this`. It will be replaced with the bound `this`.
6219 masm.freeStack(sizeof(Value));
6220 masm.bind(&poppedThis);
6222 // Load the number of bound arguments in scratch
6223 masm.load32(flagsSlot, scratch);
6224 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6226 Label donePushingBoundArguments;
6227 masm.branch32(Assembler::Equal, scratch, Imm32(0),
6228 &donePushingBoundArguments);
6230 // Update argc to include bound arguments.
6231 masm.add32(scratch, argcReg);
6233 // Load &boundArgs[0] in scratch2.
// Bound args are stored inline in the object up to MaxInlineBoundArgs;
// beyond that they live in the elements of an out-of-line array object.
6234 Label outOfLineBoundArguments, haveBoundArguments;
6235 masm.branch32(Assembler::Above, scratch,
6236 Imm32(BoundFunctionObject::MaxInlineBoundArgs),
6237 &outOfLineBoundArguments);
6238 masm.computeEffectiveAddress(firstInlineArgSlot, scratch2);
6239 masm.jump(&haveBoundArguments);
6241 masm.bind(&outOfLineBoundArguments);
6242 masm.unboxObject(firstInlineArgSlot, scratch2);
6243 masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2);
6245 masm.bind(&haveBoundArguments);
6247 // Load &boundArgs[numBoundArgs] in scratch.
6248 BaseObjectElementIndex lastBoundArg(scratch2, scratch);
6249 masm.computeEffectiveAddress(lastBoundArg, scratch);
6251 // Push the bound arguments, starting with the last one.
6252 // Copying pre-decrements scratch until scratch2 is reached.
6253 Label boundArgumentsLoop;
6254 masm.bind(&boundArgumentsLoop);
6255 masm.subPtr(Imm32(sizeof(Value)), scratch);
6256 masm.pushValue(Address(scratch, 0));
6257 masm.branchPtr(Assembler::Above, scratch, scratch2, &boundArgumentsLoop);
6258 masm.bind(&donePushingBoundArguments);
6260 // Push the bound `this`.
6261 masm.pushValue(thisSlot);
6263 // Load the target in calleeReg.
6264 masm.unboxObject(targetSlot, calleeReg);
6266 // At this point, all preconditions for entering the trampoline are met:
6267 // - calleeReg contains a pointer to the callee object
6268 // - argcReg contains the number of actual args (now including bound args)
6269 // - the arguments are on the stack with the correct alignment.
6270 // Instead of generating more code, we can jump back to the entry point
6271 // of the trampoline to call the bound target.
6272 masm.jump(entry);
// Emit a call whose single target function is known at compile time and has
// a jit entry, allowing a direct jit-to-jit call without the generic
// trampoline.
6275 void CodeGenerator::visitCallKnown(LCallKnown* call) {
6276 Register calleereg = ToRegister(call->getFunction());
6277 Register objreg = ToRegister(call->getTempObject());
6278 uint32_t unusedStack =
6279 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
6280 WrappedFunction* target = call->getSingleTarget();
6282 // Native single targets (except Wasm and TrampolineNative functions) are
6283 // handled by LCallNative.
6284 MOZ_ASSERT(target->hasJitEntry());
6286 // Missing arguments must have been explicitly appended by WarpBuilder.
6287 DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
6288 MOZ_ASSERT(target->nargs() <=
6289 call->mir()->numStackArgs() - numNonArgsOnStack);
6291 MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());
6293 masm.checkStackAlignment();
// Calling a class constructor without |new| must throw; route through the
// VM's InvokeFunction, which reports the error.
6295 if (target->isClassConstructor() && !call->isConstructing()) {
6296 emitCallInvokeFunction(call, calleereg, call->isConstructing(),
6297 call->ignoresReturnValue(), call->numActualArgs(),
6298 unusedStack);
6299 return;
6302 MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());
6304 MOZ_ASSERT(!call->mir()->needsThisCheck());
6306 if (call->mir()->maybeCrossRealm()) {
6307 masm.switchToObjectRealm(calleereg, objreg);
6310 masm.loadJitCodeRaw(calleereg, objreg);
6312 // Nestle the StackPointer up to the argument vector.
6313 masm.freeStack(unusedStack);
6315 // Construct the JitFrameLayout.
6316 masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
6317 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, call->numActualArgs());
6319 // Finally call the function in objreg.
6320 ensureOsiSpace();
6321 uint32_t callOffset = masm.callJit(objreg);
6322 markSafepointAt(callOffset, call);
6324 if (call->mir()->maybeCrossRealm()) {
6325 static_assert(!JSReturnOperand.aliases(ReturnReg),
6326 "ReturnReg available as scratch after scripted calls");
6327 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6330 // Restore stack pointer: pop JitFrameLayout fields still left on the stack
6331 // and undo the earlier |freeStack(unusedStack)|.
6332 int prefixGarbage =
6333 sizeof(JitFrameLayout) - JitFrameLayout::bytesPoppedAfterCall();
6334 masm.adjustStack(prefixGarbage - unusedStack);
6336 // If the return value of the constructing function is Primitive,
6337 // replace the return value with the Object from CreateThis.
6338 if (call->mir()->isConstructing()) {
6339 Label notPrimitive;
6340 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6341 &notPrimitive);
6342 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
6343 JSReturnOperand);
6344 #ifdef DEBUG
6345 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6346 &notPrimitive);
6347 masm.assumeUnreachable("CreateThis creates an object");
6348 #endif
6349 masm.bind(&notPrimitive);
// VM fallback for the apply/construct-style LIR nodes (LApply*/LConstruct*):
// the arguments have already been pushed onto the stack, so the current stack
// pointer doubles as argv.
6353 template <typename T>
6354 void CodeGenerator::emitCallInvokeFunction(T* apply) {
6355 Register objreg = ToRegister(apply->getTempObject());
6357 // Push the space used by the arguments.
6358 masm.moveStackPtrTo(objreg);
6360 pushArg(objreg); // argv.
6361 pushArg(ToRegister(apply->getArgc())); // argc.
6362 pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
6363 pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
6364 pushArg(ToRegister(apply->getFunction())); // JSFunction*.
6366 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6367 MutableHandleValue);
6368 callVM<Fn, jit::InvokeFunction>(apply);
6371 // Do not bailout after the execution of this function since the stack no longer
6372 // correspond to what is expected by the snapshots.
// Reserve stack space for |argcreg| Values (plus one padding slot when argc
// is even and JitStackValueAlignment requires 16-byte alignment). Clobbers
// |scratch|.
6373 void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
6374 Register scratch) {
6375 // Use scratch register to calculate stack space (including padding).
6376 masm.movePtr(argcreg, scratch);
6378 // Align the JitFrameLayout on the JitStackAlignment.
6379 if (JitStackValueAlignment > 1) {
6380 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6381 "Stack padding assumes that the frameSize is correct");
6382 MOZ_ASSERT(JitStackValueAlignment == 2);
6383 Label noPaddingNeeded;
6384 // if the number of arguments is odd, then we do not need any padding.
6385 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6386 masm.addPtr(Imm32(1), scratch);
6387 masm.bind(&noPaddingNeeded);
6390 // Reserve space for copying the arguments.
6391 NativeObject::elementsSizeMustNotOverflow();
6392 masm.lshiftPtr(Imm32(ValueShift), scratch);
6393 masm.subFromStackPtr(scratch);
6395 #ifdef DEBUG
6396 // Put a magic value in the space reserved for padding. Note, this code
6397 // cannot be merged with the previous test, as not all architectures can
6398 // write below their stack pointers.
6399 if (JitStackValueAlignment > 1) {
6400 MOZ_ASSERT(JitStackValueAlignment == 2);
6401 Label noPaddingNeeded;
6402 // if the number of arguments is odd, then we do not need any padding.
6403 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6404 BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
6405 masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
6406 masm.bind(&noPaddingNeeded);
6408 #endif
6411 // Do not bailout after the execution of this function since the stack no longer
6412 // correspond to what is expected by the snapshots.
// Like emitAllocateSpaceForApply, but also pushes |new.target| (and any
// required padding value) before reserving space for the arguments. On exit,
// |newTargetAndScratch| has been repurposed as a scratch register.
6413 void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
6414 Register argcreg, Register newTargetAndScratch) {
6415 // Align the JitFrameLayout on the JitStackAlignment. Contrary to
6416 // |emitAllocateSpaceForApply()|, we're always pushing a magic value, because
6417 // we can't write to |newTargetAndScratch| before |new.target| has
6418 // been pushed onto the stack.
6419 if (JitStackValueAlignment > 1) {
6420 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6421 "Stack padding assumes that the frameSize is correct");
6422 MOZ_ASSERT(JitStackValueAlignment == 2);
6424 Label noPaddingNeeded;
6425 // If the number of arguments is even, then we do not need any padding.
// (|new.target| itself occupies one slot, flipping the usual parity check
// relative to emitAllocateSpaceForApply.)
6426 masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
6427 masm.pushValue(MagicValue(JS_ARG_POISON));
6428 masm.bind(&noPaddingNeeded);
6431 // Push |new.target| after the padding value, but before any arguments.
6432 masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndScratch);
6434 // Use newTargetAndScratch to calculate stack space (including padding).
6435 masm.movePtr(argcreg, newTargetAndScratch);
6437 // Reserve space for copying the arguments.
6438 NativeObject::elementsSizeMustNotOverflow();
6439 masm.lshiftPtr(Imm32(ValueShift), newTargetAndScratch);
6440 masm.subFromStackPtr(newTargetAndScratch);
6443 // Destroys argvIndex and copyreg.
// Copy |argvIndex| Values from argvSrcBase+argvSrcOffset to the stack at
// argvDstOffset, iterating from the highest index down to 1. |argvIndex|
// must hold the number of values to copy on entry.
6444 void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
6445 Register argvIndex, Register copyreg,
6446 size_t argvSrcOffset,
6447 size_t argvDstOffset) {
6448 Label loop;
6449 masm.bind(&loop);
6451 // As argvIndex is off by 1, and we use the decBranchPtr instruction
6452 // to loop back, we have to substract the size of the word which are
6453 // copied.
6454 BaseValueIndex srcPtr(argvSrcBase, argvIndex,
6455 int32_t(argvSrcOffset) - sizeof(void*));
6456 BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
6457 int32_t(argvDstOffset) - sizeof(void*));
6458 masm.loadPtr(srcPtr, copyreg);
6459 masm.storePtr(copyreg, dstPtr);
6461 // Handle 32 bits architectures.
// On 32-bit, a Value is two words, so copy the low word separately.
6462 if (sizeof(Value) == 2 * sizeof(void*)) {
6463 BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
6464 int32_t(argvSrcOffset) - 2 * sizeof(void*));
6465 BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
6466 int32_t(argvDstOffset) - 2 * sizeof(void*));
6467 masm.loadPtr(srcPtrLow, copyreg);
6468 masm.storePtr(copyreg, dstPtrLow);
6471 masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
6474 void CodeGenerator::emitRestoreStackPointerFromFP() {
6475 // This is used to restore the stack pointer after a call with a dynamic
6476 // number of arguments.
6478 MOZ_ASSERT(masm.framePushed() == frameSize());
// SP = FP - frameSize(): recompute the stack pointer from the (fixed) frame
// pointer rather than tracking dynamic pushes.
6480 int32_t offset = -int32_t(frameSize());
6481 masm.computeEffectiveAddress(Address(FramePointer, offset),
6482 masm.getStackPointer());
// Copy the current frame's actual arguments (skipping |extraFormals| leading
// formals) into previously-allocated stack space. Clobbers scratch and
// copyreg; does nothing when argc is zero.
6485 void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
6486 Register copyreg, uint32_t extraFormals) {
6487 Label end;
6489 // Skip the copy of arguments if there are none.
6490 masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);
6492 // clang-format off
6494 // We are making a copy of the arguments which are above the JitFrameLayout
6495 // of the current Ion frame.
6497 // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
6499 // clang-format on
6501 // Compute the source and destination offsets into the stack.
6502 Register argvSrcBase = FramePointer;
6503 size_t argvSrcOffset =
6504 JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
6505 size_t argvDstOffset = 0;
6507 Register argvIndex = scratch;
6508 masm.move32(argcreg, argvIndex);
6510 // Copy arguments.
6511 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6512 argvDstOffset);
6514 // Join with all arguments copied and the extra stack usage computed.
6515 masm.bind(&end);
// Push arguments for fun.apply(thisArg, arguments-from-this-frame):
// allocate space, copy the caller's actual args, then push |this|.
6518 void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
6519 Register scratch) {
6520 // Holds the function nargs. Initially the number of args to the caller.
6521 Register argcreg = ToRegister(apply->getArgc());
6522 Register copyreg = ToRegister(apply->getTempObject());
6523 uint32_t extraFormals = apply->numExtraFormals();
6525 emitAllocateSpaceForApply(argcreg, scratch);
6527 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6529 // Push |this|.
6530 masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
// Push arguments for fun.apply(thisArg, argsObject): read the length from
// the arguments object, copy its elements onto the stack, then push |this|.
6533 void CodeGenerator::emitPushArguments(LApplyArgsObj* apply, Register scratch) {
6534 // argc and argsObj are mapped to the same calltemp register.
6535 MOZ_ASSERT(apply->getArgsObj() == apply->getArgc());
6537 Register tmpArgc = ToRegister(apply->getTempObject());
6538 Register argsObj = ToRegister(apply->getArgsObj());
6540 // Load argc into tmpArgc.
6541 masm.loadArgumentsObjectLength(argsObj, tmpArgc);
6543 // Allocate space on the stack for arguments. This modifies scratch.
6544 emitAllocateSpaceForApply(tmpArgc, scratch);
6546 // Load arguments data
6547 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
6548 argsObj);
6549 size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
6551 // This is the end of the lifetime of argsObj.
6552 // After this call, the argsObj register holds the argument count instead.
6553 emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);
6555 masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
// Copy |tmpArgc| Values from srcBaseAndArgc+argvSrcOffset into the
// already-allocated stack space, then leave the argument count in
// srcBaseAndArgc (which doubles as the argc register afterwards).
6558 void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
6559 Register srcBaseAndArgc,
6560 Register scratch,
6561 size_t argvSrcOffset) {
6562 // Preconditions:
6563 // 1. |tmpArgc| * sizeof(Value) bytes have been allocated at the top of
6564 // the stack to hold arguments.
6565 // 2. |srcBaseAndArgc| + |srcOffset| points to an array of |tmpArgc| values.
6567 // Postconditions:
6568 // 1. The arguments at |srcBaseAndArgc| + |srcOffset| have been copied into
6569 // the allocated space.
6570 // 2. |srcBaseAndArgc| now contains the original value of |tmpArgc|.
6572 // |scratch| is used as a temp register within this function and clobbered.
6574 Label noCopy, epilogue;
6576 // Skip the copy of arguments if there are none.
6577 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
6579 // Copy the values. This code is skipped entirely if there are
6580 // no values.
6581 size_t argvDstOffset = 0;
6583 Register argvSrcBase = srcBaseAndArgc;
6584 Register copyreg = scratch;
// Save argc across the copy loop (which destroys tmpArgc); the push also
// shifts the destination, hence the offset adjustment below.
6586 masm.push(tmpArgc);
6587 Register argvIndex = tmpArgc;
6588 argvDstOffset += sizeof(void*);
6590 // Copy
6591 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6592 argvDstOffset);
6594 // Restore.
6595 masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
6596 masm.jump(&epilogue);
6598 // Clear argc if we skipped the copy step.
6599 masm.bind(&noCopy);
6600 masm.movePtr(ImmWord(0), srcBaseAndArgc);
6602 // Join with all arguments copied and the extra stack usage computed.
6603 // Note, "srcBase" has become "argc".
6604 masm.bind(&epilogue);
// Push arguments for fun.apply(thisArg, array): use the array's length as
// argc, copy its elements onto the stack, then push |this|.
6607 void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply,
6608 Register scratch) {
6609 Register tmpArgc = ToRegister(apply->getTempObject());
6610 Register elementsAndArgc = ToRegister(apply->getElements());
6612 // Invariants guarded in the caller:
6613 // - the array is not too long
6614 // - the array length equals its initialized length
6616 // The array length is our argc for the purposes of allocating space.
6617 Address length(ToRegister(apply->getElements()),
6618 ObjectElements::offsetOfLength());
6619 masm.load32(length, tmpArgc);
6621 // Allocate space for the values.
6622 emitAllocateSpaceForApply(tmpArgc, scratch);
6624 // After this call "elements" has become "argc".
6625 size_t elementsOffset = 0;
6626 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6628 // Push |this|.
6629 masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
// Push arguments for Reflect.construct-style calls that forward the current
// frame's actuals: push |new.target|, allocate space, copy the args, then
// push |this|.
6632 void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct,
6633 Register scratch) {
6634 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6636 // Holds the function nargs. Initially the number of args to the caller.
6637 Register argcreg = ToRegister(construct->getArgc());
6638 Register copyreg = ToRegister(construct->getTempObject());
6639 uint32_t extraFormals = construct->numExtraFormals();
6641 // Allocate space for the values.
6642 // After this call "newTarget" has become "scratch".
6643 emitAllocateSpaceForConstructAndPushNewTarget(argcreg, scratch);
6645 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6647 // Push |this|.
6648 masm.pushValue(ToValue(construct, LConstructArgsGeneric::ThisIndex));
// Push the stack arguments for constructing from an array of values: the
// dense elements become the argument vector, the array length becomes argc,
// and newTarget is pushed (its register is then repurposed as |scratch|).
6651 void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct,
6652 Register scratch) {
6653 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6655 Register tmpArgc = ToRegister(construct->getTempObject());
6656 Register elementsAndArgc = ToRegister(construct->getElements());
6658 // Invariants guarded in the caller:
6659 // - the array is not too long
6660 // - the array length equals its initialized length
6662 // The array length is our argc for the purposes of allocating space.
6663 Address length(ToRegister(construct->getElements()),
6664 ObjectElements::offsetOfLength());
6665 masm.load32(length, tmpArgc);
6667 // Allocate space for the values.
6668 emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, scratch);
6670 // After this call "elements" has become "argc" and "newTarget" has become
6671 // "scratch".
6672 size_t elementsOffset = 0;
6673 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6675 // Push |this|.
6676 masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
// Shared code generation for all apply/spread call and construct variants
// (T is one of the LApply*/LConstruct* instruction types). Pushes the
// arguments via the type-specific emitPushArguments overload, then either
// calls through JIT code (with an arguments-rectifier fallback on argc
// underflow) or falls back to the InvokeFunction VM path for native or
// uncompiled callees.
6679 template <typename T>
6680 void CodeGenerator::emitApplyGeneric(T* apply) {
6681 // Holds the function object.
6682 Register calleereg = ToRegister(apply->getFunction());
6684 // Temporary register for modifying the function object.
6685 Register objreg = ToRegister(apply->getTempObject());
6686 Register scratch = ToRegister(apply->getTempForArgCopy());
6688 // Holds the function nargs, computed in the invoker or (for ApplyArray,
6689 // ConstructArray, or ApplyArgsObj) in the argument pusher.
6690 Register argcreg = ToRegister(apply->getArgc());
6692 // Copy the arguments of the current function.
6694 // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also
6695 // compute argc. The argc register and the elements/argsObj register
6696 // are the same; argc must not be referenced before the call to
6697 // emitPushArguments() and elements/argsObj must not be referenced
6698 // after it returns.
6700 // In the case of ConstructArray or ConstructArgs, also overwrite newTarget
6701 // with scratch; newTarget must not be referenced after this point.
6703 // objreg is dead across this call.
6704 emitPushArguments(apply, scratch);
6706 masm.checkStackAlignment();
6708 bool constructing = apply->mir()->isConstructing();
6710 // If the function is native, only emit the call to InvokeFunction.
6711 if (apply->hasSingleTarget() &&
6712 apply->getSingleTarget()->isNativeWithoutJitEntry()) {
6713 emitCallInvokeFunction(apply);
6715 #ifdef DEBUG
6716 // Native constructors are guaranteed to return an Object value, so we never
6717 // have to replace a primitive result with the previously allocated Object
6718 // from CreateThis.
6719 if (constructing) {
6720 Label notPrimitive;
6721 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6722 &notPrimitive);
6723 masm.assumeUnreachable("native constructors don't return primitives");
6724 masm.bind(&notPrimitive);
6726 #endif
6728 emitRestoreStackPointerFromFP();
6729 return;
6732 Label end, invoke;
6734 // Unless already known, guard that calleereg is actually a function object.
6735 if (!apply->hasSingleTarget()) {
6736 masm.branchTestObjIsFunction(Assembler::NotEqual, calleereg, objreg,
6737 calleereg, &invoke);
6740 // Guard that calleereg is an interpreted function with a JSScript.
6741 masm.branchIfFunctionHasNoJitEntry(calleereg, constructing, &invoke);
6743 // Guard that callee allows the [[Call]] or [[Construct]] operation required.
6744 if (constructing) {
6745 masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
6746 Assembler::Zero, &invoke);
6747 } else {
6748 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
6749 calleereg, objreg, &invoke);
6752 // Use the slow path if CreateThis was unable to create the |this| object.
6753 if (constructing) {
6754 Address thisAddr(masm.getStackPointer(), 0);
6755 masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
6758 // Call with an Ion frame or a rectifier frame.
6760 if (apply->mir()->maybeCrossRealm()) {
6761 masm.switchToObjectRealm(calleereg, objreg);
6764 // Knowing that calleereg is a non-native function, load jitcode.
6765 masm.loadJitCodeRaw(calleereg, objreg);
6767 masm.PushCalleeToken(calleereg, constructing);
6768 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcreg, scratch);
6770 Label underflow, rejoin;
6772 // Check whether the provided arguments satisfy target argc.
6773 if (!apply->hasSingleTarget()) {
6774 Register nformals = scratch;
6775 masm.loadFunctionArgCount(calleereg, nformals);
6776 masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
6777 } else {
6778 masm.branch32(Assembler::Below, argcreg,
6779 Imm32(apply->getSingleTarget()->nargs()), &underflow);
6782 // Skip the construction of the rectifier frame because we have no
6783 // underflow.
6784 masm.jump(&rejoin);
6786 // Argument fixup needed. Get ready to call the argumentsRectifier.
6788 masm.bind(&underflow);
6790 // Hardcode the address of the argumentsRectifier code.
6791 TrampolinePtr argumentsRectifier =
6792 gen->jitRuntime()->getArgumentsRectifier();
6793 masm.movePtr(argumentsRectifier, objreg);
6796 masm.bind(&rejoin);
6798 // Finally call the function in objreg, as assigned by one of the paths
6799 // above.
6800 ensureOsiSpace();
6801 uint32_t callOffset = masm.callJit(objreg);
6802 markSafepointAt(callOffset, apply);
6804 if (apply->mir()->maybeCrossRealm()) {
6805 static_assert(!JSReturnOperand.aliases(ReturnReg),
6806 "ReturnReg available as scratch after scripted calls");
6807 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6810 // Discard JitFrameLayout fields still left on the stack.
6811 masm.freeStack(sizeof(JitFrameLayout) -
6812 JitFrameLayout::bytesPoppedAfterCall());
6813 masm.jump(&end);
6816 // Handle uncompiled or native functions.
6818 masm.bind(&invoke);
6819 emitCallInvokeFunction(apply);
6822 masm.bind(&end);
6824 // If the return value of the constructing function is Primitive,
6825 // replace the return value with the Object from CreateThis.
6826 if (constructing) {
6827 Label notPrimitive;
6828 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6829 &notPrimitive);
6830 masm.loadValue(Address(masm.getStackPointer(), 0), JSReturnOperand);
6832 #ifdef DEBUG
6833 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6834 &notPrimitive);
6835 masm.assumeUnreachable("CreateThis creates an object");
6836 #endif
6838 masm.bind(&notPrimitive);
6841 // Pop arguments and continue.
6842 emitRestoreStackPointerFromFP();
// fun.apply(thisArg, arguments) with a known argc register: bail out on an
// unreasonable argument count, then emit the generic apply path.
6845 void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
6846 LSnapshot* snapshot = apply->snapshot();
6847 Register argcreg = ToRegister(apply->getArgc());
6849 // Ensure that we have a reasonable number of arguments.
6850 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6852 emitApplyGeneric(apply);
// fun.apply(thisArg, argsObject): read the length from the arguments object
// (bailing out if it is overridden/out-of-range per the helper's slow path)
// and reject counts above JIT_ARGS_LENGTH_MAX before the generic apply path.
6855 void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
6856 Register argsObj = ToRegister(apply->getArgsObj());
6857 Register temp = ToRegister(apply->getTempObject());
6859 Label bail;
6860 masm.loadArgumentsObjectLength(argsObj, temp, &bail);
6861 masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
6862 bailoutFrom(&bail, apply->snapshot());
6864 emitApplyGeneric(apply);
// fun.apply(thisArg, array): bail out if the array is too long or has an
// uninitialized tail (length != initializedLength), then emit the generic
// apply path. These checks establish the invariants emitPushArguments relies
// on.
6867 void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
6868 LSnapshot* snapshot = apply->snapshot();
6869 Register tmp = ToRegister(apply->getTempObject());
6871 Address length(ToRegister(apply->getElements()),
6872 ObjectElements::offsetOfLength());
6873 masm.load32(length, tmp);
6875 // Ensure that we have a reasonable number of arguments.
6876 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6878 // Ensure that the array does not contain an uninitialized tail.
6880 Address initializedLength(ToRegister(apply->getElements()),
6881 ObjectElements::offsetOfInitializedLength());
6882 masm.sub32(initializedLength, tmp);
6883 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
6885 emitApplyGeneric(apply);
// new Target(...args) with a known argc register: bail out on an
// unreasonable argument count, then emit the generic apply/construct path.
6888 void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
6889 LSnapshot* snapshot = lir->snapshot();
6890 Register argcreg = ToRegister(lir->getArgc());
6892 // Ensure that we have a reasonable number of arguments.
6893 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6895 emitApplyGeneric(lir);
// Construct from an array of values: bail out if the array is too long or
// has an uninitialized tail, then emit the generic apply/construct path.
// Mirrors visitApplyArrayGeneric but for the constructing case.
6898 void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
6899 LSnapshot* snapshot = lir->snapshot();
6900 Register tmp = ToRegister(lir->getTempObject());
6902 Address length(ToRegister(lir->getElements()),
6903 ObjectElements::offsetOfLength());
6904 masm.load32(length, tmp);
6906 // Ensure that we have a reasonable number of arguments.
6907 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6909 // Ensure that the array does not contain an uninitialized tail.
6911 Address initializedLength(ToRegister(lir->getElements()),
6912 ObjectElements::offsetOfInitializedLength());
6913 masm.sub32(initializedLength, tmp);
6914 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
6916 emitApplyGeneric(lir);
// Unconditional bailout: deoptimize to the snapshot's resume point.
6919 void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
// Emit a debug-mode trap for code MIR proved unreachable.
6921 void CodeGenerator::visitUnreachable(LUnreachable* lir) {
6922 masm.assumeUnreachable("end-of-block assumed unreachable");
// Encode the instruction's snapshot into the compact snapshot buffer.
6925 void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) {
6926 encode(lir->snapshot());
// Value-typed variant of an unreachable result; traps in debug builds.
6929 void CodeGenerator::visitUnreachableResultV(LUnreachableResultV* lir) {
6930 masm.assumeUnreachable("must be unreachable");
// Typed variant of an unreachable result; traps in debug builds.
6933 void CodeGenerator::visitUnreachableResultT(LUnreachableResultT* lir) {
6934 masm.assumeUnreachable("must be unreachable");
6937 // Out-of-line path to report over-recursed error and fail.
6938 class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> {
6939 LInstruction* lir_;  // Instruction whose live registers must be saved.
6941 public:
6942 explicit CheckOverRecursedFailure(LInstruction* lir) : lir_(lir) {}
6944 void accept(CodeGenerator* codegen) override {
6945 codegen->visitCheckOverRecursedFailure(this);
6948 LInstruction* lir() const { return lir_; }
// Compare the stack pointer against the runtime's JIT stack limit and branch
// to an out-of-line failure path (which throws) when the limit is reached.
6951 void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) {
6952 // If we don't push anything on the stack, skip the check.
6953 if (omitOverRecursedCheck()) {
6954 return;
6957 // Ensure that this frame will not cross the stack limit.
6958 // This is a weak check, justified by Ion using the C stack: we must always
6959 // be some distance away from the actual limit, since if the limit is
6960 // crossed, an error must be thrown, which requires more frames.
6962 // It must always be possible to trespass past the stack limit.
6963 // Ion may legally place frames very close to the limit. Calling additional
6964 // C functions may then violate the limit without any checking.
6966 // Since Ion frames exist on the C stack, the stack limit may be
6967 // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().
6969 CheckOverRecursedFailure* ool = new (alloc()) CheckOverRecursedFailure(lir);
6970 addOutOfLineCode(ool, lir->mir());
6972 // Conditional forward (unlikely) branch to failure.
6973 const void* limitAddr = gen->runtime->addressOfJitStackLimit();
6974 masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
6975 ool->entry());
6976 masm.bind(ool->rejoin());
// Out-of-line slow path for visitCheckOverRecursed: saves live registers,
// calls the CheckOverRecursed VM function (which throws InternalError), then
// restores and rejoins.
6979 void CodeGenerator::visitCheckOverRecursedFailure(
6980 CheckOverRecursedFailure* ool) {
6981 // The OOL path is hit if the recursion depth has been exceeded.
6982 // Throw an InternalError for over-recursion.
6984 // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
6985 // to save all live registers to avoid crashes if CheckOverRecursed triggers
6986 // a GC.
6987 saveLive(ool->lir());
6989 using Fn = bool (*)(JSContext*);
6990 callVM<Fn, CheckOverRecursed>(ool->lir());
6992 restoreLive(ool->lir());
6993 masm.jump(ool->rejoin());
// Create an IonScriptCounts for per-block profiling if script profiling is
// enabled and this compilation has a JSScript (i.e. not wasm). Returns
// nullptr when counters are disabled or on OOM; ownership is retained in
// scriptCounts_.
6996 IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() {
6997 // If scripts are being profiled, create a new IonScriptCounts for the
6998 // profiling data, which will be attached to the associated JSScript or
6999 // wasm module after code generation finishes.
7000 if (!gen->hasProfilingScripts()) {
7001 return nullptr;
7004 // This test inhibits IonScriptCount creation for wasm code which is
7005 // currently incompatible with wasm codegen for two reasons: (1) wasm code
7006 // must be serializable and script count codegen bakes in absolute
7007 // addresses, (2) wasm code does not have a JSScript with which to associate
7008 // code coverage data.
7009 JSScript* script = gen->outerInfo().script();
7010 if (!script) {
7011 return nullptr;
7014 auto counts = MakeUnique<IonScriptCounts>();
7015 if (!counts || !counts->init(graph.numBlocks())) {
7016 return nullptr;
7019 for (size_t i = 0; i < graph.numBlocks(); i++) {
7020 MBasicBlock* block = graph.getBlock(i)->mir();
7022 uint32_t offset = 0;
7023 char* description = nullptr;
7024 if (MResumePoint* resume = block->entryResumePoint()) {
7025 // Find a PC offset in the outermost script to use. If this
7026 // block is from an inlined script, find a location in the
7027 // outer script to associate information about the inlining
7028 // with.
7029 while (resume->caller()) {
7030 resume = resume->caller();
7032 offset = script->pcToOffset(resume->pc());
7034 if (block->entryResumePoint()->caller()) {
7035 // Get the filename and line number of the inner script.
7036 JSScript* innerScript = block->info().script();
7037 description = js_pod_calloc<char>(200);
7038 if (description) {
7039 snprintf(description, 200, "%s:%u", innerScript->filename(),
7040 innerScript->lineno());
7045 if (!counts->block(i).init(block->id(), offset, description,
7046 block->numSuccessors())) {
7047 return nullptr;
7050 for (size_t j = 0; j < block->numSuccessors(); j++) {
7051 counts->block(i).setSuccessor(
7052 j, skipTrivialBlocks(block->getSuccessor(j))->id());
7056 scriptCounts_ = counts.release();
7057 return scriptCounts_;
7060 // Structure for managing the state tracked for a block by script counters.
7061 struct ScriptCountBlockState {
7062 IonBlockCounts& block;
7063 MacroAssembler& masm;
7065 Sprinter printer;  // Collects disassembly text for the block.
7067 public:
7068 ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
7069 : block(*block), masm(*masm), printer(GetJitContext()->cx, false) {}
7071 bool init() {
7072 if (!printer.init()) {
7073 return false;
7076 // Bump the hit count for the block at the start. This code is not
7077 // included in either the text for the block or the instruction byte
7078 // counts.
7079 masm.inc64(AbsoluteAddress(block.addressOfHitCount()));
7081 // Collect human readable assembly for the code generated in the block.
7082 masm.setPrinter(&printer);
7084 return true;
7087 void visitInstruction(LInstruction* ins) {
7088 #ifdef JS_JITSPEW
7089 // Prefix stream of assembly instructions with their LIR instruction
7090 // name and any associated high level info.
7091 if (const char* extra = ins->getExtraName()) {
7092 printer.printf("[%s:%s]\n", ins->opName(), extra);
7093 } else {
7094 printer.printf("[%s]\n", ins->opName());
7096 #endif
7099 ~ScriptCountBlockState() {
7100 // Detach the printer and hand the collected text to the block counts.
7100 masm.setPrinter(nullptr);
7102 if (JS::UniqueChars str = printer.release()) {
7103 block.setCode(str.get());
// Branch to |invalidated| if this IonScript has been invalidated. The
// IonScript pointer is patched into |temp| after codegen (via
// ionScriptLabels_), so the placeholder here is -1.
7108 void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) {
7109 CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
7110 masm.propagateOOM(ionScriptLabels_.append(label));
7112 // If IonScript::invalidationCount_ != 0, the script has been invalidated.
7113 masm.branch32(Assembler::NotEqual,
7114 Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0),
7115 invalidated);
7118 #ifdef DEBUG
// Debug-only check that a GC-thing-typed result register holds a valid
// pointer of the expected kind (Object/String/Symbol/BigInt), by calling the
// matching AssertValid*Ptr C++ helper. Skipped if the script was invalidated.
7119 void CodeGenerator::emitAssertGCThingResult(Register input,
7120 const MDefinition* mir) {
7121 MIRType type = mir->type();
7122 MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
7123 type == MIRType::Symbol || type == MIRType::BigInt);
7125 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7126 regs.take(input);
7128 Register temp = regs.takeAny();
7129 masm.push(temp);
7131 // Don't check if the script has been invalidated. In that case invalid
7132 // types are expected (until we reach the OsiPoint and bailout).
7133 Label done;
7134 branchIfInvalidated(temp, &done);
7136 # ifndef JS_SIMULATOR
7137 // Check that we have a valid GC pointer.
7138 // Disable for wasm because we don't have a context on wasm compilation
7139 // threads and this needs a context.
7140 // Also disable for simulator builds because the C++ call is a lot slower
7141 // there than on actual hardware.
7142 if (JitOptions.fullDebugChecks && !IsCompilingWasm()) {
7143 saveVolatile();
7144 masm.setupUnalignedABICall(temp);
7145 masm.loadJSContext(temp);
7146 masm.passABIArg(temp);
7147 masm.passABIArg(input);
7149 switch (type) {
7150 case MIRType::Object: {
7151 using Fn = void (*)(JSContext* cx, JSObject* obj);
7152 masm.callWithABI<Fn, AssertValidObjectPtr>();
7153 break;
7155 case MIRType::String: {
7156 using Fn = void (*)(JSContext* cx, JSString* str);
7157 masm.callWithABI<Fn, AssertValidStringPtr>();
7158 break;
7160 case MIRType::Symbol: {
7161 using Fn = void (*)(JSContext* cx, JS::Symbol* sym);
7162 masm.callWithABI<Fn, AssertValidSymbolPtr>();
7163 break;
7165 case MIRType::BigInt: {
7166 using Fn = void (*)(JSContext* cx, JS::BigInt* bi);
7167 masm.callWithABI<Fn, AssertValidBigIntPtr>();
7168 break;
7170 default:
7171 MOZ_CRASH();
7174 restoreVolatile();
7176 # endif
7178 masm.bind(&done);
7179 masm.pop(temp);
// Debug-only check that a boxed Value result is valid, by spilling it to the
// stack and calling AssertValidValue. Skipped if the script was invalidated.
7182 void CodeGenerator::emitAssertResultV(const ValueOperand input,
7183 const MDefinition* mir) {
7184 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7185 regs.take(input);
7187 Register temp1 = regs.takeAny();
7188 Register temp2 = regs.takeAny();
7189 masm.push(temp1);
7190 masm.push(temp2);
7192 // Don't check if the script has been invalidated. In that case invalid
7193 // types are expected (until we reach the OsiPoint and bailout).
7194 Label done;
7195 branchIfInvalidated(temp1, &done);
7197 // Check that we have a valid GC pointer.
7198 if (JitOptions.fullDebugChecks) {
7199 saveVolatile();
7201 masm.pushValue(input);
7202 masm.moveStackPtrTo(temp1);  // Pass the Value by address.
7204 using Fn = void (*)(JSContext* cx, Value* v);
7205 masm.setupUnalignedABICall(temp2);
7206 masm.loadJSContext(temp2);
7207 masm.passABIArg(temp2);
7208 masm.passABIArg(temp1);
7209 masm.callWithABI<Fn, AssertValidValue>();
7210 masm.popValue(input);
7211 restoreVolatile();
7214 masm.bind(&done);
7215 masm.pop(temp2);
7216 masm.pop(temp1);
// Emit the GC-thing validity check for an instruction's single register
// definition, if it has one (skips bogus temps and def-less instructions).
7219 void CodeGenerator::emitGCThingResultChecks(LInstruction* lir,
7220 MDefinition* mir) {
7221 if (lir->numDefs() == 0) {
7222 return;
7225 MOZ_ASSERT(lir->numDefs() == 1);
7226 if (lir->getDef(0)->isBogusTemp()) {
7227 return;
7230 Register output = ToRegister(lir->getDef(0));
7231 emitAssertGCThingResult(output, mir);
// Emit the boxed-Value validity check for an instruction's Value definition,
// if the output landed in registers.
7234 void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) {
7235 if (lir->numDefs() == 0) {
7236 return;
7239 MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
7240 if (!lir->getDef(0)->output()->isRegister()) {
7241 return;
7244 ValueOperand output = ToOutValue(lir);
7246 emitAssertResultV(output, mir);
// Dispatch on the MIR result type and emit the matching debug result check
// (GC-thing pointer or boxed Value); other types are not checked.
7249 void CodeGenerator::emitDebugResultChecks(LInstruction* ins) {
7250 // In debug builds, check that LIR instructions return valid values.
7252 MDefinition* mir = ins->mirRaw();
7253 if (!mir) {
7254 return;
7257 switch (mir->type()) {
7258 case MIRType::Object:
7259 case MIRType::String:
7260 case MIRType::Symbol:
7261 case MIRType::BigInt:
7262 emitGCThingResultChecks(ins, mir);
7263 break;
7264 case MIRType::Value:
7265 emitValueResultChecks(ins, mir);
7266 break;
7267 default:
7268 break;
// Debug-only "bail after N instructions" support: decrement the runtime's
// bail-after counter at each snapshot-carrying instruction and force a
// bailout when it reaches zero. No-op unless ionBailAfterEnabled().
7272 void CodeGenerator::emitDebugForceBailing(LInstruction* lir) {
7273 if (MOZ_LIKELY(!gen->options.ionBailAfterEnabled())) {
7274 return;
7276 if (!lir->snapshot()) {
7277 return;
7279 if (lir->isOsiPoint()) {
7280 return;
7283 masm.comment("emitDebugForceBailing");
7284 const void* bailAfterCounterAddr =
7285 gen->runtime->addressOfIonBailAfterCounter();
7287 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7289 Label done, notBail;
7290 masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterCounterAddr),
7291 Imm32(0), &done);
7293 Register temp = regs.takeAny();
7295 masm.push(temp);
7296 masm.load32(AbsoluteAddress(bailAfterCounterAddr), temp);
7297 masm.sub32(Imm32(1), temp);
7298 masm.store32(temp, AbsoluteAddress(bailAfterCounterAddr));
7300 masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
7302 masm.pop(temp);
7303 bailout(lir->snapshot());
7305 masm.bind(&notBail);
7306 masm.pop(temp);
7308 masm.bind(&done);
7310 #endif
// Main code generation loop: iterate over every (non-trivial) basic block
// and every LIR instruction within it, dispatching to the per-opcode
// visit* method. Also wires up script counters, native-to-bytecode mapping,
// jit spew, and debug checks. Returns false on OOM or mapping failure.
7312 bool CodeGenerator::generateBody() {
7313 JitSpewCont(JitSpew_Codegen, "\n");
7314 AutoCreatedBy acb(masm, "CodeGenerator::generateBody");
7316 JitSpew(JitSpew_Codegen, "==== BEGIN CodeGenerator::generateBody ====");
7317 IonScriptCounts* counts = maybeCreateScriptCounts();
7319 const bool compilingWasm = gen->compilingWasm();
7321 for (size_t i = 0; i < graph.numBlocks(); i++) {
7322 current = graph.getBlock(i);
7324 // Don't emit any code for trivial blocks, containing just a goto. Such
7325 // blocks are created to split critical edges, and if we didn't end up
7326 // putting any instructions in them, we can skip them.
7327 if (current->isTrivial()) {
7328 continue;
7331 #ifdef JS_JITSPEW
7332 const char* filename = nullptr;
7333 size_t lineNumber = 0;
7334 JS::LimitedColumnNumberOneOrigin columnNumber;
7335 if (current->mir()->info().script()) {
7336 filename = current->mir()->info().script()->filename();
7337 if (current->mir()->pc()) {
7338 lineNumber = PCToLineNumber(current->mir()->info().script(),
7339 current->mir()->pc(), &columnNumber);
7342 JitSpew(JitSpew_Codegen, "--------------------------------");
7343 JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", i,
7344 filename ? filename : "?", lineNumber,
7345 columnNumber.oneOriginValue(),
7346 current->mir()->isLoopHeader() ? " (loop header)" : "");
7347 #endif
7349 if (current->mir()->isLoopHeader() && compilingWasm) {
7350 masm.nopAlign(CodeAlignment);
7353 masm.bind(current->label());
7355 mozilla::Maybe<ScriptCountBlockState> blockCounts;
7356 if (counts) {
7357 blockCounts.emplace(&counts->block(i), &masm);
7358 if (!blockCounts->init()) {
7359 return false;
7363 for (LInstructionIterator iter = current->begin(); iter != current->end();
7364 iter++) {
7365 if (!alloc().ensureBallast()) {
7366 return false;
7369 perfSpewer_.recordInstruction(masm, *iter);
7370 #ifdef JS_JITSPEW
7371 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
7372 iter->opName());
7373 if (const char* extra = iter->getExtraName()) {
7374 JitSpewCont(JitSpew_Codegen, ":%s", extra);
7376 JitSpewFin(JitSpew_Codegen);
7377 #endif
7379 if (counts) {
7380 blockCounts->visitInstruction(*iter);
7383 #ifdef CHECK_OSIPOINT_REGISTERS
7384 if (iter->safepoint() && !compilingWasm) {
7385 resetOsiPointRegs(iter->safepoint());
7387 #endif
7389 if (!compilingWasm) {
7390 if (MDefinition* mir = iter->mirRaw()) {
7391 if (!addNativeToBytecodeEntry(mir->trackedSite())) {
7392 return false;
7397 setElement(*iter); // needed to encode correct snapshot location.
7399 #ifdef DEBUG
7400 emitDebugForceBailing(*iter);
7401 #endif
7403 switch (iter->op()) {
7404 #ifndef JS_CODEGEN_NONE
7405 # define LIROP(op) \
7406 case LNode::Opcode::op: \
7407 visit##op(iter->to##op()); \
7408 break;
7409 LIR_OPCODE_LIST(LIROP)
7410 # undef LIROP
7411 #endif
7412 case LNode::Opcode::Invalid:
7413 default:
7414 MOZ_CRASH("Invalid LIR op");
7417 #ifdef DEBUG
7418 if (!counts) {
7419 emitDebugResultChecks(*iter);
7421 #endif
7423 if (masm.oom()) {
7424 return false;
7428 JitSpew(JitSpew_Codegen, "==== END CodeGenerator::generateBody ====\n");
7429 return true;
7432 // Out-of-line object allocation for LNewArray.
7433 class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator> {
7434 LNewArray* lir_;  // The instruction whose slow path this is.
7436 public:
7437 explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) {}
7439 void accept(CodeGenerator* codegen) override {
7440 codegen->visitOutOfLineNewArray(this);
7443 LNewArray* lir() const { return lir_; }
// VM fallback for LNewArray: call NewArrayWithShape when a template object
// is available, otherwise NewArrayOperation with a GenericObject kind.
7446 void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) {
7447 Register objReg = ToRegister(lir->output());
7449 MOZ_ASSERT(!lir->isCall());
7450 saveLive(lir);
7452 JSObject* templateObject = lir->mir()->templateObject();
7454 if (templateObject) {
7455 pushArg(ImmGCPtr(templateObject->shape()));
7456 pushArg(Imm32(lir->mir()->length()));
7458 using Fn = ArrayObject* (*)(JSContext*, uint32_t, Handle<Shape*>);
7459 callVM<Fn, NewArrayWithShape>(lir);
7460 } else {
7461 pushArg(Imm32(GenericObject));
7462 pushArg(Imm32(lir->mir()->length()));
7464 using Fn = ArrayObject* (*)(JSContext*, uint32_t, NewObjectKind);
7465 callVM<Fn, NewArrayOperation>(lir);
7468 masm.storeCallPointerResult(objReg);
7470 MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
7471 restoreLive(lir);
// Math.atan2(y, x): emit an aligned ABI call to the C++ ecmaAtan2 helper;
// the result is returned in ReturnDoubleReg.
7474 void CodeGenerator::visitAtan2D(LAtan2D* lir) {
7475 FloatRegister y = ToFloatRegister(lir->y());
7476 FloatRegister x = ToFloatRegister(lir->x());
7478 using Fn = double (*)(double x, double y);
7479 masm.setupAlignedABICall();
7480 masm.passABIArg(y, ABIType::Float64);
7481 masm.passABIArg(x, ABIType::Float64);
7482 masm.callWithABI<Fn, ecmaAtan2>(ABIType::Float64);
7484 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
// Math.hypot with 2-4 arguments: ABI call to the matching C++ helper
// (ecmaHypot/hypot3/hypot4); the result lands in ReturnDoubleReg.
7487 void CodeGenerator::visitHypot(LHypot* lir) {
7488 uint32_t numArgs = lir->numArgs();
7489 masm.setupAlignedABICall();
7491 for (uint32_t i = 0; i < numArgs; ++i) {
7492 masm.passABIArg(ToFloatRegister(lir->getOperand(i)), ABIType::Float64);
7495 switch (numArgs) {
7496 case 2: {
7497 using Fn = double (*)(double x, double y);
7498 masm.callWithABI<Fn, ecmaHypot>(ABIType::Float64);
7499 break;
7501 case 3: {
7502 using Fn = double (*)(double x, double y, double z);
7503 masm.callWithABI<Fn, hypot3>(ABIType::Float64);
7504 break;
7506 case 4: {
7507 using Fn = double (*)(double x, double y, double z, double w);
7508 masm.callWithABI<Fn, hypot4>(ABIType::Float64);
7509 break;
7511 default:
7512 MOZ_CRASH("Unexpected number of arguments to hypot function.");
7514 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
// LNewArray: allocate the array inline from its template object, falling
// back to the out-of-line VM path on allocation failure (or using the VM
// path directly when the MIR says so).
7517 void CodeGenerator::visitNewArray(LNewArray* lir) {
7518 Register objReg = ToRegister(lir->output());
7519 Register tempReg = ToRegister(lir->temp());
7520 DebugOnly<uint32_t> length = lir->mir()->length();
7522 MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);
7524 if (lir->mir()->isVMCall()) {
7525 visitNewArrayCallVM(lir);
7526 return;
7529 OutOfLineNewArray* ool = new (alloc()) OutOfLineNewArray(lir);
7530 addOutOfLineCode(ool, lir->mir());
7532 TemplateObject templateObject(lir->mir()->templateObject());
7533 #ifdef DEBUG
7534 size_t numInlineElements = gc::GetGCKindSlots(templateObject.getAllocKind()) -
7535 ObjectElements::VALUES_PER_HEADER;
7536 MOZ_ASSERT(length <= numInlineElements,
7537 "Inline allocation only supports inline elements");
7538 #endif
7539 masm.createGCObject(objReg, tempReg, templateObject,
7540 lir->mir()->initialHeap(), ool->entry());
7542 masm.bind(ool->rejoin());
// Out-of-line slow path for LNewArray: do the VM call, then rejoin.
7545 void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) {
7546 visitNewArrayCallVM(ool->lir());
7547 masm.jump(ool->rejoin());
// new Array(n) with a dynamic length: allocate inline from the template
// object when it has fixed elements and the requested length fits its
// inline capacity, otherwise (or on failure) call ArrayConstructorOneArg.
7550 void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) {
7551 Register lengthReg = ToRegister(lir->length());
7552 Register objReg = ToRegister(lir->output());
7553 Register tempReg = ToRegister(lir->temp0());
7555 JSObject* templateObject = lir->mir()->templateObject();
7556 gc::Heap initialHeap = lir->mir()->initialHeap();
7558 using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
7559 OutOfLineCode* ool = oolCallVM<Fn, ArrayConstructorOneArg>(
7560 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
7561 StoreRegisterTo(objReg));
7563 bool canInline = true;
7564 size_t inlineLength = 0;
7565 if (templateObject->as<ArrayObject>().hasFixedElements()) {
7566 size_t numSlots =
7567 gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
7568 inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
7569 } else {
7570 canInline = false;
7573 if (canInline) {
7574 // Try to do the allocation inline if the template object is big enough
7575 // for the length in lengthReg. If the length is bigger we could still
7576 // use the template object and not allocate the elements, but it's more
7577 // efficient to do a single big allocation than (repeatedly) reallocating
7578 // the array later on when filling it.
7579 masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength),
7580 ool->entry());
7582 TemplateObject templateObj(templateObject);
7583 masm.createGCObject(objReg, tempReg, templateObj, initialHeap,
7584 ool->entry());
7586 size_t lengthOffset = NativeObject::offsetOfFixedElements() +
7587 ObjectElements::offsetOfLength();
7588 masm.store32(lengthReg, Address(objReg, lengthOffset));
7589 } else {
7590 masm.jump(ool->entry());
7593 masm.bind(ool->rejoin());
// LNewIterator: allocate an Array/String/RegExpString iterator object inline
// from its template, with a type-specific VM call as the slow path.
7596 void CodeGenerator::visitNewIterator(LNewIterator* lir) {
7597 Register objReg = ToRegister(lir->output());
7598 Register tempReg = ToRegister(lir->temp0());
7600 OutOfLineCode* ool;
7601 switch (lir->mir()->type()) {
7602 case MNewIterator::ArrayIterator: {
7603 using Fn = ArrayIteratorObject* (*)(JSContext*);
7604 ool = oolCallVM<Fn, NewArrayIterator>(lir, ArgList(),
7605 StoreRegisterTo(objReg));
7606 break;
7608 case MNewIterator::StringIterator: {
7609 using Fn = StringIteratorObject* (*)(JSContext*);
7610 ool = oolCallVM<Fn, NewStringIterator>(lir, ArgList(),
7611 StoreRegisterTo(objReg));
7612 break;
7614 case MNewIterator::RegExpStringIterator: {
7615 using Fn = RegExpStringIteratorObject* (*)(JSContext*);
7616 ool = oolCallVM<Fn, NewRegExpStringIterator>(lir, ArgList(),
7617 StoreRegisterTo(objReg));
7618 break;
7620 default:
7621 MOZ_CRASH("unexpected iterator type");
7624 TemplateObject templateObject(lir->mir()->templateObject());
7625 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
7626 ool->entry());
7628 masm.bind(ool->rejoin());
// LNewTypedArray with a fixed, template-known length: allocate inline and
// initialize the typed array slots, falling back to
// NewTypedArrayWithTemplateAndLength on failure.
7631 void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) {
7632 Register objReg = ToRegister(lir->output());
7633 Register tempReg = ToRegister(lir->temp0());
7634 Register lengthReg = ToRegister(lir->temp1());
7635 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
7637 JSObject* templateObject = lir->mir()->templateObject();
7638 gc::Heap initialHeap = lir->mir()->initialHeap();
7640 auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();
7642 size_t n = ttemplate->length();
7643 MOZ_ASSERT(n <= INT32_MAX,
7644 "Template objects are only created for int32 lengths");
7646 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
7647 OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
7648 lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
7649 StoreRegisterTo(objReg));
7651 TemplateObject templateObj(templateObject);
7652 masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
7654 masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
7655 ttemplate, MacroAssembler::TypedArrayLength::Fixed);
7657 masm.bind(ool->rejoin());
// LNewTypedArrayDynamicLength: like visitNewTypedArray but the length comes
// from a register, so slot initialization uses the Dynamic length mode.
7660 void CodeGenerator::visitNewTypedArrayDynamicLength(
7661 LNewTypedArrayDynamicLength* lir) {
7662 Register lengthReg = ToRegister(lir->length());
7663 Register objReg = ToRegister(lir->output());
7664 Register tempReg = ToRegister(lir->temp0());
7665 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
7667 JSObject* templateObject = lir->mir()->templateObject();
7668 gc::Heap initialHeap = lir->mir()->initialHeap();
7670 auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();
7672 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
7673 OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
7674 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
7675 StoreRegisterTo(objReg));
7677 // Volatile |lengthReg| is saved across the ABI call in |initTypedArraySlots|.
7678 MOZ_ASSERT_IF(lengthReg.volatile_(), liveRegs.has(lengthReg));
7680 TemplateObject templateObj(templateObject);
7681 masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
7683 masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
7684 ttemplate,
7685 MacroAssembler::TypedArrayLength::Dynamic);
7687 masm.bind(ool->rejoin());
// Construct a typed array from an existing array-like object; always a VM
// call. Arguments are pushed in reverse order of the Fn signature.
void CodeGenerator::visitNewTypedArrayFromArray(LNewTypedArrayFromArray* lir) {
  pushArg(ToRegister(lir->array()));
  pushArg(ImmGCPtr(lir->mir()->templateObject()));

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
  callVM<Fn, js::NewTypedArrayWithTemplateAndArray>(lir);
}
// Construct a typed array view on an ArrayBuffer with (possibly undefined)
// byteOffset/length values; always a VM call.
void CodeGenerator::visitNewTypedArrayFromArrayBuffer(
    LNewTypedArrayFromArrayBuffer* lir) {
  pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::LengthIndex));
  pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::ByteOffsetIndex));
  pushArg(ToRegister(lir->arrayBuffer()));
  pushArg(ImmGCPtr(lir->mir()->templateObject()));

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
                                   HandleValue, HandleValue);
  callVM<Fn, js::NewTypedArrayWithTemplateAndBuffer>(lir);
}
// Implements Function.prototype.bind: pre-allocate the BoundFunctionObject
// inline when possible, then let the VM fill it in.
void CodeGenerator::visitBindFunction(LBindFunction* lir) {
  Register target = ToRegister(lir->target());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // Try to allocate a new BoundFunctionObject we can pass to the VM function.
  // If this fails, we set temp1 to nullptr so we do the allocation in C++.
  TemplateObject templateObject(lir->mir()->templateObject());
  Label allocOk, allocFailed;
  masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
                      &allocFailed);
  masm.jump(&allocOk);

  masm.bind(&allocFailed);
  masm.movePtr(ImmWord(0), temp1);

  masm.bind(&allocOk);

  // Set temp2 to the address of the first argument on the stack.
  // Note that the Value slots used for arguments are currently aligned for a
  // JIT call, even though that's not strictly necessary for calling into C++.
  uint32_t argc = lir->mir()->numStackArgs();
  if (JitStackValueAlignment > 1) {
    argc = AlignBytes(argc, JitStackValueAlignment);
  }
  uint32_t unusedStack = UnusedStackBytesForCall(argc);
  masm.computeEffectiveAddress(Address(masm.getStackPointer(), unusedStack),
                               temp2);

  // Push the (maybe-null) pre-allocated object, the *unaligned* argument
  // count, the argument base pointer, and the bind target.
  pushArg(temp1);
  pushArg(Imm32(lir->mir()->numStackArgs()));
  pushArg(temp2);
  pushArg(target);

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
                                      uint32_t, Handle<BoundFunctionObject*>);
  callVM<Fn, js::BoundFunctionObject::functionBindImpl>(lir);
}
// Allocate a BoundFunctionObject from a template object, with a VM fallback
// on allocation failure.
void CodeGenerator::visitNewBoundFunction(LNewBoundFunction* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  JSObject* templateObj = lir->mir()->templateObj();

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<BoundFunctionObject*>);
  OutOfLineCode* ool = oolCallVM<Fn, BoundFunctionObject::createWithTemplate>(
      lir, ArgList(ImmGCPtr(templateObj)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Out-of-line object allocation for JSOp::NewObject: jumps to the VM-call
// path in visitOutOfLineNewObject when inline allocation fails.
class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator> {
  LNewObject* lir_;

 public:
  explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineNewObject(this);
  }

  LNewObject* lir() const { return lir_; }
};
// Slow path for LNewObject: allocate the object through the VM. Used both
// directly (when the MIR says VM call) and as the OOL path of visitNewObject.
void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) {
  Register objReg = ToRegister(lir->output());

  // Not an LIR call instruction, so registers must be saved/restored manually
  // around the VM call.
  MOZ_ASSERT(!lir->isCall());
  saveLive(lir);

  JSObject* templateObject = lir->mir()->templateObject();

  // If we're making a new object with a class prototype (that is, an object
  // that derives its class from its prototype instead of being
  // PlainObject::class_'d) from self-hosted code, we need a different init
  // function.
  switch (lir->mir()->mode()) {
    case MNewObject::ObjectLiteral: {
      MOZ_ASSERT(!templateObject);
      pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
      pushArg(ImmGCPtr(lir->mir()->block()->info().script()));

      using Fn = JSObject* (*)(JSContext*, HandleScript, const jsbytecode* pc);
      callVM<Fn, NewObjectOperation>(lir);
      break;
    }

    case MNewObject::ObjectCreate: {
      pushArg(ImmGCPtr(templateObject));

      using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
      callVM<Fn, ObjectCreateWithTemplate>(lir);
      break;
    }
  }

  masm.storeCallPointerResult(objReg);

  // The output register must not be clobbered when restoring live registers.
  MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
  restoreLive(lir);
}
// Returns false when the allocation code can skip pre-filling the new
// object's fixed slots with |undefined|, because every fixed slot is provably
// written by StoreFixedSlot instructions before any GC or VM call can observe
// the object. Returns true (i.e. "must init") in all other cases.
static bool ShouldInitFixedSlots(LNewPlainObject* lir, const Shape* shape,
                                 uint32_t nfixed) {
  // Look for StoreFixedSlot instructions following an object allocation
  // that write to this object before a GC is triggered or this object is
  // passed to a VM call. If all fixed slots will be initialized, the
  // allocation code doesn't need to set the slots to |undefined|.

  if (nfixed == 0) {
    return false;
  }

  // Keep track of the fixed slots that are initialized. initializedSlots is
  // a bit mask with a bit for each slot.
  MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
  static_assert(NativeObject::MAX_FIXED_SLOTS <= 32,
                "Slot bits must fit in 32 bits");
  uint32_t initializedSlots = 0;
  uint32_t numInitialized = 0;

  MInstruction* allocMir = lir->mir();
  MBasicBlock* block = allocMir->block();

  // Skip the allocation instruction.
  MInstructionIterator iter = block->begin(allocMir);
  MOZ_ASSERT(*iter == allocMir);
  iter++;

  // Handle the leading shape guard, if present. Subsequent stores may refer
  // to the guard instead of the allocation, so track it as |allocMir|.
  for (; iter != block->end(); iter++) {
    if (iter->isConstant()) {
      // This instruction won't trigger a GC or read object slots.
      continue;
    }
    if (iter->isGuardShape()) {
      auto* guard = iter->toGuardShape();
      if (guard->object() != allocMir || guard->shape() != shape) {
        // Guard on a different object/shape: be conservative.
        return true;
      }
      allocMir = guard;
      iter++;
    }
    break;
  }

  for (; iter != block->end(); iter++) {
    if (iter->isConstant() || iter->isPostWriteBarrier()) {
      // These instructions won't trigger a GC or read object slots.
      continue;
    }

    if (iter->isStoreFixedSlot()) {
      MStoreFixedSlot* store = iter->toStoreFixedSlot();
      if (store->object() != allocMir) {
        return true;
      }

      // We may not initialize this object slot on allocation, so the
      // pre-barrier could read uninitialized memory. Simply disable
      // the barrier for this store: the object was just initialized
      // so the barrier is not necessary.
      store->setNeedsBarrier(false);

      uint32_t slot = store->slot();
      MOZ_ASSERT(slot < nfixed);
      if ((initializedSlots & (1 << slot)) == 0) {
        numInitialized++;
        initializedSlots |= (1 << slot);
      }

      if (numInitialized == nfixed) {
        // All fixed slots will be initialized.
        MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
        return false;
      }
      continue;
    }

    // Unhandled instruction, assume it bails or reads object slots.
    return true;
  }

  MOZ_CRASH("Shouldn't get here");
}
// JSOp::NewObject: inline-allocate from the template object when possible,
// otherwise (or on inline failure) go through visitNewObjectVMCall.
void CodeGenerator::visitNewObject(LNewObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());

  if (lir->mir()->isVMCall()) {
    visitNewObjectVMCall(lir);
    return;
  }

  OutOfLineNewObject* ool = new (alloc()) OutOfLineNewObject(lir);
  addOutOfLineCode(ool, lir->mir());

  TemplateObject templateObject(lir->mir()->templateObject());

  masm.createGCObject(objReg, tempReg, templateObject,
                      lir->mir()->initialHeap(), ool->entry());

  masm.bind(ool->rejoin());
}
// OOL trampoline: perform the VM-call allocation, then rejoin the inline path.
void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) {
  visitNewObjectVMCall(ool->lir());
  masm.jump(ool->rejoin());
}
// Allocate a plain object with a known shape/alloc-kind, skipping slot
// initialization when ShouldInitFixedSlots proves it unnecessary.
void CodeGenerator::visitNewPlainObject(LNewPlainObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register temp0Reg = ToRegister(lir->temp0());
  Register temp1Reg = ToRegister(lir->temp1());
  Register shapeReg = ToRegister(lir->temp2());

  auto* mir = lir->mir();
  const Shape* shape = mir->shape();
  gc::Heap initialHeap = mir->initialHeap();
  gc::AllocKind allocKind = mir->allocKind();

  using Fn =
      JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind, gc::Heap);
  OutOfLineCode* ool = oolCallVM<Fn, NewPlainObjectOptimizedFallback>(
      lir,
      ArgList(ImmGCPtr(shape), Imm32(int32_t(allocKind)),
              Imm32(int32_t(initialHeap))),
      StoreRegisterTo(objReg));

  // If all fixed slots are provably stored before a GC/VM call can observe
  // the object, don't waste time writing |undefined| into them.
  bool initContents = ShouldInitFixedSlots(lir, shape, mir->numFixedSlots());

  masm.movePtr(ImmGCPtr(shape), shapeReg);
  masm.createPlainGCObject(
      objReg, shapeReg, temp0Reg, temp1Reg, mir->numFixedSlots(),
      mir->numDynamicSlots(), allocKind, initialHeap, ool->entry(),
      AllocSiteInput(gc::CatchAllAllocSite::Optimized), initContents);

#ifdef DEBUG
  // ShouldInitFixedSlots expects that the leading GuardShape will never fail,
  // so ensure the newly created object has the correct shape. Should the guard
  // ever fail, we may end up with uninitialized fixed slots, which can confuse
  // the GC.
  // (The assumeUnreachable below is only reached when the shape does NOT
  // match; its message states the violated expectation.)
  Label ok;
  masm.branchTestObjShape(Assembler::Equal, objReg, shape, temp0Reg, objReg,
                          &ok);
  masm.assumeUnreachable("Newly created object has the correct shape");
  masm.bind(&ok);
#endif

  masm.bind(ool->rejoin());
}
// Allocate an ArrayObject with inline (fixed) element storage sized from the
// array length, with a VM fallback.
void CodeGenerator::visitNewArrayObject(LNewArrayObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register temp0Reg = ToRegister(lir->temp0());
  Register shapeReg = ToRegister(lir->temp1());

  auto* mir = lir->mir();
  uint32_t arrayLength = mir->length();

  // Pick an alloc kind large enough for the elements header plus the
  // requested length; arrays can use background-finalized kinds.
  gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
  MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
  allocKind = ForegroundToBackgroundAllocKind(allocKind);

  uint32_t slotCount = GetGCKindSlots(allocKind);
  MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
  uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;

  const Shape* shape = mir->shape();

  NewObjectKind objectKind =
      mir->initialHeap() == gc::Heap::Tenured ? TenuredObject : GenericObject;

  using Fn =
      ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, NewObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, NewArrayObjectOptimizedFallback>(
      lir,
      ArgList(Imm32(arrayLength), Imm32(int32_t(allocKind)), Imm32(objectKind)),
      StoreRegisterTo(objReg));

  // NOTE(review): this uses ImmPtr for |shape| while visitNewPlainObject uses
  // ImmGCPtr — confirm the shape here is guaranteed to be kept alive/untraced
  // by other means.
  masm.movePtr(ImmPtr(shape), shapeReg);
  masm.createArrayWithFixedElements(
      objReg, shapeReg, temp0Reg, InvalidReg, arrayLength, arrayCapacity, 0, 0,
      allocKind, mir->initialHeap(), ool->entry(),
      AllocSiteInput(gc::CatchAllAllocSite::Optimized));
  masm.bind(ool->rejoin());
}
// Allocate the NamedLambdaObject environment from its template, with a VM
// fallback that recreates it from the (possibly lazy) function.
void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());
  const CompileInfo& info = lir->mir()->block()->info();

  using Fn = js::NamedLambdaObject* (*)(JSContext*, HandleFunction);
  OutOfLineCode* ool = oolCallVM<Fn, NamedLambdaObject::createWithoutEnclosing>(
      lir, ArgList(info.funMaybeLazy()), StoreRegisterTo(objReg));

  TemplateObject templateObject(lir->mir()->templateObj());

  masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Allocate a CallObject (function environment) from its template object.
void CodeGenerator::visitNewCallObject(LNewCallObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  CallObject* templateObj = lir->mir()->templateObject();

  using Fn = CallObject* (*)(JSContext*, Handle<SharedShape*>);
  OutOfLineCode* ool = oolCallVM<Fn, CallObject::createWithShape>(
      lir, ArgList(ImmGCPtr(templateObj->sharedShape())),
      StoreRegisterTo(objReg));

  // Inline call object creation, using the OOL path only for tricky cases.
  TemplateObject templateObject(templateObj);
  masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Allocate a String wrapper object (new String(s)) inline: create from the
// template, then store the primitive value and length slots directly.
void CodeGenerator::visitNewStringObject(LNewStringObject* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  StringObject* templateObj = lir->mir()->templateObj();

  using Fn = JSObject* (*)(JSContext*, HandleString);
  OutOfLineCode* ool = oolCallVM<Fn, NewStringObject>(lir, ArgList(input),
                                                      StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                      ool->entry());

  // Fill in the two reserved slots the template can't provide: the wrapped
  // string and its (int32) length.
  masm.loadStringLength(input, temp);

  masm.storeValue(JSVAL_TYPE_STRING, input,
                  Address(output, StringObject::offsetOfPrimitiveValue()));
  masm.storeValue(JSVAL_TYPE_INT32, temp,
                  Address(output, StringObject::offsetOfLength()));

  masm.bind(ool->rejoin());
}
// Define a computed-key getter/setter on an object; always a VM call.
void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) {
  Register obj = ToRegister(lir->object());
  Register value = ToRegister(lir->value());

  // Pushed in reverse order of the Fn signature below.
  pushArg(value);
  pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex));
  pushArg(obj);
  pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));

  using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
                      HandleObject);
  callVM<Fn, InitElemGetterSetterOperation>(lir);
}
// Set the [[Prototype]] of an object literal (__proto__: v); always a VM call.
void CodeGenerator::visitMutateProto(LMutateProto* lir) {
  Register objReg = ToRegister(lir->object());

  pushArg(ToValue(lir, LMutateProto::ValueIndex));
  pushArg(objReg);

  using Fn =
      bool (*)(JSContext* cx, Handle<PlainObject*> obj, HandleValue value);
  callVM<Fn, MutatePrototype>(lir);
}
// Define a named-property getter/setter on an object; always a VM call.
void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) {
  Register obj = ToRegister(lir->object());
  Register value = ToRegister(lir->value());

  // Pushed in reverse order of the Fn signature below.
  pushArg(value);
  pushArg(ImmGCPtr(lir->mir()->name()));
  pushArg(obj);
  pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));

  using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject,
                      Handle<PropertyName*>, HandleObject);
  callVM<Fn, InitPropGetterSetterOperation>(lir);
}
// Create the |this| object for a constructor call; callee/newTarget may each
// be a compile-time constant or a register. Always a VM call.
void CodeGenerator::visitCreateThis(LCreateThis* lir) {
  const LAllocation* callee = lir->callee();
  const LAllocation* newTarget = lir->newTarget();

  if (newTarget->isConstant()) {
    pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
  } else {
    pushArg(ToRegister(newTarget));
  }

  if (callee->isConstant()) {
    pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
  } else {
    pushArg(ToRegister(callee));
  }

  using Fn = bool (*)(JSContext* cx, HandleObject callee,
                      HandleObject newTarget, MutableHandleValue rval);
  callVM<Fn, jit::CreateThisFromIon>(lir);
}
// Create the function's |arguments| object. Fast path: allocate from the
// template and finish it with an ABI call that must not GC; slow path:
// ArgumentsObject::createForIon via callVM.
void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) {
  // This should be getting constructed in the first block only, and not any OSR
  // entry blocks.
  MOZ_ASSERT(lir->mir()->block()->id() == 0);

  Register callObj = ToRegister(lir->callObject());
  Register temp0 = ToRegister(lir->temp0());
  Label done;

  if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
    Register objTemp = ToRegister(lir->temp1());
    Register cxTemp = ToRegister(lir->temp2());

    // Preserve callObj across the ABI call below.
    masm.Push(callObj);

    // Try to allocate an arguments object. This will leave the reserved
    // slots uninitialized, so it's important we don't GC until we
    // initialize these slots in ArgumentsObject::finishForIonPure.
    Label failure;
    TemplateObject templateObject(templateObj);
    masm.createGCObject(objTemp, temp0, templateObject, gc::Heap::Default,
                        &failure,
                        /* initContents = */ false);

    // temp0 = address of the JitFrameLayout (frame pointer equivalent).
    masm.moveStackPtrTo(temp0);
    masm.addPtr(Imm32(masm.framePushed()), temp0);

    using Fn = ArgumentsObject* (*)(JSContext* cx, jit::JitFrameLayout* frame,
                                    JSObject* scopeChain, ArgumentsObject* obj);
    masm.setupAlignedABICall();
    masm.loadJSContext(cxTemp);
    masm.passABIArg(cxTemp);
    masm.passABIArg(temp0);
    masm.passABIArg(callObj);
    masm.passABIArg(objTemp);

    masm.callWithABI<Fn, ArgumentsObject::finishForIonPure>();
    // A null return means the pure finisher bailed; fall through to the VM.
    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);

    // Discard saved callObj on the stack.
    masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
    masm.jump(&done);

    masm.bind(&failure);
    masm.Pop(callObj);
  }

  // Slow path: recompute the frame address and call into the VM.
  masm.moveStackPtrTo(temp0);
  masm.addPtr(Imm32(frameSize()), temp0);

  pushArg(callObj);
  pushArg(temp0);

  using Fn = ArgumentsObject* (*)(JSContext*, JitFrameLayout*, HandleObject);
  callVM<Fn, ArgumentsObject::createForIon>(lir);

  masm.bind(&done);
}
// Create an |arguments| object for an inlined frame. The actual argument
// Values are first materialized contiguously on the stack; then either a
// no-GC ABI finisher (fast path) or a VM call (slow path) builds the object.
void CodeGenerator::visitCreateInlinedArgumentsObject(
    LCreateInlinedArgumentsObject* lir) {
  Register callObj = ToRegister(lir->getCallObject());
  Register callee = ToRegister(lir->getCallee());
  Register argsAddress = ToRegister(lir->temp1());
  Register argsObj = ToRegister(lir->temp2());

  // TODO: Do we have to worry about alignment here?

  // Create a contiguous array of values for ArgumentsObject::create
  // by pushing the arguments onto the stack in reverse order.
  uint32_t argc = lir->mir()->numActuals();
  for (uint32_t i = 0; i < argc; i++) {
    uint32_t argNum = argc - i - 1;
    uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(argNum);
    ConstantOrRegister arg =
        toConstantOrRegister(lir, index, lir->mir()->getArg(argNum)->type());
    masm.Push(arg);
  }
  masm.moveStackPtrTo(argsAddress);

  Label done;
  if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
    LiveRegisterSet liveRegs;
    liveRegs.add(callObj);
    liveRegs.add(callee);

    masm.PushRegsInMask(liveRegs);

    // We are free to clobber all registers, as LCreateInlinedArgumentsObject is
    // a call instruction.
    AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    allRegs.take(callObj);
    allRegs.take(callee);
    allRegs.take(argsObj);
    allRegs.take(argsAddress);

    Register temp3 = allRegs.takeAny();
    Register temp4 = allRegs.takeAny();

    // Try to allocate an arguments object. This will leave the reserved slots
    // uninitialized, so it's important we don't GC until we initialize these
    // slots in ArgumentsObject::finishForIonPure.
    Label failure;
    TemplateObject templateObject(templateObj);
    masm.createGCObject(argsObj, temp3, templateObject, gc::Heap::Default,
                        &failure,
                        /* initContents = */ false);

    Register numActuals = temp3;
    masm.move32(Imm32(argc), numActuals);

    using Fn = ArgumentsObject* (*)(JSContext*, JSObject*, JSFunction*, Value*,
                                    uint32_t, ArgumentsObject*);
    masm.setupAlignedABICall();
    masm.loadJSContext(temp4);
    masm.passABIArg(temp4);
    masm.passABIArg(callObj);
    masm.passABIArg(callee);
    masm.passABIArg(argsAddress);
    masm.passABIArg(numActuals);
    masm.passABIArg(argsObj);

    masm.callWithABI<Fn, ArgumentsObject::finishInlineForIonPure>();
    // Null return: pure finisher bailed; fall through to the VM path.
    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);

    // Discard saved callObj, callee, and values array on the stack.
    masm.addToStackPtr(
        Imm32(MacroAssembler::PushRegsInMaskSizeInBytes(liveRegs) +
              argc * sizeof(Value)));
    masm.jump(&done);

    masm.bind(&failure);
    masm.PopRegsInMask(liveRegs);

    // Reload argsAddress because it may have been overridden.
    masm.moveStackPtrTo(argsAddress);
  }

  pushArg(Imm32(argc));
  pushArg(callObj);
  pushArg(callee);
  pushArg(argsAddress);

  using Fn = ArgumentsObject* (*)(JSContext*, Value*, HandleFunction,
                                  HandleObject, uint32_t);
  callVM<Fn, ArgumentsObject::createForInlinedIon>(lir);

  // Discard the array of values.
  masm.freeStack(argc * sizeof(Value));

  masm.bind(&done);
}
// Emit code to select one of an inlined frame's argument Values by runtime
// |index|. Since arguments live in LIR operands (not memory), this emits a
// compare-and-move chain over all possible indices.
template <class GetInlinedArgument>
void CodeGenerator::emitGetInlinedArgument(GetInlinedArgument* lir,
                                           Register index,
                                           ValueOperand output) {
  uint32_t numActuals = lir->mir()->numActuals();
  MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);

  // The index has already been bounds-checked, so the code we
  // generate here should be unreachable. We can end up in this
  // situation in self-hosted code using GetArgument(), or in a
  // monomorphically inlined function if we've inlined some CacheIR
  // that was created for a different caller.
  if (numActuals == 0) {
    masm.assumeUnreachable("LGetInlinedArgument: invalid index");
    return;
  }

  // Check the first n-1 possible indices.
  Label done;
  for (uint32_t i = 0; i < numActuals - 1; i++) {
    Label skip;
    ConstantOrRegister arg = toConstantOrRegister(
        lir, GetInlinedArgument::ArgIndex(i), lir->mir()->getArg(i)->type());
    masm.branch32(Assembler::NotEqual, index, Imm32(i), &skip);
    masm.moveValue(arg, output);

    masm.jump(&done);
    masm.bind(&skip);
  }

#ifdef DEBUG
  Label skip;
  masm.branch32(Assembler::Equal, index, Imm32(numActuals - 1), &skip);
  masm.assumeUnreachable("LGetInlinedArgument: invalid index");
  masm.bind(&skip);
#endif

  // The index has already been bounds-checked, so load the last argument.
  uint32_t lastIdx = numActuals - 1;
  ConstantOrRegister arg =
      toConstantOrRegister(lir, GetInlinedArgument::ArgIndex(lastIdx),
                           lir->mir()->getArg(lastIdx)->type());
  masm.moveValue(arg, output);
  masm.bind(&done);
}
// Bounds-checked inlined-argument read; delegates to emitGetInlinedArgument.
void CodeGenerator::visitGetInlinedArgument(LGetInlinedArgument* lir) {
  Register index = ToRegister(lir->getIndex());
  ValueOperand output = ToOutValue(lir);

  emitGetInlinedArgument(lir, index, output);
}
// Inlined-argument read that tolerates out-of-bounds non-negative indices by
// producing |undefined|; negative indices bail out.
void CodeGenerator::visitGetInlinedArgumentHole(LGetInlinedArgumentHole* lir) {
  Register index = ToRegister(lir->getIndex());
  ValueOperand output = ToOutValue(lir);

  uint32_t numActuals = lir->mir()->numActuals();

  if (numActuals == 0) {
    bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
    masm.moveValue(UndefinedValue(), output);
    return;
  }

  // Unsigned AboveOrEqual also catches negative indices; those are then
  // separated out and bailed below.
  Label outOfBounds, done;
  masm.branch32(Assembler::AboveOrEqual, index, Imm32(numActuals),
                &outOfBounds);

  emitGetInlinedArgument(lir, index, output);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
  masm.moveValue(UndefinedValue(), output);

  masm.bind(&done);
}
// Load arguments[argno] (constant index) straight out of the ArgumentsData.
void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->temp0());
  Register argsObj = ToRegister(lir->argsObject());
  ValueOperand out = ToOutValue(lir);

  // temp = the object's ArgumentsData*.
  masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                   temp);
  Address argAddr(temp, ArgumentsData::offsetOfArgs() +
                            lir->mir()->argno() * sizeof(Value));
  masm.loadValue(argAddr, out);
#ifdef DEBUG
  // Deleted/forwarded elements are stored as magic values; MIR guards should
  // have ruled that out.
  Label success;
  masm.branchTestMagic(Assembler::NotEqual, out, &success);
  masm.assumeUnreachable(
      "Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
  masm.bind(&success);
#endif
}
// Store arguments[argno] (constant index) into the ArgumentsData, with the
// required GC pre-barrier on the overwritten slot.
void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->getTemp(0));
  Register argsObj = ToRegister(lir->argsObject());
  ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);

  // temp = the object's ArgumentsData*.
  masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                   temp);
  Address argAddr(temp, ArgumentsData::offsetOfArgs() +
                            lir->mir()->argno() * sizeof(Value));
  emitPreBarrier(argAddr);
#ifdef DEBUG
  Label success;
  masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
  masm.assumeUnreachable(
      "Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
  masm.bind(&success);
#endif
  masm.storeValue(value, argAddr);
}
// Load arguments[index] (runtime index); bails out on out-of-bounds or
// deleted/forwarded elements (handled inside loadArgumentsObjectElement).
void CodeGenerator::visitLoadArgumentsObjectArg(LLoadArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->temp0());
  Register argsObj = ToRegister(lir->argsObject());
  Register index = ToRegister(lir->index());
  ValueOperand out = ToOutValue(lir);

  Label bail;
  masm.loadArgumentsObjectElement(argsObj, index, out, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Like visitLoadArgumentsObjectArg, but the "Hole" variant yields |undefined|
// for out-of-bounds indices instead of bailing (bail only on tricky cases).
void CodeGenerator::visitLoadArgumentsObjectArgHole(
    LLoadArgumentsObjectArgHole* lir) {
  Register temp = ToRegister(lir->temp0());
  Register argsObj = ToRegister(lir->argsObject());
  Register index = ToRegister(lir->index());
  ValueOperand out = ToOutValue(lir);

  Label bail;
  masm.loadArgumentsObjectElementHole(argsObj, index, out, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// |index in arguments|: boolean existence test for an arguments element,
// bailing out when the macro-assembler helper can't answer inline.
void CodeGenerator::visitInArgumentsObjectArg(LInArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->temp0());
  Register argsObj = ToRegister(lir->argsObject());
  Register index = ToRegister(lir->index());
  Register out = ToRegister(lir->output());

  Label bail;
  masm.loadArgumentsObjectElementExists(argsObj, index, out, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Read arguments.length; bails when the length has been overridden (the
// fallible loadArgumentsObjectLength checks the override flag).
void CodeGenerator::visitArgumentsObjectLength(LArgumentsObjectLength* lir) {
  Register argsObj = ToRegister(lir->argsObject());
  Register out = ToRegister(lir->output());

  Label bail;
  masm.loadArgumentsObjectLength(argsObj, out, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Convert an arguments object to a dense ArrayObject; always a VM call.
void CodeGenerator::visitArrayFromArgumentsObject(
    LArrayFromArgumentsObject* lir) {
  pushArg(ToRegister(lir->argsObject()));

  using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
  callVM<Fn, js::ArrayFromArgumentsObject>(lir);
}
// Guard that none of the given ArgumentsObject flags are set; bail otherwise.
void CodeGenerator::visitGuardArgumentsObjectFlags(
    LGuardArgumentsObjectFlags* lir) {
  Register argsObj = ToRegister(lir->argsObject());
  Register temp = ToRegister(lir->temp0());

  Label bail;
  masm.branchTestArgumentsObjectFlags(argsObj, temp, lir->mir()->flags(),
                                      Assembler::NonZero, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Extract the bound-argument count, which is packed into the upper bits of
// the BoundFunctionObject's int32 flags slot.
void CodeGenerator::visitBoundFunctionNumArgs(LBoundFunctionNumArgs* lir) {
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
                  output);
  masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
}
// Guard that a bound function is a constructor (IsConstructorFlag set in its
// flags slot); bail out if not.
void CodeGenerator::visitGuardBoundFunctionIsConstructor(
    LGuardBoundFunctionIsConstructor* lir) {
  Register obj = ToRegister(lir->object());

  Label bail;
  Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(BoundFunctionObject::IsConstructorFlag), &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Implements the [[Construct]] return rule: if the constructor returned an
// object, use it; otherwise use the |this| object (obj).
void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) {
  ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  Label valueIsObject, end;

  masm.branchTestObject(Assembler::Equal, value, &valueIsObject);

  // Value is not an object. Return that other object.
  masm.movePtr(obj, output);
  masm.jump(&end);

  // Value is an object. Return unbox(Value).
  masm.bind(&valueIsObject);
  Register payload = masm.extractObject(value, output);
  if (payload != output) {
    masm.movePtr(payload, output);
  }

  masm.bind(&end);
}
// Out-of-line path for LBoxNonStrictThis: handles the case where |this| is
// not already an object (null/undefined or a primitive needing boxing).
class OutOfLineBoxNonStrictThis : public OutOfLineCodeBase<CodeGenerator> {
  LBoxNonStrictThis* ins_;

 public:
  explicit OutOfLineBoxNonStrictThis(LBoxNonStrictThis* ins) : ins_(ins) {}
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineBoxNonStrictThis(this);
  }
  LBoxNonStrictThis* ins() const { return ins_; }
};
// Non-strict |this| coercion: fast path just unboxes an object |this|;
// anything else goes to the OOL path.
void CodeGenerator::visitBoxNonStrictThis(LBoxNonStrictThis* lir) {
  ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
  Register output = ToRegister(lir->output());

  auto* ool = new (alloc()) OutOfLineBoxNonStrictThis(lir);
  addOutOfLineCode(ool, lir->mir());

  masm.fallibleUnboxObject(value, output, ool->entry());
  masm.bind(ool->rejoin());
}
// OOL path for non-strict |this|: null/undefined map to the global this;
// other primitives are boxed via a VM call.
void CodeGenerator::visitOutOfLineBoxNonStrictThis(
    OutOfLineBoxNonStrictThis* ool) {
  LBoxNonStrictThis* lir = ool->ins();

  ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
  Register output = ToRegister(lir->output());

  Label notNullOrUndefined;
  {
    // Scope so the ScratchTagScope is released before the VM call below.
    Label isNullOrUndefined;
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);
    masm.branchTestUndefined(Assembler::Equal, tag, &isNullOrUndefined);
    masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
    masm.bind(&isNullOrUndefined);
    masm.movePtr(ImmGCPtr(lir->mir()->globalThis()), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&notNullOrUndefined);

  // Not an LIR call instruction: manually save/restore live registers around
  // the VM call, keeping the output register's new value.
  saveLive(lir);

  pushArg(value);
  using Fn = JSObject* (*)(JSContext*, HandleValue);
  callVM<Fn, BoxNonStrictThis>(lir);

  StoreRegisterTo(output).generate(this);
  restoreLiveIgnore(lir, StoreRegisterTo(output).clobbered());

  masm.jump(ool->rejoin());
}
// Compute the implicit |this| for an unqualified name call; always a VM call.
void CodeGenerator::visitImplicitThis(LImplicitThis* lir) {
  pushArg(ImmGCPtr(lir->mir()->name()));
  pushArg(ToRegister(lir->env()));

  using Fn = bool (*)(JSContext*, HandleObject, Handle<PropertyName*>,
                      MutableHandleValue);
  callVM<Fn, ImplicitThisOperation>(lir);
}
// Load a dense array's length from its elements header as int32.
void CodeGenerator::visitArrayLength(LArrayLength* lir) {
  Register elements = ToRegister(lir->elements());
  Register output = ToRegister(lir->output());

  Address length(elements, ObjectElements::offsetOfLength());
  masm.load32(length, output);

  // Bail out if the length doesn't fit in int32 (sign bit set when read as
  // a signed 32-bit value).
  bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
}
// Store |index + 1| into the given length field. For a register index the
// value is incremented in place for the store and then decremented again,
// because the register still holds the live index for the caller.
static void SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index,
                               const Address& length) {
  if (index->isConstant()) {
    masm.store32(Imm32(ToInt32(index) + 1), length);
  } else {
    Register newLength = ToRegister(index);
    masm.add32(Imm32(1), newLength);
    masm.store32(newLength, length);
    // Restore the original index value; the register is not a temp.
    masm.sub32(Imm32(1), newLength);
  }
}
// Update a dense array's length after an element write at |index|.
void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) {
  Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
  SetLengthFromIndex(masm, lir->index(), length);
}
// Read fn.length without a property lookup; bails when the value can't be
// derived from the function's flags (self-hosted lazy or resolved length).
void CodeGenerator::visitFunctionLength(LFunctionLength* lir) {
  Register function = ToRegister(lir->function());
  Register output = ToRegister(lir->output());

  Label bail;

  // Get the JSFunction flags.
  masm.load32(Address(function, JSFunction::offsetOfFlagsAndArgCount()),
              output);

  // Functions with a SelfHostedLazyScript must be compiled with the slow-path
  // before the function length is known. If the length was previously resolved,
  // the length property may be shadowed.
  masm.branchTest32(
      Assembler::NonZero, output,
      Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
      &bail);

  masm.loadFunctionLength(function, output, output, &bail);

  bailoutFrom(&bail, lir->snapshot());
}
8616 void CodeGenerator::visitFunctionName(LFunctionName* lir) {
8617 Register function = ToRegister(lir->function());
8618 Register output = ToRegister(lir->output());
8620 Label bail;
8622 const JSAtomState& names = gen->runtime->names();
8623 masm.loadFunctionName(function, output, ImmGCPtr(names.empty_), &bail);
8625 bailoutFrom(&bail, lir->snapshot());
// Load a pointer to |range|'s current entry into |front|, given the current
// entry index |i|. Specialized below for ValueMap and ValueSet, which have
// different entry sizes.
template <class OrderedHashTable>
static void RangeFront(MacroAssembler&, Register, Register, Register);
template <>
void RangeFront<ValueMap>(MacroAssembler& masm, Register range, Register i,
                          Register front) {
  // front = range->hashTable->implData
  masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
  masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);

  MOZ_ASSERT(ValueMap::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
  // front += i * 24, computed as (i * 3) << 3. Note: this clobbers |i|.
  masm.mulBy3(i, i);
  masm.lshiftPtr(Imm32(3), i);
  masm.addPtr(i, front);
}
template <>
void RangeFront<ValueSet>(MacroAssembler& masm, Register range, Register i,
                          Register front) {
  // front = range->hashTable->implData
  masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
  masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);

  MOZ_ASSERT(ValueSet::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
  // front += i * 16. Note: this clobbers |i|.
  masm.lshiftPtr(Imm32(4), i);
  masm.addPtr(i, front);
}
// Advance |range| past its current entry: bump the popped-entry count, then
// scan forward from index |i| (aliased to |temp|) until either the end of the
// data vector (|dataLength|) or the next live entry. |front| is advanced in
// lock-step with |i| so it keeps pointing at |range.ht.data[i]|.
template <class OrderedHashTable>
static void RangePopFront(MacroAssembler& masm, Register range, Register front,
                          Register dataLength, Register temp) {
  Register i = temp;

  masm.add32(Imm32(1),
             Address(range, OrderedHashTable::Range::offsetOfCount()));

  masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);

  Label done, seek;
  masm.bind(&seek);
  masm.add32(Imm32(1), i);
  masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);

  // We can add sizeof(Data) to |front| to select the next element, because
  // |front| and |range.ht.data[i]| point to the same location.
  MOZ_ASSERT(OrderedHashTable::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);

  // Removed entries are tagged with the "empty" hash-key magic value; keep
  // seeking past them.
  masm.branchTestMagic(Assembler::Equal,
                       Address(front, OrderedHashTable::offsetOfEntryKey()),
                       JS_HASH_KEY_EMPTY, &seek);

  masm.bind(&done);
  masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
}
// Unlink |range| from the hash table's doubly-linked list of live ranges and
// free its malloc'ed storage — unless |iter| is nursery-allocated, in which
// case the storage is freed when the nursery is collected.
template <class OrderedHashTable>
static inline void RangeDestruct(MacroAssembler& masm, Register iter,
                                 Register range, Register temp0,
                                 Register temp1) {
  Register next = temp0;
  Register prevp = temp1;

  // *range->prevp = range->next
  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
  masm.storePtr(next, Address(prevp, 0));

  Label hasNoNext;
  masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);

  // range->next->prevp = range->prevp
  masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));

  masm.bind(&hasNoNext);

  Label nurseryAllocated;
  masm.branchPtrInNurseryChunk(Assembler::Equal, iter, temp0,
                               &nurseryAllocated);

  masm.callFreeStub(range);

  masm.bind(&nurseryAllocated);
}
// Copy the current map entry's key and value into |result|'s first two fixed
// elements, emitting the required pre-barriers on the overwritten slots and a
// post-write barrier on |result| when either stored value is a nursery cell.
template <>
void CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result,
                                                     Register temp,
                                                     Register front) {
  size_t elementsOffset = NativeObject::offsetOfFixedElements();

  Address keyAddress(front, ValueMap::Entry::offsetOfKey());
  Address valueAddress(front, ValueMap::Entry::offsetOfValue());
  Address keyElemAddress(result, elementsOffset);
  Address valueElemAddress(result, elementsOffset + sizeof(Value));
  masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
  masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
  masm.storeValue(keyAddress, keyElemAddress, temp);
  masm.storeValue(valueAddress, valueElemAddress, temp);

  // Post-barrier |result| if either the key or the value is a nursery cell.
  Label emitBarrier, skipBarrier;
  masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp,
                                &emitBarrier);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp,
                                &skipBarrier);

  masm.bind(&emitBarrier);
  saveVolatile(temp);
  emitPostWriteBarrier(result);
  restoreVolatile(temp);

  masm.bind(&skipBarrier);
}
// Copy the current set entry's key into |result|'s first fixed element,
// emitting the required pre-barrier on the overwritten slot and a post-write
// barrier on |result| when the stored key is a nursery cell.
template <>
void CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result,
                                                     Register temp,
                                                     Register front) {
  size_t elementsOffset = NativeObject::offsetOfFixedElements();

  Address keyAddress(front, ValueSet::offsetOfEntryKey());
  Address keyElemAddress(result, elementsOffset);
  masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
  masm.storeValue(keyAddress, keyElemAddress, temp);

  Label skipBarrier;
  masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp,
                                &skipBarrier);

  saveVolatile(temp);
  emitPostWriteBarrier(result);
  restoreVolatile(temp);

  masm.bind(&skipBarrier);
}
// Fast path for self-hosted GetNextEntryForIterator: load the iterator's next
// live entry into |result|'s fixed elements and set |output| to 0, or set
// |output| to 1 when the iterator is exhausted (tearing down the underlying
// Range the first time that happens).
template <class IteratorObject, class OrderedHashTable>
void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) {
  Register iter = ToRegister(lir->iter());
  Register result = ToRegister(lir->result());
  Register temp = ToRegister(lir->temp0());
  Register dataLength = ToRegister(lir->temp1());
  Register range = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
  // only called with the correct iterator class. Assert here all self-
  // hosted callers of GetNextEntryForIterator perform this class check.
  // No Spectre mitigations are needed because this is DEBUG-only code.
  Label success;
  masm.branchTestObjClassNoSpectreMitigations(
      Assembler::Equal, iter, &IteratorObject::class_, temp, &success);
  masm.assumeUnreachable("Iterator object should have the correct class.");
  masm.bind(&success);
#endif

  masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(
                                     IteratorObject::RangeSlot)),
                   range);

  Label iterAlreadyDone, iterDone, done;
  // A null Range means the iterator was exhausted on an earlier call.
  masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);

  // Done if the current index has reached the table's data length.
  masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()),
               dataLength);
  masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()),
              dataLength);
  masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);

  // |iter| is reused as the |front| register below, so preserve it.
  masm.Push(iter);

  Register front = iter;
  RangeFront<OrderedHashTable>(masm, range, temp, front);

  emitLoadIteratorValues<OrderedHashTable>(result, temp, front);

  RangePopFront<OrderedHashTable>(masm, range, front, dataLength, temp);

  masm.Pop(iter);
  // 0 tells the self-hosted caller "not done".
  masm.move32(Imm32(0), output);

  masm.jump(&done);

  masm.bind(&iterDone);

  RangeDestruct<OrderedHashTable>(masm, iter, range, temp, dataLength);

  // Null out the Range slot so later calls take the already-done fast path.
  masm.storeValue(PrivateValue(nullptr),
                  Address(iter, NativeObject::getFixedSlotOffset(
                                    IteratorObject::RangeSlot)));

  masm.bind(&iterAlreadyDone);

  // 1 tells the self-hosted caller "done".
  masm.move32(Imm32(1), output);

  masm.bind(&done);
}
8829 void CodeGenerator::visitGetNextEntryForIterator(
8830 LGetNextEntryForIterator* lir) {
8831 if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
8832 emitGetNextEntryForIterator<MapIteratorObject, ValueMap>(lir);
8833 } else {
8834 MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
8835 emitGetNextEntryForIterator<SetIteratorObject, ValueSet>(lir);
// The point of these is to inform Ion of where these values already are; they
// don't normally generate (much) code.
void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
  LAllocation* output = lir->getDef(0)->output();
  MOZ_ASSERT(output->isStackArea());
  bool tempInit = false;
  for (auto iter = output->toStackArea()->results(); iter; iter.next()) {
    // Zero out ref stack results.
    if (iter.isWasmAnyRef()) {
      Register temp = ToRegister(lir->temp0());
      if (!tempInit) {
        // Materialize a null pointer once; reuse it for every ref slot.
        // The temp is only touched when at least one ref result exists.
        masm.xorPtr(temp, temp);
        tempInit = true;
      }
      masm.storePtr(temp, ToAddress(iter.alloc()));
    }
  }
}
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
#ifdef JS_64BIT
  if (MWasmRegisterResult* mir = lir->mir()) {
    if (mir->type() == MIRType::Int32) {
      // On 64-bit targets, canonicalize an int32 result by zeroing the upper
      // 32 bits of the full-width register.
      masm.widenInt32(ToRegister(lir->output()));
    }
  }
#endif
}
// Emit a wasm call (direct, import, indirect, funcref, builtin, or tail/return
// call), recording safepoints, stack maps, and — when inside a wasm |try| —
// the surrounding TryNote range.
void CodeGenerator::visitWasmCall(LWasmCall* lir) {
  const MWasmCallBase* callBase = lir->callBase();
  bool isReturnCall = lir->isReturnCall();

  // If this call is in Wasm try code block, initialise a wasm::TryNote for this
  // call.
  bool inTry = callBase->inTry();
  if (inTry) {
    size_t tryNoteIndex = callBase->tryNoteIndex();
    wasm::TryNoteVector& tryNotes = masm.tryNotes();
    wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
    tryNote.setTryBodyBegin(masm.currentOffset());
  }

  MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment ==
             0);
  static_assert(
      WasmStackAlignment >= ABIStackAlignment &&
          WasmStackAlignment % ABIStackAlignment == 0,
      "The wasm stack alignment should subsume the ABI-required alignment");

#ifdef DEBUG
  // Check at runtime (debug builds only) that the stack pointer really is
  // wasm-aligned at the call site.
  Label ok;
  masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif

  // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
  // instance and pinned regs. The only case where where we don't have to
  // reload the instance and pinned regs is when the callee preserves them.
  bool reloadRegs = true;
  bool switchRealm = true;

  const wasm::CallSiteDesc& desc = callBase->desc();
  const wasm::CalleeDesc& callee = callBase->callee();
  CodeOffset retOffset;
  CodeOffset secondRetOffset;
  switch (callee.which()) {
    case wasm::CalleeDesc::Func:
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCall(desc, callee.funcIndex(), retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      retOffset = masm.call(desc, callee.funcIndex());
      // Same-module direct calls preserve instance/pinned regs and realm.
      reloadRegs = false;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::Import:
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallImport(desc, callee, retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      retOffset = masm.wasmCallImport(desc, callee);
      break;
    case wasm::CalleeDesc::AsmJSTable:
      retOffset = masm.asmCallIndirect(desc, callee);
      break;
    case wasm::CalleeDesc::WasmTable: {
      // Out-of-line traps for the table bounds check and (on targets without
      // a heap register) the null-entry check.
      Label* boundsCheckFailed = nullptr;
      if (lir->needsBoundsCheck()) {
        OutOfLineAbortingWasmTrap* ool =
            new (alloc()) OutOfLineAbortingWasmTrap(
                wasm::BytecodeOffset(desc.lineOrBytecode()),
                wasm::Trap::OutOfBounds);
        if (lir->isCatchable()) {
          addOutOfLineCode(ool, lir->mirCatchable());
        } else if (isReturnCall) {
#ifdef ENABLE_WASM_TAIL_CALLS
          addOutOfLineCode(ool, lir->mirReturnCall());
#else
          MOZ_CRASH("Return calls are disabled.");
#endif
        } else {
          addOutOfLineCode(ool, lir->mirUncatchable());
        }
        boundsCheckFailed = ool->entry();
      }
      Label* nullCheckFailed = nullptr;
#ifndef WASM_HAS_HEAPREG
      {
        OutOfLineAbortingWasmTrap* ool =
            new (alloc()) OutOfLineAbortingWasmTrap(
                wasm::BytecodeOffset(desc.lineOrBytecode()),
                wasm::Trap::IndirectCallToNull);
        if (lir->isCatchable()) {
          addOutOfLineCode(ool, lir->mirCatchable());
        } else if (isReturnCall) {
#  ifdef ENABLE_WASM_TAIL_CALLS
          addOutOfLineCode(ool, lir->mirReturnCall());
#  else
          MOZ_CRASH("Return calls are disabled.");
#  endif
        } else {
          addOutOfLineCode(ool, lir->mirUncatchable());
        }
        nullCheckFailed = ool->entry();
      }
#endif
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallIndirect(desc, callee, boundsCheckFailed,
                                    nullCheckFailed, mozilla::Nothing(),
                                    retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      masm.wasmCallIndirect(desc, callee, boundsCheckFailed, nullCheckFailed,
                            lir->tableSize(), &retOffset, &secondRetOffset);
      // Register reloading and realm switching are handled dynamically inside
      // wasmCallIndirect. There are two return offsets, one for each call
      // instruction (fast path and slow path).
      reloadRegs = false;
      switchRealm = false;
      break;
    }
    case wasm::CalleeDesc::Builtin:
      retOffset = masm.call(desc, callee.builtin());
      reloadRegs = false;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::BuiltinInstanceMethod:
      retOffset = masm.wasmCallBuiltinInstanceMethod(
          desc, callBase->instanceArg(), callee.builtin(),
          callBase->builtinMethodFailureMode());
      switchRealm = false;
      break;
    case wasm::CalleeDesc::FuncRef:
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallRef(desc, callee, retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      // Register reloading and realm switching are handled dynamically inside
      // wasmCallRef. There are two return offsets, one for each call
      // instruction (fast path and slow path).
      masm.wasmCallRef(desc, callee, &retOffset, &secondRetOffset);
      reloadRegs = false;
      switchRealm = false;
      break;
  }

  // Note the assembler offset for the associated LSafePoint.
  MOZ_ASSERT(!isReturnCall);
  markSafepointAt(retOffset.offset(), lir);

  // Now that all the outbound in-memory args are on the stack, note the
  // required lower boundary point of the associated StackMap.
  uint32_t framePushedAtStackMapBase =
      masm.framePushed() -
      wasm::AlignStackArgAreaSize(callBase->stackArgAreaSizeUnaligned());
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAtStackMapBase);
  MOZ_ASSERT(lir->safepoint()->wasmSafepointKind() ==
             WasmSafepointKind::LirCall);

  // Note the assembler offset and framePushed for use by the adjunct
  // LSafePoint, see visitor for LWasmCallIndirectAdjunctSafepoint below.
  if (callee.which() == wasm::CalleeDesc::WasmTable) {
    lir->adjunctSafepoint()->recordSafepointInfo(secondRetOffset,
                                                 framePushedAtStackMapBase);
  }

  if (reloadRegs) {
    masm.loadPtr(
        Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
        InstanceReg);
    masm.loadWasmPinnedRegsFromInstance();
    if (switchRealm) {
      masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
    }
  } else {
    MOZ_ASSERT(!switchRealm);
  }

#ifdef ENABLE_WASM_TAIL_CALLS
  switch (callee.which()) {
    case wasm::CalleeDesc::Func:
    case wasm::CalleeDesc::Import:
    case wasm::CalleeDesc::WasmTable:
    case wasm::CalleeDesc::FuncRef:
      // Stack allocation could change during Wasm (return) calls,
      // recover pre-call state.
      masm.freeStackTo(masm.framePushed());
      break;
    default:
      break;
  }
#endif  // ENABLE_WASM_TAIL_CALLS

  if (inTry) {
    // Set the end of the try note range
    size_t tryNoteIndex = callBase->tryNoteIndex();
    wasm::TryNoteVector& tryNotes = masm.tryNotes();
    wasm::TryNote& tryNote = tryNotes[tryNoteIndex];

    // Don't set the end of the try note if we've OOM'ed, as the above
    // instructions may not have been emitted, which will trigger an assert
    // about zero-length try-notes. This is okay as this compilation will be
    // thrown away.
    if (!masm.oom()) {
      tryNote.setTryBodyEnd(masm.currentOffset());
    }

    // This instruction or the adjunct safepoint must be the last instruction
    // in the block. No other instructions may be inserted.
    LBlock* block = lir->block();
    MOZ_RELEASE_ASSERT(*block->rbegin() == lir ||
                       (block->rbegin()->isWasmCallIndirectAdjunctSafepoint() &&
                        *(++block->rbegin()) == lir));

    // Jump to the fallthrough block
    jumpToBlock(lir->mirCatchable()->getSuccessor(
        MWasmCallCatchable::FallthroughBranchIndex));
  }
}
// Mark the landing-pad entry point for a catchable wasm call's TryNote.
void CodeGenerator::visitWasmCallLandingPrePad(LWasmCallLandingPrePad* lir) {
  LBlock* block = lir->block();
  MWasmCallLandingPrePad* mir = lir->mir();
  MBasicBlock* mirBlock = mir->block();
  MBasicBlock* callMirBlock = mir->callBlock();

  // This block must be the pre-pad successor of the call block. No blocks may
  // be inserted between us, such as for critical edge splitting.
  MOZ_RELEASE_ASSERT(mirBlock == callMirBlock->getSuccessor(
                                     MWasmCallCatchable::PrePadBranchIndex));

  // This instruction or a move group must be the first instruction in the
  // block. No other instructions may be inserted.
  MOZ_RELEASE_ASSERT(*block->begin() == lir || (block->begin()->isMoveGroup() &&
                                                *(++block->begin()) == lir));

  wasm::TryNoteVector& tryNotes = masm.tryNotes();
  wasm::TryNote& tryNote = tryNotes[mir->tryNoteIndex()];
  // Set the entry point for the call try note to be the beginning of this
  // block. The above assertions (and assertions in visitWasmCall) guarantee
  // that we are not skipping over instructions that should be executed.
  tryNote.setLandingPad(block->label()->offset(), masm.framePushed());
}
9133 void CodeGenerator::visitWasmCallIndirectAdjunctSafepoint(
9134 LWasmCallIndirectAdjunctSafepoint* lir) {
9135 markSafepointAt(lir->safepointLocation().offset(), lir);
9136 lir->safepoint()->setFramePushedAtStackMapBase(
9137 lir->framePushedAtStackMapBase());
9140 template <typename InstructionWithMaybeTrapSite>
9141 void EmitSignalNullCheckTrapSite(MacroAssembler& masm,
9142 InstructionWithMaybeTrapSite* ins,
9143 FaultingCodeOffset fco,
9144 wasm::TrapMachineInsn tmi) {
9145 if (!ins->maybeTrap()) {
9146 return;
9148 wasm::BytecodeOffset trapOffset(ins->maybeTrap()->offset);
9149 masm.append(wasm::Trap::NullPointerDereference,
9150 wasm::TrapSite(tmi, fco, trapOffset));
// Emit a load of a wasm value of |type| from |addr| into |dst|, applying
// |wideningOp| for sub-word integer loads. Each load's faulting offset is
// paired with the matching TrapMachineInsn so signal-based null checks can
// identify the instruction.
template <typename InstructionWithMaybeTrapSite, class AddressOrBaseIndex>
void CodeGenerator::emitWasmValueLoad(InstructionWithMaybeTrapSite* ins,
                                      MIRType type, MWideningOp wideningOp,
                                      AddressOrBaseIndex addr,
                                      AnyRegister dst) {
  FaultingCodeOffset fco;
  switch (type) {
    case MIRType::Int32:
      switch (wideningOp) {
        case MWideningOp::None:
          fco = masm.load32(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load32);
          break;
        case MWideningOp::FromU16:
          fco = masm.load16ZeroExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load16);
          break;
        case MWideningOp::FromS16:
          fco = masm.load16SignExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load16);
          break;
        case MWideningOp::FromU8:
          fco = masm.load8ZeroExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load8);
          break;
        case MWideningOp::FromS8:
          fco = masm.load8SignExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load8);
          break;
        default:
          MOZ_CRASH("unexpected widening op in ::visitWasmLoadElement");
      }
      break;
    case MIRType::Float32:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadFloat32(addr, dst.fpu());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Load32);
      break;
    case MIRType::Double:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadDouble(addr, dst.fpu());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Load64);
      break;
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
    case MIRType::WasmArrayData:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadPtr(addr, dst.gpr());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsnForLoadWord());
      break;
    default:
      MOZ_CRASH("unexpected type in ::emitWasmValueLoad");
  }
}
// Emit a store of wasm value |src| of |type| to |addr|, applying
// |narrowingOp| for sub-word integer stores. Each store's faulting offset is
// paired with the matching TrapMachineInsn so signal-based null checks can
// identify the instruction.
template <typename InstructionWithMaybeTrapSite, class AddressOrBaseIndex>
void CodeGenerator::emitWasmValueStore(InstructionWithMaybeTrapSite* ins,
                                       MIRType type, MNarrowingOp narrowingOp,
                                       AnyRegister src,
                                       AddressOrBaseIndex addr) {
  FaultingCodeOffset fco;
  switch (type) {
    case MIRType::Int32:
      switch (narrowingOp) {
        case MNarrowingOp::None:
          fco = masm.store32(src.gpr(), addr);
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Store32);
          break;
        case MNarrowingOp::To16:
          fco = masm.store16(src.gpr(), addr);
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Store16);
          break;
        case MNarrowingOp::To8:
          fco = masm.store8(src.gpr(), addr);
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Store8);
          break;
        default:
          MOZ_CRASH();
      }
      break;
    case MIRType::Float32:
      fco = masm.storeFloat32(src.fpu(), addr);
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Store32);
      break;
    case MIRType::Double:
      fco = masm.storeDouble(src.fpu(), addr);
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Store64);
      break;
    case MIRType::Pointer:
      // This could be correct, but it would be a new usage, so check carefully.
      MOZ_CRASH("Unexpected type in ::emitWasmValueStore.");
    case MIRType::WasmAnyRef:
      MOZ_CRASH("Bad type in ::emitWasmValueStore. Use LWasmStoreElementRef.");
    default:
      MOZ_CRASH("unexpected type in ::emitWasmValueStore");
  }
}
// Load a wasm value from a fixed offset within |containerRef| (e.g. a struct
// field), with SIMD handled specially since it has no widening variants.
void CodeGenerator::visitWasmLoadSlot(LWasmLoadSlot* ins) {
  MIRType type = ins->type();
  MWideningOp wideningOp = ins->wideningOp();
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  AnyRegister dst = ToAnyRegister(ins->output());

#ifdef ENABLE_WASM_SIMD
  if (type == MIRType::Simd128) {
    MOZ_ASSERT(wideningOp == MWideningOp::None);
    FaultingCodeOffset fco = masm.loadUnalignedSimd128(addr, dst.fpu());
    EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load128);
    return;
  }
#endif
  emitWasmValueLoad(ins, type, wideningOp, addr, dst);
}
// Load a wasm value from |base[index << scale]| (e.g. an array element), with
// SIMD handled specially since its 16-byte stride exceeds the maximum
// hardware addressing scale.
void CodeGenerator::visitWasmLoadElement(LWasmLoadElement* ins) {
  MIRType type = ins->type();
  MWideningOp wideningOp = ins->wideningOp();
  Scale scale = ins->scale();
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  AnyRegister dst = ToAnyRegister(ins->output());

#ifdef ENABLE_WASM_SIMD
  if (type == MIRType::Simd128) {
    MOZ_ASSERT(wideningOp == MWideningOp::None);
    FaultingCodeOffset fco;
    // BaseIndex scales by at most 8, so compute index * 16 into the temp and
    // address with TimesOne.
    Register temp = ToRegister(ins->temp0());
    masm.movePtr(index, temp);
    masm.lshiftPtr(Imm32(4), temp);
    fco = masm.loadUnalignedSimd128(BaseIndex(base, temp, Scale::TimesOne),
                                    dst.fpu());
    EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load128);
    return;
  }
#endif
  emitWasmValueLoad(ins, type, wideningOp, BaseIndex(base, index, scale), dst);
}
// Store a wasm value at a fixed offset within |containerRef|, with SIMD
// handled specially since it has no narrowing variants.
void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
  MIRType type = ins->type();
  MNarrowingOp narrowingOp = ins->narrowingOp();
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  AnyRegister src = ToAnyRegister(ins->value());
  // Narrowing only makes sense for int32 stores.
  if (type != MIRType::Int32) {
    MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
  }

#ifdef ENABLE_WASM_SIMD
  if (type == MIRType::Simd128) {
    FaultingCodeOffset fco = masm.storeUnalignedSimd128(src.fpu(), addr);
    EmitSignalNullCheckTrapSite(masm, ins, fco,
                                wasm::TrapMachineInsn::Store128);
    return;
  }
#endif
  emitWasmValueStore(ins, type, narrowingOp, src, addr);
}
// Store a wasm value at |base[index << scale]|, with SIMD handled specially
// since its 16-byte stride exceeds the maximum hardware addressing scale.
void CodeGenerator::visitWasmStoreElement(LWasmStoreElement* ins) {
  MIRType type = ins->type();
  MNarrowingOp narrowingOp = ins->narrowingOp();
  Scale scale = ins->scale();
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  AnyRegister src = ToAnyRegister(ins->value());
  // Narrowing only makes sense for int32 stores.
  if (type != MIRType::Int32) {
    MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
  }

#ifdef ENABLE_WASM_SIMD
  if (type == MIRType::Simd128) {
    // BaseIndex scales by at most 8, so compute index * 16 into the temp and
    // address with TimesOne.
    Register temp = ToRegister(ins->temp0());
    masm.movePtr(index, temp);
    masm.lshiftPtr(Imm32(4), temp);
    FaultingCodeOffset fco = masm.storeUnalignedSimd128(
        src.fpu(), BaseIndex(base, temp, Scale::TimesOne));
    EmitSignalNullCheckTrapSite(masm, ins, fco,
                                wasm::TrapMachineInsn::Store128);
    return;
  }
#endif
  emitWasmValueStore(ins, type, narrowingOp, src,
                     BaseIndex(base, index, scale));
}
9354 void CodeGenerator::visitWasmLoadTableElement(LWasmLoadTableElement* ins) {
9355 Register elements = ToRegister(ins->elements());
9356 Register index = ToRegister(ins->index());
9357 Register output = ToRegister(ins->output());
9358 masm.loadPtr(BaseIndex(elements, index, ScalePointer), output);
9361 void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
9362 masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
9363 masm.addPtr(Imm32(int32_t(ins->offset())), ToRegister(ins->output()));
9366 void CodeGenerator::visitWasmDerivedIndexPointer(
9367 LWasmDerivedIndexPointer* ins) {
9368 Register base = ToRegister(ins->base());
9369 Register index = ToRegister(ins->index());
9370 Register output = ToRegister(ins->output());
9371 masm.computeEffectiveAddress(BaseIndex(base, index, ins->scale()), output);
// Store a wasm reference at a fixed offset from |valueBase|, emitting the
// generational pre-barrier first when requested. The post-barrier is emitted
// by a separate LIR instruction.
void CodeGenerator::visitWasmStoreRef(LWasmStoreRef* ins) {
  Register instance = ToRegister(ins->instance());
  Register valueBase = ToRegister(ins->valueBase());
  size_t offset = ins->offset();
  Register value = ToRegister(ins->value());
  Register temp = ToRegister(ins->temp0());

  if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
    // Skip the pre-barrier call when the barrier is inactive or the old
    // value doesn't need it; the guard may also act as the null check.
    Label skipPreBarrier;
    wasm::EmitWasmPreBarrierGuard(
        masm, instance, temp, Address(valueBase, offset), &skipPreBarrier,
        ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
    wasm::EmitWasmPreBarrierCallImmediate(masm, instance, temp, valueBase,
                                          offset);
    masm.bind(&skipPreBarrier);
  }

  FaultingCodeOffset fco = masm.storePtr(value, Address(valueBase, offset));
  EmitSignalNullCheckTrapSite(masm, ins, fco,
                              wasm::TrapMachineInsnForStoreWord());
  // The postbarrier is handled separately.
}
// Store a wasm reference at |base[index * sizeof(void*)]|, emitting the
// generational pre-barrier first when requested. The post-barrier is emitted
// by a separate LIR instruction.
void CodeGenerator::visitWasmStoreElementRef(LWasmStoreElementRef* ins) {
  Register instance = ToRegister(ins->instance());
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  Register value = ToRegister(ins->value());
  Register temp0 = ToTempRegisterOrInvalid(ins->temp0());
  Register temp1 = ToTempRegisterOrInvalid(ins->temp1());

  BaseIndex addr(base, index, ScalePointer);

  if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
    // Skip the pre-barrier call when the barrier is inactive or the old
    // value doesn't need it; the guard may also act as the null check.
    Label skipPreBarrier;
    wasm::EmitWasmPreBarrierGuard(
        masm, instance, temp0, addr, &skipPreBarrier,
        ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
    wasm::EmitWasmPreBarrierCallIndex(masm, instance, temp0, temp1, addr);
    masm.bind(&skipPreBarrier);
  }

  FaultingCodeOffset fco = masm.storePtr(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fco,
                              wasm::TrapMachineInsnForStoreWord());
  // The postbarrier is handled separately.
}
9422 // Out-of-line path to update the store buffer for wasm references.
9423 class OutOfLineWasmCallPostWriteBarrierImmediate
9424 : public OutOfLineCodeBase<CodeGenerator> {
9425 LInstruction* lir_;
9426 Register valueBase_;
9427 Register temp_;
9428 uint32_t valueOffset_;
9430 public:
9431 OutOfLineWasmCallPostWriteBarrierImmediate(LInstruction* lir,
9432 Register valueBase, Register temp,
9433 uint32_t valueOffset)
9434 : lir_(lir),
9435 valueBase_(valueBase),
9436 temp_(temp),
9437 valueOffset_(valueOffset) {}
9439 void accept(CodeGenerator* codegen) override {
9440 codegen->visitOutOfLineWasmCallPostWriteBarrierImmediate(this);
9443 LInstruction* lir() const { return lir_; }
9444 Register valueBase() const { return valueBase_; }
9445 Register temp() const { return temp_; }
9446 uint32_t valueOffset() const { return valueOffset_; }
// Out-of-line slow path: call Instance::postBarrier with the address of the
// just-stored reference cell. InstanceReg is preserved across the ABI call.
void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrierImmediate(
    OutOfLineWasmCallPostWriteBarrierImmediate* ool) {
  saveLiveVolatile(ool->lir());
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();

  // Fold the value offset into the value base
  Register valueAddr = ool->valueBase();
  Register temp = ool->temp();
  masm.computeEffectiveAddress(Address(valueAddr, ool->valueOffset()), temp);

  // Call Instance::postBarrier
  masm.setupWasmABICall();
  masm.passABIArg(InstanceReg);
  masm.passABIArg(temp);
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
                   mozilla::Some(instanceOffset), ABIType::General);

  masm.Pop(InstanceReg);
  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
// Inline fast path of the wasm post-write barrier: skip the out-of-line
// store-buffer update unless |value| is a nursery cell and |object| is
// tenured.
void CodeGenerator::visitWasmPostWriteBarrierImmediate(
    LWasmPostWriteBarrierImmediate* lir) {
  Register object = ToRegister(lir->object());
  Register value = ToRegister(lir->value());
  Register valueBase = ToRegister(lir->valueBase());
  Register temp = ToRegister(lir->temp0());
  MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
  auto* ool = new (alloc()) OutOfLineWasmCallPostWriteBarrierImmediate(
      lir, valueBase, temp, lir->valueOffset());
  addOutOfLineCode(ool, lir->mir());

  // The guard branches straight to ool->rejoin() when no barrier is needed.
  wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
                                 ool->rejoin());
  masm.jump(ool->entry());
  masm.bind(ool->rejoin());
}
// Out-of-line path to update the store buffer for wasm references.
// Captures the store's location as |valueBase + index * elemSize|; the
// barrier call itself is emitted by the matching CodeGenerator visitor.
class OutOfLineWasmCallPostWriteBarrierIndex
    : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;
  Register valueBase_;
  Register index_;
  Register temp_;
  uint32_t elemSize_;

 public:
  OutOfLineWasmCallPostWriteBarrierIndex(LInstruction* lir, Register valueBase,
                                         Register index, Register temp,
                                         uint32_t elemSize)
      : lir_(lir),
        valueBase_(valueBase),
        index_(index),
        temp_(temp),
        elemSize_(elemSize) {
    // The element size must be a power of two so the visitor can address the
    // element with a hardware scale (or an explicit shift for 16).
    MOZ_ASSERT(elemSize == 1 || elemSize == 2 || elemSize == 4 ||
               elemSize == 8 || elemSize == 16);
  }

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineWasmCallPostWriteBarrierIndex(this);
  }

  // Accessors used by the out-of-line visitor.
  LInstruction* lir() const { return lir_; }
  Register valueBase() const { return valueBase_; }
  Register index() const { return index_; }
  Register temp() const { return temp_; }
  uint32_t elemSize() const { return elemSize_; }
};
// Out-of-line slow path for the indexed wasm post-write barrier: computes the
// element address from (base, index, elemSize) and calls Instance::postBarrier.
void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrierIndex(
    OutOfLineWasmCallPostWriteBarrierIndex* ool) {
  saveLiveVolatile(ool->lir());
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();

  // Fold the value offset into the value base
  Register temp = ool->temp();
  if (ool->elemSize() == 16) {
    // 16-byte elements exceed the maximum BaseIndex scale (TimesEight), so
    // shift the index manually by log2(16) = 4 and add the base.
    masm.movePtr(ool->index(), temp);
    masm.lshiftPtr(Imm32(4), temp);
    masm.addPtr(ool->valueBase(), temp);
  } else {
    masm.computeEffectiveAddress(BaseIndex(ool->valueBase(), ool->index(),
                                           ScaleFromElemWidth(ool->elemSize())),
                                 temp);
  }

  // Call Instance::postBarrier
  masm.setupWasmABICall();
  masm.passABIArg(InstanceReg);
  masm.passABIArg(temp);
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
                   mozilla::Some(instanceOffset), ABIType::General);

  masm.Pop(InstanceReg);
  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
// Inline fast path for the indexed wasm post-write barrier; mirrors
// visitWasmPostWriteBarrierImmediate but with a runtime element index.
void CodeGenerator::visitWasmPostWriteBarrierIndex(
    LWasmPostWriteBarrierIndex* lir) {
  Register object = ToRegister(lir->object());
  Register value = ToRegister(lir->value());
  Register valueBase = ToRegister(lir->valueBase());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());
  MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
  auto* ool = new (alloc()) OutOfLineWasmCallPostWriteBarrierIndex(
      lir, valueBase, index, temp, lir->elemSize());
  addOutOfLineCode(ool, lir->mir());

  // Skip the barrier entirely (branch to rejoin) when the guard determines it
  // is unnecessary; otherwise fall through into the OOL call.
  wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
                                 ool->rejoin());
  masm.jump(ool->entry());
  masm.bind(ool->rejoin());
}
// Load an int64 slot at a fixed offset from a container pointer. The load is
// registered as a potentially-trapping site for wasm null-check handling.
void CodeGenerator::visitWasmLoadSlotI64(LWasmLoadSlotI64* ins) {
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  Register64 output = ToOutRegister64(ins);
  // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
  // transaction will always trap before the other, so it seems safest to
  // register both of them as potentially trapping.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
#else
  FaultingCodeOffsetPair fcop = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Load32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Load32);
#endif
}
// Load an int64 element at base + index*8. Like visitWasmLoadSlotI64, the
// load(s) are registered as potentially-trapping sites.
void CodeGenerator::visitWasmLoadElementI64(LWasmLoadElementI64* ins) {
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  BaseIndex addr(base, index, Scale::TimesEight);
  Register64 output = ToOutRegister64(ins);
  // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
  // transaction will always trap before the other, so it seems safest to
  // register both of them as potentially trapping.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
#else
  FaultingCodeOffsetPair fcop = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Load32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Load32);
#endif
}
// Store an int64 value to a fixed-offset slot of a container pointer,
// registering the store(s) as potentially-trapping null-check sites.
void CodeGenerator::visitWasmStoreSlotI64(LWasmStoreSlotI64* ins) {
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  Register64 value = ToRegister64(ins->value());
  // Either 1 or 2 words. As above we register both transactions in the
  // 2-word case.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
#else
  FaultingCodeOffsetPair fcop = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Store32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Store32);
#endif
}
// Store an int64 element at base + index*8, registering the store(s) as
// potentially-trapping null-check sites.
void CodeGenerator::visitWasmStoreElementI64(LWasmStoreElementI64* ins) {
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  BaseIndex addr(base, index, Scale::TimesEight);
  Register64 value = ToRegister64(ins->value());
  // Either 1 or 2 words. As above we register both transactions in the
  // 2-word case.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
#else
  FaultingCodeOffsetPair fcop = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Store32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Store32);
#endif
}
9650 void CodeGenerator::visitArrayBufferByteLength(LArrayBufferByteLength* lir) {
9651 Register obj = ToRegister(lir->object());
9652 Register out = ToRegister(lir->output());
9653 masm.loadArrayBufferByteLengthIntPtr(obj, out);
9656 void CodeGenerator::visitArrayBufferViewLength(LArrayBufferViewLength* lir) {
9657 Register obj = ToRegister(lir->object());
9658 Register out = ToRegister(lir->output());
9659 masm.loadArrayBufferViewLengthIntPtr(obj, out);
9662 void CodeGenerator::visitArrayBufferViewByteOffset(
9663 LArrayBufferViewByteOffset* lir) {
9664 Register obj = ToRegister(lir->object());
9665 Register out = ToRegister(lir->output());
9666 masm.loadArrayBufferViewByteOffsetIntPtr(obj, out);
9669 void CodeGenerator::visitArrayBufferViewElements(
9670 LArrayBufferViewElements* lir) {
9671 Register obj = ToRegister(lir->object());
9672 Register out = ToRegister(lir->output());
9673 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), out);
9676 void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
9677 Register obj = ToRegister(lir->object());
9678 Register out = ToRegister(lir->output());
9680 masm.typedArrayElementSize(obj, out);
9683 void CodeGenerator::visitResizableTypedArrayByteOffsetMaybeOutOfBounds(
9684 LResizableTypedArrayByteOffsetMaybeOutOfBounds* lir) {
9685 Register obj = ToRegister(lir->object());
9686 Register out = ToRegister(lir->output());
9687 Register temp = ToRegister(lir->temp0());
9689 masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, out, temp);
9692 void CodeGenerator::visitResizableTypedArrayLength(
9693 LResizableTypedArrayLength* lir) {
9694 Register obj = ToRegister(lir->object());
9695 Register out = ToRegister(lir->output());
9696 Register temp = ToRegister(lir->temp0());
9698 masm.loadResizableTypedArrayLengthIntPtr(lir->synchronization(), obj, out,
9699 temp);
9702 void CodeGenerator::visitResizableDataViewByteLength(
9703 LResizableDataViewByteLength* lir) {
9704 Register obj = ToRegister(lir->object());
9705 Register out = ToRegister(lir->output());
9706 Register temp = ToRegister(lir->temp0());
9708 masm.loadResizableDataViewByteLengthIntPtr(lir->synchronization(), obj, out,
9709 temp);
9712 void CodeGenerator::visitGrowableSharedArrayBufferByteLength(
9713 LGrowableSharedArrayBufferByteLength* lir) {
9714 Register obj = ToRegister(lir->object());
9715 Register out = ToRegister(lir->output());
9717 // Explicit |byteLength| accesses are seq-consistent atomic loads.
9718 auto sync = Synchronization::Load();
9720 masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, out);
9723 void CodeGenerator::visitGuardResizableArrayBufferViewInBounds(
9724 LGuardResizableArrayBufferViewInBounds* lir) {
9725 Register obj = ToRegister(lir->object());
9726 Register temp = ToRegister(lir->temp0());
9728 Label bail;
9729 masm.branchIfResizableArrayBufferViewOutOfBounds(obj, temp, &bail);
9730 bailoutFrom(&bail, lir->snapshot());
9733 void CodeGenerator::visitGuardResizableArrayBufferViewInBoundsOrDetached(
9734 LGuardResizableArrayBufferViewInBoundsOrDetached* lir) {
9735 Register obj = ToRegister(lir->object());
9736 Register temp = ToRegister(lir->temp0());
9738 Label done, bail;
9739 masm.branchIfResizableArrayBufferViewInBounds(obj, temp, &done);
9740 masm.branchIfHasAttachedArrayBuffer(obj, temp, &bail);
9741 masm.bind(&done);
9742 bailoutFrom(&bail, lir->snapshot());
9745 void CodeGenerator::visitGuardHasAttachedArrayBuffer(
9746 LGuardHasAttachedArrayBuffer* lir) {
9747 Register obj = ToRegister(lir->object());
9748 Register temp = ToRegister(lir->temp0());
9750 Label bail;
9751 masm.branchIfHasDetachedArrayBuffer(obj, temp, &bail);
9752 bailoutFrom(&bail, lir->snapshot());
// Out-of-line path for MGuardNumberToIntPtrIndex when OOB indices are
// supported: substitutes an arbitrary out-of-bounds index instead of bailing.
class OutOfLineGuardNumberToIntPtrIndex
    : public OutOfLineCodeBase<CodeGenerator> {
  LGuardNumberToIntPtrIndex* lir_;  // the guarded LIR node

 public:
  explicit OutOfLineGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex* lir)
      : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineGuardNumberToIntPtrIndex(this);
  }

  LGuardNumberToIntPtrIndex* lir() const { return lir_; }
};
// Convert a double to an intptr index. When the MIR node doesn't support
// out-of-bounds indices, a failed conversion bails out; otherwise the OOL path
// substitutes a sentinel OOB index and execution continues.
void CodeGenerator::visitGuardNumberToIntPtrIndex(
    LGuardNumberToIntPtrIndex* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());

  if (!lir->mir()->supportOOB()) {
    Label bail;
    masm.convertDoubleToPtr(input, output, &bail, false);
    bailoutFrom(&bail, lir->snapshot());
    return;
  }

  auto* ool = new (alloc()) OutOfLineGuardNumberToIntPtrIndex(lir);
  addOutOfLineCode(ool, lir->mir());

  masm.convertDoubleToPtr(input, output, ool->entry(), false);
  masm.bind(ool->rejoin());
}
// OOL continuation for visitGuardNumberToIntPtrIndex's supportOOB() case.
void CodeGenerator::visitOutOfLineGuardNumberToIntPtrIndex(
    OutOfLineGuardNumberToIntPtrIndex* ool) {
  // Substitute the invalid index with an arbitrary out-of-bounds index.
  masm.movePtr(ImmWord(-1), ToRegister(ool->lir()->output()));
  masm.jump(ool->rejoin());
}
9795 void CodeGenerator::visitStringLength(LStringLength* lir) {
9796 Register input = ToRegister(lir->string());
9797 Register output = ToRegister(lir->output());
9799 masm.loadStringLength(input, output);
// Int32 min/max. Lowering ties |first| to the output; the second operand may
// be a constant (branch + conditional move of the immediate) or a register
// (single conditional move).
void CodeGenerator::visitMinMaxI(LMinMaxI* ins) {
  Register first = ToRegister(ins->first());
  Register output = ToRegister(ins->output());

  MOZ_ASSERT(first == output);

  // For max we keep |first| when it is greater; for min when it is less.
  Assembler::Condition cond =
      ins->mir()->isMax() ? Assembler::GreaterThan : Assembler::LessThan;

  if (ins->second()->isConstant()) {
    Label done;
    masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done);
    masm.move32(Imm32(ToInt32(ins->second())), output);
    masm.bind(&done);
  } else {
    Register second = ToRegister(ins->second());
    masm.cmp32Move32(cond, second, first, second, output);
  }
}
9822 void CodeGenerator::visitMinMaxArrayI(LMinMaxArrayI* ins) {
9823 Register array = ToRegister(ins->array());
9824 Register output = ToRegister(ins->output());
9825 Register temp1 = ToRegister(ins->temp1());
9826 Register temp2 = ToRegister(ins->temp2());
9827 Register temp3 = ToRegister(ins->temp3());
9828 bool isMax = ins->isMax();
9830 Label bail;
9831 masm.minMaxArrayInt32(array, output, temp1, temp2, temp3, isMax, &bail);
9832 bailoutFrom(&bail, ins->snapshot());
9835 void CodeGenerator::visitMinMaxArrayD(LMinMaxArrayD* ins) {
9836 Register array = ToRegister(ins->array());
9837 FloatRegister output = ToFloatRegister(ins->output());
9838 Register temp1 = ToRegister(ins->temp1());
9839 Register temp2 = ToRegister(ins->temp2());
9840 FloatRegister floatTemp = ToFloatRegister(ins->floatTemp());
9841 bool isMax = ins->isMax();
9843 Label bail;
9844 masm.minMaxArrayNumber(array, output, floatTemp, temp1, temp2, isMax, &bail);
9845 bailoutFrom(&bail, ins->snapshot());
9848 // For Abs*, lowering will have tied input to output on platforms where that is
9849 // sensible, and otherwise left them untied.
// Int32 Math.abs. The fallible path must detect negation overflow
// (INT32_MIN has no positive int32 counterpart) and bail out.
void CodeGenerator::visitAbsI(LAbsI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  if (ins->mir()->fallible()) {
    Label positive;
    if (input != output) {
      masm.move32(input, output);
    }
    // Non-negative values are already their own absolute value.
    masm.branchTest32(Assembler::NotSigned, output, output, &positive);
    Label bail;
    // Negating INT32_MIN overflows; bail out in that case.
    masm.branchNeg32(Assembler::Overflow, output, &bail);
    bailoutFrom(&bail, ins->snapshot());
    masm.bind(&positive);
  } else {
    masm.abs32(input, output);
  }
}
9870 void CodeGenerator::visitAbsD(LAbsD* ins) {
9871 masm.absDouble(ToFloatRegister(ins->input()), ToFloatRegister(ins->output()));
9874 void CodeGenerator::visitAbsF(LAbsF* ins) {
9875 masm.absFloat32(ToFloatRegister(ins->input()),
9876 ToFloatRegister(ins->output()));
9879 void CodeGenerator::visitPowII(LPowII* ins) {
9880 Register value = ToRegister(ins->value());
9881 Register power = ToRegister(ins->power());
9882 Register output = ToRegister(ins->output());
9883 Register temp0 = ToRegister(ins->temp0());
9884 Register temp1 = ToRegister(ins->temp1());
9886 Label bailout;
9887 masm.pow32(value, power, output, temp0, temp1, &bailout);
9888 bailoutFrom(&bailout, ins->snapshot());
// double ** int32 exponentiation via an ABI call to js::powi. The result is
// returned in ReturnDoubleReg, which lowering fixes as the output.
void CodeGenerator::visitPowI(LPowI* ins) {
  FloatRegister value = ToFloatRegister(ins->value());
  Register power = ToRegister(ins->power());

  using Fn = double (*)(double x, int32_t y);
  masm.setupAlignedABICall();
  masm.passABIArg(value, ABIType::Float64);
  masm.passABIArg(power);

  masm.callWithABI<Fn, js::powi>(ABIType::Float64);
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
}
// double ** double exponentiation via an ABI call to ecmaPow. The result is
// returned in ReturnDoubleReg, which lowering fixes as the output.
void CodeGenerator::visitPowD(LPowD* ins) {
  FloatRegister value = ToFloatRegister(ins->value());
  FloatRegister power = ToFloatRegister(ins->power());

  using Fn = double (*)(double x, double y);
  masm.setupAlignedABICall();
  masm.passABIArg(value, ABIType::Float64);
  masm.passABIArg(power, ABIType::Float64);
  masm.callWithABI<Fn, ecmaPow>(ABIType::Float64);

  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
}
// base ** power where |base| is a constant power of two: computed as a series
// of left shifts, with a range check to guarantee the int32 result fits.
void CodeGenerator::visitPowOfTwoI(LPowOfTwoI* ins) {
  Register power = ToRegister(ins->power());
  Register output = ToRegister(ins->output());

  uint32_t base = ins->base();
  MOZ_ASSERT(mozilla::IsPowerOfTwo(base));

  uint32_t n = mozilla::FloorLog2(base);
  MOZ_ASSERT(n != 0);

  // Hacker's Delight, 2nd edition, theorem D2.
  auto ceilingDiv = [](uint32_t x, uint32_t y) { return (x + y - 1) / y; };

  // Take bailout if |power| is greater-or-equals |log_y(2^31)| or is negative.
  // |2^(n*y) < 2^31| must hold, hence |n*y < 31| resp. |y < 31/n|.
  //
  // Note: it's important for this condition to match the code in CacheIR.cpp
  // (CanAttachInt32Pow) to prevent failure loops.
  bailoutCmp32(Assembler::AboveOrEqual, power, Imm32(ceilingDiv(31, n)),
               ins->snapshot());

  // Compute (2^n)^y as 2^(n*y) using repeated shifts. We could directly scale
  // |power| and perform a single shift, but due to the lack of necessary
  // MacroAssembler functionality, like multiplying a register with an
  // immediate, we restrict the number of generated shift instructions when
  // lowering this operation.
  masm.move32(Imm32(1), output);
  do {
    masm.lshift32(power, output);
    n--;
  } while (n > 0);
}
9950 void CodeGenerator::visitSqrtD(LSqrtD* ins) {
9951 FloatRegister input = ToFloatRegister(ins->input());
9952 FloatRegister output = ToFloatRegister(ins->output());
9953 masm.sqrtDouble(input, output);
9956 void CodeGenerator::visitSqrtF(LSqrtF* ins) {
9957 FloatRegister input = ToFloatRegister(ins->input());
9958 FloatRegister output = ToFloatRegister(ins->output());
9959 masm.sqrtFloat32(input, output);
9962 void CodeGenerator::visitSignI(LSignI* ins) {
9963 Register input = ToRegister(ins->input());
9964 Register output = ToRegister(ins->output());
9965 masm.signInt32(input, output);
9968 void CodeGenerator::visitSignD(LSignD* ins) {
9969 FloatRegister input = ToFloatRegister(ins->input());
9970 FloatRegister output = ToFloatRegister(ins->output());
9971 masm.signDouble(input, output);
9974 void CodeGenerator::visitSignDI(LSignDI* ins) {
9975 FloatRegister input = ToFloatRegister(ins->input());
9976 FloatRegister temp = ToFloatRegister(ins->temp0());
9977 Register output = ToRegister(ins->output());
9979 Label bail;
9980 masm.signDoubleToInt32(input, output, temp, &bail);
9981 bailoutFrom(&bail, ins->snapshot());
// Unary double math function (sin, cos, log, ...) dispatched through a
// function pointer obtained from the MIR node, called via the aligned ABI.
void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);

  UnaryMathFunction fun = ins->mir()->function();
  UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);

  masm.setupAlignedABICall();

  masm.passABIArg(input, ABIType::Float64);
  masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
                   ABIType::Float64);
}
// Unary float32 math function. Only a small set of functions is supported;
// libc functions (floorf/ceilf) skip the unsafe-call check, the in-engine
// implementations (math_roundf_impl/math_truncf_impl) use the default check.
void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);

  masm.setupAlignedABICall();
  masm.passABIArg(input, ABIType::Float32);

  using Fn = float (*)(float x);
  Fn funptr = nullptr;
  CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check;
  switch (ins->mir()->function()) {
    case UnaryMathFunction::Floor:
      funptr = floorf;
      check = CheckUnsafeCallWithABI::DontCheckOther;
      break;
    case UnaryMathFunction::Round:
      funptr = math_roundf_impl;
      break;
    case UnaryMathFunction::Trunc:
      funptr = math_truncf_impl;
      break;
    case UnaryMathFunction::Ceil:
      funptr = ceilf;
      check = CheckUnsafeCallWithABI::DontCheckOther;
      break;
    default:
      MOZ_CRASH("Unknown or unsupported float32 math function");
  }

  masm.callWithABI(DynamicFunction<Fn>(funptr), ABIType::Float32, check);
}
// Double modulo (JS %) via an ABI call to NumberMod. The wasm pipeline uses
// visitWasmBuiltinModD instead.
void CodeGenerator::visitModD(LModD* ins) {
  MOZ_ASSERT(!gen->compilingWasm());

  FloatRegister lhs = ToFloatRegister(ins->lhs());
  FloatRegister rhs = ToFloatRegister(ins->rhs());

  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);

  using Fn = double (*)(double a, double b);
  masm.setupAlignedABICall();
  masm.passABIArg(lhs, ABIType::Float64);
  masm.passABIArg(rhs, ABIType::Float64);
  masm.callWithABI<Fn, NumberMod>(ABIType::Float64);
}
// Double modulo with a constant power-of-two divisor, computed inline without
// calling fmod.
void CodeGenerator::visitModPowTwoD(LModPowTwoD* ins) {
  FloatRegister lhs = ToFloatRegister(ins->lhs());
  uint32_t divisor = ins->divisor();
  MOZ_ASSERT(mozilla::IsPowerOfTwo(divisor));

  FloatRegister output = ToFloatRegister(ins->output());

  // Compute |n % d| using |copysign(n - (d * trunc(n / d)), n)|.
  //
  // This doesn't work if |d| isn't a power of two, because we may lose too much
  // precision. For example |Number.MAX_VALUE % 3 == 2|, but
  // |3 * trunc(Number.MAX_VALUE / 3) == Infinity|.
  Label done;
  {
    ScratchDoubleScope scratch(masm);

    // Subnormals can lead to performance degradation, which can make calling
    // |fmod| faster than this inline implementation. Work around this issue by
    // directly returning the input for any value in the interval ]-1, +1[.
    Label notSubnormal;
    masm.loadConstantDouble(1.0, scratch);
    masm.loadConstantDouble(-1.0, output);
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, lhs, scratch,
                      &notSubnormal);
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, lhs, output,
                      &notSubnormal);

    masm.moveDouble(lhs, output);
    masm.jump(&done);

    masm.bind(&notSubnormal);

    if (divisor == 1) {
      // The pattern |n % 1 == 0| is used to detect integer numbers. We can skip
      // the multiplication by one in this case.
      masm.moveDouble(lhs, output);
      masm.nearbyIntDouble(RoundingMode::TowardsZero, output, scratch);
      masm.subDouble(scratch, output);
    } else {
      // scratch = trunc(n / d); output = n - d * scratch.
      masm.loadConstantDouble(1.0 / double(divisor), scratch);
      masm.loadConstantDouble(double(divisor), output);

      masm.mulDouble(lhs, scratch);
      masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
      masm.mulDouble(output, scratch);

      masm.moveDouble(lhs, output);
      masm.subDouble(scratch, output);
    }
  }

  // Propagate the sign of |n| so that e.g. |-0 % d| stays -0.
  masm.copySignDouble(output, lhs, output);
  masm.bind(&done);
}
// Wasm double modulo via the ModD builtin thunk; preserves InstanceReg around
// the ABI call.
void CodeGenerator::visitWasmBuiltinModD(LWasmBuiltinModD* ins) {
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();

  FloatRegister lhs = ToFloatRegister(ins->lhs());
  FloatRegister rhs = ToFloatRegister(ins->rhs());

  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);

  masm.setupWasmABICall();
  masm.passABIArg(lhs, ABIType::Float64);
  masm.passABIArg(rhs, ABIType::Float64);

  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD,
                   mozilla::Some(instanceOffset), ABIType::Float64);

  masm.Pop(InstanceReg);
}
// BigInt addition: inline fast path for operands that fit in a single
// pointer-sized digit, falling back to the BigInt::add VM call via OOL.
void CodeGenerator::visitBigIntAdd(LBigIntAdd* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::add>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n + x == x
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x + 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // Overflow of the digit addition also goes to the VM.
  masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt subtraction: inline single-digit fast path, OOL VM call fallback.
void CodeGenerator::visitBigIntSub(LBigIntSub* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::sub>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x - 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigInt(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // Overflow of the digit subtraction also goes to the VM.
  masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt multiplication: inline single-digit fast path, OOL VM call fallback.
void CodeGenerator::visitBigIntMul(LBigIntMul* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::mul>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n * x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x * 0n == 0n
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // Overflow of the digit multiplication also goes to the VM.
  masm.branchMulPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt division: inline single-digit fast path with the same special cases
// as BigInt::div() (divide-by-zero throws via VM, 0n/x, x/1n, digit-min/-1
// overflow), OOL VM call fallback.
void CodeGenerator::visitBigIntDiv(LBigIntDiv* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::div>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x / 0 throws an error.
  if (ins->mir()->canBeDivideByZero()) {
    masm.branchIfBigIntIsZero(rhs, ool->entry());
  }

  // 0n / x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // |BigInt::div()| returns |lhs| for |lhs / 1n|, which means there's no
  // allocation which might trigger a minor GC to free up nursery space. This
  // requires us to apply the same optimization here, otherwise we'd end up with
  // always entering the OOL call, because the nursery is never evicted.
  Label notOne;
  masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(1), &notOne);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&notOne);

  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Handle an integer overflow from INT{32,64}_MIN / -1.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::Equal, temp2, ImmWord(-1), ool->entry());
  masm.bind(&notOverflow);

  emitBigIntDiv(ins, temp1, temp2, output, ool->entry());

  masm.bind(ool->rejoin());
}
// BigInt modulo: inline single-digit fast path mirroring the allocation
// behavior of BigInt::mod() (|lhs| returned when |lhs| < |rhs|), OOL VM call
// fallback.
void CodeGenerator::visitBigIntMod(LBigIntMod* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::mod>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x % 0 throws an error.
  if (ins->mir()->canBeDivideByZero()) {
    masm.branchIfBigIntIsZero(rhs, ool->entry());
  }

  // 0n % x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
  masm.loadBigIntAbsolute(rhs, temp2, ool->entry());

  // Similar to the case for BigInt division, we must apply the same allocation
  // optimizations as performed in |BigInt::mod()|.
  Label notBelow;
  masm.branchPtr(Assembler::AboveOrEqual, temp1, temp2, &notBelow);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&notBelow);

  // Convert both digits to signed pointer-sized values.
  masm.bigIntDigitToSignedPtr(lhs, temp1, ool->entry());
  masm.bigIntDigitToSignedPtr(rhs, temp2, ool->entry());

  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Handle an integer overflow from INT{32,64}_MIN / -1.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(-1), &notOverflow);
  // INT_MIN % -1 == 0, so substitute a zero dividend.
  masm.movePtr(ImmWord(0), temp1);
  masm.bind(&notOverflow);

  emitBigIntMod(ins, temp1, temp2, output, ool->entry());

  masm.bind(ool->rejoin());
}
// BigInt exponentiation: handles the special bases/exponents (±1n, 0n, 1n)
// inline, then runs a square-and-multiply loop on pointer-sized digits;
// anything that can't be represented in one digit falls back to the
// BigInt::pow VM call via OOL.
void CodeGenerator::visitBigIntPow(LBigIntPow* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::pow>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x ** -y throws an error.
  if (ins->mir()->canBeNegativeExponent()) {
    masm.branchIfBigIntIsNegative(rhs, ool->entry());
  }

  Register dest = temp1;
  Register base = temp2;
  Register exponent = output;

  Label done;
  masm.movePtr(ImmWord(1), dest);  // p = 1

  // 1n ** y == 1n
  // -1n ** y == 1n when y is even
  // -1n ** y == -1n when y is odd
  Label lhsNotOne;
  masm.branch32(Assembler::Above, Address(lhs, BigInt::offsetOfLength()),
                Imm32(1), &lhsNotOne);
  masm.loadFirstBigIntDigitOrZero(lhs, base);
  masm.branchPtr(Assembler::NotEqual, base, Imm32(1), &lhsNotOne);

  masm.loadFirstBigIntDigitOrZero(rhs, exponent);

  Label lhsNonNegative;
  masm.branchIfBigIntIsNonNegative(lhs, &lhsNonNegative);
  // lhs is -1n: an even exponent yields 1n, which |dest| already holds.
  masm.branchTestPtr(Assembler::Zero, exponent, Imm32(1), &done);
  masm.bind(&lhsNonNegative);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());

  masm.bind(&lhsNotOne);

  // x ** 0n == 1n
  masm.branchIfBigIntIsZero(rhs, &done);

  // 0n ** y == 0n with y != 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);

  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());

  masm.bind(&lhsNonZero);

  // Call into the VM when the exponent can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntAbsolute(rhs, exponent, ool->entry());

  // x ** y with x > 1 and y >= DigitBits can't be pointer-sized.
  masm.branchPtr(Assembler::AboveOrEqual, exponent, Imm32(BigInt::DigitBits),
                 ool->entry());

  // x ** 1n == x
  Label rhsNotOne;
  masm.branch32(Assembler::NotEqual, exponent, Imm32(1), &rhsNotOne);

  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());

  masm.bind(&rhsNotOne);

  // Call into the VM when the base operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, base, ool->entry());

  // MacroAssembler::pow32() adjusted to work on pointer-sized registers.
  {
    // m = base
    // n = exponent

    Label start, loop;
    masm.jump(&start);
    masm.bind(&loop);

    // m *= m
    masm.branchMulPtr(Assembler::Overflow, base, base, ool->entry());

    masm.bind(&start);

    // if ((n & 1) != 0) p *= m
    Label even;
    masm.branchTest32(Assembler::Zero, exponent, Imm32(1), &even);
    masm.branchMulPtr(Assembler::Overflow, base, dest, ool->entry());
    masm.bind(&even);

    // n >>= 1
    // if (n == 0) return p
    masm.branchRshift32(Assembler::NonZero, Imm32(1), exponent, &loop);
  }

  MOZ_ASSERT(temp1 == dest);

  // Create and return the result.
  masm.bind(&done);
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
10449 void CodeGenerator::visitBigIntBitAnd(LBigIntBitAnd* ins) {
10450 Register lhs = ToRegister(ins->lhs());
10451 Register rhs = ToRegister(ins->rhs());
10452 Register temp1 = ToRegister(ins->temp1());
10453 Register temp2 = ToRegister(ins->temp2());
10454 Register output = ToRegister(ins->output());
10456 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10457 auto* ool = oolCallVM<Fn, BigInt::bitAnd>(ins, ArgList(lhs, rhs),
10458 StoreRegisterTo(output));
10460 // 0n & x == 0n
10461 Label lhsNonZero;
10462 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10463 masm.movePtr(lhs, output);
10464 masm.jump(ool->rejoin());
10465 masm.bind(&lhsNonZero);
10467 // x & 0n == 0n
10468 Label rhsNonZero;
10469 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10470 masm.movePtr(rhs, output);
10471 masm.jump(ool->rejoin());
10472 masm.bind(&rhsNonZero);
10474 // Call into the VM when either operand can't be loaded into a pointer-sized
10475 // register.
10476 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10477 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10479 masm.andPtr(temp2, temp1);
10481 // Create and return the result.
10482 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10483 masm.initializeBigInt(output, temp1);
10485 masm.bind(ool->rejoin());
10488 void CodeGenerator::visitBigIntBitOr(LBigIntBitOr* ins) {
10489 Register lhs = ToRegister(ins->lhs());
10490 Register rhs = ToRegister(ins->rhs());
10491 Register temp1 = ToRegister(ins->temp1());
10492 Register temp2 = ToRegister(ins->temp2());
10493 Register output = ToRegister(ins->output());
10495 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10496 auto* ool = oolCallVM<Fn, BigInt::bitOr>(ins, ArgList(lhs, rhs),
10497 StoreRegisterTo(output));
10499 // 0n | x == x
10500 Label lhsNonZero;
10501 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10502 masm.movePtr(rhs, output);
10503 masm.jump(ool->rejoin());
10504 masm.bind(&lhsNonZero);
10506 // x | 0n == x
10507 Label rhsNonZero;
10508 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10509 masm.movePtr(lhs, output);
10510 masm.jump(ool->rejoin());
10511 masm.bind(&rhsNonZero);
10513 // Call into the VM when either operand can't be loaded into a pointer-sized
10514 // register.
10515 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10516 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10518 masm.orPtr(temp2, temp1);
10520 // Create and return the result.
10521 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10522 masm.initializeBigInt(output, temp1);
10524 masm.bind(ool->rejoin());
10527 void CodeGenerator::visitBigIntBitXor(LBigIntBitXor* ins) {
10528 Register lhs = ToRegister(ins->lhs());
10529 Register rhs = ToRegister(ins->rhs());
10530 Register temp1 = ToRegister(ins->temp1());
10531 Register temp2 = ToRegister(ins->temp2());
10532 Register output = ToRegister(ins->output());
10534 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10535 auto* ool = oolCallVM<Fn, BigInt::bitXor>(ins, ArgList(lhs, rhs),
10536 StoreRegisterTo(output));
10538 // 0n ^ x == x
10539 Label lhsNonZero;
10540 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10541 masm.movePtr(rhs, output);
10542 masm.jump(ool->rejoin());
10543 masm.bind(&lhsNonZero);
10545 // x ^ 0n == x
10546 Label rhsNonZero;
10547 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10548 masm.movePtr(lhs, output);
10549 masm.jump(ool->rejoin());
10550 masm.bind(&rhsNonZero);
10552 // Call into the VM when either operand can't be loaded into a pointer-sized
10553 // register.
10554 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10555 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10557 masm.xorPtr(temp2, temp1);
10559 // Create and return the result.
10560 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10561 masm.initializeBigInt(output, temp1);
10563 masm.bind(ool->rejoin());
10566 void CodeGenerator::visitBigIntLsh(LBigIntLsh* ins) {
10567 Register lhs = ToRegister(ins->lhs());
10568 Register rhs = ToRegister(ins->rhs());
10569 Register temp1 = ToRegister(ins->temp1());
10570 Register temp2 = ToRegister(ins->temp2());
10571 Register temp3 = ToRegister(ins->temp3());
10572 Register output = ToRegister(ins->output());
10574 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10575 auto* ool = oolCallVM<Fn, BigInt::lsh>(ins, ArgList(lhs, rhs),
10576 StoreRegisterTo(output));
10578 // 0n << x == 0n
10579 Label lhsNonZero;
10580 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10581 masm.movePtr(lhs, output);
10582 masm.jump(ool->rejoin());
10583 masm.bind(&lhsNonZero);
10585 // x << 0n == x
10586 Label rhsNonZero;
10587 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10588 masm.movePtr(lhs, output);
10589 masm.jump(ool->rejoin());
10590 masm.bind(&rhsNonZero);
10592 // Inline |BigInt::lsh| for the case when |lhs| contains a single digit.
10594 Label rhsTooLarge;
10595 masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);
10597 // Call into the VM when the left-hand side operand can't be loaded into a
10598 // pointer-sized register.
10599 masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
10601 // Handle shifts exceeding |BigInt::DigitBits| first.
10602 Label shift, create;
10603 masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
10605 masm.bind(&rhsTooLarge);
10607 // x << DigitBits with x != 0n always exceeds pointer-sized storage.
10608 masm.branchIfBigIntIsNonNegative(rhs, ool->entry());
10610 // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
10611 masm.move32(Imm32(0), temp1);
10612 masm.branchIfBigIntIsNonNegative(lhs, &create);
10613 masm.move32(Imm32(1), temp1);
10614 masm.jump(&create);
10616 masm.bind(&shift);
10618 Label nonNegative;
10619 masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
10621 masm.movePtr(temp1, temp3);
10623 // |x << -y| is computed as |x >> y|.
10624 masm.rshiftPtr(temp2, temp1);
10626 // For negative numbers, round down if any bit was shifted out.
10627 masm.branchIfBigIntIsNonNegative(lhs, &create);
10629 // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
10630 masm.movePtr(ImmWord(-1), output);
10631 masm.lshiftPtr(temp2, output);
10632 masm.notPtr(output);
10634 // Add plus one when |(lhs.digit(0) & mask) != 0|.
10635 masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
10636 masm.addPtr(ImmWord(1), temp1);
10637 masm.jump(&create);
10639 masm.bind(&nonNegative);
10641 masm.movePtr(temp2, temp3);
10643 // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
10644 masm.negPtr(temp2);
10645 masm.addPtr(Imm32(BigInt::DigitBits), temp2);
10646 masm.movePtr(temp1, output);
10647 masm.rshiftPtr(temp2, output);
10649 // Call into the VM when any bit will be shifted out.
10650 masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());
10652 masm.movePtr(temp3, temp2);
10653 masm.lshiftPtr(temp2, temp1);
10655 masm.bind(&create);
10657 // Create and return the result.
10658 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10659 masm.initializeBigIntAbsolute(output, temp1);
10661 // Set the sign bit when the left-hand side is negative.
10662 masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
10663 masm.or32(Imm32(BigInt::signBitMask()),
10664 Address(output, BigInt::offsetOfFlags()));
10666 masm.bind(ool->rejoin());
10669 void CodeGenerator::visitBigIntRsh(LBigIntRsh* ins) {
10670 Register lhs = ToRegister(ins->lhs());
10671 Register rhs = ToRegister(ins->rhs());
10672 Register temp1 = ToRegister(ins->temp1());
10673 Register temp2 = ToRegister(ins->temp2());
10674 Register temp3 = ToRegister(ins->temp3());
10675 Register output = ToRegister(ins->output());
10677 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10678 auto* ool = oolCallVM<Fn, BigInt::rsh>(ins, ArgList(lhs, rhs),
10679 StoreRegisterTo(output));
10681 // 0n >> x == 0n
10682 Label lhsNonZero;
10683 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10684 masm.movePtr(lhs, output);
10685 masm.jump(ool->rejoin());
10686 masm.bind(&lhsNonZero);
10688 // x >> 0n == x
10689 Label rhsNonZero;
10690 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10691 masm.movePtr(lhs, output);
10692 masm.jump(ool->rejoin());
10693 masm.bind(&rhsNonZero);
10695 // Inline |BigInt::rsh| for the case when |lhs| contains a single digit.
10697 Label rhsTooLarge;
10698 masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);
10700 // Call into the VM when the left-hand side operand can't be loaded into a
10701 // pointer-sized register.
10702 masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
10704 // Handle shifts exceeding |BigInt::DigitBits| first.
10705 Label shift, create;
10706 masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
10708 masm.bind(&rhsTooLarge);
10710 // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
10711 masm.branchIfBigIntIsNegative(rhs, ool->entry());
10713 // x >> DigitBits is either 0n or -1n.
10714 masm.move32(Imm32(0), temp1);
10715 masm.branchIfBigIntIsNonNegative(lhs, &create);
10716 masm.move32(Imm32(1), temp1);
10717 masm.jump(&create);
10719 masm.bind(&shift);
10721 Label nonNegative;
10722 masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
10724 masm.movePtr(temp2, temp3);
10726 // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
10727 masm.negPtr(temp2);
10728 masm.addPtr(Imm32(BigInt::DigitBits), temp2);
10729 masm.movePtr(temp1, output);
10730 masm.rshiftPtr(temp2, output);
10732 // Call into the VM when any bit will be shifted out.
10733 masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());
10735 // |x >> -y| is computed as |x << y|.
10736 masm.movePtr(temp3, temp2);
10737 masm.lshiftPtr(temp2, temp1);
10738 masm.jump(&create);
10740 masm.bind(&nonNegative);
10742 masm.movePtr(temp1, temp3);
10744 masm.rshiftPtr(temp2, temp1);
10746 // For negative numbers, round down if any bit was shifted out.
10747 masm.branchIfBigIntIsNonNegative(lhs, &create);
10749 // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
10750 masm.movePtr(ImmWord(-1), output);
10751 masm.lshiftPtr(temp2, output);
10752 masm.notPtr(output);
10754 // Add plus one when |(lhs.digit(0) & mask) != 0|.
10755 masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
10756 masm.addPtr(ImmWord(1), temp1);
10758 masm.bind(&create);
10760 // Create and return the result.
10761 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10762 masm.initializeBigIntAbsolute(output, temp1);
10764 // Set the sign bit when the left-hand side is negative.
10765 masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
10766 masm.or32(Imm32(BigInt::signBitMask()),
10767 Address(output, BigInt::offsetOfFlags()));
10769 masm.bind(ool->rejoin());
10772 void CodeGenerator::visitBigIntIncrement(LBigIntIncrement* ins) {
10773 Register input = ToRegister(ins->input());
10774 Register temp1 = ToRegister(ins->temp1());
10775 Register temp2 = ToRegister(ins->temp2());
10776 Register output = ToRegister(ins->output());
10778 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10779 auto* ool =
10780 oolCallVM<Fn, BigInt::inc>(ins, ArgList(input), StoreRegisterTo(output));
10782 // Call into the VM when the input can't be loaded into a pointer-sized
10783 // register.
10784 masm.loadBigInt(input, temp1, ool->entry());
10785 masm.movePtr(ImmWord(1), temp2);
10787 masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10789 // Create and return the result.
10790 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10791 masm.initializeBigInt(output, temp1);
10793 masm.bind(ool->rejoin());
10796 void CodeGenerator::visitBigIntDecrement(LBigIntDecrement* ins) {
10797 Register input = ToRegister(ins->input());
10798 Register temp1 = ToRegister(ins->temp1());
10799 Register temp2 = ToRegister(ins->temp2());
10800 Register output = ToRegister(ins->output());
10802 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10803 auto* ool =
10804 oolCallVM<Fn, BigInt::dec>(ins, ArgList(input), StoreRegisterTo(output));
10806 // Call into the VM when the input can't be loaded into a pointer-sized
10807 // register.
10808 masm.loadBigInt(input, temp1, ool->entry());
10809 masm.movePtr(ImmWord(1), temp2);
10811 masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10813 // Create and return the result.
10814 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10815 masm.initializeBigInt(output, temp1);
10817 masm.bind(ool->rejoin());
10820 void CodeGenerator::visitBigIntNegate(LBigIntNegate* ins) {
10821 Register input = ToRegister(ins->input());
10822 Register temp = ToRegister(ins->temp());
10823 Register output = ToRegister(ins->output());
10825 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10826 auto* ool =
10827 oolCallVM<Fn, BigInt::neg>(ins, ArgList(input), StoreRegisterTo(output));
10829 // -0n == 0n
10830 Label lhsNonZero;
10831 masm.branchIfBigIntIsNonZero(input, &lhsNonZero);
10832 masm.movePtr(input, output);
10833 masm.jump(ool->rejoin());
10834 masm.bind(&lhsNonZero);
10836 // Call into the VM when the input uses heap digits.
10837 masm.copyBigIntWithInlineDigits(input, output, temp, initialBigIntHeap(),
10838 ool->entry());
10840 // Flip the sign bit.
10841 masm.xor32(Imm32(BigInt::signBitMask()),
10842 Address(output, BigInt::offsetOfFlags()));
10844 masm.bind(ool->rejoin());
10847 void CodeGenerator::visitBigIntBitNot(LBigIntBitNot* ins) {
10848 Register input = ToRegister(ins->input());
10849 Register temp1 = ToRegister(ins->temp1());
10850 Register temp2 = ToRegister(ins->temp2());
10851 Register output = ToRegister(ins->output());
10853 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10854 auto* ool = oolCallVM<Fn, BigInt::bitNot>(ins, ArgList(input),
10855 StoreRegisterTo(output));
10857 masm.loadBigIntAbsolute(input, temp1, ool->entry());
10859 // This follows the C++ implementation because it let's us support the full
10860 // range [-2^64, 2^64 - 1] on 64-bit resp. [-2^32, 2^32 - 1] on 32-bit.
10861 Label nonNegative, done;
10862 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
10864 // ~(-x) == ~(~(x-1)) == x-1
10865 masm.subPtr(Imm32(1), temp1);
10866 masm.jump(&done);
10868 masm.bind(&nonNegative);
10870 // ~x == -x-1 == -(x+1)
10871 masm.movePtr(ImmWord(1), temp2);
10872 masm.branchAddPtr(Assembler::CarrySet, temp2, temp1, ool->entry());
10874 masm.bind(&done);
10876 // Create and return the result.
10877 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10878 masm.initializeBigIntAbsolute(output, temp1);
10880 // Set the sign bit when the input is positive.
10881 masm.branchIfBigIntIsNegative(input, ool->rejoin());
10882 masm.or32(Imm32(BigInt::signBitMask()),
10883 Address(output, BigInt::offsetOfFlags()));
10885 masm.bind(ool->rejoin());
10888 void CodeGenerator::visitInt32ToStringWithBase(LInt32ToStringWithBase* lir) {
10889 Register input = ToRegister(lir->input());
10890 RegisterOrInt32 base = ToRegisterOrInt32(lir->base());
10891 Register output = ToRegister(lir->output());
10892 Register temp0 = ToRegister(lir->temp0());
10893 Register temp1 = ToRegister(lir->temp1());
10895 bool lowerCase = lir->mir()->lowerCase();
10897 using Fn = JSString* (*)(JSContext*, int32_t, int32_t, bool);
10898 if (base.is<Register>()) {
10899 auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
10900 lir, ArgList(input, base.as<Register>(), Imm32(lowerCase)),
10901 StoreRegisterTo(output));
10903 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
10904 masm.loadInt32ToStringWithBase(input, base.as<Register>(), output, temp0,
10905 temp1, gen->runtime->staticStrings(),
10906 liveRegs, lowerCase, ool->entry());
10907 masm.bind(ool->rejoin());
10908 } else {
10909 auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
10910 lir, ArgList(input, Imm32(base.as<int32_t>()), Imm32(lowerCase)),
10911 StoreRegisterTo(output));
10913 masm.loadInt32ToStringWithBase(input, base.as<int32_t>(), output, temp0,
10914 temp1, gen->runtime->staticStrings(),
10915 lowerCase, ool->entry());
10916 masm.bind(ool->rejoin());
10920 void CodeGenerator::visitNumberParseInt(LNumberParseInt* lir) {
10921 Register string = ToRegister(lir->string());
10922 Register radix = ToRegister(lir->radix());
10923 ValueOperand output = ToOutValue(lir);
10924 Register temp = ToRegister(lir->temp0());
10926 #ifdef DEBUG
10927 Label ok;
10928 masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
10929 masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
10930 masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
10931 masm.bind(&ok);
10932 #endif
10934 // Use indexed value as fast path if possible.
10935 Label vmCall, done;
10936 masm.loadStringIndexValue(string, temp, &vmCall);
10937 masm.tagValue(JSVAL_TYPE_INT32, temp, output);
10938 masm.jump(&done);
10940 masm.bind(&vmCall);
10942 pushArg(radix);
10943 pushArg(string);
10945 using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
10946 callVM<Fn, js::NumberParseInt>(lir);
10948 masm.bind(&done);
10951 void CodeGenerator::visitDoubleParseInt(LDoubleParseInt* lir) {
10952 FloatRegister number = ToFloatRegister(lir->number());
10953 Register output = ToRegister(lir->output());
10954 FloatRegister temp = ToFloatRegister(lir->temp0());
10956 Label bail;
10957 masm.branchDouble(Assembler::DoubleUnordered, number, number, &bail);
10958 masm.branchTruncateDoubleToInt32(number, output, &bail);
10960 Label ok;
10961 masm.branch32(Assembler::NotEqual, output, Imm32(0), &ok);
10963 // Accept both +0 and -0 and return 0.
10964 masm.loadConstantDouble(0.0, temp);
10965 masm.branchDouble(Assembler::DoubleEqual, number, temp, &ok);
10967 // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
10968 masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, temp);
10969 masm.branchDouble(Assembler::DoubleLessThan, number, temp, &bail);
10971 masm.bind(&ok);
10973 bailoutFrom(&bail, lir->snapshot());
10976 void CodeGenerator::visitFloor(LFloor* lir) {
10977 FloatRegister input = ToFloatRegister(lir->input());
10978 Register output = ToRegister(lir->output());
10980 Label bail;
10981 masm.floorDoubleToInt32(input, output, &bail);
10982 bailoutFrom(&bail, lir->snapshot());
10985 void CodeGenerator::visitFloorF(LFloorF* lir) {
10986 FloatRegister input = ToFloatRegister(lir->input());
10987 Register output = ToRegister(lir->output());
10989 Label bail;
10990 masm.floorFloat32ToInt32(input, output, &bail);
10991 bailoutFrom(&bail, lir->snapshot());
10994 void CodeGenerator::visitCeil(LCeil* lir) {
10995 FloatRegister input = ToFloatRegister(lir->input());
10996 Register output = ToRegister(lir->output());
10998 Label bail;
10999 masm.ceilDoubleToInt32(input, output, &bail);
11000 bailoutFrom(&bail, lir->snapshot());
11003 void CodeGenerator::visitCeilF(LCeilF* lir) {
11004 FloatRegister input = ToFloatRegister(lir->input());
11005 Register output = ToRegister(lir->output());
11007 Label bail;
11008 masm.ceilFloat32ToInt32(input, output, &bail);
11009 bailoutFrom(&bail, lir->snapshot());
11012 void CodeGenerator::visitRound(LRound* lir) {
11013 FloatRegister input = ToFloatRegister(lir->input());
11014 FloatRegister temp = ToFloatRegister(lir->temp0());
11015 Register output = ToRegister(lir->output());
11017 Label bail;
11018 masm.roundDoubleToInt32(input, output, temp, &bail);
11019 bailoutFrom(&bail, lir->snapshot());
11022 void CodeGenerator::visitRoundF(LRoundF* lir) {
11023 FloatRegister input = ToFloatRegister(lir->input());
11024 FloatRegister temp = ToFloatRegister(lir->temp0());
11025 Register output = ToRegister(lir->output());
11027 Label bail;
11028 masm.roundFloat32ToInt32(input, output, temp, &bail);
11029 bailoutFrom(&bail, lir->snapshot());
11032 void CodeGenerator::visitTrunc(LTrunc* lir) {
11033 FloatRegister input = ToFloatRegister(lir->input());
11034 Register output = ToRegister(lir->output());
11036 Label bail;
11037 masm.truncDoubleToInt32(input, output, &bail);
11038 bailoutFrom(&bail, lir->snapshot());
11041 void CodeGenerator::visitTruncF(LTruncF* lir) {
11042 FloatRegister input = ToFloatRegister(lir->input());
11043 Register output = ToRegister(lir->output());
11045 Label bail;
11046 masm.truncFloat32ToInt32(input, output, &bail);
11047 bailoutFrom(&bail, lir->snapshot());
11050 void CodeGenerator::visitCompareS(LCompareS* lir) {
11051 JSOp op = lir->mir()->jsop();
11052 Register left = ToRegister(lir->left());
11053 Register right = ToRegister(lir->right());
11054 Register output = ToRegister(lir->output());
11056 OutOfLineCode* ool = nullptr;
11058 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
11059 if (op == JSOp::Eq || op == JSOp::StrictEq) {
11060 ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
11061 lir, ArgList(left, right), StoreRegisterTo(output));
11062 } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
11063 ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
11064 lir, ArgList(left, right), StoreRegisterTo(output));
11065 } else if (op == JSOp::Lt) {
11066 ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
11067 lir, ArgList(left, right), StoreRegisterTo(output));
11068 } else if (op == JSOp::Le) {
11069 // Push the operands in reverse order for JSOp::Le:
11070 // - |left <= right| is implemented as |right >= left|.
11071 ool =
11072 oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
11073 lir, ArgList(right, left), StoreRegisterTo(output));
11074 } else if (op == JSOp::Gt) {
11075 // Push the operands in reverse order for JSOp::Gt:
11076 // - |left > right| is implemented as |right < left|.
11077 ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
11078 lir, ArgList(right, left), StoreRegisterTo(output));
11079 } else {
11080 MOZ_ASSERT(op == JSOp::Ge);
11081 ool =
11082 oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
11083 lir, ArgList(left, right), StoreRegisterTo(output));
11086 masm.compareStrings(op, left, right, output, ool->entry());
11088 masm.bind(ool->rejoin());
11091 void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
11092 JSOp op = lir->mir()->jsop();
11093 MOZ_ASSERT(IsEqualityOp(op));
11095 Register input = ToRegister(lir->input());
11096 Register output = ToRegister(lir->output());
11098 const JSLinearString* str = lir->constant();
11099 MOZ_ASSERT(str->length() > 0);
11101 OutOfLineCode* ool = nullptr;
11103 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
11104 if (op == JSOp::Eq || op == JSOp::StrictEq) {
11105 ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
11106 lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
11107 } else {
11108 MOZ_ASSERT(op == JSOp::Ne || op == JSOp::StrictNe);
11109 ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
11110 lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
11113 Label compareChars;
11115 Label notPointerEqual;
11117 // If operands point to the same instance, the strings are trivially equal.
11118 masm.branchPtr(Assembler::NotEqual, input, ImmGCPtr(str), &notPointerEqual);
11119 masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
11120 masm.jump(ool->rejoin());
11122 masm.bind(&notPointerEqual);
11124 Label setNotEqualResult;
11125 if (str->isAtom()) {
11126 // Atoms cannot be equal to each other if they point to different strings.
11127 Imm32 atomBit(JSString::ATOM_BIT);
11128 masm.branchTest32(Assembler::NonZero,
11129 Address(input, JSString::offsetOfFlags()), atomBit,
11130 &setNotEqualResult);
11133 if (str->hasTwoByteChars()) {
11134 // Pure two-byte strings can't be equal to Latin-1 strings.
11135 JS::AutoCheckCannotGC nogc;
11136 if (!mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
11137 masm.branchLatin1String(input, &setNotEqualResult);
11141 // Strings of different length can never be equal.
11142 masm.branch32(Assembler::Equal, Address(input, JSString::offsetOfLength()),
11143 Imm32(str->length()), &compareChars);
11145 masm.bind(&setNotEqualResult);
11146 masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
11147 masm.jump(ool->rejoin());
11150 masm.bind(&compareChars);
11152 // Load the input string's characters.
11153 Register stringChars = output;
11154 masm.loadStringCharsForCompare(input, str, stringChars, ool->entry());
11156 // Start comparing character by character.
11157 masm.compareStringChars(op, stringChars, str, output);
11159 masm.bind(ool->rejoin());
11162 void CodeGenerator::visitCompareSSingle(LCompareSSingle* lir) {
11163 JSOp op = lir->jsop();
11164 MOZ_ASSERT(IsRelationalOp(op));
11166 Register input = ToRegister(lir->input());
11167 Register output = ToRegister(lir->output());
11168 Register temp = ToRegister(lir->temp0());
11170 const JSLinearString* str = lir->constant();
11171 MOZ_ASSERT(str->length() == 1);
11173 char16_t ch = str->latin1OrTwoByteChar(0);
11175 masm.movePtr(input, temp);
11177 // Check if the string is empty.
11178 Label compareLength;
11179 masm.branch32(Assembler::Equal, Address(temp, JSString::offsetOfLength()),
11180 Imm32(0), &compareLength);
11182 // The first character is in the left-most rope child.
11183 Label notRope;
11184 masm.branchIfNotRope(temp, &notRope);
11186 // Unwind ropes at the start if possible.
11187 Label unwindRope;
11188 masm.bind(&unwindRope);
11189 masm.loadRopeLeftChild(temp, output);
11190 masm.movePtr(output, temp);
11192 #ifdef DEBUG
11193 Label notEmpty;
11194 masm.branch32(Assembler::NotEqual,
11195 Address(temp, JSString::offsetOfLength()), Imm32(0),
11196 &notEmpty);
11197 masm.assumeUnreachable("rope children are non-empty");
11198 masm.bind(&notEmpty);
11199 #endif
11201 // Otherwise keep unwinding ropes.
11202 masm.branchIfRope(temp, &unwindRope);
11204 masm.bind(&notRope);
11206 // Load the first character into |output|.
11207 auto loadFirstChar = [&](auto encoding) {
11208 masm.loadStringChars(temp, output, encoding);
11209 masm.loadChar(Address(output, 0), output, encoding);
11212 Label done;
11213 if (ch <= JSString::MAX_LATIN1_CHAR) {
11214 // Handle both encodings when the search character is Latin-1.
11215 Label twoByte, compare;
11216 masm.branchTwoByteString(temp, &twoByte);
11218 loadFirstChar(CharEncoding::Latin1);
11219 masm.jump(&compare);
11221 masm.bind(&twoByte);
11222 loadFirstChar(CharEncoding::TwoByte);
11224 masm.bind(&compare);
11225 } else {
11226 // The search character is a two-byte character, so it can't be equal to any
11227 // character of a Latin-1 string.
11228 masm.move32(Imm32(int32_t(op == JSOp::Lt || op == JSOp::Le)), output);
11229 masm.branchLatin1String(temp, &done);
11231 loadFirstChar(CharEncoding::TwoByte);
11234 // Compare the string length when the search character is equal to the
11235 // input's first character.
11236 masm.branch32(Assembler::Equal, output, Imm32(ch), &compareLength);
11238 // Otherwise compute the result and jump to the end.
11239 masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false), output, Imm32(ch),
11240 output);
11241 masm.jump(&done);
11243 // Compare the string length to compute the overall result.
11244 masm.bind(&compareLength);
11245 masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
11246 Address(temp, JSString::offsetOfLength()), Imm32(1), output);
11248 masm.bind(&done);
11251 void CodeGenerator::visitCompareBigInt(LCompareBigInt* lir) {
11252 JSOp op = lir->mir()->jsop();
11253 Register left = ToRegister(lir->left());
11254 Register right = ToRegister(lir->right());
11255 Register temp0 = ToRegister(lir->temp0());
11256 Register temp1 = ToRegister(lir->temp1());
11257 Register temp2 = ToRegister(lir->temp2());
11258 Register output = ToRegister(lir->output());
11260 Label notSame;
11261 Label compareSign;
11262 Label compareLength;
11263 Label compareDigit;
11265 Label* notSameSign;
11266 Label* notSameLength;
11267 Label* notSameDigit;
11268 if (IsEqualityOp(op)) {
11269 notSameSign = &notSame;
11270 notSameLength = &notSame;
11271 notSameDigit = &notSame;
11272 } else {
11273 notSameSign = &compareSign;
11274 notSameLength = &compareLength;
11275 notSameDigit = &compareDigit;
11278 masm.equalBigInts(left, right, temp0, temp1, temp2, output, notSameSign,
11279 notSameLength, notSameDigit);
11281 Label done;
11282 masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
11283 op == JSOp::Ge),
11284 output);
11285 masm.jump(&done);
11287 if (IsEqualityOp(op)) {
11288 masm.bind(&notSame);
11289 masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
11290 } else {
11291 Label invertWhenNegative;
11293 // There are two cases when sign(left) != sign(right):
11294 // 1. sign(left) = positive and sign(right) = negative,
11295 // 2. or the dual case with reversed signs.
11297 // For case 1, |left| <cmp> |right| is true for cmp=Gt or cmp=Ge and false
11298 // for cmp=Lt or cmp=Le. Initialize the result for case 1 and handle case 2
11299 // with |invertWhenNegative|.
11300 masm.bind(&compareSign);
11301 masm.move32(Imm32(op == JSOp::Gt || op == JSOp::Ge), output);
11302 masm.jump(&invertWhenNegative);
11304 // For sign(left) = sign(right) and len(digits(left)) != len(digits(right)),
11305 // we have to consider the two cases:
11306 // 1. len(digits(left)) < len(digits(right))
11307 // 2. len(digits(left)) > len(digits(right))
11309 // For |left| <cmp> |right| with cmp=Lt:
11310 // Assume both BigInts are positive, then |left < right| is true for case 1
11311 // and false for case 2. When both are negative, the result is reversed.
11313 // The other comparison operators can be handled similarly.
11315 // |temp0| holds the digits length of the right-hand side operand.
11316 masm.bind(&compareLength);
11317 masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
11318 Address(left, BigInt::offsetOfLength()), temp0, output);
11319 masm.jump(&invertWhenNegative);
11321 // Similar to the case above, compare the current digit to determine the
11322 // overall comparison result.
11324 // |temp1| points to the current digit of the left-hand side operand.
11325 // |output| holds the current digit of the right-hand side operand.
11326 masm.bind(&compareDigit);
11327 masm.cmpPtrSet(JSOpToCondition(op, /* isSigned = */ false),
11328 Address(temp1, 0), output, output);
11330 Label nonNegative;
11331 masm.bind(&invertWhenNegative);
11332 masm.branchIfBigIntIsNonNegative(left, &nonNegative);
11333 masm.xor32(Imm32(1), output);
11334 masm.bind(&nonNegative);
11337 masm.bind(&done);
11340 void CodeGenerator::visitCompareBigIntInt32(LCompareBigIntInt32* lir) {
11341 JSOp op = lir->mir()->jsop();
11342 Register left = ToRegister(lir->left());
11343 Register right = ToRegister(lir->right());
11344 Register temp0 = ToRegister(lir->temp0());
11345 Register temp1 = ToRegister(lir->temp1());
11346 Register output = ToRegister(lir->output());
11348 Label ifTrue, ifFalse;
11349 masm.compareBigIntAndInt32(op, left, right, temp0, temp1, &ifTrue, &ifFalse);
11351 Label done;
11352 masm.bind(&ifFalse);
11353 masm.move32(Imm32(0), output);
11354 masm.jump(&done);
11355 masm.bind(&ifTrue);
11356 masm.move32(Imm32(1), output);
11357 masm.bind(&done);
11360 void CodeGenerator::visitCompareBigIntDouble(LCompareBigIntDouble* lir) {
11361 JSOp op = lir->mir()->jsop();
11362 Register left = ToRegister(lir->left());
11363 FloatRegister right = ToFloatRegister(lir->right());
11364 Register output = ToRegister(lir->output());
11366 masm.setupAlignedABICall();
11368 // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
11369 // - |left <= right| is implemented as |right >= left|.
11370 // - |left > right| is implemented as |right < left|.
11371 if (op == JSOp::Le || op == JSOp::Gt) {
11372 masm.passABIArg(right, ABIType::Float64);
11373 masm.passABIArg(left);
11374 } else {
11375 masm.passABIArg(left);
11376 masm.passABIArg(right, ABIType::Float64);
11379 using FnBigIntNumber = bool (*)(BigInt*, double);
11380 using FnNumberBigInt = bool (*)(double, BigInt*);
11381 switch (op) {
11382 case JSOp::Eq: {
11383 masm.callWithABI<FnBigIntNumber,
11384 jit::BigIntNumberEqual<EqualityKind::Equal>>();
11385 break;
11387 case JSOp::Ne: {
11388 masm.callWithABI<FnBigIntNumber,
11389 jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
11390 break;
11392 case JSOp::Lt: {
11393 masm.callWithABI<FnBigIntNumber,
11394 jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
11395 break;
11397 case JSOp::Gt: {
11398 masm.callWithABI<FnNumberBigInt,
11399 jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
11400 break;
11402 case JSOp::Le: {
11403 masm.callWithABI<
11404 FnNumberBigInt,
11405 jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
11406 break;
11408 case JSOp::Ge: {
11409 masm.callWithABI<
11410 FnBigIntNumber,
11411 jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
11412 break;
11414 default:
11415 MOZ_CRASH("unhandled op");
11418 masm.storeCallBoolResult(output);
11421 void CodeGenerator::visitCompareBigIntString(LCompareBigIntString* lir) {
11422 JSOp op = lir->mir()->jsop();
11423 Register left = ToRegister(lir->left());
11424 Register right = ToRegister(lir->right());
11426 // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
11427 // - |left <= right| is implemented as |right >= left|.
11428 // - |left > right| is implemented as |right < left|.
11429 if (op == JSOp::Le || op == JSOp::Gt) {
11430 pushArg(left);
11431 pushArg(right);
11432 } else {
11433 pushArg(right);
11434 pushArg(left);
11437 using FnBigIntString =
11438 bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
11439 using FnStringBigInt =
11440 bool (*)(JSContext*, HandleString, HandleBigInt, bool*);
11442 switch (op) {
11443 case JSOp::Eq: {
11444 constexpr auto Equal = EqualityKind::Equal;
11445 callVM<FnBigIntString, BigIntStringEqual<Equal>>(lir);
11446 break;
11448 case JSOp::Ne: {
11449 constexpr auto NotEqual = EqualityKind::NotEqual;
11450 callVM<FnBigIntString, BigIntStringEqual<NotEqual>>(lir);
11451 break;
11453 case JSOp::Lt: {
11454 constexpr auto LessThan = ComparisonKind::LessThan;
11455 callVM<FnBigIntString, BigIntStringCompare<LessThan>>(lir);
11456 break;
11458 case JSOp::Gt: {
11459 constexpr auto LessThan = ComparisonKind::LessThan;
11460 callVM<FnStringBigInt, StringBigIntCompare<LessThan>>(lir);
11461 break;
11463 case JSOp::Le: {
11464 constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
11465 callVM<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>(lir);
11466 break;
11468 case JSOp::Ge: {
11469 constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
11470 callVM<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>(lir);
11471 break;
11473 default:
11474 MOZ_CRASH("Unexpected compare op");
// Emits the loose-equality comparison |value ==/!= null| (or undefined) on a
// boxed Value, producing 0/1 in the output register.  Objects participate
// because an object that "emulates undefined" (e.g. document.all) compares
// equal to null/undefined under loose equality.  Which variant is emitted is
// decided by the object-emulates-undefined fuse: while the fuse is intact no
// object in the runtime emulates undefined, so the per-object check can be
// skipped (debug/fuzzing builds still assert that invariant).
11478 void CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir) {
11479 MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
11480 lir->mir()->compareType() == MCompare::Compare_Null);
11482 JSOp op = lir->mir()->jsop();
11483 MOZ_ASSERT(IsLooseEqualityOp(op));
11485 const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::ValueIndex);
11486 Register output = ToRegister(lir->output());
// |intact| notes a dependency on the fuse, so this script is invalidated if
// an emulates-undefined object is created later.
11488 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
11489 if (!intact) {
// Slow variant: objects must be tested out-of-line for the
// emulates-undefined flag.
11490 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
11491 addOutOfLineCode(ool, lir->mir());
11493 Label* nullOrLikeUndefined = ool->label1();
11494 Label* notNullOrLikeUndefined = ool->label2();
11497 ScratchTagScope tag(masm, value);
11498 masm.splitTagForTest(value, tag);
// A null or undefined tag is an immediate match.
11500 masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
11501 masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);
11503 // Check whether it's a truthy object or a falsy object that emulates
11504 // undefined.
11505 masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
11508 Register objreg =
11509 masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
11510 branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined,
11511 notNullOrLikeUndefined, output, ool);
11512 // fall through
11514 Label done;
11516 // It's not null or undefined, and if it's an object it doesn't
11517 // emulate undefined, so it's not like undefined.
11518 masm.move32(Imm32(op == JSOp::Ne), output);
11519 masm.jump(&done);
11521 masm.bind(nullOrLikeUndefined);
11522 masm.move32(Imm32(op == JSOp::Eq), output);
11524 // Both branches meet here.
11525 masm.bind(&done);
11526 } else {
// Fast variant: only the tag tests are needed.  Debug/fuzzing builds still
// unbox the object and assert it does not emulate undefined.
11527 Label nullOrUndefined, notNullOrLikeUndefined;
11528 #if defined(DEBUG) || defined(FUZZING)
11529 Register objreg = Register::Invalid();
11530 #endif
11532 ScratchTagScope tag(masm, value);
11533 masm.splitTagForTest(value, tag);
11535 masm.branchTestNull(Assembler::Equal, tag, &nullOrUndefined);
11536 masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);
11538 #if defined(DEBUG) || defined(FUZZING)
11539 // Check whether it's a truthy object or a falsy object that emulates
11540 // undefined.
11541 masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);
11542 objreg = masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
11543 #endif
11546 #if defined(DEBUG) || defined(FUZZING)
11547 assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
11548 masm.bind(&notNullOrLikeUndefined);
11549 #endif
11551 Label done;
11553 // It's not null or undefined, and if it's an object it doesn't
11554 // emulate undefined.
11555 masm.move32(Imm32(op == JSOp::Ne), output);
11556 masm.jump(&done);
11558 masm.bind(&nullOrUndefined);
11559 masm.move32(Imm32(op == JSOp::Eq), output);
11561 // Both branches meet here.
11562 masm.bind(&done);
// Branch form of the loose-equality comparison |value ==/!= null/undefined|
// on a boxed Value.  For != the targets are swapped up front so the body
// only has to implement the == direction.  The object-emulates-undefined
// fuse decides whether objects need the out-of-line emulates-undefined test
// or can fall straight through to the false target.
11566 void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(
11567 LIsNullOrLikeUndefinedAndBranchV* lir) {
11568 MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
11569 lir->cmpMir()->compareType() == MCompare::Compare_Null);
11571 JSOp op = lir->cmpMir()->jsop();
11572 MOZ_ASSERT(IsLooseEqualityOp(op));
11574 const ValueOperand value =
11575 ToValue(lir, LIsNullOrLikeUndefinedAndBranchV::Value);
11577 MBasicBlock* ifTrue = lir->ifTrue();
11578 MBasicBlock* ifFalse = lir->ifFalse();
11580 if (op == JSOp::Ne) {
11581 // Swap branches.
11582 std::swap(ifTrue, ifFalse);
11585 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
11587 Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
11588 Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
11591 ScratchTagScope tag(masm, value);
11592 masm.splitTagForTest(value, tag);
// null/undefined tags branch directly to the true target; all remaining
// non-object tags can never compare equal.
11594 masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
11595 masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);
11597 masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
11600 bool extractObject = !intact;
11601 #if defined(DEBUG) || defined(FUZZING)
11602 // always extract objreg if we're in debug and
11603 // assertObjectDoesNotEmulateUndefined;
11604 extractObject = true;
11605 #endif
11607 Register objreg = Register::Invalid();
11608 Register scratch = ToRegister(lir->temp());
11609 if (extractObject) {
11610 objreg = masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
11612 if (!intact) {
11613 // Objects that emulate undefined are loosely equal to null/undefined.
11614 OutOfLineTestObject* ool = new (alloc()) OutOfLineTestObject();
11615 addOutOfLineCode(ool, lir->cmpMir());
11616 testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch,
11617 ool);
11618 } else {
// Fuse intact: no object emulates undefined, so objects always take the
// false branch (assert the invariant in debug/fuzzing builds).
11619 assertObjectDoesNotEmulateUndefined(objreg, scratch, lir->cmpMir());
11620 // Bug 1874905. This would be nice to optimize out at the MIR level.
11621 masm.jump(ifFalseLabel);
11625 void CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir) {
11626 MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
11627 lir->mir()->compareType() == MCompare::Compare_Null);
11628 MOZ_ASSERT(lir->mir()->lhs()->type() == MIRType::Object);
11630 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
11631 JSOp op = lir->mir()->jsop();
11632 Register output = ToRegister(lir->output());
11633 Register objreg = ToRegister(lir->input());
11634 if (!intact) {
11635 MOZ_ASSERT(IsLooseEqualityOp(op),
11636 "Strict equality should have been folded");
11638 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
11639 addOutOfLineCode(ool, lir->mir());
11641 Label* emulatesUndefined = ool->label1();
11642 Label* doesntEmulateUndefined = ool->label2();
11644 branchTestObjectEmulatesUndefined(objreg, emulatesUndefined,
11645 doesntEmulateUndefined, output, ool);
11647 Label done;
11649 masm.move32(Imm32(op == JSOp::Ne), output);
11650 masm.jump(&done);
11652 masm.bind(emulatesUndefined);
11653 masm.move32(Imm32(op == JSOp::Eq), output);
11654 masm.bind(&done);
11655 } else {
11656 assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
11657 masm.move32(Imm32(op == JSOp::Ne), output);
11661 void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(
11662 LIsNullOrLikeUndefinedAndBranchT* lir) {
11663 MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
11664 lir->cmpMir()->compareType() == MCompare::Compare_Null);
11665 MOZ_ASSERT(lir->cmpMir()->lhs()->type() == MIRType::Object);
11667 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
11669 JSOp op = lir->cmpMir()->jsop();
11670 MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");
11672 MBasicBlock* ifTrue = lir->ifTrue();
11673 MBasicBlock* ifFalse = lir->ifFalse();
11675 if (op == JSOp::Ne) {
11676 // Swap branches.
11677 std::swap(ifTrue, ifFalse);
11680 Register input = ToRegister(lir->getOperand(0));
11681 Register scratch = ToRegister(lir->temp());
11682 Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
11683 Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
11685 if (intact) {
11686 // Bug 1874905. Ideally branches like this would be optimized out.
11687 assertObjectDoesNotEmulateUndefined(input, scratch, lir->mir());
11688 masm.jump(ifFalseLabel);
11689 } else {
11690 auto* ool = new (alloc()) OutOfLineTestObject();
11691 addOutOfLineCode(ool, lir->cmpMir());
11693 // Objects that emulate undefined are loosely equal to null/undefined.
11694 testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
11698 void CodeGenerator::visitIsNull(LIsNull* lir) {
11699 MCompare::CompareType compareType = lir->mir()->compareType();
11700 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11702 JSOp op = lir->mir()->jsop();
11703 MOZ_ASSERT(IsStrictEqualityOp(op));
11705 const ValueOperand value = ToValue(lir, LIsNull::ValueIndex);
11706 Register output = ToRegister(lir->output());
11708 Assembler::Condition cond = JSOpToCondition(compareType, op);
11709 masm.testNullSet(cond, value, output);
11712 void CodeGenerator::visitIsUndefined(LIsUndefined* lir) {
11713 MCompare::CompareType compareType = lir->mir()->compareType();
11714 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
11716 JSOp op = lir->mir()->jsop();
11717 MOZ_ASSERT(IsStrictEqualityOp(op));
11719 const ValueOperand value = ToValue(lir, LIsUndefined::ValueIndex);
11720 Register output = ToRegister(lir->output());
11722 Assembler::Condition cond = JSOpToCondition(compareType, op);
11723 masm.testUndefinedSet(cond, value, output);
11726 void CodeGenerator::visitIsNullAndBranch(LIsNullAndBranch* lir) {
11727 MCompare::CompareType compareType = lir->cmpMir()->compareType();
11728 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11730 JSOp op = lir->cmpMir()->jsop();
11731 MOZ_ASSERT(IsStrictEqualityOp(op));
11733 const ValueOperand value = ToValue(lir, LIsNullAndBranch::Value);
11735 Assembler::Condition cond = JSOpToCondition(compareType, op);
11736 testNullEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
11739 void CodeGenerator::visitIsUndefinedAndBranch(LIsUndefinedAndBranch* lir) {
11740 MCompare::CompareType compareType = lir->cmpMir()->compareType();
11741 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
11743 JSOp op = lir->cmpMir()->jsop();
11744 MOZ_ASSERT(IsStrictEqualityOp(op));
11746 const ValueOperand value = ToValue(lir, LIsUndefinedAndBranch::Value);
11748 Assembler::Condition cond = JSOpToCondition(compareType, op);
11749 testUndefinedEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
11752 void CodeGenerator::visitSameValueDouble(LSameValueDouble* lir) {
11753 FloatRegister left = ToFloatRegister(lir->left());
11754 FloatRegister right = ToFloatRegister(lir->right());
11755 FloatRegister temp = ToFloatRegister(lir->temp0());
11756 Register output = ToRegister(lir->output());
11758 masm.sameValueDouble(left, right, temp, output);
11761 void CodeGenerator::visitSameValue(LSameValue* lir) {
11762 ValueOperand lhs = ToValue(lir, LSameValue::LhsIndex);
11763 ValueOperand rhs = ToValue(lir, LSameValue::RhsIndex);
11764 Register output = ToRegister(lir->output());
11766 using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
11767 OutOfLineCode* ool =
11768 oolCallVM<Fn, SameValue>(lir, ArgList(lhs, rhs), StoreRegisterTo(output));
11770 // First check to see if the values have identical bits.
11771 // This is correct for SameValue because SameValue(NaN,NaN) is true,
11772 // and SameValue(0,-0) is false.
11773 masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
11774 ool->entry());
11775 masm.move32(Imm32(1), output);
11777 // If this fails, call SameValue.
11778 masm.bind(ool->rejoin());
11781 void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs,
11782 Register output) {
11783 using Fn =
11784 JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
11785 OutOfLineCode* ool = oolCallVM<Fn, ConcatStrings<CanGC>>(
11786 lir, ArgList(lhs, rhs, static_cast<Imm32>(int32_t(gc::Heap::Default))),
11787 StoreRegisterTo(output));
11789 const JitZone* jitZone = gen->realm->zone()->jitZone();
11790 JitCode* stringConcatStub =
11791 jitZone->stringConcatStubNoBarrier(&zoneStubsToReadBarrier_);
11792 masm.call(stringConcatStub);
11793 masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());
11795 masm.bind(ool->rejoin());
11798 void CodeGenerator::visitConcat(LConcat* lir) {
11799 Register lhs = ToRegister(lir->lhs());
11800 Register rhs = ToRegister(lir->rhs());
11802 Register output = ToRegister(lir->output());
11804 MOZ_ASSERT(lhs == CallTempReg0);
11805 MOZ_ASSERT(rhs == CallTempReg1);
11806 MOZ_ASSERT(ToRegister(lir->temp0()) == CallTempReg0);
11807 MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg1);
11808 MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg2);
11809 MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg3);
11810 MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg4);
11811 MOZ_ASSERT(output == CallTempReg5);
11813 emitConcat(lir, lhs, rhs, output);
// Emits a character-copy loop from |from| to |to|, converting between the
// given encodings.  |len| must be non-zero on entry; |from|, |to| and |len|
// are clobbered.  When both encodings match, the copy is widened up to
// pointer-sized chunks and small loops (bounded by |maximumLength| or the
// fat-inline capacity) are fully unrolled; otherwise a simple char-by-char
// loop inflates Latin-1 to TwoByte.
11816 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
11817 Register len, Register byteOpScratch,
11818 CharEncoding fromEncoding, CharEncoding toEncoding,
11819 size_t maximumLength = SIZE_MAX) {
11820 // Copy |len| char16_t code units from |from| to |to|. Assumes len > 0
11821 // (checked below in debug builds), and when done |to| must point to the
11822 // next available char.
11824 #ifdef DEBUG
11825 Label ok;
11826 masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
11827 masm.assumeUnreachable("Length should be greater than 0.");
11828 masm.bind(&ok);
11830 if (maximumLength != SIZE_MAX) {
11831 MOZ_ASSERT(maximumLength <= INT32_MAX, "maximum length fits into int32");
11833 Label ok;
11834 masm.branchPtr(Assembler::BelowOrEqual, len, Imm32(maximumLength), &ok);
11835 masm.assumeUnreachable("Length should not exceed maximum length.");
11836 masm.bind(&ok);
11838 #endif
// Deflation (TwoByte -> Latin-1) is never performed here.
11840 MOZ_ASSERT_IF(toEncoding == CharEncoding::Latin1,
11841 fromEncoding == CharEncoding::Latin1);
11843 size_t fromWidth =
11844 fromEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
11845 size_t toWidth =
11846 toEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
11848 // Try to copy multiple characters at once when both encoding are equal.
11849 if (fromEncoding == toEncoding) {
11850 constexpr size_t ptrWidth = sizeof(uintptr_t);
11852 // Copy |width| bytes and then adjust |from| and |to|.
11853 auto copyCharacters = [&](size_t width) {
11854 static_assert(ptrWidth <= 8, "switch handles only up to eight bytes");
11856 switch (width) {
11857 case 1:
11858 masm.load8ZeroExtend(Address(from, 0), byteOpScratch);
11859 masm.store8(byteOpScratch, Address(to, 0));
11860 break;
11861 case 2:
11862 masm.load16ZeroExtend(Address(from, 0), byteOpScratch);
11863 masm.store16(byteOpScratch, Address(to, 0));
11864 break;
11865 case 4:
11866 masm.load32(Address(from, 0), byteOpScratch);
11867 masm.store32(byteOpScratch, Address(to, 0));
11868 break;
11869 case 8:
11870 MOZ_ASSERT(width == ptrWidth);
11871 masm.loadPtr(Address(from, 0), byteOpScratch);
11872 masm.storePtr(byteOpScratch, Address(to, 0));
11873 break;
11876 masm.addPtr(Imm32(width), from);
11877 masm.addPtr(Imm32(width), to);
// Peel off 1-, 2- and 4-byte copies until the remaining length is a
// multiple of the pointer width.  The low bits of |len| select which of
// these "alignment" copies run.
11880 // First align |len| to pointer width.
11881 Label done;
11882 for (size_t width = fromWidth; width < ptrWidth; width *= 2) {
11883 // Number of characters which fit into |width| bytes.
11884 size_t charsPerWidth = width / fromWidth;
11886 if (charsPerWidth < maximumLength) {
11887 Label next;
11888 masm.branchTest32(Assembler::Zero, len, Imm32(charsPerWidth), &next);
11890 copyCharacters(width);
11892 masm.branchSub32(Assembler::Zero, Imm32(charsPerWidth), len, &done);
11893 masm.bind(&next);
11894 } else if (charsPerWidth == maximumLength) {
// The maximum length fits entirely in this width; an unconditional copy
// suffices and larger widths are statically unreachable.
11895 copyCharacters(width);
11896 masm.sub32(Imm32(charsPerWidth), len);
11900 size_t maxInlineLength;
11901 if (fromEncoding == CharEncoding::Latin1) {
11902 maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
11903 } else {
11904 maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
11907 // Number of characters which fit into a single register.
11908 size_t charsPerPtr = ptrWidth / fromWidth;
11910 // Unroll small loops.
11911 constexpr size_t unrollLoopLimit = 3;
11912 size_t loopCount = std::min(maxInlineLength, maximumLength) / charsPerPtr;
11914 #ifdef JS_64BIT
11915 static constexpr size_t latin1MaxInlineByteLength =
11916 JSFatInlineString::MAX_LENGTH_LATIN1 * sizeof(char);
11917 static constexpr size_t twoByteMaxInlineByteLength =
11918 JSFatInlineString::MAX_LENGTH_TWO_BYTE * sizeof(char16_t);
11920 // |unrollLoopLimit| should be large enough to allow loop unrolling on
11921 // 64-bit targets.
11922 static_assert(latin1MaxInlineByteLength / ptrWidth == unrollLoopLimit,
11923 "Latin-1 loops are unrolled on 64-bit");
11924 static_assert(twoByteMaxInlineByteLength / ptrWidth == unrollLoopLimit,
11925 "Two-byte loops are unrolled on 64-bit")
11926 #endif
11928 if (loopCount <= unrollLoopLimit) {
11929 Label labels[unrollLoopLimit];
11931 // Check up front how many characters can be copied.
11932 for (size_t i = 1; i < loopCount; i++) {
11933 masm.branch32(Assembler::Below, len, Imm32((i + 1) * charsPerPtr),
11934 &labels[i]);
11937 // Generate the unrolled loop body.
11938 for (size_t i = loopCount; i > 0; i--) {
11939 copyCharacters(ptrWidth);
11940 masm.sub32(Imm32(charsPerPtr), len);
11942 // Jump target for the previous length check.
11943 if (i != 1) {
11944 masm.bind(&labels[i - 1]);
11947 } else {
// Too large to unroll: emit a plain pointer-width copy loop.
11948 Label start;
11949 masm.bind(&start);
11950 copyCharacters(ptrWidth);
11951 masm.branchSub32(Assembler::NonZero, Imm32(charsPerPtr), len, &start);
11954 masm.bind(&done);
11955 } else {
// Mixed encodings: copy one character at a time; loadChar/storeChar widen
// Latin-1 code units to char16_t as needed.
11956 Label start;
11957 masm.bind(&start);
11958 masm.loadChar(Address(from, 0), byteOpScratch, fromEncoding);
11959 masm.storeChar(byteOpScratch, Address(to, 0), toEncoding);
11960 masm.addPtr(Imm32(fromWidth), from);
11961 masm.addPtr(Imm32(toWidth), to);
11962 masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
11966 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
11967 Register len, Register byteOpScratch,
11968 CharEncoding encoding, size_t maximumLength) {
11969 CopyStringChars(masm, to, from, len, byteOpScratch, encoding, encoding,
11970 maximumLength);
11973 static void CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input,
11974 Register destChars, Register temp1,
11975 Register temp2) {
11976 // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
11977 // have to inflate.
11979 Label isLatin1, done;
11980 masm.loadStringLength(input, temp1);
11981 masm.branchLatin1String(input, &isLatin1);
11983 masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
11984 masm.movePtr(temp2, input);
11985 CopyStringChars(masm, destChars, input, temp1, temp2,
11986 CharEncoding::TwoByte);
11987 masm.jump(&done);
11989 masm.bind(&isLatin1);
11991 masm.loadStringChars(input, temp2, CharEncoding::Latin1);
11992 masm.movePtr(temp2, input);
11993 CopyStringChars(masm, destChars, input, temp1, temp2, CharEncoding::Latin1,
11994 CharEncoding::TwoByte);
11996 masm.bind(&done);
// Allocates a JSThinInlineString or JSFatInlineString (chosen by |length|)
// in |output|, initializes its flags for |encoding| and stores |length|.
// Jumps to |failure| if GC allocation fails.  |temp| is clobbered.  The
// caller guarantees |length| fits an inline string of this encoding
// (asserted in debug builds).
11999 static void AllocateThinOrFatInlineString(MacroAssembler& masm, Register output,
12000 Register length, Register temp,
12001 gc::Heap initialStringHeap,
12002 Label* failure,
12003 CharEncoding encoding) {
12004 #ifdef DEBUG
12005 size_t maxInlineLength;
12006 if (encoding == CharEncoding::Latin1) {
12007 maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
12008 } else {
12009 maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
12012 Label ok;
12013 masm.branch32(Assembler::BelowOrEqual, length, Imm32(maxInlineLength), &ok);
12014 masm.assumeUnreachable("string length too large to be allocated as inline");
12015 masm.bind(&ok);
12016 #endif
// Thresholds differ per encoding: Latin-1 packs twice as many characters
// into the same inline storage as TwoByte.
12018 size_t maxThinInlineLength;
12019 if (encoding == CharEncoding::Latin1) {
12020 maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
12021 } else {
12022 maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;
12025 Label isFat, allocDone;
12026 masm.branch32(Assembler::Above, length, Imm32(maxThinInlineLength), &isFat);
// Thin-inline path.
12028 uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
12029 if (encoding == CharEncoding::Latin1) {
12030 flags |= JSString::LATIN1_CHARS_BIT;
12032 masm.newGCString(output, temp, initialStringHeap, failure);
12033 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
12034 masm.jump(&allocDone);
12036 masm.bind(&isFat);
// Fat-inline path.
12038 uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
12039 if (encoding == CharEncoding::Latin1) {
12040 flags |= JSString::LATIN1_CHARS_BIT;
12042 masm.newGCFatInlineString(output, temp, initialStringHeap, failure);
12043 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
12045 masm.bind(&allocDone);
12047 // Store length.
12048 masm.store32(length, Address(output, JSString::offsetOfLength()));
12051 static void ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs,
12052 Register output, Register temp1, Register temp2,
12053 Register temp3, gc::Heap initialStringHeap,
12054 Label* failure, CharEncoding encoding) {
12055 JitSpew(JitSpew_Codegen, "# Emitting ConcatInlineString (encoding=%s)",
12056 (encoding == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
12058 // State: result length in temp2.
12060 // Ensure both strings are linear.
12061 masm.branchIfRope(lhs, failure);
12062 masm.branchIfRope(rhs, failure);
12064 // Allocate a JSThinInlineString or JSFatInlineString.
12065 AllocateThinOrFatInlineString(masm, output, temp2, temp1, initialStringHeap,
12066 failure, encoding);
12068 // Load chars pointer in temp2.
12069 masm.loadInlineStringCharsForStore(output, temp2);
12071 auto copyChars = [&](Register src) {
12072 if (encoding == CharEncoding::TwoByte) {
12073 CopyStringCharsMaybeInflate(masm, src, temp2, temp1, temp3);
12074 } else {
12075 masm.loadStringLength(src, temp3);
12076 masm.loadStringChars(src, temp1, CharEncoding::Latin1);
12077 masm.movePtr(temp1, src);
12078 CopyStringChars(masm, temp2, src, temp3, temp1, CharEncoding::Latin1);
12082 // Copy lhs chars. Note that this advances temp2 to point to the next
12083 // char. This also clobbers the lhs register.
12084 copyChars(lhs);
12086 // Copy rhs chars. Clobbers the rhs register.
12087 copyChars(rhs);
// Emits the fast paths for String.prototype.substring/slice:
//   1. zero length        -> the empty atom
//   2. whole-string slice -> the input itself
//   3. length 1 or 2      -> a static string from the runtime's table
//   4. short results      -> a freshly allocated (thin/fat) inline string
//   5. everything else    -> a dependent string sharing the input's chars
// Ropes and any allocation failure fall back to the SubstringKernel VM
// call.  Range analysis on the length operand prunes paths 4/5 when the
// maximum possible length rules them out.
12090 void CodeGenerator::visitSubstr(LSubstr* lir) {
12091 Register string = ToRegister(lir->string());
12092 Register begin = ToRegister(lir->begin());
12093 Register length = ToRegister(lir->length());
12094 Register output = ToRegister(lir->output());
12095 Register temp0 = ToRegister(lir->temp0());
12096 Register temp2 = ToRegister(lir->temp2());
12098 // On x86 there are not enough registers. In that case reuse the string
12099 // register as temporary.
12100 Register temp1 =
12101 lir->temp1()->isBogusTemp() ? string : ToRegister(lir->temp1());
12103 size_t maximumLength = SIZE_MAX;
12105 Range* range = lir->mir()->length()->range();
12106 if (range && range->hasInt32UpperBound()) {
12107 MOZ_ASSERT(range->upper() >= 0);
12108 maximumLength = size_t(range->upper());
12111 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <=
12112 JSThinInlineString::MAX_LENGTH_LATIN1);
12114 static_assert(JSFatInlineString::MAX_LENGTH_TWO_BYTE <=
12115 JSFatInlineString::MAX_LENGTH_LATIN1);
// If the maximum length provably fits a thin (resp. fat) inline string,
// the fat-inline and dependent-string paths can be skipped entirely.
12117 bool tryFatInlineOrDependent =
12118 maximumLength > JSThinInlineString::MAX_LENGTH_TWO_BYTE;
12119 bool tryDependent = maximumLength > JSFatInlineString::MAX_LENGTH_TWO_BYTE;
12121 #ifdef DEBUG
12122 if (maximumLength != SIZE_MAX) {
12123 Label ok;
12124 masm.branch32(Assembler::BelowOrEqual, length, Imm32(maximumLength), &ok);
12125 masm.assumeUnreachable("length should not exceed maximum length");
12126 masm.bind(&ok);
12128 #endif
12130 Label nonZero, nonInput;
12132 // For every edge case use the C++ variant.
12133 // Note: we also use this upon allocation failure in newGCString and
12134 // newGCFatInlineString. To squeeze out even more performance those failures
12135 // can be handled by allocate in ool code and returning to jit code to fill
12136 // in all data.
12137 using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
12138 int32_t len);
12139 OutOfLineCode* ool = oolCallVM<Fn, SubstringKernel>(
12140 lir, ArgList(string, begin, length), StoreRegisterTo(output));
12141 Label* slowPath = ool->entry();
12142 Label* done = ool->rejoin();
12144 // Zero length, return emptystring.
12145 masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
12146 const JSAtomState& names = gen->runtime->names();
12147 masm.movePtr(ImmGCPtr(names.empty_), output);
12148 masm.jump(done);
12150 // Substring from 0..|str.length|, return str.
12151 masm.bind(&nonZero);
12152 masm.branch32(Assembler::NotEqual,
12153 Address(string, JSString::offsetOfLength()), length, &nonInput);
12154 #ifdef DEBUG
12156 Label ok;
12157 masm.branchTest32(Assembler::Zero, begin, begin, &ok);
12158 masm.assumeUnreachable("length == str.length implies begin == 0");
12159 masm.bind(&ok);
12161 #endif
12162 masm.movePtr(string, output);
12163 masm.jump(done);
12165 // Use slow path for ropes.
12166 masm.bind(&nonInput);
12167 masm.branchIfRope(string, slowPath);
12169 // Optimize one and two character strings.
12170 Label nonStatic;
12171 masm.branch32(Assembler::Above, length, Imm32(2), &nonStatic);
12173 Label loadLengthOne, loadLengthTwo;
// Loads the first (and for length 2, second) character of the substring
// into temp2/temp0 so the static-strings table can be consulted.
12175 auto loadChars = [&](CharEncoding encoding, bool fallthru) {
12176 size_t size = encoding == CharEncoding::Latin1 ? sizeof(JS::Latin1Char)
12177 : sizeof(char16_t);
12179 masm.loadStringChars(string, temp0, encoding);
12180 masm.loadChar(temp0, begin, temp2, encoding);
12181 masm.branch32(Assembler::Equal, length, Imm32(1), &loadLengthOne);
12182 masm.loadChar(temp0, begin, temp0, encoding, int32_t(size));
12183 if (!fallthru) {
12184 masm.jump(&loadLengthTwo);
12188 Label isLatin1;
12189 masm.branchLatin1String(string, &isLatin1);
12190 loadChars(CharEncoding::TwoByte, /* fallthru = */ false);
12192 masm.bind(&isLatin1);
12193 loadChars(CharEncoding::Latin1, /* fallthru = */ true);
12195 // Try to load a length-two static string.
12196 masm.bind(&loadLengthTwo);
12197 masm.lookupStaticString(temp2, temp0, output, gen->runtime->staticStrings(),
12198 &nonStatic);
12199 masm.jump(done);
12201 // Try to load a length-one static string.
12202 masm.bind(&loadLengthOne);
12203 masm.lookupStaticString(temp2, output, gen->runtime->staticStrings(),
12204 &nonStatic);
12205 masm.jump(done);
12207 masm.bind(&nonStatic);
12209 // Allocate either a JSThinInlineString or JSFatInlineString, or jump to
12210 // notInline if we need a dependent string.
12211 Label notInline;
12213 static_assert(JSThinInlineString::MAX_LENGTH_LATIN1 <
12214 JSFatInlineString::MAX_LENGTH_LATIN1);
12215 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <
12216 JSFatInlineString::MAX_LENGTH_TWO_BYTE);
12218 // Use temp2 to store the JS(Thin|Fat)InlineString flags. This avoids having
12219 // duplicate newGCString/newGCFatInlineString codegen for Latin1 vs TwoByte
12220 // strings.
12222 Label allocFat, allocDone;
12223 if (tryFatInlineOrDependent) {
12224 Label isLatin1, allocThin;
12225 masm.branchLatin1String(string, &isLatin1);
12227 if (tryDependent) {
12228 masm.branch32(Assembler::Above, length,
12229 Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
12230 &notInline);
12232 masm.move32(Imm32(0), temp2);
12233 masm.branch32(Assembler::Above, length,
12234 Imm32(JSThinInlineString::MAX_LENGTH_TWO_BYTE),
12235 &allocFat);
12236 masm.jump(&allocThin);
12239 masm.bind(&isLatin1);
12241 if (tryDependent) {
12242 masm.branch32(Assembler::Above, length,
12243 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1),
12244 &notInline);
12246 masm.move32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
12247 masm.branch32(Assembler::Above, length,
12248 Imm32(JSThinInlineString::MAX_LENGTH_LATIN1), &allocFat);
12251 masm.bind(&allocThin);
12252 } else {
// Result provably fits a thin inline string: just inherit the input's
// encoding bit.
12253 masm.load32(Address(string, JSString::offsetOfFlags()), temp2);
12254 masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
12258 masm.newGCString(output, temp0, initialStringHeap(), slowPath);
12259 masm.or32(Imm32(JSString::INIT_THIN_INLINE_FLAGS), temp2);
12262 if (tryFatInlineOrDependent) {
12263 masm.jump(&allocDone);
12265 masm.bind(&allocFat);
12267 masm.newGCFatInlineString(output, temp0, initialStringHeap(), slowPath);
12268 masm.or32(Imm32(JSString::INIT_FAT_INLINE_FLAGS), temp2);
12271 masm.bind(&allocDone);
12274 masm.store32(temp2, Address(output, JSString::offsetOfFlags()));
12275 masm.store32(length, Address(output, JSString::offsetOfLength()));
// Copies the substring's characters into the freshly allocated inline
// string.  On x86 temp1 aliases |string|, so the input is saved/restored
// around the copy.
12277 auto initializeInlineString = [&](CharEncoding encoding) {
12278 masm.loadStringChars(string, temp0, encoding);
12279 masm.addToCharPtr(temp0, begin, encoding);
12280 if (temp1 == string) {
12281 masm.push(string);
12283 masm.loadInlineStringCharsForStore(output, temp1);
12284 CopyStringChars(masm, temp1, temp0, length, temp2, encoding,
12285 maximumLength);
12286 masm.loadStringLength(output, length);
12287 if (temp1 == string) {
12288 masm.pop(string);
12292 Label isInlineLatin1;
12293 masm.branchTest32(Assembler::NonZero, temp2,
12294 Imm32(JSString::LATIN1_CHARS_BIT), &isInlineLatin1);
12295 initializeInlineString(CharEncoding::TwoByte);
12296 masm.jump(done);
12298 masm.bind(&isInlineLatin1);
12299 initializeInlineString(CharEncoding::Latin1);
12302 // Handle other cases with a DependentString.
12303 if (tryDependent) {
12304 masm.jump(done);
12306 masm.bind(&notInline);
12307 masm.newGCString(output, temp0, gen->initialStringHeap(), slowPath);
12308 masm.store32(length, Address(output, JSString::offsetOfLength()));
12309 masm.storeDependentStringBase(string, output);
// Points the dependent string's chars at the proper offset inside the
// base string's (non-inline) character buffer.
12311 auto initializeDependentString = [&](CharEncoding encoding) {
12312 uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
12313 if (encoding == CharEncoding::Latin1) {
12314 flags |= JSString::LATIN1_CHARS_BIT;
12317 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
12318 masm.loadNonInlineStringChars(string, temp0, encoding);
12319 masm.addToCharPtr(temp0, begin, encoding);
12320 masm.storeNonInlineStringChars(temp0, output);
12323 Label isLatin1;
12324 masm.branchLatin1String(string, &isLatin1);
12325 initializeDependentString(CharEncoding::TwoByte);
12326 masm.jump(done);
12328 masm.bind(&isLatin1);
12329 initializeDependentString(CharEncoding::Latin1);
12332 masm.bind(done);
// Generate the JitZone's shared string-concatenation stub.
//
// Register protocol: lhs in CallTempReg0, rhs in CallTempReg1, result in
// CallTempReg5 (|output|). On failure the stub stores nullptr in |output|
// instead of throwing.
//
// Fast paths, in order of the emitted code:
//   1. lhs is empty  -> return rhs unchanged.
//   2. rhs is empty  -> return lhs unchanged.
//   3. combined length fits a JSFatInlineString -> copy the characters into
//      a new inline string (ConcatInlineString, Latin-1 or two-byte).
//   4. otherwise     -> allocate a rope whose children are lhs and rhs.
12335 JitCode* JitZone::generateStringConcatStub(JSContext* cx) {
12336 JitSpew(JitSpew_Codegen, "# Emitting StringConcat stub");
12338 TempAllocator temp(&cx->tempLifoAlloc());
12339 JitContext jcx(cx);
12340 StackMacroAssembler masm(cx, temp);
12341 AutoCreatedBy acb(masm, "JitZone::generateStringConcatStub");
12343 Register lhs = CallTempReg0;
12344 Register rhs = CallTempReg1;
12345 Register temp1 = CallTempReg2;
12346 Register temp2 = CallTempReg3;
12347 Register temp3 = CallTempReg4;
12348 Register output = CallTempReg5;
12350 Label failure;
12351 #ifdef JS_USE_LINK_REGISTER
12352 masm.pushReturnAddress();
12353 #endif
12354 masm.Push(FramePointer);
12355 masm.moveStackPtrTo(FramePointer);
12357 // If lhs is empty, return rhs.
12358 Label leftEmpty;
12359 masm.loadStringLength(lhs, temp1);
12360 masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);
12362 // If rhs is empty, return lhs.
12363 Label rightEmpty;
12364 masm.loadStringLength(rhs, temp2);
12365 masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);
// temp2 = lhs.length + rhs.length, the length of the result string.
12367 masm.add32(temp1, temp2);
12369 // Check if we can use a JSInlineString. The result is a Latin1 string if
12370 // lhs and rhs are both Latin1, so we AND the flags.
12371 Label isInlineTwoByte, isInlineLatin1;
12372 masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
12373 masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);
12375 Label isLatin1, notInline;
12376 masm.branchTest32(Assembler::NonZero, temp1,
12377 Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
12379 masm.branch32(Assembler::BelowOrEqual, temp2,
12380 Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
12381 &isInlineTwoByte);
12382 masm.jump(&notInline);
12384 masm.bind(&isLatin1);
12386 masm.branch32(Assembler::BelowOrEqual, temp2,
12387 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &isInlineLatin1);
// Rope path: the result is too long for an inline string.
12389 masm.bind(&notInline);
12391 // Keep AND'ed flags in temp1.
12393 // Ensure result length <= JSString::MAX_LENGTH.
12394 masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);
12396 // Allocate a new rope, guaranteed to be in the nursery if initialStringHeap
12397 // == gc::Heap::Default. (As a result, no post barriers are needed below.)
12398 masm.newGCString(output, temp3, initialStringHeap, &failure);
12400 // Store rope length and flags. temp1 still holds the result of AND'ing the
12401 // lhs and rhs flags, so we just have to clear the other flags to get our rope
12402 // flags (Latin1 if both lhs and rhs are Latin1).
12403 static_assert(JSString::INIT_ROPE_FLAGS == 0,
12404 "Rope type flags must have no bits set");
12405 masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
12406 masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
12407 masm.store32(temp2, Address(output, JSString::offsetOfLength()));
12409 // Store left and right nodes.
12410 masm.storeRopeChildren(lhs, rhs, output);
12411 masm.pop(FramePointer);
12412 masm.ret();
12414 masm.bind(&leftEmpty);
12415 masm.mov(rhs, output);
12416 masm.pop(FramePointer);
12417 masm.ret();
12419 masm.bind(&rightEmpty);
12420 masm.mov(lhs, output);
12421 masm.pop(FramePointer);
12422 masm.ret();
12424 masm.bind(&isInlineTwoByte);
12425 ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
12426 initialStringHeap, &failure, CharEncoding::TwoByte);
12427 masm.pop(FramePointer);
12428 masm.ret();
12430 masm.bind(&isInlineLatin1);
12431 ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
12432 initialStringHeap, &failure, CharEncoding::Latin1);
12433 masm.pop(FramePointer);
12434 masm.ret();
// Any failure (length overflow, allocation failure) returns nullptr; the
// caller is expected to fall back to the VM concat path.
12436 masm.bind(&failure);
12437 masm.movePtr(ImmPtr(nullptr), output);
12438 masm.pop(FramePointer);
12439 masm.ret();
12441 Linker linker(masm);
12442 JitCode* code = linker.newCode(cx, CodeKind::Other);
12444 CollectPerfSpewerJitCodeProfile(code, "StringConcatStub");
12445 #ifdef MOZ_VTUNE
12446 vtune::MarkStub(code, "StringConcatStub");
12447 #endif
12449 return code;
// Generate a trampoline that calls js_free() on the pointer passed in
// CallTempReg0. All volatile registers except the argument register are
// saved and restored around the ABI call, so JIT code can call this stub
// without spilling its own live registers.
12452 void JitRuntime::generateFreeStub(MacroAssembler& masm) {
12453 AutoCreatedBy acb(masm, "JitRuntime::generateFreeStub");
12455 const Register regSlots = CallTempReg0;
12457 freeStubOffset_ = startTrampolineCode(masm);
12459 #ifdef JS_USE_LINK_REGISTER
12460 masm.pushReturnAddress();
12461 #endif
// Preserve every volatile register except the one holding the argument.
12462 AllocatableRegisterSet regs(RegisterSet::Volatile());
12463 regs.takeUnchecked(regSlots);
12464 LiveRegisterSet save(regs.asLiveSet());
12465 masm.PushRegsInMask(save);
12467 const Register regTemp = regs.takeAnyGeneral();
12468 MOZ_ASSERT(regTemp != regSlots);
12470 using Fn = void (*)(void* p);
12471 masm.setupUnalignedABICall(regTemp);
12472 masm.passABIArg(regSlots);
12473 masm.callWithABI<Fn, js_free>(ABIType::General,
12474 CheckUnsafeCallWithABI::DontCheckOther);
12476 masm.PopRegsInMask(save);
12478 masm.ret();
// Generate the lazy-link trampoline. It is entered when Ion code for a
// script has been compiled but not yet linked: it builds a LazyLink fake
// exit frame, calls LazyLinkTopActivation() to perform the link and obtain
// the real code entry point, then tail-jumps to that address (returned in
// ReturnReg).
12481 void JitRuntime::generateLazyLinkStub(MacroAssembler& masm) {
12482 AutoCreatedBy acb(masm, "JitRuntime::generateLazyLinkStub");
12484 lazyLinkStubOffset_ = startTrampolineCode(masm);
12486 #ifdef JS_USE_LINK_REGISTER
12487 masm.pushReturnAddress();
12488 #endif
12489 masm.Push(FramePointer);
12490 masm.moveStackPtrTo(FramePointer);
12492 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
12493 Register temp0 = regs.takeAny();
12494 Register temp1 = regs.takeAny();
12495 Register temp2 = regs.takeAny();
// temp0 = cx, temp1 = pointer to the LazyLinkExitFrameLayout we just built.
12497 masm.loadJSContext(temp0);
12498 masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::LazyLink);
12499 masm.moveStackPtrTo(temp1);
12501 using Fn = uint8_t* (*)(JSContext* cx, LazyLinkExitFrameLayout* frame);
12502 masm.setupUnalignedABICall(temp2);
12503 masm.passABIArg(temp0);
12504 masm.passABIArg(temp1);
12505 masm.callWithABI<Fn, LazyLinkTopActivation>(
12506 ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
12508 // Discard exit frame and restore frame pointer.
12509 masm.leaveExitFrame(0);
12510 masm.pop(FramePointer);
12512 #ifdef JS_USE_LINK_REGISTER
12513 // Restore the return address such that the emitPrologue function of the
12514 // CodeGenerator can push it back on the stack with pushReturnAddress.
12515 masm.popReturnAddress();
12516 #endif
// Tail-jump into the freshly linked code.
12517 masm.jump(ReturnReg);
// Generate the interpreter trampoline used when a JIT caller must invoke a
// callee through the C++ interpreter. It builds an InterpreterStub fake exit
// frame, calls InvokeFromInterpreterStub(), jumps to the failure label if
// that returns false, and otherwise loads the return Value (stored by the
// callee in argv[0], where |this| used to live) into JSReturnOperand.
12520 void JitRuntime::generateInterpreterStub(MacroAssembler& masm) {
12521 AutoCreatedBy acb(masm, "JitRuntime::generateInterpreterStub");
12523 interpreterStubOffset_ = startTrampolineCode(masm);
12525 #ifdef JS_USE_LINK_REGISTER
12526 masm.pushReturnAddress();
12527 #endif
12528 masm.Push(FramePointer);
12529 masm.moveStackPtrTo(FramePointer);
12531 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
12532 Register temp0 = regs.takeAny();
12533 Register temp1 = regs.takeAny();
12534 Register temp2 = regs.takeAny();
// temp0 = cx, temp1 = pointer to the InterpreterStubExitFrameLayout.
12536 masm.loadJSContext(temp0);
12537 masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::InterpreterStub);
12538 masm.moveStackPtrTo(temp1);
12540 using Fn = bool (*)(JSContext* cx, InterpreterStubExitFrameLayout* frame);
12541 masm.setupUnalignedABICall(temp2);
12542 masm.passABIArg(temp0);
12543 masm.passABIArg(temp1);
12544 masm.callWithABI<Fn, InvokeFromInterpreterStub>(
12545 ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
12547 masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
12549 // Discard exit frame and restore frame pointer.
12550 masm.leaveExitFrame(0);
12551 masm.pop(FramePointer);
12553 // InvokeFromInterpreterStub stores the return value in argv[0], where the
12554 // caller stored |this|. Subtract |sizeof(void*)| for the frame pointer we
12555 // just popped.
12556 masm.loadValue(Address(masm.getStackPointer(),
12557 JitFrameLayout::offsetOfThis() - sizeof(void*)),
12558 JSReturnOperand);
12559 masm.ret();
// Generate a stub that canonicalizes a boxed double in R0 to an int32 Value
// when the conversion is exact: non-double inputs and doubles that do not
// convert cleanly (the conversion branches to |done| on failure; the
// negative-zero check is disabled) leave R0 untouched.
12562 void JitRuntime::generateDoubleToInt32ValueStub(MacroAssembler& masm) {
12563 AutoCreatedBy acb(masm, "JitRuntime::generateDoubleToInt32ValueStub");
12564 doubleToInt32ValueStubOffset_ = startTrampolineCode(masm);
12566 Label done;
12567 masm.branchTestDouble(Assembler::NotEqual, R0, &done);
12569 masm.unboxDouble(R0, FloatReg0);
12570 masm.convertDoubleToInt32(FloatReg0, R1.scratchReg(), &done,
12571 /* negativeZeroCheck = */ false);
// Conversion succeeded: retag R0 as an int32 Value.
12572 masm.tagValue(JSVAL_TYPE_INT32, R1.scratchReg(), R0);
12574 masm.bind(&done);
12575 masm.abiret();
// Linearize a string: ropes go through the LinearizeForCharAccess VM call,
// already-linear strings are passed through unchanged (output = str).
12578 void CodeGenerator::visitLinearizeString(LLinearizeString* lir) {
12579 Register str = ToRegister(lir->str());
12580 Register output = ToRegister(lir->output());
12582 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12583 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12584 lir, ArgList(str), StoreRegisterTo(output));
12586 masm.branchIfRope(str, ool->entry());
12588 masm.movePtr(str, output);
12589 masm.bind(ool->rejoin());
// Linearize a string only if the char at |index| cannot be loaded directly
// (branchIfNotCanLoadStringChar decides); otherwise output = str.
12592 void CodeGenerator::visitLinearizeForCharAccess(LLinearizeForCharAccess* lir) {
12593 Register str = ToRegister(lir->str());
12594 Register index = ToRegister(lir->index());
12595 Register output = ToRegister(lir->output());
12597 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12598 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12599 lir, ArgList(str), StoreRegisterTo(output));
12601 masm.branchIfNotCanLoadStringChar(str, index, output, ool->entry());
12603 masm.movePtr(str, output);
12604 masm.bind(ool->rejoin());
// Same as visitLinearizeForCharAccess, but for code-point access: a code
// point may span two code units, so the check needs an extra temp register.
12607 void CodeGenerator::visitLinearizeForCodePointAccess(
12608 LLinearizeForCodePointAccess* lir) {
12609 Register str = ToRegister(lir->str());
12610 Register index = ToRegister(lir->index());
12611 Register output = ToRegister(lir->output());
12612 Register temp = ToRegister(lir->temp0());
12614 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12615 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12616 lir, ArgList(str), StoreRegisterTo(output));
12618 masm.branchIfNotCanLoadStringCodePoint(str, index, output, temp,
12619 ool->entry());
12621 masm.movePtr(str, output);
12622 masm.bind(ool->rejoin());
// Convert a possibly-negative relative index to an absolute one:
// output = index + (index < 0 ? length : 0).
12625 void CodeGenerator::visitToRelativeStringIndex(LToRelativeStringIndex* lir) {
12626 Register index = ToRegister(lir->index());
12627 Register length = ToRegister(lir->length());
12628 Register output = ToRegister(lir->output());
// output starts at 0 and becomes |length| only for negative indices.
12630 masm.move32(Imm32(0), output);
12631 masm.cmp32Move32(Assembler::LessThan, index, Imm32(0), length, output);
12632 masm.add32(index, output);
// Load the code unit of |str| at |index| into |output|. A bogus index LIR
// operand means the index is statically 0. When the character cannot be
// loaded inline (loadStringChar jumps to the OOL entry), fall back to the
// jit::CharCodeAt VM call.
12635 void CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) {
12636 Register str = ToRegister(lir->str());
12637 Register output = ToRegister(lir->output());
12638 Register temp0 = ToRegister(lir->temp0());
12639 Register temp1 = ToRegister(lir->temp1());
12641 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12643 if (lir->index()->isBogus()) {
12644 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
12645 StoreRegisterTo(output));
12646 masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
12647 masm.bind(ool->rejoin());
12648 } else {
12649 Register index = ToRegister(lir->index());
12651 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
12652 StoreRegisterTo(output));
12653 masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
12654 masm.bind(ool->rejoin());
// Like visitCharCodeAt, but out-of-bounds indices produce -1 instead of
// taking a VM call: |output| is preset to -1 and the bounds check jumps
// straight to the rejoin point, leaving that value in place.
12658 void CodeGenerator::visitCharCodeAtOrNegative(LCharCodeAtOrNegative* lir) {
12659 Register str = ToRegister(lir->str());
12660 Register output = ToRegister(lir->output());
12661 Register temp0 = ToRegister(lir->temp0());
12662 Register temp1 = ToRegister(lir->temp1());
12664 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12666 // Return -1 for out-of-bounds access.
12667 masm.move32(Imm32(-1), output);
12669 if (lir->index()->isBogus()) {
12670 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
12671 StoreRegisterTo(output));
// Index is statically 0, so only the empty string is out-of-bounds.
12673 masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
12674 Imm32(0), ool->rejoin());
12675 masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
12676 masm.bind(ool->rejoin());
12677 } else {
12678 Register index = ToRegister(lir->index());
12680 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
12681 StoreRegisterTo(output));
12683 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
12684 temp0, ool->rejoin());
12685 masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
12686 masm.bind(ool->rejoin());
// Load the Unicode code point of |str| at |index| into |output|, falling
// back to the jit::CodePointAt VM call when it cannot be loaded inline.
12690 void CodeGenerator::visitCodePointAt(LCodePointAt* lir) {
12691 Register str = ToRegister(lir->str());
12692 Register index = ToRegister(lir->index());
12693 Register output = ToRegister(lir->output());
12694 Register temp0 = ToRegister(lir->temp0());
12695 Register temp1 = ToRegister(lir->temp1());
12697 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12698 auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
12699 StoreRegisterTo(output));
12701 masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
12702 masm.bind(ool->rejoin());
// Like visitCodePointAt, but out-of-bounds indices produce -1: |output| is
// preset to -1 and the bounds check jumps to the rejoin point.
12705 void CodeGenerator::visitCodePointAtOrNegative(LCodePointAtOrNegative* lir) {
12706 Register str = ToRegister(lir->str());
12707 Register index = ToRegister(lir->index());
12708 Register output = ToRegister(lir->output());
12709 Register temp0 = ToRegister(lir->temp0());
12710 Register temp1 = ToRegister(lir->temp1());
12712 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12713 auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
12714 StoreRegisterTo(output));
12716 // Return -1 for out-of-bounds access.
12717 masm.move32(Imm32(-1), output);
12719 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
12720 temp0, ool->rejoin());
12721 masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
12722 masm.bind(ool->rejoin());
// Box an int32 as a Value, mapping negative inputs (sign bit set) to NaN.
// Used to translate the -1 "not found" sentinel from the OrNegative ops.
12725 void CodeGenerator::visitNegativeToNaN(LNegativeToNaN* lir) {
12726 Register input = ToRegister(lir->input());
12727 ValueOperand output = ToOutValue(lir);
12729 masm.tagValue(JSVAL_TYPE_INT32, input, output);
12731 Label done;
12732 masm.branchTest32(Assembler::NotSigned, input, input, &done);
12733 masm.moveValue(JS::NaNValue(), output);
12734 masm.bind(&done);
// Box an int32 as a Value, mapping negative inputs (sign bit set) to
// |undefined|. Parallel to visitNegativeToNaN above.
12737 void CodeGenerator::visitNegativeToUndefined(LNegativeToUndefined* lir) {
12738 Register input = ToRegister(lir->input());
12739 ValueOperand output = ToOutValue(lir);
12741 masm.tagValue(JSVAL_TYPE_INT32, input, output);
12743 Label done;
12744 masm.branchTest32(Assembler::NotSigned, input, input, &done);
12745 masm.moveValue(JS::UndefinedValue(), output);
12746 masm.bind(&done);
// String.fromCharCode for a single code unit: look the character up in the
// runtime's static-strings table, calling js::StringFromCharCode in the VM
// when the code is outside the static-string range.
12749 void CodeGenerator::visitFromCharCode(LFromCharCode* lir) {
12750 Register code = ToRegister(lir->code());
12751 Register output = ToRegister(lir->output());
12753 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12754 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
12755 StoreRegisterTo(output));
12757 // OOL path if code >= UNIT_STATIC_LIMIT.
12758 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
12759 ool->entry());
12761 masm.bind(ool->rejoin());
// Like visitFromCharCode, but negative inputs yield the empty string:
// |output| is preset to the empty atom and the sign test jumps straight to
// the rejoin point, leaving it in place.
12764 void CodeGenerator::visitFromCharCodeEmptyIfNegative(
12765 LFromCharCodeEmptyIfNegative* lir) {
12766 Register code = ToRegister(lir->code());
12767 Register output = ToRegister(lir->output());
12769 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12770 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
12771 StoreRegisterTo(output));
12773 // Return the empty string for negative inputs.
12774 const JSAtomState& names = gen->runtime->names();
12775 masm.movePtr(ImmGCPtr(names.empty_), output);
12776 masm.branchTest32(Assembler::Signed, code, code, ool->rejoin());
12778 // OOL path if code >= UNIT_STATIC_LIMIT.
12779 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
12780 ool->entry());
12782 masm.bind(ool->rejoin());
// Like visitFromCharCode, but negative inputs yield |undefined|. The string
// result is produced in the output Value's scratch register and tagged as a
// string at the rejoin point; the negative path skips that tagging entirely.
12785 void CodeGenerator::visitFromCharCodeUndefinedIfNegative(
12786 LFromCharCodeUndefinedIfNegative* lir) {
12787 Register code = ToRegister(lir->code());
12788 ValueOperand output = ToOutValue(lir);
12789 Register temp = output.scratchReg();
12791 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12792 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
12793 StoreRegisterTo(temp));
12795 // Return |undefined| for negative inputs.
12796 Label done;
12797 masm.moveValue(UndefinedValue(), output);
12798 masm.branchTest32(Assembler::Signed, code, code, &done);
12800 // OOL path if code >= UNIT_STATIC_LIMIT.
12801 masm.lookupStaticString(code, temp, gen->runtime->staticStrings(),
12802 ool->entry());
12804 masm.bind(ool->rejoin());
12805 masm.tagValue(JSVAL_TYPE_STRING, temp, output);
12807 masm.bind(&done);
// String.fromCodePoint for a single code point. Latin-1-range code points
// come from the static-strings table; larger BMP code points and
// supplementary code points (encoded as a lead/trail surrogate pair) are
// written into a freshly allocated JSThinInlineString. Invalid code points
// (> unicode::NonBMPMax) bail out; allocation failure takes the
// js::StringFromCodePoint VM call.
12810 void CodeGenerator::visitFromCodePoint(LFromCodePoint* lir) {
12811 Register codePoint = ToRegister(lir->codePoint());
12812 Register output = ToRegister(lir->output());
12813 Register temp0 = ToRegister(lir->temp0());
12814 Register temp1 = ToRegister(lir->temp1());
12815 LSnapshot* snapshot = lir->snapshot();
12817 // The OOL path is only taken when we can't allocate the inline string.
12818 using Fn = JSLinearString* (*)(JSContext*, char32_t);
12819 auto* ool = oolCallVM<Fn, js::StringFromCodePoint>(lir, ArgList(codePoint),
12820 StoreRegisterTo(output));
12822 Label isTwoByte;
12823 Label* done = ool->rejoin();
12825 static_assert(
12826 StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
12827 "Latin-1 strings can be loaded from static strings");
12830 masm.lookupStaticString(codePoint, output, gen->runtime->staticStrings(),
12831 &isTwoByte);
12832 masm.jump(done);
12834 masm.bind(&isTwoByte);
12836 // Use a bailout if the input is not a valid code point, because
12837 // MFromCodePoint is movable and it'd be observable when a moved
12838 // fromCodePoint throws an exception before its actual call site.
12839 bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax),
12840 snapshot);
12842 // Allocate a JSThinInlineString.
12844 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE >= 2,
12845 "JSThinInlineString can hold a supplementary code point");
12847 uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
12848 masm.newGCString(output, temp0, gen->initialStringHeap(), ool->entry());
12849 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
// Code points >= NonBMPMin need two UTF-16 code units (a surrogate pair).
12852 Label isSupplementary;
12853 masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(unicode::NonBMPMin),
12854 &isSupplementary);
12856 // Store length.
12857 masm.store32(Imm32(1), Address(output, JSString::offsetOfLength()));
12859 // Load chars pointer in temp0.
12860 masm.loadInlineStringCharsForStore(output, temp0);
12862 masm.store16(codePoint, Address(temp0, 0));
12864 masm.jump(done);
12866 masm.bind(&isSupplementary);
12868 // Store length.
12869 masm.store32(Imm32(2), Address(output, JSString::offsetOfLength()));
12871 // Load chars pointer in temp0.
12872 masm.loadInlineStringCharsForStore(output, temp0);
12874 // Inlined unicode::LeadSurrogate(uint32_t).
12875 masm.move32(codePoint, temp1);
12876 masm.rshift32(Imm32(10), temp1);
12877 masm.add32(Imm32(unicode::LeadSurrogateMin - (unicode::NonBMPMin >> 10)),
12878 temp1);
12880 masm.store16(temp1, Address(temp0, 0));
12882 // Inlined unicode::TrailSurrogate(uint32_t).
12883 masm.move32(codePoint, temp1);
12884 masm.and32(Imm32(0x3FF), temp1);
12885 masm.or32(Imm32(unicode::TrailSurrogateMin), temp1);
12887 masm.store16(temp1, Address(temp0, sizeof(char16_t)));
12891 masm.bind(done);
// String.prototype.includes with a non-constant search string: plain VM
// call to js::StringIncludes.
12894 void CodeGenerator::visitStringIncludes(LStringIncludes* lir) {
12895 pushArg(ToRegister(lir->searchString()));
12896 pushArg(ToRegister(lir->string()));
12898 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12899 callVM<Fn, js::StringIncludes>(lir);
// Shared codegen for LStringIncludesSIMD and LStringIndexOfSIMD: search a
// linear string for a constant 1- or 2-character pattern using the
// vectorized mozilla::SIMD::memchr* helpers called through the ABI.
//
// Output convention: for |includes| the result is a bool (0/1); for
// |indexOf| it is the character index or -1. Ropes, and Latin-1 inputs
// searched for a pattern containing a non-Latin-1 two-byte character where
// the result is trivially "not found", are handled without calling the
// matcher; ropes take the OOL VM call (|ool|).
//
// |volatileRegs| is saved around the ABI call, minus the registers this
// code owns (output and temps), which are excluded below.
12902 template <typename LIns>
12903 static void CallStringMatch(MacroAssembler& masm, LIns* lir, OutOfLineCode* ool,
12904 LiveRegisterSet volatileRegs) {
12905 Register string = ToRegister(lir->string());
12906 Register output = ToRegister(lir->output());
12907 Register tempLength = ToRegister(lir->temp0());
12908 Register tempChars = ToRegister(lir->temp1());
12909 Register maybeTempPat = ToTempRegisterOrInvalid(lir->temp2());
12911 const JSLinearString* searchString = lir->searchString();
12912 size_t length = searchString->length();
12913 MOZ_ASSERT(length == 1 || length == 2);
12915 // The additional temp register is only needed when searching for two
12916 // pattern characters.
12917 MOZ_ASSERT_IF(length == 2, maybeTempPat != InvalidReg);
// Preset the "not found" result: false for includes, -1 for indexOf.
12919 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
12920 masm.move32(Imm32(0), output);
12921 } else {
12922 masm.move32(Imm32(-1), output);
12925 masm.loadStringLength(string, tempLength);
12927 // Can't be a substring when the string is smaller than the search string.
// NOTE(review): |done| appears unused in the visible code; all early exits
// jump to ool->rejoin() — candidate for removal (verify against full file).
12928 Label done;
12929 masm.branch32(Assembler::Below, tempLength, Imm32(length), ool->rejoin());
12931 bool searchStringIsPureTwoByte = false;
12932 if (searchString->hasTwoByteChars()) {
12933 JS::AutoCheckCannotGC nogc;
12934 searchStringIsPureTwoByte =
12935 !mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc));
12938 // Pure two-byte strings can't occur in a Latin-1 string.
12939 if (searchStringIsPureTwoByte) {
12940 masm.branchLatin1String(string, ool->rejoin());
12943 // Slow path when we need to linearize the string.
12944 masm.branchIfRope(string, ool->entry());
12946 Label restoreVolatile;
// Emit the ABI call to the right memchr variant for |encoding| and the
// pattern length; converts the returned pointer to an index for indexOf.
12948 auto callMatcher = [&](CharEncoding encoding) {
12949 masm.loadStringChars(string, tempChars, encoding);
12951 LiveGeneralRegisterSet liveRegs;
12952 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
12953 // Save |tempChars| to compute the result index.
12954 liveRegs.add(tempChars);
12956 #ifdef DEBUG
12957 // Save |tempLength| in debug-mode for assertions.
12958 liveRegs.add(tempLength);
12959 #endif
12961 // Exclude non-volatile registers.
12962 liveRegs.set() = GeneralRegisterSet::Intersect(
12963 liveRegs.set(), GeneralRegisterSet::Volatile());
12965 masm.PushRegsInMask(liveRegs);
12968 if (length == 1) {
12969 char16_t pat = searchString->latin1OrTwoByteChar(0);
12970 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
12971 pat <= JSString::MAX_LATIN1_CHAR);
12973 masm.move32(Imm32(pat), output);
12975 masm.setupAlignedABICall();
12976 masm.passABIArg(tempChars);
12977 masm.passABIArg(output);
12978 masm.passABIArg(tempLength);
12979 if (encoding == CharEncoding::Latin1) {
12980 using Fn = const char* (*)(const char*, char, size_t);
12981 masm.callWithABI<Fn, mozilla::SIMD::memchr8>(
12982 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
12983 } else {
12984 using Fn = const char16_t* (*)(const char16_t*, char16_t, size_t);
12985 masm.callWithABI<Fn, mozilla::SIMD::memchr16>(
12986 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
12988 } else {
12989 char16_t pat0 = searchString->latin1OrTwoByteChar(0);
12990 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
12991 pat0 <= JSString::MAX_LATIN1_CHAR);
12993 char16_t pat1 = searchString->latin1OrTwoByteChar(1);
12994 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
12995 pat1 <= JSString::MAX_LATIN1_CHAR);
12997 masm.move32(Imm32(pat0), output);
12998 masm.move32(Imm32(pat1), maybeTempPat);
13000 masm.setupAlignedABICall();
13001 masm.passABIArg(tempChars);
13002 masm.passABIArg(output);
13003 masm.passABIArg(maybeTempPat);
13004 masm.passABIArg(tempLength);
13005 if (encoding == CharEncoding::Latin1) {
13006 using Fn = const char* (*)(const char*, char, char, size_t);
13007 masm.callWithABI<Fn, mozilla::SIMD::memchr2x8>(
13008 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13009 } else {
13010 using Fn =
13011 const char16_t* (*)(const char16_t*, char16_t, char16_t, size_t);
13012 masm.callWithABI<Fn, mozilla::SIMD::memchr2x16>(
13013 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
// The matcher returns a pointer to the match, or nullptr.
13017 masm.storeCallPointerResult(output);
13019 // Convert to string index for `indexOf`.
13020 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
13021 // Restore |tempChars|. (And in debug mode |tempLength|.)
13022 masm.PopRegsInMask(liveRegs);
13024 Label found;
13025 masm.branchPtr(Assembler::NotEqual, output, ImmPtr(nullptr), &found);
13027 masm.move32(Imm32(-1), output);
13028 masm.jump(&restoreVolatile);
13030 masm.bind(&found);
13032 #ifdef DEBUG
13033 // Check lower bound.
13034 Label lower;
13035 masm.branchPtr(Assembler::AboveOrEqual, output, tempChars, &lower);
13036 masm.assumeUnreachable("result pointer below string chars");
13037 masm.bind(&lower);
13039 // Compute the end position of the characters.
13040 auto scale = encoding == CharEncoding::Latin1 ? TimesOne : TimesTwo;
13041 masm.computeEffectiveAddress(BaseIndex(tempChars, tempLength, scale),
13042 tempLength);
13044 // Check upper bound.
13045 Label upper;
13046 masm.branchPtr(Assembler::Below, output, tempLength, &upper);
13047 masm.assumeUnreachable("result pointer above string chars");
13048 masm.bind(&upper);
13049 #endif
// index = (matchPtr - charsPtr), scaled down for two-byte chars.
13051 masm.subPtr(tempChars, output);
13053 if (encoding == CharEncoding::TwoByte) {
13054 masm.rshiftPtr(Imm32(1), output);
// Registers this code owns must not be clobbered by the save/restore.
13059 volatileRegs.takeUnchecked(output);
13060 volatileRegs.takeUnchecked(tempLength);
13061 volatileRegs.takeUnchecked(tempChars);
13062 if (maybeTempPat != InvalidReg) {
13063 volatileRegs.takeUnchecked(maybeTempPat);
13065 masm.PushRegsInMask(volatileRegs);
13067 // Handle the case when the input is a Latin-1 string.
13068 if (!searchStringIsPureTwoByte) {
13069 Label twoByte;
13070 masm.branchTwoByteString(string, &twoByte);
13072 callMatcher(CharEncoding::Latin1);
13073 masm.jump(&restoreVolatile);
13075 masm.bind(&twoByte);
13078 // Handle the case when the input is a two-byte string.
13079 callMatcher(CharEncoding::TwoByte);
13081 masm.bind(&restoreVolatile);
13082 masm.PopRegsInMask(volatileRegs);
13084 // Convert to bool for `includes`.
13085 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
13086 masm.cmpPtrSet(Assembler::NotEqual, output, ImmPtr(nullptr), output);
13089 masm.bind(ool->rejoin());
// String.prototype.includes with a constant 1- or 2-char search string:
// inline SIMD search via CallStringMatch, VM fallback js::StringIncludes.
13092 void CodeGenerator::visitStringIncludesSIMD(LStringIncludesSIMD* lir) {
13093 Register string = ToRegister(lir->string());
13094 Register output = ToRegister(lir->output());
13095 const JSLinearString* searchString = lir->searchString();
13097 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13098 auto* ool = oolCallVM<Fn, js::StringIncludes>(
13099 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
13101 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
// String.prototype.indexOf with a non-constant search string: plain VM call
// to js::StringIndexOf.
13104 void CodeGenerator::visitStringIndexOf(LStringIndexOf* lir) {
13105 pushArg(ToRegister(lir->searchString()));
13106 pushArg(ToRegister(lir->string()));
13108 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
13109 callVM<Fn, js::StringIndexOf>(lir);
// String.prototype.indexOf with a constant 1- or 2-char search string:
// inline SIMD search via CallStringMatch, VM fallback js::StringIndexOf.
13112 void CodeGenerator::visitStringIndexOfSIMD(LStringIndexOfSIMD* lir) {
13113 Register string = ToRegister(lir->string());
13114 Register output = ToRegister(lir->output());
13115 const JSLinearString* searchString = lir->searchString();
13117 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
13118 auto* ool = oolCallVM<Fn, js::StringIndexOf>(
13119 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
13121 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
// String.prototype.lastIndexOf: plain VM call to js::StringLastIndexOf.
13124 void CodeGenerator::visitStringLastIndexOf(LStringLastIndexOf* lir) {
13125 pushArg(ToRegister(lir->searchString()));
13126 pushArg(ToRegister(lir->string()));
13128 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
13129 callVM<Fn, js::StringLastIndexOf>(lir);
// String.prototype.startsWith with a non-constant search string: plain VM
// call to js::StringStartsWith.
13132 void CodeGenerator::visitStringStartsWith(LStringStartsWith* lir) {
13133 pushArg(ToRegister(lir->searchString()));
13134 pushArg(ToRegister(lir->string()));
13136 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13137 callVM<Fn, js::StringStartsWith>(lir);
// String.prototype.startsWith with a constant, non-empty search string.
// Emits an inline prefix check: short inputs fail immediately; ropes are
// unwound along their left children until a linear prefix at least as long
// as the search string is found (otherwise the js::StringStartsWith VM call
// linearizes); pointer-equal strings trivially match; finally the prefix is
// compared character by character.
13140 void CodeGenerator::visitStringStartsWithInline(LStringStartsWithInline* lir) {
13141 Register string = ToRegister(lir->string());
13142 Register output = ToRegister(lir->output());
13143 Register temp = ToRegister(lir->temp0());
13145 const JSLinearString* searchString = lir->searchString();
13147 size_t length = searchString->length();
13148 MOZ_ASSERT(length > 0);
13150 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13151 auto* ool = oolCallVM<Fn, js::StringStartsWith>(
13152 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
// Preset the result to false; early exits jump to the rejoin point.
13154 masm.move32(Imm32(0), output);
13156 // Can't be a prefix when the string is smaller than the search string.
13157 masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
13158 Imm32(length), ool->rejoin());
13160 // Unwind ropes at the start if possible.
13161 Label compare;
13162 masm.movePtr(string, temp);
13163 masm.branchIfNotRope(temp, &compare);
13165 Label unwindRope;
13166 masm.bind(&unwindRope);
13167 masm.loadRopeLeftChild(temp, output);
13168 masm.movePtr(output, temp);
13170 // If the left child is smaller than the search string, jump into the VM to
13171 // linearize the string.
13172 masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
13173 Imm32(length), ool->entry());
13175 // Otherwise keep unwinding ropes.
13176 masm.branchIfRope(temp, &unwindRope);
13178 masm.bind(&compare);
13180 // If operands point to the same instance, it's trivially a prefix.
13181 Label notPointerEqual;
13182 masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
13183 &notPointerEqual);
13184 masm.move32(Imm32(1), output);
13185 masm.jump(ool->rejoin());
13186 masm.bind(&notPointerEqual);
13188 if (searchString->hasTwoByteChars()) {
13189 // Pure two-byte strings can't be a prefix of Latin-1 strings.
13190 JS::AutoCheckCannotGC nogc;
13191 if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
13192 Label compareChars;
13193 masm.branchTwoByteString(temp, &compareChars);
13194 masm.move32(Imm32(0), output);
13195 masm.jump(ool->rejoin());
13196 masm.bind(&compareChars);
13200 // Load the input string's characters.
13201 Register stringChars = output;
13202 masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());
13204 // Start comparing character by character.
13205 masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);
13207 masm.bind(ool->rejoin());
13210 void CodeGenerator::visitStringEndsWith(LStringEndsWith* lir) {
13211 pushArg(ToRegister(lir->searchString()));
13212 pushArg(ToRegister(lir->string()));
13214 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13215 callVM<Fn, js::StringEndsWith>(lir);
// Inline fast path for testing whether |string| ends with the constant,
// non-empty linear string |searchString|. Falls back to the VM
// (js::StringEndsWith) for ropes that can't be unwound far enough and for
// non-linearizable cases.
void CodeGenerator::visitStringEndsWithInline(LStringEndsWithInline* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  // The search string is a compile-time constant, so its length and
  // character data can be inspected during codegen.
  const JSLinearString* searchString = lir->searchString();

  size_t length = searchString->length();
  MOZ_ASSERT(length > 0);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  auto* ool = oolCallVM<Fn, js::StringEndsWith>(
      lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));

  // Default result is |false|; paths that jump straight to the rejoin point
  // leave this value in |output|.
  masm.move32(Imm32(0), output);

  // Can't be a suffix when the string is smaller than the search string.
  masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
                Imm32(length), ool->rejoin());

  // Unwind ropes at the end if possible: a suffix of the whole rope lies
  // entirely within the right child, as long as that child is at least as
  // long as the search string.
  Label compare;
  masm.movePtr(string, temp);
  masm.branchIfNotRope(temp, &compare);

  Label unwindRope;
  masm.bind(&unwindRope);
  masm.loadRopeRightChild(temp, output);
  masm.movePtr(output, temp);

  // If the right child is smaller than the search string, jump into the VM to
  // linearize the string.
  masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
                Imm32(length), ool->entry());

  // Otherwise keep unwinding ropes.
  masm.branchIfRope(temp, &unwindRope);

  masm.bind(&compare);

  // If operands point to the same instance, it's trivially a suffix.
  Label notPointerEqual;
  masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
                 &notPointerEqual);
  masm.move32(Imm32(1), output);
  masm.jump(ool->rejoin());
  masm.bind(&notPointerEqual);

  CharEncoding encoding = searchString->hasLatin1Chars()
                              ? CharEncoding::Latin1
                              : CharEncoding::TwoByte;
  if (encoding == CharEncoding::TwoByte) {
    // Pure two-byte strings can't be a suffix of Latin-1 strings, so a
    // Latin-1 input can be rejected without comparing characters.
    JS::AutoCheckCannotGC nogc;
    if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
      Label compareChars;
      masm.branchTwoByteString(temp, &compareChars);
      masm.move32(Imm32(0), output);
      masm.jump(ool->rejoin());
      masm.bind(&compareChars);
    }
  }

  // Load the input string's characters.
  Register stringChars = output;
  masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());

  // Move string-char pointer to the suffix string.
  masm.loadStringLength(temp, temp);
  masm.sub32(Imm32(length), temp);
  masm.addToCharPtr(stringChars, temp, encoding);

  // Start comparing character by character.
  masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);

  masm.bind(ool->rejoin());
}
13296 void CodeGenerator::visitStringToLowerCase(LStringToLowerCase* lir) {
13297 Register string = ToRegister(lir->string());
13298 Register output = ToRegister(lir->output());
13299 Register temp0 = ToRegister(lir->temp0());
13300 Register temp1 = ToRegister(lir->temp1());
13301 Register temp2 = ToRegister(lir->temp2());
13303 // On x86 there are not enough registers. In that case reuse the string
13304 // register as a temporary.
13305 Register temp3 =
13306 lir->temp3()->isBogusTemp() ? string : ToRegister(lir->temp3());
13307 Register temp4 = ToRegister(lir->temp4());
13309 using Fn = JSString* (*)(JSContext*, HandleString);
13310 OutOfLineCode* ool = oolCallVM<Fn, js::StringToLowerCase>(
13311 lir, ArgList(string), StoreRegisterTo(output));
13313 // Take the slow path if the string isn't a linear Latin-1 string.
13314 Imm32 linearLatin1Bits(JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT);
13315 Register flags = temp0;
13316 masm.load32(Address(string, JSString::offsetOfFlags()), flags);
13317 masm.and32(linearLatin1Bits, flags);
13318 masm.branch32(Assembler::NotEqual, flags, linearLatin1Bits, ool->entry());
13320 Register length = temp0;
13321 masm.loadStringLength(string, length);
13323 // Return the input if it's the empty string.
13324 Label notEmptyString;
13325 masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmptyString);
13327 masm.movePtr(string, output);
13328 masm.jump(ool->rejoin());
13330 masm.bind(&notEmptyString);
13332 Register inputChars = temp1;
13333 masm.loadStringChars(string, inputChars, CharEncoding::Latin1);
13335 Register toLowerCaseTable = temp2;
13336 masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), toLowerCaseTable);
13338 // Single element strings can be directly retrieved from static strings cache.
13339 Label notSingleElementString;
13340 masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleElementString);
13342 Register current = temp4;
13344 masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
13345 masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
13346 current);
13347 masm.lookupStaticString(current, output, gen->runtime->staticStrings());
13349 masm.jump(ool->rejoin());
13351 masm.bind(&notSingleElementString);
13353 // Use the OOL-path when the string is too long. This prevents scanning long
13354 // strings which have upper case characters only near the end a second time in
13355 // the VM.
13356 constexpr int32_t MaxInlineLength = 64;
13357 masm.branch32(Assembler::Above, length, Imm32(MaxInlineLength), ool->entry());
13360 // Check if there are any characters which need to be converted.
13362 // This extra loop gives a small performance improvement for strings which
13363 // are already lower cased and lets us avoid calling into the runtime for
13364 // non-inline, all lower case strings. But more importantly it avoids
13365 // repeated inline allocation failures:
13366 // |AllocateThinOrFatInlineString| below takes the OOL-path and calls the
13367 // |js::StringToLowerCase| runtime function when the result string can't be
13368 // allocated inline. And |js::StringToLowerCase| directly returns the input
13369 // string when no characters need to be converted. That means it won't
13370 // trigger GC to clear up the free nursery space, so the next toLowerCase()
13371 // call will again fail to inline allocate the result string.
13372 Label hasUpper;
13374 Register checkInputChars = output;
13375 masm.movePtr(inputChars, checkInputChars);
13377 Register current = temp4;
13379 Label start;
13380 masm.bind(&start);
13381 masm.loadChar(Address(checkInputChars, 0), current, CharEncoding::Latin1);
13382 masm.branch8(Assembler::NotEqual,
13383 BaseIndex(toLowerCaseTable, current, TimesOne), current,
13384 &hasUpper);
13385 masm.addPtr(Imm32(sizeof(Latin1Char)), checkInputChars);
13386 masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
13388 // Input is already in lower case.
13389 masm.movePtr(string, output);
13390 masm.jump(ool->rejoin());
13392 masm.bind(&hasUpper);
13394 // |length| was clobbered above, reload.
13395 masm.loadStringLength(string, length);
13397 // Call into the runtime when we can't create an inline string.
13398 masm.branch32(Assembler::Above, length,
13399 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), ool->entry());
13401 AllocateThinOrFatInlineString(masm, output, length, temp4,
13402 initialStringHeap(), ool->entry(),
13403 CharEncoding::Latin1);
13405 if (temp3 == string) {
13406 masm.push(string);
13409 Register outputChars = temp3;
13410 masm.loadInlineStringCharsForStore(output, outputChars);
13413 Register current = temp4;
13415 Label start;
13416 masm.bind(&start);
13417 masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
13418 masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
13419 current);
13420 masm.storeChar(current, Address(outputChars, 0), CharEncoding::Latin1);
13421 masm.addPtr(Imm32(sizeof(Latin1Char)), inputChars);
13422 masm.addPtr(Imm32(sizeof(Latin1Char)), outputChars);
13423 masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
13426 if (temp3 == string) {
13427 masm.pop(string);
13431 masm.bind(ool->rejoin());
13434 void CodeGenerator::visitStringToUpperCase(LStringToUpperCase* lir) {
13435 pushArg(ToRegister(lir->string()));
13437 using Fn = JSString* (*)(JSContext*, HandleString);
13438 callVM<Fn, js::StringToUpperCase>(lir);
13441 void CodeGenerator::visitCharCodeToLowerCase(LCharCodeToLowerCase* lir) {
13442 Register code = ToRegister(lir->code());
13443 Register output = ToRegister(lir->output());
13444 Register temp = ToRegister(lir->temp0());
13446 using Fn = JSString* (*)(JSContext*, int32_t);
13447 auto* ool = oolCallVM<Fn, jit::CharCodeToLowerCase>(lir, ArgList(code),
13448 StoreRegisterTo(output));
13450 constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;
13452 // OOL path if code >= NonLatin1Min.
13453 masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());
13455 // Convert to lower case.
13456 masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), temp);
13457 masm.load8ZeroExtend(BaseIndex(temp, code, TimesOne), temp);
13459 // Load static string for lower case character.
13460 masm.lookupStaticString(temp, output, gen->runtime->staticStrings());
13462 masm.bind(ool->rejoin());
// Upper-case a single Latin-1 char code via the unicode tables and return the
// matching static string; non-Latin-1 codes and the three exceptional
// characters go to the VM.
void CodeGenerator::visitCharCodeToUpperCase(LCharCodeToUpperCase* lir) {
  Register code = ToRegister(lir->code());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  using Fn = JSString* (*)(JSContext*, int32_t);
  auto* ool = oolCallVM<Fn, jit::CharCodeToUpperCase>(lir, ArgList(code),
                                                      StoreRegisterTo(output));

  constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;

  // OOL path if code >= NonLatin1Min.
  masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());

  // Most one element Latin-1 strings can be directly retrieved from the
  // static strings cache, except the following three characters, whose
  // upper-case forms fall outside Latin-1 (or expand to two characters):
  //
  // 1. ToUpper(U+00B5) = U+039C
  // 2. ToUpper(U+00FF) = U+0178
  // 3. ToUpper(U+00DF) = U+0053 U+0053
  masm.branch32(Assembler::Equal, code, Imm32(unicode::MICRO_SIGN),
                ool->entry());
  masm.branch32(Assembler::Equal, code,
                Imm32(unicode::LATIN_SMALL_LETTER_Y_WITH_DIAERESIS),
                ool->entry());
  masm.branch32(Assembler::Equal, code,
                Imm32(unicode::LATIN_SMALL_LETTER_SHARP_S), ool->entry());

  // Inline unicode::ToUpperCase (without the special case for ASCII
  // characters): a two-level index into the js_charinfo table.

  constexpr size_t shift = unicode::CharInfoShift;

  // code >> shift
  masm.move32(code, temp);
  masm.rshift32(Imm32(shift), temp);

  // index = index1[code >> shift];
  masm.movePtr(ImmPtr(unicode::index1), output);
  masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);

  // (code & ((1 << shift) - 1))
  masm.move32(code, output);
  masm.and32(Imm32((1 << shift) - 1), output);

  // (index << shift) + (code & ((1 << shift) - 1))
  masm.lshift32(Imm32(shift), temp);
  masm.add32(output, temp);

  // index = index2[(index << shift) + (code & ((1 << shift) - 1))]
  masm.movePtr(ImmPtr(unicode::index2), output);
  masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);

  // Compute |index * 6| through |(index * 3) * TimesTwo|.
  static_assert(sizeof(unicode::CharacterInfo) == 6);
  masm.mulBy3(temp, temp);

  // upperCase = js_charinfo[index].upperCase
  masm.movePtr(ImmPtr(unicode::js_charinfo), output);
  masm.load16ZeroExtend(BaseIndex(output, temp, TimesTwo,
                                  offsetof(unicode::CharacterInfo, upperCase)),
                        temp);

  // uint16_t(ch) + upperCase
  masm.add32(code, temp);

  // Clear any high bits added when performing the unsigned 16-bit addition
  // through a signed 32-bit addition. An 8-bit zero-extend suffices because
  // the three characters excluded above are the only Latin-1 inputs whose
  // upper-case result doesn't fit in Latin-1.
  masm.move8ZeroExtend(temp, temp);

  // Load static string for upper case character.
  masm.lookupStaticString(temp, output, gen->runtime->staticStrings());

  masm.bind(ool->rejoin());
}
13540 void CodeGenerator::visitStringTrimStartIndex(LStringTrimStartIndex* lir) {
13541 Register string = ToRegister(lir->string());
13542 Register output = ToRegister(lir->output());
13544 auto volatileRegs = liveVolatileRegs(lir);
13545 volatileRegs.takeUnchecked(output);
13547 masm.PushRegsInMask(volatileRegs);
13549 using Fn = int32_t (*)(const JSString*);
13550 masm.setupAlignedABICall();
13551 masm.passABIArg(string);
13552 masm.callWithABI<Fn, jit::StringTrimStartIndex>();
13553 masm.storeCallInt32Result(output);
13555 masm.PopRegsInMask(volatileRegs);
13558 void CodeGenerator::visitStringTrimEndIndex(LStringTrimEndIndex* lir) {
13559 Register string = ToRegister(lir->string());
13560 Register start = ToRegister(lir->start());
13561 Register output = ToRegister(lir->output());
13563 auto volatileRegs = liveVolatileRegs(lir);
13564 volatileRegs.takeUnchecked(output);
13566 masm.PushRegsInMask(volatileRegs);
13568 using Fn = int32_t (*)(const JSString*, int32_t);
13569 masm.setupAlignedABICall();
13570 masm.passABIArg(string);
13571 masm.passABIArg(start);
13572 masm.callWithABI<Fn, jit::StringTrimEndIndex>();
13573 masm.storeCallInt32Result(output);
13575 masm.PopRegsInMask(volatileRegs);
13578 void CodeGenerator::visitStringSplit(LStringSplit* lir) {
13579 pushArg(Imm32(INT32_MAX));
13580 pushArg(ToRegister(lir->separator()));
13581 pushArg(ToRegister(lir->string()));
13583 using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
13584 callVM<Fn, js::StringSplitString>(lir);
13587 void CodeGenerator::visitInitializedLength(LInitializedLength* lir) {
13588 Address initLength(ToRegister(lir->elements()),
13589 ObjectElements::offsetOfInitializedLength());
13590 masm.load32(initLength, ToRegister(lir->output()));
13593 void CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir) {
13594 Address initLength(ToRegister(lir->elements()),
13595 ObjectElements::offsetOfInitializedLength());
13596 SetLengthFromIndex(masm, lir->index(), initLength);
13599 void CodeGenerator::visitNotBI(LNotBI* lir) {
13600 Register input = ToRegister(lir->input());
13601 Register output = ToRegister(lir->output());
13603 masm.cmp32Set(Assembler::Equal, Address(input, BigInt::offsetOfLength()),
13604 Imm32(0), output);
// Codegen for |!obj|. An object is falsy only when it "emulates undefined"
// (e.g. document.all).
void CodeGenerator::visitNotO(LNotO* lir) {
  Register objreg = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  // If the "no object emulates undefined" fuse is intact (and this script
  // noted a dependency on it), every object is truthy and |!obj| is
  // always false.
  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  if (intact) {
    // Bug 1874905: It would be fantastic if this could be optimized out.
    assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
    masm.move32(Imm32(0), output);
  } else {
    auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
    addOutOfLineCode(ool, lir->mir());

    Label* ifEmulatesUndefined = ool->label1();
    Label* ifDoesntEmulateUndefined = ool->label2();

    branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
                                      ifDoesntEmulateUndefined, output, ool);
    // fall through

    Label join;

    // Doesn't emulate undefined: |!obj| is false.
    masm.move32(Imm32(0), output);
    masm.jump(&join);

    // Emulates undefined: |!obj| is true.
    masm.bind(ifEmulatesUndefined);
    masm.move32(Imm32(1), output);

    masm.bind(&join);
  }
}
// Codegen for |!value| on a boxed Value of possibly mixed type.
void CodeGenerator::visitNotV(LNotV* lir) {
  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* ifTruthy = ool->label1();
  Label* ifFalsy = ool->label2();

  ValueOperand input = ToValue(lir, LNotV::InputIndex);
  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
  FloatRegister floatTemp = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());
  // Observed input types (presumably collected by earlier tiers — confirm
  // against MNot), passed through to testValueTruthy.
  const TypeDataList& observedTypes = lir->mir()->observedTypes();

  testValueTruthy(input, tempToUnbox, output, floatTemp, observedTypes,
                  ifTruthy, ifFalsy, ool);

  Label join;

  // Note that the testValueTruthy call above may choose to fall through
  // to ifTruthy instead of branching there.
  masm.bind(ifTruthy);
  masm.move32(Imm32(0), output);
  masm.jump(&join);

  masm.bind(ifFalsy);
  masm.move32(Imm32(1), output);

  // both branches meet here.
  masm.bind(&join);
}
// Emit an unsigned bounds check |index < length|, bailing out on failure.
// Handles both Int32 and IntPtr index types and all register/address/constant
// operand combinations.
void CodeGenerator::visitBoundsCheck(LBoundsCheck* lir) {
  const LAllocation* index = lir->index();
  const LAllocation* length = lir->length();
  LSnapshot* snapshot = lir->snapshot();

  MIRType type = lir->mir()->type();

  // Width-dispatching compare-and-bail helper (32-bit vs pointer-sized).
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  // Same, but wraps the constant rhs in the width-appropriate immediate.
  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (index->isConstant()) {
    // Use uint32 so that the comparison is unsigned.
    uint32_t idx = ToInt32(index);
    if (length->isConstant()) {
      // Both operands constant: the check either always passes (emit
      // nothing) or always fails (unconditional bailout).
      uint32_t len = ToInt32(lir->length());
      if (idx < len) {
        return;
      }
      bailout(snapshot);
      return;
    }

    // Operands are swapped (length vs constant index), hence BelowOrEqual:
    // bail when length <= idx.
    if (length->isRegister()) {
      bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), idx);
    } else {
      bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), idx);
    }
    return;
  }

  Register indexReg = ToRegister(index);
  if (length->isConstant()) {
    bailoutCmpConstant(Assembler::AboveOrEqual, indexReg, ToInt32(length));
  } else if (length->isRegister()) {
    // Operands swapped again: bail when length <= index.
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), indexReg);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), indexReg);
  }
}
// Bounds check for a range of accesses [index + min, index + max]: verifies
// that the whole range lies below |length|, bailing out otherwise.
void CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir) {
  int32_t min = lir->mir()->minimum();
  int32_t max = lir->mir()->maximum();
  MOZ_ASSERT(max >= min);

  LSnapshot* snapshot = lir->snapshot();
  MIRType type = lir->mir()->type();

  const LAllocation* length = lir->length();
  Register temp = ToRegister(lir->getTemp(0));

  // Width-dispatching compare-and-bail helpers (Int32 vs IntPtr), as in
  // visitBoundsCheck.
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (lir->index()->isConstant()) {
    int32_t nmin, nmax;
    int32_t index = ToInt32(lir->index());
    // When both endpoints fold without overflow and the lower endpoint is
    // nonnegative, a single unsigned compare against the upper endpoint
    // suffices.
    if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
      if (length->isRegister()) {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), nmax);
      } else {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), nmax);
      }
      return;
    }
    masm.mov(ImmWord(index), temp);
  } else {
    masm.mov(ToRegister(lir->index()), temp);
  }

  // If the minimum and maximum differ then do an underflow check first.
  // If the two are the same then doing an unsigned comparison on the
  // length will also catch a negative index.
  if (min != max) {
    if (min != 0) {
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(min), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    }

    // Bail on a negative lower endpoint (signed compare).
    bailoutCmpConstant(Assembler::LessThan, temp, 0);

    // Convert |max| to the distance from the lower endpoint, when that
    // subtraction can't overflow; otherwise undo the |min| addition.
    if (min != 0) {
      int32_t diff;
      if (SafeSub(max, min, &diff)) {
        max = diff;
      } else {
        if (type == MIRType::Int32) {
          masm.sub32(Imm32(min), temp);
        } else {
          masm.subPtr(Imm32(min), temp);
        }
      }
    }
  }

  // Compute the maximum possible index. No overflow check is needed when
  // max > 0. We can only wraparound to a negative number, which will test as
  // larger than all nonnegative numbers in the unsigned comparison, and the
  // length is required to be nonnegative (else testing a negative length
  // would succeed on any nonnegative index).
  if (max != 0) {
    if (max < 0) {
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(max), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    } else {
      if (type == MIRType::Int32) {
        masm.add32(Imm32(max), temp);
      } else {
        masm.addPtr(Imm32(max), temp);
      }
    }
  }

  // Operands swapped (length vs computed max index): bail when
  // length <= maxIndex.
  if (length->isRegister()) {
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), temp);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), temp);
  }
}
13832 void CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir) {
13833 int32_t min = lir->mir()->minimum();
13834 bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
13835 lir->snapshot());
13838 void CodeGenerator::visitSpectreMaskIndex(LSpectreMaskIndex* lir) {
13839 MOZ_ASSERT(JitOptions.spectreIndexMasking);
13841 const LAllocation* length = lir->length();
13842 Register index = ToRegister(lir->index());
13843 Register output = ToRegister(lir->output());
13845 if (lir->mir()->type() == MIRType::Int32) {
13846 if (length->isRegister()) {
13847 masm.spectreMaskIndex32(index, ToRegister(length), output);
13848 } else {
13849 masm.spectreMaskIndex32(index, ToAddress(length), output);
13851 } else {
13852 MOZ_ASSERT(lir->mir()->type() == MIRType::IntPtr);
13853 if (length->isRegister()) {
13854 masm.spectreMaskIndexPtr(index, ToRegister(length), output);
13855 } else {
13856 masm.spectreMaskIndexPtr(index, ToAddress(length), output);
13861 class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator> {
13862 LInstruction* ins_;
13864 public:
13865 explicit OutOfLineStoreElementHole(LInstruction* ins) : ins_(ins) {
13866 MOZ_ASSERT(ins->isStoreElementHoleV() || ins->isStoreElementHoleT());
13869 void accept(CodeGenerator* codegen) override {
13870 codegen->visitOutOfLineStoreElementHole(this);
13873 MStoreElementHole* mir() const {
13874 return ins_->isStoreElementHoleV() ? ins_->toStoreElementHoleV()->mir()
13875 : ins_->toStoreElementHoleT()->mir();
13877 LInstruction* ins() const { return ins_; }
13880 void CodeGenerator::emitStoreHoleCheck(Register elements,
13881 const LAllocation* index,
13882 LSnapshot* snapshot) {
13883 Label bail;
13884 if (index->isConstant()) {
13885 Address dest(elements, ToInt32(index) * sizeof(js::Value));
13886 masm.branchTestMagic(Assembler::Equal, dest, &bail);
13887 } else {
13888 BaseObjectElementIndex dest(elements, ToRegister(index));
13889 masm.branchTestMagic(Assembler::Equal, dest, &bail);
13891 bailoutFrom(&bail, snapshot);
13894 void CodeGenerator::emitStoreElementTyped(const LAllocation* value,
13895 MIRType valueType, Register elements,
13896 const LAllocation* index) {
13897 MOZ_ASSERT(valueType != MIRType::MagicHole);
13898 ConstantOrRegister v = ToConstantOrRegister(value, valueType);
13899 if (index->isConstant()) {
13900 Address dest(elements, ToInt32(index) * sizeof(js::Value));
13901 masm.storeUnboxedValue(v, valueType, dest);
13902 } else {
13903 BaseObjectElementIndex dest(elements, ToRegister(index));
13904 masm.storeUnboxedValue(v, valueType, dest);
13908 void CodeGenerator::visitStoreElementT(LStoreElementT* store) {
13909 Register elements = ToRegister(store->elements());
13910 const LAllocation* index = store->index();
13912 if (store->mir()->needsBarrier()) {
13913 emitPreBarrier(elements, index);
13916 if (store->mir()->needsHoleCheck()) {
13917 emitStoreHoleCheck(elements, index, store->snapshot());
13920 emitStoreElementTyped(store->value(), store->mir()->value()->type(), elements,
13921 index);
13924 void CodeGenerator::visitStoreElementV(LStoreElementV* lir) {
13925 const ValueOperand value = ToValue(lir, LStoreElementV::Value);
13926 Register elements = ToRegister(lir->elements());
13927 const LAllocation* index = lir->index();
13929 if (lir->mir()->needsBarrier()) {
13930 emitPreBarrier(elements, index);
13933 if (lir->mir()->needsHoleCheck()) {
13934 emitStoreHoleCheck(elements, index, lir->snapshot());
13937 if (lir->index()->isConstant()) {
13938 Address dest(elements, ToInt32(lir->index()) * sizeof(js::Value));
13939 masm.storeValue(value, dest);
13940 } else {
13941 BaseObjectElementIndex dest(elements, ToRegister(lir->index()));
13942 masm.storeValue(value, dest);
13946 void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
13947 Register elements = ToRegister(lir->elements());
13948 Register index = ToRegister(lir->index());
13950 Address elementsFlags(elements, ObjectElements::offsetOfFlags());
13951 masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);
13953 BaseObjectElementIndex element(elements, index);
13954 masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
// Store a typed value into a dense element, handling the
// |index == initializedLength| case out-of-line (which may grow the
// elements).
void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  // index >= initializedLength takes the OOL path. NOTE: the OOL code relies
  // on the condition flags set by this compare still being live on entry
  // (see visitOutOfLineStoreElementHole).
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // The OOL path rejoins here (after the pre-barrier, since newly grown
  // memory is uninitialized) for the actual store.
  masm.bind(ool->rejoin());
  emitStoreElementTyped(lir->value(), lir->mir()->value()->type(), elements,
                        lir->index());

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    ConstantOrRegister val =
        ToConstantOrRegister(lir->value(), lir->mir()->value()->type());
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp, val);
  }
}
// Store a boxed Value into a dense element, handling the
// |index == initializedLength| case out-of-line (which may grow the
// elements).
void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  const ValueOperand value = ToValue(lir, LStoreElementHoleV::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  // index >= initializedLength takes the OOL path. NOTE: the OOL code relies
  // on the condition flags set by this compare still being live on entry
  // (see visitOutOfLineStoreElementHole).
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // The OOL path rejoins here (after the pre-barrier, since newly grown
  // memory is uninitialized) for the actual store.
  masm.bind(ool->rejoin());
  masm.storeValue(value, BaseObjectElementIndex(elements, index));

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp,
                                ConstantOrRegister(value));
  }
}
// Out-of-line path for StoreElementHole{V,T}: handles stores at
// |index == initializedLength| by growing the initialized length (and, when
// needed, the capacity and length), then rejoins the inline store.
void CodeGenerator::visitOutOfLineStoreElementHole(
    OutOfLineStoreElementHole* ool) {
  Register object, elements, index;
  LInstruction* ins = ool->ins();
  mozilla::Maybe<ConstantOrRegister> value;
  Register temp;

  if (ins->isStoreElementHoleV()) {
    LStoreElementHoleV* store = ins->toStoreElementHoleV();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    value.emplace(
        TypedOrValueRegister(ToValue(store, LStoreElementHoleV::ValueIndex)));
    temp = ToRegister(store->temp0());
  } else {
    LStoreElementHoleT* store = ins->toStoreElementHoleT();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    if (store->value()->isConstant()) {
      value.emplace(
          ConstantOrRegister(store->value()->toConstant()->toJSValue()));
    } else {
      MIRType valueType = store->mir()->value()->type();
      value.emplace(
          TypedOrValueRegister(valueType, ToAnyRegister(store->value())));
    }
    temp = ToRegister(store->temp0());
  }
  // NOTE(review): |value| is materialized above but not read below — the
  // actual store happens on the inline path after rejoining; confirm whether
  // this is intentionally kept for register-liveness/documentation purposes.

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());

  // We're out-of-bounds. We only handle the index == initlength case.
  // If index > initializedLength, bail out. Note that this relies on the
  // condition flags sticking from the incoming branch.
  // Also note: this branch does not need Spectre mitigations, doing that for
  // the capacity check below is sufficient.
  Label allocElement, addNewElement;
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  // Had to reimplement for MIPS because there are no flags.
  bailoutCmp32(Assembler::NotEqual, initLength, index, ins->snapshot());
#else
  bailoutIf(Assembler::NotEqual, ins->snapshot());
#endif

  // If index < capacity, we can add a dense element inline. If not, we need
  // to allocate more elements first.
  masm.spectreBoundsCheck32(
      index, Address(elements, ObjectElements::offsetOfCapacity()), temp,
      &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  // Save all live volatile registers, except |temp|.
  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  liveRegs.takeUnchecked(temp);
  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject*);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  // A false return means the pure path couldn't grow the elements; bail out
  // to the interpreter.
  bailoutIfFalseBool(temp, ins->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);

  masm.bind(&addNewElement);

  // Increment initLength
  masm.add32(Imm32(1), initLength);

  // If length is now <= index, increment length too.
  Label skipIncrementLength;
  Address length(elements, ObjectElements::offsetOfLength());
  masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
  masm.add32(Imm32(1), length);
  masm.bind(&skipIncrementLength);

  // Jump to the inline path where we will store the value.
  // We rejoin after the prebarrier, because the memory is uninitialized.
  masm.jump(ool->rejoin());
}
14101 void CodeGenerator::visitArrayPopShift(LArrayPopShift* lir) {
14102 Register obj = ToRegister(lir->object());
14103 Register temp1 = ToRegister(lir->temp0());
14104 Register temp2 = ToRegister(lir->temp1());
14105 ValueOperand out = ToOutValue(lir);
14107 Label bail;
14108 if (lir->mir()->mode() == MArrayPopShift::Pop) {
14109 masm.packedArrayPop(obj, out, temp1, temp2, &bail);
14110 } else {
14111 MOZ_ASSERT(lir->mir()->mode() == MArrayPopShift::Shift);
14112 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
14113 masm.packedArrayShift(obj, out, temp1, temp2, volatileRegs, &bail);
14115 bailoutFrom(&bail, lir->snapshot());
// Out-of-line path for LArrayPush, taken when the array's elements must be
// grown before the new element can be stored (see visitOutOfLineArrayPush).
class OutOfLineArrayPush : public OutOfLineCodeBase<CodeGenerator> {
  LArrayPush* ins_;

 public:
  explicit OutOfLineArrayPush(LArrayPush* ins) : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineArrayPush(this);
  }

  // The LIR instruction this OOL path belongs to.
  LArrayPush* ins() const { return ins_; }
};
// Lower LArrayPush: append |value| to a packed array and return the new
// length. The inline path handles |length == initLength < capacity|; the
// out-of-line path grows the elements buffer first and rejoins before the
// store.
void CodeGenerator::visitArrayPush(LArrayPush* lir) {
  Register obj = ToRegister(lir->object());
  Register elementsTemp = ToRegister(lir->temp0());
  Register length = ToRegister(lir->output());
  ValueOperand value = ToValue(lir, LArrayPush::ValueIndex);
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  auto* ool = new (alloc()) OutOfLineArrayPush(lir);
  addOutOfLineCode(ool, lir->mir());

  // Load obj->elements in elementsTemp.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);

  Address initLengthAddr(elementsTemp,
                         ObjectElements::offsetOfInitializedLength());
  Address lengthAddr(elementsTemp, ObjectElements::offsetOfLength());
  Address capacityAddr(elementsTemp, ObjectElements::offsetOfCapacity());

  // Bail out if length != initLength (i.e. the array has holes past the
  // initialized portion); the fast path only supports dense appends.
  masm.load32(lengthAddr, length);
  bailoutCmp32(Assembler::NotEqual, initLengthAddr, length, lir->snapshot());

  // If length < capacity, we can add a dense element inline. If not, we
  // need to allocate more elements.
  masm.spectreBoundsCheck32(length, capacityAddr, spectreTemp, ool->entry());
  masm.bind(ool->rejoin());

  // Store the value. |length| still holds the old length, i.e. the index of
  // the new element. Note: the OOL path reloads |elementsTemp| because
  // growing may reallocate the elements buffer.
  masm.storeValue(value, BaseObjectElementIndex(elementsTemp, length));

  // Update length and initialized length.
  masm.add32(Imm32(1), length);
  masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
  masm.store32(length, Address(elementsTemp,
                               ObjectElements::offsetOfInitializedLength()));

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    regs.addUnchecked(length);
    // indexDiff = -1: |length| was already incremented, so the stored
    // element's index is |length - 1|.
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->output()->output(),
                                elementsTemp, ConstantOrRegister(value),
                                /* indexDiff = */ -1);
  }
}
// Out-of-line path for LArrayPush: call NativeObject::addDenseElementPure via
// the ABI to grow the elements buffer, bail out on failure, then reload the
// (possibly reallocated) elements pointer and rejoin the inline path.
void CodeGenerator::visitOutOfLineArrayPush(OutOfLineArrayPush* ool) {
  LArrayPush* ins = ool->ins();

  Register object = ToRegister(ins->object());
  Register temp = ToRegister(ins->temp0());

  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  // |temp| receives the call result, so it must not be saved/restored.
  liveRegs.takeUnchecked(temp);
  // The output register and the pushed value must survive the call even if
  // they are non-volatile.
  liveRegs.addUnchecked(ToRegister(ins->output()));
  liveRegs.addUnchecked(ToValue(ins, LArrayPush::ValueIndex));

  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject* obj);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  // addDenseElementPure returns false on failure; deoptimize in that case.
  bailoutIfFalseBool(temp, ins->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  masm.jump(ool->rejoin());
}
// Lower LArraySlice: Array.prototype.slice on a packed array. Bails out if
// the array is not packed. Tries to allocate the result object inline and
// passes it (or nullptr on allocation failure) to the ArraySliceDense VM
// function, which fills in (or allocates) the result.
void CodeGenerator::visitArraySlice(LArraySlice* lir) {
  Register object = ToRegister(lir->object());
  Register begin = ToRegister(lir->begin());
  Register end = ToRegister(lir->end());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label call, fail;

  Label bail;
  masm.branchArrayIsNotPacked(object, temp0, temp1, &bail);
  bailoutFrom(&bail, lir->snapshot());

  // Try to allocate an object.
  TemplateObject templateObject(lir->mir()->templateObj());
  masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
                      &fail);

  masm.jump(&call);

  // Allocation failed: pass nullptr so the VM call allocates the result.
  masm.bind(&fail);
  masm.movePtr(ImmPtr(nullptr), temp0);

  masm.bind(&call);

  pushArg(temp0);
  pushArg(end);
  pushArg(begin);
  pushArg(object);

  using Fn =
      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
  callVM<Fn, ArraySliceDense>(lir);
}
// Lower LArgumentsSlice: slice on an arguments object. Mirrors
// visitArraySlice, but calls ArgumentsSliceDense and needs no packed-array
// check. A nullptr result object tells the VM call to allocate one itself.
void CodeGenerator::visitArgumentsSlice(LArgumentsSlice* lir) {
  Register object = ToRegister(lir->object());
  Register begin = ToRegister(lir->begin());
  Register end = ToRegister(lir->end());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label call, fail;

  // Try to allocate an object.
  TemplateObject templateObject(lir->mir()->templateObj());
  masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
                      &fail);

  masm.jump(&call);

  // Allocation failed: pass nullptr so the VM call allocates the result.
  masm.bind(&fail);
  masm.movePtr(ImmPtr(nullptr), temp0);

  masm.bind(&call);

  pushArg(temp0);
  pushArg(end);
  pushArg(begin);
  pushArg(object);

  using Fn =
      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
  callVM<Fn, ArgumentsSliceDense>(lir);
}
#ifdef DEBUG
// Debug-only runtime assertions for arguments-slice lowering: verify that
// 0 <= begin, 0 <= count, begin <= numActualArgs, count <= numActualArgs and
// begin + count <= numActualArgs. |begin| and |count| may each be either a
// register or a compile-time constant; constant cases are checked with
// MOZ_ASSERT at compile time instead of emitted code. The assumeUnreachable
// messages name the invariant that was violated when the branch falls
// through.
void CodeGenerator::emitAssertArgumentsSliceBounds(const RegisterOrInt32& begin,
                                                   const RegisterOrInt32& count,
                                                   Register numActualArgs) {
  // |begin| must be positive or zero.
  if (begin.is<Register>()) {
    Label beginOk;
    masm.branch32(Assembler::GreaterThanOrEqual, begin.as<Register>(), Imm32(0),
                  &beginOk);
    masm.assumeUnreachable("begin < 0");
    masm.bind(&beginOk);
  } else {
    MOZ_ASSERT(begin.as<int32_t>() >= 0);
  }

  // |count| must be positive or zero.
  if (count.is<Register>()) {
    Label countOk;
    masm.branch32(Assembler::GreaterThanOrEqual, count.as<Register>(), Imm32(0),
                  &countOk);
    masm.assumeUnreachable("count < 0");
    masm.bind(&countOk);
  } else {
    MOZ_ASSERT(count.as<int32_t>() >= 0);
  }

  // |begin| must be less-or-equal to |numActualArgs|.
  Label argsBeginOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginOk);
  }
  masm.assumeUnreachable("begin <= numActualArgs");
  masm.bind(&argsBeginOk);

  // |count| must be less-or-equal to |numActualArgs|.
  Label argsCountOk;
  if (count.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, count.as<Register>(),
                   &argsCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(count.as<int32_t>()), &argsCountOk);
  }
  masm.assumeUnreachable("count <= numActualArgs");
  masm.bind(&argsCountOk);

  // |begin| and |count| must be preserved, but |numActualArgs| can be changed.
  //
  // Pre-condition: |count| <= |numActualArgs|
  // Condition to test: |begin + count| <= |numActualArgs|
  // Transform to: |begin| <= |numActualArgs - count|
  if (count.is<Register>()) {
    masm.subPtr(count.as<Register>(), numActualArgs);
  } else {
    masm.subPtr(Imm32(count.as<int32_t>()), numActualArgs);
  }

  // |begin + count| must be less-or-equal to |numActualArgs|.
  Label argsBeginCountOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginCountOk);
  }
  masm.assumeUnreachable("begin + count <= numActualArgs");
  masm.bind(&argsBeginCountOk);
}
#endif
// Allocate the result array for an arguments-slice instruction (shared by
// LFrameArgumentsSlice and LInlineArgumentsSlice). Tries to allocate the
// template object's shape inline when |count| fits in the template's dense
// capacity; otherwise (or on allocation failure) calls
// NewArrayObjectEnsureDenseInitLength out-of-line. On the inline path the
// array's length and initializedLength are set to |count| so callers only
// need to fill in the elements.
template <class ArgumentsSlice>
void CodeGenerator::emitNewArray(ArgumentsSlice* lir,
                                 const RegisterOrInt32& count, Register output,
                                 Register temp) {
  using Fn = ArrayObject* (*)(JSContext*, int32_t);
  auto* ool = count.match(
      [&](Register count) {
        return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
            lir, ArgList(count), StoreRegisterTo(output));
      },
      [&](int32_t count) {
        return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
            lir, ArgList(Imm32(count)), StoreRegisterTo(output));
      });

  TemplateObject templateObject(lir->mir()->templateObj());
  MOZ_ASSERT(templateObject.isArrayObject());

  auto templateNativeObj = templateObject.asTemplateNativeObject();
  MOZ_ASSERT(templateNativeObj.getArrayLength() == 0);
  MOZ_ASSERT(templateNativeObj.getDenseInitializedLength() == 0);
  MOZ_ASSERT(!templateNativeObj.hasDynamicElements());

  // Check array capacity. Call into the VM if the template object's capacity
  // is too small.
  bool tryAllocate = count.match(
      [&](Register count) {
        masm.branch32(Assembler::Above, count,
                      Imm32(templateNativeObj.getDenseCapacity()),
                      ool->entry());
        return true;
      },
      [&](int32_t count) {
        MOZ_ASSERT(count >= 0);
        if (uint32_t(count) > templateNativeObj.getDenseCapacity()) {
          // A constant count that never fits: the VM call is unconditional
          // and no inline allocation code needs to be emitted.
          masm.jump(ool->entry());
          return false;
        }
        return true;
      });

  if (tryAllocate) {
    // Try to allocate an object.
    masm.createGCObject(output, temp, templateObject, lir->mir()->initialHeap(),
                        ool->entry());

    auto setInitializedLengthAndLength = [&](auto count) {
      const int elementsOffset = NativeObject::offsetOfFixedElements();

      // Update initialized length.
      Address initLength(
          output, elementsOffset + ObjectElements::offsetOfInitializedLength());
      masm.store32(count, initLength);

      // Update length.
      Address length(output, elementsOffset + ObjectElements::offsetOfLength());
      masm.store32(count, length);
    };

    // The array object was successfully created. Set the length and
    // initialized length and then proceed to fill the elements.
    count.match([&](Register count) { setInitializedLengthAndLength(count); },
                [&](int32_t count) {
                  if (count > 0) {
                    // Template already has length 0, so nothing to store
                    // when count == 0.
                    setInitializedLengthAndLength(Imm32(count));
                  }
                });
  }

  masm.bind(ool->rejoin());
}
// Lower LFrameArgumentsSlice: build an array from |count| actual arguments of
// the current JIT frame, starting at argument index |begin|. The array is
// allocated via emitNewArray, then filled with a small copy loop reading
// directly from the frame's actuals.
void CodeGenerator::visitFrameArgumentsSlice(LFrameArgumentsSlice* lir) {
  Register begin = ToRegister(lir->begin());
  Register count = ToRegister(lir->count());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  masm.loadNumActualArgs(FramePointer, temp);
  emitAssertArgumentsSliceBounds(RegisterOrInt32(begin), RegisterOrInt32(count),
                                 temp);
#endif

  emitNewArray(lir, RegisterOrInt32(count), output, temp);

  Label done;
  masm.branch32(Assembler::Equal, count, Imm32(0), &done);
  {
    // Scratch registers for the copy loop, excluding the ones we must keep.
    AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    allRegs.take(begin);
    allRegs.take(count);
    allRegs.take(temp);
    allRegs.take(output);

    ValueOperand value = allRegs.takeAnyValue();

    // |output| and |begin| are clobbered below (as elements pointer and
    // running argument index), so save and restore them around the loop.
    LiveRegisterSet liveRegs;
    liveRegs.add(output);
    liveRegs.add(begin);
    liveRegs.add(value);

    masm.PushRegsInMask(liveRegs);

    // Initialize all elements.

    Register elements = output;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    Register argIndex = begin;

    Register index = temp;
    masm.move32(Imm32(0), index);

    size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
    BaseValueIndex argPtr(FramePointer, argIndex, argvOffset);

    Label loop;
    masm.bind(&loop);

    masm.loadValue(argPtr, value);

    // We don't need a pre-barrier, because the element at |index| is
    // guaranteed to be a non-GC thing (either uninitialized memory or the
    // magic hole value).
    masm.storeValue(value, BaseObjectElementIndex(elements, index));

    masm.add32(Imm32(1), index);
    masm.add32(Imm32(1), argIndex);

    masm.branch32(Assembler::LessThan, index, count, &loop);

    masm.PopRegsInMask(liveRegs);

    // Emit a post-write barrier if |output| is tenured.
    //
    // We expect that |output| is nursery allocated, so it isn't worth the
    // trouble to check if no frame argument is a nursery thing, which would
    // allow to omit the post-write barrier.
    masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(temp);
    if (output.volatile_()) {
      volatileRegs.addUnchecked(output);
    }

    masm.PushRegsInMask(volatileRegs);
    emitPostWriteBarrier(output);
    masm.PopRegsInMask(volatileRegs);
  }
  masm.bind(&done);
}
// Convert an LAllocation into either its compile-time int32 constant or the
// register it was allocated to.
CodeGenerator::RegisterOrInt32 CodeGenerator::ToRegisterOrInt32(
    const LAllocation* allocation) {
  if (allocation->isConstant()) {
    return RegisterOrInt32(allocation->toConstant()->toInt32());
  }
  return RegisterOrInt32(ToRegister(allocation));
}
// Lower LInlineArgumentsSlice: slice the arguments of an inlined call, where
// each actual argument is available as an LIR operand rather than on a frame.
// |begin| and |count| may each be a register or a compile-time constant, so
// the element-initialization strategy is specialized accordingly.
void CodeGenerator::visitInlineArgumentsSlice(LInlineArgumentsSlice* lir) {
  RegisterOrInt32 begin = ToRegisterOrInt32(lir->begin());
  RegisterOrInt32 count = ToRegisterOrInt32(lir->count());
  Register temp = ToRegister(lir->temp());
  Register output = ToRegister(lir->output());

  uint32_t numActuals = lir->mir()->numActuals();

#ifdef DEBUG
  masm.move32(Imm32(numActuals), temp);

  emitAssertArgumentsSliceBounds(begin, count, temp);
#endif

  emitNewArray(lir, count, output, temp);

  // We're done if there are no actual arguments.
  if (numActuals == 0) {
    return;
  }

  // Check if any arguments have to be copied.
  Label done;
  if (count.is<Register>()) {
    masm.branch32(Assembler::Equal, count.as<Register>(), Imm32(0), &done);
  } else if (count.as<int32_t>() == 0) {
    return;
  }

  // Fetch the i-th inlined actual argument as a constant or register.
  auto getArg = [&](uint32_t i) {
    return toConstantOrRegister(lir, LInlineArgumentsSlice::ArgIndex(i),
                                lir->mir()->getArg(i)->type());
  };

  auto storeArg = [&](uint32_t i, auto dest) {
    // We don't need a pre-barrier because the element at |index| is
    // guaranteed to be a non-GC thing (either uninitialized memory or the
    // magic hole value).
    masm.storeConstantOrRegister(getArg(i), dest);
  };

  // Initialize all elements.
  if (numActuals == 1) {
    // There's exactly one argument. We've checked that |count| is non-zero,
    // which implies that |begin| must be zero.
    MOZ_ASSERT_IF(begin.is<int32_t>(), begin.as<int32_t>() == 0);

    Register elements = temp;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    storeArg(0, Address(elements, 0));
  } else if (begin.is<Register>()) {
    // There is more than one argument and |begin| isn't a compile-time
    // constant. Iterate through 0..numActuals to search for |begin| and then
    // start copying |count| arguments from that index.

    LiveGeneralRegisterSet liveRegs;
    liveRegs.add(output);
    liveRegs.add(begin.as<Register>());

    masm.PushRegsInMask(liveRegs);

    Register elements = output;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    Register argIndex = begin.as<Register>();

    Register index = temp;
    masm.move32(Imm32(0), index);

    Label doneLoop;
    for (uint32_t i = 0; i < numActuals; ++i) {
      // Skip until the runtime |argIndex| matches this unrolled position.
      Label next;
      masm.branch32(Assembler::NotEqual, argIndex, Imm32(i), &next);

      storeArg(i, BaseObjectElementIndex(elements, index));

      masm.add32(Imm32(1), index);
      masm.add32(Imm32(1), argIndex);

      if (count.is<Register>()) {
        masm.branch32(Assembler::GreaterThanOrEqual, index,
                      count.as<Register>(), &doneLoop);
      } else {
        masm.branch32(Assembler::GreaterThanOrEqual, index,
                      Imm32(count.as<int32_t>()), &doneLoop);
      }

      masm.bind(&next);
    }
    masm.bind(&doneLoop);

    masm.PopRegsInMask(liveRegs);
  } else {
    // There is more than one argument and |begin| is a compile-time constant.

    Register elements = temp;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    int32_t argIndex = begin.as<int32_t>();

    int32_t index = 0;

    Label doneLoop;
    for (uint32_t i = argIndex; i < numActuals; ++i) {
      storeArg(i, Address(elements, index * sizeof(Value)));

      index += 1;

      if (count.is<Register>()) {
        masm.branch32(Assembler::LessThanOrEqual, count.as<Register>(),
                      Imm32(index), &doneLoop);
      } else {
        // Both bounds are constants, so the loop can stop at compile time.
        if (index >= count.as<int32_t>()) {
          break;
        }
      }
    }
    masm.bind(&doneLoop);
  }

  // Determine if we have to emit post-write barrier.
  //
  // If either |begin| or |count| is a constant, use their value directly.
  // Otherwise assume we copy all inline arguments from 0..numActuals.
  bool postWriteBarrier = false;
  uint32_t actualBegin = begin.match([](Register) { return 0; },
                                     [](int32_t value) { return value; });
  uint32_t actualCount =
      count.match([=](Register) { return numActuals; },
                  [](int32_t value) -> uint32_t { return value; });
  for (uint32_t i = 0; i < actualCount; ++i) {
    ConstantOrRegister arg = getArg(actualBegin + i);
    if (arg.constant()) {
      Value v = arg.value();
      if (v.isGCThing() && IsInsideNursery(v.toGCThing())) {
        postWriteBarrier = true;
      }
    } else {
      MIRType type = arg.reg().type();
      if (type == MIRType::Value || NeedsPostBarrier(type)) {
        postWriteBarrier = true;
      }
    }
  }

  // Emit a post-write barrier if |output| is tenured and we couldn't
  // determine at compile-time that no barrier is needed.
  if (postWriteBarrier) {
    masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(temp);
    if (output.volatile_()) {
      volatileRegs.addUnchecked(output);
    }

    masm.PushRegsInMask(volatileRegs);
    emitPostWriteBarrier(output);
    masm.PopRegsInMask(volatileRegs);
  }

  masm.bind(&done);
}
// Lower LNormalizeSliceTerm: normalize a slice bound |value| against
// |length|, computing clamp(value < 0 ? value + length : value, 0, length)
// as done for relative indices in Array.prototype.slice-style operations.
void CodeGenerator::visitNormalizeSliceTerm(LNormalizeSliceTerm* lir) {
  Register value = ToRegister(lir->value());
  Register length = ToRegister(lir->length());
  Register output = ToRegister(lir->output());

  masm.move32(value, output);

  Label positive;
  masm.branch32(Assembler::GreaterThanOrEqual, value, Imm32(0), &positive);

  // Negative: add length, then clamp below at 0.
  Label done;
  masm.add32(length, output);
  masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(0), &done);
  masm.move32(Imm32(0), output);
  masm.jump(&done);

  // Non-negative: clamp above at length.
  masm.bind(&positive);
  masm.cmp32Move32(Assembler::LessThan, length, value, length, output);

  masm.bind(&done);
}
// Lower LArrayJoin: Array.prototype.join. Emits inline fast paths for the
// empty array (returns the empty atom) and for a one-element array whose
// single element is already a string; everything else goes through the
// jit::ArrayJoin VM call.
void CodeGenerator::visitArrayJoin(LArrayJoin* lir) {
  Label skipCall;

  Register output = ToRegister(lir->output());
  Register sep = ToRegister(lir->separator());
  Register array = ToRegister(lir->array());
  Register temp = ToRegister(lir->temp0());

  // Fast path for simple length <= 1 cases.
  {
    masm.loadPtr(Address(array, NativeObject::offsetOfElements()), temp);
    Address length(temp, ObjectElements::offsetOfLength());
    Address initLength(temp, ObjectElements::offsetOfInitializedLength());

    // Check for length == 0
    Label notEmpty;
    masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmpty);
    const JSAtomState& names = gen->runtime->names();
    masm.movePtr(ImmGCPtr(names.empty_), output);
    masm.jump(&skipCall);

    masm.bind(&notEmpty);
    Label notSingleString;
    // Check for length == 1, initializedLength >= 1, arr[0].isString()
    masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleString);
    masm.branch32(Assembler::LessThan, initLength, Imm32(1), &notSingleString);

    Address elem0(temp, 0);
    masm.branchTestString(Assembler::NotEqual, elem0, &notSingleString);

    // At this point, 'output' can be used as a scratch register, since we're
    // guaranteed to succeed.
    masm.unboxString(elem0, output);
    masm.jump(&skipCall);
    masm.bind(&notSingleString);
  }

  pushArg(sep);
  pushArg(array);

  using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
  callVM<Fn, jit::ArrayJoin>(lir);
  masm.bind(&skipCall);
}
// Lower LObjectKeys: Object.keys(obj), implemented entirely as a VM call.
void CodeGenerator::visitObjectKeys(LObjectKeys* lir) {
  Register object = ToRegister(lir->object());

  pushArg(object);

  using Fn = JSObject* (*)(JSContext*, HandleObject);
  callVM<Fn, jit::ObjectKeys>(lir);
}
// Lower LObjectKeysLength: Object.keys(obj).length without materializing the
// keys array, implemented as a VM call returning the count via an outparam.
void CodeGenerator::visitObjectKeysLength(LObjectKeysLength* lir) {
  Register object = ToRegister(lir->object());

  pushArg(object);

  using Fn = bool (*)(JSContext*, HandleObject, int32_t*);
  callVM<Fn, jit::ObjectKeysLength>(lir);
}
// Lower LGetIteratorCache: attach an IonGetIteratorIC inline cache that
// produces the iterator object for a for-of / iteration protocol lookup.
void CodeGenerator::visitGetIteratorCache(LGetIteratorCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister val =
      toConstantOrRegister(lir, LGetIteratorCache::ValueIndex,
                           lir->mir()->value()->type())
          .reg();
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  IonGetIteratorIC ic(liveRegs, val, output, temp0, temp1);
  addIC(lir, allocateIC(ic));
}
// Lower LOptimizeSpreadCallCache: attach an IC that checks whether a spread
// call's argument can be optimized, producing the optimized value.
void CodeGenerator::visitOptimizeSpreadCallCache(
    LOptimizeSpreadCallCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  ValueOperand val = ToValue(lir, LOptimizeSpreadCallCache::ValueIndex);
  ValueOperand output = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

  IonOptimizeSpreadCallIC ic(liveRegs, val, output, temp);
  addIC(lir, allocateIC(ic));
}
// Lower LCloseIterCache: attach an IC that closes an iterator (calls its
// |return| method) for the given completion kind.
void CodeGenerator::visitCloseIterCache(LCloseIterCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  Register iter = ToRegister(lir->iter());
  Register temp = ToRegister(lir->temp0());
  CompletionKind kind = CompletionKind(lir->mir()->completionKind());

  IonCloseIterIC ic(liveRegs, iter, temp, kind);
  addIC(lir, allocateIC(ic));
}
// Lower LOptimizeGetIteratorCache: attach an IC that checks whether the
// default iteration protocol can be skipped for |val|, yielding a boolean.
void CodeGenerator::visitOptimizeGetIteratorCache(
    LOptimizeGetIteratorCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  ValueOperand val = ToValue(lir, LOptimizeGetIteratorCache::ValueIndex);
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  IonOptimizeGetIteratorIC ic(liveRegs, val, output, temp);
  addIC(lir, allocateIC(ic));
}
// Lower LIteratorMore: advance a native iterator, producing either the next
// property name or the magic stop value (see MacroAssembler::iteratorMore).
void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
  const Register obj = ToRegister(lir->iterator());
  const ValueOperand output = ToOutValue(lir);
  const Register temp = ToRegister(lir->temp0());

  masm.iteratorMore(obj, output, temp);
}
// Lower LIsNoIterAndBranch: branch on whether |input| is the magic
// "no-iteration" value. Falls through to the false block when it is the
// next one in emission order.
void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
  ValueOperand input = ToValue(lir, LIsNoIterAndBranch::Input);
  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());

  masm.branchTestMagic(Assembler::Equal, input, ifTrue);

  if (!isNextBlock(lir->ifFalse()->lir())) {
    masm.jump(ifFalse);
  }
}
// Lower LIteratorEnd: close a native iterator when iteration completes
// (see MacroAssembler::iteratorClose).
void CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) {
  const Register obj = ToRegister(lir->object());
  const Register temp0 = ToRegister(lir->temp0());
  const Register temp1 = ToRegister(lir->temp1());
  const Register temp2 = ToRegister(lir->temp2());

  masm.iteratorClose(obj, temp0, temp1, temp2);
}
// Lower LArgumentsLength: load |arguments.length| for the current frame.
void CodeGenerator::visitArgumentsLength(LArgumentsLength* lir) {
  // read number of actual arguments from the JS frame.
  Register argc = ToRegister(lir->output());
  masm.loadNumActualArgs(FramePointer, argc);
}
// Lower LGetFrameArgument: load the |index|-th argument Value from the
// current JIT frame's actuals area. |index| may be a compile-time constant
// or a register; debug builds assert it is within bounds.
void CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir) {
  ValueOperand result = ToOutValue(lir);
  const LAllocation* index = lir->index();
  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();

  // This instruction is used to access actual arguments and formal arguments.
  // The number of Values on the stack is |max(numFormals, numActuals)|, so we
  // assert |index < numFormals || index < numActuals| in debug builds.
  DebugOnly<size_t> numFormals = gen->outerInfo().script()->function()->nargs();

  if (index->isConstant()) {
    int32_t i = index->toConstant()->toInt32();
#ifdef DEBUG
    if (uint32_t(i) >= numFormals) {
      Label ok;
      Register argc = result.scratchReg();
      masm.loadNumActualArgs(FramePointer, argc);
      masm.branch32(Assembler::Above, argc, Imm32(i), &ok);
      masm.assumeUnreachable("Invalid argument index");
      masm.bind(&ok);
    }
#endif
    Address argPtr(FramePointer, sizeof(Value) * i + argvOffset);
    masm.loadValue(argPtr, result);
  } else {
    Register i = ToRegister(index);
#ifdef DEBUG
    Label ok;
    Register argc = result.scratchReg();
    masm.branch32(Assembler::Below, i, Imm32(numFormals), &ok);
    masm.loadNumActualArgs(FramePointer, argc);
    masm.branch32(Assembler::Above, argc, i, &ok);
    masm.assumeUnreachable("Invalid argument index");
    masm.bind(&ok);
#endif
    BaseValueIndex argPtr(FramePointer, i, argvOffset);
    masm.loadValue(argPtr, result);
  }
}
// Lower LGetFrameArgumentHole: like visitGetFrameArgument, but returns
// |undefined| for indices >= length instead of asserting, and bails out on
// negative indices.
void CodeGenerator::visitGetFrameArgumentHole(LGetFrameArgumentHole* lir) {
  ValueOperand result = ToOutValue(lir);
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();

  Label outOfBounds, done;
  masm.spectreBoundsCheck32(index, length, spectreTemp, &outOfBounds);

  BaseValueIndex argPtr(FramePointer, index, argvOffset);
  masm.loadValue(argPtr, result);
  masm.jump(&done);

  // Out of bounds: a negative index deoptimizes; otherwise the argument is
  // simply missing and reads as undefined.
  masm.bind(&outOfBounds);
  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
  masm.moveValue(UndefinedValue(), result);

  masm.bind(&done);
}
// Lower LRest: materialize a rest parameter array from the actual arguments
// beyond |numFormals|. When a shape is available, an array with up to
// |arrayCapacity| (2) inline elements is allocated and filled without a VM
// call; larger rest arrays (or allocation failure) fall back to the
// InitRestParameter VM function, passing the pre-allocated array (or
// nullptr).
void CodeGenerator::visitRest(LRest* lir) {
  Register numActuals = ToRegister(lir->numActuals());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register temp3 = ToRegister(lir->temp3());
  unsigned numFormals = lir->mir()->numFormals();

  // Max number of elements the inline-allocated array can hold.
  constexpr uint32_t arrayCapacity = 2;

  if (Shape* shape = lir->mir()->shape()) {
    uint32_t arrayLength = 0;
    gc::AllocKind allocKind = GuessArrayGCKind(arrayCapacity);
    MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
    allocKind = ForegroundToBackgroundAllocKind(allocKind);
    MOZ_ASSERT(GetGCKindSlots(allocKind) ==
               arrayCapacity + ObjectElements::VALUES_PER_HEADER);

    Label joinAlloc, failAlloc;
    masm.movePtr(ImmGCPtr(shape), temp0);
    masm.createArrayWithFixedElements(temp2, temp0, temp1, InvalidReg,
                                      arrayLength, arrayCapacity, 0, 0,
                                      allocKind, gc::Heap::Default, &failAlloc);
    masm.jump(&joinAlloc);
    {
      // Allocation failed: temp2 = nullptr signals the VM fallback below.
      masm.bind(&failAlloc);
      masm.movePtr(ImmPtr(nullptr), temp2);
    }
    masm.bind(&joinAlloc);
  } else {
    // No shape available: always take the VM path with a nullptr array.
    masm.movePtr(ImmPtr(nullptr), temp2);
  }

  // Set temp1 to the address of the first actual argument.
  size_t actualsOffset = JitFrameLayout::offsetOfActualArgs();
  masm.computeEffectiveAddress(Address(FramePointer, actualsOffset), temp1);

  // Compute array length: max(numActuals - numFormals, 0).
  Register lengthReg;
  if (numFormals) {
    lengthReg = temp0;
    Label emptyLength, joinLength;
    masm.branch32(Assembler::LessThanOrEqual, numActuals, Imm32(numFormals),
                  &emptyLength);
    {
      masm.move32(numActuals, lengthReg);
      masm.sub32(Imm32(numFormals), lengthReg);

      // Skip formal arguments.
      masm.addPtr(Imm32(sizeof(Value) * numFormals), temp1);

      masm.jump(&joinLength);
    }
    masm.bind(&emptyLength);
    {
      masm.move32(Imm32(0), lengthReg);

      // Leave temp1 pointed to the start of actuals() when the rest-array
      // length is zero. We don't use |actuals() + numFormals| because
      // |numFormals| can be any non-negative int32 value when this MRest was
      // created from scalar replacement optimizations. And it seems
      // questionable to compute a Value* pointer which points to who knows
      // where.
    }
    masm.bind(&joinLength);
  } else {
    // Use numActuals directly when there are no formals.
    lengthReg = numActuals;
  }

  // Try to initialize the array elements.
  Label vmCall, done;
  if (lir->mir()->shape()) {
    // Call into C++ if we failed to allocate an array or there are more than
    // |arrayCapacity| elements.
    masm.branchTestPtr(Assembler::Zero, temp2, temp2, &vmCall);
    masm.branch32(Assembler::Above, lengthReg, Imm32(arrayCapacity), &vmCall);

    // The array must be nursery allocated so no post barrier is needed.
#ifdef DEBUG
    Label ok;
    masm.branchPtrInNurseryChunk(Assembler::Equal, temp2, temp3, &ok);
    masm.assumeUnreachable("Unexpected tenured object for LRest");
    masm.bind(&ok);
#endif

    Label initialized;
    masm.branch32(Assembler::Equal, lengthReg, Imm32(0), &initialized);

    // Store length and initializedLength.
    Register elements = temp3;
    masm.loadPtr(Address(temp2, NativeObject::offsetOfElements()), elements);
    Address lengthAddr(elements, ObjectElements::offsetOfLength());
    Address initLengthAddr(elements,
                           ObjectElements::offsetOfInitializedLength());
    masm.store32(lengthReg, lengthAddr);
    masm.store32(lengthReg, initLengthAddr);

    // Store either one or two elements. This may clobber lengthReg (temp0).
    static_assert(arrayCapacity == 2, "code handles 1 or 2 elements");
    Label storeFirst;
    masm.branch32(Assembler::Equal, lengthReg, Imm32(1), &storeFirst);
    masm.storeValue(Address(temp1, sizeof(Value)),
                    Address(elements, sizeof(Value)), temp0);
    masm.bind(&storeFirst);
    masm.storeValue(Address(temp1, 0), Address(elements, 0), temp0);

    // Done.
    masm.bind(&initialized);
    masm.movePtr(temp2, ReturnReg);
    masm.jump(&done);
  }

  masm.bind(&vmCall);

  pushArg(temp2);
  pushArg(temp1);
  pushArg(lengthReg);

  using Fn =
      ArrayObject* (*)(JSContext*, uint32_t, Value*, Handle<ArrayObject*>);
  callVM<Fn, InitRestParameter>(lir);

  masm.bind(&done);
}
// Create a stackmap from the given safepoint, with the structure:
//
//   <reg dump, if any>
//   |       ++ <body (general spill)>
//   |       |       ++ <space for Frame>
//   |       |       |       ++ <inbound args>
//   |       |       |       |
//   Lowest Addr                          Highest Addr
//           |
//           framePushedAtStackMapBase
//
// The caller owns the resulting stackmap. This assumes a grow-down stack.
//
// For non-debug builds, if the stackmap would contain no pointers, no
// stackmap is created, and nullptr is returned. For a debug build, a
// stackmap is always created and returned.
//
// Depending on the type of safepoint, the stackmap may need to account for
// spilled registers. WasmSafepointKind::LirCall corresponds to LIR nodes where
// isCall() == true, for which the register allocator will spill/restore all
// live registers at the LIR level - in this case, the LSafepoint sees only live
// values on the stack, never in registers. WasmSafepointKind::CodegenCall, on
// the other hand, is for LIR nodes which may manually spill/restore live
// registers in codegen, in which case the stackmap must account for this. Traps
// also require tracking of live registers, but spilling is handled by the trap
// mechanism.
15054 static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
15055 const RegisterOffsets& trapExitLayout,
15056 size_t trapExitLayoutNumWords,
15057 size_t nInboundStackArgBytes,
15058 wasm::StackMap** result) {
15059 // Ensure this is defined on all return paths.
15060 *result = nullptr;
15062 // The size of the wasm::Frame itself.
15063 const size_t nFrameBytes = sizeof(wasm::Frame);
15065 // This is the number of bytes spilled for live registers, outside of a trap.
15066 // For traps, trapExitLayout and trapExitLayoutNumWords will be used.
15067 const size_t nRegisterDumpBytes =
15068 MacroAssembler::PushRegsInMaskSizeInBytes(safepoint.liveRegs());
15070 // As mentioned above, for WasmSafepointKind::LirCall, register spills and
15071 // restores are handled at the LIR level and there should therefore be no live
15072 // registers to handle here.
15073 MOZ_ASSERT_IF(safepoint.wasmSafepointKind() == WasmSafepointKind::LirCall,
15074 nRegisterDumpBytes == 0);
15075 MOZ_ASSERT(nRegisterDumpBytes % sizeof(void*) == 0);
15077 // This is the number of bytes in the general spill area, below the Frame.
15078 const size_t nBodyBytes = safepoint.framePushedAtStackMapBase();
15080 // The stack map owns any alignment padding around inbound stack args.
15081 const size_t nInboundStackArgBytesAligned =
15082 wasm::AlignStackArgAreaSize(nInboundStackArgBytes);
15084 // This is the number of bytes in the general spill area, the Frame, and the
15085 // incoming args, but not including any register dump area.
15086 const size_t nNonRegisterBytes =
15087 nBodyBytes + nFrameBytes + nInboundStackArgBytesAligned;
15088 MOZ_ASSERT(nNonRegisterBytes % sizeof(void*) == 0);
15090 // This is the number of bytes in the register dump area, if any, below the
15091 // general spill area.
15092 const size_t nRegisterBytes =
15093 (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap)
15094 ? (trapExitLayoutNumWords * sizeof(void*))
15095 : nRegisterDumpBytes;
15097 // This is the total number of bytes covered by the map.
15098 const size_t nTotalBytes = nNonRegisterBytes + nRegisterBytes;
15100 #ifndef DEBUG
15101 bool needStackMap = !(safepoint.wasmAnyRefRegs().empty() &&
15102 safepoint.wasmAnyRefSlots().empty() &&
15103 safepoint.slotsOrElementsSlots().empty());
15105 // There are no references, and this is a non-debug build, so don't bother
15106 // building the stackmap.
15107 if (!needStackMap) {
15108 return true;
15110 #endif
15112 wasm::StackMap* stackMap =
15113 wasm::StackMap::create(nTotalBytes / sizeof(void*));
15114 if (!stackMap) {
15115 return false;
15117 if (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap) {
15118 stackMap->setExitStubWords(trapExitLayoutNumWords);
15121 // REG DUMP AREA, if any.
15122 size_t regDumpWords = 0;
15123 const LiveGeneralRegisterSet wasmAnyRefRegs = safepoint.wasmAnyRefRegs();
15124 const LiveGeneralRegisterSet slotsOrElementsRegs =
15125 safepoint.slotsOrElementsRegs();
15126 const LiveGeneralRegisterSet refRegs(GeneralRegisterSet::Union(
15127 wasmAnyRefRegs.set(), slotsOrElementsRegs.set()));
15128 GeneralRegisterForwardIterator refRegsIter(refRegs);
15129 switch (safepoint.wasmSafepointKind()) {
15130 case WasmSafepointKind::LirCall:
15131 case WasmSafepointKind::CodegenCall: {
15132 size_t spilledNumWords = nRegisterDumpBytes / sizeof(void*);
15133 regDumpWords += spilledNumWords;
15135 for (; refRegsIter.more(); ++refRegsIter) {
15136 Register reg = *refRegsIter;
15137 size_t offsetFromSpillBase =
15138 safepoint.liveRegs().gprs().offsetOfPushedRegister(reg) /
15139 sizeof(void*);
15140 MOZ_ASSERT(0 < offsetFromSpillBase &&
15141 offsetFromSpillBase <= spilledNumWords);
15142 size_t index = spilledNumWords - offsetFromSpillBase;
15144 if (wasmAnyRefRegs.has(reg)) {
15145 stackMap->set(index, wasm::StackMap::AnyRef);
15146 } else {
15147 MOZ_ASSERT(slotsOrElementsRegs.has(reg));
15148 stackMap->set(index, wasm::StackMap::ArrayDataPointer);
15151 // Float and vector registers do not have to be handled; they cannot
15152 // contain wasm anyrefs, and they are spilled after general-purpose
15153 // registers. Gprs are therefore closest to the spill base and thus their
15154 // offset calculation does not need to account for other spills.
15155 } break;
15156 case WasmSafepointKind::Trap: {
15157 regDumpWords += trapExitLayoutNumWords;
15159 for (; refRegsIter.more(); ++refRegsIter) {
15160 Register reg = *refRegsIter;
15161 size_t offsetFromTop = trapExitLayout.getOffset(reg);
15163 // If this doesn't hold, the associated register wasn't saved by
15164 // the trap exit stub. Better to crash now than much later, in
15165 // some obscure place, and possibly with security consequences.
15166 MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);
15168 // offsetFromTop is an offset in words down from the highest
15169 // address in the exit stub save area. Switch it around to be an
15170 // offset up from the bottom of the (integer register) save area.
15171 size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;
15173 if (wasmAnyRefRegs.has(reg)) {
15174 stackMap->set(offsetFromBottom, wasm::StackMap::AnyRef);
15175 } else {
15176 MOZ_ASSERT(slotsOrElementsRegs.has(reg));
15177 stackMap->set(offsetFromBottom, wasm::StackMap::ArrayDataPointer);
15180 } break;
15181 default:
15182 MOZ_CRASH("unreachable");
15185 // BODY (GENERAL SPILL) AREA and FRAME and INCOMING ARGS
15186 // Deal with roots on the stack.
15187 const LSafepoint::SlotList& wasmAnyRefSlots = safepoint.wasmAnyRefSlots();
15188 for (SafepointSlotEntry wasmAnyRefSlot : wasmAnyRefSlots) {
15189 // The following needs to correspond with JitFrameLayout::slotRef
15190 // wasmAnyRefSlot.stack == 0 means the slot is in the args area
15191 if (wasmAnyRefSlot.stack) {
15192 // It's a slot in the body allocation, so .slot is interpreted
15193 // as an index downwards from the Frame*
15194 MOZ_ASSERT(wasmAnyRefSlot.slot <= nBodyBytes);
15195 uint32_t offsetInBytes = nBodyBytes - wasmAnyRefSlot.slot;
15196 MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
15197 stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
15198 wasm::StackMap::AnyRef);
15199 } else {
15200 // It's an argument slot
15201 MOZ_ASSERT(wasmAnyRefSlot.slot < nInboundStackArgBytes);
15202 uint32_t offsetInBytes = nBodyBytes + nFrameBytes + wasmAnyRefSlot.slot;
15203 MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
15204 stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
15205 wasm::StackMap::AnyRef);
15209 // Track array data pointers on the stack
15210 const LSafepoint::SlotList& slots = safepoint.slotsOrElementsSlots();
15211 for (SafepointSlotEntry slot : slots) {
15212 MOZ_ASSERT(slot.stack);
15214 // It's a slot in the body allocation, so .slot is interpreted
15215 // as an index downwards from the Frame*
15216 MOZ_ASSERT(slot.slot <= nBodyBytes);
15217 uint32_t offsetInBytes = nBodyBytes - slot.slot;
15218 MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
15219 stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
15220 wasm::StackMap::Kind::ArrayDataPointer);
15223 // Record in the map, how far down from the highest address the Frame* is.
15224 // Take the opportunity to check that we haven't marked any part of the
15225 // Frame itself as a pointer.
15226 stackMap->setFrameOffsetFromTop((nInboundStackArgBytesAligned + nFrameBytes) /
15227 sizeof(void*));
15228 #ifdef DEBUG
15229 for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
15230 MOZ_ASSERT(stackMap->get(stackMap->header.numMappedWords -
15231 stackMap->header.frameOffsetFromTop + i) ==
15232 wasm::StackMap::Kind::POD);
15234 #endif
15236 *result = stackMap;
15237 return true;
// Emit the complete machine code for one wasm function: prologue (with
// optional stack-overflow check), body, epilogue, and out-of-line code.
// On success, fills in |offsets| and appends one stackmap per safepoint to
// |stackMaps|. Returns false on OOM or on a decoder failure (oversized
// frame).
bool CodeGenerator::generateWasm(
    wasm::CallIndirectId callIndirectId, wasm::BytecodeOffset trapOffset,
    const wasm::ArgTypeVector& argTypes, const RegisterOffsets& trapExitLayout,
    size_t trapExitLayoutNumWords, wasm::FuncOffsets* offsets,
    wasm::StackMaps* stackMaps, wasm::Decoder* decoder) {
  AutoCreatedBy acb(masm, "CodeGenerator::generateWasm");

  JitSpew(JitSpew_Codegen, "# Emitting wasm code");

  size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(argTypes);
  inboundStackArgBytes_ = nInboundStackArgBytes;

  wasm::GenerateFunctionPrologue(masm, callIndirectId, mozilla::Nothing(),
                                 offsets);

  MOZ_ASSERT(masm.framePushed() == 0);

  // Very large frames are implausible, probably an attack.
  if (frameSize() > wasm::MaxFrameSize) {
    return decoder->fail(decoder->beginOffset(), "stack frame is too large");
  }

  if (omitOverRecursedCheck()) {
    masm.reserveStack(frameSize());
  } else {
    // Reserve the stack with an explicit overflow check; the check can trap,
    // so a stackmap covering the function-entry state is required for it.
    std::pair<CodeOffset, uint32_t> pair =
        masm.wasmReserveStackChecked(frameSize(), trapOffset);
    CodeOffset trapInsnOffset = pair.first;
    size_t nBytesReservedBeforeTrap = pair.second;

    wasm::StackMap* functionEntryStackMap = nullptr;
    if (!CreateStackMapForFunctionEntryTrap(
            argTypes, trapExitLayout, trapExitLayoutNumWords,
            nBytesReservedBeforeTrap, nInboundStackArgBytes,
            &functionEntryStackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map, even if there are no
    // refs to track.
    MOZ_ASSERT(functionEntryStackMap);

    if (functionEntryStackMap &&
        !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
                        functionEntryStackMap)) {
      // On failure we own the map and must destroy it ourselves.
      functionEntryStackMap->destroy();
      return false;
    }
  }

  MOZ_ASSERT(masm.framePushed() == frameSize());

  if (!generateBody()) {
    return false;
  }

  masm.bind(&returnLabel_);
  wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);

  if (!generateOutOfLineCode()) {
    return false;
  }

  masm.flush();
  if (masm.oom()) {
    return false;
  }

  offsets->end = masm.currentOffset();

  // None of the Ion-only side tables should have been populated when
  // compiling wasm.
  MOZ_ASSERT(!masm.failureLabel()->used());
  MOZ_ASSERT(snapshots_.listSize() == 0);
  MOZ_ASSERT(snapshots_.RVATableSize() == 0);
  MOZ_ASSERT(recovers_.size() == 0);
  MOZ_ASSERT(graph.numConstants() == 0);
  MOZ_ASSERT(osiIndices_.empty());
  MOZ_ASSERT(icList_.empty());
  MOZ_ASSERT(safepoints_.size() == 0);
  MOZ_ASSERT(!scriptCounts_);

  // Convert the safepoints to stackmaps and add them to our running
  // collection thereof.
  for (CodegenSafepointIndex& index : safepointIndices_) {
    wasm::StackMap* stackMap = nullptr;
    if (!CreateStackMapFromLSafepoint(*index.safepoint(), trapExitLayout,
                                      trapExitLayoutNumWords,
                                      nInboundStackArgBytes, &stackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map.
    MOZ_ASSERT(stackMap);
    if (!stackMap) {
      // Non-debug build and no refs at this safepoint: nothing to record.
      continue;
    }

    if (!stackMaps->add((uint8_t*)(uintptr_t)index.displacement(), stackMap)) {
      stackMap->destroy();
      return false;
    }
  }

  return true;
}
// Emit the complete Ion code for the current graph: prologue, body,
// epilogue, invalidation epilogue and out-of-line code, interleaved with
// native=>bytecode table maintenance. Returns false on OOM.
bool CodeGenerator::generate() {
  AutoCreatedBy acb(masm, "CodeGenerator::generate");

  JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
          gen->outerInfo().script()->filename(),
          gen->outerInfo().script()->lineno(),
          gen->outerInfo().script()->column().oneOriginValue());

  // Initialize native code table with an entry to the start of
  // top-level script.
  InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
  jsbytecode* startPC = tree->script()->code();
  BytecodeSite* startSite = new (gen->alloc()) BytecodeSite(tree, startPC);
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  if (!safepoints_.init(gen->alloc())) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "Prologue");
  if (!generatePrologue()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  if (!generateBody()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "Epilogue");
  if (!generateEpilogue()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "InvalidateEpilogue");
  generateInvalidateEpilogue();

  // native => bytecode entries for OOL code will be added
  // by CodeGeneratorShared::generateOutOfLineCode
  perfSpewer_.recordOffset(masm, "OOLCode");
  if (!generateOutOfLineCode()) {
    return false;
  }

  // Add terminal entry.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  // Dump Native to bytecode entries to spew.
  dumpNativeToBytecodeEntries();

  // We encode safepoints after the OSI-point offsets have been determined.
  if (!encodeSafepoints()) {
    return false;
  }

  return !masm.oom();
}
// Register every script inlined into |script| with the JitZone so that
// invalidating an inlined script also invalidates this compilation.
// Returns false only on OOM. On success, *isValid is set to false (and true
// is returned) when an inlined script has become a debuggee and the
// compilation must be discarded; otherwise *isValid is set to true.
static bool AddInlinedCompilations(JSContext* cx, HandleScript script,
                                   IonCompilationId compilationId,
                                   const WarpSnapshot* snapshot,
                                   bool* isValid) {
  MOZ_ASSERT(!*isValid);
  RecompileInfo recompileInfo(script, compilationId);

  JitZone* jitZone = cx->zone()->jitZone();

  for (const auto* scriptSnapshot : snapshot->scripts()) {
    JSScript* inlinedScript = scriptSnapshot->script();
    if (inlinedScript == script) {
      // The outer script does not need a self-edge.
      continue;
    }

    // TODO(post-Warp): This matches FinishCompilation and is necessary to
    // ensure in-progress compilations are canceled when an inlined function
    // becomes a debuggee. See the breakpoint-14.js jit-test.
    // When TI is gone, try to clean this up by moving AddInlinedCompilations
    // to WarpOracle so that we can handle this as part of addPendingRecompile
    // instead of requiring this separate check.
    if (inlinedScript->isDebuggee()) {
      *isValid = false;
      return true;
    }

    if (!jitZone->addInlinedCompilation(recompileInfo, inlinedScript)) {
      return false;
    }
  }

  *isValid = true;
  return true;
}
// For each fuse this compilation depends on, verify the fuse is still intact
// and register |script| as a dependency so a future fuse-pop invalidates it.
// Sets *isValid to false (tossing the compilation) if a fuse has already
// popped or if registering the dependency fails (e.g. OOM).
void CodeGenerator::validateAndRegisterFuseDependencies(JSContext* cx,
                                                        HandleScript script,
                                                        bool* isValid) {
  // No need to validate as we will toss this compilation anyhow.
  if (!*isValid) {
    return;
  }

  for (auto dependency : fuseDependencies) {
    switch (dependency) {
      case FuseDependencyKind::HasSeenObjectEmulateUndefinedFuse: {
        auto& hasSeenObjectEmulateUndefinedFuse =
            cx->runtime()->hasSeenObjectEmulateUndefinedFuse.ref();

        if (!hasSeenObjectEmulateUndefinedFuse.intact()) {
          JitSpew(JitSpew_Codegen,
                  "tossing compilation; hasSeenObjectEmulateUndefinedFuse fuse "
                  "dependency no longer valid\n");
          *isValid = false;
          return;
        }

        if (!hasSeenObjectEmulateUndefinedFuse.addFuseDependency(cx, script)) {
          JitSpew(JitSpew_Codegen,
                  "tossing compilation; failed to register "
                  "hasSeenObjectEmulateUndefinedFuse script dependency\n");
          *isValid = false;
          return;
        }
        break;
      }

      case FuseDependencyKind::OptimizeGetIteratorFuse: {
        auto& optimizeGetIteratorFuse =
            cx->realm()->realmFuses.optimizeGetIteratorFuse;
        if (!optimizeGetIteratorFuse.intact()) {
          JitSpew(JitSpew_Codegen,
                  "tossing compilation; optimizeGetIteratorFuse fuse "
                  "dependency no longer valid\n");
          *isValid = false;
          return;
        }

        if (!optimizeGetIteratorFuse.addFuseDependency(cx, script)) {
          JitSpew(JitSpew_Codegen,
                  "tossing compilation; failed to register "
                  "optimizeGetIteratorFuse script dependency\n");
          *isValid = false;
          return;
        }
        break;
      }

      default:
        MOZ_CRASH("Unknown Dependency Kind");
    }
  }
}
// Link the generated code into an executable IonScript and attach it to the
// script's JitScript. Performs validation (inlined-script and fuse
// dependencies), allocates the IonScript and JitCode, patches embedded
// pointers, and copies all side tables. Returns false only on error; returns
// true both on success and when the compilation is discarded as invalid.
bool CodeGenerator::link(JSContext* cx, const WarpSnapshot* snapshot) {
  AutoCreatedBy acb(masm, "CodeGenerator::link");

  // We cancel off-thread Ion compilations in a few places during GC, but if
  // this compilation was performed off-thread it will already have been
  // removed from the relevant lists by this point. Don't allow GC here.
  JS::AutoAssertNoGC nogc(cx);

  RootedScript script(cx, gen->outerInfo().script());
  MOZ_ASSERT(!script->hasIonScript());

  // Perform any read barriers which were skipped while compiling the
  // script, which may have happened off-thread.
  JitZone* jitZone = cx->zone()->jitZone();
  jitZone->performStubReadBarriers(zoneStubsToReadBarrier_);

  if (scriptCounts_ && !script->hasScriptCounts() &&
      !script->initScriptCounts(cx)) {
    return false;
  }

  IonCompilationId compilationId =
      cx->runtime()->jitRuntime()->nextCompilationId();
  jitZone->currentCompilationIdRef().emplace(compilationId);
  auto resetCurrentId = mozilla::MakeScopeExit(
      [jitZone] { jitZone->currentCompilationIdRef().reset(); });

  // Record constraints. If an error occurred, returns false and potentially
  // prevents future compilations. Otherwise, if an invalidation occurred,
  // then skip the current compilation.
  bool isValid = false;

  // If an inlined script is invalidated (for example, by attaching
  // a debugger), we must also invalidate the parent IonScript.
  if (!AddInlinedCompilations(cx, script, compilationId, snapshot, &isValid)) {
    return false;
  }

  // Validate fuse dependencies here; if a fuse has popped since we registered
  // a dependency then we need to toss this compilation as it assumes things
  // which are not valid.
  //
  // Eagerly register a fuse dependency here too; this way if we OOM we can
  // instead simply remove the compilation and move on with our lives.
  validateAndRegisterFuseDependencies(cx, script, &isValid);

  // This compilation is no longer valid; don't proceed, but return true as
  // this isn't an error case either.
  if (!isValid) {
    return true;
  }

  // +1 for the this-value slot.
  uint32_t argumentSlots = (gen->outerInfo().nargs() + 1) * sizeof(Value);

  size_t numNurseryObjects = snapshot->nurseryObjects().length();

  IonScript* ionScript = IonScript::New(
      cx, compilationId, graph.localSlotsSize(), argumentSlots, frameDepth_,
      snapshots_.listSize(), snapshots_.RVATableSize(), recovers_.size(),
      graph.numConstants(), numNurseryObjects, safepointIndices_.length(),
      osiIndices_.length(), icList_.length(), runtimeData_.length(),
      safepoints_.size());
  if (!ionScript) {
    return false;
  }
#ifdef DEBUG
  ionScript->setICHash(snapshot->icHash());
#endif

  auto freeIonScript = mozilla::MakeScopeExit([&ionScript] {
    // Use js_free instead of IonScript::Destroy: the cache list is still
    // uninitialized.
    js_free(ionScript);
  });

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Ion);
  if (!code) {
    return false;
  }

  // Encode native to bytecode map if profiling is enabled.
  if (isProfilerInstrumentationEnabled()) {
    // Generate native-to-bytecode main table.
    IonEntry::ScriptList scriptList;
    if (!generateCompactNativeToBytecodeMap(cx, code, scriptList)) {
      return false;
    }

    uint8_t* ionTableAddr =
        ((uint8_t*)nativeToBytecodeMap_.get()) + nativeToBytecodeTableOffset_;
    JitcodeIonTable* ionTable = (JitcodeIonTable*)ionTableAddr;

    // Construct the IonEntry that will go into the global table.
    auto entry = MakeJitcodeGlobalEntry<IonEntry>(
        cx, code, code->raw(), code->rawEnd(), std::move(scriptList), ionTable);
    if (!entry) {
      return false;
    }
    (void)nativeToBytecodeMap_.release();  // Table is now owned by |entry|.

    // Add entry to the global table.
    JitcodeGlobalTable* globalTable =
        cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    if (!globalTable->addEntry(std::move(entry))) {
      return false;
    }

    // Mark the jitcode as having a bytecode map.
    code->setHasBytecodeMap();
  } else {
    // Add a dummy jitcodeGlobalTable entry.
    auto entry = MakeJitcodeGlobalEntry<DummyEntry>(cx, code, code->raw(),
                                                    code->rawEnd());
    if (!entry) {
      return false;
    }

    // Add entry to the global table.
    JitcodeGlobalTable* globalTable =
        cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    if (!globalTable->addEntry(std::move(entry))) {
      return false;
    }

    // Mark the jitcode as having a bytecode map.
    code->setHasBytecodeMap();
  }

  ionScript->setMethod(code);

  // If the Gecko Profiler is enabled, mark IonScript as having been
  // instrumented accordingly.
  if (isProfilerInstrumentationEnabled()) {
    ionScript->setHasProfilingInstrumentation();
  }

  // Patch the placeholder written by the invalidation epilogue with the real
  // IonScript pointer.
  Assembler::PatchDataWithValueCheck(
      CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript),
      ImmPtr((void*)-1));

  for (CodeOffset offset : ionScriptLabels_) {
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, offset),
                                       ImmPtr(ionScript), ImmPtr((void*)-1));
  }

  for (NurseryObjectLabel label : ionNurseryObjectLabels_) {
    void* entry = ionScript->addressOfNurseryObject(label.nurseryIndex);
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
                                       ImmPtr(entry), ImmPtr((void*)-1));
  }

  // for generating inline caches during the execution.
  if (runtimeData_.length()) {
    ionScript->copyRuntimeData(&runtimeData_[0]);
  }
  if (icList_.length()) {
    ionScript->copyICEntries(&icList_[0]);
  }

  // Patch every IC site with its code pointer and IC data pointer.
  for (size_t i = 0; i < icInfo_.length(); i++) {
    IonIC& ic = ionScript->getICFromIndex(i);
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(code, icInfo_[i].icOffsetForJump),
        ImmPtr(ic.codeRawPtr()), ImmPtr((void*)-1));
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(code, icInfo_[i].icOffsetForPush), ImmPtr(&ic),
        ImmPtr((void*)-1));
  }

  JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)", (void*)ionScript,
          (void*)code->raw());

  ionScript->setInvalidationEpilogueDataOffset(
      invalidateEpilogueData_.offset());
  if (jsbytecode* osrPc = gen->outerInfo().osrPc()) {
    ionScript->setOsrPc(osrPc);
    ionScript->setOsrEntryOffset(getOsrEntryOffset());
  }
  ionScript->setInvalidationEpilogueOffset(invalidate_.offset());

  perfSpewer_.saveProfile(cx, script, code);

#ifdef MOZ_VTUNE
  vtune::MarkScript(code, script, "ion");
#endif

  // Set an Ion counter hint for this script.
  if (cx->runtime()->jitRuntime()->hasJitHintsMap()) {
    JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
    jitHints->recordIonCompilation(script);
  }

  // for marking during GC.
  if (safepointIndices_.length()) {
    ionScript->copySafepointIndices(&safepointIndices_[0]);
  }
  if (safepoints_.size()) {
    ionScript->copySafepoints(&safepoints_);
  }

  // for recovering from an Ion Frame.
  if (osiIndices_.length()) {
    ionScript->copyOsiIndices(&osiIndices_[0]);
  }
  if (snapshots_.listSize()) {
    ionScript->copySnapshots(&snapshots_);
  }
  MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
  if (recovers_.size()) {
    ionScript->copyRecovers(&recovers_);
  }
  if (graph.numConstants()) {
    const Value* vp = graph.constantPool();
    ionScript->copyConstants(vp);
    // If any constant is a nursery GC thing, record the whole script in the
    // store buffer; one whole-cell entry suffices, hence the break.
    for (size_t i = 0; i < graph.numConstants(); i++) {
      const Value& v = vp[i];
      if (v.isGCThing()) {
        if (gc::StoreBuffer* sb = v.toGCThing()->storeBuffer()) {
          sb->putWholeCell(script);
          break;
        }
      }
    }
  }

  // Attach any generated script counts to the script.
  if (IonScriptCounts* counts = extractScriptCounts()) {
    script->addIonCounts(counts);
  }
  // WARNING: Code after this point must be infallible!

  // Copy the list of nursery objects. Note that the store buffer can add
  // HeapPtr edges that must be cleared in IonScript::Destroy. See the
  // infallibility warning above.
  const auto& nurseryObjects = snapshot->nurseryObjects();
  for (size_t i = 0; i < nurseryObjects.length(); i++) {
    ionScript->nurseryObjects()[i].init(nurseryObjects[i]);
  }

  // Transfer ownership of the IonScript to the JitScript. At this point
  // enough of the IonScript must be initialized for IonScript::Destroy to
  // work.
  freeIonScript.release();
  script->jitScript()->setIonScript(script, ionScript);

  return true;
}
// An out-of-line path to convert a boxed int32 to either a float or double.
class OutOfLineUnboxFloatingPoint : public OutOfLineCodeBase<CodeGenerator> {
  // The LIR instruction this out-of-line path belongs to.
  LUnboxFloatingPoint* unboxFloatingPoint_;

 public:
  explicit OutOfLineUnboxFloatingPoint(LUnboxFloatingPoint* unboxFloatingPoint)
      : unboxFloatingPoint_(unboxFloatingPoint) {}

  // Dispatch back into the code generator's visitor.
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineUnboxFloatingPoint(this);
  }

  LUnboxFloatingPoint* unboxFloatingPoint() const {
    return unboxFloatingPoint_;
  }
};
// Unbox a Value into a floating-point register. The inline path handles
// doubles; boxed int32 (and bailout for anything else) is handled by the
// out-of-line path.
void CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir) {
  const ValueOperand box = ToValue(lir, LUnboxFloatingPoint::Input);
  const LDefinition* result = lir->output();

  // Out-of-line path to convert int32 to double or bailout
  // if this instruction is fallible.
  OutOfLineUnboxFloatingPoint* ool =
      new (alloc()) OutOfLineUnboxFloatingPoint(lir);
  addOutOfLineCode(ool, lir->mir());

  FloatRegister resultReg = ToFloatRegister(result);
  masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
  masm.unboxDouble(box, resultReg);
  if (lir->type() == MIRType::Float32) {
    // Narrow to float32 when that is the requested output type.
    masm.convertDoubleToFloat32(resultReg, resultReg);
  }
  masm.bind(ool->rejoin());
}
// Out-of-line continuation of visitUnboxFloatingPoint: the input was not a
// double. If it is an int32, convert it to the requested floating-point
// type; otherwise bail out (only when the MIR instruction is fallible).
void CodeGenerator::visitOutOfLineUnboxFloatingPoint(
    OutOfLineUnboxFloatingPoint* ool) {
  LUnboxFloatingPoint* ins = ool->unboxFloatingPoint();
  const ValueOperand value = ToValue(ins, LUnboxFloatingPoint::Input);

  if (ins->mir()->fallible()) {
    Label bail;
    masm.branchTestInt32(Assembler::NotEqual, value, &bail);
    bailoutFrom(&bail, ins->snapshot());
  }
  masm.int32ValueToFloatingPoint(value, ToFloatRegister(ins->output()),
                                 ins->type());
  masm.jump(ool->rejoin());
}
// Call into the VM to resolve the target object for a |var| binding given
// the current environment chain.
void CodeGenerator::visitCallBindVar(LCallBindVar* lir) {
  pushArg(ToRegister(lir->environmentChain()));

  using Fn = JSObject* (*)(JSContext*, JSObject*);
  callVM<Fn, BindVarOperation>(lir);
}
// Megamorphic obj[id] = value. Fast path: the megamorphic set-slot cache,
// with a pre-barrier emitted via the callback. Slow path: a VM call. After
// a cache hit, a post-write barrier is emitted if a nursery value was
// stored into a tenured object.
void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
  Register obj = ToRegister(lir->getOperand(0));
  ValueOperand idVal = ToValue(lir, LMegamorphicSetElement::IndexIndex);
  ValueOperand value = ToValue(lir, LMegamorphicSetElement::ValueIndex);

  Register temp0 = ToRegister(lir->temp0());
  // See comment in LIROps.yaml (x86 is short on registers)
#ifndef JS_CODEGEN_X86
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  Label cacheHit, done;
#ifdef JS_CODEGEN_X86
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#else
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, temp1, temp2, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#endif

  // Cache miss: fall through to the VM call.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(ToValue(lir, LMegamorphicSetElement::ValueIndex));
  pushArg(ToValue(lir, LMegamorphicSetElement::IndexIndex));
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, js::jit::SetElementMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // Post-write barrier is only needed when a nursery cell is stored into a
  // tenured object.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  saveVolatile(temp0);
  emitPostWriteBarrier(obj);
  restoreVolatile(temp0);

  masm.bind(&done);
}
// Load the handler object of a scripted proxy from its reserved slots,
// bailing out if the slot does not hold an object.
void CodeGenerator::visitLoadScriptedProxyHandler(
    LLoadScriptedProxyHandler* ins) {
  Register obj = ToRegister(ins->getOperand(0));
  Register output = ToRegister(ins->output());

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);

  Label bail;
  Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
                                  ScriptedProxyHandler::HANDLER_EXTRA));
  masm.fallibleUnboxObject(handlerAddr, output, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
#ifdef JS_PUNBOX64
// Validate the result of a scripted proxy [[Get]] against the target's own
// property, calling into the VM only when the target object requires proxy
// result validation.
void CodeGenerator::visitCheckScriptedProxyGetResult(
    LCheckScriptedProxyGetResult* ins) {
  ValueOperand target = ToValue(ins, LCheckScriptedProxyGetResult::TargetIndex);
  ValueOperand value = ToValue(ins, LCheckScriptedProxyGetResult::ValueIndex);
  ValueOperand id = ToValue(ins, LCheckScriptedProxyGetResult::IdIndex);
  Register scratch = ToRegister(ins->temp0());
  Register scratch2 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckProxyGetByValueResult>(
      ins, ArgList(scratch, id, value), StoreValueTo(value));

  masm.unboxObject(target, scratch);
  masm.branchTestObjectNeedsProxyResultValidation(Assembler::NonZero, scratch,
                                                  scratch2, ool->entry());
  masm.bind(ool->rejoin());
}
#endif
// Convert a jsid-like Value to a string or symbol. Strings and symbols pass
// through unchanged; int32 ids are converted to strings (static-string
// lookup inline, Int32ToString via OOL VM call); any other tag bails out.
void CodeGenerator::visitIdToStringOrSymbol(LIdToStringOrSymbol* ins) {
  ValueOperand id = ToValue(ins, LIdToStringOrSymbol::IdIndex);
  ValueOperand output = ToOutValue(ins);
  Register scratch = ToRegister(ins->temp0());

  masm.moveValue(id, output);

  Label done, callVM;
  Label bail;
  {
    // Scope the tag register: it may alias output's scratch register.
    ScratchTagScope tag(masm, output);
    masm.splitTagForTest(output, tag);
    masm.branchTestString(Assembler::Equal, tag, &done);
    masm.branchTestSymbol(Assembler::Equal, tag, &done);
    masm.branchTestInt32(Assembler::NotEqual, tag, &bail);
  }

  masm.unboxInt32(output, scratch);

  using Fn = JSLinearString* (*)(JSContext*, int);
  OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
      ins, ArgList(scratch), StoreRegisterTo(output.scratchReg()));

  // Try the table of static int strings first; fall back to the VM call.
  masm.lookupStaticIntString(scratch, output.scratchReg(),
                             gen->runtime->staticStrings(), ool->entry());

  masm.bind(ool->rejoin());
  masm.tagValue(JSVAL_TYPE_STRING, output.scratchReg(), output);
  masm.bind(&done);

  bailoutFrom(&bail, ins->snapshot());
}
15937 void CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins) {
15938 const Register obj = ToRegister(ins->getOperand(0));
15939 size_t slot = ins->mir()->slot();
15940 ValueOperand result = ToOutValue(ins);
15942 masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
15945 void CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins) {
15946 const Register obj = ToRegister(ins->getOperand(0));
15947 size_t slot = ins->mir()->slot();
15948 AnyRegister result = ToAnyRegister(ins->getDef(0));
15949 MIRType type = ins->mir()->type();
15951 masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)),
15952 type, result);
// Load a Value from |src| and unbox it into |dest| according to |type|.
// Doubles are handled via ensureDouble (which also converts int32 and jumps
// to |fail| otherwise). For other types, when |fallible| is set, the tag is
// checked and mismatches jump to |fail|; infallible loads unbox without a
// check.
template <typename T>
static void EmitLoadAndUnbox(MacroAssembler& masm, const T& src, MIRType type,
                             bool fallible, AnyRegister dest, Label* fail) {
  if (type == MIRType::Double) {
    MOZ_ASSERT(dest.isFloat());
    masm.ensureDouble(src, dest.fpu(), fail);
    return;
  }
  if (fallible) {
    switch (type) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(src, dest.gpr(), fail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(src, dest.gpr(), fail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(src, dest.gpr(), fail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(src, dest.gpr(), fail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(src, dest.gpr(), fail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(src, dest.gpr(), fail);
        break;
      default:
        MOZ_CRASH("Unexpected MIRType");
    }
    return;
  }
  masm.loadUnboxedValue(src, type, dest);
}
// Load a fixed slot and unbox it in one step, bailing out on a tag mismatch
// when the MIR instruction is fallible.
void CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins) {
  const MLoadFixedSlotAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register input = ToRegister(ins->object());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address address(input, NativeObject::getFixedSlotOffset(slot));

  Label bail;
  EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
16007 void CodeGenerator::visitLoadDynamicSlotAndUnbox(
16008 LLoadDynamicSlotAndUnbox* ins) {
16009 const MLoadDynamicSlotAndUnbox* mir = ins->mir();
16010 MIRType type = mir->type();
16011 Register input = ToRegister(ins->slots());
16012 AnyRegister result = ToAnyRegister(ins->output());
16013 size_t slot = mir->slot();
16015 Address address(input, slot * sizeof(JS::Value));
16017 Label bail;
16018 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
16019 if (mir->fallible()) {
16020 bailoutFrom(&bail, ins->snapshot());
16024 void CodeGenerator::visitLoadElementAndUnbox(LLoadElementAndUnbox* ins) {
16025 const MLoadElementAndUnbox* mir = ins->mir();
16026 MIRType type = mir->type();
16027 Register elements = ToRegister(ins->elements());
16028 AnyRegister result = ToAnyRegister(ins->output());
16030 Label bail;
16031 if (ins->index()->isConstant()) {
16032 NativeObject::elementsSizeMustNotOverflow();
16033 int32_t offset = ToInt32(ins->index()) * sizeof(Value);
16034 Address address(elements, offset);
16035 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
16036 } else {
16037 BaseObjectElementIndex address(elements, ToRegister(ins->index()));
16038 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
16041 if (mir->fallible()) {
16042 bailoutFrom(&bail, ins->snapshot());
16046 class OutOfLineAtomizeSlot : public OutOfLineCodeBase<CodeGenerator> {
16047 LInstruction* lir_;
16048 Register stringReg_;
16049 Address slotAddr_;
16050 TypedOrValueRegister dest_;
16052 public:
16053 OutOfLineAtomizeSlot(LInstruction* lir, Register stringReg, Address slotAddr,
16054 TypedOrValueRegister dest)
16055 : lir_(lir), stringReg_(stringReg), slotAddr_(slotAddr), dest_(dest) {}
16057 void accept(CodeGenerator* codegen) final {
16058 codegen->visitOutOfLineAtomizeSlot(this);
16060 LInstruction* lir() const { return lir_; }
16061 Register stringReg() const { return stringReg_; }
16062 Address slotAddr() const { return slotAddr_; }
16063 TypedOrValueRegister dest() const { return dest_; }
void CodeGenerator::visitOutOfLineAtomizeSlot(OutOfLineAtomizeSlot* ool) {
  LInstruction* lir = ool->lir();
  Register stringReg = ool->stringReg();
  Address slotAddr = ool->slotAddr();
  TypedOrValueRegister dest = ool->dest();

  // This code is called with a non-atomic string in |stringReg|.
  // When it returns, |stringReg| contains an unboxed pointer to an
  // atomized version of that string, and |slotAddr| contains a
  // StringValue pointing to that atom. If |dest| is a ValueOperand,
  // it contains the same StringValue; otherwise we assert that |dest|
  // is |stringReg|.

  saveLive(lir);
  pushArg(stringReg);

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  callVM<Fn, js::AtomizeString>(lir);
  // Keep the returned atom in |stringReg| while restoring the other
  // live registers.
  StoreRegisterTo(stringReg).generate(this);
  restoreLiveIgnore(lir, StoreRegisterTo(stringReg).clobbered());

  if (dest.hasValue()) {
    masm.moveValue(
        TypedOrValueRegister(MIRType::String, AnyRegister(stringReg)),
        dest.valueReg());
  } else {
    MOZ_ASSERT(dest.typedReg().gpr() == stringReg);
  }

  // Overwrite the slot with the atomized string.
  emitPreBarrier(slotAddr);
  masm.storeTypedOrValue(dest, slotAddr);

  // We don't need a post-barrier because atoms aren't nursery-allocated.
#ifdef DEBUG
  // We need a temp register for the nursery check. Spill something.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  allRegs.take(stringReg);
  Register temp = allRegs.takeAny();
  masm.push(temp);

  Label tenured;
  masm.branchPtrInNurseryChunk(Assembler::NotEqual, stringReg, temp, &tenured);
  masm.assumeUnreachable("AtomizeString returned a nursery pointer");
  masm.bind(&tenured);

  masm.pop(temp);
#endif

  masm.jump(ool->rejoin());
}
16117 void CodeGenerator::emitMaybeAtomizeSlot(LInstruction* ins, Register stringReg,
16118 Address slotAddr,
16119 TypedOrValueRegister dest) {
16120 OutOfLineAtomizeSlot* ool =
16121 new (alloc()) OutOfLineAtomizeSlot(ins, stringReg, slotAddr, dest);
16122 addOutOfLineCode(ool, ins->mirRaw()->toInstruction());
16123 masm.branchTest32(Assembler::Zero,
16124 Address(stringReg, JSString::offsetOfFlags()),
16125 Imm32(JSString::ATOM_BIT), ool->entry());
16126 masm.bind(ool->rejoin());
// Load a boxed Value from a fixed slot; if it holds a string, make sure the
// slot ends up holding an atom (atomizing via an OOL path if needed).
void CodeGenerator::visitLoadFixedSlotAndAtomize(
    LLoadFixedSlotAndAtomize* ins) {
  Register obj = ToRegister(ins->getOperand(0));
  Register temp = ToRegister(ins->temp0());
  size_t slot = ins->mir()->slot();
  ValueOperand result = ToOutValue(ins);

  Address slotAddr(obj, NativeObject::getFixedSlotOffset(slot));
  masm.loadValue(slotAddr, result);

  // Non-string values are returned unchanged.
  Label notString;
  masm.branchTestString(Assembler::NotEqual, result, &notString);
  masm.unboxString(result, temp);
  emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
  masm.bind(&notString);
}
// Like visitLoadFixedSlotAndAtomize, but for a dynamic slot addressed off
// the object's slots pointer.
void CodeGenerator::visitLoadDynamicSlotAndAtomize(
    LLoadDynamicSlotAndAtomize* ins) {
  ValueOperand result = ToOutValue(ins);
  Register temp = ToRegister(ins->temp0());
  Register base = ToRegister(ins->input());
  int32_t offset = ins->mir()->slot() * sizeof(js::Value);

  Address slotAddr(base, offset);
  masm.loadValue(slotAddr, result);

  // Non-string values are returned unchanged.
  Label notString;
  masm.branchTestString(Assembler::NotEqual, result, &notString);
  masm.unboxString(result, temp);
  emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
  masm.bind(&notString);
}
// Load a fixed slot known to hold a string, unbox it, and ensure the slot
// holds an atom. Bails out on a non-string value when fallible.
void CodeGenerator::visitLoadFixedSlotUnboxAndAtomize(
    LLoadFixedSlotUnboxAndAtomize* ins) {
  const MLoadFixedSlotAndUnbox* mir = ins->mir();
  MOZ_ASSERT(mir->type() == MIRType::String);
  Register input = ToRegister(ins->object());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address slotAddr(input, NativeObject::getFixedSlotOffset(slot));

  Label bail;
  EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
                   &bail);
  emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
                       TypedOrValueRegister(MIRType::String, result));

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Dynamic-slot variant of visitLoadFixedSlotUnboxAndAtomize.
void CodeGenerator::visitLoadDynamicSlotUnboxAndAtomize(
    LLoadDynamicSlotUnboxAndAtomize* ins) {
  const MLoadDynamicSlotAndUnbox* mir = ins->mir();
  MOZ_ASSERT(mir->type() == MIRType::String);
  Register input = ToRegister(ins->slots());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address slotAddr(input, slot * sizeof(JS::Value));

  Label bail;
  EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
                   &bail);
  emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
                       TypedOrValueRegister(MIRType::String, result));

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Add a property by installing the new shape on the object, then store the
// value into the (fixed or dynamic) slot that shape assigns to it.
void CodeGenerator::visitAddAndStoreSlot(LAddAndStoreSlot* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  const ValueOperand value = ToValue(ins, LAddAndStoreSlot::ValueIndex);
  const Register maybeTemp = ToTempRegisterOrInvalid(ins->temp0());

  // The old shape needs a pre-barrier before being overwritten.
  Shape* shape = ins->mir()->shape();
  masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
    EmitPreBarrier(masm, addr, MIRType::Shape);
  });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.

  uint32_t offset = ins->mir()->slotOffset();
  if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
    Address slot(obj, offset);
    masm.storeValue(value, slot);
  } else {
    // Dynamic slot: indirect through the object's slots pointer.
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), maybeTemp);
    Address slot(maybeTemp, offset);
    masm.storeValue(value, slot);
  }
}
// Grow the object's dynamic slots via an ABI call, bail out on failure,
// then install the new shape and store the value into its slot.
void CodeGenerator::visitAllocateAndStoreSlot(LAllocateAndStoreSlot* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  const ValueOperand value = ToValue(ins, LAllocateAndStoreSlot::ValueIndex);
  const Register temp0 = ToRegister(ins->temp0());
  const Register temp1 = ToRegister(ins->temp1());

  // Preserve |obj| and |value| across the ABI call.
  masm.Push(obj);
  masm.Push(value);

  using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(obj);
  masm.move32(Imm32(ins->mir()->numNewSlots()), temp1);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, NativeObject::growSlotsPure>();
  masm.storeCallPointerResult(temp0);

  masm.Pop(value);
  masm.Pop(obj);

  // growSlotsPure returns false on failure; bail out in that case.
  bailoutIfFalseBool(temp0, ins->snapshot());

  // The old shape needs a pre-barrier before being overwritten.
  masm.storeObjShape(ins->mir()->shape(), obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp0);
  Address slot(temp0, ins->mir()->slotOffset());
  masm.storeValue(value, slot);
}
16265 void CodeGenerator::visitAddSlotAndCallAddPropHook(
16266 LAddSlotAndCallAddPropHook* ins) {
16267 const Register obj = ToRegister(ins->object());
16268 const ValueOperand value =
16269 ToValue(ins, LAddSlotAndCallAddPropHook::ValueIndex);
16271 pushArg(ImmGCPtr(ins->mir()->shape()));
16272 pushArg(value);
16273 pushArg(obj);
16275 using Fn =
16276 bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
16277 callVM<Fn, AddSlotAndCallAddPropHook>(ins);
16280 void CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins) {
16281 const Register obj = ToRegister(ins->getOperand(0));
16282 size_t slot = ins->mir()->slot();
16284 const ValueOperand value = ToValue(ins, LStoreFixedSlotV::ValueIndex);
16286 Address address(obj, NativeObject::getFixedSlotOffset(slot));
16287 if (ins->mir()->needsBarrier()) {
16288 emitPreBarrier(address);
16291 masm.storeValue(value, address);
// Store a typed value (constant or register) into a fixed slot, with an
// optional pre-barrier.
void CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();

  const LAllocation* value = ins->value();
  MIRType valueType = ins->mir()->value()->type();

  Address address(obj, NativeObject::getFixedSlotOffset(slot));
  if (ins->mir()->needsBarrier()) {
    emitPreBarrier(address);
  }

  // Wrap the allocation as either a constant JS::Value or a typed register.
  ConstantOrRegister nvalue =
      value->isConstant()
          ? ConstantOrRegister(value->toConstant()->toJSValue())
          : TypedOrValueRegister(valueType, ToAnyRegister(value));
  masm.storeConstantOrRegister(nvalue, address);
}
16313 void CodeGenerator::visitGetNameCache(LGetNameCache* ins) {
16314 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16315 Register envChain = ToRegister(ins->envObj());
16316 ValueOperand output = ToOutValue(ins);
16317 Register temp = ToRegister(ins->temp0());
16319 IonGetNameIC ic(liveRegs, envChain, output, temp);
16320 addIC(ins, allocateIC(ic));
16323 void CodeGenerator::addGetPropertyCache(LInstruction* ins,
16324 LiveRegisterSet liveRegs,
16325 TypedOrValueRegister value,
16326 const ConstantOrRegister& id,
16327 ValueOperand output) {
16328 CacheKind kind = CacheKind::GetElem;
16329 if (id.constant() && id.value().isString()) {
16330 JSString* idString = id.value().toString();
16331 if (idString->isAtom() && !idString->asAtom().isIndex()) {
16332 kind = CacheKind::GetProp;
16335 IonGetPropertyIC cache(kind, liveRegs, value, id, output);
16336 addIC(ins, allocateIC(cache));
16339 void CodeGenerator::addSetPropertyCache(LInstruction* ins,
16340 LiveRegisterSet liveRegs,
16341 Register objReg, Register temp,
16342 const ConstantOrRegister& id,
16343 const ConstantOrRegister& value,
16344 bool strict) {
16345 CacheKind kind = CacheKind::SetElem;
16346 if (id.constant() && id.value().isString()) {
16347 JSString* idString = id.value().toString();
16348 if (idString->isAtom() && !idString->asAtom().isIndex()) {
16349 kind = CacheKind::SetProp;
16352 IonSetPropertyIC cache(kind, liveRegs, objReg, temp, id, value, strict);
16353 addIC(ins, allocateIC(cache));
16356 ConstantOrRegister CodeGenerator::toConstantOrRegister(LInstruction* lir,
16357 size_t n, MIRType type) {
16358 if (type == MIRType::Value) {
16359 return TypedOrValueRegister(ToValue(lir, n));
16362 const LAllocation* value = lir->getOperand(n);
16363 if (value->isConstant()) {
16364 return ConstantOrRegister(value->toConstant()->toJSValue());
16367 return TypedOrValueRegister(type, ToAnyRegister(value));
16370 void CodeGenerator::visitGetPropertyCache(LGetPropertyCache* ins) {
16371 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16372 TypedOrValueRegister value =
16373 toConstantOrRegister(ins, LGetPropertyCache::ValueIndex,
16374 ins->mir()->value()->type())
16375 .reg();
16376 ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCache::IdIndex,
16377 ins->mir()->idval()->type());
16378 ValueOperand output = ToOutValue(ins);
16379 addGetPropertyCache(ins, liveRegs, value, id, output);
16382 void CodeGenerator::visitGetPropSuperCache(LGetPropSuperCache* ins) {
16383 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16384 Register obj = ToRegister(ins->obj());
16385 TypedOrValueRegister receiver =
16386 toConstantOrRegister(ins, LGetPropSuperCache::ReceiverIndex,
16387 ins->mir()->receiver()->type())
16388 .reg();
16389 ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCache::IdIndex,
16390 ins->mir()->idval()->type());
16391 ValueOperand output = ToOutValue(ins);
16393 CacheKind kind = CacheKind::GetElemSuper;
16394 if (id.constant() && id.value().isString()) {
16395 JSString* idString = id.value().toString();
16396 if (idString->isAtom() && !idString->asAtom().isIndex()) {
16397 kind = CacheKind::GetPropSuper;
16401 IonGetPropSuperIC cache(kind, liveRegs, obj, receiver, id, output);
16402 addIC(ins, allocateIC(cache));
16405 void CodeGenerator::visitBindNameCache(LBindNameCache* ins) {
16406 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16407 Register envChain = ToRegister(ins->environmentChain());
16408 Register output = ToRegister(ins->output());
16409 Register temp = ToRegister(ins->temp0());
16411 IonBindNameIC ic(liveRegs, envChain, output, temp);
16412 addIC(ins, allocateIC(ic));
16415 void CodeGenerator::visitHasOwnCache(LHasOwnCache* ins) {
16416 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16417 TypedOrValueRegister value =
16418 toConstantOrRegister(ins, LHasOwnCache::ValueIndex,
16419 ins->mir()->value()->type())
16420 .reg();
16421 TypedOrValueRegister id = toConstantOrRegister(ins, LHasOwnCache::IdIndex,
16422 ins->mir()->idval()->type())
16423 .reg();
16424 Register output = ToRegister(ins->output());
16426 IonHasOwnIC cache(liveRegs, value, id, output);
16427 addIC(ins, allocateIC(cache));
16430 void CodeGenerator::visitCheckPrivateFieldCache(LCheckPrivateFieldCache* ins) {
16431 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16432 TypedOrValueRegister value =
16433 toConstantOrRegister(ins, LCheckPrivateFieldCache::ValueIndex,
16434 ins->mir()->value()->type())
16435 .reg();
16436 TypedOrValueRegister id =
16437 toConstantOrRegister(ins, LCheckPrivateFieldCache::IdIndex,
16438 ins->mir()->idval()->type())
16439 .reg();
16440 Register output = ToRegister(ins->output());
16442 IonCheckPrivateFieldIC cache(liveRegs, value, id, output);
16443 addIC(ins, allocateIC(cache));
16446 void CodeGenerator::visitNewPrivateName(LNewPrivateName* ins) {
16447 pushArg(ImmGCPtr(ins->mir()->name()));
16449 using Fn = JS::Symbol* (*)(JSContext*, Handle<JSAtom*>);
16450 callVM<Fn, NewPrivateName>(ins);
16453 void CodeGenerator::visitCallDeleteProperty(LCallDeleteProperty* lir) {
16454 pushArg(ImmGCPtr(lir->mir()->name()));
16455 pushArg(ToValue(lir, LCallDeleteProperty::ValueIndex));
16457 using Fn = bool (*)(JSContext*, HandleValue, Handle<PropertyName*>, bool*);
16458 if (lir->mir()->strict()) {
16459 callVM<Fn, DelPropOperation<true>>(lir);
16460 } else {
16461 callVM<Fn, DelPropOperation<false>>(lir);
16465 void CodeGenerator::visitCallDeleteElement(LCallDeleteElement* lir) {
16466 pushArg(ToValue(lir, LCallDeleteElement::IndexIndex));
16467 pushArg(ToValue(lir, LCallDeleteElement::ValueIndex));
16469 using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
16470 if (lir->mir()->strict()) {
16471 callVM<Fn, DelElemOperation<true>>(lir);
16472 } else {
16473 callVM<Fn, DelElemOperation<false>>(lir);
// Get a for-in PropertyIteratorObject for |obj|: try the shape-keyed
// iterator cache inline, fall back to a VM call, then mark the native
// iterator active and register it with the realm's enumerators list.
void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
  Register obj = ToRegister(lir->object());
  Register iterObj = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  Register temp3 = ToRegister(lir->temp2());

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
  OutOfLineCode* ool = (lir->mir()->wantsIndices())
                           ? oolCallVM<Fn, GetIteratorWithIndices>(
                                 lir, ArgList(obj), StoreRegisterTo(iterObj))
                           : oolCallVM<Fn, GetIterator>(
                                 lir, ArgList(obj), StoreRegisterTo(iterObj));

  // Inline fast path: reuse a cached iterator for this shape, or take the
  // OOL VM-call path on a cache miss.
  masm.maybeLoadIteratorFromShape(obj, iterObj, temp, temp2, temp3,
                                  ool->entry());

  Register nativeIter = temp;
  masm.loadPrivate(
      Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
      nativeIter);

  if (lir->mir()->wantsIndices()) {
    // At least one consumer of the output of this iterator has been optimized
    // to use iterator indices. If the cached iterator doesn't include indices,
    // but it was marked to indicate that we can create them if needed, then we
    // do a VM call to replace the cached iterator with a fresh iterator
    // including indices.
    masm.branchNativeIteratorIndices(Assembler::Equal, nativeIter, temp2,
                                     NativeIteratorIndices::AvailableOnRequest,
                                     ool->entry());
  }

  Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
  masm.storePtr(
      obj, Address(nativeIter, NativeIterator::offsetOfObjectBeingIterated()));
  masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);

  Register enumeratorsAddr = temp2;
  masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
  masm.registerIterator(enumeratorsAddr, nativeIter, temp3);

  // Generate post-write barrier for storing to |iterObj->objectBeingIterated_|.
  // We already know that |iterObj| is tenured, so we only have to check |obj|.
  Label skipBarrier;
  masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);
  {
    // The temps hold live state; keep them out of the save set so the
    // barrier call doesn't clobber-restore them needlessly.
    LiveRegisterSet save = liveVolatileRegs(lir);
    save.takeUnchecked(temp);
    save.takeUnchecked(temp2);
    save.takeUnchecked(temp3);
    if (iterObj.volatile_()) {
      save.addUnchecked(iterObj);
    }

    masm.PushRegsInMask(save);
    emitPostWriteBarrier(iterObj);
    masm.PopRegsInMask(save);
  }
  masm.bind(&skipBarrier);

  masm.bind(ool->rejoin());
}
16541 void CodeGenerator::visitValueToIterator(LValueToIterator* lir) {
16542 pushArg(ToValue(lir, LValueToIterator::ValueIndex));
16544 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
16545 callVM<Fn, ValueToIterator>(lir);
// Branch to |ifTrue| when the iterator has valid indices and the iterated
// object's shape still matches the iterator's first shape; else |ifFalse|.
void CodeGenerator::visitIteratorHasIndicesAndBranch(
    LIteratorHasIndicesAndBranch* lir) {
  Register iterator = ToRegister(lir->iterator());
  Register object = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp());
  Register temp2 = ToRegister(lir->temp2());
  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());

  // Check that the iterator has indices available.
  Address nativeIterAddr(iterator,
                         PropertyIteratorObject::offsetOfIteratorSlot());
  masm.loadPrivate(nativeIterAddr, temp);
  masm.branchNativeIteratorIndices(Assembler::NotEqual, temp, temp2,
                                   NativeIteratorIndices::Valid, ifFalse);

  // Guard that the first shape stored in the iterator matches the current
  // shape of the iterated object.
  Address firstShapeAddr(temp, NativeIterator::offsetOfFirstShape());
  masm.loadPtr(firstShapeAddr, temp);
  masm.branchTestObjShape(Assembler::NotEqual, object, temp, temp2, object,
                          ifFalse);

  // Fall through to the true block when it is laid out next; otherwise
  // jump explicitly.
  if (!isNextBlock(lir->ifTrue()->lir())) {
    masm.jump(ifTrue);
  }
}
// Load the current property's value using the iterator's cached index,
// dispatching on the index kind: dynamic slot, fixed slot, or dense element.
void CodeGenerator::visitLoadSlotByIteratorIndex(
    LLoadSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  ValueOperand result = ToOutValue(lir);

  // temp <- index, temp2 <- PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  // Dynamic slot: indirect through the object's slots pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.loadValue(BaseValueIndex(temp2, temp), result);
  masm.jump(&done);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.loadValue(BaseValueIndex(object, temp, sizeof(NativeObject)), result);
  masm.jump(&done);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  // The only remaining kind must be Element.
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  masm.loadValue(BaseObjectElementIndex(temp2, temp), result);
  masm.bind(&done);
}
// Store a value into the current property's location using the iterator's
// cached index. Computes the target address per index kind, then performs a
// single barriered store.
void CodeGenerator::visitStoreSlotByIteratorIndex(
    LStoreSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  ValueOperand value = ToValue(lir, LStoreSlotByIteratorIndex::ValueIndex);
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // temp <- index, temp2 <- PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done, doStore;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  // Dynamic slot: compute the slot address via the slots pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.computeEffectiveAddress(BaseValueIndex(temp2, temp), temp);
  masm.jump(&doStore);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.computeEffectiveAddress(
      BaseValueIndex(object, temp, sizeof(NativeObject)), temp);
  masm.jump(&doStore);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  // The only remaining kind must be Element.
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  BaseObjectElementIndex elementAddress(temp2, temp);
  masm.computeEffectiveAddress(elementAddress, temp);

  // All three paths converge here with the target address in |temp|.
  masm.bind(&doStore);
  Address storeAddress(temp, 0);
  emitPreBarrier(storeAddress);
  masm.storeValue(value, storeAddress);

  // Post-write barrier: only needed when a tenured |object| now references
  // a nursery-allocated |value|.
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp2, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp2, &done);

  saveVolatile(temp2);
  emitPostWriteBarrier(object);
  restoreVolatile(temp2);

  masm.bind(&done);
}
16683 void CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins) {
16684 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16685 Register objReg = ToRegister(ins->object());
16686 Register temp = ToRegister(ins->temp0());
16688 ConstantOrRegister id = toConstantOrRegister(ins, LSetPropertyCache::IdIndex,
16689 ins->mir()->idval()->type());
16690 ConstantOrRegister value = toConstantOrRegister(
16691 ins, LSetPropertyCache::ValueIndex, ins->mir()->value()->type());
16693 addSetPropertyCache(ins, liveRegs, objReg, temp, id, value,
16694 ins->mir()->strict());
16697 void CodeGenerator::visitThrow(LThrow* lir) {
16698 pushArg(ToValue(lir, LThrow::ValueIndex));
16700 using Fn = bool (*)(JSContext*, HandleValue);
16701 callVM<Fn, js::ThrowOperation>(lir);
16704 void CodeGenerator::visitThrowWithStack(LThrowWithStack* lir) {
16705 pushArg(ToValue(lir, LThrowWithStack::StackIndex));
16706 pushArg(ToValue(lir, LThrowWithStack::ValueIndex));
16708 using Fn = bool (*)(JSContext*, HandleValue, HandleValue);
16709 callVM<Fn, js::ThrowWithStackOperation>(lir);
16712 class OutOfLineTypeOfV : public OutOfLineCodeBase<CodeGenerator> {
16713 LTypeOfV* ins_;
16715 public:
16716 explicit OutOfLineTypeOfV(LTypeOfV* ins) : ins_(ins) {}
16718 void accept(CodeGenerator* codegen) override {
16719 codegen->visitOutOfLineTypeOfV(this);
16721 LTypeOfV* ins() const { return ins_; }
// Materialize the JSType constant that typeof produces for a boxed value
// of the given JSValueType.
void CodeGenerator::emitTypeOfJSType(JSValueType type, Register output) {
  switch (type) {
    case JSVAL_TYPE_OBJECT:
      masm.move32(Imm32(JSTYPE_OBJECT), output);
      break;
    case JSVAL_TYPE_DOUBLE:
    case JSVAL_TYPE_INT32:
      // Int32 and double both report "number".
      masm.move32(Imm32(JSTYPE_NUMBER), output);
      break;
    case JSVAL_TYPE_BOOLEAN:
      masm.move32(Imm32(JSTYPE_BOOLEAN), output);
      break;
    case JSVAL_TYPE_UNDEFINED:
      masm.move32(Imm32(JSTYPE_UNDEFINED), output);
      break;
    case JSVAL_TYPE_NULL:
      // typeof null is "object" per ECMA-262.
      masm.move32(Imm32(JSTYPE_OBJECT), output);
      break;
    case JSVAL_TYPE_STRING:
      masm.move32(Imm32(JSTYPE_STRING), output);
      break;
    case JSVAL_TYPE_SYMBOL:
      masm.move32(Imm32(JSTYPE_SYMBOL), output);
      break;
    case JSVAL_TYPE_BIGINT:
      masm.move32(Imm32(JSTYPE_BIGINT), output);
      break;
    default:
      MOZ_CRASH("Unsupported JSValueType");
  }
}
// Emit one typeof tag check: if |tag| matches |type|, set |output| to the
// corresponding JSType and jump to |done|; otherwise fall through. Object
// inputs divert to |oolObject| instead of producing a result inline.
void CodeGenerator::emitTypeOfCheck(JSValueType type, Register tag,
                                    Register output, Label* done,
                                    Label* oolObject) {
  Label notMatch;
  switch (type) {
    case JSVAL_TYPE_OBJECT:
      // The input may be a callable object (result is "function") or
      // may emulate undefined (result is "undefined"). Use an OOL path.
      masm.branchTestObject(Assembler::Equal, tag, oolObject);
      return;
    case JSVAL_TYPE_DOUBLE:
    case JSVAL_TYPE_INT32:
      // Int32 and double are both "number"; test them together.
      masm.branchTestNumber(Assembler::NotEqual, tag, &notMatch);
      break;
    default:
      masm.branchTestType(Assembler::NotEqual, tag, type, &notMatch);
      break;
  }

  emitTypeOfJSType(type, output);
  masm.jump(done);
  masm.bind(&notMatch);
}
// typeof for a boxed Value: emit tag checks ordered by observed frequency,
// then cover any remaining types, diverting object inputs to an OOL path.
void CodeGenerator::visitTypeOfV(LTypeOfV* lir) {
  const ValueOperand value = ToValue(lir, LTypeOfV::InputIndex);
  Register output = ToRegister(lir->output());
  Register tag = masm.extractTag(value, output);

  Label done;

  auto* ool = new (alloc()) OutOfLineTypeOfV(lir);
  addOutOfLineCode(ool, lir->mir());

  const std::initializer_list<JSValueType> defaultOrder = {
      JSVAL_TYPE_OBJECT, JSVAL_TYPE_DOUBLE, JSVAL_TYPE_UNDEFINED,
      JSVAL_TYPE_NULL,   JSVAL_TYPE_BOOLEAN, JSVAL_TYPE_STRING,
      JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};

  mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);

  // Generate checks for previously observed types first.
  // The TypeDataList is sorted by descending frequency.
  for (auto& observed : lir->mir()->observedTypes()) {
    JSValueType type = observed.type();

    // Unify number types.
    if (type == JSVAL_TYPE_INT32) {
      type = JSVAL_TYPE_DOUBLE;
    }

    remaining -= type;

    emitTypeOfCheck(type, tag, output, &done, ool->entry());
  }

  // Generate checks for remaining types.
  for (auto type : defaultOrder) {
    if (!remaining.contains(type)) {
      continue;
    }
    remaining -= type;

    if (remaining.isEmpty() && type != JSVAL_TYPE_OBJECT) {
      // We can skip the check for the last remaining type, unless the type is
      // JSVAL_TYPE_OBJECT, which may have to go through the OOL path.
#ifdef DEBUG
      emitTypeOfCheck(type, tag, output, &done, ool->entry());
      masm.assumeUnreachable("Unexpected Value type in visitTypeOfV");
#else
      emitTypeOfJSType(type, output);
#endif
    } else {
      emitTypeOfCheck(type, tag, output, &done, ool->entry());
    }
  }
  MOZ_ASSERT(remaining.isEmpty());

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Compute typeof for an object: "function" for callables, "undefined" for
// objects that emulate undefined, otherwise "object". Cases the inline
// check cannot decide fall back to an ABI call to js::TypeOfObject.
void CodeGenerator::emitTypeOfObject(Register obj, Register output,
                                     Label* done) {
  Label slowCheck, isObject, isCallable, isUndefined;
  masm.typeOfObject(obj, output, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.move32(Imm32(JSTYPE_FUNCTION), output);
  masm.jump(done);

  masm.bind(&isUndefined);
  masm.move32(Imm32(JSTYPE_UNDEFINED), output);
  masm.jump(done);

  masm.bind(&isObject);
  masm.move32(Imm32(JSTYPE_OBJECT), output);
  masm.jump(done);

  masm.bind(&slowCheck);

  // Slow path: C++ call. The result falls through to |done|.
  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);
}
16867 void CodeGenerator::visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool) {
16868 LTypeOfV* ins = ool->ins();
16870 ValueOperand input = ToValue(ins, LTypeOfV::InputIndex);
16871 Register temp = ToTempUnboxRegister(ins->temp0());
16872 Register output = ToRegister(ins->output());
16874 Register obj = masm.extractObject(input, temp);
16875 emitTypeOfObject(obj, output, ool->rejoin());
16876 masm.jump(ool->rejoin());
16879 void CodeGenerator::visitTypeOfO(LTypeOfO* lir) {
16880 Register obj = ToRegister(lir->object());
16881 Register output = ToRegister(lir->output());
16883 Label done;
16884 emitTypeOfObject(obj, output, &done);
16885 masm.bind(&done);
// Map a JSType value to the corresponding interned type-name string by
// indexing into the runtime's names table.
void CodeGenerator::visitTypeOfName(LTypeOfName* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::Below, input, Imm32(JSTYPE_LIMIT), &ok);
  masm.assumeUnreachable("bad JSType");
  masm.bind(&ok);
#endif

  // The table lookup below relies on JSTYPE_UNDEFINED being 0, so |input|
  // can index directly from &names().undefined.
  static_assert(JSTYPE_UNDEFINED == 0);

  masm.movePtr(ImmPtr(&gen->runtime->names().undefined), output);
  masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
}
// Out-of-line code holder for LTypeOfIsNonPrimitiveV: dispatches back into
// CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV for the slow path.
class OutOfLineTypeOfIsNonPrimitiveV : public OutOfLineCodeBase<CodeGenerator> {
  LTypeOfIsNonPrimitiveV* ins_;

 public:
  explicit OutOfLineTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineTypeOfIsNonPrimitiveV(this);
  }
  // The LIR instruction this OOL path belongs to.
  auto* ins() const { return ins_; }
};
// Out-of-line code holder for LTypeOfIsNonPrimitiveO: dispatches back into
// CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO for the slow path.
class OutOfLineTypeOfIsNonPrimitiveO : public OutOfLineCodeBase<CodeGenerator> {
  LTypeOfIsNonPrimitiveO* ins_;

 public:
  explicit OutOfLineTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineTypeOfIsNonPrimitiveO(this);
  }
  // The LIR instruction this OOL path belongs to.
  auto* ins() const { return ins_; }
};
// Slow path for `typeof obj <op> <type>`: call js::TypeOfObject through the
// C++ ABI to get the object's JSType, then compare it against the statically
// known type from the MIR node and set |output| to the boolean result.
void CodeGenerator::emitTypeOfIsObjectOOL(MTypeOfIs* mir, Register obj,
                                          Register output) {
  // Volatile registers must survive the ABI call; |output| itself is the one
  // register we are allowed to clobber.
  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);

  // Condition polarity comes from the comparison op (Eq/Ne/StrictEq/StrictNe).
  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
  masm.cmp32Set(cond, output, Imm32(mir->jstype()), output);
}
16945 void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV(
16946 OutOfLineTypeOfIsNonPrimitiveV* ool) {
16947 auto* ins = ool->ins();
16948 ValueOperand input = ToValue(ins, LTypeOfIsNonPrimitiveV::InputIndex);
16949 Register output = ToRegister(ins->output());
16950 Register temp = ToTempUnboxRegister(ins->temp0());
16952 Register obj = masm.extractObject(input, temp);
16954 emitTypeOfIsObjectOOL(ins->mir(), obj, output);
16956 masm.jump(ool->rejoin());
16959 void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO(
16960 OutOfLineTypeOfIsNonPrimitiveO* ool) {
16961 auto* ins = ool->ins();
16962 Register input = ToRegister(ins->input());
16963 Register output = ToRegister(ins->output());
16965 emitTypeOfIsObjectOOL(ins->mir(), input, output);
16967 masm.jump(ool->rejoin());
// Inline `typeof obj <op> <type>` check for a known-object input. The three
// typeof outcomes an object can produce here (object / function / undefined,
// per the typeOfObject call below) are routed to |success| or |fail|
// depending on which JSType is being tested. |slowCheck| is taken when the
// inline classification cannot decide. Both |success| and |fail| are bound
// here and the boolean result — honoring the JSOp's Eq/Ne polarity — is
// materialized into |output|.
void CodeGenerator::emitTypeOfIsObject(MTypeOfIs* mir, Register obj,
                                       Register output, Label* success,
                                       Label* fail, Label* slowCheck) {
  // By default every outcome fails; the tested type's label is redirected to
  // |success| below.
  Label* isObject = fail;
  Label* isFunction = fail;
  Label* isUndefined = fail;

  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED:
      isUndefined = success;
      break;

    case JSTYPE_OBJECT:
      isObject = success;
      break;

    case JSTYPE_FUNCTION:
      isFunction = success;
      break;

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      // Objects never report a primitive typeof; callers must not get here.
      MOZ_CRASH("Primitive type");
  }

  masm.typeOfObject(obj, output, slowCheck, isObject, isFunction, isUndefined);

  auto op = mir->jsop();

  // Materialize the boolean: |fail| yields true only for Ne/StrictNe,
  // |success| yields true only for Eq/StrictEq.
  Label done;
  masm.bind(fail);
  masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  masm.jump(&done);
  masm.bind(success);
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
  masm.bind(&done);
}
// `typeof value <op> <non-primitive type>` for a boxed Value input. Primitive
// tags are resolved directly from the tag; only object-tagged values fall
// through to the object classification, with a VM call out of line for the
// cases the inline check cannot decide.
void CodeGenerator::visitTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* lir) {
  ValueOperand input = ToValue(lir, LTypeOfIsNonPrimitiveV::InputIndex);
  Register output = ToRegister(lir->output());
  Register temp = ToTempUnboxRegister(lir->temp0());

  auto* mir = lir->mir();

  auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveV(lir);
  addOutOfLineCode(ool, mir);

  Label success, fail;

  // Pre-filter on the value's tag so only object-tagged inputs reach the
  // extractObject below.
  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED: {
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      // The undefined tag matches immediately; any other non-object tag
      // cannot match.
      masm.branchTestUndefined(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_OBJECT: {
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      // typeof null is "object", so a null tag matches immediately.
      masm.branchTestNull(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_FUNCTION: {
      // Only object-tagged values can be functions.
      masm.branchTestObject(Assembler::NotEqual, input, &fail);
      break;
    }

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Primitive type");
  }

  // The value is known to be an object here.
  Register obj = masm.extractObject(input, temp);

  emitTypeOfIsObject(mir, obj, output, &success, &fail, ool->entry());

  masm.bind(ool->rejoin());
}
17072 void CodeGenerator::visitTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* lir) {
17073 Register input = ToRegister(lir->input());
17074 Register output = ToRegister(lir->output());
17076 auto* mir = lir->mir();
17078 auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveO(lir);
17079 addOutOfLineCode(ool, mir);
17081 Label success, fail;
17082 emitTypeOfIsObject(mir, input, output, &success, &fail, ool->entry());
17084 masm.bind(ool->rejoin());
// `typeof value <op> <primitive type>` for a boxed Value: each primitive type
// maps to a single tag test, with the condition encoding the comparison op's
// equality/inequality polarity. Non-primitive types must use the
// TypeOfIsNonPrimitive paths instead.
void CodeGenerator::visitTypeOfIsPrimitive(LTypeOfIsPrimitive* lir) {
  ValueOperand input = ToValue(lir, LTypeOfIsPrimitive::InputIndex);
  Register output = ToRegister(lir->output());

  auto* mir = lir->mir();
  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);

  switch (mir->jstype()) {
    case JSTYPE_STRING:
      masm.testStringSet(cond, input, output);
      break;
    case JSTYPE_NUMBER:
      masm.testNumberSet(cond, input, output);
      break;
    case JSTYPE_BOOLEAN:
      masm.testBooleanSet(cond, input, output);
      break;
    case JSTYPE_SYMBOL:
      masm.testSymbolSet(cond, input, output);
      break;
    case JSTYPE_BIGINT:
      masm.testBigIntSet(cond, input, output);
      break;

    case JSTYPE_UNDEFINED:
    case JSTYPE_OBJECT:
    case JSTYPE_FUNCTION:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Non-primitive type");
  }
}
// Wrap a sync iterator into an async-from-sync iterator via a VM call.
void CodeGenerator::visitToAsyncIter(LToAsyncIter* lir) {
  // VM-call arguments are pushed in reverse order of the C++ signature:
  // the HandleValue (next method) first, then the HandleObject (iterator).
  pushArg(ToValue(lir, LToAsyncIter::NextMethodIndex));
  pushArg(ToRegister(lir->iterator()));

  using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
  callVM<Fn, js::CreateAsyncFromSyncIterator>(lir);
}
17131 void CodeGenerator::visitToPropertyKeyCache(LToPropertyKeyCache* lir) {
17132 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
17133 ValueOperand input = ToValue(lir, LToPropertyKeyCache::InputIndex);
17134 ValueOperand output = ToOutValue(lir);
17136 IonToPropertyKeyIC ic(liveRegs, input, output);
17137 addIC(lir, allocateIC(ic));
// Load a boxed Value from a native object's elements; bail out of Ion if the
// loaded value is the magic hole marker.
void CodeGenerator::visitLoadElementV(LLoadElementV* load) {
  Register elements = ToRegister(load->elements());
  const ValueOperand out = ToOutValue(load);

  if (load->index()->isConstant()) {
    // Constant index: the assert guarantees the int32 offset math below
    // cannot overflow.
    NativeObject::elementsSizeMustNotOverflow();
    int32_t offset = ToInt32(load->index()) * sizeof(Value);
    masm.loadValue(Address(elements, offset), out);
  } else {
    masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index())),
                   out);
  }

  // Holes are stored as magic values; loading one invalidates this fast path.
  Label testMagic;
  masm.branchTestMagic(Assembler::Equal, out, &testMagic);
  bailoutFrom(&testMagic, load->snapshot());
}
// Load an element where out-of-bounds or hole reads produce |undefined|.
// Negative indices bail out instead (when the MIR node requires the check)
// rather than returning undefined.
void CodeGenerator::visitLoadElementHole(LLoadElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register initLength = ToRegister(lir->initLength());
  const ValueOperand out = ToOutValue(lir);

  const MLoadElementHole* mir = lir->mir();

  // If the index is out of bounds, load |undefined|. Otherwise, load the
  // value.
  Label outOfBounds, done;
  masm.spectreBoundsCheck32(index, initLength, out.scratchReg(), &outOfBounds);

  masm.loadValue(BaseObjectElementIndex(elements, index), out);

  // If the value wasn't a hole, we're done. Otherwise, we'll load undefined.
  masm.branchTestMagic(Assembler::NotEqual, out, &done);

  if (mir->needsNegativeIntCheck()) {
    Label loadUndefined;
    masm.jump(&loadUndefined);

    masm.bind(&outOfBounds);

    // Out-of-bounds with a negative index must bail out; only non-negative
    // out-of-bounds indices yield undefined.
    bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

    masm.bind(&loadUndefined);
  } else {
    masm.bind(&outOfBounds);
  }
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
// Load a scalar element from typed-array storage into |out|. The |fail|
// label is only emitted into if loadFromTypedArray can fail for this storage
// type / output combination, in which case failure bails out of Ion.
void CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToTempRegisterOrInvalid(lir->temp0());
  AnyRegister out = ToAnyRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  Label fail;
  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.loadFromTypedArray(storageType, source, out, temp, &fail);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.loadFromTypedArray(storageType, source, out, temp, &fail);
  }

  // Only attach the bailout if the load actually used the failure path.
  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }
}
// Load a 64-bit element from typed-array storage and materialize it as a
// BigInt object (allocation handled by emitCreateBigInt).
void CodeGenerator::visitLoadUnboxedBigInt(LLoadUnboxedBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToRegister(lir->temp());
  Register64 temp64 = ToRegister64(lir->temp64());
  Register out = ToRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.load64(source, temp64);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.load64(source, temp64);
  }

  emitCreateBigInt(lir, storageType, temp64, out, temp);
}
// Load an element from DataView memory at an arbitrary (possibly unaligned)
// byte offset, byte-swapping when the requested endianness differs from the
// native one. The fast path does a direct load; otherwise the value is
// staged in a GPR (or GPR pair), swapped, and moved into the output.
void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp = ToTempRegisterOrInvalid(lir->temp());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
  AnyRegister out = ToAnyRegister(lir->output());

  const MLoadDataViewElement* mir = lir->mir();
  Scalar::Type storageType = mir->storageType();

  // DataView offsets are raw byte offsets, hence TimesOne.
  BaseIndex source(elements, ToRegister(lir->index()), TimesOne);

  // No swap needed when the requested endianness is a compile-time constant
  // matching the native byte order.
  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly load if no byte swap is needed and the platform supports unaligned
  // accesses for the access. (Such support is assumed for integer types.)
  if (noSwap && (!Scalar::isFloatingType(storageType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    if (!Scalar::isBigIntType(storageType)) {
      Label fail;
      masm.loadFromTypedArray(storageType, source, out, temp, &fail);

      if (fail.used()) {
        bailoutFrom(&fail, lir->snapshot());
      }
    } else {
      masm.load64(source, temp64);

      emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
    }
    return;
  }

  // Load the value into a gpr register.
  switch (storageType) {
    case Scalar::Int16:
      masm.load16UnalignedSignExtend(source, out.gpr());
      break;
    case Scalar::Uint16:
      masm.load16UnalignedZeroExtend(source, out.gpr());
      break;
    case Scalar::Int32:
      masm.load32Unaligned(source, out.gpr());
      break;
    case Scalar::Uint32:
      // A float output means the uint32 is widened to double later; stage it
      // in |temp| in that case.
      masm.load32Unaligned(source, out.isFloat() ? temp : out.gpr());
      break;
    case Scalar::Float32:
      masm.load32Unaligned(source, temp);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.load64Unaligned(source, temp64);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Runtime endianness flag: skip the swap when it matches the native
      // byte order.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (storageType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(out.gpr());
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(out.gpr());
        break;
      case Scalar::Int32:
        masm.byteSwap32(out.gpr());
        break;
      case Scalar::Uint32:
        masm.byteSwap32(out.isFloat() ? temp : out.gpr());
        break;
      case Scalar::Float32:
        masm.byteSwap32(temp);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Move the value into the output register.
  switch (storageType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      break;
    case Scalar::Uint32:
      if (out.isFloat()) {
        masm.convertUInt32ToDouble(temp, out.fpu());
      } else {
        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadDataViewElement to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        bailoutTest32(Assembler::Signed, out.gpr(), out.gpr(), lir->snapshot());
      }
      break;
    case Scalar::Float32:
      masm.moveGPRToFloat32(temp, out.fpu());
      masm.canonicalizeFloat(out.fpu());
      break;
    case Scalar::Float64:
      masm.moveGPR64ToDouble(temp64, out.fpu());
      masm.canonicalizeDouble(out.fpu());
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Load a typed-array element as a boxed Value, producing |undefined| for
// out-of-bounds indices. Uint32 values that don't fit an int32 either force
// a double result or bail out, per the MIR node's forceDouble() flag.
void CodeGenerator::visitLoadTypedArrayElementHole(
    LLoadTypedArrayElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  const ValueOperand out = ToOutValue(lir);

  Register scratch = out.scratchReg();

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, length, scratch, &outOfBounds);

  Scalar::Type arrayType = lir->mir()->arrayType();
  Label fail;
  BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
  MacroAssembler::Uint32Mode uint32Mode =
      lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
                                : MacroAssembler::Uint32Mode::FailOnDouble;
  masm.loadFromTypedArray(arrayType, source, out, uint32Mode, out.scratchReg(),
                          &fail);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  // Only attach the bailout if the load actually used the failure path.
  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }

  masm.bind(&done);
}
// Load a 64-bit typed-array element as a boxed BigInt Value, producing
// |undefined| for out-of-bounds indices. x86 is register-starved, so there
// the output Value registers double as temporaries.
void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
    LLoadTypedArrayElementHoleBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  const ValueOperand out = ToOutValue(lir);

  Register temp = ToRegister(lir->temp());

  // On x86 there are not enough registers. In that case reuse the output
  // registers as temporaries.
#ifdef JS_CODEGEN_X86
  MOZ_ASSERT(lir->temp64().isBogusTemp());
  Register64 temp64 = out.toRegister64();
#else
  Register64 temp64 = ToRegister64(lir->temp64());
#endif

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, length, temp, &outOfBounds);

  Scalar::Type arrayType = lir->mir()->arrayType();
  BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
  masm.load64(source, temp64);

  // Pick registers for the BigInt result and optional temp; on x86 the
  // 64-bit digits already live in the output registers.
#ifdef JS_CODEGEN_X86
  Register bigInt = temp;
  Register maybeTemp = InvalidReg;
#else
  Register bigInt = out.scratchReg();
  Register maybeTemp = temp;
#endif
  emitCreateBigInt(lir, arrayType, temp64, bigInt, maybeTemp);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
// Helper for emitting a computed-goto switch: collects the per-case code
// labels and the jump-table slots, and emits the indexed indirect branch.
// |tableType| selects whether the table itself is emitted inline with the
// main code or in the out-of-line section (support is per-architecture, see
// the #ifdefs below).
template <SwitchTableType tableType>
class OutOfLineSwitch : public OutOfLineCodeBase<CodeGenerator> {
  using LabelsVector = Vector<Label, 0, JitAllocPolicy>;
  using CodeLabelsVector = Vector<CodeLabel, 0, JitAllocPolicy>;
  LabelsVector labels_;        // One bound label per case body.
  CodeLabelsVector codeLabels_;  // One table slot per case, patched later.
  CodeLabel start_;            // Address of the jump table itself.
  bool isOutOfLine_;           // Set once we are emitting the OOL section.

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineSwitch(this);
  }

 public:
  explicit OutOfLineSwitch(TempAllocator& alloc)
      : labels_(alloc), codeLabels_(alloc), isOutOfLine_(false) {}

  CodeLabel* start() { return &start_; }

  CodeLabelsVector& codeLabels() { return codeLabels_; }
  LabelsVector& labels() { return labels_; }

  // Emit the indirect branch through the jump table, indexed by |index|.
  void jumpToCodeEntries(MacroAssembler& masm, Register index, Register temp) {
    Register base;
    if (tableType == SwitchTableType::Inline) {
#if defined(JS_CODEGEN_ARM)
      base = ::js::jit::pc;
#else
      MOZ_CRASH("NYI: SwitchTableType::Inline");
#endif
    } else {
#if defined(JS_CODEGEN_ARM)
      MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
#else
      masm.mov(start(), temp);
      base = temp;
#endif
    }
    BaseIndex jumpTarget(base, index, ScalePointer);
    masm.branchToComputedAddress(jumpTarget);
  }

  // Register an entry in the switch table.
  void addTableEntry(MacroAssembler& masm) {
    // Table slots are only written in the pass matching the table placement.
    if ((!isOutOfLine_ && tableType == SwitchTableType::Inline) ||
        (isOutOfLine_ && tableType == SwitchTableType::OutOfLine)) {
      CodeLabel cl;
      masm.writeCodePointer(&cl);
      masm.propagateOOM(codeLabels_.append(std::move(cl)));
    }
  }

  // Register the code, to which the table will jump to.
  void addCodeEntry(MacroAssembler& masm) {
    Label entry;
    masm.bind(&entry);
    masm.propagateOOM(labels_.append(std::move(entry)));
  }

  void setOutOfLine() { isOutOfLine_ = true; }
};
// Emit the out-of-line portion of a switch table: reserve the table slots
// (for the OutOfLine flavor) and patch every slot to the absolute address of
// its already-bound case label.
template <SwitchTableType tableType>
void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<tableType>* jumpTable) {
  jumpTable->setOutOfLine();
  auto& labels = jumpTable->labels();

  if (tableType == SwitchTableType::OutOfLine) {
#if defined(JS_CODEGEN_ARM)
    MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
#elif defined(JS_CODEGEN_NONE)
    MOZ_CRASH();
#else

#  if defined(JS_CODEGEN_ARM64)
    // Keep constant-pool emission from splitting the table's pointer slots.
    AutoForbidPoolsAndNops afp(
        &masm,
        (labels.length() + 1) * (sizeof(void*) / vixl::kInstructionSize));
#  endif

    masm.haltingAlign(sizeof(void*));

    // Bind the address of the jump table and reserve the space for code
    // pointers to jump in the newly generated code.
    masm.bind(jumpTable->start());
    masm.addCodeLabel(*jumpTable->start());
    for (size_t i = 0, e = labels.length(); i < e; i++) {
      jumpTable->addTableEntry(masm);
    }
#endif
  }

  // Register all reserved pointers of the jump table to target labels. The
  // entries of the jump table need to be absolute addresses and thus must be
  // patched after codegen is finished.
  auto& codeLabels = jumpTable->codeLabels();
  for (size_t i = 0, e = codeLabels.length(); i < e; i++) {
    auto& cl = codeLabels[i];
    cl.target()->bind(labels[i].offset());
    masm.addCodeLabel(cl);
  }
}
// Explicit instantiations of visitOutOfLineSwitch for both table flavors,
// since the template definition lives in this translation unit.
template void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<SwitchTableType::Inline>* jumpTable);
template void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<SwitchTableType::OutOfLine>* jumpTable);
17567 template <typename T>
17568 static inline void StoreToTypedArray(MacroAssembler& masm,
17569 Scalar::Type writeType,
17570 const LAllocation* value, const T& dest) {
17571 if (writeType == Scalar::Float32 || writeType == Scalar::Float64) {
17572 masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
17573 } else {
17574 if (value->isConstant()) {
17575 masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
17576 } else {
17577 masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
17582 void CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) {
17583 Register elements = ToRegister(lir->elements());
17584 const LAllocation* value = lir->value();
17586 const MStoreUnboxedScalar* mir = lir->mir();
17588 Scalar::Type writeType = mir->writeType();
17590 if (lir->index()->isConstant()) {
17591 Address dest = ToAddress(elements, lir->index(), writeType);
17592 StoreToTypedArray(masm, writeType, value, dest);
17593 } else {
17594 BaseIndex dest(elements, ToRegister(lir->index()),
17595 ScaleFromScalarType(writeType));
17596 StoreToTypedArray(masm, writeType, value, dest);
17600 void CodeGenerator::visitStoreUnboxedBigInt(LStoreUnboxedBigInt* lir) {
17601 Register elements = ToRegister(lir->elements());
17602 Register value = ToRegister(lir->value());
17603 Register64 temp = ToRegister64(lir->temp());
17605 Scalar::Type writeType = lir->mir()->writeType();
17607 masm.loadBigInt64(value, temp);
17609 if (lir->index()->isConstant()) {
17610 Address dest = ToAddress(elements, lir->index(), writeType);
17611 masm.storeToTypedBigIntArray(writeType, temp, dest);
17612 } else {
17613 BaseIndex dest(elements, ToRegister(lir->index()),
17614 ScaleFromScalarType(writeType));
17615 masm.storeToTypedBigIntArray(writeType, temp, dest);
// Store a value into DataView memory at an arbitrary (possibly unaligned)
// byte offset, byte-swapping first when the requested endianness differs from
// the native one. Fast path stores directly; otherwise the value is staged
// in a GPR (or GPR pair), swapped, and stored unaligned.
void CodeGenerator::visitStoreDataViewElement(LStoreDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp = ToTempRegisterOrInvalid(lir->temp());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());

  const MStoreDataViewElement* mir = lir->mir();
  Scalar::Type writeType = mir->writeType();

  // DataView offsets are raw byte offsets, hence TimesOne.
  BaseIndex dest(elements, ToRegister(lir->index()), TimesOne);

  // No swap needed when the requested endianness is a compile-time constant
  // matching the native byte order.
  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly store if no byte swap is needed and the platform supports
  // unaligned accesses for the access. (Such support is assumed for integer
  // types.)
  if (noSwap && (!Scalar::isFloatingType(writeType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    if (!Scalar::isBigIntType(writeType)) {
      StoreToTypedArray(masm, writeType, value, dest);
    } else {
      masm.loadBigInt64(ToRegister(value), temp64);
      masm.storeToTypedBigIntArray(writeType, temp64, dest);
    }
    return;
  }

  // Load the value into a gpr register.
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      if (value->isConstant()) {
        masm.move32(Imm32(ToInt32(value)), temp);
      } else {
        masm.move32(ToRegister(value), temp);
      }
      break;
    case Scalar::Float32: {
      FloatRegister fvalue = ToFloatRegister(value);
      // Canonicalize NaNs before storing their raw bits.
      masm.canonicalizeFloatIfDeterministic(fvalue);
      masm.moveFloat32ToGPR(fvalue, temp);
      break;
    }
    case Scalar::Float64: {
      FloatRegister fvalue = ToFloatRegister(value);
      masm.canonicalizeDoubleIfDeterministic(fvalue);
      masm.moveDoubleToGPR64(fvalue, temp64);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.loadBigInt64(ToRegister(value), temp64);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Runtime endianness flag: skip the swap when it matches the native
      // byte order.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (writeType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(temp);
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(temp);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
        masm.byteSwap32(temp);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Store the value into the destination.
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
      masm.store16Unaligned(temp, dest);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.store32Unaligned(temp, dest);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.store64Unaligned(temp64, dest);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Store into a typed-array element; when the index is out of bounds the
// store is skipped entirely (no bailout, no exception).
void CodeGenerator::visitStoreTypedArrayElementHole(
    LStoreTypedArrayElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();

  Scalar::Type arrayType = lir->mir()->arrayType();

  Register index = ToRegister(lir->index());
  const LAllocation* length = lir->length();
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());

  // Skip the store when index >= length. The length may live in a register
  // or in memory.
  Label skip;
  if (length->isRegister()) {
    masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
  } else {
    masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
  }

  BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
  StoreToTypedArray(masm, arrayType, value, dest);

  masm.bind(&skip);
}
// BigInt variant of the hole-tolerant typed-array store: skip the store for
// out-of-bounds indices, otherwise extract the BigInt's 64-bit value and
// store it.
void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
    LStoreTypedArrayElementHoleBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp = ToRegister64(lir->temp());

  Scalar::Type arrayType = lir->mir()->arrayType();

  Register index = ToRegister(lir->index());
  const LAllocation* length = lir->length();
  // The 64-bit temp doubles as the Spectre scratch register; it is only
  // written (loadBigInt64) after the bounds check.
  Register spectreTemp = temp.scratchReg();

  Label skip;
  if (length->isRegister()) {
    masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
  } else {
    masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
  }

  masm.loadBigInt64(value, temp);

  BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
  masm.storeToTypedBigIntArray(arrayType, temp, dest);

  masm.bind(&skip);
}
// Emit a memory barrier of the kind requested by the LIR node.
void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
  masm.memoryBarrier(ins->type());
}
17800 void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
17801 Register value = ToRegister(lir->value());
17802 Register output = ToRegister(lir->output());
17804 masm.atomicIsLockFreeJS(value, output);
// Clamp an int32 to the uint8 range [0, 255], in place: the register
// allocator must have assigned input and output to the same register.
void CodeGenerator::visitClampIToUint8(LClampIToUint8* lir) {
  Register output = ToRegister(lir->output());
  MOZ_ASSERT(output == ToRegister(lir->input()));
  masm.clampIntToUint8(output);
}
17813 void CodeGenerator::visitClampDToUint8(LClampDToUint8* lir) {
17814 FloatRegister input = ToFloatRegister(lir->input());
17815 Register output = ToRegister(lir->output());
17816 masm.clampDoubleToUint8(input, output);
// Clamp an arbitrary boxed Value into the uint8 range. String inputs are
// converted via an out-of-line StringToNumber VM call; values the helper
// cannot clamp bail out of Ion.
void CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) {
  ValueOperand operand = ToValue(lir, LClampVToUint8::InputIndex);
  FloatRegister tempFloat = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  // OOL path: convert a string to a number; the result is preserved in
  // |tempFloat| across the VM call.
  using Fn = bool (*)(JSContext*, JSString*, double*);
  OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
      lir, ArgList(output), StoreFloatRegisterTo(tempFloat));
  Label* stringEntry = oolString->entry();
  Label* stringRejoin = oolString->rejoin();

  Label fails;
  masm.clampValueToUint8(operand, stringEntry, stringRejoin, output, tempFloat,
                         output, &fails);

  bailoutFrom(&fails, lir->snapshot());
}
17837 void CodeGenerator::visitInCache(LInCache* ins) {
17838 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
17840 ConstantOrRegister key =
17841 toConstantOrRegister(ins, LInCache::LhsIndex, ins->mir()->key()->type());
17842 Register object = ToRegister(ins->rhs());
17843 Register output = ToRegister(ins->output());
17844 Register temp = ToRegister(ins->temp0());
17846 IonInIC cache(liveRegs, key, object, output, temp);
17847 addIC(ins, allocateIC(cache));
// Implement `index in array` against dense elements: true iff the index is
// below the initialized length and the element is not a hole. Negative
// indices bail out when the MIR node requires the check.
void CodeGenerator::visitInArray(LInArray* lir) {
  const MInArray* mir = lir->mir();
  Register elements = ToRegister(lir->elements());
  Register initLength = ToRegister(lir->initLength());
  Register output = ToRegister(lir->output());

  Label falseBranch, done, trueBranch;

  if (lir->index()->isConstant()) {
    int32_t index = ToInt32(lir->index());

    if (index < 0) {
      // A constant negative index always bails out.
      MOZ_ASSERT(mir->needsNegativeIntCheck());
      bailout(lir->snapshot());
      return;
    }

    // False when index >= initialized length.
    masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index),
                  &falseBranch);

    // The assert guarantees the int32 offset math below cannot overflow.
    NativeObject::elementsSizeMustNotOverflow();
    Address address = Address(elements, index * sizeof(Value));
    masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
  } else {
    Register index = ToRegister(lir->index());

    // When a negative-index check is needed, a failed length check must
    // first distinguish negative from merely out-of-bounds indices.
    Label negativeIntCheck;
    Label* failedInitLength = &falseBranch;
    if (mir->needsNegativeIntCheck()) {
      failedInitLength = &negativeIntCheck;
    }

    masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);

    BaseObjectElementIndex address(elements, index);
    masm.branchTestMagic(Assembler::Equal, address, &falseBranch);

    if (mir->needsNegativeIntCheck()) {
      masm.jump(&trueBranch);
      masm.bind(&negativeIntCheck);

      // Negative indices bail out; non-negative out-of-bounds yield false.
      bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

      masm.jump(&falseBranch);
    }
  }

  masm.bind(&trueBranch);
  masm.move32(Imm32(1), output);
  masm.jump(&done);

  masm.bind(&falseBranch);
  masm.move32(Imm32(0), output);
  masm.bind(&done);
}
17906 void CodeGenerator::visitGuardElementNotHole(LGuardElementNotHole* lir) {
17907 Register elements = ToRegister(lir->elements());
17908 const LAllocation* index = lir->index();
17910 Label testMagic;
17911 if (index->isConstant()) {
17912 Address address(elements, ToInt32(index) * sizeof(js::Value));
17913 masm.branchTestMagic(Assembler::Equal, address, &testMagic);
17914 } else {
17915 BaseObjectElementIndex address(elements, ToRegister(index));
17916 masm.branchTestMagic(Assembler::Equal, address, &testMagic);
17918 bailoutFrom(&testMagic, lir->snapshot());
17921 void CodeGenerator::visitInstanceOfO(LInstanceOfO* ins) {
17922 Register protoReg = ToRegister(ins->rhs());
17923 emitInstanceOf(ins, protoReg);
17926 void CodeGenerator::visitInstanceOfV(LInstanceOfV* ins) {
17927 Register protoReg = ToRegister(ins->rhs());
17928 emitInstanceOf(ins, protoReg);
// Shared code for visitInstanceOfO/visitInstanceOfV: walks the LHS's
// prototype chain inline, falling back to a VM call (IsPrototypeOf) only
// when a lazy (proxy) prototype is encountered.
void CodeGenerator::emitInstanceOf(LInstruction* ins, Register protoReg) {
  // This path implements fun_hasInstance when the function's prototype is
  // known to be the object in protoReg

  Label done;
  Register output = ToRegister(ins->getDef(0));

  // If the lhs is a primitive, the result is false.
  Register objReg;
  if (ins->isInstanceOfV()) {
    Label isObject;
    ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
    masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
    masm.mov(ImmWord(0), output);
    masm.jump(&done);
    masm.bind(&isObject);
    // Unboxing may clobber |output|; objReg aliases it in that case.
    objReg = masm.extractObject(lhsValue, output);
  } else {
    objReg = ToRegister(ins->toInstanceOfO()->lhs());
  }

  // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
  // This follows the main loop of js::IsPrototypeOf, though additionally breaks
  // out of the loop on Proxy::LazyProto.

  // Load the lhs's prototype.
  masm.loadObjProto(objReg, output);

  Label testLazy;
  {
    Label loopPrototypeChain;
    masm.bind(&loopPrototypeChain);

    // Test for the target prototype object.
    Label notPrototypeObject;
    masm.branchPtr(Assembler::NotEqual, output, protoReg, &notPrototypeObject);
    masm.mov(ImmWord(1), output);
    masm.jump(&done);
    masm.bind(&notPrototypeObject);

    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

    // Test for nullptr or Proxy::LazyProto
    masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);

    // Load the current object's prototype.
    masm.loadObjProto(output, output);

    masm.jump(&loopPrototypeChain);
  }

  // Make a VM call if an object with a lazy proto was found on the prototype
  // chain. This currently occurs only for cross compartment wrappers, which
  // we do not expect to be compared with non-wrapper functions from this
  // compartment. Otherwise, we stopped on a nullptr prototype and the output
  // register is already correct.

  using Fn = bool (*)(JSContext*, HandleObject, JSObject*, bool*);
  auto* ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(protoReg, objReg),
                                           StoreRegisterTo(output));

  // Regenerate the original lhs object for the VM call.
  Label regenerate, *lazyEntry;
  if (objReg != output) {
    lazyEntry = ool->entry();
  } else {
    // |objReg| aliased |output| and was clobbered by the chain walk, so
    // re-extract the lhs object before entering the OOL path.
    masm.bind(&regenerate);
    lazyEntry = &regenerate;
    if (ins->isInstanceOfV()) {
      ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
      objReg = masm.extractObject(lhsValue, output);
    } else {
      objReg = ToRegister(ins->toInstanceOfO()->lhs());
    }
    MOZ_ASSERT(objReg == output);
    masm.jump(ool->entry());
  }

  masm.bind(&testLazy);
  masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Generic instanceof: attach an IonInstanceOfIC inline cache instead of
// emitting the check inline.
void CodeGenerator::visitInstanceOfCache(LInstanceOfCache* ins) {
  // The Lowering ensures that RHS is an object, and that LHS is a value.
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(ins, LInstanceOfCache::LHS));
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());

  IonInstanceOfIC ic(liveRegs, lhs, rhs, output);
  addIC(ins, allocateIC(ic));
}
// Call a DOM getter via the JSJitGetterOp ABI: optionally fast-path a value
// cached in a reserved slot, otherwise build a fake IonDOMGetter exit frame,
// switch realms if needed, and call the getter with an outparam on the stack.
void CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins) {
  const Register JSContextReg = ToRegister(ins->getJSContextReg());
  const Register ObjectReg = ToRegister(ins->getObjectReg());
  const Register PrivateReg = ToRegister(ins->getPrivReg());
  const Register ValueReg = ToRegister(ins->getValueReg());

  Label haveValue;
  if (ins->mir()->valueMayBeInSlot()) {
    size_t slot = ins->mir()->domMemberSlotIndex();
    // It's a bit annoying to redo these slot calculations, which duplcate
    // LSlots and a few other things like that, but I'm not sure there's a
    // way to reuse those here.
    //
    // If this ever gets fixed to work with proxies (by not assuming that
    // reserved slot indices, which is what domMemberSlotIndex() returns,
    // match fixed slot indices), we can reenable MGetDOMProperty for
    // proxies in IonBuilder.
    if (slot < NativeObject::MAX_FIXED_SLOTS) {
      masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
                     JSReturnOperand);
    } else {
      // It's a dynamic slot.
      slot -= NativeObject::MAX_FIXED_SLOTS;
      // Use PrivateReg as a scratch register for the slots pointer.
      masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
                   PrivateReg);
      masm.loadValue(Address(PrivateReg, slot * sizeof(js::Value)),
                     JSReturnOperand);
    }
    // A non-undefined slot value means the getter result is cached; skip the
    // call entirely.
    masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
  }

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Make space for the outparam. Pre-initialize it to UndefinedValue so we
  // can trace it at GC time.
  masm.Push(UndefinedValue());
  // We pass the pointer to our out param as an instance of
  // JSJitGetterCallArgs, since on the binary level it's the same thing.
  static_assert(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
  masm.moveStackPtrTo(ValueReg);

  masm.Push(ObjectReg);

  LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());

  // Rooting will happen at GC time.
  masm.moveStackPtrTo(ObjectReg);

  Realm* getterRealm = ins->mir()->getterRealm();
  if (gen->realm->realmPtr() != getterRealm) {
    // We use JSContextReg as scratch register here.
    masm.switchToRealm(getterRealm, JSContextReg);
  }

  uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
  masm.loadJSContext(JSContextReg);
  masm.enterFakeExitFrame(JSContextReg, JSContextReg,
                          ExitFrameType::IonDOMGetter);

  markSafepointAt(safepointOffset, ins);

  masm.setupAlignedABICall();
  masm.loadJSContext(JSContextReg);
  masm.passABIArg(JSContextReg);
  masm.passABIArg(ObjectReg);
  masm.passABIArg(PrivateReg);
  masm.passABIArg(ValueReg);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitGetterOp>(ins->mir()->fun()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (ins->mir()->isInfallible()) {
    // Infallible getter: no status bool to check; read the outparam directly.
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  // Switch back to the current realm if needed. Note: if the getter threw an
  // exception, the exception handler will do this.
  if (gen->realm->realmPtr() != getterRealm) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "Clobbering ReturnReg should not affect the return value");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && ins->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  masm.adjustStack(IonDOMExitFrameLayout::Size());

  masm.bind(&haveValue);

  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Load a DOM member stored in a fixed slot into a Value output register.
void CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins) {
  // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
  // use an LLoadFixedSlotV or some subclass of it for this case: that would
  // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
  // we'd have to duplicate a bunch of stuff we now get for free from
  // MGetDOMProperty.
  //
  // If this ever gets fixed to work with proxies (by not assuming that
  // reserved slot indices, which is what domMemberSlotIndex() returns,
  // match fixed slot indices), we can reenable MGetDOMMember for
  // proxies in IonBuilder.
  Register object = ToRegister(ins->object());
  size_t slot = ins->mir()->domMemberSlotIndex();
  ValueOperand result = ToOutValue(ins);

  masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
                 result);
}
// Load a DOM member stored in a fixed slot, unboxed into a typed register.
void CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins) {
  // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
  // use an LLoadFixedSlotT or some subclass of it for this case: that would
  // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
  // we'd have to duplicate a bunch of stuff we now get for free from
  // MGetDOMProperty.
  //
  // If this ever gets fixed to work with proxies (by not assuming that
  // reserved slot indices, which is what domMemberSlotIndex() returns,
  // match fixed slot indices), we can reenable MGetDOMMember for
  // proxies in IonBuilder.
  Register object = ToRegister(ins->object());
  size_t slot = ins->mir()->domMemberSlotIndex();
  AnyRegister result = ToAnyRegister(ins->getDef(0));
  MIRType type = ins->mir()->type();

  masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
                        type, result);
}
// Call a DOM setter via the JSJitSetterOp ABI: build a fake IonDOMSetter
// exit frame, switch realms if needed, call the setter with the argument
// Value on the stack, and throw if it reports failure.
void CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins) {
  const Register JSContextReg = ToRegister(ins->getJSContextReg());
  const Register ObjectReg = ToRegister(ins->getObjectReg());
  const Register PrivateReg = ToRegister(ins->getPrivReg());
  const Register ValueReg = ToRegister(ins->getValueReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Push the argument. Rooting will happen at GC time.
  ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
  masm.Push(argVal);
  // We pass the pointer to our out param as an instance of
  // JSJitGetterCallArgs, since on the binary level it's the same thing.
  static_assert(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
  masm.moveStackPtrTo(ValueReg);

  masm.Push(ObjectReg);

  LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());

  // Rooting will happen at GC time.
  masm.moveStackPtrTo(ObjectReg);

  Realm* setterRealm = ins->mir()->setterRealm();
  if (gen->realm->realmPtr() != setterRealm) {
    // We use JSContextReg as scratch register here.
    masm.switchToRealm(setterRealm, JSContextReg);
  }

  uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
  masm.loadJSContext(JSContextReg);
  masm.enterFakeExitFrame(JSContextReg, JSContextReg,
                          ExitFrameType::IonDOMSetter);

  markSafepointAt(safepointOffset, ins);

  masm.setupAlignedABICall();
  masm.loadJSContext(JSContextReg);
  masm.passABIArg(JSContextReg);
  masm.passABIArg(ObjectReg);
  masm.passABIArg(PrivateReg);
  masm.passABIArg(ValueReg);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitSetterOp>(ins->mir()->fun()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Switch back to the current realm if needed. Note: if the setter threw an
  // exception, the exception handler will do this.
  if (gen->realm->realmPtr() != setterRealm) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  masm.adjustStack(IonDOMExitFrameLayout::Size());

  MOZ_ASSERT(masm.framePushed() == initialStack);
}
18237 void CodeGenerator::visitLoadDOMExpandoValue(LLoadDOMExpandoValue* ins) {
18238 Register proxy = ToRegister(ins->proxy());
18239 ValueOperand out = ToOutValue(ins);
18241 masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
18242 out.scratchReg());
18243 masm.loadValue(Address(out.scratchReg(),
18244 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
18245 out);
// Load a DOM proxy's expando Value, bailing out if the recorded generation
// number no longer matches (i.e. the expando layout changed).
void CodeGenerator::visitLoadDOMExpandoValueGuardGeneration(
    LLoadDOMExpandoValueGuardGeneration* ins) {
  Register proxy = ToRegister(ins->proxy());
  ValueOperand out = ToOutValue(ins);

  Label bail;
  masm.loadDOMExpandoValueGuardGeneration(proxy, out,
                                          ins->mir()->expandoAndGeneration(),
                                          ins->mir()->generation(), &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Load a DOM proxy's expando Value through its ExpandoAndGeneration without
// checking the generation counter.
void CodeGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
    LLoadDOMExpandoValueIgnoreGeneration* ins) {
  Register proxy = ToRegister(ins->proxy());
  ValueOperand out = ToOutValue(ins);

  masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
               out.scratchReg());

  // Load the ExpandoAndGeneration* from the PrivateValue.
  masm.loadPrivate(
      Address(out.scratchReg(),
              js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
      out.scratchReg());

  // Load expandoAndGeneration->expando into the output Value register.
  masm.loadValue(
      Address(out.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), out);
}
// Guard that the expando is either missing (undefined) or an object with
// the expected shape; otherwise bail out.
void CodeGenerator::visitGuardDOMExpandoMissingOrGuardShape(
    LGuardDOMExpandoMissingOrGuardShape* ins) {
  Register temp = ToRegister(ins->temp0());
  ValueOperand input =
      ToValue(ins, LGuardDOMExpandoMissingOrGuardShape::InputIndex);

  // A missing expando (undefined) trivially passes the guard.
  Label done;
  masm.branchTestUndefined(Assembler::Equal, input, &done);

  masm.debugAssertIsObject(input);
  masm.unboxObject(input, temp);
  // The expando object is not used in this case, so we don't need Spectre
  // mitigations.
  Label bail;
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, temp,
                                              ins->mir()->shape(), &bail);
  bailoutFrom(&bail, ins->snapshot());

  masm.bind(&done);
}
18300 class OutOfLineIsCallable : public OutOfLineCodeBase<CodeGenerator> {
18301 Register object_;
18302 Register output_;
18304 public:
18305 OutOfLineIsCallable(Register object, Register output)
18306 : object_(object), output_(output) {}
18308 void accept(CodeGenerator* codegen) override {
18309 codegen->visitOutOfLineIsCallable(this);
18311 Register object() const { return object_; }
18312 Register output() const { return output_; }
18315 void CodeGenerator::visitIsCallableO(LIsCallableO* ins) {
18316 Register object = ToRegister(ins->object());
18317 Register output = ToRegister(ins->output());
18319 OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(object, output);
18320 addOutOfLineCode(ool, ins->mir());
18322 masm.isCallable(object, output, ool->entry());
18324 masm.bind(ool->rejoin());
// Compute whether a boxed Value is callable: non-objects are false; objects
// go through the same inline/OOL check as visitIsCallableO.
void CodeGenerator::visitIsCallableV(LIsCallableV* ins) {
  ValueOperand val = ToValue(ins, LIsCallableV::ObjectIndex);
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  Label notObject;
  masm.fallibleUnboxObject(val, temp, &notObject);

  OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(temp, output);
  addOutOfLineCode(ool, ins->mir());

  masm.isCallable(temp, output, ool->entry());
  masm.jump(ool->rejoin());

  // Primitive input: never callable.
  masm.bind(&notObject);
  masm.move32(Imm32(0), output);

  masm.bind(ool->rejoin());
}
18347 void CodeGenerator::visitOutOfLineIsCallable(OutOfLineIsCallable* ool) {
18348 Register object = ool->object();
18349 Register output = ool->output();
18351 saveVolatile(output);
18352 using Fn = bool (*)(JSObject* obj);
18353 masm.setupAlignedABICall();
18354 masm.passABIArg(object);
18355 masm.callWithABI<Fn, ObjectIsCallable>();
18356 masm.storeCallBoolResult(output);
18357 restoreVolatile(output);
18358 masm.jump(ool->rejoin());
18361 class OutOfLineIsConstructor : public OutOfLineCodeBase<CodeGenerator> {
18362 LIsConstructor* ins_;
18364 public:
18365 explicit OutOfLineIsConstructor(LIsConstructor* ins) : ins_(ins) {}
18367 void accept(CodeGenerator* codegen) override {
18368 codegen->visitOutOfLineIsConstructor(this);
18370 LIsConstructor* ins() const { return ins_; }
18373 void CodeGenerator::visitIsConstructor(LIsConstructor* ins) {
18374 Register object = ToRegister(ins->object());
18375 Register output = ToRegister(ins->output());
18377 OutOfLineIsConstructor* ool = new (alloc()) OutOfLineIsConstructor(ins);
18378 addOutOfLineCode(ool, ins->mir());
18380 masm.isConstructor(object, output, ool->entry());
18382 masm.bind(ool->rejoin());
18385 void CodeGenerator::visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool) {
18386 LIsConstructor* ins = ool->ins();
18387 Register object = ToRegister(ins->object());
18388 Register output = ToRegister(ins->output());
18390 saveVolatile(output);
18391 using Fn = bool (*)(JSObject* obj);
18392 masm.setupAlignedABICall();
18393 masm.passABIArg(object);
18394 masm.callWithABI<Fn, ObjectIsConstructor>();
18395 masm.storeCallBoolResult(output);
18396 restoreVolatile(output);
18397 masm.jump(ool->rejoin());
18400 void CodeGenerator::visitIsCrossRealmArrayConstructor(
18401 LIsCrossRealmArrayConstructor* ins) {
18402 Register object = ToRegister(ins->object());
18403 Register output = ToRegister(ins->output());
18405 masm.setIsCrossRealmArrayConstructor(object, output);
// Shared helper for IsArray on objects: true for plain ArrayObjects, the OOL
// VM call for proxies (which may wrap arrays), false otherwise. |notArray|,
// when non-null, is bound on the false path so a caller's primitive case can
// jump straight there (used by the Value variant).
static void EmitObjectIsArray(MacroAssembler& masm, OutOfLineCode* ool,
                              Register obj, Register output,
                              Label* notArray = nullptr) {
  // |output| temporarily holds the object's class pointer.
  masm.loadObjClassUnsafe(obj, output);

  Label isArray;
  masm.branchPtr(Assembler::Equal, output, ImmPtr(&ArrayObject::class_),
                 &isArray);

  // Branch to OOL path if it's a proxy.
  masm.branchTestClassIsProxy(true, output, ool->entry());

  if (notArray) {
    masm.bind(notArray);
  }
  masm.move32(Imm32(0), output);
  masm.jump(ool->rejoin());

  masm.bind(&isArray);
  masm.move32(Imm32(1), output);

  masm.bind(ool->rejoin());
}
18432 void CodeGenerator::visitIsArrayO(LIsArrayO* lir) {
18433 Register object = ToRegister(lir->object());
18434 Register output = ToRegister(lir->output());
18436 using Fn = bool (*)(JSContext*, HandleObject, bool*);
18437 OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
18438 lir, ArgList(object), StoreRegisterTo(output));
18439 EmitObjectIsArray(masm, ool, object, output);
// Array.isArray on a boxed Value input: primitives are never arrays, objects
// go through the shared EmitObjectIsArray helper.
void CodeGenerator::visitIsArrayV(LIsArrayV* lir) {
  ValueOperand val = ToValue(lir, LIsArrayV::ValueIndex);
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  Label notArray;
  masm.fallibleUnboxObject(val, temp, &notArray);

  using Fn = bool (*)(JSContext*, HandleObject, bool*);
  OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
      lir, ArgList(temp), StoreRegisterTo(output));
  // |notArray| is bound on the false path inside the helper.
  EmitObjectIsArray(masm, ool, temp, output, &notArray);
}
// Compute whether an object is a typed array. When the object may be a
// cross-compartment wrapper, proxies take a VM call to unwrap and re-check.
void CodeGenerator::visitIsTypedArray(LIsTypedArray* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;
  if (lir->mir()->isPossiblyWrapped()) {
    using Fn = bool (*)(JSContext*, JSObject*, bool*);
    ool = oolCallVM<Fn, jit::IsPossiblyWrappedTypedArray>(
        lir, ArgList(object), StoreRegisterTo(output));
  }

  Label notTypedArray;
  Label done;

  // |output| temporarily holds the class pointer.
  masm.loadObjClassUnsafe(object, output);
  masm.branchIfClassIsNotTypedArray(output, &notTypedArray);

  masm.move32(Imm32(1), output);
  masm.jump(&done);
  masm.bind(&notTypedArray);
  if (ool) {
    masm.branchTestClassIsProxy(true, output, ool->entry());
  }
  masm.move32(Imm32(0), output);
  masm.bind(&done);
  if (ool) {
    masm.bind(ool->rejoin());
  }
}
18486 void CodeGenerator::visitIsObject(LIsObject* ins) {
18487 Register output = ToRegister(ins->output());
18488 ValueOperand value = ToValue(ins, LIsObject::ObjectIndex);
18489 masm.testObjectSet(Assembler::Equal, value, output);
18492 void CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins) {
18493 ValueOperand value = ToValue(ins, LIsObjectAndBranch::Input);
18494 testObjectEmitBranch(Assembler::Equal, value, ins->ifTrue(), ins->ifFalse());
18497 void CodeGenerator::visitIsNullOrUndefined(LIsNullOrUndefined* ins) {
18498 Register output = ToRegister(ins->output());
18499 ValueOperand value = ToValue(ins, LIsNullOrUndefined::InputIndex);
18501 Label isNotNull, done;
18502 masm.branchTestNull(Assembler::NotEqual, value, &isNotNull);
18504 masm.move32(Imm32(1), output);
18505 masm.jump(&done);
18507 masm.bind(&isNotNull);
18508 masm.testUndefinedSet(Assembler::Equal, value, output);
18510 masm.bind(&done);
// Branch on whether the input Value is null or undefined, testing the tag
// once and falling through to the false block when it is next.
void CodeGenerator::visitIsNullOrUndefinedAndBranch(
    LIsNullOrUndefinedAndBranch* ins) {
  Label* ifTrue = getJumpLabelForBranch(ins->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(ins->ifFalse());
  ValueOperand value = ToValue(ins, LIsNullOrUndefinedAndBranch::Input);

  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  masm.branchTestNull(Assembler::Equal, tag, ifTrue);
  masm.branchTestUndefined(Assembler::Equal, tag, ifTrue);

  // Only emit the false jump when the false block doesn't fall through.
  if (!isNextBlock(ins->ifFalse()->lir())) {
    masm.jump(ifFalse);
  }
}
18530 void CodeGenerator::loadOutermostJSScript(Register reg) {
18531 // The "outermost" JSScript means the script that we are compiling
18532 // basically; this is not always the script associated with the
18533 // current basic block, which might be an inlined script.
18535 MIRGraph& graph = current->mir()->graph();
18536 MBasicBlock* entryBlock = graph.entryBlock();
18537 masm.movePtr(ImmGCPtr(entryBlock->info().script()), reg);
18540 void CodeGenerator::loadJSScriptForBlock(MBasicBlock* block, Register reg) {
18541 // The current JSScript means the script for the current
18542 // basic block. This may be an inlined script.
18544 JSScript* script = block->info().script();
18545 masm.movePtr(ImmGCPtr(script), reg);
18548 void CodeGenerator::visitHasClass(LHasClass* ins) {
18549 Register lhs = ToRegister(ins->lhs());
18550 Register output = ToRegister(ins->output());
18552 masm.loadObjClassUnsafe(lhs, output);
18553 masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()),
18554 output);
// Guard that the object has exactly the expected class; bail out otherwise.
void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp0());

  // branchTestObjClass may zero the object register on speculative paths
  // (we should have a defineReuseInput allocation in this case).
  Register spectreRegToZero = lhs;

  Label notEqual;

  masm.branchTestObjClass(Assembler::NotEqual, lhs, ins->mir()->getClass(),
                          temp, spectreRegToZero, &notEqual);

  // Can't return null-return here, so bail.
  bailoutFrom(&notEqual, ins->snapshot());
}
// Guard that the object has one of the two expected classes; bail otherwise.
void CodeGenerator::visitGuardToEitherClass(LGuardToEitherClass* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp0());

  // branchTestObjClass may zero the object register on speculative paths
  // (we should have a defineReuseInput allocation in this case).
  Register spectreRegToZero = lhs;

  Label notEqual;

  masm.branchTestObjClass(Assembler::NotEqual, lhs,
                          {ins->mir()->getClass1(), ins->mir()->getClass2()},
                          temp, spectreRegToZero, &notEqual);

  // Can't return null-return here, so bail.
  bailoutFrom(&notEqual, ins->snapshot());
}
// Guard that the object is a function (either function class); bail otherwise.
void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp0());

  // branchTestObjClass may zero the object register on speculative paths
  // (we should have a defineReuseInput allocation in this case).
  Register spectreRegToZero = lhs;

  Label notEqual;

  masm.branchTestObjIsFunction(Assembler::NotEqual, lhs, temp, spectreRegToZero,
                               &notEqual);

  // Can't return null-return here, so bail.
  bailoutFrom(&notEqual, ins->snapshot());
}
// Call js::ObjectClassToString for Object.prototype.toString-style class
// names; a null return signals OOM, which triggers a bailout.
void CodeGenerator::visitObjectClassToString(LObjectClassToString* lir) {
  Register obj = ToRegister(lir->lhs());
  Register temp = ToRegister(lir->temp0());

  using Fn = JSString* (*)(JSContext*, JSObject*);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::ObjectClassToString>();

  bailoutCmpPtr(Assembler::Equal, ReturnReg, ImmWord(0), lir->snapshot());
}
// Wasm parameters already live in their ABI-assigned locations; nothing to do.
void CodeGenerator::visitWasmParameter(LWasmParameter* lir) {}
// Same as visitWasmParameter, for 64-bit integer parameters.
void CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir) {}
// Jump to the function epilogue; elided when this block falls through to it.
void CodeGenerator::visitWasmReturn(LWasmReturn* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}
// 64-bit variant of visitWasmReturn; the value is already in the return regs.
void CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}
// Void return: just branch to the epilogue unless it falls through.
void CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}
// Debug-build check that an integer register's value lies within the range
// computed by range analysis; hits assumeUnreachable on violation.
void CodeGenerator::emitAssertRangeI(MIRType type, const Range* r,
                                     Register input) {
  // Check the lower bound.
  if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }

  // Check the upper bound.
  if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
  // r->exponent(), there's nothing to check, because if we ended up in the
  // integer range checking code, the value is already in an integer register
  // in the integer range.
}
// Debug-build check that a double register's value satisfies the bounds,
// negative-zero, exponent, NaN, and infinity facts recorded by range
// analysis; hits assumeUnreachable on violation. |temp| is a scratch double.
void CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input,
                                     FloatRegister temp) {
  // Check the lower bound.
  if (r->hasInt32LowerBound()) {
    Label success;
    masm.loadConstantDouble(r->lower(), temp);
    if (r->canBeNaN()) {
      // NaN compares unordered; let it pass the bound checks.
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &success);
    masm.assumeUnreachable(
        "Double input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }
  // Check the upper bound.
  if (r->hasInt32UpperBound()) {
    Label success;
    masm.loadConstantDouble(r->upper(), temp);
    if (r->canBeNaN()) {
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
    masm.assumeUnreachable(
        "Double input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // This code does not yet check r->canHaveFractionalPart(). This would require
  // new assembler interfaces to make rounding instructions available.

  if (!r->canBeNegativeZero()) {
    Label success;

    // First, test for being equal to 0.0, which also includes -0.0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp,
                      &success);

    // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
    // -Infinity instead of Infinity.
    masm.loadConstantDouble(1.0, temp);
    masm.divDouble(input, temp);
    masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);

    masm.assumeUnreachable("Input shouldn't be negative zero.");

    masm.bind(&success);
  }

  if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
      r->exponent() < FloatingPoint<double>::kExponentBias) {
    // Check the bounds implied by the maximum exponent.
    Label exponentLoOk;
    masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp,
                      &exponentLoOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentLoOk);

    Label exponentHiOk;
    masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &exponentHiOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentHiOk);
  } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
    // If we think the value can't be NaN, check that it isn't.
    Label notnan;
    masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
    masm.assumeUnreachable("Input shouldn't be NaN.");
    masm.bind(&notnan);

    // If we think the value also can't be an infinity, check that it isn't.
    if (!r->canBeInfiniteOrNaN()) {
      Label notposinf;
      masm.loadConstantDouble(PositiveInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
      masm.assumeUnreachable("Input shouldn't be +Inf.");
      masm.bind(&notposinf);

      Label notneginf;
      masm.loadConstantDouble(NegativeInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
      masm.assumeUnreachable("Input shouldn't be -Inf.");
      masm.bind(&notneginf);
    }
  }
}
// Debug-only check that the object's class matches the class recorded by MIR.
void CodeGenerator::visitAssertClass(LAssertClass* ins) {
  Register obj = ToRegister(ins->input());
  Register temp = ToRegister(ins->getTemp(0));

  Label success;
  if (ins->mir()->getClass() == &FunctionClass) {
    // Allow both possible function classes here.
    masm.branchTestObjIsFunctionNoSpectreMitigations(Assembler::Equal, obj,
                                                     temp, &success);
  } else {
    masm.branchTestObjClassNoSpectreMitigations(
        Assembler::Equal, obj, ins->mir()->getClass(), temp, &success);
  }
  masm.assumeUnreachable("Wrong KnownClass during run-time");
  masm.bind(&success);
}
18797 void CodeGenerator::visitAssertShape(LAssertShape* ins) {
18798 Register obj = ToRegister(ins->input());
18800 Label success;
18801 masm.branchTestObjShapeNoSpectreMitigations(Assembler::Equal, obj,
18802 ins->mir()->shape(), &success);
18803 masm.assumeUnreachable("Wrong Shape during run-time");
18804 masm.bind(&success);
18807 void CodeGenerator::visitAssertRangeI(LAssertRangeI* ins) {
18808 Register input = ToRegister(ins->input());
18809 const Range* r = ins->range();
18811 emitAssertRangeI(ins->mir()->input()->type(), r, input);
18814 void CodeGenerator::visitAssertRangeD(LAssertRangeD* ins) {
18815 FloatRegister input = ToFloatRegister(ins->input());
18816 FloatRegister temp = ToFloatRegister(ins->temp());
18817 const Range* r = ins->range();
18819 emitAssertRangeD(r, input, temp);
18822 void CodeGenerator::visitAssertRangeF(LAssertRangeF* ins) {
18823 FloatRegister input = ToFloatRegister(ins->input());
18824 FloatRegister temp = ToFloatRegister(ins->temp());
18825 FloatRegister temp2 = ToFloatRegister(ins->temp2());
18827 const Range* r = ins->range();
18829 masm.convertFloat32ToDouble(input, temp);
18830 emitAssertRangeD(r, temp, temp2);
18833 void CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) {
18834 const Range* r = ins->range();
18835 const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
18836 Label done;
18839 ScratchTagScope tag(masm, value);
18840 masm.splitTagForTest(value, tag);
18843 Label isNotInt32;
18844 masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
18846 ScratchTagScopeRelease _(&tag);
18847 Register unboxInt32 = ToTempUnboxRegister(ins->temp());
18848 Register input = masm.extractInt32(value, unboxInt32);
18849 emitAssertRangeI(MIRType::Int32, r, input);
18850 masm.jump(&done);
18852 masm.bind(&isNotInt32);
18856 Label isNotDouble;
18857 masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
18859 ScratchTagScopeRelease _(&tag);
18860 FloatRegister input = ToFloatRegister(ins->floatTemp1());
18861 FloatRegister temp = ToFloatRegister(ins->floatTemp2());
18862 masm.unboxDouble(value, input);
18863 emitAssertRangeD(r, input, temp);
18864 masm.jump(&done);
18866 masm.bind(&isNotDouble);
18870 masm.assumeUnreachable("Incorrect range for Value.");
18871 masm.bind(&done);
18874 void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) {
18875 using Fn = bool (*)(JSContext*);
18876 OutOfLineCode* ool =
18877 oolCallVM<Fn, InterruptCheck>(lir, ArgList(), StoreNothing());
18879 const void* interruptAddr = gen->runtime->addressOfInterruptBits();
18880 masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0),
18881 ool->entry());
18882 masm.bind(ool->rejoin());
18885 void CodeGenerator::visitOutOfLineResumableWasmTrap(
18886 OutOfLineResumableWasmTrap* ool) {
18887 LInstruction* lir = ool->lir();
18888 masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
18890 markSafepointAt(masm.currentOffset(), lir);
18892 // Note that masm.framePushed() doesn't include the register dump area.
18893 // That will be taken into account when the StackMap is created from the
18894 // LSafepoint.
18895 lir->safepoint()->setFramePushedAtStackMapBase(ool->framePushed());
18896 lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::Trap);
18898 masm.jump(ool->rejoin());
18901 void CodeGenerator::visitOutOfLineAbortingWasmTrap(
18902 OutOfLineAbortingWasmTrap* ool) {
18903 masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
18906 void CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir) {
18907 MOZ_ASSERT(gen->compilingWasm());
18909 OutOfLineResumableWasmTrap* ool = new (alloc()) OutOfLineResumableWasmTrap(
18910 lir, masm.framePushed(), lir->mir()->bytecodeOffset(),
18911 wasm::Trap::CheckInterrupt);
18912 addOutOfLineCode(ool, lir->mir());
18913 masm.branch32(
18914 Assembler::NotEqual,
18915 Address(ToRegister(lir->instance()), wasm::Instance::offsetOfInterrupt()),
18916 Imm32(0), ool->entry());
18917 masm.bind(ool->rejoin());
18920 void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
18921 MOZ_ASSERT(gen->compilingWasm());
18922 const MWasmTrap* mir = lir->mir();
18924 masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
18927 void CodeGenerator::visitWasmTrapIfNull(LWasmTrapIfNull* lir) {
18928 MOZ_ASSERT(gen->compilingWasm());
18929 const MWasmTrapIfNull* mir = lir->mir();
18930 Label nonNull;
18931 Register ref = ToRegister(lir->ref());
18933 masm.branchWasmAnyRefIsNull(false, ref, &nonNull);
18934 masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
18935 masm.bind(&nonNull);
18938 void CodeGenerator::visitWasmRefIsSubtypeOfAbstract(
18939 LWasmRefIsSubtypeOfAbstract* ins) {
18940 MOZ_ASSERT(gen->compilingWasm());
18942 const MWasmRefIsSubtypeOfAbstract* mir = ins->mir();
18943 MOZ_ASSERT(!mir->destType().isTypeRef());
18945 Register ref = ToRegister(ins->ref());
18946 Register superSTV = Register::Invalid();
18947 Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
18948 Register scratch2 = Register::Invalid();
18949 Register result = ToRegister(ins->output());
18950 Label onSuccess;
18951 Label onFail;
18952 Label join;
18953 masm.branchWasmRefIsSubtype(ref, mir->sourceType(), mir->destType(),
18954 &onSuccess, /*onSuccess=*/true, superSTV,
18955 scratch1, scratch2);
18956 masm.bind(&onFail);
18957 masm.xor32(result, result);
18958 masm.jump(&join);
18959 masm.bind(&onSuccess);
18960 masm.move32(Imm32(1), result);
18961 masm.bind(&join);
18964 void CodeGenerator::visitWasmRefIsSubtypeOfConcrete(
18965 LWasmRefIsSubtypeOfConcrete* ins) {
18966 MOZ_ASSERT(gen->compilingWasm());
18968 const MWasmRefIsSubtypeOfConcrete* mir = ins->mir();
18969 MOZ_ASSERT(mir->destType().isTypeRef());
18971 Register ref = ToRegister(ins->ref());
18972 Register superSTV = ToRegister(ins->superSTV());
18973 Register scratch1 = ToRegister(ins->temp0());
18974 Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
18975 Register result = ToRegister(ins->output());
18976 Label onSuccess;
18977 Label join;
18978 masm.branchWasmRefIsSubtype(ref, mir->sourceType(), mir->destType(),
18979 &onSuccess, /*onSuccess=*/true, superSTV,
18980 scratch1, scratch2);
18981 masm.move32(Imm32(0), result);
18982 masm.jump(&join);
18983 masm.bind(&onSuccess);
18984 masm.move32(Imm32(1), result);
18985 masm.bind(&join);
18988 void CodeGenerator::visitWasmRefIsSubtypeOfAbstractAndBranch(
18989 LWasmRefIsSubtypeOfAbstractAndBranch* ins) {
18990 MOZ_ASSERT(gen->compilingWasm());
18991 Register ref = ToRegister(ins->ref());
18992 Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
18993 Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
18994 Label* onFail = getJumpLabelForBranch(ins->ifFalse());
18995 masm.branchWasmRefIsSubtype(
18996 ref, ins->sourceType(), ins->destType(), onSuccess, /*onSuccess=*/true,
18997 Register::Invalid(), scratch1, Register::Invalid());
18998 masm.jump(onFail);
19001 void CodeGenerator::visitWasmRefIsSubtypeOfConcreteAndBranch(
19002 LWasmRefIsSubtypeOfConcreteAndBranch* ins) {
19003 MOZ_ASSERT(gen->compilingWasm());
19004 Register ref = ToRegister(ins->ref());
19005 Register superSTV = ToRegister(ins->superSTV());
19006 Register scratch1 = ToRegister(ins->temp0());
19007 Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
19008 Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
19009 Label* onFail = getJumpLabelForBranch(ins->ifFalse());
19010 masm.branchWasmRefIsSubtype(ref, ins->sourceType(), ins->destType(),
19011 onSuccess, /*onSuccess=*/true, superSTV, scratch1,
19012 scratch2);
19013 masm.jump(onFail);
19016 void CodeGenerator::callWasmStructAllocFun(LInstruction* lir,
19017 wasm::SymbolicAddress fun,
19018 Register typeDefData,
19019 Register output) {
19020 masm.Push(InstanceReg);
19021 int32_t framePushedAfterInstance = masm.framePushed();
19022 saveLive(lir);
19024 masm.setupWasmABICall();
19025 masm.passABIArg(InstanceReg);
19026 masm.passABIArg(typeDefData);
19027 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
19028 CodeOffset offset =
19029 masm.callWithABI(wasm::BytecodeOffset(0), fun,
19030 mozilla::Some(instanceOffset), ABIType::General);
19031 masm.storeCallPointerResult(output);
19033 markSafepointAt(offset.offset(), lir);
19034 lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
19035 lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);
19037 restoreLive(lir);
19038 masm.Pop(InstanceReg);
19039 #if JS_CODEGEN_ARM64
19040 masm.syncStackPtr();
19041 #endif
19044 // Out-of-line path to allocate wasm GC structs
19045 class OutOfLineWasmNewStruct : public OutOfLineCodeBase<CodeGenerator> {
19046 LInstruction* lir_;
19047 wasm::SymbolicAddress fun_;
19048 Register typeDefData_;
19049 Register output_;
19051 public:
19052 OutOfLineWasmNewStruct(LInstruction* lir, wasm::SymbolicAddress fun,
19053 Register typeDefData, Register output)
19054 : lir_(lir), fun_(fun), typeDefData_(typeDefData), output_(output) {}
19056 void accept(CodeGenerator* codegen) override {
19057 codegen->visitOutOfLineWasmNewStruct(this);
19060 LInstruction* lir() const { return lir_; }
19061 wasm::SymbolicAddress fun() const { return fun_; }
19062 Register typeDefData() const { return typeDefData_; }
19063 Register output() const { return output_; }
19066 void CodeGenerator::visitOutOfLineWasmNewStruct(OutOfLineWasmNewStruct* ool) {
19067 callWasmStructAllocFun(ool->lir(), ool->fun(), ool->typeDefData(),
19068 ool->output());
19069 masm.jump(ool->rejoin());
19072 void CodeGenerator::visitWasmNewStructObject(LWasmNewStructObject* lir) {
19073 MOZ_ASSERT(gen->compilingWasm());
19075 MWasmNewStructObject* mir = lir->mir();
19077 Register typeDefData = ToRegister(lir->typeDefData());
19078 Register output = ToRegister(lir->output());
19080 if (mir->isOutline()) {
19081 wasm::SymbolicAddress fun = mir->zeroFields()
19082 ? wasm::SymbolicAddress::StructNewOOL_true
19083 : wasm::SymbolicAddress::StructNewOOL_false;
19084 callWasmStructAllocFun(lir, fun, typeDefData, output);
19085 } else {
19086 wasm::SymbolicAddress fun = mir->zeroFields()
19087 ? wasm::SymbolicAddress::StructNewIL_true
19088 : wasm::SymbolicAddress::StructNewIL_false;
19090 Register instance = ToRegister(lir->instance());
19091 MOZ_ASSERT(instance == InstanceReg);
19093 auto ool =
19094 new (alloc()) OutOfLineWasmNewStruct(lir, fun, typeDefData, output);
19095 addOutOfLineCode(ool, lir->mir());
19097 Register temp1 = ToRegister(lir->temp0());
19098 Register temp2 = ToRegister(lir->temp1());
19099 masm.wasmNewStructObject(instance, output, typeDefData, temp1, temp2,
19100 ool->entry(), mir->allocKind(), mir->zeroFields());
19102 masm.bind(ool->rejoin());
19106 void CodeGenerator::callWasmArrayAllocFun(LInstruction* lir,
19107 wasm::SymbolicAddress fun,
19108 Register numElements,
19109 Register typeDefData, Register output,
19110 wasm::BytecodeOffset bytecodeOffset) {
19111 masm.Push(InstanceReg);
19112 int32_t framePushedAfterInstance = masm.framePushed();
19113 saveLive(lir);
19115 masm.setupWasmABICall();
19116 masm.passABIArg(InstanceReg);
19117 masm.passABIArg(numElements);
19118 masm.passABIArg(typeDefData);
19119 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
19120 CodeOffset offset = masm.callWithABI(
19121 bytecodeOffset, fun, mozilla::Some(instanceOffset), ABIType::General);
19122 masm.storeCallPointerResult(output);
19124 markSafepointAt(offset.offset(), lir);
19125 lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
19126 lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);
19128 restoreLive(lir);
19129 masm.Pop(InstanceReg);
19130 #if JS_CODEGEN_ARM64
19131 masm.syncStackPtr();
19132 #endif
19134 Label ok;
19135 masm.branchPtr(Assembler::NonZero, output, ImmWord(0), &ok);
19136 masm.wasmTrap(wasm::Trap::ThrowReported, bytecodeOffset);
19137 masm.bind(&ok);
19140 // Out-of-line path to allocate wasm GC arrays
19141 class OutOfLineWasmNewArray : public OutOfLineCodeBase<CodeGenerator> {
19142 LInstruction* lir_;
19143 wasm::SymbolicAddress fun_;
19144 Register numElementsReg_;
19145 mozilla::Maybe<uint32_t> numElements_;
19146 Register typeDefData_;
19147 Register output_;
19148 wasm::BytecodeOffset bytecodeOffset_;
19150 public:
19151 OutOfLineWasmNewArray(LInstruction* lir, wasm::SymbolicAddress fun,
19152 Register numElementsReg,
19153 mozilla::Maybe<uint32_t> numElements,
19154 Register typeDefData, Register output,
19155 wasm::BytecodeOffset bytecodeOffset)
19156 : lir_(lir),
19157 fun_(fun),
19158 numElementsReg_(numElementsReg),
19159 numElements_(numElements),
19160 typeDefData_(typeDefData),
19161 output_(output),
19162 bytecodeOffset_(bytecodeOffset) {}
19164 void accept(CodeGenerator* codegen) override {
19165 codegen->visitOutOfLineWasmNewArray(this);
19168 LInstruction* lir() const { return lir_; }
19169 wasm::SymbolicAddress fun() const { return fun_; }
19170 Register numElementsReg() const { return numElementsReg_; }
19171 mozilla::Maybe<uint32_t> numElements() const { return numElements_; }
19172 Register typeDefData() const { return typeDefData_; }
19173 Register output() const { return output_; }
19174 wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
19177 void CodeGenerator::visitOutOfLineWasmNewArray(OutOfLineWasmNewArray* ool) {
19178 if (ool->numElements().isSome()) {
19179 masm.move32(Imm32(ool->numElements().value()), ool->numElementsReg());
19181 callWasmArrayAllocFun(ool->lir(), ool->fun(), ool->numElementsReg(),
19182 ool->typeDefData(), ool->output(),
19183 ool->bytecodeOffset());
19184 masm.jump(ool->rejoin());
19187 void CodeGenerator::visitWasmNewArrayObject(LWasmNewArrayObject* lir) {
19188 MOZ_ASSERT(gen->compilingWasm());
19190 MWasmNewArrayObject* mir = lir->mir();
19192 Register typeDefData = ToRegister(lir->typeDefData());
19193 Register output = ToRegister(lir->output());
19194 Register temp1 = ToRegister(lir->temp0());
19195 Register temp2 = ToRegister(lir->temp1());
19197 wasm::SymbolicAddress fun = mir->zeroFields()
19198 ? wasm::SymbolicAddress::ArrayNew_true
19199 : wasm::SymbolicAddress::ArrayNew_false;
19201 if (lir->numElements()->isConstant()) {
19202 // numElements is constant, so we can do optimized code generation.
19203 uint32_t numElements = lir->numElements()->toConstant()->toInt32();
19204 CheckedUint32 storageBytes =
19205 WasmArrayObject::calcStorageBytesChecked(mir->elemSize(), numElements);
19206 if (!storageBytes.isValid() ||
19207 storageBytes.value() > WasmArrayObject_MaxInlineBytes) {
19208 // Too much array data to store inline. Immediately perform an instance
19209 // call to handle the out-of-line storage.
19210 masm.move32(Imm32(numElements), temp1);
19211 callWasmArrayAllocFun(lir, fun, temp1, typeDefData, output,
19212 mir->bytecodeOffset());
19213 } else {
19214 // storageBytes is small enough to be stored inline in WasmArrayObject.
19215 // Attempt a nursery allocation and fall back to an instance call if it
19216 // fails.
19217 Register instance = ToRegister(lir->instance());
19218 MOZ_ASSERT(instance == InstanceReg);
19220 auto ool = new (alloc())
19221 OutOfLineWasmNewArray(lir, fun, temp1, mozilla::Some(numElements),
19222 typeDefData, output, mir->bytecodeOffset());
19223 addOutOfLineCode(ool, lir->mir());
19225 masm.wasmNewArrayObjectFixed(instance, output, typeDefData, temp1, temp2,
19226 ool->entry(), numElements,
19227 storageBytes.value(), mir->zeroFields());
19229 masm.bind(ool->rejoin());
19231 } else {
19232 // numElements is dynamic. Attempt a dynamic inline-storage nursery
19233 // allocation and fall back to an instance call if it fails.
19234 Register instance = ToRegister(lir->instance());
19235 MOZ_ASSERT(instance == InstanceReg);
19236 Register numElements = ToRegister(lir->numElements());
19238 auto ool = new (alloc())
19239 OutOfLineWasmNewArray(lir, fun, numElements, mozilla::Nothing(),
19240 typeDefData, output, mir->bytecodeOffset());
19241 addOutOfLineCode(ool, lir->mir());
19243 masm.wasmNewArrayObject(instance, output, numElements, typeDefData, temp1,
19244 ool->entry(), mir->elemSize(), mir->zeroFields());
19246 masm.bind(ool->rejoin());
19250 void CodeGenerator::visitWasmHeapReg(LWasmHeapReg* ins) {
19251 #ifdef WASM_HAS_HEAPREG
19252 masm.movePtr(HeapReg, ToRegister(ins->output()));
19253 #else
19254 MOZ_CRASH();
19255 #endif
19258 void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
19259 const MWasmBoundsCheck* mir = ins->mir();
19260 Register ptr = ToRegister(ins->ptr());
19261 Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
19262 // When there are no spectre mitigations in place, branching out-of-line to
19263 // the trap is a big performance win, but with mitigations it's trickier. See
19264 // bug 1680243.
19265 if (JitOptions.spectreIndexMasking) {
19266 Label ok;
19267 masm.wasmBoundsCheck32(Assembler::Below, ptr, boundsCheckLimit, &ok);
19268 masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
19269 masm.bind(&ok);
19270 } else {
19271 OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
19272 mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
19273 addOutOfLineCode(ool, mir);
19274 masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
19275 ool->entry());
19279 void CodeGenerator::visitWasmBoundsCheck64(LWasmBoundsCheck64* ins) {
19280 const MWasmBoundsCheck* mir = ins->mir();
19281 Register64 ptr = ToRegister64(ins->ptr());
19282 Register64 boundsCheckLimit = ToRegister64(ins->boundsCheckLimit());
19283 // See above.
19284 if (JitOptions.spectreIndexMasking) {
19285 Label ok;
19286 masm.wasmBoundsCheck64(Assembler::Below, ptr, boundsCheckLimit, &ok);
19287 masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
19288 masm.bind(&ok);
19289 } else {
19290 OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
19291 mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
19292 addOutOfLineCode(ool, mir);
19293 masm.wasmBoundsCheck64(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
19294 ool->entry());
19298 void CodeGenerator::visitWasmBoundsCheckRange32(LWasmBoundsCheckRange32* ins) {
19299 const MWasmBoundsCheckRange32* mir = ins->mir();
19300 Register index = ToRegister(ins->index());
19301 Register length = ToRegister(ins->length());
19302 Register limit = ToRegister(ins->limit());
19303 Register tmp = ToRegister(ins->temp0());
19305 masm.wasmBoundsCheckRange32(index, length, limit, tmp, mir->bytecodeOffset());
19308 void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
19309 const MWasmAlignmentCheck* mir = ins->mir();
19310 Register ptr = ToRegister(ins->ptr());
19311 OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
19312 mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
19313 addOutOfLineCode(ool, mir);
19314 masm.branchTest32(Assembler::NonZero, ptr, Imm32(mir->byteSize() - 1),
19315 ool->entry());
19318 void CodeGenerator::visitWasmAlignmentCheck64(LWasmAlignmentCheck64* ins) {
19319 const MWasmAlignmentCheck* mir = ins->mir();
19320 Register64 ptr = ToRegister64(ins->ptr());
19321 #ifdef JS_64BIT
19322 Register r = ptr.reg;
19323 #else
19324 Register r = ptr.low;
19325 #endif
19326 OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
19327 mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
19328 addOutOfLineCode(ool, mir);
19329 masm.branchTestPtr(Assembler::NonZero, r, Imm32(mir->byteSize() - 1),
19330 ool->entry());
19333 void CodeGenerator::visitWasmLoadInstance(LWasmLoadInstance* ins) {
19334 switch (ins->mir()->type()) {
19335 case MIRType::WasmAnyRef:
19336 case MIRType::Pointer:
19337 masm.loadPtr(Address(ToRegister(ins->instance()), ins->mir()->offset()),
19338 ToRegister(ins->output()));
19339 break;
19340 case MIRType::Int32:
19341 masm.load32(Address(ToRegister(ins->instance()), ins->mir()->offset()),
19342 ToRegister(ins->output()));
19343 break;
19344 default:
19345 MOZ_CRASH("MIRType not supported in WasmLoadInstance");
19349 void CodeGenerator::visitWasmLoadInstance64(LWasmLoadInstance64* ins) {
19350 MOZ_ASSERT(ins->mir()->type() == MIRType::Int64);
19351 masm.load64(Address(ToRegister(ins->instance()), ins->mir()->offset()),
19352 ToOutRegister64(ins));
19355 void CodeGenerator::incrementWarmUpCounter(AbsoluteAddress warmUpCount,
19356 JSScript* script, Register tmp) {
19357 // The code depends on the JitScript* not being discarded without also
19358 // invalidating Ion code. Assert this.
19359 #ifdef DEBUG
19360 Label ok;
19361 masm.movePtr(ImmGCPtr(script), tmp);
19362 masm.loadJitScript(tmp, tmp);
19363 masm.branchPtr(Assembler::Equal, tmp, ImmPtr(script->jitScript()), &ok);
19364 masm.assumeUnreachable("Didn't find JitScript?");
19365 masm.bind(&ok);
19366 #endif
19368 masm.load32(warmUpCount, tmp);
19369 masm.add32(Imm32(1), tmp);
19370 masm.store32(tmp, warmUpCount);
19373 void CodeGenerator::visitIncrementWarmUpCounter(LIncrementWarmUpCounter* ins) {
19374 Register tmp = ToRegister(ins->temp0());
19376 AbsoluteAddress warmUpCount =
19377 AbsoluteAddress(ins->mir()->script()->jitScript())
19378 .offset(JitScript::offsetOfWarmUpCount());
19379 incrementWarmUpCounter(warmUpCount, ins->mir()->script(), tmp);
19382 void CodeGenerator::visitLexicalCheck(LLexicalCheck* ins) {
19383 ValueOperand inputValue = ToValue(ins, LLexicalCheck::InputIndex);
19384 Label bail;
19385 masm.branchTestMagicValue(Assembler::Equal, inputValue,
19386 JS_UNINITIALIZED_LEXICAL, &bail);
19387 bailoutFrom(&bail, ins->snapshot());
19390 void CodeGenerator::visitThrowRuntimeLexicalError(
19391 LThrowRuntimeLexicalError* ins) {
19392 pushArg(Imm32(ins->mir()->errorNumber()));
19394 using Fn = bool (*)(JSContext*, unsigned);
19395 callVM<Fn, jit::ThrowRuntimeLexicalError>(ins);
19398 void CodeGenerator::visitThrowMsg(LThrowMsg* ins) {
19399 pushArg(Imm32(static_cast<int32_t>(ins->mir()->throwMsgKind())));
19401 using Fn = bool (*)(JSContext*, unsigned);
19402 callVM<Fn, js::ThrowMsgOperation>(ins);
19405 void CodeGenerator::visitGlobalDeclInstantiation(
19406 LGlobalDeclInstantiation* ins) {
19407 pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
19408 pushArg(ImmGCPtr(ins->mir()->block()->info().script()));
19410 using Fn = bool (*)(JSContext*, HandleScript, const jsbytecode*);
19411 callVM<Fn, GlobalDeclInstantiationFromIon>(ins);
19414 void CodeGenerator::visitDebugger(LDebugger* ins) {
19415 Register cx = ToRegister(ins->temp0());
19417 masm.loadJSContext(cx);
19418 using Fn = bool (*)(JSContext* cx);
19419 masm.setupAlignedABICall();
19420 masm.passABIArg(cx);
19421 masm.callWithABI<Fn, GlobalHasLiveOnDebuggerStatement>();
19423 Label bail;
19424 masm.branchIfTrueBool(ReturnReg, &bail);
19425 bailoutFrom(&bail, ins->snapshot());
19428 void CodeGenerator::visitNewTarget(LNewTarget* ins) {
19429 ValueOperand output = ToOutValue(ins);
19431 // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
19432 Label notConstructing, done;
19433 Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
19434 masm.branchTestPtr(Assembler::Zero, calleeToken,
19435 Imm32(CalleeToken_FunctionConstructing), &notConstructing);
19437 Register argvLen = output.scratchReg();
19438 masm.loadNumActualArgs(FramePointer, argvLen);
19440 Label useNFormals;
19442 size_t numFormalArgs = ins->mirRaw()->block()->info().nargs();
19443 masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs), &useNFormals);
19445 size_t argsOffset = JitFrameLayout::offsetOfActualArgs();
19447 BaseValueIndex newTarget(FramePointer, argvLen, argsOffset);
19448 masm.loadValue(newTarget, output);
19449 masm.jump(&done);
19452 masm.bind(&useNFormals);
19455 Address newTarget(FramePointer,
19456 argsOffset + (numFormalArgs * sizeof(Value)));
19457 masm.loadValue(newTarget, output);
19458 masm.jump(&done);
19461 // else output = undefined
19462 masm.bind(&notConstructing);
19463 masm.moveValue(UndefinedValue(), output);
19464 masm.bind(&done);
19467 void CodeGenerator::visitCheckReturn(LCheckReturn* ins) {
19468 ValueOperand returnValue = ToValue(ins, LCheckReturn::ReturnValueIndex);
19469 ValueOperand thisValue = ToValue(ins, LCheckReturn::ThisValueIndex);
19470 ValueOperand output = ToOutValue(ins);
19472 using Fn = bool (*)(JSContext*, HandleValue);
19473 OutOfLineCode* ool = oolCallVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>(
19474 ins, ArgList(returnValue), StoreNothing());
19476 Label noChecks;
19477 masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
19478 masm.branchTestUndefined(Assembler::NotEqual, returnValue, ool->entry());
19479 masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
19480 masm.moveValue(thisValue, output);
19481 masm.jump(ool->rejoin());
19482 masm.bind(&noChecks);
19483 masm.moveValue(returnValue, output);
19484 masm.bind(ool->rejoin());
19487 void CodeGenerator::visitCheckIsObj(LCheckIsObj* ins) {
19488 ValueOperand value = ToValue(ins, LCheckIsObj::ValueIndex);
19489 Register output = ToRegister(ins->output());
19491 using Fn = bool (*)(JSContext*, CheckIsObjectKind);
19492 OutOfLineCode* ool = oolCallVM<Fn, ThrowCheckIsObject>(
19493 ins, ArgList(Imm32(ins->mir()->checkKind())), StoreNothing());
19495 masm.fallibleUnboxObject(value, output, ool->entry());
19496 masm.bind(ool->rejoin());
19499 void CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins) {
19500 ValueOperand checkValue = ToValue(ins, LCheckObjCoercible::ValueIndex);
19502 using Fn = bool (*)(JSContext*, HandleValue);
19503 OutOfLineCode* ool = oolCallVM<Fn, ThrowObjectCoercible>(
19504 ins, ArgList(checkValue), StoreNothing());
19505 masm.branchTestNull(Assembler::Equal, checkValue, ool->entry());
19506 masm.branchTestUndefined(Assembler::Equal, checkValue, ool->entry());
19507 masm.bind(ool->rejoin());
19510 void CodeGenerator::visitCheckClassHeritage(LCheckClassHeritage* ins) {
19511 ValueOperand heritage = ToValue(ins, LCheckClassHeritage::HeritageIndex);
19512 Register temp0 = ToRegister(ins->temp0());
19513 Register temp1 = ToRegister(ins->temp1());
19515 using Fn = bool (*)(JSContext*, HandleValue);
19516 OutOfLineCode* ool = oolCallVM<Fn, CheckClassHeritageOperation>(
19517 ins, ArgList(heritage), StoreNothing());
19519 masm.branchTestNull(Assembler::Equal, heritage, ool->rejoin());
19520 masm.fallibleUnboxObject(heritage, temp0, ool->entry());
19522 masm.isConstructor(temp0, temp1, ool->entry());
19523 masm.branchTest32(Assembler::Zero, temp1, temp1, ool->entry());
19525 masm.bind(ool->rejoin());
19528 void CodeGenerator::visitCheckThis(LCheckThis* ins) {
19529 ValueOperand thisValue = ToValue(ins, LCheckThis::ValueIndex);
19531 using Fn = bool (*)(JSContext*);
19532 OutOfLineCode* ool =
19533 oolCallVM<Fn, ThrowUninitializedThis>(ins, ArgList(), StoreNothing());
19534 masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
19535 masm.bind(ool->rejoin());
19538 void CodeGenerator::visitCheckThisReinit(LCheckThisReinit* ins) {
19539 ValueOperand thisValue = ToValue(ins, LCheckThisReinit::ThisValueIndex);
19541 using Fn = bool (*)(JSContext*);
19542 OutOfLineCode* ool =
19543 oolCallVM<Fn, ThrowInitializedThis>(ins, ArgList(), StoreNothing());
19544 masm.branchTestMagic(Assembler::NotEqual, thisValue, ool->entry());
19545 masm.bind(ool->rejoin());
19548 void CodeGenerator::visitGenerator(LGenerator* lir) {
19549 Register callee = ToRegister(lir->callee());
19550 Register environmentChain = ToRegister(lir->environmentChain());
19551 Register argsObject = ToRegister(lir->argsObject());
19553 pushArg(argsObject);
19554 pushArg(environmentChain);
19555 pushArg(ImmGCPtr(current->mir()->info().script()));
19556 pushArg(callee);
19558 using Fn = JSObject* (*)(JSContext* cx, HandleFunction, HandleScript,
19559 HandleObject, HandleObject);
19560 callVM<Fn, CreateGenerator>(lir);
19563 void CodeGenerator::visitAsyncResolve(LAsyncResolve* lir) {
19564 Register generator = ToRegister(lir->generator());
19565 ValueOperand value = ToValue(lir, LAsyncResolve::ValueIndex);
19567 pushArg(value);
19568 pushArg(generator);
19570 using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
19571 HandleValue);
19572 callVM<Fn, js::AsyncFunctionResolve>(lir);
19575 void CodeGenerator::visitAsyncReject(LAsyncReject* lir) {
19576 Register generator = ToRegister(lir->generator());
19577 ValueOperand reason = ToValue(lir, LAsyncReject::ReasonIndex);
19578 ValueOperand stack = ToValue(lir, LAsyncReject::StackIndex);
19580 pushArg(stack);
19581 pushArg(reason);
19582 pushArg(generator);
19584 using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
19585 HandleValue, HandleValue);
19586 callVM<Fn, js::AsyncFunctionReject>(lir);
19589 void CodeGenerator::visitAsyncAwait(LAsyncAwait* lir) {
19590 ValueOperand value = ToValue(lir, LAsyncAwait::ValueIndex);
19591 Register generator = ToRegister(lir->generator());
19593 pushArg(value);
19594 pushArg(generator);
19596 using Fn =
19597 JSObject* (*)(JSContext* cx, Handle<AsyncFunctionGeneratorObject*> genObj,
19598 HandleValue value);
19599 callVM<Fn, js::AsyncFunctionAwait>(lir);
19602 void CodeGenerator::visitCanSkipAwait(LCanSkipAwait* lir) {
19603 ValueOperand value = ToValue(lir, LCanSkipAwait::ValueIndex);
19605 pushArg(value);
19607 using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
19608 callVM<Fn, js::CanSkipAwait>(lir);
19611 void CodeGenerator::visitMaybeExtractAwaitValue(LMaybeExtractAwaitValue* lir) {
19612 ValueOperand value = ToValue(lir, LMaybeExtractAwaitValue::ValueIndex);
19613 ValueOperand output = ToOutValue(lir);
19614 Register canSkip = ToRegister(lir->canSkip());
19616 Label cantExtract, finished;
19617 masm.branchIfFalseBool(canSkip, &cantExtract);
19619 pushArg(value);
19621 using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
19622 callVM<Fn, js::ExtractAwaitValue>(lir);
19623 masm.jump(&finished);
19624 masm.bind(&cantExtract);
19626 masm.moveValue(value, output);
19628 masm.bind(&finished);
19631 void CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins) {
19632 ValueOperand checkValue = ToValue(ins, LDebugCheckSelfHosted::ValueIndex);
19633 pushArg(checkValue);
19634 using Fn = bool (*)(JSContext*, HandleValue);
19635 callVM<Fn, js::Debug_CheckSelfHosted>(ins);
19638 void CodeGenerator::visitRandom(LRandom* ins) {
19639 using mozilla::non_crypto::XorShift128PlusRNG;
19641 FloatRegister output = ToFloatRegister(ins->output());
19642 Register rngReg = ToRegister(ins->temp0());
19644 Register64 temp1 = ToRegister64(ins->temp1());
19645 Register64 temp2 = ToRegister64(ins->temp2());
19647 const XorShift128PlusRNG* rng = gen->realm->addressOfRandomNumberGenerator();
19648 masm.movePtr(ImmPtr(rng), rngReg);
19650 masm.randomDouble(rngReg, output, temp1, temp2);
19651 if (js::SupportDifferentialTesting()) {
19652 masm.loadConstantDouble(0.0, output);
19656 void CodeGenerator::visitSignExtendInt32(LSignExtendInt32* ins) {
19657 Register input = ToRegister(ins->input());
19658 Register output = ToRegister(ins->output());
19660 switch (ins->mode()) {
19661 case MSignExtendInt32::Byte:
19662 masm.move8SignExtend(input, output);
19663 break;
19664 case MSignExtendInt32::Half:
19665 masm.move16SignExtend(input, output);
19666 break;
19670 void CodeGenerator::visitRotate(LRotate* ins) {
19671 MRotate* mir = ins->mir();
19672 Register input = ToRegister(ins->input());
19673 Register dest = ToRegister(ins->output());
19675 const LAllocation* count = ins->count();
19676 if (count->isConstant()) {
19677 int32_t c = ToInt32(count) & 0x1F;
19678 if (mir->isLeftRotate()) {
19679 masm.rotateLeft(Imm32(c), input, dest);
19680 } else {
19681 masm.rotateRight(Imm32(c), input, dest);
19683 } else {
19684 Register creg = ToRegister(count);
19685 if (mir->isLeftRotate()) {
19686 masm.rotateLeft(creg, input, dest);
19687 } else {
19688 masm.rotateRight(creg, input, dest);
19693 class OutOfLineNaNToZero : public OutOfLineCodeBase<CodeGenerator> {
19694 LNaNToZero* lir_;
19696 public:
19697 explicit OutOfLineNaNToZero(LNaNToZero* lir) : lir_(lir) {}
19699 void accept(CodeGenerator* codegen) override {
19700 codegen->visitOutOfLineNaNToZero(this);
19702 LNaNToZero* lir() const { return lir_; }
19705 void CodeGenerator::visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool) {
19706 FloatRegister output = ToFloatRegister(ool->lir()->output());
19707 masm.loadConstantDouble(0.0, output);
19708 masm.jump(ool->rejoin());
19711 void CodeGenerator::visitNaNToZero(LNaNToZero* lir) {
19712 FloatRegister input = ToFloatRegister(lir->input());
19714 OutOfLineNaNToZero* ool = new (alloc()) OutOfLineNaNToZero(lir);
19715 addOutOfLineCode(ool, lir->mir());
19717 if (lir->mir()->operandIsNeverNegativeZero()) {
19718 masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
19719 } else {
19720 FloatRegister scratch = ToFloatRegister(lir->temp0());
19721 masm.loadConstantDouble(0.0, scratch);
19722 masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch,
19723 ool->entry());
19725 masm.bind(ool->rejoin());
19728 void CodeGenerator::visitIsPackedArray(LIsPackedArray* lir) {
19729 Register obj = ToRegister(lir->object());
19730 Register output = ToRegister(lir->output());
19731 Register temp = ToRegister(lir->temp0());
19733 masm.setIsPackedArray(obj, output, temp);
19736 void CodeGenerator::visitGuardArrayIsPacked(LGuardArrayIsPacked* lir) {
19737 Register array = ToRegister(lir->array());
19738 Register temp0 = ToRegister(lir->temp0());
19739 Register temp1 = ToRegister(lir->temp1());
19741 Label bail;
19742 masm.branchArrayIsNotPacked(array, temp0, temp1, &bail);
19743 bailoutFrom(&bail, lir->snapshot());
19746 void CodeGenerator::visitGetPrototypeOf(LGetPrototypeOf* lir) {
19747 Register target = ToRegister(lir->target());
19748 ValueOperand out = ToOutValue(lir);
19749 Register scratch = out.scratchReg();
19751 using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
19752 OutOfLineCode* ool = oolCallVM<Fn, jit::GetPrototypeOf>(lir, ArgList(target),
19753 StoreValueTo(out));
19755 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
19757 masm.loadObjProto(target, scratch);
19759 Label hasProto;
19760 masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);
19762 // Call into the VM for lazy prototypes.
19763 masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), ool->entry());
19765 masm.moveValue(NullValue(), out);
19766 masm.jump(ool->rejoin());
19768 masm.bind(&hasProto);
19769 masm.tagValue(JSVAL_TYPE_OBJECT, scratch, out);
19771 masm.bind(ool->rejoin());
19774 void CodeGenerator::visitObjectWithProto(LObjectWithProto* lir) {
19775 pushArg(ToValue(lir, LObjectWithProto::PrototypeIndex));
19777 using Fn = PlainObject* (*)(JSContext*, HandleValue);
19778 callVM<Fn, js::ObjectWithProtoOperation>(lir);
19781 void CodeGenerator::visitObjectStaticProto(LObjectStaticProto* lir) {
19782 Register obj = ToRegister(lir->input());
19783 Register output = ToRegister(lir->output());
19785 masm.loadObjProto(obj, output);
19787 #ifdef DEBUG
19788 // We shouldn't encounter a null or lazy proto.
19789 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
19791 Label done;
19792 masm.branchPtr(Assembler::Above, output, ImmWord(1), &done);
19793 masm.assumeUnreachable("Unexpected null or lazy proto in MObjectStaticProto");
19794 masm.bind(&done);
19795 #endif
19798 void CodeGenerator::visitBuiltinObject(LBuiltinObject* lir) {
19799 pushArg(Imm32(static_cast<int32_t>(lir->mir()->builtinObjectKind())));
19801 using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
19802 callVM<Fn, js::BuiltinObjectOperation>(lir);
19805 void CodeGenerator::visitSuperFunction(LSuperFunction* lir) {
19806 Register callee = ToRegister(lir->callee());
19807 ValueOperand out = ToOutValue(lir);
19808 Register temp = ToRegister(lir->temp0());
19810 #ifdef DEBUG
19811 Label classCheckDone;
19812 masm.branchTestObjIsFunction(Assembler::Equal, callee, temp, callee,
19813 &classCheckDone);
19814 masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
19815 masm.bind(&classCheckDone);
19816 #endif
19818 // Load prototype of callee
19819 masm.loadObjProto(callee, temp);
19821 #ifdef DEBUG
19822 // We won't encounter a lazy proto, because |callee| is guaranteed to be a
19823 // JSFunction and only proxy objects can have a lazy proto.
19824 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
19826 Label proxyCheckDone;
19827 masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
19828 masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
19829 masm.bind(&proxyCheckDone);
19830 #endif
19832 Label nullProto, done;
19833 masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
19835 // Box prototype and return
19836 masm.tagValue(JSVAL_TYPE_OBJECT, temp, out);
19837 masm.jump(&done);
19839 masm.bind(&nullProto);
19840 masm.moveValue(NullValue(), out);
19842 masm.bind(&done);
19845 void CodeGenerator::visitInitHomeObject(LInitHomeObject* lir) {
19846 Register func = ToRegister(lir->function());
19847 ValueOperand homeObject = ToValue(lir, LInitHomeObject::HomeObjectIndex);
19849 masm.assertFunctionIsExtended(func);
19851 Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
19853 emitPreBarrier(addr);
19854 masm.storeValue(homeObject, addr);
19857 void CodeGenerator::visitIsTypedArrayConstructor(
19858 LIsTypedArrayConstructor* lir) {
19859 Register object = ToRegister(lir->object());
19860 Register output = ToRegister(lir->output());
19862 masm.setIsDefinitelyTypedArrayConstructor(object, output);
19865 void CodeGenerator::visitLoadValueTag(LLoadValueTag* lir) {
19866 ValueOperand value = ToValue(lir, LLoadValueTag::ValueIndex);
19867 Register output = ToRegister(lir->output());
19869 Register tag = masm.extractTag(value, output);
19870 if (tag != output) {
19871 masm.mov(tag, output);
19875 void CodeGenerator::visitGuardTagNotEqual(LGuardTagNotEqual* lir) {
19876 Register lhs = ToRegister(lir->lhs());
19877 Register rhs = ToRegister(lir->rhs());
19879 bailoutCmp32(Assembler::Equal, lhs, rhs, lir->snapshot());
19881 // If both lhs and rhs are numbers, can't use tag comparison to do inequality
19882 // comparison
19883 Label done;
19884 masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
19885 masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
19886 bailout(lir->snapshot());
19888 masm.bind(&done);
19891 void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
19892 Register object = ToRegister(lir->object());
19893 Register output = ToRegister(lir->output());
19895 masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);
19897 // Bail for revoked proxies.
19898 Label bail;
19899 Address targetAddr(output,
19900 js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
19901 if (lir->mir()->fallible()) {
19902 masm.fallibleUnboxObject(targetAddr, output, &bail);
19903 bailoutFrom(&bail, lir->snapshot());
19904 } else {
19905 masm.unboxObject(targetAddr, output);
19909 void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
19910 Register object = ToRegister(lir->object());
19911 Register temp0 = ToRegister(lir->temp0());
19912 Register temp1 = ToRegister(lir->temp1());
19913 Register temp2 = ToRegister(lir->temp2());
19915 masm.movePropertyKey(lir->mir()->propId(), temp1);
19916 masm.movePtr(ImmGCPtr(lir->mir()->getterSetter()), temp2);
19918 using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
19919 GetterSetter* getterSetter);
19920 masm.setupAlignedABICall();
19921 masm.loadJSContext(temp0);
19922 masm.passABIArg(temp0);
19923 masm.passABIArg(object);
19924 masm.passABIArg(temp1);
19925 masm.passABIArg(temp2);
19926 masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
19928 bailoutIfFalseBool(ReturnReg, lir->snapshot());
19931 void CodeGenerator::visitGuardIsExtensible(LGuardIsExtensible* lir) {
19932 Register object = ToRegister(lir->object());
19933 Register temp = ToRegister(lir->temp0());
19935 Label bail;
19936 masm.branchIfObjectNotExtensible(object, temp, &bail);
19937 bailoutFrom(&bail, lir->snapshot());
19940 void CodeGenerator::visitGuardInt32IsNonNegative(
19941 LGuardInt32IsNonNegative* lir) {
19942 Register index = ToRegister(lir->index());
19944 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
19947 void CodeGenerator::visitGuardInt32Range(LGuardInt32Range* lir) {
19948 Register input = ToRegister(lir->input());
19950 bailoutCmp32(Assembler::LessThan, input, Imm32(lir->mir()->minimum()),
19951 lir->snapshot());
19952 bailoutCmp32(Assembler::GreaterThan, input, Imm32(lir->mir()->maximum()),
19953 lir->snapshot());
19956 void CodeGenerator::visitGuardIndexIsNotDenseElement(
19957 LGuardIndexIsNotDenseElement* lir) {
19958 Register object = ToRegister(lir->object());
19959 Register index = ToRegister(lir->index());
19960 Register temp = ToRegister(lir->temp0());
19961 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
19963 // Load obj->elements.
19964 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
19966 // Ensure index >= initLength or the element is a hole.
19967 Label notDense;
19968 Address capacity(temp, ObjectElements::offsetOfInitializedLength());
19969 masm.spectreBoundsCheck32(index, capacity, spectreTemp, &notDense);
19971 BaseValueIndex element(temp, index);
19972 masm.branchTestMagic(Assembler::Equal, element, &notDense);
19974 bailout(lir->snapshot());
19976 masm.bind(&notDense);
19979 void CodeGenerator::visitGuardIndexIsValidUpdateOrAdd(
19980 LGuardIndexIsValidUpdateOrAdd* lir) {
19981 Register object = ToRegister(lir->object());
19982 Register index = ToRegister(lir->index());
19983 Register temp = ToRegister(lir->temp0());
19984 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
19986 // Load obj->elements.
19987 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
19989 Label success;
19991 // If length is writable, branch to &success. All indices are writable.
19992 Address flags(temp, ObjectElements::offsetOfFlags());
19993 masm.branchTest32(Assembler::Zero, flags,
19994 Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
19995 &success);
19997 // Otherwise, ensure index is in bounds.
19998 Label bail;
19999 Address length(temp, ObjectElements::offsetOfLength());
20000 masm.spectreBoundsCheck32(index, length, spectreTemp, &bail);
20001 masm.bind(&success);
20003 bailoutFrom(&bail, lir->snapshot());
20006 void CodeGenerator::visitCallAddOrUpdateSparseElement(
20007 LCallAddOrUpdateSparseElement* lir) {
20008 Register object = ToRegister(lir->object());
20009 Register index = ToRegister(lir->index());
20010 ValueOperand value = ToValue(lir, LCallAddOrUpdateSparseElement::ValueIndex);
20012 pushArg(Imm32(lir->mir()->strict()));
20013 pushArg(value);
20014 pushArg(index);
20015 pushArg(object);
20017 using Fn =
20018 bool (*)(JSContext*, Handle<NativeObject*>, int32_t, HandleValue, bool);
20019 callVM<Fn, js::AddOrUpdateSparseElementHelper>(lir);
20022 void CodeGenerator::visitCallGetSparseElement(LCallGetSparseElement* lir) {
20023 Register object = ToRegister(lir->object());
20024 Register index = ToRegister(lir->index());
20026 pushArg(index);
20027 pushArg(object);
20029 using Fn =
20030 bool (*)(JSContext*, Handle<NativeObject*>, int32_t, MutableHandleValue);
20031 callVM<Fn, js::GetSparseElementHelper>(lir);
20034 void CodeGenerator::visitCallNativeGetElement(LCallNativeGetElement* lir) {
20035 Register object = ToRegister(lir->object());
20036 Register index = ToRegister(lir->index());
20038 pushArg(index);
20039 pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
20040 pushArg(object);
20042 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
20043 MutableHandleValue);
20044 callVM<Fn, js::NativeGetElement>(lir);
20047 void CodeGenerator::visitCallNativeGetElementSuper(
20048 LCallNativeGetElementSuper* lir) {
20049 Register object = ToRegister(lir->object());
20050 Register index = ToRegister(lir->index());
20051 ValueOperand receiver =
20052 ToValue(lir, LCallNativeGetElementSuper::ReceiverIndex);
20054 pushArg(index);
20055 pushArg(receiver);
20056 pushArg(object);
20058 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
20059 MutableHandleValue);
20060 callVM<Fn, js::NativeGetElement>(lir);
20063 void CodeGenerator::visitCallObjectHasSparseElement(
20064 LCallObjectHasSparseElement* lir) {
20065 Register object = ToRegister(lir->object());
20066 Register index = ToRegister(lir->index());
20067 Register temp0 = ToRegister(lir->temp0());
20068 Register temp1 = ToRegister(lir->temp1());
20069 Register output = ToRegister(lir->output());
20071 masm.reserveStack(sizeof(Value));
20072 masm.moveStackPtrTo(temp1);
20074 using Fn = bool (*)(JSContext*, NativeObject*, int32_t, Value*);
20075 masm.setupAlignedABICall();
20076 masm.loadJSContext(temp0);
20077 masm.passABIArg(temp0);
20078 masm.passABIArg(object);
20079 masm.passABIArg(index);
20080 masm.passABIArg(temp1);
20081 masm.callWithABI<Fn, HasNativeElementPure>();
20082 masm.storeCallPointerResult(temp0);
20084 Label bail, ok;
20085 uint32_t framePushed = masm.framePushed();
20086 masm.branchIfTrueBool(temp0, &ok);
20087 masm.adjustStack(sizeof(Value));
20088 masm.jump(&bail);
20090 masm.bind(&ok);
20091 masm.setFramePushed(framePushed);
20092 masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
20093 masm.adjustStack(sizeof(Value));
20095 bailoutFrom(&bail, lir->snapshot());
20098 void CodeGenerator::visitBigIntAsIntN(LBigIntAsIntN* ins) {
20099 Register bits = ToRegister(ins->bits());
20100 Register input = ToRegister(ins->input());
20102 pushArg(bits);
20103 pushArg(input);
20105 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
20106 callVM<Fn, jit::BigIntAsIntN>(ins);
20109 void CodeGenerator::visitBigIntAsIntN64(LBigIntAsIntN64* ins) {
20110 Register input = ToRegister(ins->input());
20111 Register temp = ToRegister(ins->temp());
20112 Register64 temp64 = ToRegister64(ins->temp64());
20113 Register output = ToRegister(ins->output());
20115 Label done, create;
20117 masm.movePtr(input, output);
20119 // Load the BigInt value as an int64.
20120 masm.loadBigInt64(input, temp64);
20122 // Create a new BigInt when the input exceeds the int64 range.
20123 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
20124 Imm32(64 / BigInt::DigitBits), &create);
20126 // And create a new BigInt when the value and the BigInt have different signs.
20127 Label nonNegative;
20128 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
20129 masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &create);
20130 masm.jump(&done);
20132 masm.bind(&nonNegative);
20133 masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &done);
20135 masm.bind(&create);
20136 emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
20138 masm.bind(&done);
20141 void CodeGenerator::visitBigIntAsIntN32(LBigIntAsIntN32* ins) {
20142 Register input = ToRegister(ins->input());
20143 Register temp = ToRegister(ins->temp());
20144 Register64 temp64 = ToRegister64(ins->temp64());
20145 Register output = ToRegister(ins->output());
20147 Label done, create;
20149 masm.movePtr(input, output);
20151 // Load the absolute value of the first digit.
20152 masm.loadFirstBigIntDigitOrZero(input, temp);
20154 // If the absolute value exceeds the int32 range, create a new BigInt.
20155 masm.branchPtr(Assembler::Above, temp, Imm32(INT32_MAX), &create);
20157 // Also create a new BigInt if we have more than one digit.
20158 masm.branch32(Assembler::BelowOrEqual,
20159 Address(input, BigInt::offsetOfLength()), Imm32(1), &done);
20161 masm.bind(&create);
20163 // |temp| stores the absolute value, negate it when the sign flag is set.
20164 Label nonNegative;
20165 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
20166 masm.negPtr(temp);
20167 masm.bind(&nonNegative);
20169 masm.move32To64SignExtend(temp, temp64);
20170 emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
20172 masm.bind(&done);
20175 void CodeGenerator::visitBigIntAsUintN(LBigIntAsUintN* ins) {
20176 Register bits = ToRegister(ins->bits());
20177 Register input = ToRegister(ins->input());
20179 pushArg(bits);
20180 pushArg(input);
20182 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
20183 callVM<Fn, jit::BigIntAsUintN>(ins);
20186 void CodeGenerator::visitBigIntAsUintN64(LBigIntAsUintN64* ins) {
20187 Register input = ToRegister(ins->input());
20188 Register temp = ToRegister(ins->temp());
20189 Register64 temp64 = ToRegister64(ins->temp64());
20190 Register output = ToRegister(ins->output());
20192 Label done, create;
20194 masm.movePtr(input, output);
20196 // Load the BigInt value as an uint64.
20197 masm.loadBigInt64(input, temp64);
20199 // Create a new BigInt when the input exceeds the uint64 range.
20200 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
20201 Imm32(64 / BigInt::DigitBits), &create);
20203 // And create a new BigInt when the input has the sign flag set.
20204 masm.branchIfBigIntIsNonNegative(input, &done);
20206 masm.bind(&create);
20207 emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
20209 masm.bind(&done);
20212 void CodeGenerator::visitBigIntAsUintN32(LBigIntAsUintN32* ins) {
20213 Register input = ToRegister(ins->input());
20214 Register temp = ToRegister(ins->temp());
20215 Register64 temp64 = ToRegister64(ins->temp64());
20216 Register output = ToRegister(ins->output());
20218 Label done, create;
20220 masm.movePtr(input, output);
20222 // Load the absolute value of the first digit.
20223 masm.loadFirstBigIntDigitOrZero(input, temp);
20225 // If the absolute value exceeds the uint32 range, create a new BigInt.
20226 #if JS_PUNBOX64
20227 masm.branchPtr(Assembler::Above, temp, ImmWord(UINT32_MAX), &create);
20228 #endif
20230 // Also create a new BigInt if we have more than one digit.
20231 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
20232 Imm32(1), &create);
20234 // And create a new BigInt when the input has the sign flag set.
20235 masm.branchIfBigIntIsNonNegative(input, &done);
20237 masm.bind(&create);
20239 // |temp| stores the absolute value, negate it when the sign flag is set.
20240 Label nonNegative;
20241 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
20242 masm.negPtr(temp);
20243 masm.bind(&nonNegative);
20245 masm.move32To64ZeroExtend(temp, temp64);
20246 emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
20248 masm.bind(&done);
20251 void CodeGenerator::visitGuardNonGCThing(LGuardNonGCThing* ins) {
20252 ValueOperand input = ToValue(ins, LGuardNonGCThing::InputIndex);
20254 Label bail;
20255 masm.branchTestGCThing(Assembler::Equal, input, &bail);
20256 bailoutFrom(&bail, ins->snapshot());
20259 void CodeGenerator::visitToHashableNonGCThing(LToHashableNonGCThing* ins) {
20260 ValueOperand input = ToValue(ins, LToHashableNonGCThing::InputIndex);
20261 FloatRegister tempFloat = ToFloatRegister(ins->temp0());
20262 ValueOperand output = ToOutValue(ins);
20264 masm.toHashableNonGCThing(input, output, tempFloat);
20267 void CodeGenerator::visitToHashableString(LToHashableString* ins) {
20268 Register input = ToRegister(ins->input());
20269 Register output = ToRegister(ins->output());
20271 using Fn = JSAtom* (*)(JSContext*, JSString*);
20272 auto* ool = oolCallVM<Fn, js::AtomizeString>(ins, ArgList(input),
20273 StoreRegisterTo(output));
20275 Label isAtom;
20276 masm.branchTest32(Assembler::NonZero,
20277 Address(input, JSString::offsetOfFlags()),
20278 Imm32(JSString::ATOM_BIT), &isAtom);
20280 masm.lookupStringInAtomCacheLastLookups(input, output, output, ool->entry());
20281 masm.jump(ool->rejoin());
20282 masm.bind(&isAtom);
20283 masm.movePtr(input, output);
20284 masm.bind(ool->rejoin());
20287 void CodeGenerator::visitToHashableValue(LToHashableValue* ins) {
20288 ValueOperand input = ToValue(ins, LToHashableValue::InputIndex);
20289 FloatRegister tempFloat = ToFloatRegister(ins->temp0());
20290 ValueOperand output = ToOutValue(ins);
20292 Register str = output.scratchReg();
20294 using Fn = JSAtom* (*)(JSContext*, JSString*);
20295 auto* ool =
20296 oolCallVM<Fn, js::AtomizeString>(ins, ArgList(str), StoreRegisterTo(str));
20298 masm.toHashableValue(input, output, tempFloat, ool->entry(), ool->rejoin());
20301 void CodeGenerator::visitHashNonGCThing(LHashNonGCThing* ins) {
20302 ValueOperand input = ToValue(ins, LHashNonGCThing::InputIndex);
20303 Register temp = ToRegister(ins->temp0());
20304 Register output = ToRegister(ins->output());
20306 masm.prepareHashNonGCThing(input, output, temp);
20309 void CodeGenerator::visitHashString(LHashString* ins) {
20310 Register input = ToRegister(ins->input());
20311 Register temp = ToRegister(ins->temp0());
20312 Register output = ToRegister(ins->output());
20314 masm.prepareHashString(input, output, temp);
20317 void CodeGenerator::visitHashSymbol(LHashSymbol* ins) {
20318 Register input = ToRegister(ins->input());
20319 Register output = ToRegister(ins->output());
20321 masm.prepareHashSymbol(input, output);
20324 void CodeGenerator::visitHashBigInt(LHashBigInt* ins) {
20325 Register input = ToRegister(ins->input());
20326 Register temp0 = ToRegister(ins->temp0());
20327 Register temp1 = ToRegister(ins->temp1());
20328 Register temp2 = ToRegister(ins->temp2());
20329 Register output = ToRegister(ins->output());
20331 masm.prepareHashBigInt(input, output, temp0, temp1, temp2);
20334 void CodeGenerator::visitHashObject(LHashObject* ins) {
20335 Register setObj = ToRegister(ins->setObject());
20336 ValueOperand input = ToValue(ins, LHashObject::InputIndex);
20337 Register temp0 = ToRegister(ins->temp0());
20338 Register temp1 = ToRegister(ins->temp1());
20339 Register temp2 = ToRegister(ins->temp2());
20340 Register temp3 = ToRegister(ins->temp3());
20341 Register output = ToRegister(ins->output());
20343 masm.prepareHashObject(setObj, input, output, temp0, temp1, temp2, temp3);
20346 void CodeGenerator::visitHashValue(LHashValue* ins) {
20347 Register setObj = ToRegister(ins->setObject());
20348 ValueOperand input = ToValue(ins, LHashValue::InputIndex);
20349 Register temp0 = ToRegister(ins->temp0());
20350 Register temp1 = ToRegister(ins->temp1());
20351 Register temp2 = ToRegister(ins->temp2());
20352 Register temp3 = ToRegister(ins->temp3());
20353 Register output = ToRegister(ins->output());
20355 masm.prepareHashValue(setObj, input, output, temp0, temp1, temp2, temp3);
20358 void CodeGenerator::visitSetObjectHasNonBigInt(LSetObjectHasNonBigInt* ins) {
20359 Register setObj = ToRegister(ins->setObject());
20360 ValueOperand input = ToValue(ins, LSetObjectHasNonBigInt::InputIndex);
20361 Register hash = ToRegister(ins->hash());
20362 Register temp0 = ToRegister(ins->temp0());
20363 Register temp1 = ToRegister(ins->temp1());
20364 Register output = ToRegister(ins->output());
20366 masm.setObjectHasNonBigInt(setObj, input, hash, output, temp0, temp1);
20369 void CodeGenerator::visitSetObjectHasBigInt(LSetObjectHasBigInt* ins) {
20370 Register setObj = ToRegister(ins->setObject());
20371 ValueOperand input = ToValue(ins, LSetObjectHasBigInt::InputIndex);
20372 Register hash = ToRegister(ins->hash());
20373 Register temp0 = ToRegister(ins->temp0());
20374 Register temp1 = ToRegister(ins->temp1());
20375 Register temp2 = ToRegister(ins->temp2());
20376 Register temp3 = ToRegister(ins->temp3());
20377 Register output = ToRegister(ins->output());
20379 masm.setObjectHasBigInt(setObj, input, hash, output, temp0, temp1, temp2,
20380 temp3);
20383 void CodeGenerator::visitSetObjectHasValue(LSetObjectHasValue* ins) {
20384 Register setObj = ToRegister(ins->setObject());
20385 ValueOperand input = ToValue(ins, LSetObjectHasValue::InputIndex);
20386 Register hash = ToRegister(ins->hash());
20387 Register temp0 = ToRegister(ins->temp0());
20388 Register temp1 = ToRegister(ins->temp1());
20389 Register temp2 = ToRegister(ins->temp2());
20390 Register temp3 = ToRegister(ins->temp3());
20391 Register output = ToRegister(ins->output());
20393 masm.setObjectHasValue(setObj, input, hash, output, temp0, temp1, temp2,
20394 temp3);
20397 void CodeGenerator::visitSetObjectHasValueVMCall(
20398 LSetObjectHasValueVMCall* ins) {
20399 pushArg(ToValue(ins, LSetObjectHasValueVMCall::InputIndex));
20400 pushArg(ToRegister(ins->setObject()));
20402 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
20403 callVM<Fn, jit::SetObjectHas>(ins);
20406 void CodeGenerator::visitSetObjectSize(LSetObjectSize* ins) {
20407 Register setObj = ToRegister(ins->setObject());
20408 Register output = ToRegister(ins->output());
20410 masm.loadSetObjectSize(setObj, output);
20413 void CodeGenerator::visitMapObjectHasNonBigInt(LMapObjectHasNonBigInt* ins) {
20414 Register mapObj = ToRegister(ins->mapObject());
20415 ValueOperand input = ToValue(ins, LMapObjectHasNonBigInt::InputIndex);
20416 Register hash = ToRegister(ins->hash());
20417 Register temp0 = ToRegister(ins->temp0());
20418 Register temp1 = ToRegister(ins->temp1());
20419 Register output = ToRegister(ins->output());
20421 masm.mapObjectHasNonBigInt(mapObj, input, hash, output, temp0, temp1);
20424 void CodeGenerator::visitMapObjectHasBigInt(LMapObjectHasBigInt* ins) {
20425 Register mapObj = ToRegister(ins->mapObject());
20426 ValueOperand input = ToValue(ins, LMapObjectHasBigInt::InputIndex);
20427 Register hash = ToRegister(ins->hash());
20428 Register temp0 = ToRegister(ins->temp0());
20429 Register temp1 = ToRegister(ins->temp1());
20430 Register temp2 = ToRegister(ins->temp2());
20431 Register temp3 = ToRegister(ins->temp3());
20432 Register output = ToRegister(ins->output());
20434 masm.mapObjectHasBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
20435 temp3);
20438 void CodeGenerator::visitMapObjectHasValue(LMapObjectHasValue* ins) {
20439 Register mapObj = ToRegister(ins->mapObject());
20440 ValueOperand input = ToValue(ins, LMapObjectHasValue::InputIndex);
20441 Register hash = ToRegister(ins->hash());
20442 Register temp0 = ToRegister(ins->temp0());
20443 Register temp1 = ToRegister(ins->temp1());
20444 Register temp2 = ToRegister(ins->temp2());
20445 Register temp3 = ToRegister(ins->temp3());
20446 Register output = ToRegister(ins->output());
20448 masm.mapObjectHasValue(mapObj, input, hash, output, temp0, temp1, temp2,
20449 temp3);
20452 void CodeGenerator::visitMapObjectHasValueVMCall(
20453 LMapObjectHasValueVMCall* ins) {
20454 pushArg(ToValue(ins, LMapObjectHasValueVMCall::InputIndex));
20455 pushArg(ToRegister(ins->mapObject()));
20457 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
20458 callVM<Fn, jit::MapObjectHas>(ins);
20461 void CodeGenerator::visitMapObjectGetNonBigInt(LMapObjectGetNonBigInt* ins) {
20462 Register mapObj = ToRegister(ins->mapObject());
20463 ValueOperand input = ToValue(ins, LMapObjectGetNonBigInt::InputIndex);
20464 Register hash = ToRegister(ins->hash());
20465 Register temp0 = ToRegister(ins->temp0());
20466 Register temp1 = ToRegister(ins->temp1());
20467 ValueOperand output = ToOutValue(ins);
20469 masm.mapObjectGetNonBigInt(mapObj, input, hash, output, temp0, temp1,
20470 output.scratchReg());
20473 void CodeGenerator::visitMapObjectGetBigInt(LMapObjectGetBigInt* ins) {
20474 Register mapObj = ToRegister(ins->mapObject());
20475 ValueOperand input = ToValue(ins, LMapObjectGetBigInt::InputIndex);
20476 Register hash = ToRegister(ins->hash());
20477 Register temp0 = ToRegister(ins->temp0());
20478 Register temp1 = ToRegister(ins->temp1());
20479 Register temp2 = ToRegister(ins->temp2());
20480 Register temp3 = ToRegister(ins->temp3());
20481 ValueOperand output = ToOutValue(ins);
20483 masm.mapObjectGetBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
20484 temp3, output.scratchReg());
20487 void CodeGenerator::visitMapObjectGetValue(LMapObjectGetValue* ins) {
20488 Register mapObj = ToRegister(ins->mapObject());
20489 ValueOperand input = ToValue(ins, LMapObjectGetValue::InputIndex);
20490 Register hash = ToRegister(ins->hash());
20491 Register temp0 = ToRegister(ins->temp0());
20492 Register temp1 = ToRegister(ins->temp1());
20493 Register temp2 = ToRegister(ins->temp2());
20494 Register temp3 = ToRegister(ins->temp3());
20495 ValueOperand output = ToOutValue(ins);
20497 masm.mapObjectGetValue(mapObj, input, hash, output, temp0, temp1, temp2,
20498 temp3, output.scratchReg());
20501 void CodeGenerator::visitMapObjectGetValueVMCall(
20502 LMapObjectGetValueVMCall* ins) {
20503 pushArg(ToValue(ins, LMapObjectGetValueVMCall::InputIndex));
20504 pushArg(ToRegister(ins->mapObject()));
20506 using Fn =
20507 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
20508 callVM<Fn, jit::MapObjectGet>(ins);
20511 void CodeGenerator::visitMapObjectSize(LMapObjectSize* ins) {
20512 Register mapObj = ToRegister(ins->mapObject());
20513 Register output = ToRegister(ins->output());
20515 masm.loadMapObjectSize(mapObj, output);
20518 template <size_t NumDefs>
20519 void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
20520 wasm::JitCallStackArgVector stackArgs;
20521 masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
20522 if (masm.oom()) {
20523 return;
20526 MIonToWasmCall* mir = lir->mir();
20527 const wasm::FuncExport& funcExport = mir->funcExport();
20528 const wasm::FuncType& sig =
20529 mir->instance()->metadata().getFuncExportType(funcExport);
20531 WasmABIArgGenerator abi;
20532 for (size_t i = 0; i < lir->numOperands(); i++) {
20533 MIRType argMir;
20534 switch (sig.args()[i].kind()) {
20535 case wasm::ValType::I32:
20536 case wasm::ValType::I64:
20537 case wasm::ValType::F32:
20538 case wasm::ValType::F64:
20539 argMir = sig.args()[i].toMIRType();
20540 break;
20541 case wasm::ValType::V128:
20542 MOZ_CRASH("unexpected argument type when calling from ion to wasm");
20543 case wasm::ValType::Ref:
20544 // temporarilyUnsupportedReftypeForEntry() restricts args to externref
20545 MOZ_RELEASE_ASSERT(sig.args()[i].refType().isExtern());
20546 // Argument is boxed on the JS side to an anyref, so passed as a
20547 // pointer here.
20548 argMir = sig.args()[i].toMIRType();
20549 break;
20552 ABIArg arg = abi.next(argMir);
20553 switch (arg.kind()) {
20554 case ABIArg::GPR:
20555 case ABIArg::FPU: {
20556 MOZ_ASSERT(ToAnyRegister(lir->getOperand(i)) == arg.reg());
20557 stackArgs.infallibleEmplaceBack(wasm::JitCallStackArg());
20558 break;
20560 case ABIArg::Stack: {
20561 const LAllocation* larg = lir->getOperand(i);
20562 if (larg->isConstant()) {
20563 stackArgs.infallibleEmplaceBack(ToInt32(larg));
20564 } else if (larg->isGeneralReg()) {
20565 stackArgs.infallibleEmplaceBack(ToRegister(larg));
20566 } else if (larg->isFloatReg()) {
20567 stackArgs.infallibleEmplaceBack(ToFloatRegister(larg));
20568 } else {
20569 // Always use the stack pointer here because GenerateDirectCallFromJit
20570 // depends on this.
20571 Address addr = ToAddress<BaseRegForAddress::SP>(larg);
20572 stackArgs.infallibleEmplaceBack(addr);
20574 break;
20576 #ifdef JS_CODEGEN_REGISTER_PAIR
20577 case ABIArg::GPR_PAIR: {
20578 MOZ_CRASH(
20579 "no way to pass i64, and wasm uses hardfp for function calls");
20581 #endif
20582 case ABIArg::Uninitialized: {
20583 MOZ_CRASH("Uninitialized ABIArg kind");
20588 const wasm::ValTypeVector& results = sig.results();
20589 if (results.length() == 0) {
20590 MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
20591 } else {
20592 MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
20593 switch (results[0].kind()) {
20594 case wasm::ValType::I32:
20595 MOZ_ASSERT(lir->mir()->type() == MIRType::Int32);
20596 MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
20597 break;
20598 case wasm::ValType::I64:
20599 MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
20600 MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
20601 break;
20602 case wasm::ValType::F32:
20603 MOZ_ASSERT(lir->mir()->type() == MIRType::Float32);
20604 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnFloat32Reg);
20605 break;
20606 case wasm::ValType::F64:
20607 MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
20608 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
20609 break;
20610 case wasm::ValType::V128:
20611 MOZ_CRASH("unexpected return type when calling from ion to wasm");
20612 case wasm::ValType::Ref:
20613 // The wasm stubs layer unboxes anything that needs to be unboxed
20614 // and leaves it in a Value. A FuncRef/EqRef we could in principle
20615 // leave it as a raw object pointer but for now it complicates the
20616 // API to do so.
20617 MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
20618 break;
20622 WasmInstanceObject* instObj = lir->mir()->instanceObject();
20624 Register scratch = ToRegister(lir->temp());
20626 uint32_t callOffset;
20627 ensureOsiSpace();
20628 GenerateDirectCallFromJit(masm, funcExport, instObj->instance(), stackArgs,
20629 scratch, &callOffset);
20631 // Add the instance object to the constant pool, so it is transferred to
20632 // the owning IonScript and so that it gets traced as long as the IonScript
20633 // lives.
20635 uint32_t unused;
20636 masm.propagateOOM(graph.addConstantToPool(ObjectValue(*instObj), &unused));
20638 markSafepointAt(callOffset, lir);
20641 void CodeGenerator::visitIonToWasmCall(LIonToWasmCall* lir) {
20642 emitIonToWasmCallBase(lir);
20644 void CodeGenerator::visitIonToWasmCallV(LIonToWasmCallV* lir) {
20645 emitIonToWasmCallBase(lir);
20647 void CodeGenerator::visitIonToWasmCallI64(LIonToWasmCallI64* lir) {
20648 emitIonToWasmCallBase(lir);
20651 void CodeGenerator::visitWasmNullConstant(LWasmNullConstant* lir) {
20652 masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
20655 void CodeGenerator::visitWasmFence(LWasmFence* lir) {
20656 MOZ_ASSERT(gen->compilingWasm());
20657 masm.memoryBarrier(MembarFull);
20660 void CodeGenerator::visitWasmAnyRefFromJSValue(LWasmAnyRefFromJSValue* lir) {
20661 ValueOperand input = ToValue(lir, LWasmAnyRefFromJSValue::InputIndex);
20662 Register output = ToRegister(lir->output());
20663 FloatRegister tempFloat = ToFloatRegister(lir->temp0());
20665 using Fn = JSObject* (*)(JSContext* cx, HandleValue value);
20666 OutOfLineCode* oolBoxValue = oolCallVM<Fn, wasm::AnyRef::boxValue>(
20667 lir, ArgList(input), StoreRegisterTo(output));
20668 masm.convertValueToWasmAnyRef(input, output, tempFloat, oolBoxValue->entry());
20669 masm.bind(oolBoxValue->rejoin());
20672 void CodeGenerator::visitWasmAnyRefFromJSObject(LWasmAnyRefFromJSObject* lir) {
20673 Register input = ToRegister(lir->input());
20674 Register output = ToRegister(lir->output());
20675 masm.convertObjectToWasmAnyRef(input, output);
20678 void CodeGenerator::visitWasmAnyRefFromJSString(LWasmAnyRefFromJSString* lir) {
20679 Register input = ToRegister(lir->input());
20680 Register output = ToRegister(lir->output());
20681 masm.convertStringToWasmAnyRef(input, output);
20684 void CodeGenerator::visitWasmNewI31Ref(LWasmNewI31Ref* lir) {
20685 if (lir->value()->isConstant()) {
20686 // i31ref are often created with constants. If that's the case we will
20687 // do the operation statically here. This is similar to what is done
20688 // in masm.truncate32ToWasmI31Ref.
20689 Register output = ToRegister(lir->output());
20690 uint32_t value =
20691 static_cast<uint32_t>(lir->value()->toConstant()->toInt32());
20692 uintptr_t ptr = wasm::AnyRef::fromUint32Truncate(value).rawValue();
20693 masm.movePtr(ImmWord(ptr), output);
20694 } else {
20695 Register value = ToRegister(lir->value());
20696 Register output = ToRegister(lir->output());
20697 masm.truncate32ToWasmI31Ref(value, output);
20701 void CodeGenerator::visitWasmI31RefGet(LWasmI31RefGet* lir) {
20702 Register value = ToRegister(lir->value());
20703 Register output = ToRegister(lir->output());
20704 if (lir->mir()->wideningOp() == wasm::FieldWideningOp::Signed) {
20705 masm.convertWasmI31RefTo32Signed(value, output);
20706 } else {
20707 masm.convertWasmI31RefTo32Unsigned(value, output);
20711 #ifdef FUZZING_JS_FUZZILLI
20712 void CodeGenerator::emitFuzzilliHashDouble(FloatRegister floatDouble,
20713 Register scratch, Register output) {
20714 # ifdef JS_PUNBOX64
20715 Register64 reg64_1(scratch);
20716 Register64 reg64_2(output);
20717 masm.moveDoubleToGPR64(floatDouble, reg64_1);
20718 masm.move64(reg64_1, reg64_2);
20719 masm.rshift64(Imm32(32), reg64_2);
20720 masm.add32(scratch, output);
20721 # else
20722 Register64 reg64(scratch, output);
20723 masm.moveDoubleToGPR64(floatDouble, reg64);
20724 masm.add32(scratch, output);
20725 # endif
20728 void CodeGenerator::emitFuzzilliHashObject(LInstruction* lir, Register obj,
20729 Register output) {
20730 using Fn = void (*)(JSContext* cx, JSObject* obj, uint32_t* out);
20731 OutOfLineCode* ool = oolCallVM<Fn, FuzzilliHashObjectInl>(
20732 lir, ArgList(obj), StoreRegisterTo(output));
20734 masm.jump(ool->entry());
20735 masm.bind(ool->rejoin());
20738 void CodeGenerator::emitFuzzilliHashBigInt(Register bigInt, Register output) {
20739 LiveRegisterSet volatileRegs(GeneralRegisterSet::All(),
20740 FloatRegisterSet::All());
20741 volatileRegs.takeUnchecked(output);
20742 masm.PushRegsInMask(volatileRegs);
20744 using Fn = uint32_t (*)(BigInt* bigInt);
20745 masm.setupUnalignedABICall(output);
20746 masm.passABIArg(bigInt);
20747 masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
20748 masm.storeCallInt32Result(output);
20750 masm.PopRegsInMask(volatileRegs);
20753 void CodeGenerator::visitFuzzilliHashV(LFuzzilliHashV* ins) {
20754 MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Value);
20756 ValueOperand value = ToValue(ins, 0);
20758 Label isDouble, isObject, isBigInt, done;
20760 FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
20761 Register scratch = ToRegister(ins->getTemp(0));
20762 Register output = ToRegister(ins->output());
20763 MOZ_ASSERT(scratch != output);
20765 # ifdef JS_PUNBOX64
20766 Register tagReg = ToRegister(ins->getTemp(0));
20767 masm.splitTag(value, tagReg);
20768 # else
20769 Register tagReg = value.typeReg();
20770 # endif
20772 Label noBigInt;
20773 masm.branchTestBigInt(Assembler::NotEqual, tagReg, &noBigInt);
20774 masm.unboxBigInt(value, scratch);
20775 masm.jump(&isBigInt);
20776 masm.bind(&noBigInt);
20778 Label noObject;
20779 masm.branchTestObject(Assembler::NotEqual, tagReg, &noObject);
20780 masm.unboxObject(value, scratch);
20781 masm.jump(&isObject);
20782 masm.bind(&noObject);
20784 Label noInt32;
20785 masm.branchTestInt32(Assembler::NotEqual, tagReg, &noInt32);
20786 masm.unboxInt32(value, scratch);
20787 masm.convertInt32ToDouble(scratch, scratchFloat);
20788 masm.jump(&isDouble);
20789 masm.bind(&noInt32);
20791 Label noNull;
20792 masm.branchTestNull(Assembler::NotEqual, tagReg, &noNull);
20793 masm.move32(Imm32(1), scratch);
20794 masm.convertInt32ToDouble(scratch, scratchFloat);
20795 masm.jump(&isDouble);
20796 masm.bind(&noNull);
20798 Label noUndefined;
20799 masm.branchTestUndefined(Assembler::NotEqual, tagReg, &noUndefined);
20800 masm.move32(Imm32(2), scratch);
20801 masm.convertInt32ToDouble(scratch, scratchFloat);
20802 masm.jump(&isDouble);
20803 masm.bind(&noUndefined);
20805 Label noBoolean;
20806 masm.branchTestBoolean(Assembler::NotEqual, tagReg, &noBoolean);
20807 masm.unboxBoolean(value, scratch);
20808 masm.add32(Imm32(3), scratch);
20809 masm.convertInt32ToDouble(scratch, scratchFloat);
20810 masm.jump(&isDouble);
20811 masm.bind(&noBoolean);
20813 Label noDouble;
20814 masm.branchTestDouble(Assembler::NotEqual, tagReg, &noDouble);
20815 masm.unboxDouble(value, scratchFloat);
20816 masm.canonicalizeDoubleIfDeterministic(scratchFloat);
20818 masm.jump(&isDouble);
20819 masm.bind(&noDouble);
20820 masm.move32(Imm32(0), output);
20821 masm.jump(&done);
20823 masm.bind(&isBigInt);
20824 emitFuzzilliHashBigInt(scratch, output);
20825 masm.jump(&done);
20827 masm.bind(&isObject);
20828 emitFuzzilliHashObject(ins, scratch, output);
20829 masm.jump(&done);
20831 masm.bind(&isDouble);
20832 emitFuzzilliHashDouble(scratchFloat, scratch, output);
20834 masm.bind(&done);
20837 void CodeGenerator::visitFuzzilliHashT(LFuzzilliHashT* ins) {
20838 const LAllocation* value = ins->value();
20839 MIRType mirType = ins->mir()->getOperand(0)->type();
20841 FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
20842 Register scratch = ToRegister(ins->getTemp(0));
20843 Register output = ToRegister(ins->output());
20844 MOZ_ASSERT(scratch != output);
20846 if (mirType == MIRType::Object) {
20847 MOZ_ASSERT(value->isGeneralReg());
20848 masm.mov(value->toGeneralReg()->reg(), scratch);
20849 emitFuzzilliHashObject(ins, scratch, output);
20850 } else if (mirType == MIRType::BigInt) {
20851 MOZ_ASSERT(value->isGeneralReg());
20852 masm.mov(value->toGeneralReg()->reg(), scratch);
20853 emitFuzzilliHashBigInt(scratch, output);
20854 } else if (mirType == MIRType::Double) {
20855 MOZ_ASSERT(value->isFloatReg());
20856 masm.moveDouble(value->toFloatReg()->reg(), scratchFloat);
20857 masm.canonicalizeDoubleIfDeterministic(scratchFloat);
20858 emitFuzzilliHashDouble(scratchFloat, scratch, output);
20859 } else if (mirType == MIRType::Float32) {
20860 MOZ_ASSERT(value->isFloatReg());
20861 masm.convertFloat32ToDouble(value->toFloatReg()->reg(), scratchFloat);
20862 masm.canonicalizeDoubleIfDeterministic(scratchFloat);
20863 emitFuzzilliHashDouble(scratchFloat, scratch, output);
20864 } else if (mirType == MIRType::Int32) {
20865 MOZ_ASSERT(value->isGeneralReg());
20866 masm.mov(value->toGeneralReg()->reg(), scratch);
20867 masm.convertInt32ToDouble(scratch, scratchFloat);
20868 emitFuzzilliHashDouble(scratchFloat, scratch, output);
20869 } else if (mirType == MIRType::Null) {
20870 MOZ_ASSERT(value->isBogus());
20871 masm.move32(Imm32(1), scratch);
20872 masm.convertInt32ToDouble(scratch, scratchFloat);
20873 emitFuzzilliHashDouble(scratchFloat, scratch, output);
20874 } else if (mirType == MIRType::Undefined) {
20875 MOZ_ASSERT(value->isBogus());
20876 masm.move32(Imm32(2), scratch);
20877 masm.convertInt32ToDouble(scratch, scratchFloat);
20878 emitFuzzilliHashDouble(scratchFloat, scratch, output);
20879 } else if (mirType == MIRType::Boolean) {
20880 MOZ_ASSERT(value->isGeneralReg());
20881 masm.mov(value->toGeneralReg()->reg(), scratch);
20882 masm.add32(Imm32(3), scratch);
20883 masm.convertInt32ToDouble(scratch, scratchFloat);
20884 emitFuzzilliHashDouble(scratchFloat, scratch, output);
20885 } else {
20886 MOZ_CRASH("unexpected type");
20890 void CodeGenerator::visitFuzzilliHashStore(LFuzzilliHashStore* ins) {
20891 const LAllocation* value = ins->value();
20892 MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Int32);
20893 MOZ_ASSERT(value->isGeneralReg());
20895 Register scratchJSContext = ToRegister(ins->getTemp(0));
20896 Register scratch = ToRegister(ins->getTemp(1));
20898 masm.loadJSContext(scratchJSContext);
20900 // stats
20901 Address addrExecHashInputs(scratchJSContext,
20902 offsetof(JSContext, executionHashInputs));
20903 masm.load32(addrExecHashInputs, scratch);
20904 masm.add32(Imm32(1), scratch);
20905 masm.store32(scratch, addrExecHashInputs);
20907 Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
20908 masm.load32(addrExecHash, scratch);
20909 masm.add32(value->toGeneralReg()->reg(), scratch);
20910 masm.rotateLeft(Imm32(1), scratch, scratch);
20911 masm.store32(scratch, addrExecHash);
20913 #endif
20915 static_assert(!std::is_polymorphic_v<CodeGenerator>,
20916 "CodeGenerator should not have any virtual methods");
20918 } // namespace jit
20919 } // namespace js