Bug 1890513: Directly invoke variadic native functions. r=jandem
[gecko.git] / js / src / jit / CodeGenerator.cpp
blob53a9e1d6637d61efc282e69d4136bfa51c117fcf
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/CodeGenerator.h"
9 #include "mozilla/Assertions.h"
10 #include "mozilla/Casting.h"
11 #include "mozilla/DebugOnly.h"
12 #include "mozilla/EndianUtils.h"
13 #include "mozilla/EnumeratedArray.h"
14 #include "mozilla/EnumeratedRange.h"
15 #include "mozilla/EnumSet.h"
16 #include "mozilla/IntegerTypeTraits.h"
17 #include "mozilla/Latin1.h"
18 #include "mozilla/MathAlgorithms.h"
19 #include "mozilla/ScopeExit.h"
20 #include "mozilla/SIMD.h"
22 #include <limits>
23 #include <type_traits>
24 #include <utility>
26 #include "jslibmath.h"
27 #include "jsmath.h"
28 #include "jsnum.h"
30 #include "builtin/MapObject.h"
31 #include "builtin/RegExp.h"
32 #include "builtin/String.h"
33 #include "irregexp/RegExpTypes.h"
34 #include "jit/ABIArgGenerator.h"
35 #include "jit/CompileInfo.h"
36 #include "jit/InlineScriptTree.h"
37 #include "jit/Invalidation.h"
38 #include "jit/IonGenericCallStub.h"
39 #include "jit/IonIC.h"
40 #include "jit/IonScript.h"
41 #include "jit/JitcodeMap.h"
42 #include "jit/JitFrames.h"
43 #include "jit/JitRuntime.h"
44 #include "jit/JitSpewer.h"
45 #include "jit/JitZone.h"
46 #include "jit/Linker.h"
47 #include "jit/MIRGenerator.h"
48 #include "jit/MoveEmitter.h"
49 #include "jit/RangeAnalysis.h"
50 #include "jit/RegExpStubConstants.h"
51 #include "jit/SafepointIndex.h"
52 #include "jit/SharedICHelpers.h"
53 #include "jit/SharedICRegisters.h"
54 #include "jit/VMFunctions.h"
55 #include "jit/WarpSnapshot.h"
56 #include "js/ColumnNumber.h" // JS::LimitedColumnNumberOneOrigin
57 #include "js/experimental/JitInfo.h" // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
58 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
59 #include "js/RegExpFlags.h" // JS::RegExpFlag
60 #include "js/ScalarType.h" // js::Scalar::Type
61 #include "proxy/DOMProxy.h"
62 #include "proxy/ScriptedProxyHandler.h"
63 #include "util/CheckedArithmetic.h"
64 #include "util/Unicode.h"
65 #include "vm/ArrayBufferViewObject.h"
66 #include "vm/AsyncFunction.h"
67 #include "vm/AsyncIteration.h"
68 #include "vm/BuiltinObjectKind.h"
69 #include "vm/FunctionFlags.h" // js::FunctionFlags
70 #include "vm/Interpreter.h"
71 #include "vm/JSAtomUtils.h" // AtomizeString
72 #include "vm/MatchPairs.h"
73 #include "vm/RegExpObject.h"
74 #include "vm/RegExpStatics.h"
75 #include "vm/StaticStrings.h"
76 #include "vm/StringObject.h"
77 #include "vm/StringType.h"
78 #include "vm/TypedArrayObject.h"
79 #include "wasm/WasmCodegenConstants.h"
80 #include "wasm/WasmValType.h"
81 #ifdef MOZ_VTUNE
82 # include "vtune/VTuneWrapper.h"
83 #endif
84 #include "wasm/WasmBinary.h"
85 #include "wasm/WasmGC.h"
86 #include "wasm/WasmGcObject.h"
87 #include "wasm/WasmStubs.h"
89 #include "builtin/Boolean-inl.h"
90 #include "jit/MacroAssembler-inl.h"
91 #include "jit/shared/CodeGenerator-shared-inl.h"
92 #include "jit/TemplateObject-inl.h"
93 #include "jit/VMFunctionList-inl.h"
94 #include "vm/JSScript-inl.h"
95 #include "wasm/WasmInstance-inl.h"
97 using namespace js;
98 using namespace js::jit;
100 using JS::GenericNaN;
101 using mozilla::AssertedCast;
102 using mozilla::CheckedUint32;
103 using mozilla::DebugOnly;
104 using mozilla::FloatingPoint;
105 using mozilla::Maybe;
106 using mozilla::NegativeInfinity;
107 using mozilla::PositiveInfinity;
109 using JS::ExpandoAndGeneration;
111 namespace js {
112 namespace jit {
114 #ifdef CHECK_OSIPOINT_REGISTERS
115 template <class Op>
116 static void HandleRegisterDump(Op op, MacroAssembler& masm,
117 LiveRegisterSet liveRegs, Register activation,
118 Register scratch) {
119 const size_t baseOffset = JitActivation::offsetOfRegs();
121 // Handle live GPRs.
122 for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
123 Register reg = *iter;
124 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
126 if (reg == activation) {
127 // To use the original value of the activation register (that's
128 // now on top of the stack), we need the scratch register.
129 masm.push(scratch);
130 masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
131 op(scratch, dump);
132 masm.pop(scratch);
133 } else {
134 op(reg, dump);
138 // Handle live FPRs.
139 for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
140 FloatRegister reg = *iter;
141 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
142 op(reg, dump);
146 class StoreOp {
147 MacroAssembler& masm;
149 public:
150 explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
152 void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
153 void operator()(FloatRegister reg, Address dump) {
154 if (reg.isDouble()) {
155 masm.storeDouble(reg, dump);
156 } else if (reg.isSingle()) {
157 masm.storeFloat32(reg, dump);
158 } else if (reg.isSimd128()) {
159 MOZ_CRASH("Unexpected case for SIMD");
160 } else {
161 MOZ_CRASH("Unexpected register type.");
166 class VerifyOp {
167 MacroAssembler& masm;
168 Label* failure_;
170 public:
171 VerifyOp(MacroAssembler& masm, Label* failure)
172 : masm(masm), failure_(failure) {}
174 void operator()(Register reg, Address dump) {
175 masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
177 void operator()(FloatRegister reg, Address dump) {
178 if (reg.isDouble()) {
179 ScratchDoubleScope scratch(masm);
180 masm.loadDouble(dump, scratch);
181 masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
182 } else if (reg.isSingle()) {
183 ScratchFloat32Scope scratch(masm);
184 masm.loadFloat32(dump, scratch);
185 masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
186 } else if (reg.isSimd128()) {
187 MOZ_CRASH("Unexpected case for SIMD");
188 } else {
189 MOZ_CRASH("Unexpected register type.");
194 void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
195 // Ensure the live registers stored by callVM did not change between
196 // the call and this OsiPoint. Try-catch relies on this invariant.
198 // Load pointer to the JitActivation in a scratch register.
199 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
200 Register scratch = allRegs.takeAny();
201 masm.push(scratch);
202 masm.loadJitActivation(scratch);
204 // If we should not check registers (because the instruction did not call
205 // into the VM, or a GC happened), we're done.
206 Label failure, done;
207 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
208 masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
210 // Having more than one VM function call made in one visit function at
211 // runtime is a sec-ciritcal error, because if we conservatively assume that
212 // one of the function call can re-enter Ion, then the invalidation process
213 // will potentially add a call at a random location, by patching the code
214 // before the return address.
215 masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
217 // Set checkRegs to 0, so that we don't try to verify registers after we
218 // return from this script to the caller.
219 masm.store32(Imm32(0), checkRegs);
221 // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
222 // temps after calling into the VM. This is fine because no other
223 // instructions (including this OsiPoint) will depend on them. Also
224 // backtracking can also use the same register for an input and an output.
225 // These are marked as clobbered and shouldn't get checked.
226 LiveRegisterSet liveRegs;
227 liveRegs.set() = RegisterSet::Intersect(
228 safepoint->liveRegs().set(),
229 RegisterSet::Not(safepoint->clobberedRegs().set()));
231 VerifyOp op(masm, &failure);
232 HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
234 masm.jump(&done);
236 // Do not profile the callWithABI that occurs below. This is to avoid a
237 // rare corner case that occurs when profiling interacts with itself:
239 // When slow profiling assertions are turned on, FunctionBoundary ops
240 // (which update the profiler pseudo-stack) may emit a callVM, which
241 // forces them to have an osi point associated with them. The
242 // FunctionBoundary for inline function entry is added to the caller's
243 // graph with a PC from the caller's code, but during codegen it modifies
244 // Gecko Profiler instrumentation to add the callee as the current top-most
245 // script. When codegen gets to the OSIPoint, and the callWithABI below is
246 // emitted, the codegen thinks that the current frame is the callee, but
247 // the PC it's using from the OSIPoint refers to the caller. This causes
248 // the profiler instrumentation of the callWithABI below to ASSERT, since
249 // the script and pc are mismatched. To avoid this, we simply omit
250 // instrumentation for these callWithABIs.
252 // Any live register captured by a safepoint (other than temp registers)
253 // must remain unchanged between the call and the OsiPoint instruction.
254 masm.bind(&failure);
255 masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
257 masm.bind(&done);
258 masm.pop(scratch);
261 bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
262 if (!checkOsiPointRegisters) {
263 return false;
266 if (safepoint->liveRegs().emptyGeneral() &&
267 safepoint->liveRegs().emptyFloat()) {
268 return false; // No registers to check.
271 return true;
274 void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
275 if (!shouldVerifyOsiPointRegs(safepoint)) {
276 return;
279 // Set checkRegs to 0. If we perform a VM call, the instruction
280 // will set it to 1.
281 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
282 Register scratch = allRegs.takeAny();
283 masm.push(scratch);
284 masm.loadJitActivation(scratch);
285 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
286 masm.store32(Imm32(0), checkRegs);
287 masm.pop(scratch);
290 static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
291 // Store a copy of all live registers before performing the call.
292 // When we reach the OsiPoint, we can use this to check nothing
293 // modified them in the meantime.
295 // Load pointer to the JitActivation in a scratch register.
296 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
297 Register scratch = allRegs.takeAny();
298 masm.push(scratch);
299 masm.loadJitActivation(scratch);
301 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
302 masm.add32(Imm32(1), checkRegs);
304 StoreOp op(masm);
305 HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
307 masm.pop(scratch);
309 #endif // CHECK_OSIPOINT_REGISTERS
311 // Before doing any call to Cpp, you should ensure that volatile
312 // registers are evicted by the register allocator.
313 void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins) {
314 TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
315 const VMFunctionData& fun = GetVMFunction(id);
317 // Stack is:
318 // ... frame ...
319 // [args]
320 #ifdef DEBUG
321 MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
322 pushedArgs_ = 0;
323 #endif
325 #ifdef CHECK_OSIPOINT_REGISTERS
326 if (shouldVerifyOsiPointRegs(ins->safepoint())) {
327 StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
329 #endif
331 #ifdef DEBUG
332 if (ins->mirRaw()) {
333 MOZ_ASSERT(ins->mirRaw()->isInstruction());
334 MInstruction* mir = ins->mirRaw()->toInstruction();
335 MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
337 // If this MIR instruction has an overridden AliasSet, set the JitRuntime's
338 // disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
339 // RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
340 // interrupt callbacks can call JS (chrome JS or shell testing functions).
341 bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
342 if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
343 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
344 masm.move32(Imm32(1), ReturnReg);
345 masm.store32(ReturnReg, AbsoluteAddress(addr));
348 #endif
350 // Push an exit frame descriptor.
351 masm.PushFrameDescriptor(FrameType::IonJS);
353 // Call the wrapper function. The wrapper is in charge to unwind the stack
354 // when returning from the call. Failures are handled with exceptions based
355 // on the return value of the C functions. To guard the outcome of the
356 // returned value, use another LIR instruction.
357 ensureOsiSpace();
358 uint32_t callOffset = masm.callJit(code);
359 markSafepointAt(callOffset, ins);
361 #ifdef DEBUG
362 // Reset the disallowArbitraryCode flag after the call.
364 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
365 masm.push(ReturnReg);
366 masm.move32(Imm32(0), ReturnReg);
367 masm.store32(ReturnReg, AbsoluteAddress(addr));
368 masm.pop(ReturnReg);
370 #endif
372 // Pop rest of the exit frame and the arguments left on the stack.
373 int framePop =
374 sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
375 masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
377 // Stack is:
378 // ... frame ...
381 template <typename Fn, Fn fn>
382 void CodeGenerator::callVM(LInstruction* ins) {
383 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
384 callVMInternal(id, ins);
387 // ArgSeq store arguments for OutOfLineCallVM.
389 // OutOfLineCallVM are created with "oolCallVM" function. The third argument of
390 // this function is an instance of a class which provides a "generate" in charge
391 // of pushing the argument, with "pushArg", for a VMFunction.
393 // Such list of arguments can be created by using the "ArgList" function which
394 // creates one instance of "ArgSeq", where the type of the arguments are
395 // inferred from the type of the arguments.
397 // The list of arguments must be written in the same order as if you were
398 // calling the function in C++.
400 // Example:
401 // ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
403 template <typename... ArgTypes>
404 class ArgSeq {
405 std::tuple<std::remove_reference_t<ArgTypes>...> args_;
407 template <std::size_t... ISeq>
408 inline void generate(CodeGenerator* codegen,
409 std::index_sequence<ISeq...>) const {
410 // Arguments are pushed in reverse order, from last argument to first
411 // argument.
412 (codegen->pushArg(std::get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
415 public:
416 explicit ArgSeq(ArgTypes&&... args)
417 : args_(std::forward<ArgTypes>(args)...) {}
419 inline void generate(CodeGenerator* codegen) const {
420 generate(codegen, std::index_sequence_for<ArgTypes...>{});
423 #ifdef DEBUG
424 static constexpr size_t numArgs = sizeof...(ArgTypes);
425 #endif
428 template <typename... ArgTypes>
429 inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
430 return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
433 // Store wrappers, to generate the right move of data after the VM call.
435 struct StoreNothing {
436 inline void generate(CodeGenerator* codegen) const {}
437 inline LiveRegisterSet clobbered() const {
438 return LiveRegisterSet(); // No register gets clobbered
442 class StoreRegisterTo {
443 private:
444 Register out_;
446 public:
447 explicit StoreRegisterTo(Register out) : out_(out) {}
449 inline void generate(CodeGenerator* codegen) const {
450 // It's okay to use storePointerResultTo here - the VMFunction wrapper
451 // ensures the upper bytes are zero for bool/int32 return values.
452 codegen->storePointerResultTo(out_);
454 inline LiveRegisterSet clobbered() const {
455 LiveRegisterSet set;
456 set.add(out_);
457 return set;
461 class StoreFloatRegisterTo {
462 private:
463 FloatRegister out_;
465 public:
466 explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
468 inline void generate(CodeGenerator* codegen) const {
469 codegen->storeFloatResultTo(out_);
471 inline LiveRegisterSet clobbered() const {
472 LiveRegisterSet set;
473 set.add(out_);
474 return set;
478 template <typename Output>
479 class StoreValueTo_ {
480 private:
481 Output out_;
483 public:
484 explicit StoreValueTo_(const Output& out) : out_(out) {}
486 inline void generate(CodeGenerator* codegen) const {
487 codegen->storeResultValueTo(out_);
489 inline LiveRegisterSet clobbered() const {
490 LiveRegisterSet set;
491 set.add(out_);
492 return set;
496 template <typename Output>
497 StoreValueTo_<Output> StoreValueTo(const Output& out) {
498 return StoreValueTo_<Output>(out);
501 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
502 class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
503 private:
504 LInstruction* lir_;
505 ArgSeq args_;
506 StoreOutputTo out_;
508 public:
509 OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
510 const StoreOutputTo& out)
511 : lir_(lir), args_(args), out_(out) {}
513 void accept(CodeGenerator* codegen) override {
514 codegen->visitOutOfLineCallVM(this);
517 LInstruction* lir() const { return lir_; }
518 const ArgSeq& args() const { return args_; }
519 const StoreOutputTo& out() const { return out_; }
522 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
523 OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
524 const StoreOutputTo& out) {
525 MOZ_ASSERT(lir->mirRaw());
526 MOZ_ASSERT(lir->mirRaw()->isInstruction());
528 #ifdef DEBUG
529 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
530 const VMFunctionData& fun = GetVMFunction(id);
531 MOZ_ASSERT(fun.explicitArgs == args.numArgs);
532 MOZ_ASSERT(fun.returnsData() !=
533 (std::is_same_v<StoreOutputTo, StoreNothing>));
534 #endif
536 OutOfLineCode* ool = new (alloc())
537 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
538 addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
539 return ool;
542 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
543 void CodeGenerator::visitOutOfLineCallVM(
544 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
545 LInstruction* lir = ool->lir();
547 #ifdef JS_JITSPEW
548 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
549 lir->opName());
550 if (const char* extra = lir->getExtraName()) {
551 JitSpewCont(JitSpew_Codegen, ":%s", extra);
553 JitSpewFin(JitSpew_Codegen);
554 #endif
555 perfSpewer_.recordInstruction(masm, lir);
556 saveLive(lir);
557 ool->args().generate(this);
558 callVM<Fn, fn>(lir);
559 ool->out().generate(this);
560 restoreLiveIgnore(lir, ool->out().clobbered());
561 masm.jump(ool->rejoin());
564 class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
565 private:
566 LInstruction* lir_;
567 size_t cacheIndex_;
568 size_t cacheInfoIndex_;
570 public:
571 OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
572 size_t cacheInfoIndex)
573 : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
575 void bind(MacroAssembler* masm) override {
576 // The binding of the initial jump is done in
577 // CodeGenerator::visitOutOfLineICFallback.
580 size_t cacheIndex() const { return cacheIndex_; }
581 size_t cacheInfoIndex() const { return cacheInfoIndex_; }
582 LInstruction* lir() const { return lir_; }
584 void accept(CodeGenerator* codegen) override {
585 codegen->visitOutOfLineICFallback(this);
589 void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
590 if (cacheIndex == SIZE_MAX) {
591 masm.setOOM();
592 return;
595 DataPtr<IonIC> cache(this, cacheIndex);
596 MInstruction* mir = lir->mirRaw()->toInstruction();
597 cache->setScriptedLocation(mir->block()->info().script(),
598 mir->resumePoint()->pc());
600 Register temp = cache->scratchRegisterForEntryJump();
601 icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
602 masm.jump(Address(temp, 0));
604 MOZ_ASSERT(!icInfo_.empty());
606 OutOfLineICFallback* ool =
607 new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
608 addOutOfLineCode(ool, mir);
610 masm.bind(ool->rejoin());
611 cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
614 void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
615 LInstruction* lir = ool->lir();
616 size_t cacheIndex = ool->cacheIndex();
617 size_t cacheInfoIndex = ool->cacheInfoIndex();
619 DataPtr<IonIC> ic(this, cacheIndex);
621 // Register the location of the OOL path in the IC.
622 ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
624 switch (ic->kind()) {
625 case CacheKind::GetProp:
626 case CacheKind::GetElem: {
627 IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
629 saveLive(lir);
631 pushArg(getPropIC->id());
632 pushArg(getPropIC->value());
633 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
634 pushArg(ImmGCPtr(gen->outerInfo().script()));
636 using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
637 HandleValue, HandleValue, MutableHandleValue);
638 callVM<Fn, IonGetPropertyIC::update>(lir);
640 StoreValueTo(getPropIC->output()).generate(this);
641 restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
643 masm.jump(ool->rejoin());
644 return;
646 case CacheKind::GetPropSuper:
647 case CacheKind::GetElemSuper: {
648 IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
650 saveLive(lir);
652 pushArg(getPropSuperIC->id());
653 pushArg(getPropSuperIC->receiver());
654 pushArg(getPropSuperIC->object());
655 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
656 pushArg(ImmGCPtr(gen->outerInfo().script()));
658 using Fn =
659 bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
660 HandleValue, HandleValue, MutableHandleValue);
661 callVM<Fn, IonGetPropSuperIC::update>(lir);
663 StoreValueTo(getPropSuperIC->output()).generate(this);
664 restoreLiveIgnore(lir,
665 StoreValueTo(getPropSuperIC->output()).clobbered());
667 masm.jump(ool->rejoin());
668 return;
670 case CacheKind::SetProp:
671 case CacheKind::SetElem: {
672 IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
674 saveLive(lir);
676 pushArg(setPropIC->rhs());
677 pushArg(setPropIC->id());
678 pushArg(setPropIC->object());
679 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
680 pushArg(ImmGCPtr(gen->outerInfo().script()));
682 using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
683 HandleObject, HandleValue, HandleValue);
684 callVM<Fn, IonSetPropertyIC::update>(lir);
686 restoreLive(lir);
688 masm.jump(ool->rejoin());
689 return;
691 case CacheKind::GetName: {
692 IonGetNameIC* getNameIC = ic->asGetNameIC();
694 saveLive(lir);
696 pushArg(getNameIC->environment());
697 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
698 pushArg(ImmGCPtr(gen->outerInfo().script()));
700 using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
701 MutableHandleValue);
702 callVM<Fn, IonGetNameIC::update>(lir);
704 StoreValueTo(getNameIC->output()).generate(this);
705 restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
707 masm.jump(ool->rejoin());
708 return;
710 case CacheKind::BindName: {
711 IonBindNameIC* bindNameIC = ic->asBindNameIC();
713 saveLive(lir);
715 pushArg(bindNameIC->environment());
716 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
717 pushArg(ImmGCPtr(gen->outerInfo().script()));
719 using Fn =
720 JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
721 callVM<Fn, IonBindNameIC::update>(lir);
723 StoreRegisterTo(bindNameIC->output()).generate(this);
724 restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
726 masm.jump(ool->rejoin());
727 return;
729 case CacheKind::GetIterator: {
730 IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
732 saveLive(lir);
734 pushArg(getIteratorIC->value());
735 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
736 pushArg(ImmGCPtr(gen->outerInfo().script()));
738 using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
739 HandleValue);
740 callVM<Fn, IonGetIteratorIC::update>(lir);
742 StoreRegisterTo(getIteratorIC->output()).generate(this);
743 restoreLiveIgnore(lir,
744 StoreRegisterTo(getIteratorIC->output()).clobbered());
746 masm.jump(ool->rejoin());
747 return;
749 case CacheKind::OptimizeSpreadCall: {
750 auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();
752 saveLive(lir);
754 pushArg(optimizeSpreadCallIC->value());
755 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
756 pushArg(ImmGCPtr(gen->outerInfo().script()));
758 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
759 HandleValue, MutableHandleValue);
760 callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);
762 StoreValueTo(optimizeSpreadCallIC->output()).generate(this);
763 restoreLiveIgnore(
764 lir, StoreValueTo(optimizeSpreadCallIC->output()).clobbered());
766 masm.jump(ool->rejoin());
767 return;
769 case CacheKind::In: {
770 IonInIC* inIC = ic->asInIC();
772 saveLive(lir);
774 pushArg(inIC->object());
775 pushArg(inIC->key());
776 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
777 pushArg(ImmGCPtr(gen->outerInfo().script()));
779 using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
780 HandleObject, bool*);
781 callVM<Fn, IonInIC::update>(lir);
783 StoreRegisterTo(inIC->output()).generate(this);
784 restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
786 masm.jump(ool->rejoin());
787 return;
789 case CacheKind::HasOwn: {
790 IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
792 saveLive(lir);
794 pushArg(hasOwnIC->id());
795 pushArg(hasOwnIC->value());
796 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
797 pushArg(ImmGCPtr(gen->outerInfo().script()));
799 using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
800 HandleValue, int32_t*);
801 callVM<Fn, IonHasOwnIC::update>(lir);
803 StoreRegisterTo(hasOwnIC->output()).generate(this);
804 restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
806 masm.jump(ool->rejoin());
807 return;
809 case CacheKind::CheckPrivateField: {
810 IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
812 saveLive(lir);
814 pushArg(checkPrivateFieldIC->id());
815 pushArg(checkPrivateFieldIC->value());
817 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
818 pushArg(ImmGCPtr(gen->outerInfo().script()));
820 using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
821 HandleValue, HandleValue, bool*);
822 callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
824 StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
825 restoreLiveIgnore(
826 lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
828 masm.jump(ool->rejoin());
829 return;
831 case CacheKind::InstanceOf: {
832 IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
834 saveLive(lir);
836 pushArg(hasInstanceOfIC->rhs());
837 pushArg(hasInstanceOfIC->lhs());
838 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
839 pushArg(ImmGCPtr(gen->outerInfo().script()));
841 using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
842 HandleValue lhs, HandleObject rhs, bool* res);
843 callVM<Fn, IonInstanceOfIC::update>(lir);
845 StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
846 restoreLiveIgnore(lir,
847 StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
849 masm.jump(ool->rejoin());
850 return;
852 case CacheKind::UnaryArith: {
853 IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
855 saveLive(lir);
857 pushArg(unaryArithIC->input());
858 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
859 pushArg(ImmGCPtr(gen->outerInfo().script()));
861 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
862 IonUnaryArithIC* stub, HandleValue val,
863 MutableHandleValue res);
864 callVM<Fn, IonUnaryArithIC::update>(lir);
866 StoreValueTo(unaryArithIC->output()).generate(this);
867 restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
869 masm.jump(ool->rejoin());
870 return;
872 case CacheKind::ToPropertyKey: {
873 IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
875 saveLive(lir);
877 pushArg(toPropertyKeyIC->input());
878 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
879 pushArg(ImmGCPtr(gen->outerInfo().script()));
881 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
882 IonToPropertyKeyIC* ic, HandleValue val,
883 MutableHandleValue res);
884 callVM<Fn, IonToPropertyKeyIC::update>(lir);
886 StoreValueTo(toPropertyKeyIC->output()).generate(this);
887 restoreLiveIgnore(lir,
888 StoreValueTo(toPropertyKeyIC->output()).clobbered());
890 masm.jump(ool->rejoin());
891 return;
893 case CacheKind::BinaryArith: {
894 IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
896 saveLive(lir);
898 pushArg(binaryArithIC->rhs());
899 pushArg(binaryArithIC->lhs());
900 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
901 pushArg(ImmGCPtr(gen->outerInfo().script()));
903 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
904 IonBinaryArithIC* stub, HandleValue lhs,
905 HandleValue rhs, MutableHandleValue res);
906 callVM<Fn, IonBinaryArithIC::update>(lir);
908 StoreValueTo(binaryArithIC->output()).generate(this);
909 restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
911 masm.jump(ool->rejoin());
912 return;
914 case CacheKind::Compare: {
915 IonCompareIC* compareIC = ic->asCompareIC();
917 saveLive(lir);
919 pushArg(compareIC->rhs());
920 pushArg(compareIC->lhs());
921 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
922 pushArg(ImmGCPtr(gen->outerInfo().script()));
924 using Fn =
925 bool (*)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub,
926 HandleValue lhs, HandleValue rhs, bool* res);
927 callVM<Fn, IonCompareIC::update>(lir);
929 StoreRegisterTo(compareIC->output()).generate(this);
930 restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
932 masm.jump(ool->rejoin());
933 return;
935 case CacheKind::CloseIter: {
936 IonCloseIterIC* closeIterIC = ic->asCloseIterIC();
938 saveLive(lir);
940 pushArg(closeIterIC->iter());
941 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
942 pushArg(ImmGCPtr(gen->outerInfo().script()));
944 using Fn =
945 bool (*)(JSContext*, HandleScript, IonCloseIterIC*, HandleObject);
946 callVM<Fn, IonCloseIterIC::update>(lir);
948 restoreLive(lir);
950 masm.jump(ool->rejoin());
951 return;
953 case CacheKind::OptimizeGetIterator: {
954 auto* optimizeGetIteratorIC = ic->asOptimizeGetIteratorIC();
956 saveLive(lir);
958 pushArg(optimizeGetIteratorIC->value());
959 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
960 pushArg(ImmGCPtr(gen->outerInfo().script()));
962 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeGetIteratorIC*,
963 HandleValue, bool* res);
964 callVM<Fn, IonOptimizeGetIteratorIC::update>(lir);
966 StoreRegisterTo(optimizeGetIteratorIC->output()).generate(this);
967 restoreLiveIgnore(
968 lir, StoreRegisterTo(optimizeGetIteratorIC->output()).clobbered());
970 masm.jump(ool->rejoin());
971 return;
973 case CacheKind::Call:
974 case CacheKind::TypeOf:
975 case CacheKind::ToBool:
976 case CacheKind::GetIntrinsic:
977 case CacheKind::NewArray:
978 case CacheKind::NewObject:
979 MOZ_CRASH("Unsupported IC");
981 MOZ_CRASH();
984 StringObject* MNewStringObject::templateObj() const {
985 return &templateObj_->as<StringObject>();
// Construct a CodeGenerator for the given MIR generator, LIR graph, and
// macro assembler. Label vectors are backed by the generator's allocator;
// scriptCounts_ starts null and, if ever set, is owned by this object
// (released in the destructor).
CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
                             MacroAssembler* masm)
    : CodeGeneratorSpecific(gen, graph, masm),
      ionScriptLabels_(gen->alloc()),
      ionNurseryObjectLabels_(gen->alloc()),
      scriptCounts_(nullptr),
      zoneStubsToReadBarrier_(0) {}
996 CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
// Convert a boxed Value to an int32.
//
// TRUNCATE mode (bitwise-op style) accepts strings and arbitrary doubles,
// calling out-of-line code for the hard cases; NORMAL mode performs a checked
// conversion. Any input that cannot be represented bails out of Ion.
void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
  ValueOperand operand = ToValue(lir, LValueToInt32::Input);
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->tempFloat());

  Label fails;
  if (lir->mode() == LValueToInt32::TRUNCATE) {
    // Out-of-line path for truncating a double that doesn't fit an int32.
    OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());

    // We can only handle strings in truncation contexts, like bitwise
    // operations.
    Register stringReg = ToRegister(lir->temp());
    using Fn = bool (*)(JSContext*, JSString*, double*);
    auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
                                                    StoreFloatRegisterTo(temp));
    Label* stringEntry = oolString->entry();
    Label* stringRejoin = oolString->rejoin();

    masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
                              oolDouble->entry(), stringReg, temp, output,
                              &fails);
    masm.bind(oolDouble->rejoin());
  } else {
    MOZ_ASSERT(lir->mode() == LValueToInt32::NORMAL);
    masm.convertValueToInt32(operand, temp, output, &fails,
                             lir->mirNormal()->needsNegativeZeroCheck(),
                             lir->mirNormal()->conversion());
  }

  // Any path that failed to produce an int32 bails out.
  bailoutFrom(&fails, lir->snapshot());
}
// Convert a boxed Value to a double. Doubles and int32s are always handled
// inline; booleans, undefined, and null are additionally handled when the
// MIR conversion mode allows non-string primitives. Any other tag bails out.
void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
  ValueOperand operand = ToValue(lir, LValueToDouble::InputIndex);
  FloatRegister output = ToFloatRegister(lir->output());

  // Set if we can handle other primitives beside strings, as long as they're
  // guaranteed to never throw. This rules out symbols and BigInts, but allows
  // booleans, undefined, and null.
  bool hasNonStringPrimitives =
      lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;

  Label isDouble, isInt32, isBool, isNull, isUndefined, done;

  {
    ScratchTagScope tag(masm, operand);
    masm.splitTagForTest(operand, tag);

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (hasNonStringPrimitives) {
      masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
      masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &isNull);
    }
  }

  // No tag matched: unsupported input type, bail out of Ion.
  bailout(lir->snapshot());

  if (hasNonStringPrimitives) {
    // ToNumber(null) is +0.
    masm.bind(&isNull);
    masm.loadConstantDouble(0.0, output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    // ToNumber(undefined) is NaN.
    masm.bind(&isUndefined);
    masm.loadConstantDouble(GenericNaN(), output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    masm.bind(&isBool);
    masm.boolValueToDouble(operand, output);
    masm.jump(&done);
  }

  masm.bind(&isInt32);
  masm.int32ValueToDouble(operand, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  masm.unboxDouble(operand, output);
  masm.bind(&done);
}
// Convert a boxed Value to a float32. Mirrors visitValueToDouble, with an
// extra narrowing step at the end since values are stored as doubles.
void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
  ValueOperand operand = ToValue(lir, LValueToFloat32::InputIndex);
  FloatRegister output = ToFloatRegister(lir->output());

  // Set if we can handle other primitives beside strings, as long as they're
  // guaranteed to never throw. This rules out symbols and BigInts, but allows
  // booleans, undefined, and null.
  bool hasNonStringPrimitives =
      lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;

  Label isDouble, isInt32, isBool, isNull, isUndefined, done;

  {
    ScratchTagScope tag(masm, operand);
    masm.splitTagForTest(operand, tag);

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (hasNonStringPrimitives) {
      masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
      masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &isNull);
    }
  }

  // No tag matched: unsupported input type, bail out of Ion.
  bailout(lir->snapshot());

  if (hasNonStringPrimitives) {
    // ToNumber(null) is +0.
    masm.bind(&isNull);
    masm.loadConstantFloat32(0.0f, output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    // ToNumber(undefined) is NaN.
    masm.bind(&isUndefined);
    masm.loadConstantFloat32(float(GenericNaN()), output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    masm.bind(&isBool);
    masm.boolValueToFloat32(operand, output);
    masm.jump(&done);
  }

  masm.bind(&isInt32);
  masm.int32ValueToFloat32(operand, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  // ARM and MIPS may not have a double register available if we've
  // allocated output as a float32.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
  {
    ScratchDoubleScope fpscratch(masm);
    masm.unboxDouble(operand, fpscratch);
    masm.convertDoubleToFloat32(fpscratch, output);
  }
#else
  masm.unboxDouble(operand, output);
  masm.convertDoubleToFloat32(output, output);
#endif
  masm.bind(&done);
}
// Convert a boxed Value to a BigInt. BigInt values are unboxed inline;
// booleans and strings go through the ToBigInt VM call; everything else
// bails out (ToBigInt on those either throws or has side effects).
void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
  ValueOperand operand = ToValue(lir, LValueToBigInt::InputIndex);
  Register output = ToRegister(lir->output());

  using Fn = BigInt* (*)(JSContext*, HandleValue);
  auto* ool =
      oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));

  Register tag = masm.extractTag(operand, output);

  Label notBigInt, done;
  masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
  masm.unboxBigInt(operand, output);
  masm.jump(&done);
  masm.bind(&notBigInt);

  masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
  masm.branchTestString(Assembler::Equal, tag, ool->entry());

  // ToBigInt(object) can have side-effects; all other types throw a TypeError.
  bailout(lir->snapshot());

  masm.bind(ool->rejoin());
  masm.bind(&done);
}
1175 void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
1176 masm.convertInt32ToDouble(ToRegister(lir->input()),
1177 ToFloatRegister(lir->output()));
1180 void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
1181 masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
1182 ToFloatRegister(lir->output()));
1185 void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
1186 masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
1187 ToFloatRegister(lir->output()));
1190 void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
1191 masm.convertInt32ToFloat32(ToRegister(lir->input()),
1192 ToFloatRegister(lir->output()));
1195 void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
1196 Label fail;
1197 FloatRegister input = ToFloatRegister(lir->input());
1198 Register output = ToRegister(lir->output());
1199 masm.convertDoubleToInt32(input, output, &fail,
1200 lir->mir()->needsNegativeZeroCheck());
1201 bailoutFrom(&fail, lir->snapshot());
1204 void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
1205 Label fail;
1206 FloatRegister input = ToFloatRegister(lir->input());
1207 Register output = ToRegister(lir->output());
1208 masm.convertFloat32ToInt32(input, output, &fail,
1209 lir->mir()->needsNegativeZeroCheck());
1210 bailoutFrom(&fail, lir->snapshot());
// Sign-extend an int32 to pointer width. Only emitted on 64-bit platforms;
// on 32-bit platforms int32 and intptr coincide, so this node is never used.
void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
#ifdef JS_64BIT
  // This LIR instruction is only used if the input can be negative.
  MOZ_ASSERT(lir->mir()->canBeNegative());

  Register output = ToRegister(lir->output());
  const LAllocation* input = lir->input();
  if (input->isRegister()) {
    masm.move32SignExtendToPtr(ToRegister(input), output);
  } else {
    // Input was spilled: sign-extending load straight from memory.
    masm.load32SignExtendToPtr(ToAddress(input), output);
  }
#else
  MOZ_CRASH("Not used on 32-bit platforms");
#endif
}
// Narrow a non-negative intptr to int32 in place, bailing out if the value
// does not fit. 64-bit only; the input is reused as the output register.
void CodeGenerator::visitNonNegativeIntPtrToInt32(
    LNonNegativeIntPtrToInt32* lir) {
#ifdef JS_64BIT
  Register output = ToRegister(lir->output());
  // In-place operation: input and output must share a register.
  MOZ_ASSERT(ToRegister(lir->input()) == output);

  Label bail;
  masm.guardNonNegativeIntPtrToInt32(output, &bail);
  bailoutFrom(&bail, lir->snapshot());
#else
  MOZ_CRASH("Not used on 32-bit platforms");
#endif
}
1244 void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
1245 Register input = ToRegister(lir->input());
1246 FloatRegister output = ToFloatRegister(lir->output());
1247 masm.convertIntPtrToDouble(input, output);
// Adjust a DataView byte length so that an access of |byteSize| bytes at any
// index up to the adjusted length stays in bounds: subtract (byteSize - 1)
// in place, bailing out if the result would go negative.
void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
  Register output = ToRegister(lir->output());
  // In-place operation: input and output must share a register.
  MOZ_ASSERT(ToRegister(lir->input()) == output);

  uint32_t byteSize = lir->mir()->byteSize();

#ifdef DEBUG
  // The incoming length must be non-negative.
  Label ok;
  masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
  masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
  masm.bind(&ok);
#endif

  Label bail;
  masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Out-of-line helper for object truthiness: calls into C++ to ask whether
// |objreg| emulates undefined, then branches to the appropriate label.
// Volatile registers are saved/restored around the ABI call; |scratch|
// receives the boolean result. In DEBUG/FUZZING builds the current value of
// the "has seen emulates-undefined" fuse is passed along for extra checking.
void CodeGenerator::emitOOLTestObject(Register objreg,
                                      Label* ifEmulatesUndefined,
                                      Label* ifDoesntEmulateUndefined,
                                      Register scratch) {
  saveVolatile(scratch);
#if defined(DEBUG) || defined(FUZZING)
  masm.loadPtr(AbsoluteAddress(
                   gen->runtime->addressOfHasSeenObjectEmulateUndefinedFuse()),
               scratch);
  using Fn = bool (*)(JSObject* obj, size_t fuseValue);
  masm.setupAlignedABICall();
  masm.passABIArg(objreg);
  masm.passABIArg(scratch);
  masm.callWithABI<Fn, js::EmulatesUndefinedCheckFuse>();
#else
  using Fn = bool (*)(JSObject* obj);
  masm.setupAlignedABICall();
  masm.passABIArg(objreg);
  masm.callWithABI<Fn, js::EmulatesUndefined>();
#endif
  masm.storeCallPointerResult(scratch);
  restoreVolatile(scratch);

  masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
  masm.jump(ifDoesntEmulateUndefined);
}
1295 // Base out-of-line code generator for all tests of the truthiness of an
1296 // object, where the object might not be truthy. (Recall that per spec all
1297 // objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
1298 // flag to permit objects to look like |undefined| in certain contexts,
1299 // including in object truthiness testing.) We check truthiness inline except
1300 // when we're testing it on a proxy, in which case out-of-line code will call
1301 // EmulatesUndefined for a conclusive answer.
class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
  // Register holding the object being tested.
  Register objreg_;
  // Scratch register for the out-of-line call.
  Register scratch_;

  // Branch targets; non-null only after setInputAndTargets.
  Label* ifEmulatesUndefined_;
  Label* ifDoesntEmulateUndefined_;

#ifdef DEBUG
  bool initialized() { return ifEmulatesUndefined_ != nullptr; }
#endif

 public:
  OutOfLineTestObject()
      : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}

  // Emit the out-of-line path (an ABI call to EmulatesUndefined).
  void accept(CodeGenerator* codegen) final {
    MOZ_ASSERT(initialized());
    codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
                               ifDoesntEmulateUndefined_, scratch_);
  }

  // Specify the register where the object to be tested is found, labels to
  // jump to if the object is truthy or falsy, and a scratch register for
  // use in the out-of-line path. May only be called once.
  void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
                          Label* ifDoesntEmulateUndefined, Register scratch) {
    MOZ_ASSERT(!initialized());
    MOZ_ASSERT(ifEmulatesUndefined);
    objreg_ = objreg;
    scratch_ = scratch;
    ifEmulatesUndefined_ = ifEmulatesUndefined;
    ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
  }
};
1337 // A subclass of OutOfLineTestObject containing two extra labels, for use when
1338 // the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
1339 // code. The user should bind these labels in inline code, and specify them as
1340 // targets via setInputAndTargets, as appropriate.
class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
  // Labels owned by this object so callers don't need their own storage;
  // bind them inline and pass them to setInputAndTargets as needed.
  Label label1_;
  Label label2_;

 public:
  OutOfLineTestObjectWithLabels() = default;

  Label* label1() { return &label1_; }
  Label* label2() { return &label2_; }
};
// Shared kernel for the emulates-undefined tests below: wires up the
// out-of-line code's targets, then emits the inline fast-path class-flag
// check. Neither label is bound here; callers decide fallthrough behavior.
void CodeGenerator::testObjectEmulatesUndefinedKernel(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
                          scratch);

  // Perform a fast-path check of the object's class flags if the object's
  // not a proxy. Let out-of-line code handle the slow cases that require
  // saving registers, making a function call, and restoring registers.
  masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
                                       ifEmulatesUndefined);
}
// Variant of the emulates-undefined test that binds the "doesn't emulate"
// label on the fallthrough path, so the caller must not have bound it yet.
void CodeGenerator::branchTestObjectEmulatesUndefined(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
             "ifDoesntEmulateUndefined will be bound to the fallthrough path");

  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.bind(ifDoesntEmulateUndefined);
}
// Variant of the emulates-undefined test that jumps (rather than falls
// through) to the "doesn't emulate" label; the caller binds both labels.
void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
                                                Label* ifEmulatesUndefined,
                                                Label* ifDoesntEmulateUndefined,
                                                Register scratch,
                                                OutOfLineTestObject* ool) {
  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.jump(ifDoesntEmulateUndefined);
}
// Emit the truthiness test for one possible JSValueType of |value|.
//
// If |skipTypeTest| is true, the value is already known to have this type
// (this is the last candidate), so no tag check is emitted and the code
// either branches or falls through unconditionally. Otherwise the code tests
// the tag first and falls through to the caller's next candidate when the
// tag doesn't match. Falls through to the caller on "truthy" for the final
// (skipTypeTest) regular-type case.
void CodeGenerator::testValueTruthyForType(
    JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
    Register tempToUnbox, Register temp, FloatRegister floatTemp,
    Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool,
    bool skipTypeTest) {
#ifdef DEBUG
  if (skipTypeTest) {
    Label expected;
    masm.branchTestType(Assembler::Equal, tag, type, &expected);
    masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
    masm.bind(&expected);
  }
#endif

  // Handle irregular types first.
  switch (type) {
    case JSVAL_TYPE_UNDEFINED:
    case JSVAL_TYPE_NULL:
      // Undefined and null are falsy.
      if (!skipTypeTest) {
        masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
      } else {
        masm.jump(ifFalsy);
      }
      return;
    case JSVAL_TYPE_SYMBOL:
      // Symbols are truthy.
      if (!skipTypeTest) {
        masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
      } else {
        masm.jump(ifTruthy);
      }
      return;
    case JSVAL_TYPE_OBJECT: {
      Label notObject;
      if (!skipTypeTest) {
        masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
      }
      {
        // The tag scratch must be released while we unbox and test the
        // object payload.
        ScratchTagScopeRelease _(&tag);
        Register objreg = masm.extractObject(value, tempToUnbox);
        testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, temp, ool);
      }
      masm.bind(&notObject);
      return;
    }
    default:
      break;
  }

  // Check the type of the value (unless this is the last possible type).
  Label differentType;
  if (!skipTypeTest) {
    masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
  }

  // Branch if the value is falsy.
  ScratchTagScopeRelease _(&tag);
  switch (type) {
    case JSVAL_TYPE_BOOLEAN: {
      masm.branchTestBooleanTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_INT32: {
      masm.branchTestInt32Truthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_STRING: {
      masm.branchTestStringTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_BIGINT: {
      masm.branchTestBigIntTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_DOUBLE: {
      masm.unboxDouble(value, floatTemp);
      masm.branchTestDoubleTruthy(false, floatTemp, ifFalsy);
      break;
    }
    default:
      MOZ_CRASH("Unexpected value type");
  }

  // If we reach this point, the value is truthy. We fall through for
  // truthy on the last test; otherwise, branch.
  if (!skipTypeTest) {
    masm.jump(ifTruthy);
  }

  masm.bind(&differentType);
}
// Emit a full truthiness test over all possible value types, branching to
// |ifTruthy|/|ifFalsy|. Previously-observed types (most frequent first) are
// tested before the remaining types; the last candidate needs no tag test.
// Falls through when the final test is truthy.
void CodeGenerator::testValueTruthy(const ValueOperand& value,
                                    Register tempToUnbox, Register temp,
                                    FloatRegister floatTemp,
                                    const TypeDataList& observedTypes,
                                    Label* ifTruthy, Label* ifFalsy,
                                    OutOfLineTestObject* ool) {
  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  const std::initializer_list<JSValueType> defaultOrder = {
      JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL,   JSVAL_TYPE_BOOLEAN,
      JSVAL_TYPE_INT32,     JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
      JSVAL_TYPE_DOUBLE,    JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};

  mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);

  // Generate tests for previously observed types first.
  // The TypeDataList is sorted by descending frequency.
  for (auto& observed : observedTypes) {
    JSValueType type = observed.type();
    remaining -= type;

    testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
                           ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
  }

  // Generate tests for remaining types.
  for (auto type : defaultOrder) {
    if (!remaining.contains(type)) {
      continue;
    }
    remaining -= type;

    // We don't need a type test for the last possible type.
    bool skipTypeTest = remaining.isEmpty();
    testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
                           ifTruthy, ifFalsy, ool, skipTypeTest);
  }
  MOZ_ASSERT(remaining.isEmpty());

  // We fall through if the final test is truthy.
}
// Branch on the truthiness of a BigInt (truthy iff non-zero). Uses the
// single branch whose target is not the next block when possible, so one of
// the two edges becomes fallthrough.
void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
  Label* ifTrueLabel = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalseLabel = getJumpLabelForBranch(lir->ifFalse());
  Register input = ToRegister(lir->input());

  if (isNextBlock(lir->ifFalse()->lir())) {
    // False target is next: branch only on non-zero, fall through otherwise.
    masm.branchIfBigIntIsNonZero(input, ifTrueLabel);
  } else if (isNextBlock(lir->ifTrue()->lir())) {
    // True target is next: branch only on zero, fall through otherwise.
    masm.branchIfBigIntIsZero(input, ifFalseLabel);
  } else {
    // Neither target is next: need an explicit branch plus a jump.
    masm.branchIfBigIntIsZero(input, ifFalseLabel);
    jumpToBlock(lir->ifTrue());
  }
}
// Debug/fuzzing-only sanity check: emit code asserting that |input| does not
// have the emulates-undefined class flag. Compiles to nothing otherwise.
void CodeGenerator::assertObjectDoesNotEmulateUndefined(
    Register input, Register temp, const MInstruction* mir) {
#if defined(DEBUG) || defined(FUZZING)
  // Validate that the object indeed doesn't have the emulates undefined flag.
  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, mir);

  Label* doesNotEmulateUndefined = ool->label1();
  Label* emulatesUndefined = ool->label2();

  testObjectEmulatesUndefined(input, emulatesUndefined, doesNotEmulateUndefined,
                              temp, ool);
  masm.bind(emulatesUndefined);
  masm.assumeUnreachable(
      "Found an object emulating undefined while the fuse is intact");
  masm.bind(doesNotEmulateUndefined);
#endif
}
// Branch on the truthiness of an object. When the emulates-undefined fuse is
// intact, no object can be falsy, so we only emit a (debug-checked) jump to
// the truthy block; otherwise, emit the full emulates-undefined test.
void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
  Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
  Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
  Register input = ToRegister(lir->input());

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  if (intact) {
    assertObjectDoesNotEmulateUndefined(input, ToRegister(lir->temp()),
                                        lir->mir());
    // Bug 1874905: It would be fantastic if this could be optimized out
    masm.jump(truthy);
  } else {
    auto* ool = new (alloc()) OutOfLineTestObject();
    addOutOfLineCode(ool, lir->mir());

    testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
                                ool);
  }
}
// Branch on the truthiness of a boxed Value, dispatching over all possible
// value types (observed types tested first). The trailing jump handles the
// fallthrough-on-truthy behavior of testValueTruthy.
void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
  auto* ool = new (alloc()) OutOfLineTestObject();
  addOutOfLineCode(ool, lir->mir());

  Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
  Label* falsy = getJumpLabelForBranch(lir->ifFalsy());

  ValueOperand input = ToValue(lir, LTestVAndBranch::Input);
  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
  Register temp = ToRegister(lir->temp2());
  FloatRegister floatTemp = ToFloatRegister(lir->tempFloat());
  const TypeDataList& observedTypes = lir->mir()->observedTypes();

  testValueTruthy(input, tempToUnbox, temp, floatTemp, observedTypes, truthy,
                  falsy, ool);
  masm.jump(truthy);
}
// Convert a boolean register into the interned "true"/"false" atom.
void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  const JSAtomState& names = gen->runtime->names();
  Label true_, done;

  masm.branchTest32(Assembler::NonZero, input, input, &true_);
  masm.movePtr(ImmGCPtr(names.false_), output);
  masm.jump(&done);

  masm.bind(&true_);
  masm.movePtr(ImmGCPtr(names.true_), output);

  masm.bind(&done);
}
// Convert an int32 to a string. Small integers hit the static-strings cache
// inline; everything else goes through the Int32ToString VM call.
void CodeGenerator::visitIntToString(LIntToString* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  using Fn = JSLinearString* (*)(JSContext*, int);
  OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  masm.lookupStaticIntString(input, output, gen->runtime->staticStrings(),
                             ool->entry());

  masm.bind(ool->rejoin());
}
// Convert a double to a string. Doubles that are exact small integers hit
// the static-strings cache inline; anything else takes the NumberToString
// VM call.
void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, double);
  OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  // Try double to integer conversion and run integer to string code.
  masm.convertDoubleToInt32(input, temp, ool->entry(), false);
  masm.lookupStaticIntString(temp, output, gen->runtime->staticStrings(),
                             ool->entry());

  masm.bind(ool->rejoin());
}
// Convert a boxed Value to a string, dispatching on the value's tag.
// Strings, small ints, undefined, null, and booleans have inline fast paths;
// doubles, BigInts, and (when side effects are allowed) objects/symbols fall
// back to the ToStringSlow VM call. When side effects are not allowed,
// objects and symbols bail out instead.
void CodeGenerator::visitValueToString(LValueToString* lir) {
  ValueOperand input = ToValue(lir, LValueToString::InputIndex);
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  Label done;
  Register tag = masm.extractTag(input, output);
  const JSAtomState& names = gen->runtime->names();

  // String
  {
    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    masm.unboxString(input, output);
    masm.jump(&done);
    masm.bind(&notString);
  }

  // Integer
  {
    Label notInteger;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
    Register unboxed = ToTempUnboxRegister(lir->temp0());
    unboxed = masm.extractInt32(input, unboxed);
    masm.lookupStaticIntString(unboxed, output, gen->runtime->staticStrings(),
                               ool->entry());
    masm.jump(&done);
    masm.bind(&notInteger);
  }

  // Double
  {
    // Note: no fastpath. Need two extra registers and can only convert doubles
    // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
    masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
  }

  // Undefined
  {
    Label notUndefined;
    masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
    masm.movePtr(ImmGCPtr(names.undefined), output);
    masm.jump(&done);
    masm.bind(&notUndefined);
  }

  // Null
  {
    Label notNull;
    masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
    masm.movePtr(ImmGCPtr(names.null), output);
    masm.jump(&done);
    masm.bind(&notNull);
  }

  // Boolean
  {
    Label notBoolean, true_;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    masm.branchTestBooleanTruthy(true, input, &true_);
    masm.movePtr(ImmGCPtr(names.false_), output);
    masm.jump(&done);
    masm.bind(&true_);
    masm.movePtr(ImmGCPtr(names.true_), output);
    masm.jump(&done);
    masm.bind(&notBoolean);
  }

  // Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
  if (lir->mir()->mightHaveSideEffects()) {
    // Object
    if (lir->mir()->supportSideEffects()) {
      masm.branchTestObject(Assembler::Equal, tag, ool->entry());
    } else {
      // Bail.
      MOZ_ASSERT(lir->mir()->needsSnapshot());
      Label bail;
      masm.branchTestObject(Assembler::Equal, tag, &bail);
      bailoutFrom(&bail, lir->snapshot());
    }

    // Symbol
    if (lir->mir()->supportSideEffects()) {
      masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
    } else {
      // Bail.
      MOZ_ASSERT(lir->mir()->needsSnapshot());
      Label bail;
      masm.branchTestSymbol(Assembler::Equal, tag, &bail);
      bailoutFrom(&bail, lir->snapshot());
    }
  }

  // BigInt
  {
    // No fastpath currently implemented.
    masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
  }

  masm.assumeUnreachable("Unexpected type for LValueToString.");

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Signature of the store-buffer add/remove helpers called below.
using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);

// Emit an ABI call |fun(buffer, &holder[offset])| to mutate the store
// buffer, preserving the registers in |liveVolatiles| across the call.
// If no volatile register is left for the ABI scratch, |holder| is pushed
// and temporarily reused.
static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
                                    size_t offset, Register buffer,
                                    LiveGeneralRegisterSet& liveVolatiles,
                                    StoreBufferMutationFn fun) {
  Label callVM;
  Label exit;

  // Call into the VM to barrier the write. The only registers that need to
  // be preserved are those in liveVolatiles, so once they are saved on the
  // stack all volatile registers are available for use.
  masm.bind(&callVM);
  masm.PushRegsInMask(liveVolatiles);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(buffer);
  regs.takeUnchecked(holder);
  Register addrReg = regs.takeAny();

  masm.computeEffectiveAddress(Address(holder, offset), addrReg);

  bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
  if (needExtraReg) {
    masm.push(holder);
    masm.setupUnalignedABICall(holder);
  } else {
    masm.setupUnalignedABICall(regs.takeAny());
  }
  masm.passABIArg(buffer);
  masm.passABIArg(addrReg);
  masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun),
                   ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);

  if (needExtraReg) {
    masm.pop(holder);
  }
  masm.PopRegsInMask(liveVolatiles);
  masm.bind(&exit);
}
// Emit a post-write barrier for storing string |next| over string |prev| at
// |holder[offset]|: add the cell address to the nursery store buffer when
// the new value is in the nursery, or remove it when only the old value was.
// Warning: this function modifies prev and next.
static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
                                  size_t offset, Register prev, Register next,
                                  LiveGeneralRegisterSet& liveVolatiles) {
  Label exit;
  Label checkRemove, putCell;

  // if (next && (buffer = next->storeBuffer()))
  // but we never pass in nullptr for next.
  Register storebuffer = next;
  masm.loadStoreBuffer(next, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);

  // if (prev && prev->storeBuffer())
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
  masm.loadStoreBuffer(prev, prev);
  masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);

  // buffer->putCell(cellp)
  masm.bind(&putCell);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::addCellAddressToStoreBuffer);
  masm.jump(&exit);

  // if (prev && (buffer = prev->storeBuffer()))
  masm.bind(&checkRemove);
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
  masm.loadStoreBuffer(prev, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::removeCellAddressFromStoreBuffer);

  masm.bind(&exit);
}
// Materialize a fresh RegExp object for a regexp literal. When the source
// object already has a shared (compiled) regexp, clone it inline from the
// template; otherwise always take the CloneRegExpObject VM call.
void CodeGenerator::visitRegExp(LRegExp* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());
  JSObject* source = lir->mir()->source();

  using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
  OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
      lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
  if (lir->mir()->hasShared()) {
    TemplateObject templateObject(source);
    masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                        ool->entry());
  } else {
    masm.jump(ool->entry());
  }
  masm.bind(ool->rejoin());
}
1843 static constexpr int32_t RegExpPairsVectorStartOffset(
1844 int32_t inputOutputDataStartOffset) {
1845 return inputOutputDataStartOffset + int32_t(InputOutputDataSize) +
1846 int32_t(sizeof(MatchPairs));
1849 static Address RegExpPairCountAddress(MacroAssembler& masm,
1850 int32_t inputOutputDataStartOffset) {
1851 return Address(FramePointer, inputOutputDataStartOffset +
1852 int32_t(InputOutputDataSize) +
1853 MatchPairs::offsetOfPairCount());
// Update the per-global RegExpStatics after a successful match: record the
// pending/matches input string, the lazy index/source/flags taken from the
// regexp's shared, and set the pending-lazy-evaluation flag. Pre-barriers
// cover the overwritten string fields; post-barriers are emitted when the
// input string may be nursery-allocated.
static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
                                Register input, Register lastIndex,
                                Register staticsReg, Register temp1,
                                Register temp2, gc::Heap initialStringHeap,
                                LiveGeneralRegisterSet& volatileRegs) {
  Address pendingInputAddress(staticsReg,
                              RegExpStatics::offsetOfPendingInput());
  Address matchesInputAddress(staticsReg,
                              RegExpStatics::offsetOfMatchesInput());
  Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
  Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());

  // Pre-barrier the string fields we're about to overwrite.
  masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);

  if (initialStringHeap == gc::Heap::Default) {
    // Writing into RegExpStatics tenured memory; must post-barrier.
    if (staticsReg.volatile_()) {
      volatileRegs.add(staticsReg);
    }

    masm.loadPtr(pendingInputAddress, temp1);
    masm.storePtr(input, pendingInputAddress);
    masm.movePtr(input, temp2);
    EmitPostWriteBarrierS(masm, staticsReg,
                          RegExpStatics::offsetOfPendingInput(),
                          temp1 /* prev */, temp2 /* next */, volatileRegs);

    masm.loadPtr(matchesInputAddress, temp1);
    masm.storePtr(input, matchesInputAddress);
    masm.movePtr(input, temp2);
    EmitPostWriteBarrierS(masm, staticsReg,
                          RegExpStatics::offsetOfMatchesInput(),
                          temp1 /* prev */, temp2 /* next */, volatileRegs);
  } else {
    // Input is known tenured; no post-barrier needed.
    masm.debugAssertGCThingIsTenured(input, temp1);
    masm.storePtr(input, pendingInputAddress);
    masm.storePtr(input, matchesInputAddress);
  }

  masm.storePtr(lastIndex,
                Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
  masm.store32(
      Imm32(1),
      Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));

  // Copy the source and flags out of the regexp's RegExpShared.
  masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
                                          RegExpObject::SHARED_SLOT)),
                      temp1, JSVAL_TYPE_PRIVATE_GCTHING);
  masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
  masm.storePtr(temp2, lazySourceAddress);
  static_assert(sizeof(JS::RegExpFlags) == 1, "load size must match flag size");
  masm.load8ZeroExtend(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
  masm.store8(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
}
// Prepare an InputOutputData and optional MatchPairs which space has been
// allocated for on the stack, and try to execute a RegExp on a string input.
// If the RegExp was successfully executed and matched the input, fallthrough.
// Otherwise, jump to notFound or failure.
//
// inputOutputDataStartOffset is the offset relative to the frame pointer
// register. This offset is negative for the RegExpExecTest stub.
static bool PrepareAndExecuteRegExp(MacroAssembler& masm, Register regexp,
                                    Register input, Register lastIndex,
                                    Register temp1, Register temp2,
                                    Register temp3,
                                    int32_t inputOutputDataStartOffset,
                                    gc::Heap initialStringHeap, Label* notFound,
                                    Label* failure) {
  JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");

  using irregexp::InputOutputData;

  /*
   * [SMDOC] Stack layout for PrepareAndExecuteRegExp
   *
   * Before this function is called, the caller is responsible for
   * allocating enough stack space for the following data:
   *
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *          inputStartAddress +---------->  inputStart|
   *            inputEndAddress +---------->    inputEnd|
   *          startIndexAddress +---------->  startIndex|
   *             matchesAddress +---------->     matches|-----+
   *                                    +---------------+     |
   * matchPairs(Address|Offset) +-----> +---------------+ <--+
   *                                    |  MatchPairs   |
   *           pairCountAddress +---------->      count |
   *        pairsPointerAddress +---------->      pairs |-----+
   *                                    +---------------+     |
   * pairsArray(Address|Offset) +-----> +---------------+ <--+
   *                                    |    MatchPair  |
   *     firstMatchStartAddress +---------->      start | <--+
   *                                    |         limit |    |
   *                                    +---------------+    |
   *                                           .             |
   *                                           .  Reserved space for
   *                                           .  RegExpObject::MaxPairCount
   *                                           .  MatchPair objects
   *                                           .             |
   *                                    +---------------+    |
   *                                    |    MatchPair  |    |
   *                                    |         start |    |
   *                                    |         limit | <--+
   *                                    +---------------+
   */

  // The InputOutputData, MatchPairs header and MatchPair array are laid out
  // consecutively in the caller-reserved stack space.
  int32_t ioOffset = inputOutputDataStartOffset;
  int32_t matchPairsOffset = ioOffset + int32_t(sizeof(InputOutputData));
  int32_t pairsArrayOffset = matchPairsOffset + int32_t(sizeof(MatchPairs));

  Address inputStartAddress(FramePointer,
                            ioOffset + InputOutputData::offsetOfInputStart());
  Address inputEndAddress(FramePointer,
                          ioOffset + InputOutputData::offsetOfInputEnd());
  Address startIndexAddress(FramePointer,
                            ioOffset + InputOutputData::offsetOfStartIndex());
  Address matchesAddress(FramePointer,
                         ioOffset + InputOutputData::offsetOfMatches());

  Address matchPairsAddress(FramePointer, matchPairsOffset);
  Address pairCountAddress(FramePointer,
                           matchPairsOffset + MatchPairs::offsetOfPairCount());
  Address pairsPointerAddress(FramePointer,
                              matchPairsOffset + MatchPairs::offsetOfPairs());

  Address pairsArrayAddress(FramePointer, pairsArrayOffset);
  Address firstMatchStartAddress(FramePointer,
                                 pairsArrayOffset + MatchPair::offsetOfStart());

  // First, fill in a skeletal MatchPairs instance on the stack. This will be
  // passed to the OOL stub in the caller if we aren't able to execute the
  // RegExp inline, and that stub needs to be able to determine whether the
  // execution finished successfully.

  // Initialize MatchPairs::pairCount to 1. The correct value can only
  // be determined after loading the RegExpShared. If the RegExpShared
  // has Kind::Atom, this is the correct pairCount.
  masm.store32(Imm32(1), pairCountAddress);

  // Initialize MatchPairs::pairs pointer
  masm.computeEffectiveAddress(pairsArrayAddress, temp1);
  masm.storePtr(temp1, pairsPointerAddress);

  // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch
  masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);

  // Determine the set of volatile inputs to save when calling into C++ or
  // regexp code.
  LiveGeneralRegisterSet volatileRegs;
  if (lastIndex.volatile_()) {
    volatileRegs.add(lastIndex);
  }
  if (input.volatile_()) {
    volatileRegs.add(input);
  }
  if (regexp.volatile_()) {
    volatileRegs.add(regexp);
  }

  // Ensure the input string is not a rope: regexp execution requires flat
  // (linear) characters. Ropes are linearized via a pure C++ helper; a null
  // result means linearization failed and we bail to |failure|.
  Label isLinear;
  masm.branchIfNotRope(input, &isLinear);
  {
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSString*);
    masm.setupUnalignedABICall(temp1);
    masm.passABIArg(input);
    masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();

    MOZ_ASSERT(!volatileRegs.has(temp1));
    masm.storeCallPointerResult(temp1);
    masm.PopRegsInMask(volatileRegs);

    masm.branchTestPtr(Assembler::Zero, temp1, temp1, failure);
  }
  masm.bind(&isLinear);

  // Load the RegExpShared. An undefined slot means the regexp has not been
  // compiled yet, so fall back to the OOL path.
  Register regexpReg = temp1;
  Address sharedSlot = Address(
      regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
  masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
  masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);

  // Handle Atom matches: patterns that are plain strings are matched with a
  // direct C++ call instead of compiled regexp code.
  Label notAtom, checkSuccess;
  masm.branchPtr(Assembler::Equal,
                 Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
                 ImmWord(0), &notAtom);
  {
    masm.computeEffectiveAddress(matchPairsAddress, temp3);

    masm.PushRegsInMask(volatileRegs);
    using Fn = RegExpRunStatus (*)(RegExpShared* re, JSLinearString* input,
                                   size_t start, MatchPairs* matchPairs);
    masm.setupUnalignedABICall(temp2);
    masm.passABIArg(regexpReg);
    masm.passABIArg(input);
    masm.passABIArg(lastIndex);
    masm.passABIArg(temp3);
    masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();

    MOZ_ASSERT(!volatileRegs.has(temp1));
    masm.storeCallInt32Result(temp1);
    masm.PopRegsInMask(volatileRegs);

    masm.jump(&checkSuccess);
  }
  masm.bind(&notAtom);

  // Don't handle regexps with too many capture pairs.
  masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
  masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
                failure);

  // Fill in the pair count in the MatchPairs on the stack.
  masm.store32(temp2, pairCountAddress);

  // Load code pointer and length of input (in bytes).
  // Store the input start in the InputOutputData.
  Register codePointer = temp1;  // Note: temp1 was previously regexpReg.
  Register byteLength = temp3;
  {
    Label isLatin1, done;
    masm.loadStringLength(input, byteLength);

    masm.branchLatin1String(input, &isLatin1);

    // Two-byte input: byte length is 2x the char count.
    masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
    masm.storePtr(temp2, inputStartAddress);
    masm.loadPtr(
        Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
        codePointer);
    masm.lshiftPtr(Imm32(1), byteLength);
    masm.jump(&done);

    // Latin1 input: one byte per char.
    masm.bind(&isLatin1);
    masm.loadStringChars(input, temp2, CharEncoding::Latin1);
    masm.storePtr(temp2, inputStartAddress);
    masm.loadPtr(
        Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
        codePointer);

    masm.bind(&done);

    // Store end pointer
    masm.addPtr(byteLength, temp2);
    masm.storePtr(temp2, inputEndAddress);
  }

  // Guard that the RegExpShared has been compiled for this type of input.
  // If it has not been compiled, we fall back to the OOL case, which will
  // do a VM call into the interpreter.
  // TODO: add an interpreter trampoline?
  masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
  masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);

  // Finish filling in the InputOutputData instance on the stack
  masm.computeEffectiveAddress(matchPairsAddress, temp2);
  masm.storePtr(temp2, matchesAddress);
  masm.storePtr(lastIndex, startIndexAddress);

  // Execute the RegExp. The compiled regexp code takes a pointer to the
  // InputOutputData as its sole argument.
  masm.computeEffectiveAddress(
      Address(FramePointer, inputOutputDataStartOffset), temp2);
  masm.PushRegsInMask(volatileRegs);
  masm.setupUnalignedABICall(temp3);
  masm.passABIArg(temp2);
  masm.callWithABI(codePointer);
  masm.storeCallInt32Result(temp1);
  masm.PopRegsInMask(volatileRegs);

  // Both the atom path and the compiled-code path land here with the
  // RegExpRunStatus in temp1.
  masm.bind(&checkSuccess);
  masm.branch32(Assembler::Equal, temp1,
                Imm32(int32_t(RegExpRunStatus::Success_NotFound)), notFound);
  masm.branch32(Assembler::Equal, temp1, Imm32(int32_t(RegExpRunStatus::Error)),
                failure);

  // Lazily update the RegExpStatics.
  size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                  RegExpRealm::offsetOfRegExpStatics();
  masm.loadGlobalObjectData(temp1);
  masm.loadPtr(Address(temp1, offset), temp1);
  UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
                      initialStringHeap, volatileRegs);

  return true;
}
// Forward declaration (definition appears later in this file); used by
// CreateDependentString::generate below to copy match characters into a
// newly allocated inline string.
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding encoding,
                            size_t maximumLength = SIZE_MAX);
// Helper emitting the code that materializes a single match substring
// (thin/fat inline string, dependent string, static string, empty string or
// the base string itself, depending on length and offsets).
class CreateDependentString {
  CharEncoding encoding_;  // Latin-1 or Two-Byte; fixed per instance.
  Register string_;        // Receives the resulting JSString*.
  Register temp1_;
  Register temp2_;
  Label* failure_;         // Jumped to if allocation fails in the fallback.

  // Allocation strategies; each has its own OOL fallback/join label pair.
  enum class FallbackKind : uint8_t {
    InlineString,
    FatInlineString,
    NotInlineString,
    Count
  };
  mozilla::EnumeratedArray<FallbackKind, Label, size_t(FallbackKind::Count)>
      fallbacks_, joins_;

 public:
  CreateDependentString(CharEncoding encoding, Register string, Register temp1,
                        Register temp2, Label* failure)
      : encoding_(encoding),
        string_(string),
        temp1_(temp1),
        temp2_(temp2),
        failure_(failure) {}

  Register string() const { return string_; }
  CharEncoding encoding() const { return encoding_; }

  // Generate code that creates DependentString.
  // Caller should call generateFallback after masm.ret(), to generate
  // fallback path.
  void generate(MacroAssembler& masm, const JSAtomState& names,
                CompileRuntime* runtime, Register base,
                BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
                gc::Heap initialStringHeap);

  // Generate fallback path for creating DependentString.
  void generateFallback(MacroAssembler& masm);
};
void CreateDependentString::generate(MacroAssembler& masm,
                                     const JSAtomState& names,
                                     CompileRuntime* runtime, Register base,
                                     BaseIndex startIndexAddress,
                                     BaseIndex limitIndexAddress,
                                     gc::Heap initialStringHeap) {
  JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // Allocate a new GC string of the given kind, binding the shared join
  // label so the OOL fallback (generateFallback) can resume here, and store
  // the appropriate string flags.
  auto newGCString = [&](FallbackKind kind) {
    uint32_t flags = kind == FallbackKind::InlineString
                         ? JSString::INIT_THIN_INLINE_FLAGS
                     : kind == FallbackKind::FatInlineString
                         ? JSString::INIT_FAT_INLINE_FLAGS
                         : JSString::INIT_DEPENDENT_FLAGS;
    if (encoding_ == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }

    if (kind != FallbackKind::FatInlineString) {
      masm.newGCString(string_, temp2_, initialStringHeap, &fallbacks_[kind]);
    } else {
      masm.newGCFatInlineString(string_, temp2_, initialStringHeap,
                                &fallbacks_[kind]);
    }
    masm.bind(&joins_[kind]);
    masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
  };

  // Compute the string length: limit - start (temp1_ holds the length, temp2_
  // the start index, for the remainder of this function).
  masm.load32(startIndexAddress, temp2_);
  masm.load32(limitIndexAddress, temp1_);
  masm.sub32(temp2_, temp1_);

  Label done, nonEmpty;

  // Zero length matches use the empty string.
  masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
  masm.movePtr(ImmGCPtr(names.empty_), string_);
  masm.jump(&done);

  masm.bind(&nonEmpty);

  // Complete matches (start == 0 and length == base length) use the base
  // string itself; no allocation needed.
  Label nonBaseStringMatch;
  masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
  masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
                temp1_, &nonBaseStringMatch);
  masm.movePtr(base, string_);
  masm.jump(&done);

  masm.bind(&nonBaseStringMatch);

  Label notInline;

  int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
                                ? JSFatInlineString::MAX_LENGTH_LATIN1
                                : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
  masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
  {
    // Make a thin or fat inline string.
    Label stringAllocated, fatInline;

    int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
                                      ? JSThinInlineString::MAX_LENGTH_LATIN1
                                      : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
    masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
                  &fatInline);
    if (encoding_ == CharEncoding::Latin1) {
      // One character Latin-1 strings can be loaded directly from the
      // static strings table.
      Label thinInline;
      masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
      {
        static_assert(
            StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
            "Latin-1 strings can be loaded from static strings");

        masm.loadStringChars(base, temp1_, encoding_);
        masm.loadChar(temp1_, temp2_, temp1_, encoding_);

        masm.lookupStaticString(temp1_, string_, runtime->staticStrings());

        masm.jump(&done);
      }
      masm.bind(&thinInline);
    }
    {
      newGCString(FallbackKind::InlineString);
      masm.jump(&stringAllocated);
    }
    masm.bind(&fatInline);
    { newGCString(FallbackKind::FatInlineString); }
    masm.bind(&stringAllocated);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    // CopyStringChars needs an extra scratch register; spill |string_| and
    // |base| so they can be reused during the copy.
    masm.push(string_);
    masm.push(base);

    MOZ_ASSERT(startIndexAddress.base == FramePointer,
               "startIndexAddress is still valid after stack pushes");

    // Load chars pointer for the new string.
    masm.loadInlineStringCharsForStore(string_, string_);

    // Load the source characters pointer.
    masm.loadStringChars(base, temp2_, encoding_);
    masm.load32(startIndexAddress, base);
    masm.addToCharPtr(temp2_, base, encoding_);

    CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);

    masm.pop(base);
    masm.pop(string_);

    masm.jump(&done);
  }

  masm.bind(&notInline);

  {
    // Make a dependent string.
    // Warning: string may be tenured (if the fallback case is hit), so
    // stores into it must be post barriered.
    newGCString(FallbackKind::NotInlineString);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    masm.loadNonInlineStringChars(base, temp1_, encoding_);
    masm.load32(startIndexAddress, temp2_);
    masm.addToCharPtr(temp1_, temp2_, encoding_);
    masm.storeNonInlineStringChars(temp1_, string_);
    masm.storeDependentStringBase(base, string_);
    masm.movePtr(base, temp1_);

    // Follow any base pointer if the input is itself a dependent string.
    // Watch for undepended strings, which have a base pointer but don't
    // actually share their characters with it.
    Label noBase;
    masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
    masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
    masm.branchTest32(Assembler::Zero, temp2_, Imm32(JSString::DEPENDENT_BIT),
                      &noBase);
    masm.loadDependentStringBase(base, temp1_);
    masm.storeDependentStringBase(temp1_, string_);
    masm.bind(&noBase);

    // Post-barrier the base store, whether it was the direct or indirect
    // base (both will end up in temp1 here).
    masm.branchPtrInNurseryChunk(Assembler::Equal, string_, temp2_, &done);
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp1_, temp2_, &done);

    LiveRegisterSet regsToSave(RegisterSet::Volatile());
    regsToSave.takeUnchecked(temp1_);
    regsToSave.takeUnchecked(temp2_);

    masm.PushRegsInMask(regsToSave);

    masm.mov(ImmPtr(runtime), temp1_);

    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
    masm.setupUnalignedABICall(temp2_);
    masm.passABIArg(temp1_);
    masm.passABIArg(string_);
    masm.callWithABI<Fn, PostWriteBarrier>();

    masm.PopRegsInMask(regsToSave);
  }

  masm.bind(&done);
}
void CreateDependentString::generateFallback(MacroAssembler& masm) {
  JitSpew(JitSpew_Codegen,
          "# Emitting CreateDependentString fallback (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // Save all volatile registers except the ones we clobber on purpose:
  // |string_| receives the result and |temp2_| is reloaded at the join point.
  LiveRegisterSet regsToSave(RegisterSet::Volatile());
  regsToSave.takeUnchecked(string_);
  regsToSave.takeUnchecked(temp2_);

  // Emit one OOL allocation path per FallbackKind; each jumps back to its
  // corresponding join label bound in newGCString (see generate()).
  for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
    masm.bind(&fallbacks_[kind]);

    masm.PushRegsInMask(regsToSave);

    using Fn = void* (*)(JSContext* cx);
    masm.setupUnalignedABICall(string_);
    masm.loadJSContext(string_);
    masm.passABIArg(string_);
    if (kind == FallbackKind::FatInlineString) {
      masm.callWithABI<Fn, AllocateFatInlineString>();
    } else {
      masm.callWithABI<Fn, AllocateDependentString>();
    }
    masm.storeCallPointerResult(string_);

    masm.PopRegsInMask(regsToSave);

    // A null result means allocation failed; bail to the caller's failure
    // path.
    masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);

    masm.jump(&joins_[kind]);
  }
}
// Generate the RegExpMatcher and RegExpExecMatch stubs. These are very similar,
// but RegExpExecMatch also has to load and update .lastIndex for global/sticky
// regular expressions.
static JitCode* GenerateRegExpMatchStubShared(JSContext* cx,
                                              gc::Heap initialStringHeap,
                                              bool isExecMatch) {
  if (isExecMatch) {
    JitSpew(JitSpew_Codegen, "# Emitting RegExpExecMatch stub");
  } else {
    JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
  }

  // |initialStringHeap| could be stale after a GC.
  JS::AutoCheckCannotGC nogc(cx);

  Register regexp = RegExpMatcherRegExpReg;
  Register input = RegExpMatcherStringReg;
  Register lastIndex = RegExpMatcherLastIndexReg;
  ValueOperand result = JSReturnOperand;

  // We are free to clobber all registers, as LRegExpMatcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();
  Register maybeTemp4 = InvalidReg;
  if (!regs.empty()) {
    // There are not enough registers on x86.
    maybeTemp4 = regs.takeAny();
  }
  Register maybeTemp5 = InvalidReg;
  if (!regs.empty()) {
    // There are not enough registers on x86.
    maybeTemp5 = regs.takeAny();
  }

  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "GenerateRegExpMatchStubShared");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // For ExecMatch, read .lastIndex up front; a failed read (e.g. out of
  // range) jumps to |notFoundZeroLastIndex| which resets it to 0.
  Label notFoundZeroLastIndex;
  if (isExecMatch) {
    masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
  }

  // The InputOutputData is placed above the frame pointer and return address on
  // the stack.
  int32_t inputOutputDataStartOffset = 2 * sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // If a regexp has named captures, fall back to the OOL stub, which
  // will end up calling CreateRegExpMatchResults.
  Register shared = temp2;
  masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
                                          RegExpObject::SHARED_SLOT)),
                      shared, JSVAL_TYPE_PRIVATE_GCTHING);
  masm.branchPtr(Assembler::NotEqual,
                 Address(shared, RegExpShared::offsetOfGroupsTemplate()),
                 ImmWord(0), &oolEntry);

  // Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
  masm.branchTest32(Assembler::NonZero,
                    Address(shared, RegExpShared::offsetOfFlags()),
                    Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);

  Address pairCountAddress =
      RegExpPairCountAddress(masm, inputOutputDataStartOffset);

  // Construct the result.
  Register object = temp1;
  {
    // In most cases, the array will have just 1-2 elements, so we optimize for
    // that by emitting separate code paths for capacity 2/6/14 (= 4/8/16 slots
    // because two slots are used for the elements header).

    // Load the array length in temp2 and the shape in temp3.
    Label allocated;
    masm.load32(pairCountAddress, temp2);
    size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                    RegExpRealm::offsetOfNormalMatchResultShape();
    masm.loadGlobalObjectData(temp3);
    masm.loadPtr(Address(temp3, offset), temp3);

    auto emitAllocObject = [&](size_t elementCapacity) {
      gc::AllocKind kind = GuessArrayGCKind(elementCapacity);
      MOZ_ASSERT(CanChangeToBackgroundAllocKind(kind, &ArrayObject::class_));
      kind = ForegroundToBackgroundAllocKind(kind);

#ifdef DEBUG
      // Assert all of the available slots are used for |elementCapacity|
      // elements.
      size_t usedSlots = ObjectElements::VALUES_PER_HEADER + elementCapacity;
      MOZ_ASSERT(usedSlots == GetGCKindSlots(kind));
#endif

      constexpr size_t numUsedDynamicSlots =
          RegExpRealm::MatchResultObjectSlotSpan;
      constexpr size_t numDynamicSlots =
          RegExpRealm::MatchResultObjectNumDynamicSlots;
      constexpr size_t arrayLength = 1;
      masm.createArrayWithFixedElements(object, temp3, temp2, temp3,
                                        arrayLength, elementCapacity,
                                        numUsedDynamicSlots, numDynamicSlots,
                                        kind, gc::Heap::Default, &oolEntry);
    };

    Label moreThan2;
    masm.branch32(Assembler::Above, temp2, Imm32(2), &moreThan2);
    emitAllocObject(2);
    masm.jump(&allocated);

    Label moreThan6;
    masm.bind(&moreThan2);
    masm.branch32(Assembler::Above, temp2, Imm32(6), &moreThan6);
    emitAllocObject(6);
    masm.jump(&allocated);

    masm.bind(&moreThan6);
    static_assert(RegExpObject::MaxPairCount == 14);
    emitAllocObject(RegExpObject::MaxPairCount);

    masm.bind(&allocated);
  }

  // clang-format off
  /*
   * [SMDOC] Stack layout for the RegExpMatcher stub
   *
   *                                    +---------------+
   *               FramePointer +-----> |Caller-FramePtr|
   *                                    +---------------+
   *                                    |Return-Address |
   *                                    +---------------+
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *                                    +---------------+
   *                                    +---------------+
   *                                    |  MatchPairs   |
   *           pairsCountAddress +----------->  count   |
   *                                    |       pairs   |
   *                                    |               |
   *                                    +---------------+
   *     pairsVectorStartOffset +-----> +---------------+
   *                                    |   MatchPair   |
   *             matchPairStart +------------>  start   |  <-------+
   *             matchPairLimit +------------>  limit   |          | Reserved space for
   *                                    +---------------+          | `RegExpObject::MaxPairCount`
   *                                           .                   | MatchPair objects.
   *                                           .                   |
   *                                           .                   | `count` objects will be
   *                                    +---------------+          | initialized and can be
   *                                    |   MatchPair   |          | accessed below.
   *                                    |       start   |  <-------+
   *                                    |       limit   |
   *                                    +---------------+
   */
  // clang-format on

  static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
                "MatchPair consists of two int32 values representing the start"
                "and the end offset of the match");

  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);

  // Incremented by one below for each match pair.
  Register matchIndex = temp2;
  masm.move32(Imm32(0), matchIndex);

  // The element in which to store the result of the current match.
  size_t elementsOffset = NativeObject::offsetOfFixedElements();
  BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);

  // The current match pair's "start" and "limit" member.
  BaseIndex matchPairStart(FramePointer, matchIndex, TimesEight,
                           pairsVectorStartOffset + MatchPair::offsetOfStart());
  BaseIndex matchPairLimit(FramePointer, matchIndex, TimesEight,
                           pairsVectorStartOffset + MatchPair::offsetOfLimit());

  Label* depStrFailure = &oolEntry;
  Label restoreRegExpAndLastIndex;

  Register temp4;
  if (maybeTemp4 == InvalidReg) {
    depStrFailure = &restoreRegExpAndLastIndex;

    // We don't have enough registers for a fourth temporary. Reuse |regexp|
    // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
    masm.push(regexp);
    temp4 = regexp;
  } else {
    temp4 = maybeTemp4;
  }

  Register temp5;
  if (maybeTemp5 == InvalidReg) {
    depStrFailure = &restoreRegExpAndLastIndex;

    // We don't have enough registers for a fifth temporary. Reuse |lastIndex|
    // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
    masm.push(lastIndex);
    temp5 = lastIndex;
  } else {
    temp5 = maybeTemp5;
  }

  auto maybeRestoreRegExpAndLastIndex = [&]() {
    if (maybeTemp5 == InvalidReg) {
      masm.pop(lastIndex);
    }
    if (maybeTemp4 == InvalidReg) {
      masm.pop(regexp);
    }
  };

  // Loop to construct the match strings. There are two different loops,
  // depending on whether the input is a Two-Byte or a Latin-1 string.
  CreateDependentString depStrs[]{
      {CharEncoding::TwoByte, temp3, temp4, temp5, depStrFailure},
      {CharEncoding::Latin1, temp3, temp4, temp5, depStrFailure},
  };

  {
    Label isLatin1, done;
    masm.branchLatin1String(input, &isLatin1);

    for (auto& depStr : depStrs) {
      if (depStr.encoding() == CharEncoding::Latin1) {
        masm.bind(&isLatin1);
      }

      Label matchLoop;
      masm.bind(&matchLoop);

      static_assert(MatchPair::NoMatch == -1,
                    "MatchPair::start is negative if no match was found");

      Label isUndefined, storeDone;
      masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
                    &isUndefined);
      {
        depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
                        input, matchPairStart, matchPairLimit,
                        initialStringHeap);

        // Storing into nursery-allocated results object's elements; no post
        // barrier.
        masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
        masm.jump(&storeDone);
      }
      masm.bind(&isUndefined);
      { masm.storeValue(UndefinedValue(), objectMatchElement); }
      masm.bind(&storeDone);

      masm.add32(Imm32(1), matchIndex);
      masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
                    &done);
      masm.jump(&matchLoop);
    }

#ifdef DEBUG
    masm.assumeUnreachable("The match string loop doesn't fall through.");
#endif

    masm.bind(&done);
  }

  maybeRestoreRegExpAndLastIndex();

  // Fill in the rest of the output object.
  masm.store32(
      matchIndex,
      Address(object,
              elementsOffset + ObjectElements::offsetOfInitializedLength()));
  masm.store32(
      matchIndex,
      Address(object, elementsOffset + ObjectElements::offsetOfLength()));

  Address firstMatchPairStartAddress(
      FramePointer, pairsVectorStartOffset + MatchPair::offsetOfStart());
  Address firstMatchPairLimitAddress(
      FramePointer, pairsVectorStartOffset + MatchPair::offsetOfLimit());

  static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
                "First slot holds the 'index' property");
  static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
                "Second slot holds the 'input' property");

  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);

  masm.load32(firstMatchPairStartAddress, temp3);
  masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));

  // No post barrier needed (address is within nursery object.)
  masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));

  // For the ExecMatch stub, if the regular expression is global or sticky, we
  // have to update its .lastIndex slot.
  if (isExecMatch) {
    MOZ_ASSERT(object != lastIndex);
    Label notGlobalOrSticky;
    masm.branchTest32(Assembler::Zero, flagsSlot,
                      Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                      &notGlobalOrSticky);
    masm.load32(firstMatchPairLimitAddress, lastIndex);
    masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
    masm.bind(&notGlobalOrSticky);
  }

  // All done!
  masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
  masm.pop(FramePointer);
  masm.ret();

  // No-match path: return null. For global/sticky ExecMatch, .lastIndex is
  // reset to 0 per spec.
  masm.bind(&notFound);
  if (isExecMatch) {
    Label notGlobalOrSticky;
    masm.branchTest32(Assembler::Zero, flagsSlot,
                      Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                      &notGlobalOrSticky);
    masm.bind(&notFoundZeroLastIndex);
    masm.storeValue(Int32Value(0), lastIndexSlot);
    masm.bind(&notGlobalOrSticky);
  }
  masm.moveValue(NullValue(), result);
  masm.pop(FramePointer);
  masm.ret();

  // Fallback paths for CreateDependentString.
  for (auto& depStr : depStrs) {
    depStr.generateFallback(masm);
  }

  // Fall-through to the ool entry after restoring the registers.
  masm.bind(&restoreRegExpAndLastIndex);
  maybeRestoreRegExpAndLastIndex();

  // Use an undefined value to signal to the caller that the OOL stub needs to
  // be called.
  masm.bind(&oolEntry);
  masm.moveValue(UndefinedValue(), result);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  const char* name = isExecMatch ? "RegExpExecMatchStub" : "RegExpMatcherStub";
  CollectPerfSpewerJitCodeProfile(code, name);
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, name);
#endif

  return code;
}
2783 JitCode* JitZone::generateRegExpMatcherStub(JSContext* cx) {
2784 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2785 /* isExecMatch = */ false);
2788 JitCode* JitZone::generateRegExpExecMatchStub(JSContext* cx) {
2789 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2790 /* isExecMatch = */ true);
2793 class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator> {
2794 LRegExpMatcher* lir_;
2796 public:
2797 explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) {}
2799 void accept(CodeGenerator* codegen) override {
2800 codegen->visitOutOfLineRegExpMatcher(this);
2803 LRegExpMatcher* lir() const { return lir_; }
void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) {
  LRegExpMatcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Pick any free register for the MatchPairs pointer argument.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // The MatchPairs struct sits just above the InputOutputData in the stack
  // space reserved by visitRegExpMatcher.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call, and the live
  // registers are already saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, HandleObject regexp, HandleString input,
               int32_t lastIndex, MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpMatcherRaw>(lir);

  masm.jump(ool->rejoin());
}
void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
  // The shared stub expects its operands in fixed registers and returns the
  // result in JSReturnOperand.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // The input registers must not alias the return value registers.
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
#endif

  // Reserve the stack space for the InputOutputData/MatchPairs that both the
  // stub and the OOL path use.
  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpMatcher* ool = new (alloc()) OutOfLineRegExpMatcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpMatcherStub =
      jitZone->regExpMatcherStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpMatcherStub);
  // The stub returns UndefinedValue to request the OOL (VM call) path.
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
// Out-of-line path for LRegExpExecMatch: retries the match with a VM call
// when the inline stub cannot handle it.
class OutOfLineRegExpExecMatch : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpExecMatch* lir_;

 public:
  explicit OutOfLineRegExpExecMatch(LRegExpExecMatch* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpExecMatch(this);
  }

  LRegExpExecMatch* lir() const { return lir_; }
};
// Out-of-line fallback for LRegExpExecMatch: call into the VM, passing a
// pointer to the stack-allocated MatchPairs reserved by the inline path.
void CodeGenerator::visitOutOfLineRegExpExecMatch(
    OutOfLineRegExpExecMatch* ool) {
  LRegExpExecMatch* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // The MatchPairs live just above the InputOutputData on the stack.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  // Push arguments in reverse order of the VM function's signature.
  pushArg(temp);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
               MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpBuiltinExecMatchFromJit>(lir);
  masm.jump(ool->rejoin());
}
// Emit the inline fast path for RegExp.prototype.exec-style matching: call the
// per-zone RegExpExecMatch stub and fall back to the OOL VM call when the stub
// returns the |undefined| sentinel.
void CodeGenerator::visitRegExpExecMatch(LRegExpExecMatch* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // The stub's fixed input registers must be distinct from the JS return
  // registers; the OOL path still reads the inputs.
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
#endif

  // Reserve stack for the InputOutputData and MatchPairs used by the stub and
  // the OOL path.
  masm.reserveStack(RegExpReservedStack);

  auto* ool = new (alloc()) OutOfLineRegExpExecMatch(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpExecMatchStub =
      jitZone->regExpExecMatchStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpExecMatchStub);
  // |undefined| is the stub's "retry in C++" sentinel.
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());

  masm.bind(ool->rejoin());
  masm.freeStack(RegExpReservedStack);
}
// Generate the per-zone RegExpSearcher stub. On success it returns the match
// start index in ReturnReg and stores the match limit to
// cx->regExpSearcherLastLimit; otherwise it returns
// RegExpSearcherResultNotFound, or RegExpSearcherResultFailed when the caller
// has to retry in C++.
JitCode* JitZone::generateRegExpSearcherStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");

  Register regexp = RegExpSearcherRegExpReg;
  Register input = RegExpSearcherStringReg;
  Register lastIndex = RegExpSearcherLastIndexReg;
  Register result = ReturnReg;

  // We are free to clobber all registers, as LRegExpSearcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpSearcherStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

#ifdef DEBUG
  // Store sentinel value to cx->regExpSearcherLastLimit.
  // See comment in RegExpSearcherImpl.
  masm.loadJSContext(temp1);
  masm.store32(Imm32(RegExpSearcherLastLimitSentinel),
               Address(temp1, JSContext::offsetOfRegExpSearcherLastLimit()));
#endif

  // The InputOutputData is placed above the frame pointer and return address on
  // the stack.
  int32_t inputOutputDataStartOffset = 2 * sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // clang-format off
  /*
   * [SMDOC] Stack layout for the RegExpSearcher stub
   *
   *                                    +---------------+
   *               FramePointer +-----> |Caller-FramePtr|
   *                                    +---------------+
   *                                    |Return-Address |
   *                                    +---------------+
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *                                    +---------------+
   *                                    +---------------+
   *                                    |  MatchPairs   |
   *                                    |       count   |
   *                                    |       pairs   |
   *                                    |               |
   *                                    +---------------+
   *     pairsVectorStartOffset +-----> +---------------+
   *                                    |   MatchPair   |
   *             matchPairStart +------------> start    |  <-------+
   *             matchPairLimit +------------> limit    |          | Reserved space for
   *                                    +---------------+          | `RegExpObject::MaxPairCount`
   *                                           .                   | MatchPair objects.
   *                                           .                   |
   *                                           .                   | Only a single object will
   *                                    +---------------+          | be initialized and can be
   *                                    |   MatchPair   |          | accessed below.
   *                                    |     start     |  <-------+
   *                                    |     limit     |
   *                                    +---------------+
   */
  // clang-format on

  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairStart(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfStart());
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Store match limit to cx->regExpSearcherLastLimit and return the index.
  // |input| is clobbered here to hold the JSContext; it is no longer needed.
  masm.load32(matchPairLimit, result);
  masm.loadJSContext(input);
  masm.store32(result,
               Address(input, JSContext::offsetOfRegExpSearcherLastLimit()));
  masm.load32(matchPairStart, result);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&notFound);
  masm.move32(Imm32(RegExpSearcherResultNotFound), result);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpSearcherResultFailed), result);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpSearcherStub");
#endif

  return code;
}
// Out-of-line path for LRegExpSearcher: retries the search with a VM call
// when the inline stub fails.
class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpSearcher* lir_;

 public:
  explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpSearcher(this);
  }

  LRegExpSearcher* lir() const { return lir_; }
};
// Out-of-line fallback for LRegExpSearcher: call into the VM, passing a
// pointer to the stack-allocated MatchPairs reserved by the inline path.
void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) {
  LRegExpSearcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // The MatchPairs live just above the InputOutputData on the stack.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  // Push arguments in reverse order of the VM function's signature.
  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers
  // have already been saved by the register allocator.
  using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
                      int32_t lastIndex, MatchPairs* pairs, int32_t* result);
  callVM<Fn, RegExpSearcherRaw>(lir);

  masm.jump(ool->rejoin());
}
// Emit the inline fast path for RegExpSearcher: call the per-zone stub and
// fall back to the OOL VM call when it returns RegExpSearcherResultFailed.
void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpSearcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpSearcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpSearcherLastIndexReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // Inputs must not alias the result register; the OOL path still reads them.
  static_assert(RegExpSearcherRegExpReg != ReturnReg);
  static_assert(RegExpSearcherStringReg != ReturnReg);
  static_assert(RegExpSearcherLastIndexReg != ReturnReg);

  // Reserve stack for the InputOutputData and MatchPairs used by the stub and
  // the OOL path.
  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpSearcher* ool = new (alloc()) OutOfLineRegExpSearcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpSearcherStub =
      jitZone->regExpSearcherStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpSearcherStub);
  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
                ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
3131 void CodeGenerator::visitRegExpSearcherLastLimit(
3132 LRegExpSearcherLastLimit* lir) {
3133 Register result = ToRegister(lir->output());
3134 Register scratch = ToRegister(lir->temp0());
3136 masm.loadAndClearRegExpSearcherLastLimit(result, scratch);
// Generate the per-zone RegExpExecTest stub. Returns 1/0 in ReturnReg for
// found/not-found (updating the regexp's .lastIndex slot when the global or
// sticky flag is set), or RegExpExecTestResultFailed when the caller has to
// retry in C++.
JitCode* JitZone::generateRegExpExecTestStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpExecTest stub");

  Register regexp = RegExpExecTestRegExpReg;
  Register input = RegExpExecTestStringReg;
  Register result = ReturnReg;

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpExecTestStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // We are free to clobber all registers, as LRegExpExecTest is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);

  // Ensure lastIndex != result.
  regs.take(result);
  Register lastIndex = regs.takeAny();
  regs.add(result);
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());

  masm.reserveStack(RegExpReservedStack);

  // Load lastIndex and skip RegExp execution if needed.
  Label notFoundZeroLastIndex;
  masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);

  // In visitRegExpMatcher and visitRegExpSearcher, we reserve stack space
  // before calling the stub. For RegExpExecTest we call the stub before
  // reserving stack space, so the offset of the InputOutputData relative to the
  // frame pointer is negative.
  constexpr int32_t inputOutputDataStartOffset = -int32_t(RegExpReservedStack);

  // On ARM64, load/store instructions can encode an immediate offset in the
  // range [-256, 4095]. If we ever fail this assertion, it would be more
  // efficient to store the data above the frame pointer similar to
  // RegExpMatcher and RegExpSearcher.
  static_assert(inputOutputDataStartOffset >= -256);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // Set `result` to true/false to indicate found/not-found, or to
  // RegExpExecTestResultFailed if we have to retry in C++. If the regular
  // expression is global or sticky, we also have to update its .lastIndex slot.

  Label done;
  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Found: result = 1; update .lastIndex to the match limit if global/sticky.
  masm.move32(Imm32(1), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.load32(matchPairLimit, lastIndex);
  masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
  masm.jump(&done);

  // Not found: result = 0; reset .lastIndex to 0 if global/sticky.
  masm.bind(&notFound);
  masm.move32(Imm32(0), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  masm.bind(&notFoundZeroLastIndex);
  masm.move32(Imm32(0), result);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpExecTestResultFailed), result);

  masm.bind(&done);
  masm.freeStack(RegExpReservedStack);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpExecTestStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpExecTestStub");
#endif

  return code;
}
// Out-of-line path for LRegExpExecTest: retries the test with a VM call when
// the inline stub fails.
class OutOfLineRegExpExecTest : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpExecTest* lir_;

 public:
  explicit OutOfLineRegExpExecTest(LRegExpExecTest* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpExecTest(this);
  }

  LRegExpExecTest* lir() const { return lir_; }
};
// Out-of-line fallback for LRegExpExecTest: run the test via a VM call.
void CodeGenerator::visitOutOfLineRegExpExecTest(OutOfLineRegExpExecTest* ool) {
  LRegExpExecTest* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Push arguments in reverse order of the VM function's signature.
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn = bool (*)(JSContext* cx, Handle<RegExpObject*> regexp,
                      HandleString input, bool* result);
  callVM<Fn, RegExpBuiltinExecTestFromJit>(lir);

  masm.jump(ool->rejoin());
}
// Emit the inline fast path for RegExp.prototype.test-style matching: call
// the per-zone RegExpExecTest stub and fall back to the OOL VM call when it
// returns RegExpExecTestResultFailed.
void CodeGenerator::visitRegExpExecTest(LRegExpExecTest* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpExecTestRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpExecTestStringReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // Inputs must not alias the result register; the OOL path still reads them.
  static_assert(RegExpExecTestRegExpReg != ReturnReg);
  static_assert(RegExpExecTestStringReg != ReturnReg);

  auto* ool = new (alloc()) OutOfLineRegExpExecTest(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpExecTestStub =
      jitZone->regExpExecTestStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpExecTestStub);

  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpExecTestResultFailed),
                ool->entry());

  masm.bind(ool->rejoin());
}
3304 void CodeGenerator::visitRegExpHasCaptureGroups(LRegExpHasCaptureGroups* ins) {
3305 Register regexp = ToRegister(ins->regexp());
3306 Register input = ToRegister(ins->input());
3307 Register output = ToRegister(ins->output());
3309 using Fn =
3310 bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
3311 auto* ool = oolCallVM<Fn, js::RegExpHasCaptureGroups>(
3312 ins, ArgList(regexp, input), StoreRegisterTo(output));
3314 // Load RegExpShared in |output|.
3315 Label vmCall;
3316 masm.loadParsedRegExpShared(regexp, output, ool->entry());
3318 // Return true iff pairCount > 1.
3319 Label returnTrue;
3320 masm.branch32(Assembler::Above,
3321 Address(output, RegExpShared::offsetOfPairCount()), Imm32(1),
3322 &returnTrue);
3323 masm.move32(Imm32(0), output);
3324 masm.jump(ool->rejoin());
3326 masm.bind(&returnTrue);
3327 masm.move32(Imm32(1), output);
3329 masm.bind(ool->rejoin());
// Out-of-line path for LRegExpPrototypeOptimizable: computes the answer via
// an ABI call when the inline branch cannot prove optimizability.
class OutOfLineRegExpPrototypeOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpPrototypeOptimizable* ins_;

 public:
  explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
  }

  LRegExpPrototypeOptimizable* ins() const { return ins_; }
};
// Inline check whether the RegExp prototype is in its optimizable (unmodified)
// state; the out-of-line path re-checks via an ABI call.
void CodeGenerator::visitRegExpPrototypeOptimizable(
    LRegExpPrototypeOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  OutOfLineRegExpPrototypeOptimizable* ool =
      new (alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  const GlobalObject* global = gen->realm->maybeGlobal();
  MOZ_ASSERT(global);
  masm.branchIfNotRegExpPrototypeOptimizable(object, temp, global,
                                             ool->entry());
  // Fast path proved optimizable: result is true.
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}
// Out-of-line path: call RegExpPrototypeOptimizableRaw through the ABI and
// store its boolean result in |output|.
void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(
    OutOfLineRegExpPrototypeOptimizable* ool) {
  LRegExpPrototypeOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  // An ABI call clobbers volatile registers; save everything except the
  // output, which is overwritten below anyway.
  saveVolatile(output);

  using Fn = bool (*)(JSContext* cx, JSObject* proto);
  masm.setupAlignedABICall();
  masm.loadJSContext(output);  // reuse |output| as scratch for the JSContext
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}
// Out-of-line path for LRegExpInstanceOptimizable: computes the answer via an
// ABI call when the inline branch cannot prove optimizability.
class OutOfLineRegExpInstanceOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpInstanceOptimizable* ins_;

 public:
  explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpInstanceOptimizable(this);
  }

  LRegExpInstanceOptimizable* ins() const { return ins_; }
};
// Inline check whether a RegExp instance is in its optimizable (unmodified)
// state; the out-of-line path re-checks via an ABI call.
void CodeGenerator::visitRegExpInstanceOptimizable(
    LRegExpInstanceOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  OutOfLineRegExpInstanceOptimizable* ool =
      new (alloc()) OutOfLineRegExpInstanceOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  const GlobalObject* global = gen->realm->maybeGlobal();
  MOZ_ASSERT(global);
  masm.branchIfNotRegExpInstanceOptimizable(object, temp, global, ool->entry());
  // Fast path proved optimizable: result is true.
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}
// Out-of-line path: call RegExpInstanceOptimizableRaw through the ABI and
// store its boolean result in |output|.
void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(
    OutOfLineRegExpInstanceOptimizable* ool) {
  LRegExpInstanceOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register proto = ToRegister(ins->proto());
  Register output = ToRegister(ins->output());

  // An ABI call clobbers volatile registers; save everything except the
  // output, which is overwritten below anyway.
  saveVolatile(output);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
  masm.setupAlignedABICall();
  masm.loadJSContext(output);  // reuse |output| as scratch for the JSContext
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.passABIArg(proto);
  masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}
// Emit an inline linear scan over |str|'s characters (in the given encoding),
// leaving the index of the first '$' in |output|, or -1 if none is found.
// |len| holds the string length and must be > 0. |temp0| holds the chars
// pointer; |temp1| holds the current character.
static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
                                 Register len, Register temp0, Register temp1,
                                 Register output, CharEncoding encoding) {
#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
  masm.assumeUnreachable("Length should be greater than 0.");
  masm.bind(&ok);
#endif

  Register chars = temp0;
  masm.loadStringChars(str, chars, encoding);

  // |output| doubles as the loop index.
  masm.move32(Imm32(0), output);

  Label start, done;
  masm.bind(&start);

  Register currentChar = temp1;
  masm.loadChar(chars, output, currentChar, encoding);
  masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);

  masm.add32(Imm32(1), output);
  masm.branch32(Assembler::NotEqual, output, len, &start);

  // No '$' found: signal with -1.
  masm.move32(Imm32(-1), output);

  masm.bind(&done);
}
// Find the index of the first '$' in a string, dispatching on the string's
// character encoding. Ropes are handled by the OOL VM call, since their chars
// are not contiguous.
void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
  Register str = ToRegister(ins->str());
  Register output = ToRegister(ins->output());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register len = ToRegister(ins->temp2());

  using Fn = bool (*)(JSContext*, JSString*, int32_t*);
  OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
      ins, ArgList(str), StoreRegisterTo(output));

  masm.branchIfRope(str, ool->entry());
  masm.loadStringLength(str, len);

  Label isLatin1, done;
  masm.branchLatin1String(str, &isLatin1);
  {
    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                         CharEncoding::TwoByte);
    masm.jump(&done);
  }
  masm.bind(&isLatin1);
  {
    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                         CharEncoding::Latin1);
  }
  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// String.prototype.replace with string pattern/replacement: always a VM call;
// constant operands are pushed as GC pointers instead of registers.
void CodeGenerator::visitStringReplace(LStringReplace* lir) {
  // Arguments are pushed in reverse order of the VM function's signature.
  if (lir->replacement()->isConstant()) {
    pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->replacement()));
  }

  if (lir->pattern()->isConstant()) {
    pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->pattern()));
  }

  if (lir->string()->isConstant()) {
    pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->string()));
  }

  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
  if (lir->mir()->isFlatReplacement()) {
    callVM<Fn, StringFlatReplaceString>(lir);
  } else {
    callVM<Fn, StringReplace>(lir);
  }
}
// Attach an Ion binary-arith IC for a value-typed binary operation; the JSOp
// is recovered from the bytecode pc of the instruction's resume point.
void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::LhsIndex));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::RhsIndex));
  ValueOperand output = ToOutValue(lir);

  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOp::Add:
    case JSOp::Sub:
    case JSOp::Mul:
    case JSOp::Div:
    case JSOp::Mod:
    case JSOp::Pow:
    case JSOp::BitAnd:
    case JSOp::BitOr:
    case JSOp::BitXor:
    case JSOp::Lsh:
    case JSOp::Rsh:
    case JSOp::Ursh: {
      IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }
    default:
      MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
  }
}
// Attach an Ion compare IC for a boolean-producing binary comparison; the
// JSOp is recovered from the bytecode pc of the instruction's resume point.
void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::LhsIndex));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::RhsIndex));
  Register output = ToRegister(lir->output());

  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOp::Lt:
    case JSOp::Le:
    case JSOp::Gt:
    case JSOp::Ge:
    case JSOp::Eq:
    case JSOp::Ne:
    case JSOp::StrictEq:
    case JSOp::StrictNe: {
      IonCompareIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }
    default:
      MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
  }
}
// Attach an Ion unary-arith IC for a value-typed unary operation.
void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister input =
      TypedOrValueRegister(ToValue(lir, LUnaryCache::InputIndex));
  ValueOperand output = ToOutValue(lir);

  IonUnaryArithIC ic(liveRegs, input, output);
  addIC(lir, allocateIC(ic));
}
// Get or create the module meta object via a VM call.
void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
  pushArg(ImmPtr(lir->mir()->module()));

  using Fn = JSObject* (*)(JSContext*, HandleObject);
  callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
}
// Start a dynamic module import via a VM call. Arguments are pushed in
// reverse order of the VM function's signature.
void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
  pushArg(ToValue(lir, LDynamicImport::OptionsIndex));
  pushArg(ToValue(lir, LDynamicImport::SpecifierIndex));
  pushArg(ImmGCPtr(current->mir()->info().script()));

  using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
  callVM<Fn, js::StartDynamicModuleImport>(lir);
}
// Inline-allocate a function clone from its template function, then store the
// environment chain. Falls back to js::Lambda on allocation failure.
void CodeGenerator::visitLambda(LLambda* lir) {
  Register envChain = ToRegister(lir->environmentChain());
  Register output = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  JSFunction* fun = lir->mir()->templateFunction();

  using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
  OutOfLineCode* ool = oolCallVM<Fn, js::Lambda>(
      lir, ArgList(ImmGCPtr(fun), envChain), StoreRegisterTo(output));

  TemplateObject templateObject(fun);
  masm.createGCObject(output, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.storeValue(JSVAL_TYPE_OBJECT, envChain,
                  Address(output, JSFunction::offsetOfEnvironment()));
  // No post barrier needed because output is guaranteed to be allocated in
  // the nursery.

  masm.bind(ool->rejoin());
}
// Clone a function with an explicit prototype via a VM call. Arguments are
// pushed in reverse order of the VM function's signature.
void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
  Register envChain = ToRegister(lir->envChain());
  Register prototype = ToRegister(lir->prototype());

  pushArg(prototype);
  pushArg(envChain);
  pushArg(ImmGCPtr(lir->mir()->function()));

  using Fn =
      JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
  callVM<Fn, js::FunWithProtoOperation>(lir);
}
// Set a function's inferred name via a VM call. Arguments are pushed in
// reverse order of the VM function's signature.
void CodeGenerator::visitSetFunName(LSetFunName* lir) {
  pushArg(Imm32(lir->mir()->prefixKind()));
  pushArg(ToValue(lir, LSetFunName::NameIndex));
  pushArg(ToRegister(lir->fun()));

  using Fn =
      bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
  callVM<Fn, js::SetFunctionName>(lir);
}
// Record the OSI call point offset on the instruction's associated safepoint.
void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
  // Note: markOsiPoint ensures enough space exists between the last
  // LOsiPoint and this one to patch adjacent call instructions.

  MOZ_ASSERT(masm.framePushed() == frameSize());

  uint32_t osiCallPointOffset = markOsiPoint(lir);

  LSafepoint* safepoint = lir->associatedSafepoint();
  MOZ_ASSERT(!safepoint->osiCallPointOffset());
  safepoint->setOsiCallPointOffset(osiCallPointOffset);

#ifdef DEBUG
  // There should be no movegroups or other instructions between
  // an instruction and its OsiPoint. This is necessary because
  // we use the OsiPoint's snapshot from within VM calls.
  for (LInstructionReverseIterator iter(current->rbegin(lir));
       iter != current->rend(); iter++) {
    if (*iter == lir) {
      continue;
    }
    MOZ_ASSERT(!iter->isMoveGroup());
    MOZ_ASSERT(iter->safepoint() == safepoint);
    break;
  }
#endif

#ifdef CHECK_OSIPOINT_REGISTERS
  if (shouldVerifyOsiPointRegs(safepoint)) {
    verifyOsiPointRegs(safepoint);
  }
#endif
}
// LPhi must not survive to code generation; hitting this is a compiler bug.
void CodeGenerator::visitPhi(LPhi* lir) {
  MOZ_CRASH("Unexpected LPhi in CodeGenerator");
}
// Unconditional jump to the target block.
void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }
// Emit a table switch on an int32 (or double converted to int32) index;
// out-of-range or non-integral inputs take the default case.
void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
  const LAllocation* temp;

  if (mir->getOperand(0)->type() != MIRType::Int32) {
    temp = ins->tempInt()->output();

    // The input is a double, so try and convert it to an integer.
    // If it does not fit in an integer, take the default case.
    masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp),
                              defaultcase, false);
  } else {
    temp = ins->index();
  }

  emitTableSwitchDispatch(mir, ToRegister(temp),
                          ToRegisterOrInvalid(ins->tempPointer()));
}
// Emit a table switch on a boxed Value index: non-numbers take the default
// case; doubles are converted to int32 (default case if not exact).
void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  Register index = ToRegister(ins->tempInt());
  ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
  Register tag = masm.extractTag(value, index);
  masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);

  Label unboxInt, isInt;
  masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
  {
    FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
    masm.unboxDouble(value, floatIndex);
    masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
    masm.jump(&isInt);
  }

  masm.bind(&unboxInt);
  masm.unboxInt32(value, index);

  masm.bind(&isInt);

  emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
}
// No code is emitted for parameters.
void CodeGenerator::visitParameter(LParameter* lir) {}
3749 void CodeGenerator::visitCallee(LCallee* lir) {
3750 Register callee = ToRegister(lir->output());
3751 Address ptr(FramePointer, JitFrameLayout::offsetOfCalleeToken());
3753 masm.loadFunctionFromCalleeToken(ptr, callee);
// Compute whether the current call is a constructing call by masking the
// frame's callee token down to its tag bit.
void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
  Register output = ToRegister(lir->output());
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.loadPtr(calleeToken, output);

  // We must be inside a function.
  MOZ_ASSERT(current->mir()->info().script()->function());

  // The low bit indicates whether this call is constructing, just clear the
  // other bits.
  static_assert(CalleeToken_Function == 0x0,
                "CalleeTokenTag value should match");
  static_assert(CalleeToken_FunctionConstructing == 0x1,
                "CalleeTokenTag value should match");
  masm.andPtr(Imm32(0x1), output);
}
// Jump to the shared return label (which precedes the epilogue), asserting
// that the return value is already in the JS return register(s).
void CodeGenerator::visitReturn(LReturn* lir) {
#if defined(JS_NUNBOX32)
  DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
  DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
  MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
  MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  DebugOnly<LAllocation*> result = lir->getOperand(0);
  MOZ_ASSERT(ToRegister(result) == JSReturnReg);
#endif
  // Don't emit a jump to the return label if this is the last block, as
  // it'll fall through to the epilogue.
  //
  // This is -not- true however for a Generator-return, which may appear in the
  // middle of the last block, so we should always emit the jump there.
  if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
    masm.jump(&returnLabel_);
  }
}
// Emit the on-stack-replacement entry point: record its code offset and set
// up the Ion frame on top of the state Baseline left behind.
void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
  Register temp = ToRegister(lir->temp());

  // Remember the OSR entry offset into the code buffer.
  masm.flushBuffer();
  setOsrEntryOffset(masm.size());

  // Allocate the full frame for this function.
  // Note we have a new entry here. So we reset MacroAssembler::framePushed()
  // to 0, before reserving the stack.
  MOZ_ASSERT(masm.framePushed() == frameSize());
  masm.setFramePushed(0);

  // The Baseline code ensured both the frame pointer and stack pointer point to
  // the JitFrameLayout on the stack.

  // If profiling, save the current frame pointer to a per-thread global field.
  if (isProfilerInstrumentationEnabled()) {
    masm.profilerEnterFrame(FramePointer, temp);
  }

  masm.reserveStack(frameSize());
  MOZ_ASSERT(masm.framePushed() == frameSize());

  // Ensure that the Ion frame is properly aligned.
  masm.assertStackAlignment(JitStackAlignment, 0);
}
// Load the environment chain out of the BaselineFrame we are OSR-ing from.
3821 void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
3822   const LAllocation* frame = lir->getOperand(0);
3823   const LDefinition* object = lir->getDef(0);
3825   // Negative offset relative to the baseline frame pointer.
3826       BaselineFrame::reverseOffsetOfEnvironmentChain();
3828   masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
// Load the arguments object slot from the BaselineFrame being OSR-ed.
3831 void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
3832   const LAllocation* frame = lir->getOperand(0);
3833   const LDefinition* object = lir->getDef(0);
3835   const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();
3837   masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
// Load a boxed Value at a MIR-specified offset within the baseline frame.
3840 void CodeGenerator::visitOsrValue(LOsrValue* value) {
3841   const LAllocation* frame = value->getOperand(0);
3842   const ValueOperand out = ToOutValue(value);
3844   const ptrdiff_t frameOffset = value->mir()->frameOffset();
3846   masm.loadValue(Address(ToRegister(frame), frameOffset), out);
// Fetch the baseline frame's return value if HAS_RVAL is set in its flags;
// otherwise produce |undefined|.
3849 void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
3850   const LAllocation* frame = lir->getOperand(0);
3851   const ValueOperand out = ToOutValue(lir);
3853   Address flags =
3854       Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
3855   Address retval =
3856       Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());
3858   masm.moveValue(UndefinedValue(), out);
3860   Label done;
3861   masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
3862                     &done);
3863   masm.loadValue(retval, out);
3864   masm.bind(&done);
// Store a typed call argument into its outgoing argument stack slot,
// boxing it according to whether it lives in a float reg, GPR, or constant.
3867 void CodeGenerator::visitStackArgT(LStackArgT* lir) {
3868   const LAllocation* arg = lir->arg();
3869   MIRType argType = lir->type();
3870   uint32_t argslot = lir->argslot();
3871   MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3873   Address dest = AddressOfPassedArg(argslot);
3875   if (arg->isFloatReg()) {
3876     masm.boxDouble(ToFloatRegister(arg), dest);
3877   } else if (arg->isRegister()) {
3878     masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
3879   } else {
3880     masm.storeValue(arg->toConstant()->toJSValue(), dest);
// Store an already-boxed Value argument into its outgoing argument slot.
3884 void CodeGenerator::visitStackArgV(LStackArgV* lir) {
3885   ValueOperand val = ToValue(lir, 0);
3886   uint32_t argslot = lir->argslot();
3887   MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3889   masm.storeValue(val, AddressOfPassedArg(argslot));
// Emit a group of parallel register/stack moves produced by the register
// allocator: map each LDefinition type to a MoveOp type, resolve the move
// graph (handling cycles), and emit the machine moves.
3892 void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
3893   if (!group->numMoves()) {
3894     return;
3897   MoveResolver& resolver = masm.moveResolver();
3899   for (size_t i = 0; i < group->numMoves(); i++) {
3900     const LMove& move = group->getMove(i);
3902     LAllocation from = move.from();
3903     LAllocation to = move.to();
3904     LDefinition::Type type = move.type();
3906     // No bogus moves.
3907     MOZ_ASSERT(from != to);
3908     MOZ_ASSERT(!from.isConstant());
3909     MoveOp::Type moveType;
3910     switch (type) {
3911       case LDefinition::OBJECT:
3912       case LDefinition::SLOTS:
3913       case LDefinition::WASM_ANYREF:
3914 #ifdef JS_NUNBOX32
3915       case LDefinition::TYPE:
3916       case LDefinition::PAYLOAD:
3917 #else
3918       case LDefinition::BOX:
3919 #endif
3920       case LDefinition::GENERAL:
3921       case LDefinition::STACKRESULTS:
3922         moveType = MoveOp::GENERAL;
3923         break;
3924       case LDefinition::INT32:
3925         moveType = MoveOp::INT32;
3926         break;
3927       case LDefinition::FLOAT32:
3928         moveType = MoveOp::FLOAT32;
3929         break;
3930       case LDefinition::DOUBLE:
3931         moveType = MoveOp::DOUBLE;
3932         break;
3933       case LDefinition::SIMD128:
3934         moveType = MoveOp::SIMD128;
3935         break;
3936       default:
3937         MOZ_CRASH("Unexpected move type");
3940     masm.propagateOOM(
3941         resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
3944   masm.propagateOOM(resolver.resolve());
3945   if (masm.oom()) {
3946     return;
3949   MoveEmitter emitter(masm);
3951 #ifdef JS_CODEGEN_X86
     // On x86 a scratch register may not be available; without one,
     // memory-to-memory moves must be sorted so they can be emitted directly.
3952   if (group->maybeScratchRegister().isGeneralReg()) {
3953     emitter.setScratchRegister(
3954         group->maybeScratchRegister().toGeneralReg()->reg());
3955   } else {
3956     resolver.sortMemoryToMemoryMoves();
3958 #endif
3960   emitter.emit(resolver);
3961   emitter.finish();
// Materialize a 32-bit integer constant into its output register.
3964 void CodeGenerator::visitInteger(LInteger* lir) {
3965   masm.move32(Imm32(lir->i32()), ToRegister(lir->output()));
// Materialize a 64-bit integer constant into its output register pair/register.
3968 void CodeGenerator::visitInteger64(LInteger64* lir) {
3969   masm.move64(Imm64(lir->i64()), ToOutRegister64(lir));
// Materialize a GC-thing pointer constant (traced by the GC) into a register.
3972 void CodeGenerator::visitPointer(LPointer* lir) {
3973   masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
// Load a nursery-allocated object via the IonScript's nursery-object list:
// emit a patchable move of the list-entry address, then dereference it.
3976 void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
3977   Register output = ToRegister(lir->output());
3978   uint32_t nurseryIndex = lir->mir()->nurseryIndex();
3980   // Load a pointer to the entry in IonScript's nursery objects list.
3981   CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
3982   masm.propagateOOM(ionNurseryObjectLabels_.emplaceBack(label, nurseryIndex));
3984   // Load the JSObject*.
3985   masm.loadPtr(Address(output, 0), output);
// Intentionally emits no code: the LIR node only extends an object's liveness.
3988 void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
3989   // No-op.
// Debug-only: increment JSContext::inUnsafeRegion and assert the counter
// stayed positive (i.e. enters/leaves are balanced).
3992 void CodeGenerator::visitDebugEnterGCUnsafeRegion(
3993     LDebugEnterGCUnsafeRegion* lir) {
3994   Register temp = ToRegister(lir->temp0());
3996   masm.loadJSContext(temp);
3998   Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
3999   masm.add32(Imm32(1), inUnsafeRegion);
4001   Label ok;
4002   masm.branch32(Assembler::GreaterThan, inUnsafeRegion, Imm32(0), &ok);
4003   masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
4004   masm.bind(&ok);
// Debug-only: decrement JSContext::inUnsafeRegion and assert the counter
// did not go negative (i.e. no unmatched leave).
4007 void CodeGenerator::visitDebugLeaveGCUnsafeRegion(
4008     LDebugLeaveGCUnsafeRegion* lir) {
4009   Register temp = ToRegister(lir->temp0());
4011   masm.loadJSContext(temp);
4013   Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
4014   masm.add32(Imm32(-1), inUnsafeRegion);
4016   Label ok;
4017   masm.branch32(Assembler::GreaterThanOrEqual, inUnsafeRegion, Imm32(0), &ok);
4018   masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
4019   masm.bind(&ok);
// Load a native object's dynamic-slots pointer into the output register.
4022 void CodeGenerator::visitSlots(LSlots* lir) {
4023   Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
4024   masm.loadPtr(slots, ToRegister(lir->output()));
// Load a boxed Value from a fixed index in an object's dynamic-slots array.
4027 void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
4028   ValueOperand dest = ToOutValue(lir);
4029   Register base = ToRegister(lir->input());
4030   int32_t offset = lir->mir()->slot() * sizeof(js::Value);
4032   masm.loadValue(Address(base, offset), dest);
// Wrap an LAllocation as a ConstantOrRegister: a JS constant if the
// allocation is constant, otherwise a typed register of |valueType|.
4035 static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
4036                                                MIRType valueType) {
4037   if (value->isConstant()) {
4038     return ConstantOrRegister(value->toConstant()->toJSValue());
4040   return TypedOrValueRegister(valueType, ToAnyRegister(value));
// Store a typed (unboxed) value into a dynamic slot, emitting a GC
// pre-barrier first when the MIR node requires one.
4043 void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
4044   Register base = ToRegister(lir->slots());
4045   int32_t offset = lir->mir()->slot() * sizeof(js::Value);
4046   Address dest(base, offset);
4048   if (lir->mir()->needsBarrier()) {
4049     emitPreBarrier(dest);
4052   MIRType valueType = lir->mir()->value()->type();
4053   ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
4054   masm.storeUnboxedValue(value, valueType, dest);
// Store a boxed Value into a dynamic slot, with an optional GC pre-barrier.
4057 void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
4058   Register base = ToRegister(lir->slots());
4059   int32_t offset = lir->mir()->slot() * sizeof(Value);
4061   const ValueOperand value = ToValue(lir, LStoreDynamicSlotV::ValueIndex);
4063   if (lir->mir()->needsBarrier()) {
4064     emitPreBarrier(Address(base, offset));
4067   masm.storeValue(value, Address(base, offset));
// Load a native object's elements pointer into the output register.
4070 void CodeGenerator::visitElements(LElements* lir) {
4071   Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
4072   masm.loadPtr(elements, ToRegister(lir->output()));
// Unbox a JSFunction's environment slot (known to hold an object).
4075 void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
4076   Address environment(ToRegister(lir->function()),
4077                       JSFunction::offsetOfEnvironment());
4078   masm.unboxObject(environment, ToRegister(lir->output()));
// Load a method's [[HomeObject]] from its extended-function slot; debug
// builds assert the slot actually holds an object before unboxing.
4081 void CodeGenerator::visitHomeObject(LHomeObject* lir) {
4082   Register func = ToRegister(lir->function());
4083   Address homeObject(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
4085   masm.assertFunctionIsExtended(func);
4086 #ifdef DEBUG
4087   Label isObject;
4088   masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
4089   masm.assumeUnreachable("[[HomeObject]] must be Object");
4090   masm.bind(&isObject);
4091 #endif
4093   masm.unboxObject(homeObject, ToRegister(lir->output()));
// Compute the super base: the home object's prototype boxed as a Value,
// or |null| when the prototype is null. Debug builds rule out lazy protos.
4096 void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
4097   Register homeObject = ToRegister(lir->homeObject());
4098   ValueOperand output = ToOutValue(lir);
4099   Register temp = output.scratchReg();
4101   masm.loadObjProto(homeObject, temp);
4103 #ifdef DEBUG
4104   // We won't encounter a lazy proto, because the prototype is guaranteed to
4105   // either be a JSFunction or a PlainObject, and only proxy objects can have a
4106   // lazy proto.
4107   MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
4109   Label proxyCheckDone;
4110   masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
4111   masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
4112   masm.bind(&proxyCheckDone);
4113 #endif
4115   Label nullProto, done;
4116   masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
4118   // Box prototype and return
4119   masm.tagValue(JSVAL_TYPE_OBJECT, temp, output);
4120   masm.jump(&done);
4122   masm.bind(&nullProto);
4123   masm.moveValue(NullValue(), output);
4125   masm.bind(&done);
// Extract a constant MDefinition's object and downcast it to T.
// Asserts the definition really is a constant.
4128 template <class T>
4129 static T* ToConstantObject(MDefinition* def) {
4130   MOZ_ASSERT(def->isConstant());
4131   return &def->toConstant()->toObject().as<T>();
// Allocate a BlockLexicalEnvironmentObject inline from a template object,
// falling back to a VM call (createWithoutEnclosing) on allocation failure.
4134 void CodeGenerator::visitNewLexicalEnvironmentObject(
4135     LNewLexicalEnvironmentObject* lir) {
4136   Register output = ToRegister(lir->output());
4137   Register temp = ToRegister(lir->temp0());
4139   auto* templateObj = ToConstantObject<BlockLexicalEnvironmentObject>(
4140       lir->mir()->templateObj());
4141   auto* scope = &templateObj->scope();
4142   gc::Heap initialHeap = gc::Heap::Default;
4144   using Fn =
4145       BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>);
4146   auto* ool =
4147       oolCallVM<Fn, BlockLexicalEnvironmentObject::createWithoutEnclosing>(
4148           lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4150   TemplateObject templateObject(templateObj);
4151   masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4153   masm.bind(ool->rejoin());
// Allocate a ClassBodyLexicalEnvironmentObject inline from a template
// object, with an out-of-line VM fallback on allocation failure.
4156 void CodeGenerator::visitNewClassBodyEnvironmentObject(
4157     LNewClassBodyEnvironmentObject* lir) {
4158   Register output = ToRegister(lir->output());
4159   Register temp = ToRegister(lir->temp0());
4161   auto* templateObj = ToConstantObject<ClassBodyLexicalEnvironmentObject>(
4162       lir->mir()->templateObj());
4163   auto* scope = &templateObj->scope();
4164   gc::Heap initialHeap = gc::Heap::Default;
4166   using Fn = ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
4167                                                     Handle<ClassBodyScope*>);
4168   auto* ool =
4169       oolCallVM<Fn, ClassBodyLexicalEnvironmentObject::createWithoutEnclosing>(
4170           lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4172   TemplateObject templateObject(templateObj);
4173   masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4175   masm.bind(ool->rejoin());
// Allocate a VarEnvironmentObject inline from a template object, with an
// out-of-line VM fallback on allocation failure.
4178 void CodeGenerator::visitNewVarEnvironmentObject(
4179     LNewVarEnvironmentObject* lir) {
4180   Register output = ToRegister(lir->output());
4181   Register temp = ToRegister(lir->temp0());
4183   auto* templateObj =
4184       ToConstantObject<VarEnvironmentObject>(lir->mir()->templateObj());
4185   auto* scope = &templateObj->scope().as<VarScope>();
4186   gc::Heap initialHeap = gc::Heap::Default;
4188   using Fn = VarEnvironmentObject* (*)(JSContext*, Handle<VarScope*>);
4189   auto* ool = oolCallVM<Fn, VarEnvironmentObject::createWithoutEnclosing>(
4190       lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4192   TemplateObject templateObject(templateObj);
4193   masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4195   masm.bind(ool->rejoin());
// Bail out of Ion code if the object's shape differs from the expected one.
4198 void CodeGenerator::visitGuardShape(LGuardShape* guard) {
4199   Register obj = ToRegister(guard->input());
4200   Register temp = ToTempRegisterOrInvalid(guard->temp0());
4201   Label bail;
4202   masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
4203                           obj, &bail);
4204   bailoutFrom(&bail, guard->snapshot());
// Guard on a realm fuse. Known fuse kinds register a compilation dependency
// (invalidation-based, no runtime check); others check the fuse at runtime
// and bail if it has popped (non-null fuse word).
4207 void CodeGenerator::visitGuardFuse(LGuardFuse* guard) {
4208   auto fuseIndex = guard->mir()->fuseIndex();
4209   switch (fuseIndex) {
4210     case RealmFuses::FuseIndex::OptimizeGetIteratorFuse:
4211       addOptimizeGetIteratorFuseDependency();
4212       return;
4213     default:
4214       // validateAndRegisterFuseDependencies doesn't have
4215       // handling for this yet, actively check fuse instead.
4216       break;
4219   Register temp = ToRegister(guard->temp0());
4220   Label bail;
4222   // Bake specific fuse address for Ion code, because we won't share this code
4223   // across realms.
4224   GuardFuse* fuse = mirGen().realm->realmFuses().getFuseByIndex(fuseIndex);
4225   masm.loadPtr(AbsoluteAddress(fuse->fuseRef()), temp);
4226   masm.branchPtr(Assembler::NotEqual, temp, ImmPtr(nullptr), &bail);
4228   bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object's shape appears in the given shape-list
// (stored in the list object's elements).
4231 void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {
4232   Register obj = ToRegister(guard->object());
4233   Register shapeList = ToRegister(guard->shapeList());
4234   Register temp = ToRegister(guard->temp0());
4235   Register temp2 = ToRegister(guard->temp1());
4236   Register temp3 = ToRegister(guard->temp2());
4237   Register spectre = ToTempRegisterOrInvalid(guard->temp3());
4239   Label bail;
4240   masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
4241   masm.branchTestObjShapeList(Assembler::NotEqual, obj, temp, temp2, temp3,
4242                               spectre, &bail);
4243   bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object's prototype equals the expected object.
4246 void CodeGenerator::visitGuardProto(LGuardProto* guard) {
4247   Register obj = ToRegister(guard->object());
4248   Register expected = ToRegister(guard->expected());
4249   Register temp = ToRegister(guard->temp0());
4251   masm.loadObjProto(obj, temp);
4253   Label bail;
4254   masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
4255   bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object's prototype is null (proto word is zero).
4258 void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
4259   Register obj = ToRegister(guard->input());
4260   Register temp = ToRegister(guard->temp0());
4262   masm.loadObjProto(obj, temp);
4264   Label bail;
4265   masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
4266   bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object is a native (non-proxy, non-typed) object.
4269 void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
4270   Register obj = ToRegister(guard->input());
4271   Register temp = ToRegister(guard->temp0());
4273   Label bail;
4274   masm.branchIfNonNativeObj(obj, temp, &bail);
4275   bailoutFrom(&bail, guard->snapshot());
// Bail out if the global's generation counter no longer matches the value
// baked in at compile time.
4278 void CodeGenerator::visitGuardGlobalGeneration(LGuardGlobalGeneration* guard) {
4279   Register temp = ToRegister(guard->temp0());
4280   Label bail;
4282   masm.load32(AbsoluteAddress(guard->mir()->generationAddr()), temp);
4283   masm.branch32(Assembler::NotEqual, temp, Imm32(guard->mir()->expected()),
4284                 &bail);
4285   bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object is a proxy.
4288 void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
4289   Register obj = ToRegister(guard->input());
4290   Register temp = ToRegister(guard->temp0());
4292   Label bail;
4293   masm.branchTestObjectIsProxy(false, obj, temp, &bail);
4294   bailoutFrom(&bail, guard->snapshot());
// Bail out if the object is a proxy.
4297 void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
4298   Register obj = ToRegister(guard->input());
4299   Register temp = ToRegister(guard->temp0());
4301   Label bail;
4302   masm.branchTestObjectIsProxy(true, obj, temp, &bail);
4303   bailoutFrom(&bail, guard->snapshot());
// Bail out if the proxy's handler belongs to the DOM proxy handler family.
4306 void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
4307   Register proxy = ToRegister(guard->proxy());
4308   Register temp = ToRegister(guard->temp0());
4310   Label bail;
4311   masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
4312                                     GetDOMProxyHandlerFamily(), &bail);
4313   bailoutFrom(&bail, guard->snapshot());
// Perform a proxy [[Get]] with a constant id via the ProxyGetProperty VM call.
4316 void CodeGenerator::visitProxyGet(LProxyGet* lir) {
4317   Register proxy = ToRegister(lir->proxy());
4318   Register temp = ToRegister(lir->temp0());
4320   pushArg(lir->mir()->id(), temp);
4321   pushArg(proxy);
4323   using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
4324   callVM<Fn, ProxyGetProperty>(lir);
// Perform a proxy [[Get]] with a dynamic id Value via a VM call.
4327 void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
4328   Register proxy = ToRegister(lir->proxy());
4329   ValueOperand idVal = ToValue(lir, LProxyGetByValue::IdIndex);
4331   pushArg(idVal);
4332   pushArg(proxy);
4334   using Fn =
4335       bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
4336   callVM<Fn, ProxyGetPropertyByValue>(lir);
// Proxy [[HasProperty]] / [[GetOwnProperty]]-existence via VM call;
// picks ProxyHasOwn vs ProxyHas based on the MIR's hasOwn flag.
4339 void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
4340   Register proxy = ToRegister(lir->proxy());
4341   ValueOperand idVal = ToValue(lir, LProxyHasProp::IdIndex);
4343   pushArg(idVal);
4344   pushArg(proxy);
4346   using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
4347   if (lir->mir()->hasOwn()) {
4348     callVM<Fn, ProxyHasOwn>(lir);
4349   } else {
4350     callVM<Fn, ProxyHas>(lir);
// Proxy [[Set]] with a constant id via the ProxySetProperty VM call;
// passes the strict-mode flag from MIR.
4354 void CodeGenerator::visitProxySet(LProxySet* lir) {
4355   Register proxy = ToRegister(lir->proxy());
4356   ValueOperand rhs = ToValue(lir, LProxySet::RhsIndex);
4357   Register temp = ToRegister(lir->temp0());
4359   pushArg(Imm32(lir->mir()->strict()));
4360   pushArg(rhs);
4361   pushArg(lir->mir()->id(), temp);
4362   pushArg(proxy);
4364   using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
4365   callVM<Fn, ProxySetProperty>(lir);
// Proxy [[Set]] with a dynamic id Value via a VM call.
4368 void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
4369   Register proxy = ToRegister(lir->proxy());
4370   ValueOperand idVal = ToValue(lir, LProxySetByValue::IdIndex);
4371   ValueOperand rhs = ToValue(lir, LProxySetByValue::RhsIndex);
4373   pushArg(Imm32(lir->mir()->strict()));
4374   pushArg(rhs);
4375   pushArg(idVal);
4376   pushArg(proxy);
4378   using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
4379   callVM<Fn, ProxySetPropertyByValue>(lir);
// Set an array's .length via the jit::SetArrayLength VM call.
4382 void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
4383   Register obj = ToRegister(lir->obj());
4384   ValueOperand rhs = ToValue(lir, LCallSetArrayLength::RhsIndex);
4386   pushArg(Imm32(lir->mir()->strict()));
4387   pushArg(rhs);
4388   pushArg(obj);
4390   using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
4391   callVM<Fn, jit::SetArrayLength>(lir);
// Megamorphic named-property load: first try the megamorphic cache inline;
// on a miss, call GetNativeDataPropertyPure with the result slot on the
// stack, bailing out if the object is non-native or the pure call fails.
4394 void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
4395   Register obj = ToRegister(lir->object());
4396   Register temp0 = ToRegister(lir->temp0());
4397   Register temp1 = ToRegister(lir->temp1());
4398   Register temp2 = ToRegister(lir->temp2());
4399   Register temp3 = ToRegister(lir->temp3());
4400   ValueOperand output = ToOutValue(lir);
4402   Label bail, cacheHit;
4403   masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
4404                                   output, &cacheHit);
4406   masm.branchIfNonNativeObj(obj, temp0, &bail);
     // Reserve a stack Value to receive the result; temp3 points at it.
4408   masm.Push(UndefinedValue());
4409   masm.moveStackPtrTo(temp3);
4411   using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
4412                       MegamorphicCache::Entry* cacheEntry, Value* vp);
4413   masm.setupAlignedABICall();
4414   masm.loadJSContext(temp0);
4415   masm.passABIArg(temp0);
4416   masm.passABIArg(obj);
4417   masm.movePropertyKey(lir->mir()->name(), temp1);
4418   masm.passABIArg(temp1);
4419   masm.passABIArg(temp2);
4420   masm.passABIArg(temp3);
4422   masm.callWithABI<Fn, GetNativeDataPropertyPure>();
4424   MOZ_ASSERT(!output.aliases(ReturnReg));
4425   masm.Pop(output);
4427   masm.branchIfFalseBool(ReturnReg, &bail);
4429   masm.bind(&cacheHit);
4430   bailoutFrom(&bail, lir->snapshot());
// Megamorphic keyed-property load: try the by-value megamorphic cache, then
// fall back to GetNativeDataPropertyByValuePure with the id passed (and the
// result returned) through a two-Value stack area; bails out on failure.
4433 void CodeGenerator::visitMegamorphicLoadSlotByValue(
4434     LMegamorphicLoadSlotByValue* lir) {
4435   Register obj = ToRegister(lir->object());
4436   ValueOperand idVal = ToValue(lir, LMegamorphicLoadSlotByValue::IdIndex);
4437   Register temp0 = ToRegister(lir->temp0());
4438   Register temp1 = ToRegister(lir->temp1());
4439   Register temp2 = ToRegister(lir->temp2());
4440   ValueOperand output = ToOutValue(lir);
4442   Label bail, cacheHit;
4443   masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
4444                                          output, &cacheHit);
4446   masm.branchIfNonNativeObj(obj, temp0, &bail);
4448   // idVal will be in vp[0], result will be stored in vp[1].
4449   masm.reserveStack(sizeof(Value));
4450   masm.Push(idVal);
4451   masm.moveStackPtrTo(temp0);
4453   using Fn = bool (*)(JSContext* cx, JSObject* obj,
4454                       MegamorphicCache::Entry* cacheEntry, Value* vp);
4455   masm.setupAlignedABICall();
4456   masm.loadJSContext(temp1);
4457   masm.passABIArg(temp1);
4458   masm.passABIArg(obj);
4459   masm.passABIArg(temp2);
4460   masm.passABIArg(temp0);
4461   masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();
4463   MOZ_ASSERT(!idVal.aliases(temp0));
4464   masm.storeCallPointerResult(temp0);
4465   masm.Pop(idVal);
     // On failure, discard the result slot before jumping to the bailout;
     // restore framePushed afterwards since the two paths diverge in height.
4467   uint32_t framePushed = masm.framePushed();
4468   Label ok;
4469   masm.branchIfTrueBool(temp0, &ok);
4470   masm.freeStack(sizeof(Value));  // Discard result Value.
4471   masm.jump(&bail);
4473   masm.bind(&ok);
4474   masm.setFramePushed(framePushed);
4475   masm.Pop(output);
4477   masm.bind(&cacheHit);
4478   bailoutFrom(&bail, lir->snapshot());
// Megamorphic named-property store: try the cached-set fast path (with a
// pre-barrier callback); on miss call SetPropertyMegamorphic. After a cache
// hit, emit a post-write barrier when storing a nursery cell into a
// tenured object. x86 uses a reduced-register variant of the cache helper.
4481 void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
4482   Register obj = ToRegister(lir->object());
4483   ValueOperand value = ToValue(lir, LMegamorphicStoreSlot::RhsIndex);
4485   Register temp0 = ToRegister(lir->temp0());
4486 #ifndef JS_CODEGEN_X86
4487   Register temp1 = ToRegister(lir->temp1());
4488   Register temp2 = ToRegister(lir->temp2());
4489 #endif
4491   Label cacheHit, done;
4492 #ifdef JS_CODEGEN_X86
4493   masm.emitMegamorphicCachedSetSlot(
4494       lir->mir()->name(), obj, temp0, value, &cacheHit,
4495       [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
4496         EmitPreBarrier(masm, addr, mirType);
4498 #else
4499   masm.emitMegamorphicCachedSetSlot(
4500       lir->mir()->name(), obj, temp0, temp1, temp2, value, &cacheHit,
4501       [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
4502         EmitPreBarrier(masm, addr, mirType);
4504 #endif
4506   pushArg(Imm32(lir->mir()->strict()));
4507   pushArg(value);
4508   pushArg(lir->mir()->name(), temp0);
4509   pushArg(obj);
4511   using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
4512   callVM<Fn, SetPropertyMegamorphic<true>>(lir);
4514   masm.jump(&done);
4515   masm.bind(&cacheHit);
     // Skip the post barrier when the object is already in the nursery or the
     // stored value is not a nursery cell.
4517   masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
4518   masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);
4520   saveVolatile(temp0);
4521   emitPostWriteBarrier(obj);
4522   restoreVolatile(temp0);
4524   masm.bind(&done);
// Megamorphic has-property check: try the existence cache inline, then fall
// back to HasNativeDataPropertyPure (hasOwn selects the <true>/<false>
// instantiation), passing the id and receiving the boolean via a stack
// Value pair; bails out if the object is non-native or the call fails.
4527 void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
4528   Register obj = ToRegister(lir->object());
4529   ValueOperand idVal = ToValue(lir, LMegamorphicHasProp::IdIndex);
4530   Register temp0 = ToRegister(lir->temp0());
4531   Register temp1 = ToRegister(lir->temp1());
4532   Register temp2 = ToRegister(lir->temp2());
4533   Register output = ToRegister(lir->output());
4535   Label bail, cacheHit;
4536   masm.emitMegamorphicCacheLookupExists(idVal, obj, temp0, temp1, temp2, output,
4537                                         &cacheHit, lir->mir()->hasOwn());
4539   masm.branchIfNonNativeObj(obj, temp0, &bail);
4541   // idVal will be in vp[0], result will be stored in vp[1].
4542   masm.reserveStack(sizeof(Value));
4543   masm.Push(idVal);
4544   masm.moveStackPtrTo(temp0);
4546   using Fn = bool (*)(JSContext* cx, JSObject* obj,
4547                       MegamorphicCache::Entry* cacheEntry, Value* vp);
4548   masm.setupAlignedABICall();
4549   masm.loadJSContext(temp1);
4550   masm.passABIArg(temp1);
4551   masm.passABIArg(obj);
4552   masm.passABIArg(temp2);
4553   masm.passABIArg(temp0);
4554   if (lir->mir()->hasOwn()) {
4555     masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
4556   } else {
4557     masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
4560   MOZ_ASSERT(!idVal.aliases(temp0));
4561   masm.storeCallPointerResult(temp0);
4562   masm.Pop(idVal);
     // Failure path frees the result slot then bails; success path restores
     // the tracked frame height and unboxes the boolean result.
4564   uint32_t framePushed = masm.framePushed();
4565   Label ok;
4566   masm.branchIfTrueBool(temp0, &ok);
4567   masm.freeStack(sizeof(Value));  // Discard result Value.
4568   masm.jump(&bail);
4570   masm.bind(&ok);
4571   masm.setFramePushed(framePushed);
4572   masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
4573   masm.freeStack(sizeof(Value));
4574   masm.bind(&cacheHit);
4576   bailoutFrom(&bail, lir->snapshot());
// Has-property check against a small known shape: compare the atom id
// against each property key of the shape inline; output is 1 on a match,
// 0 otherwise. Debug builds assert the id string is an atom.
4579 void CodeGenerator::visitSmallObjectVariableKeyHasProp(
4580     LSmallObjectVariableKeyHasProp* lir) {
4581   Register id = ToRegister(lir->id());
4582   Register output = ToRegister(lir->output());
4584 #ifdef DEBUG
4585   Label isAtom;
4586   masm.branchTest32(Assembler::NonZero, Address(id, JSString::offsetOfFlags()),
4587                     Imm32(JSString::ATOM_BIT), &isAtom);
4588   masm.assumeUnreachable("Expected atom input");
4589   masm.bind(&isAtom);
4590 #endif
4592   SharedShape* shape = &lir->mir()->shape()->asShared();
4594   Label done, success;
4595   for (SharedShapePropertyIter<NoGC> iter(shape); !iter.done(); iter++) {
4596     masm.branchPtr(Assembler::Equal, id, ImmGCPtr(iter->key().toAtom()),
4597                    &success);
4599   masm.move32(Imm32(0), output);
4600   masm.jump(&done);
4601   masm.bind(&success);
4602   masm.move32(Imm32(1), output);
4603   masm.bind(&done);
// Bail out if the object's class is any ArrayBuffer or SharedArrayBuffer
// variant (fixed-length, resizable, or growable shared).
4606 void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
4607     LGuardIsNotArrayBufferMaybeShared* guard) {
4608   Register obj = ToRegister(guard->input());
4609   Register temp = ToRegister(guard->temp0());
4611   Label bail;
4612   masm.loadObjClassUnsafe(obj, temp);
4613   masm.branchPtr(Assembler::Equal, temp,
4614                  ImmPtr(&FixedLengthArrayBufferObject::class_), &bail);
4615   masm.branchPtr(Assembler::Equal, temp,
4616                  ImmPtr(&FixedLengthSharedArrayBufferObject::class_), &bail);
4617   masm.branchPtr(Assembler::Equal, temp,
4618                  ImmPtr(&ResizableArrayBufferObject::class_), &bail);
4619   masm.branchPtr(Assembler::Equal, temp,
4620                  ImmPtr(&GrowableSharedArrayBufferObject::class_), &bail);
4621   bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object's class is a TypedArray class.
4624 void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
4625   Register obj = ToRegister(guard->input());
4626   Register temp = ToRegister(guard->temp0());
4628   Label bail;
4629   masm.loadObjClassUnsafe(obj, temp);
4630   masm.branchIfClassIsNotTypedArray(temp, &bail);
4631   bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object's class is a fixed-length TypedArray class.
4634 void CodeGenerator::visitGuardIsFixedLengthTypedArray(
4635     LGuardIsFixedLengthTypedArray* guard) {
4636   Register obj = ToRegister(guard->input());
4637   Register temp = ToRegister(guard->temp0());
4639   Label bail;
4640   masm.loadObjClassUnsafe(obj, temp);
4641   masm.branchIfClassIsNotFixedLengthTypedArray(temp, &bail);
4642   bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object's class is a resizable TypedArray class.
4645 void CodeGenerator::visitGuardIsResizableTypedArray(
4646     LGuardIsResizableTypedArray* guard) {
4647   Register obj = ToRegister(guard->input());
4648   Register temp = ToRegister(guard->temp0());
4650   Label bail;
4651   masm.loadObjClassUnsafe(obj, temp);
4652   masm.branchIfClassIsNotResizableTypedArray(temp, &bail);
4653   bailoutFrom(&bail, guard->snapshot());
// Bail out unless the proxy's handler pointer equals the expected handler.
4656 void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
4657   Register obj = ToRegister(guard->input());
4659   Label bail;
4661   Address handlerAddr(obj, ProxyObject::offsetOfHandler());
4662   masm.branchPtr(Assembler::NotEqual, handlerAddr,
4663                  ImmPtr(guard->mir()->handler()), &bail);
4665   bailoutFrom(&bail, guard->snapshot());
// Bail out on pointer (in)equality with the expected object, direction
// chosen by the MIR's bailOnEquality flag.
4668 void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
4669   Register input = ToRegister(guard->input());
4670   Register expected = ToRegister(guard->expected());
4672   Assembler::Condition cond =
4673       guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
4674   bailoutCmpPtr(cond, input, expected, guard->snapshot());
// Bail out unless the function pointer equals the expected function.
4677 void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
4678   Register input = ToRegister(guard->input());
4679   Register expected = ToRegister(guard->expected());
4681   bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
// Bail out unless the string equals a specific atom (may call out, so the
// live volatile registers minus the scratch are preserved by the helper).
4684 void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
4685   Register str = ToRegister(guard->str());
4686   Register scratch = ToRegister(guard->temp0());
4688   LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
4689   volatileRegs.takeUnchecked(scratch);
4691   Label bail;
4692   masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
4693                          &bail);
4694   bailoutFrom(&bail, guard->snapshot());
// Bail out unless the symbol pointer equals the expected symbol.
4697 void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
4698   Register symbol = ToRegister(guard->symbol());
4700   bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
4701                 guard->snapshot());
// Bail out unless the int32 equals the expected constant.
4704 void CodeGenerator::visitGuardSpecificInt32(LGuardSpecificInt32* guard) {
4705   Register num = ToRegister(guard->num());
4707   bailoutCmp32(Assembler::NotEqual, num, Imm32(guard->mir()->expected()),
4708                guard->snapshot());
// Convert a string to an array index: fast path reads the string's cached
// index value; slow path calls GetIndexFromString via ABI, bailing out on a
// negative (failure) result.
4711 void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
4712   Register str = ToRegister(lir->string());
4713   Register output = ToRegister(lir->output());
4715   Label vmCall, done;
4716   masm.loadStringIndexValue(str, output, &vmCall);
4717   masm.jump(&done);
4720   masm.bind(&vmCall);
4722   LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4723   volatileRegs.takeUnchecked(output);
4724   masm.PushRegsInMask(volatileRegs);
4726   using Fn = int32_t (*)(JSString* str);
4727   masm.setupAlignedABICall();
4728   masm.passABIArg(str);
4729   masm.callWithABI<Fn, GetIndexFromString>();
4730   masm.storeCallInt32Result(output);
4732   masm.PopRegsInMask(volatileRegs);
4734   // GetIndexFromString returns a negative value on failure.
4735   bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
4738   masm.bind(&done);
// Convert a string to an int32 via the masm helper, bailing out on failure.
4741 void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
4742   Register str = ToRegister(lir->string());
4743   Register output = ToRegister(lir->output());
4744   Register temp = ToRegister(lir->temp0());
4746   LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4748   Label bail;
4749   masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
4750   bailoutFrom(&bail, lir->snapshot());
// Convert a string to a double. Fast path: reuse the string's cached index
// value and int32->double convert. Slow path: call StringToNumberPure with
// a stack slot for the result, bailing out on OOM (false return).
4753 void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
4754   Register str = ToRegister(lir->string());
4755   FloatRegister output = ToFloatRegister(lir->output());
4756   Register temp0 = ToRegister(lir->temp0());
4757   Register temp1 = ToRegister(lir->temp1());
4759   Label vmCall, done;
4760   // Use indexed value as fast path if possible.
4761   masm.loadStringIndexValue(str, temp0, &vmCall);
4762   masm.convertInt32ToDouble(temp0, output);
4763   masm.jump(&done);
4765   masm.bind(&vmCall);
4767   // Reserve stack for holding the result value of the call.
4768   masm.reserveStack(sizeof(double));
4769   masm.moveStackPtrTo(temp0);
4771   LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4772   volatileRegs.takeUnchecked(temp0);
4773   volatileRegs.takeUnchecked(temp1);
4774   masm.PushRegsInMask(volatileRegs);
4776   using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
4777   masm.setupAlignedABICall();
4778   masm.loadJSContext(temp1);
4779   masm.passABIArg(temp1);
4780   masm.passABIArg(str);
4781   masm.passABIArg(temp0);
4782   masm.callWithABI<Fn, StringToNumberPure>();
4783   masm.storeCallPointerResult(temp0);
4785   masm.PopRegsInMask(volatileRegs);
4787   Label ok;
4788   masm.branchIfTrueBool(temp0, &ok);
4790     // OOM path, recovered by StringToNumberPure.
4792     // Use addToStackPtr instead of freeStack as freeStack tracks stack height
4793     // flow-insensitively, and using it here would confuse the stack height
4794     // tracking.
4795     masm.addToStackPtr(Imm32(sizeof(double)));
4796     bailout(lir->snapshot());
4798   masm.bind(&ok);
4799   masm.Pop(output);
4801   masm.bind(&done);
// Bail out unless the object's dense-elements initialized length is zero.
4804 void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
4805   Register obj = ToRegister(guard->input());
4806   Register temp = ToRegister(guard->temp0());
4808   // Load obj->elements.
4809   masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);
4811   // Make sure there are no dense elements.
4812   Address initLength(temp, ObjectElements::offsetOfInitializedLength());
4813   bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
// Widen a boolean (0 or 1 in a 32-bit register) to a 64-bit integer.
4816 void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
4817 Register input = ToRegister(lir->input());
4818 Register64 output = ToOutRegister64(lir);
4820 masm.move32To64ZeroExtend(input, output);
// Shared helper: convert a string to an int64 by calling the DoStringToInt64
// VM function with a stack-allocated uint64_t outparam. |lir| provides the
// safepoint/live-register context; |input| holds the string; the result is
// loaded into |output|.
4823 void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
4824 Register64 output) {
4825 Register temp = output.scratchReg();
4827 saveLive(lir);
// Reserve the outparam slot and push its address + the string (note: VM
// call arguments are pushed last-to-first).
4829 masm.reserveStack(sizeof(uint64_t));
4830 masm.moveStackPtrTo(temp);
4831 pushArg(temp);
4832 pushArg(input);
4834 using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
4835 callVM<Fn, DoStringToInt64>(lir);
4837 masm.load64(Address(masm.getStackPointer(), 0), output);
4838 masm.freeStack(sizeof(uint64_t));
// Don't restore registers holding the result we just produced.
4840 restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
// LStringToInt64: thin wrapper around the shared emitStringToInt64 helper.
4843 void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
4844 Register input = ToRegister(lir->input());
4845 Register64 output = ToOutRegister64(lir);
4847 emitStringToInt64(lir, input, output);
// Convert a boxed Value to int64. Accepts BigInt, Boolean, and String inputs
// (tested in that order); any other tag bails out. The |checks| counter lets
// the lambda route a failed test either to the next test or, on the final
// check, straight to the bailout label.
4850 void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
4851 ValueOperand input = ToValue(lir, LValueToInt64::InputIndex);
4852 Register temp = ToRegister(lir->temp0());
4853 Register64 output = ToOutRegister64(lir);
4855 int checks = 3;
4857 Label fail, done;
4858 // Jump to fail if this is the last check and we fail it,
4859 // otherwise to the next test.
4860 auto emitTestAndUnbox = [&](auto testAndUnbox) {
4861 MOZ_ASSERT(checks > 0);
4863 checks--;
4864 Label notType;
4865 Label* target = checks ? &notType : &fail;
4867 testAndUnbox(target);
// Only non-final checks need the jump-over and the fall-through label.
4869 if (checks) {
4870 masm.jump(&done);
4871 masm.bind(&notType);
4875 Register tag = masm.extractTag(input, temp);
4877 // BigInt.
4878 emitTestAndUnbox([&](Label* target) {
4879 masm.branchTestBigInt(Assembler::NotEqual, tag, target);
4880 masm.unboxBigInt(input, temp);
4881 masm.loadBigInt64(temp, output);
4884 // Boolean
4885 emitTestAndUnbox([&](Label* target) {
4886 masm.branchTestBoolean(Assembler::NotEqual, tag, target);
4887 masm.unboxBoolean(input, temp);
4888 masm.move32To64ZeroExtend(temp, output);
4891 // String
4892 emitTestAndUnbox([&](Label* target) {
4893 masm.branchTestString(Assembler::NotEqual, tag, target);
4894 masm.unboxString(input, temp);
4895 emitStringToInt64(lir, temp, output);
4898 MOZ_ASSERT(checks == 0);
4900 bailoutFrom(&fail, lir->snapshot());
4901 masm.bind(&done);
// Load a BigInt's 64-bit digit value into the output register pair/register.
4904 void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
4905 Register operand = ToRegister(lir->input());
4906 Register64 output = ToOutRegister64(lir);
4908 masm.loadBigInt64(operand, output);
// Create the out-of-line VM-call path that allocates a BigInt from a 64-bit
// input when inline GC allocation fails. On 32-bit platforms the input is
// passed as two 32-bit halves; |type| selects the signed vs. unsigned
// conversion (Scalar::BigInt64 vs. Scalar::BigUint64).
4911 OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
4912 Scalar::Type type,
4913 Register64 input,
4914 Register output) {
4915 #if JS_BITS_PER_WORD == 32
4916 using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
4917 auto args = ArgList(input.low, input.high);
4918 #else
4919 using Fn = BigInt* (*)(JSContext*, uint64_t);
4920 auto args = ArgList(input);
4921 #endif
4923 if (type == Scalar::BigInt64) {
4924 return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
4925 StoreRegisterTo(output));
4927 MOZ_ASSERT(type == Scalar::BigUint64);
4928 return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
4929 StoreRegisterTo(output));
// Allocate a BigInt in |output| and initialize it from the 64-bit |input|,
// falling back to the out-of-line VM path on allocation failure. When no
// dedicated temp is available (maybeTemp == InvalidReg), a register is
// manually pushed/popped around the allocation; note it must be popped on
// both the success and failure edges before control leaves this code.
4932 void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
4933 Register64 input, Register output,
4934 Register maybeTemp) {
4935 OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);
4937 if (maybeTemp != InvalidReg) {
4938 masm.newGCBigInt(output, maybeTemp, initialBigIntHeap(), ool->entry());
4939 } else {
4940 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
4941 regs.take(input);
4942 regs.take(output);
4944 Register temp = regs.takeAny();
4946 masm.push(temp);
4948 Label fail, ok;
4949 masm.newGCBigInt(output, temp, initialBigIntHeap(), &fail);
4950 masm.pop(temp);
4951 masm.jump(&ok);
4952 masm.bind(&fail);
4953 masm.pop(temp);
4954 masm.jump(ool->entry());
4955 masm.bind(&ok);
4957 masm.initializeBigInt64(type, output, input);
4958 masm.bind(ool->rejoin());
// Box a signed 64-bit integer into a freshly allocated BigInt.
4961 void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
4962 Register64 input = ToRegister64(lir->input());
4963 Register temp = ToRegister(lir->temp0());
4964 Register output = ToRegister(lir->output());
4966 emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp);
// Bail out unless the boxed input equals the MIR-provided expected Value.
4969 void CodeGenerator::visitGuardValue(LGuardValue* lir) {
4970 ValueOperand input = ToValue(lir, LGuardValue::InputIndex);
4971 Value expected = lir->mir()->expected();
4972 Label bail;
4973 masm.branchTestValue(Assembler::NotEqual, input, expected, &bail);
4974 bailoutFrom(&bail, lir->snapshot());
// Bail out unless the input is null or undefined. The tag is split once and
// tested against both cases.
4977 void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
4978 ValueOperand input = ToValue(lir, LGuardNullOrUndefined::InputIndex);
4980 ScratchTagScope tag(masm, input);
4981 masm.splitTagForTest(input, tag);
4983 Label done;
4984 masm.branchTestNull(Assembler::Equal, tag, &done);
4986 Label bail;
4987 masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
4988 bailoutFrom(&bail, lir->snapshot());
4990 masm.bind(&done);
// Bail out if the boxed input is an object.
4993 void CodeGenerator::visitGuardIsNotObject(LGuardIsNotObject* lir) {
4994 ValueOperand input = ToValue(lir, LGuardIsNotObject::InputIndex);
4996 Label bail;
4997 masm.branchTestObject(Assembler::Equal, input, &bail);
4998 bailoutFrom(&bail, lir->snapshot());
// Bail out unless the function's flags contain every expected flag and none
// of the unexpected flags from the MIR node.
5001 void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
5002 Register function = ToRegister(lir->function());
5004 Label bail;
5005 if (uint16_t flags = lir->mir()->expectedFlags()) {
5006 masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
5008 if (uint16_t flags = lir->mir()->unexpectedFlags()) {
5009 masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
5011 bailoutFrom(&bail, lir->snapshot());
// Bail out unless the function is a non-builtin constructor.
5014 void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
5015 LGuardFunctionIsNonBuiltinCtor* lir) {
5016 Register function = ToRegister(lir->function());
5017 Register temp = ToRegister(lir->temp0());
5019 Label bail;
5020 masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
5021 bailoutFrom(&bail, lir->snapshot());
// Bail out when the function's kind matches (or, depending on
// bailOnEquality, fails to match) the expected kind from the MIR node.
5024 void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
5025 Register function = ToRegister(lir->function());
5026 Register temp = ToRegister(lir->temp0());
5028 Assembler::Condition cond =
5029 lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
5031 Label bail;
5032 masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
5033 bailoutFrom(&bail, lir->snapshot());
// Bail out unless the function's jitinfo-or-script slot holds the expected
// script pointer.
5036 void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
5037 Register function = ToRegister(lir->function());
5039 Address scriptAddr(function, JSFunction::offsetOfJitInfoOrScript());
5040 bailoutCmpPtr(Assembler::NotEqual, scriptAddr,
5041 ImmGCPtr(lir->mir()->expected()), lir->snapshot());
5044 // Out-of-line path to update the store buffer.
// Holds the LIR instruction (for live-register bookkeeping) and the object
// allocation whose write needs a post-barrier; dispatched through accept()
// to visitOutOfLineCallPostWriteBarrier.
5045 class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
5046 LInstruction* lir_;
5047 const LAllocation* object_;
5049 public:
5050 OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
5051 : lir_(lir), object_(object) {}
5053 void accept(CodeGenerator* codegen) override {
5054 codegen->visitOutOfLineCallPostWriteBarrier(this);
5057 LInstruction* lir() const { return lir_; }
5058 const LAllocation* object() const { return object_; }
// For a known tenured cell, test its arena's buffered-cells bitmap directly:
// jump to |exit| if the cell is already recorded, to |callVM| if the arena
// still has the sentinel (unallocated) cell set; otherwise set the bit
// inline and jump to |exit|. The temp register is returned to |regs|.
5061 static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
5062 const gc::TenuredCell* cell,
5063 AllocatableGeneralRegisterSet& regs,
5064 Label* exit, Label* callVM) {
5065 Register temp = regs.takeAny();
5067 gc::Arena* arena = cell->arena();
5069 Register cells = temp;
5070 masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);
// Compute the word offset and bit mask for this cell at compile time.
5072 size_t index = gc::ArenaCellSet::getCellIndex(cell);
5073 size_t word;
5074 uint32_t mask;
5075 gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
5076 size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);
5078 masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
5079 exit);
5081 // Check whether this is the sentinel set and if so call the VM to allocate
5082 // one for this arena.
5083 masm.branchPtr(Assembler::Equal,
5084 Address(cells, gc::ArenaCellSet::offsetOfArena()),
5085 ImmPtr(nullptr), callVM);
5087 // Add the cell to the set.
5088 masm.or32(Imm32(mask), Address(cells, offset));
5089 masm.jump(exit);
5091 regs.add(temp);
// Emit a generational-GC post-write barrier for the object in |objreg|.
// Known constant objects get an inline store-buffer bitmap check; other
// objects get a one-element whole-cell cache check; globals skip both (they
// have their own fast path at the call site). The slow path calls
// PostGlobalWriteBarrier or PostWriteBarrier via the C++ ABI.
5094 static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
5095 Register objreg, JSObject* maybeConstant,
5096 bool isGlobal,
5097 AllocatableGeneralRegisterSet& regs) {
5098 MOZ_ASSERT_IF(isGlobal, maybeConstant);
5100 Label callVM;
5101 Label exit;
5103 Register temp = regs.takeAny();
5105 // We already have a fast path to check whether a global is in the store
5106 // buffer.
5107 if (!isGlobal) {
5108 if (maybeConstant) {
5109 // Check store buffer bitmap directly for known object.
5110 EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
5111 &exit, &callVM);
5112 } else {
5113 // Check one element cache to avoid VM call.
5114 masm.branchPtr(Assembler::Equal,
5115 AbsoluteAddress(runtime->addressOfLastBufferedWholeCell()),
5116 objreg, &exit);
5120 // Call into the VM to barrier the write.
5121 masm.bind(&callVM);
5123 Register runtimereg = temp;
5124 masm.mov(ImmPtr(runtime), runtimereg);
5126 masm.setupAlignedABICall();
5127 masm.passABIArg(runtimereg);
5128 masm.passABIArg(objreg);
5129 if (isGlobal) {
5130 using Fn = void (*)(JSRuntime* rt, GlobalObject* obj);
5131 masm.callWithABI<Fn, PostGlobalWriteBarrier>();
5132 } else {
5133 using Fn = void (*)(JSRuntime* rt, js::gc::Cell* obj);
5134 masm.callWithABI<Fn, PostWriteBarrier>();
5137 masm.bind(&exit);
// Post-write-barrier entry point for an LAllocation: materializes constant
// objects into a register (noting whether it's the script's global) before
// delegating to the static EmitPostWriteBarrier helper.
5140 void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
5141 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5143 Register objreg;
5144 JSObject* object = nullptr;
5145 bool isGlobal = false;
5146 if (obj->isConstant()) {
5147 object = &obj->toConstant()->toObject();
5148 isGlobal = isGlobalObject(object);
5149 objreg = regs.takeAny();
5150 masm.movePtr(ImmGCPtr(object), objreg);
5151 } else {
5152 objreg = ToRegister(obj);
5153 regs.takeUnchecked(objreg);
5156 EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
5159 // Returns true if `def` might be allocated in the nursery.
// Unwraps a box first; an untyped Value could hold anything nursery-
// allocatable, so it conservatively answers true.
5160 static bool ValueNeedsPostBarrier(MDefinition* def) {
5161 if (def->isBox()) {
5162 def = def->toBox()->input();
5164 if (def->type() == MIRType::Value) {
5165 return true;
5167 return NeedsPostBarrier(def->type());
// Out-of-line path for an element post-write barrier. Captures the live
// volatile registers to save around the VM call, the object, the element
// index allocation, a scratch register, and a constant adjustment
// (indexDiff) applied to the index before the call.
5170 class OutOfLineElementPostWriteBarrier
5171 : public OutOfLineCodeBase<CodeGenerator> {
5172 LiveRegisterSet liveVolatileRegs_;
5173 const LAllocation* index_;
5174 int32_t indexDiff_;
5175 Register obj_;
5176 Register scratch_;
5178 public:
5179 OutOfLineElementPostWriteBarrier(const LiveRegisterSet& liveVolatileRegs,
5180 Register obj, const LAllocation* index,
5181 Register scratch, int32_t indexDiff)
5182 : liveVolatileRegs_(liveVolatileRegs),
5183 index_(index),
5184 indexDiff_(indexDiff),
5185 obj_(obj),
5186 scratch_(scratch) {}
5188 void accept(CodeGenerator* codegen) override {
5189 codegen->visitOutOfLineElementPostWriteBarrier(this);
5192 const LiveRegisterSet& liveVolatileRegs() const { return liveVolatileRegs_; }
5193 const LAllocation* index() const { return index_; }
5194 int32_t indexDiff() const { return indexDiff_; }
5196 Register object() const { return obj_; }
5197 Register scratch() const { return scratch_; }
// Emit the inline portion of an element post-write barrier: skip entirely
// for constants and typed values that can't be nursery cells, otherwise
// branch to the out-of-line VM path only when the object is tenured and the
// stored value is a nursery cell.
5200 void CodeGenerator::emitElementPostWriteBarrier(
5201 MInstruction* mir, const LiveRegisterSet& liveVolatileRegs, Register obj,
5202 const LAllocation* index, Register scratch, const ConstantOrRegister& val,
5203 int32_t indexDiff) {
5204 if (val.constant()) {
// Constants stored here must not be nursery GC things.
5205 MOZ_ASSERT_IF(val.value().isGCThing(),
5206 !IsInsideNursery(val.value().toGCThing()));
5207 return;
5210 TypedOrValueRegister reg = val.reg();
5211 if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
5212 return;
5215 auto* ool = new (alloc()) OutOfLineElementPostWriteBarrier(
5216 liveVolatileRegs, obj, index, scratch, indexDiff);
5217 addOutOfLineCode(ool, mir);
// A nursery object needs no barrier: the whole object is already traced.
5219 masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, ool->rejoin());
5221 if (reg.hasValue()) {
5222 masm.branchValueIsNurseryCell(Assembler::Equal, reg.valueReg(), scratch,
5223 ool->entry());
5224 } else {
5225 masm.branchPtrInNurseryChunk(Assembler::Equal, reg.typedReg().gpr(),
5226 scratch, ool->entry());
5229 masm.bind(ool->rejoin());
// Out-of-line slow path: save live volatiles, materialize the (possibly
// adjusted) element index, and call PostWriteElementBarrier via an
// unaligned ABI call, then restore and rejoin.
5232 void CodeGenerator::visitOutOfLineElementPostWriteBarrier(
5233 OutOfLineElementPostWriteBarrier* ool) {
5234 Register obj = ool->object();
5235 Register scratch = ool->scratch();
5236 const LAllocation* index = ool->index();
5237 int32_t indexDiff = ool->indexDiff();
5239 masm.PushRegsInMask(ool->liveVolatileRegs());
5241 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5242 regs.takeUnchecked(obj);
5243 regs.takeUnchecked(scratch);
5245 Register indexReg;
5246 if (index->isConstant()) {
5247 indexReg = regs.takeAny();
5248 masm.move32(Imm32(ToInt32(index) + indexDiff), indexReg);
5249 } else {
5250 indexReg = ToRegister(index);
5251 regs.takeUnchecked(indexReg);
// In-place add is safe: the original value is restored from the stack
// below (see the assertion after the call).
5252 if (indexDiff != 0) {
5253 masm.add32(Imm32(indexDiff), indexReg);
5257 masm.setupUnalignedABICall(scratch);
5258 masm.movePtr(ImmPtr(gen->runtime), scratch);
5259 masm.passABIArg(scratch);
5260 masm.passABIArg(obj);
5261 masm.passABIArg(indexReg);
5262 using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
5263 masm.callWithABI<Fn, PostWriteElementBarrier>();
5265 // We don't need a sub32 here because indexReg must be in liveVolatileRegs
5266 // if indexDiff is not zero, so it will be restored below.
5267 MOZ_ASSERT_IF(indexDiff != 0, ool->liveVolatileRegs().has(indexReg));
5269 masm.PopRegsInMask(ool->liveVolatileRegs());
5271 masm.jump(ool->rejoin());
// Post-write-barrier entry point for an object already in a register (no
// constant/global fast paths apply).
5274 void CodeGenerator::emitPostWriteBarrier(Register objreg) {
5275 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5276 regs.takeUnchecked(objreg);
5277 EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
// Out-of-line slow path for the whole-object post-write barrier: wrap the
// barrier emission in a save/restore of the instruction's live volatiles.
5280 void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
5281 OutOfLineCallPostWriteBarrier* ool) {
5282 saveLiveVolatile(ool->lir());
5283 const LAllocation* obj = ool->object();
5284 emitPostWriteBarrier(obj);
5285 restoreLiveVolatile(ool->lir());
5287 masm.jump(ool->rejoin());
// If the barriered object is statically known to be this script's global,
// emit a fast check of realm->globalWriteBarriered that skips the VM call
// once the global has been barriered before.
5290 void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
5291 OutOfLineCode* ool) {
5292 // Check whether an object is a global that we have already barriered before
5293 // calling into the VM.
5295 // We only check for the script's global, not other globals within the same
5296 // compartment, because we bake in a pointer to realm->globalWriteBarriered
5297 // and doing that would be invalid for other realms because they could be
5298 // collected before the Ion code is discarded.
5300 if (!maybeGlobal->isConstant()) {
5301 return;
5304 JSObject* obj = &maybeGlobal->toConstant()->toObject();
5305 if (gen->realm->maybeGlobal() != obj) {
5306 return;
5309 const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
5310 masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
5311 ool->rejoin());
// Shared inline path for typed (Object/String/BigInt) post-write barriers:
// skip if the object itself is in the nursery, apply the known-global fast
// path, then branch to the OOL VM call only when the stored value is a
// nursery cell.
5314 template <class LPostBarrierType, MIRType nurseryType>
5315 void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
5316 OutOfLineCode* ool) {
5317 static_assert(NeedsPostBarrier(nurseryType));
5319 addOutOfLineCode(ool, lir->mir());
5321 Register temp = ToTempRegisterOrInvalid(lir->temp0());
5323 if (lir->object()->isConstant()) {
5324 // Constant nursery objects cannot appear here, see
5325 // LIRGenerator::visitPostWriteElementBarrier.
5326 MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
5327 } else {
5328 masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
5329 temp, ool->rejoin());
5332 maybeEmitGlobalBarrierCheck(lir->object(), ool);
5334 Register value = ToRegister(lir->value());
// Compile-time check that the MIR value type matches the instantiation.
5335 if constexpr (nurseryType == MIRType::Object) {
5336 MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
5337 } else if constexpr (nurseryType == MIRType::String) {
5338 MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
5339 } else {
5340 static_assert(nurseryType == MIRType::BigInt);
5341 MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
5343 masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());
5345 masm.bind(ool->rejoin());
// Boxed-Value variant of the shared post-write-barrier inline path: same
// structure as visitPostWriteBarrierCommon but tests the Value payload for
// any nursery cell.
5348 template <class LPostBarrierType>
5349 void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
5350 OutOfLineCode* ool) {
5351 addOutOfLineCode(ool, lir->mir());
5353 Register temp = ToTempRegisterOrInvalid(lir->temp0());
5355 if (lir->object()->isConstant()) {
5356 // Constant nursery objects cannot appear here, see
5357 // LIRGenerator::visitPostWriteElementBarrier.
5358 MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
5359 } else {
5360 masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
5361 temp, ool->rejoin());
5364 maybeEmitGlobalBarrierCheck(lir->object(), ool);
5366 ValueOperand value = ToValue(lir, LPostBarrierType::ValueIndex);
5367 masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());
5369 masm.bind(ool->rejoin());
// Post-write barrier for storing an Object into an object slot.
5372 void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
5373 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5374 visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
// Post-write barrier for storing a String into an object slot.
5377 void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
5378 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5379 visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
// Post-write barrier for storing a BigInt into an object slot.
5382 void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
5383 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5384 visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
// Post-write barrier for storing a boxed Value into an object slot.
5387 void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
5388 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5389 visitPostWriteBarrierCommonV(lir, ool);
5392 // Out-of-line path to update the store buffer.
// Element-store variant: additionally carries the element index allocation
// so the slow path can call PostWriteElementBarrier with the exact index.
5393 class OutOfLineCallPostWriteElementBarrier
5394 : public OutOfLineCodeBase<CodeGenerator> {
5395 LInstruction* lir_;
5396 const LAllocation* object_;
5397 const LAllocation* index_;
5399 public:
5400 OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
5401 const LAllocation* object,
5402 const LAllocation* index)
5403 : lir_(lir), object_(object), index_(index) {}
5405 void accept(CodeGenerator* codegen) override {
5406 codegen->visitOutOfLineCallPostWriteElementBarrier(this);
5409 LInstruction* lir() const { return lir_; }
5411 const LAllocation* object() const { return object_; }
5413 const LAllocation* index() const { return index_; }
// Out-of-line slow path for element post-write barriers: save live
// volatiles, materialize a constant object if needed, and call
// PostWriteElementBarrier(runtime, obj, index) via an aligned ABI call.
5416 void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
5417 OutOfLineCallPostWriteElementBarrier* ool) {
5418 saveLiveVolatile(ool->lir());
5420 const LAllocation* obj = ool->object();
5421 const LAllocation* index = ool->index();
5423 Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
5424 Register indexreg = ToRegister(index);
5426 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5427 regs.takeUnchecked(indexreg);
5429 if (obj->isConstant()) {
5430 objreg = regs.takeAny();
5431 masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
5432 } else {
5433 regs.takeUnchecked(objreg);
5436 Register runtimereg = regs.takeAny();
5437 using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
5438 masm.setupAlignedABICall();
5439 masm.mov(ImmPtr(gen->runtime), runtimereg);
5440 masm.passABIArg(runtimereg);
5441 masm.passABIArg(objreg);
5442 masm.passABIArg(indexreg);
5443 masm.callWithABI<Fn, PostWriteElementBarrier>();
5445 restoreLiveVolatile(ool->lir());
5447 masm.jump(ool->rejoin());
// Element post-write barrier for storing an Object value.
5450 void CodeGenerator::visitPostWriteElementBarrierO(
5451 LPostWriteElementBarrierO* lir) {
5452 auto ool = new (alloc())
5453 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5454 visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
5455 ool);
// Element post-write barrier for storing a String value.
5458 void CodeGenerator::visitPostWriteElementBarrierS(
5459 LPostWriteElementBarrierS* lir) {
5460 auto ool = new (alloc())
5461 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5462 visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
5463 ool);
// Element post-write barrier for storing a BigInt value.
5466 void CodeGenerator::visitPostWriteElementBarrierBI(
5467 LPostWriteElementBarrierBI* lir) {
5468 auto ool = new (alloc())
5469 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5470 visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
5471 ool);
// Element post-write barrier for storing a boxed Value.
5474 void CodeGenerator::visitPostWriteElementBarrierV(
5475 LPostWriteElementBarrierV* lir) {
5476 auto ool = new (alloc())
5477 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5478 visitPostWriteBarrierCommonV(lir, ool);
// Debug assertion: verify that a post-write barrier was legitimately elided,
// i.e. either the object is in the nursery or the value is not a nursery
// cell; otherwise trap with assumeUnreachable.
5481 void CodeGenerator::visitAssertCanElidePostWriteBarrier(
5482 LAssertCanElidePostWriteBarrier* lir) {
5483 Register object = ToRegister(lir->object());
5484 ValueOperand value =
5485 ToValue(lir, LAssertCanElidePostWriteBarrier::ValueIndex);
5486 Register temp = ToRegister(lir->temp0());
5488 Label ok;
5489 masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, &ok);
5490 masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp, &ok);
5492 masm.assumeUnreachable("Unexpected missing post write barrier");
5494 masm.bind(&ok);
// Core helper to invoke a JSNative directly from Ion: builds the vp array on
// the stack (result slot, callee, |this|, args already pushed by the
// caller), constructs a fake native exit frame, performs the ABI call, and
// loads vp[0] into JSReturnOperand. |unusedStack| is the stack adjustment
// that positions the StackPointer at &vp[1]; the argc/context/vp registers
// are the ABI argument registers chosen by the LIR. The caller is
// responsible for unwinding the exit frame afterwards.
5497 template <typename LCallIns>
5498 void CodeGenerator::emitCallNative(LCallIns* call, JSNative native,
5499 Register argContextReg, Register argUintNReg,
5500 Register argVpReg, Register tempReg,
5501 uint32_t unusedStack) {
5502 masm.checkStackAlignment();
5504 // Native functions have the signature:
5505 // bool (*)(JSContext*, unsigned, Value* vp)
5506 // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
5507 // are the function arguments.
5509 // Allocate space for the outparam, moving the StackPointer to what will be
5510 // &vp[1].
5511 masm.adjustStack(unusedStack);
5513 // Push a Value containing the callee object: natives are allowed to access
5514 // their callee before setting the return value. The StackPointer is moved
5515 // to &vp[0].
5517 // Also reserves the space for |NativeExitFrameLayout::{lo,hi}CalleeResult_|.
// Class hooks only know the callee at runtime (a register); single-target
// calls can bake the callee JSFunction in as an immediate.
5518 if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
5519 Register calleeReg = ToRegister(call->getCallee());
5520 masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
5522 // Enter the callee realm.
5523 if (call->mir()->maybeCrossRealm()) {
5524 masm.switchToObjectRealm(calleeReg, tempReg);
5526 } else {
5527 WrappedFunction* target = call->mir()->getSingleTarget();
5528 masm.Push(ObjectValue(*target->rawNativeJSFunction()));
5530 // Enter the callee realm.
5531 if (call->mir()->maybeCrossRealm()) {
5532 masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
5533 masm.switchToObjectRealm(tempReg, tempReg);
5537 // Preload arguments into registers.
5538 masm.loadJSContext(argContextReg);
5539 masm.moveStackPtrTo(argVpReg);
5541 // Initialize |NativeExitFrameLayout::argc_|.
5542 masm.Push(argUintNReg);
5544 // Construct native exit frame.
5546 // |buildFakeExitFrame| initializes |NativeExitFrameLayout::exit_| and
5547 // |enterFakeExitFrameForNative| initializes |NativeExitFrameLayout::footer_|.
5549 // The NativeExitFrameLayout is now fully initialized.
5550 uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
5551 masm.enterFakeExitFrameForNative(argContextReg, tempReg,
5552 call->mir()->isConstructing());
5554 markSafepointAt(safepointOffset, call);
5556 // Construct and execute call.
5557 masm.setupAlignedABICall();
5558 masm.passABIArg(argContextReg);
5559 masm.passABIArg(argUintNReg);
5560 masm.passABIArg(argVpReg);
5562 ensureOsiSpace();
5563 // If we're using a simulator build, `native` will already point to the
5564 // simulator's call-redirection code for LCallClassHook. Load the address in
5565 // a register first so that we don't try to redirect it a second time.
5566 bool emittedCall = false;
5567 #ifdef JS_SIMULATOR
5568 if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
5569 masm.movePtr(ImmPtr(native), tempReg);
5570 masm.callWithABI(tempReg);
5571 emittedCall = true;
5573 #endif
5574 if (!emittedCall) {
5575 masm.callWithABI(DynamicFunction<JSNative>(native), ABIType::General,
5576 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
5579 // Test for failure.
5580 masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
5582 // Exit the callee realm.
5583 if (call->mir()->maybeCrossRealm()) {
5584 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5587 // Load the outparam vp[0] into output register(s).
5588 masm.loadValue(
5589 Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
5590 JSReturnOperand);
5592 // Until C++ code is instrumented against Spectre, prevent speculative
5593 // execution from returning any private data.
5594 if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
5595 call->mir()->hasLiveDefUses()) {
5596 masm.speculationBarrier();
5599 #ifdef DEBUG
5600 // Native constructors are guaranteed to return an Object value.
5601 if (call->mir()->isConstructing()) {
5602 Label notPrimitive;
5603 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5604 &notPrimitive);
5605 masm.assumeUnreachable("native constructors don't return primitives");
5606 masm.bind(&notPrimitive);
5608 #endif
// Convenience overload: pulls the ABI argument registers and temp from the
// LIR node, initializes argc, delegates to the full emitCallNative, and then
// unwinds the native exit frame it created.
5611 template <typename LCallIns>
5612 void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
5613 uint32_t unusedStack =
5614 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5616 // Registers used for callWithABI() argument-passing.
5617 const Register argContextReg = ToRegister(call->getArgContextReg());
5618 const Register argUintNReg = ToRegister(call->getArgUintNReg());
5619 const Register argVpReg = ToRegister(call->getArgVpReg());
5621 // Misc. temporary registers.
5622 const Register tempReg = ToRegister(call->getTempReg());
5624 DebugOnly<uint32_t> initialStack = masm.framePushed();
5626 // Initialize the argc register.
5627 masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
5629 // Create the exit frame and call the native.
5630 emitCallNative(call, native, argContextReg, argUintNReg, argVpReg, tempReg,
5631 unusedStack);
5633 // The next instruction is removing the footer of the exit frame, so there
5634 // is no need for leaveFakeExitFrame.
5636 // Move the StackPointer back to its original location, unwinding the native
5637 // exit frame.
5638 masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
5639 MOZ_ASSERT(masm.framePushed() == initialStack);
// Call a known native function. If the call result is unused and the target
// publishes an ignores-return-value variant in its JSJitInfo, call that
// variant instead.
5642 void CodeGenerator::visitCallNative(LCallNative* call) {
5643 WrappedFunction* target = call->getSingleTarget();
5644 MOZ_ASSERT(target);
5645 MOZ_ASSERT(target->isNativeWithoutJitEntry());
5647 JSNative native = target->native();
5648 if (call->ignoresReturnValue() && target->hasJitInfo()) {
5649 const JSJitInfo* jitInfo = target->jitInfo();
5650 if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
5651 native = jitInfo->ignoresReturnValueMethod;
5654 emitCallNative(call, native);
// Call a class call/construct hook; the hook function comes from the MIR.
5657 void CodeGenerator::visitCallClassHook(LCallClassHook* call) {
5658 emitCallNative(call, call->mir()->target());
// Load the DOM private pointer of |obj| into |priv|. Native DOM objects keep
// it in fixed slot 0; DOM proxies keep it in reserved slot 0 behind the
// proxy's reserved-slots pointer.
5661 static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
5662 DOMObjectKind kind) {
5663 // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
5664 // will be in the first slot but may be fixed or non-fixed.
5665 MOZ_ASSERT(obj != priv);
5667 switch (kind) {
5668 case DOMObjectKind::Native:
5669 // If it's a native object, the value must be in a fixed slot.
5670 // See CanAttachDOMCall in CacheIR.cpp.
5671 masm.debugAssertObjHasFixedSlots(obj, priv);
5672 masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
5673 break;
5674 case DOMObjectKind::Proxy: {
5675 #ifdef DEBUG
5676 // Sanity check: it must be a DOM proxy.
5677 Label isDOMProxy;
5678 masm.branchTestProxyHandlerFamily(
5679 Assembler::Equal, obj, priv, GetDOMProxyHandlerFamily(), &isDOMProxy);
5680 masm.assumeUnreachable("Expected a DOM proxy");
5681 masm.bind(&isDOMProxy);
5682 #endif
5683 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
5684 masm.loadPrivate(
5685 Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
5686 break;
5691 void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
5692 WrappedFunction* target = call->getSingleTarget();
5693 MOZ_ASSERT(target);
5694 MOZ_ASSERT(target->isNativeWithoutJitEntry());
5695 MOZ_ASSERT(target->hasJitInfo());
5696 MOZ_ASSERT(call->mir()->isCallDOMNative());
5698 int unusedStack = UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5700 // Registers used for callWithABI() argument-passing.
5701 const Register argJSContext = ToRegister(call->getArgJSContext());
5702 const Register argObj = ToRegister(call->getArgObj());
5703 const Register argPrivate = ToRegister(call->getArgPrivate());
5704 const Register argArgs = ToRegister(call->getArgArgs());
5706 DebugOnly<uint32_t> initialStack = masm.framePushed();
5708 masm.checkStackAlignment();
5710 // DOM methods have the signature:
5711 // bool (*)(JSContext*, HandleObject, void* private, const
5712 // JSJitMethodCallArgs& args)
5713 // Where args is initialized from an argc and a vp, vp[0] is space for an
5714 // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
5715 // function arguments. Note that args stores the argv, not the vp, and
5716 // argv == vp + 2.
5718 // Nestle the stack up against the pushed arguments, leaving StackPointer at
5719 // &vp[1]
5720 masm.adjustStack(unusedStack);
5721 // argObj is filled with the extracted object, then returned.
5722 Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
5723 MOZ_ASSERT(obj == argObj);
5725 // Push a Value containing the callee object: natives are allowed to access
5726 // their callee before setting the return value. After this the StackPointer
5727 // points to &vp[0].
5728 masm.Push(ObjectValue(*target->rawNativeJSFunction()));
5730 // Now compute the argv value. Since StackPointer is pointing to &vp[0] and
5731 // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
5732 // StackPointer.
5733 static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
5734 static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
5735 IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
5736 masm.computeEffectiveAddress(
5737 Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);
5739 LoadDOMPrivate(masm, obj, argPrivate,
5740 static_cast<MCallDOMNative*>(call->mir())->objectKind());
5742 // Push argc from the call instruction into what will become the IonExitFrame
5743 masm.Push(Imm32(call->numActualArgs()));
5745 // Push our argv onto the stack
5746 masm.Push(argArgs);
5747 // And store our JSJitMethodCallArgs* in argArgs.
5748 masm.moveStackPtrTo(argArgs);
5750 // Push |this| object for passing HandleObject. We push after argc to
5751 // maintain the same sp-relative location of the object pointer with other
5752 // DOMExitFrames.
5753 masm.Push(argObj);
5754 masm.moveStackPtrTo(argObj);
5756 if (call->mir()->maybeCrossRealm()) {
5757 // We use argJSContext as scratch register here.
5758 masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
5759 masm.switchToObjectRealm(argJSContext, argJSContext);
5762 // Construct native exit frame.
5763 uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
5764 masm.loadJSContext(argJSContext);
5765 masm.enterFakeExitFrame(argJSContext, argJSContext,
5766 ExitFrameType::IonDOMMethod);
5768 markSafepointAt(safepointOffset, call);
5770 // Construct and execute call.
5771 masm.setupAlignedABICall();
5772 masm.loadJSContext(argJSContext);
5773 masm.passABIArg(argJSContext);
5774 masm.passABIArg(argObj);
5775 masm.passABIArg(argPrivate);
5776 masm.passABIArg(argArgs);
5777 ensureOsiSpace();
5778 masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
5779 ABIType::General,
5780 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
5782 if (target->jitInfo()->isInfallible) {
5783 masm.loadValue(Address(masm.getStackPointer(),
5784 IonDOMMethodExitFrameLayout::offsetOfResult()),
5785 JSReturnOperand);
5786 } else {
5787 // Test for failure.
5788 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
5790 // Load the outparam vp[0] into output register(s).
5791 masm.loadValue(Address(masm.getStackPointer(),
5792 IonDOMMethodExitFrameLayout::offsetOfResult()),
5793 JSReturnOperand);
5796 // Switch back to the current realm if needed. Note: if the DOM method threw
5797 // an exception, the exception handler will do this.
5798 if (call->mir()->maybeCrossRealm()) {
5799 static_assert(!JSReturnOperand.aliases(ReturnReg),
5800 "Clobbering ReturnReg should not affect the return value");
5801 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5804 // Until C++ code is instrumented against Spectre, prevent speculative
5805 // execution from returning any private data.
5806 if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
5807 masm.speculationBarrier();
5810 // The next instruction is removing the footer of the exit frame, so there
5811 // is no need for leaveFakeExitFrame.
5813 // Move the StackPointer back to its original location, unwinding the native
5814 // exit frame.
5815 masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
5816 MOZ_ASSERT(masm.framePushed() == initialStack);
// Load the value of a self-hosting intrinsic by name. This always goes
// through a VM call (GetIntrinsicValue); the result is left in the VM call's
// return-value operand.
5819 void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
5820 pushArg(ImmGCPtr(lir->mir()->name()));
5822 using Fn = bool (*)(JSContext* cx, Handle<PropertyName*>, MutableHandleValue);
5823 callVM<Fn, GetIntrinsicValue>(lir);
// Slow path: call |calleereg| through the generic jit::InvokeFunction VM
// function. The pushed arguments (argv) begin at the stack pointer once
// |unusedStack| has been freed; the stack is restored before returning so
// framePushed_ is unchanged for the caller.
5826 void CodeGenerator::emitCallInvokeFunction(
5827 LInstruction* call, Register calleereg, bool constructing,
5828 bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
5829 // Nestle %esp up to the argument vector.
5830 // Each path must account for framePushed_ separately, for callVM to be valid.
5831 masm.freeStack(unusedStack);
5833 pushArg(masm.getStackPointer()); // argv.
5834 pushArg(Imm32(argc)); // argc.
5835 pushArg(Imm32(ignoresReturnValue)); // ignoresReturnValue.
5836 pushArg(Imm32(constructing)); // constructing.
5837 pushArg(calleereg); // JSFunction*.
5839 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5840 MutableHandleValue);
5841 callVM<Fn, jit::InvokeFunction>(call);
5843 // Un-nestle %esp from the argument vector. No prefix was pushed.
5844 masm.reserveStack(unusedStack);
// Emit a call to an unknown (dynamic) callee. The callee object arrives in
// IonGenericCallCalleeReg; all runtime dispatch (jit-entry vs. native,
// arguments rectifier, fun_call/bound-function unwrapping, VM fallback)
// happens inside the shared IonGenericCall trampoline, so this site only
// materializes argc, frees the unused stack reservation, and calls the stub.
5847 void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
5848 // The callee is passed straight through to the trampoline.
5849 MOZ_ASSERT(ToRegister(call->getCallee()) == IonGenericCallCalleeReg);
5851 Register argcReg = ToRegister(call->getArgc());
5852 uint32_t unusedStack =
5853 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5855 // Known-target case is handled by LCallKnown.
5856 MOZ_ASSERT(!call->hasSingleTarget());
5858 masm.checkStackAlignment();
5860 masm.move32(Imm32(call->numActualArgs()), argcReg);
5862 // Nestle the StackPointer up to the argument vector.
5863 masm.freeStack(unusedStack);
5864 ensureOsiSpace();
5866 auto kind = call->mir()->isConstructing() ? IonGenericCallKind::Construct
5867 : IonGenericCallKind::Call;
5869 TrampolinePtr genericCallStub =
5870 gen->jitRuntime()->getIonGenericCallStub(kind);
5871 uint32_t callOffset = masm.callJit(genericCallStub);
5872 markSafepointAt(callOffset, call);
5874 if (call->mir()->maybeCrossRealm()) {
5875 static_assert(!JSReturnOperand.aliases(ReturnReg),
5876 "ReturnReg available as scratch after scripted calls");
5877 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5880 // Restore stack pointer.
5881 masm.setFramePushed(frameSize());
5882 emitRestoreStackPointerFromFP();
5884 // If the return value of the constructing function is Primitive,
5885 // replace the return value with the Object from CreateThis.
5886 if (call->mir()->isConstructing()) {
5887 Label notPrimitive;
5888 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5889 &notPrimitive);
5890 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
5891 JSReturnOperand);
5892 #ifdef DEBUG
5893 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5894 &notPrimitive);
5895 masm.assumeUnreachable("CreateThis creates an object");
5896 #endif
5897 masm.bind(&notPrimitive);
// Helper for the generic-call trampoline: slide the |argc| Values currently
// on the stack down by one Value slot (8 bytes), overwriting the bottom
// value. The copy proceeds one machine word at a time (so on 32-bit targets
// each Value takes two loop iterations). Jumps to |done| when finished;
// clobbers |curr|, |end| and |scratch|.
5901 void JitRuntime::generateIonGenericCallArgumentsShift(
5902 MacroAssembler& masm, Register argc, Register curr, Register end,
5903 Register scratch, Label* done) {
5904 static_assert(sizeof(Value) == 8);
5905 // There are |argc| Values on the stack. Shift them all down by 8 bytes,
5906 // overwriting the first value.
5908 // Initialize `curr` to the destination of the first copy, and `end` to the
5909 // final value of curr.
5910 masm.moveStackPtrTo(curr);
5911 masm.computeEffectiveAddress(BaseValueIndex(curr, argc), end);
5913 Label loop;
5914 masm.bind(&loop);
5915 masm.branchPtr(Assembler::Equal, curr, end, done);
5916 masm.loadPtr(Address(curr, 8), scratch);
5917 masm.storePtr(scratch, Address(curr, 0));
5918 masm.addPtr(Imm32(sizeof(uintptr_t)), curr);
5919 masm.jump(&loop);
// Generate the shared trampoline used by every LCallGeneric site (one
// variant for IonGenericCallKind::Call, one for ::Construct). The stub
// dispatches on the callee's kind at runtime: jit-entry functions are
// tail-called (via the arguments rectifier when under-applied), natives go
// through generateIonGenericCallNativeFunction, fun_call and bound-function
// callees are unwrapped and re-enter at |entry|, and everything else falls
// back to the jit::InvokeFunction VM wrapper.
5922 void JitRuntime::generateIonGenericCallStub(MacroAssembler& masm,
5923 IonGenericCallKind kind) {
5924 AutoCreatedBy acb(masm, "JitRuntime::generateIonGenericCallStub");
5925 ionGenericCallStubOffset_[kind] = startTrampolineCode(masm);
5927 // This code is tightly coupled with visitCallGeneric.
5929 // Upon entry:
5930 // IonGenericCallCalleeReg contains a pointer to the callee object.
5931 // IonGenericCallArgcReg contains the number of actual args.
5932 // The arguments have been pushed onto the stack:
5933 // [newTarget] (iff isConstructing)
5934 // [argN]
5935 // ...
5936 // [arg1]
5937 // [arg0]
5938 // [this]
5939 // <return address> (if not JS_USE_LINK_REGISTER)
5941 // This trampoline is responsible for entering the callee's realm,
5942 // massaging the stack into the right shape, and then performing a
5943 // tail call. We will return directly to the Ion code from the
5944 // callee.
5946 // To do a tail call, we keep the return address in a register, even
5947 // on platforms that don't normally use a link register, and push it
5948 // just before jumping to the callee, after we are done setting up
5949 // the stack.
5951 // The caller is responsible for switching back to the caller's
5952 // realm and cleaning up the stack.
5954 Register calleeReg = IonGenericCallCalleeReg;
5955 Register argcReg = IonGenericCallArgcReg;
5956 Register scratch = IonGenericCallScratch;
5957 Register scratch2 = IonGenericCallScratch2;
5959 #ifndef JS_USE_LINK_REGISTER
5960 Register returnAddrReg = IonGenericCallReturnAddrReg;
5961 masm.pop(returnAddrReg);
5962 #endif
5964 #ifdef JS_CODEGEN_ARM
5965 // The default second scratch register on arm is lr, which we need
5966 // preserved for tail calls.
5967 AutoNonDefaultSecondScratchRegister andssr(masm, IonGenericSecondScratchReg);
5968 #endif
5970 bool isConstructing = kind == IonGenericCallKind::Construct;
5972 Label entry, notFunction, noJitEntry, vmCall;
5973 // |entry| is re-entered after fun_call / bound-function unwrapping below.
5974 masm.bind(&entry);
5975 // Guard that the callee is actually a function.
5976 masm.branchTestObjIsFunction(Assembler::NotEqual, calleeReg, scratch,
5977 calleeReg, &notFunction);
5979 // Guard that the callee supports the [[Call]] or [[Construct]] operation.
5980 // If these tests fail, we will call into the VM to throw an exception.
5981 if (isConstructing) {
5982 masm.branchTestFunctionFlags(calleeReg, FunctionFlags::CONSTRUCTOR,
5983 Assembler::Zero, &vmCall);
5984 } else {
5985 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
5986 calleeReg, scratch, &vmCall);
5989 if (isConstructing) {
5990 // Use the slow path if CreateThis was unable to create the |this| object.
5991 Address thisAddr(masm.getStackPointer(), 0);
5992 masm.branchTestNull(Assembler::Equal, thisAddr, &vmCall);
5995 masm.switchToObjectRealm(calleeReg, scratch);
5997 // Load jitCodeRaw for callee if it exists.
5998 masm.branchIfFunctionHasNoJitEntry(calleeReg, isConstructing, &noJitEntry);
6000 // ****************************
6001 // * Functions with jit entry *
6002 // ****************************
6003 masm.loadJitCodeRaw(calleeReg, scratch2);
6005 // Construct the JitFrameLayout.
6006 masm.PushCalleeToken(calleeReg, isConstructing);
6007 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcReg, scratch);
6008 #ifndef JS_USE_LINK_REGISTER
6009 masm.push(returnAddrReg);
6010 #endif
6012 // Check whether we need a rectifier frame.
6013 Label noRectifier;
6014 masm.loadFunctionArgCount(calleeReg, scratch);
6015 masm.branch32(Assembler::BelowOrEqual, scratch, argcReg, &noRectifier);
6017 // Tail-call the arguments rectifier.
6018 // Because all trampolines are created at the same time,
6019 // we can't create a TrampolinePtr for the arguments rectifier,
6020 // because it hasn't been linked yet. We can, however, directly
6021 // encode its offset.
6022 Label rectifier;
6023 bindLabelToOffset(&rectifier, argumentsRectifierOffset_);
6025 masm.jump(&rectifier);
6028 // Tail call the jit entry.
6029 masm.bind(&noRectifier);
6030 masm.jump(scratch2);
6032 // ********************
6033 // * Native functions *
6034 // ********************
6035 masm.bind(&noJitEntry);
6036 if (!isConstructing) {
6037 generateIonGenericCallFunCall(masm, &entry, &vmCall);
6039 generateIonGenericCallNativeFunction(masm, isConstructing);
6041 // *******************
6042 // * Bound functions *
6043 // *******************
6044 // TODO: support class hooks?
6045 masm.bind(&notFunction);
6046 if (!isConstructing) {
6047 // TODO: support generic bound constructors?
6048 generateIonGenericCallBoundFunction(masm, &entry, &vmCall);
6051 // ********************
6052 // * Fallback VM call *
6053 // ********************
6054 // The pushes below mirror the signature of jit::InvokeFunction (see Fn).
6055 masm.bind(&vmCall);
6056 masm.push(masm.getStackPointer()); // argv
6057 masm.push(argcReg); // argc
6058 masm.push(Imm32(false)); // ignores return value
6059 masm.push(Imm32(isConstructing)); // constructing
6060 masm.push(calleeReg); // callee
6062 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6063 MutableHandleValue);
6064 VMFunctionId id = VMFunctionToId<Fn, jit::InvokeFunction>::id;
6065 uint32_t invokeFunctionOffset = functionWrapperOffsets_[size_t(id)];
6066 Label invokeFunctionVMEntry;
6067 bindLabelToOffset(&invokeFunctionVMEntry, invokeFunctionOffset);
6069 masm.pushFrameDescriptor(FrameType::IonJS);
6070 #ifndef JS_USE_LINK_REGISTER
6071 masm.push(returnAddrReg);
6072 #endif
6073 masm.jump(&invokeFunctionVMEntry);
// Native-function path of the generic call trampoline: build the vp array on
// the stack (the callee Value pushed here becomes vp[0]; |this| and the args
// were already pushed by the Ion caller), construct a fake native exit frame,
// and invoke the native through an ABI call. Unlike the jit-entry path this
// is not a tail call: on success the native's result is loaded into
// JSReturnOperand and we masm.ret() back to the Ion caller.
6076 void JitRuntime::generateIonGenericCallNativeFunction(MacroAssembler& masm,
6077 bool isConstructing) {
6078 Register calleeReg = IonGenericCallCalleeReg;
6079 Register argcReg = IonGenericCallArgcReg;
6080 Register scratch = IonGenericCallScratch;
6081 Register scratch2 = IonGenericCallScratch2;
6082 Register contextReg = IonGenericCallScratch3;
6083 #ifndef JS_USE_LINK_REGISTER
6084 Register returnAddrReg = IonGenericCallReturnAddrReg;
6085 #endif
6087 // Push a value containing the callee, which will become argv[0].
6088 masm.pushValue(JSVAL_TYPE_OBJECT, calleeReg);
6090 // Load the callee address into calleeReg.
6091 #ifdef JS_SIMULATOR
6092 // NOTE(review): under the simulator this presumably routes through a
6093 // redirection stub that can dispatch to any native — confirm.
6092 masm.movePtr(ImmPtr(RedirectedCallAnyNative()), calleeReg);
6093 #else
6094 masm.loadPrivate(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6095 calleeReg);
6096 #endif
6098 // Load argv into scratch2.
6099 masm.moveStackPtrTo(scratch2);
6101 // Push argc.
6102 masm.push(argcReg);
6104 masm.loadJSContext(contextReg);
6106 // Construct native exit frame. Note that unlike other cases in this
6107 // trampoline, this code does not use a tail call.
6108 masm.pushFrameDescriptor(FrameType::IonJS);
6109 #ifdef JS_USE_LINK_REGISTER
6110 masm.pushReturnAddress();
6111 #else
6112 masm.push(returnAddrReg);
6113 #endif
6115 masm.push(FramePointer);
6116 masm.moveStackPtrTo(FramePointer);
6117 masm.enterFakeExitFrameForNative(contextReg, scratch, isConstructing);
6119 masm.setupUnalignedABICall(scratch);
6120 masm.passABIArg(contextReg); // cx
6121 masm.passABIArg(argcReg); // argc
6122 masm.passABIArg(scratch2); // argv
6124 masm.callWithABI(calleeReg);
6126 // Test for failure.
6127 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
6129 masm.loadValue(
6130 Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
6131 JSReturnOperand);
6133 // Leave the exit frame.
6134 masm.moveToStackPtr(FramePointer);
6135 masm.pop(FramePointer);
6137 // Return.
6138 masm.ret();
// Fast path for Function.prototype.call inside the generic call trampoline:
// if the callee's native is js::fun_call, rewrite the stack in place
// (replace the callee with |this|, shift the remaining arguments down,
// decrement argc) and jump back to |entry| to call the unwrapped target.
// Falls through past |notFunCall| for any other native. A non-object |this|
// bails out to |vmCall|.
6141 void JitRuntime::generateIonGenericCallFunCall(MacroAssembler& masm,
6142 Label* entry, Label* vmCall) {
6143 Register calleeReg = IonGenericCallCalleeReg;
6144 Register argcReg = IonGenericCallArgcReg;
6145 Register scratch = IonGenericCallScratch;
6146 Register scratch2 = IonGenericCallScratch2;
6147 Register scratch3 = IonGenericCallScratch3;
6149 Label notFunCall;
6150 masm.branchPtr(Assembler::NotEqual,
6151 Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6152 ImmPtr(js::fun_call), &notFunCall);
6154 // In general, we can implement fun_call by replacing calleeReg with
6155 // |this|, sliding all the other arguments down, and decrementing argc.
6157 // *BEFORE* *AFTER*
6158 // [argN] argc = N+1 <padding>
6159 // ... [argN] argc = N
6160 // [arg1] ...
6161 // [arg0] [arg1] <- now arg0
6162 // [this] <- top of stack (aligned) [arg0] <- now this
6164 // The only exception is when argc is already 0, in which case instead
6165 // of shifting arguments down we replace [this] with UndefinedValue():
6167 // *BEFORE* *AFTER*
6168 // [this] argc = 0 [undef] argc = 0
6170 // After making this transformation, we can jump back to the beginning
6171 // of this trampoline to handle the inner call.
6173 // Guard that |this| is an object. If it is, replace calleeReg.
6174 masm.fallibleUnboxObject(Address(masm.getStackPointer(), 0), scratch, vmCall);
6175 masm.movePtr(scratch, calleeReg);
6177 Label hasArgs;
6178 masm.branch32(Assembler::NotEqual, argcReg, Imm32(0), &hasArgs);
6180 // No arguments. Replace |this| with |undefined| and start from the top.
6181 masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), 0));
6182 masm.jump(entry);
6184 masm.bind(&hasArgs);
6186 Label doneSliding;
6187 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6188 scratch3, &doneSliding);
6189 masm.bind(&doneSliding);
6190 masm.sub32(Imm32(1), argcReg);
6192 masm.jump(entry);
6194 masm.bind(&notFunCall);
// Bound-function path of the generic call trampoline: if the callee is a
// BoundFunctionObject, replace |this| with the bound this, push the bound
// arguments (re-aligning the stack when their count is odd), add them to
// argc, load the bound target into calleeReg, and jump back to |entry| to
// call the target. Any non-bound-function object, or a call that would
// exceed JIT_ARGS_LENGTH_MAX, bails out to |vmCall|.
6197 void JitRuntime::generateIonGenericCallBoundFunction(MacroAssembler& masm,
6198 Label* entry,
6199 Label* vmCall) {
6200 Register calleeReg = IonGenericCallCalleeReg;
6201 Register argcReg = IonGenericCallArgcReg;
6202 Register scratch = IonGenericCallScratch;
6203 Register scratch2 = IonGenericCallScratch2;
6204 Register scratch3 = IonGenericCallScratch3;
6206 masm.branchTestObjClass(Assembler::NotEqual, calleeReg,
6207 &BoundFunctionObject::class_, scratch, calleeReg,
6208 vmCall);
6210 Address targetSlot(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
6211 Address flagsSlot(calleeReg, BoundFunctionObject::offsetOfFlagsSlot());
6212 Address thisSlot(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
6213 Address firstInlineArgSlot(
6214 calleeReg, BoundFunctionObject::offsetOfFirstInlineBoundArg());
6216 // Check that we won't be pushing too many arguments.
6217 masm.load32(flagsSlot, scratch);
6218 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6219 masm.add32(argcReg, scratch);
6220 masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX), vmCall);
6222 // The stack is currently correctly aligned for a jit call. We will
6223 // be updating the `this` value and potentially adding additional
6224 // arguments. On platforms with 16-byte alignment, if the number of
6225 // bound arguments is odd, we have to move the arguments that are
6226 // currently on the stack. For example, with one bound argument:
6228 // *BEFORE* *AFTER*
6229 // [argN] <padding>
6230 // ... [argN] |
6231 // [arg1] ... | These arguments have been
6232 // [arg0] [arg1] | shifted down 8 bytes.
6233 // [this] <- top of stack (aligned) [arg0] v
6234 // [bound0] <- one bound argument (odd)
6235 // [boundThis] <- top of stack (aligned)
6237 Label poppedThis;
6238 if (JitStackValueAlignment > 1) {
6239 Label alreadyAligned;
6240 // Testing the lowest bit of the bound-arg count (i.e. whether it is odd).
6240 masm.branchTest32(Assembler::Zero, flagsSlot,
6241 Imm32(1 << BoundFunctionObject::NumBoundArgsShift),
6242 &alreadyAligned);
6244 // We have an odd number of bound arguments. Shift the existing arguments
6245 // down by 8 bytes.
6246 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6247 scratch3, &poppedThis);
6248 masm.bind(&alreadyAligned);
6251 // Pop the current `this`. It will be replaced with the bound `this`.
6252 masm.freeStack(sizeof(Value));
6253 masm.bind(&poppedThis);
6255 // Load the number of bound arguments in scratch
6256 masm.load32(flagsSlot, scratch);
6257 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6259 Label donePushingBoundArguments;
6260 masm.branch32(Assembler::Equal, scratch, Imm32(0),
6261 &donePushingBoundArguments);
6263 // Update argc to include bound arguments.
6264 masm.add32(scratch, argcReg);
6266 // Load &boundArgs[0] in scratch2.
6267 Label outOfLineBoundArguments, haveBoundArguments;
6268 masm.branch32(Assembler::Above, scratch,
6269 Imm32(BoundFunctionObject::MaxInlineBoundArgs),
6270 &outOfLineBoundArguments);
6271 masm.computeEffectiveAddress(firstInlineArgSlot, scratch2);
6272 masm.jump(&haveBoundArguments);
6274 // Too many to store inline: the slot holds an object whose elements are
6275 // the bound arguments.
6274 masm.bind(&outOfLineBoundArguments);
6275 masm.unboxObject(firstInlineArgSlot, scratch2);
6276 masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2);
6278 masm.bind(&haveBoundArguments);
6280 // Load &boundArgs[numBoundArgs] in scratch.
6281 BaseObjectElementIndex lastBoundArg(scratch2, scratch);
6282 masm.computeEffectiveAddress(lastBoundArg, scratch);
6284 // Push the bound arguments, starting with the last one.
6285 // Copying pre-decrements scratch until scratch2 is reached.
6286 Label boundArgumentsLoop;
6287 masm.bind(&boundArgumentsLoop);
6288 masm.subPtr(Imm32(sizeof(Value)), scratch);
6289 masm.pushValue(Address(scratch, 0));
6290 masm.branchPtr(Assembler::Above, scratch, scratch2, &boundArgumentsLoop);
6291 masm.bind(&donePushingBoundArguments);
6293 // Push the bound `this`.
6294 masm.pushValue(thisSlot);
6296 // Load the target in calleeReg.
6297 masm.unboxObject(targetSlot, calleeReg);
6299 // At this point, all preconditions for entering the trampoline are met:
6300 // - calleeReg contains a pointer to the callee object
6301 // - argcReg contains the number of actual args (now including bound args)
6302 // - the arguments are on the stack with the correct alignment.
6303 // Instead of generating more code, we can jump back to the entry point
6304 // of the trampoline to call the bound target.
6305 masm.jump(entry);
// Emit a call to a single known target that has a jit entry. Natives without
// a jit entry are handled by LCallNative instead (asserted below). Builds a
// JitFrameLayout directly and calls the target's jitCodeRaw; the only slow
// path here is calling a class constructor without |new|, which is routed
// through the generic InvokeFunction VM path.
6308 void CodeGenerator::visitCallKnown(LCallKnown* call) {
6309 Register calleereg = ToRegister(call->getFunction());
6310 Register objreg = ToRegister(call->getTempObject());
6311 uint32_t unusedStack =
6312 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
6313 WrappedFunction* target = call->getSingleTarget();
6315 // Native single targets (except Wasm and TrampolineNative functions) are
6316 // handled by LCallNative.
6317 MOZ_ASSERT(target->hasJitEntry());
6319 // Missing arguments must have been explicitly appended by WarpBuilder.
6320 DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
6321 MOZ_ASSERT(target->nargs() <=
6322 call->mir()->numStackArgs() - numNonArgsOnStack);
6324 MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());
6326 masm.checkStackAlignment();
6328 if (target->isClassConstructor() && !call->isConstructing()) {
6329 emitCallInvokeFunction(call, calleereg, call->isConstructing(),
6330 call->ignoresReturnValue(), call->numActualArgs(),
6331 unusedStack);
6332 return;
6335 MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());
6337 MOZ_ASSERT(!call->mir()->needsThisCheck());
6339 if (call->mir()->maybeCrossRealm()) {
6340 masm.switchToObjectRealm(calleereg, objreg);
6343 masm.loadJitCodeRaw(calleereg, objreg);
6345 // Nestle the StackPointer up to the argument vector.
6346 masm.freeStack(unusedStack);
6348 // Construct the JitFrameLayout.
6349 masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
6350 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, call->numActualArgs());
6352 // Finally call the function in objreg.
6353 ensureOsiSpace();
6354 uint32_t callOffset = masm.callJit(objreg);
6355 markSafepointAt(callOffset, call);
6357 if (call->mir()->maybeCrossRealm()) {
6358 static_assert(!JSReturnOperand.aliases(ReturnReg),
6359 "ReturnReg available as scratch after scripted calls");
6360 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6363 // Restore stack pointer: pop JitFrameLayout fields still left on the stack
6364 // and undo the earlier |freeStack(unusedStack)|.
6365 int prefixGarbage =
6366 sizeof(JitFrameLayout) - JitFrameLayout::bytesPoppedAfterCall();
6367 masm.adjustStack(prefixGarbage - unusedStack);
6369 // If the return value of the constructing function is Primitive,
6370 // replace the return value with the Object from CreateThis.
6371 if (call->mir()->isConstructing()) {
6372 Label notPrimitive;
6373 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6374 &notPrimitive);
6375 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
6376 JSReturnOperand);
6377 #ifdef DEBUG
6378 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6379 &notPrimitive);
6380 masm.assumeUnreachable("CreateThis creates an object");
6381 #endif
6382 masm.bind(&notPrimitive);
// Shared slow path for the apply/construct LIR ops: calls the generic
// jit::InvokeFunction VM wrapper with callee/argc taken from the LIR
// operands and argv at the current stack pointer. T is one of the
// LApply*/LConstruct* instruction types (they share the operand accessors
// used here).
6386 template <typename T>
6387 void CodeGenerator::emitCallInvokeFunction(T* apply) {
6388 pushArg(masm.getStackPointer()); // argv.
6389 pushArg(ToRegister(apply->getArgc())); // argc.
6390 pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
6391 pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
6392 pushArg(ToRegister(apply->getFunction())); // JSFunction*.
6394 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6395 MutableHandleValue);
6396 callVM<Fn, jit::InvokeFunction>(apply);
6399 // Do not bailout after the execution of this function since the stack no longer
6400 // corresponds to what is expected by the snapshots.
// Reserves stack space for |argcreg| Values (plus one padding Value when
// needed to keep the JitStackValueAlignment invariant). Clobbers |scratch|;
// |argcreg| is preserved.
6401 void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
6402 Register scratch) {
6403 // Use scratch register to calculate stack space (including padding).
6404 masm.movePtr(argcreg, scratch);
6406 // Align the JitFrameLayout on the JitStackAlignment.
6407 if (JitStackValueAlignment > 1) {
6408 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6409 "Stack padding assumes that the frameSize is correct");
6410 MOZ_ASSERT(JitStackValueAlignment == 2);
6411 Label noPaddingNeeded;
6412 // If the number of arguments is odd, then we do not need any padding.
6414 // Note: The |JitStackValueAlignment == 2| condition requires that the
6415 // overall number of values on the stack is even. When we have an odd number
6416 // of arguments, we don't need any padding, because the |thisValue| is
6417 // pushed after the arguments, so the overall number of values on the stack
6418 // is even.
6419 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6420 masm.addPtr(Imm32(1), scratch);
6421 masm.bind(&noPaddingNeeded);
6424 // Reserve space for copying the arguments.
6425 NativeObject::elementsSizeMustNotOverflow();
6426 masm.lshiftPtr(Imm32(ValueShift), scratch);
6427 masm.subFromStackPtr(scratch);
6429 #ifdef DEBUG
6430 // Put a magic value in the space reserved for padding. Note, this code cannot
6431 // be merged with the previous test, as not all architectures can write below
6432 // their stack pointers.
6433 if (JitStackValueAlignment > 1) {
6434 MOZ_ASSERT(JitStackValueAlignment == 2);
6435 Label noPaddingNeeded;
6436 // If the number of arguments is odd, then we do not need any padding.
6437 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6438 BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
6439 masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
6440 masm.bind(&noPaddingNeeded);
6442 #endif
6445 // Do not bailout after the execution of this function since the stack no longer
6446 // corresponds to what is expected by the snapshots.
// Construct-flavored variant of emitAllocateSpaceForApply: pushes optional
// alignment padding, then |new.target|, then reserves space for |argcreg|
// argument Values. |newTargetAndScratch| holds new.target on entry and is
// clobbered (reused as the size scratch) afterwards; |argcreg| is preserved.
6447 void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
6448 Register argcreg, Register newTargetAndScratch) {
6449 // Align the JitFrameLayout on the JitStackAlignment. Contrary to
6450 // |emitAllocateSpaceForApply()|, we're always pushing a magic value, because
6451 // we can't write to |newTargetAndScratch| before |new.target| has been pushed
6452 // onto the stack.
6453 if (JitStackValueAlignment > 1) {
6454 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6455 "Stack padding assumes that the frameSize is correct");
6456 MOZ_ASSERT(JitStackValueAlignment == 2);
6458 Label noPaddingNeeded;
6459 // If the number of arguments is even, then we do not need any padding.
6461 // Note: The |JitStackValueAlignment == 2| condition requires that the
6462 // overall number of values on the stack is even. When we have an even
6463 // number of arguments, we don't need any padding, because |new.target| is
6464 // pushed before the arguments and |thisValue| is pushed after all
6465 // arguments, so the overall number of values on the stack is even.
6466 masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
6467 masm.pushValue(MagicValue(JS_ARG_POISON));
6468 masm.bind(&noPaddingNeeded);
6471 // Push |new.target| after the padding value, but before any arguments.
6472 masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndScratch);
6474 // Use newTargetAndScratch to calculate stack space (including padding).
6475 masm.movePtr(argcreg, newTargetAndScratch);
6477 // Reserve space for copying the arguments.
6478 NativeObject::elementsSizeMustNotOverflow();
6479 masm.lshiftPtr(Imm32(ValueShift), newTargetAndScratch);
6480 masm.subFromStackPtr(newTargetAndScratch);
6483 // Destroys argvIndex and copyreg.
// Copies |argvIndex| Values from |argvSrcBase|+|argvSrcOffset| to the stack
// at |argvDstOffset|, one machine word at a time, iterating from index
// |argvIndex| down to 1. The caller must already have reserved the
// destination space (see emitAllocateSpaceForApply).
6484 void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
6485 Register argvIndex, Register copyreg,
6486 size_t argvSrcOffset,
6487 size_t argvDstOffset) {
6488 Label loop;
6489 masm.bind(&loop);
6491 // As argvIndex is off by 1, and we use the decBranchPtr instruction to loop
6492 // back, we have to subtract the size of the word which is copied.
6493 BaseValueIndex srcPtr(argvSrcBase, argvIndex,
6494 int32_t(argvSrcOffset) - sizeof(void*));
6495 BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
6496 int32_t(argvDstOffset) - sizeof(void*));
6497 masm.loadPtr(srcPtr, copyreg);
6498 masm.storePtr(copyreg, dstPtr);
6500 // Handle 32 bits architectures.
6501 if (sizeof(Value) == 2 * sizeof(void*)) {
6502 BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
6503 int32_t(argvSrcOffset) - 2 * sizeof(void*));
6504 BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
6505 int32_t(argvDstOffset) - 2 * sizeof(void*));
6506 masm.loadPtr(srcPtrLow, copyreg);
6507 masm.storePtr(copyreg, dstPtrLow);
6510 masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
// Recompute the stack pointer as FramePointer - frameSize(), i.e. the SP
// value established by the prologue.
6513 void CodeGenerator::emitRestoreStackPointerFromFP() {
6514 // This is used to restore the stack pointer after a call with a dynamic
6515 // number of arguments.
6517 MOZ_ASSERT(masm.framePushed() == frameSize());
6519 int32_t offset = -int32_t(frameSize());
6520 masm.computeEffectiveAddress(Address(FramePointer, offset),
6521 masm.getStackPointer());
// Copies |argcreg| actual arguments of the current Ion frame (located above
// the JitFrameLayout, addressed via FramePointer) into the stack space
// previously reserved by emitAllocateSpaceForApply. |extraFormals| skips the
// leading formals when copying rest-parameters. Clobbers |scratch| and
// |copyreg|; |argcreg| is preserved.
6524 void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
6525 Register copyreg, uint32_t extraFormals) {
6526 Label end;
6528 // Skip the copy of arguments if there are none.
6529 masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);
6531 // clang-format off
6533 // We are making a copy of the arguments which are above the JitFrameLayout
6534 // of the current Ion frame.
6536 // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
6538 // clang-format on
6540 // Compute the source and destination offsets into the stack.
6542 // The |extraFormals| parameter is used when copying rest-parameters and
6543 // allows to skip the initial parameters before the actual rest-parameters.
6544 Register argvSrcBase = FramePointer;
6545 size_t argvSrcOffset =
6546 JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
6547 size_t argvDstOffset = 0;
6549 Register argvIndex = scratch;
6550 masm.move32(argcreg, argvIndex);
6552 // Copy arguments.
6553 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6554 argvDstOffset);
6556 // Join with all arguments copied.
6557 masm.bind(&end);
// Push the arguments for LApplyArgsGeneric (f.apply(thisv, arguments)):
// reserve stack space, copy the current frame's actual arguments, then push
// |this|.
6560 void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply) {
6561 // Holds the function nargs.
6562 Register argcreg = ToRegister(apply->getArgc());
6563 Register copyreg = ToRegister(apply->getTempObject());
6564 Register scratch = ToRegister(apply->getTempForArgCopy());
6565 uint32_t extraFormals = apply->numExtraFormals();
6567 // Allocate space on the stack for arguments.
6568 emitAllocateSpaceForApply(argcreg, scratch);
6570 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6572 // Push |this|.
6573 masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
// Push the arguments for LApplyArgsObj (f.apply(thisv, argsObject)): read
// the argument count and argument data out of the ArgumentsObject, copy the
// values onto the stack, then push |this|. The argsObj register is
// repurposed to hold argc once the copy is done.
6576 void CodeGenerator::emitPushArguments(LApplyArgsObj* apply) {
6577 Register argsObj = ToRegister(apply->getArgsObj());
6578 Register tmpArgc = ToRegister(apply->getTempObject());
6579 Register scratch = ToRegister(apply->getTempForArgCopy());
6581 // argc and argsObj are mapped to the same calltemp register.
6582 MOZ_ASSERT(argsObj == ToRegister(apply->getArgc()));
6584 // Load argc into tmpArgc.
6585 masm.loadArgumentsObjectLength(argsObj, tmpArgc);
6587 // Allocate space on the stack for arguments.
6588 emitAllocateSpaceForApply(tmpArgc, scratch);
6590 // Load arguments data.
6591 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
6592 argsObj);
6593 size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
6595 // This is the end of the lifetime of argsObj.
6596 // After this call, the argsObj register holds the argument count instead.
6597 emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);
6599 // Push |this|.
6600 masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
// Copy |tmpArgc| Values from |srcBaseAndArgc| + |argvSrcOffset| into
// already-reserved stack space, then leave the argument count in
// |srcBaseAndArgc| (replacing the source base pointer). See the
// precondition/postcondition comments below for the exact register contract.
6603 void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
6604 Register srcBaseAndArgc,
6605 Register scratch,
6606 size_t argvSrcOffset) {
6607 // Preconditions:
6608 // 1. |tmpArgc| * sizeof(Value) bytes have been allocated at the top of
6609 // the stack to hold arguments.
6610 // 2. |srcBaseAndArgc| + |srcOffset| points to an array of |tmpArgc| values.
6612 // Postconditions:
6613 // 1. The arguments at |srcBaseAndArgc| + |srcOffset| have been copied into
6614 // the allocated space.
6615 // 2. |srcBaseAndArgc| now contains the original value of |tmpArgc|.
6617 // |scratch| is used as a temp register within this function and clobbered.
6619 Label noCopy, epilogue;
6621 // Skip the copy of arguments if there are none.
6622 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
6624 // Copy the values. This code is skipped entirely if there are no values.
6625 size_t argvDstOffset = 0;
6627 Register argvSrcBase = srcBaseAndArgc;
6629 // Stash away |tmpArgc| and adjust argvDstOffset accordingly.
6630 masm.push(tmpArgc);
6631 Register argvIndex = tmpArgc;
6632 argvDstOffset += sizeof(void*);
6634 // Copy
6635 emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
6636 argvDstOffset);
6638 // Restore.
6639 masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
6640 masm.jump(&epilogue);
6642 masm.bind(&noCopy);
6644 // Clear argc if we skipped the copy step.
6645 masm.movePtr(ImmWord(0), srcBaseAndArgc);
6648 // Join with all arguments copied.
6649 // Note, "srcBase" has become "argc".
6650 masm.bind(&epilogue);
// Push the elements of an array (fun.apply(thisv, array)) onto the stack,
// followed by |this|, for a scripted (generic) apply call.
void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply) {
  Register elements = ToRegister(apply->getElements());
  Register tmpArgc = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());

  // argc and elements are mapped to the same calltemp register.
  MOZ_ASSERT(elements == ToRegister(apply->getArgc()));

  // Invariants guarded in the caller:
  //  - the array is not too long
  //  - the array length equals its initialized length

  // The array length is our argc for the purposes of allocating space.
  masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);

  // Allocate space for the values.
  emitAllocateSpaceForApply(tmpArgc, scratch);

  // After this call "elements" has become "argc".
  size_t elementsOffset = 0;
  emitPushArrayAsArguments(tmpArgc, elements, scratch, elementsOffset);

  // Push |this|.
  masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
}
// Push the caller's extra formals, newTarget, and |this| for a scripted
// (generic) spread-construct call (new fn(...args)).
void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct) {
  // Holds the function nargs.
  Register argcreg = ToRegister(construct->getArgc());
  Register copyreg = ToRegister(construct->getTempObject());
  Register scratch = ToRegister(construct->getTempForArgCopy());
  uint32_t extraFormals = construct->numExtraFormals();

  // newTarget and scratch are mapped to the same calltemp register.
  MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));

  // Allocate space for the values.
  // After this call "newTarget" has become "scratch".
  emitAllocateSpaceForConstructAndPushNewTarget(argcreg, scratch);

  emitPushArguments(argcreg, scratch, copyreg, extraFormals);

  // Push |this|.
  masm.pushValue(ToValue(construct, LConstructArgsGeneric::ThisIndex));
}
// Push the elements of an array, newTarget, and |this| for a scripted
// (generic) spread-construct call (new fn(...array)).
void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct) {
  Register elements = ToRegister(construct->getElements());
  Register tmpArgc = ToRegister(construct->getTempObject());
  Register scratch = ToRegister(construct->getTempForArgCopy());

  // argc and elements are mapped to the same calltemp register.
  MOZ_ASSERT(elements == ToRegister(construct->getArgc()));

  // newTarget and scratch are mapped to the same calltemp register.
  MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));

  // Invariants guarded in the caller:
  //  - the array is not too long
  //  - the array length equals its initialized length

  // The array length is our argc for the purposes of allocating space.
  masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);

  // Allocate space for the values.
  // After this call "newTarget" has become "scratch".
  emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, scratch);

  // After this call "elements" has become "argc".
  size_t elementsOffset = 0;
  emitPushArrayAsArguments(tmpArgc, elements, scratch, elementsOffset);

  // Push |this|.
  masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
}
// Shared codegen for all generic (scripted-target) apply/construct LIR ops.
// Pushes the arguments, then either calls the target's JIT code directly
// (via the arguments rectifier on underflow) or falls back to InvokeFunction
// for uncompiled/non-function callees.
template <typename T>
void CodeGenerator::emitApplyGeneric(T* apply) {
  // Holds the function object.
  Register calleereg = ToRegister(apply->getFunction());

  // Temporary register for modifying the function object.
  Register objreg = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());

  // Holds the function nargs, computed in the invoker or (for ApplyArray,
  // ConstructArray, or ApplyArgsObj) in the argument pusher.
  Register argcreg = ToRegister(apply->getArgc());

  // Copy the arguments of the current function.
  //
  // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also compute
  // argc. The argc register and the elements/argsObj register are the same;
  // argc must not be referenced before the call to emitPushArguments() and
  // elements/argsObj must not be referenced after it returns.
  //
  // In the case of ConstructArray or ConstructArgs, also overwrite newTarget;
  // newTarget must not be referenced after this point.
  //
  // objreg is dead across this call.
  emitPushArguments(apply);

  masm.checkStackAlignment();

  bool constructing = apply->mir()->isConstructing();

  // If the function is native, the call is compiled through emitApplyNative.
  MOZ_ASSERT_IF(apply->hasSingleTarget(),
                !apply->getSingleTarget()->isNativeWithoutJitEntry());

  Label end, invoke;

  // Unless already known, guard that calleereg is actually a function object.
  if (!apply->hasSingleTarget()) {
    masm.branchTestObjIsFunction(Assembler::NotEqual, calleereg, objreg,
                                 calleereg, &invoke);
  }

  // Guard that calleereg is an interpreted function with a JSScript.
  masm.branchIfFunctionHasNoJitEntry(calleereg, constructing, &invoke);

  // Guard that callee allows the [[Call]] or [[Construct]] operation required.
  if (constructing) {
    masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
                                 Assembler::Zero, &invoke);
  } else {
    masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
                            calleereg, objreg, &invoke);
  }

  // Use the slow path if CreateThis was unable to create the |this| object.
  if (constructing) {
    // |this| was pushed last by emitPushArguments, so it is at offset 0.
    Address thisAddr(masm.getStackPointer(), 0);
    masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
  }

  // Call with an Ion frame or a rectifier frame.
  {
    if (apply->mir()->maybeCrossRealm()) {
      masm.switchToObjectRealm(calleereg, objreg);
    }

    // Knowing that calleereg is a non-native function, load jitcode.
    masm.loadJitCodeRaw(calleereg, objreg);

    masm.PushCalleeToken(calleereg, constructing);
    masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcreg, scratch);

    Label underflow, rejoin;

    // Check whether the provided arguments satisfy target argc.
    if (!apply->hasSingleTarget()) {
      Register nformals = scratch;
      masm.loadFunctionArgCount(calleereg, nformals);
      masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
    } else {
      masm.branch32(Assembler::Below, argcreg,
                    Imm32(apply->getSingleTarget()->nargs()), &underflow);
    }

    // Skip the construction of the rectifier frame because we have no
    // underflow.
    masm.jump(&rejoin);

    // Argument fixup needed. Get ready to call the argumentsRectifier.
    {
      masm.bind(&underflow);

      // Hardcode the address of the argumentsRectifier code.
      TrampolinePtr argumentsRectifier =
          gen->jitRuntime()->getArgumentsRectifier();
      masm.movePtr(argumentsRectifier, objreg);
    }

    masm.bind(&rejoin);

    // Finally call the function in objreg, as assigned by one of the paths
    // above.
    ensureOsiSpace();
    uint32_t callOffset = masm.callJit(objreg);
    markSafepointAt(callOffset, apply);

    if (apply->mir()->maybeCrossRealm()) {
      static_assert(!JSReturnOperand.aliases(ReturnReg),
                    "ReturnReg available as scratch after scripted calls");
      masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
    }

    // Discard JitFrameLayout fields still left on the stack.
    masm.freeStack(sizeof(JitFrameLayout) -
                   JitFrameLayout::bytesPoppedAfterCall());
    masm.jump(&end);
  }

  // Handle uncompiled or native functions.
  {
    masm.bind(&invoke);
    emitCallInvokeFunction(apply);
  }

  masm.bind(&end);

  // If the return value of the constructing function is Primitive, replace the
  // return value with the Object from CreateThis.
  if (constructing) {
    Label notPrimitive;
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.loadValue(Address(masm.getStackPointer(), 0), JSReturnOperand);

#ifdef DEBUG
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.assumeUnreachable("CreateThis creates an object");
#endif

    masm.bind(&notPrimitive);
  }

  // Pop arguments and continue.
  emitRestoreStackPointerFromFP();
}
// Pad the stack with one poison Value, if needed, so that after pushing
// |argc| arguments (plus |this|, and newTarget when constructing) the stack
// stays JitStackAlignment-aligned for the native call.
template <typename T>
void CodeGenerator::emitAlignStackForApplyNative(T* apply, Register argc) {
  static_assert(JitStackAlignment % ABIStackAlignment == 0,
                "aligning on JIT stack subsumes ABI alignment");

  // Align the arguments on the JitStackAlignment.
  if constexpr (JitStackValueAlignment > 1) {
    static_assert(JitStackValueAlignment == 2,
                  "Stack padding adds exactly one Value");
    MOZ_ASSERT(frameSize() % JitStackValueAlignment == 0,
               "Stack padding assumes that the frameSize is correct");

    Assembler::Condition cond;
    if constexpr (T::isConstructing()) {
      // If the number of arguments is even, then we do not need any padding:
      // constructing pushes two extra Values (newTarget and |this|).
      //
      // Also see emitAllocateSpaceForConstructAndPushNewTarget().
      cond = Assembler::Zero;
    } else {
      // If the number of arguments is odd, then we do not need any padding:
      // only |this| is pushed in addition to the arguments.
      //
      // Also see emitAllocateSpaceForApply().
      cond = Assembler::NonZero;
    }

    Label noPaddingNeeded;
    masm.branchTestPtr(cond, argc, Imm32(1), &noPaddingNeeded);
    masm.pushValue(MagicValue(JS_ARG_POISON));
    masm.bind(&noPaddingNeeded);
  }
}
// Push the caller's extra formal arguments (and newTarget/|this|) onto the
// stack for a direct native fun.apply(...)/construct call.
template <typename T>
void CodeGenerator::emitPushNativeArguments(T* apply) {
  Register argc = ToRegister(apply->getArgc());
  Register tmpArgc = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());
  uint32_t extraFormals = apply->numExtraFormals();

  // Align stack.
  emitAlignStackForApplyNative(apply, argc);

  // Push newTarget.
  if constexpr (T::isConstructing()) {
    masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
  }

  // Push arguments.
  Label noCopy;
  masm.branchTestPtr(Assembler::Zero, argc, argc, &noCopy);
  {
    // Use scratch register to calculate stack space.
    masm.movePtr(argc, scratch);

    // Reserve space for copying the arguments.
    NativeObject::elementsSizeMustNotOverflow();
    masm.lshiftPtr(Imm32(ValueShift), scratch);
    masm.subFromStackPtr(scratch);

    // Compute the source and destination offsets into the stack: the source
    // is the caller's actual arguments in the current Ion frame.
    Register argvSrcBase = FramePointer;
    size_t argvSrcOffset =
        JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
    size_t argvDstOffset = 0;

    Register argvIndex = tmpArgc;
    masm.move32(argc, argvIndex);

    // Copy arguments.
    emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
                           argvDstOffset);
  }
  masm.bind(&noCopy);

  // Push |this|.
  if constexpr (T::isConstructing()) {
    masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
  } else {
    masm.pushValue(ToValue(apply, T::ThisIndex));
  }
}
// Push the elements of an array (and newTarget/|this|) onto the stack for a
// direct native spread/apply call. Leaves argc loaded with the array length.
template <typename T>
void CodeGenerator::emitPushArrayAsNativeArguments(T* apply) {
  Register argc = ToRegister(apply->getArgc());
  Register elements = ToRegister(apply->getElements());
  Register tmpArgc = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());

  // NB: argc and elements are mapped to the same register.
  MOZ_ASSERT(argc == elements);

  // Invariants guarded in the caller:
  //  - the array is not too long
  //  - the array length equals its initialized length

  // The array length is our argc.
  masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);

  // Align stack.
  emitAlignStackForApplyNative(apply, tmpArgc);

  // Push newTarget.
  if constexpr (T::isConstructing()) {
    masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
  }

  // Skip the copy of arguments if there are none.
  Label noCopy;
  masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
  {
    // Push the elements in reverse order (last element first), so that the
    // copy loop can count |tmpArgc| down to zero.
    //
    // |tmpArgc| is off-by-one, so adjust the offset accordingly.
    BaseObjectElementIndex srcPtr(elements, tmpArgc,
                                  -int32_t(sizeof(JS::Value)));

    Label loop;
    masm.bind(&loop);
    masm.pushValue(srcPtr, scratch);
    masm.decBranchPtr(Assembler::NonZero, tmpArgc, Imm32(1), &loop);
  }
  masm.bind(&noCopy);

  // Set argc in preparation for calling the native function.
  masm.load32(Address(elements, ObjectElements::offsetOfLength()), argc);

  // Push |this|.
  if constexpr (T::isConstructing()) {
    masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
  } else {
    masm.pushValue(ToValue(apply, T::ThisIndex));
  }
}
// Argument-pushing entry points for the four direct-native apply/construct
// LIR ops; each forwards to the shared templated helper above.
void CodeGenerator::emitPushArguments(LApplyArgsNative* apply) {
  emitPushNativeArguments(apply);
}

void CodeGenerator::emitPushArguments(LApplyArrayNative* apply) {
  emitPushArrayAsNativeArguments(apply);
}

void CodeGenerator::emitPushArguments(LConstructArgsNative* construct) {
  emitPushNativeArguments(construct);
}

void CodeGenerator::emitPushArguments(LConstructArrayNative* construct) {
  emitPushArrayAsNativeArguments(construct);
}
// Push the contents of an ArgumentsObject followed by |this| for a direct
// native fun.apply(thisv, arguments) call.
void CodeGenerator::emitPushArguments(LApplyArgsObjNative* apply) {
  Register argc = ToRegister(apply->getArgc());
  Register argsObj = ToRegister(apply->getArgsObj());
  Register tmpArgc = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());
  Register scratch2 = ToRegister(apply->getTempExtra());

  // NB: argc and argsObj are mapped to the same register.
  MOZ_ASSERT(argc == argsObj);

  // Load argc into tmpArgc.
  masm.loadArgumentsObjectLength(argsObj, tmpArgc);

  // Align stack.
  emitAlignStackForApplyNative(apply, tmpArgc);

  // Push arguments.
  Label noCopy, epilogue;
  masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
  {
    // Use scratch register to calculate stack space.
    masm.movePtr(tmpArgc, scratch);

    // Reserve space for copying the arguments.
    NativeObject::elementsSizeMustNotOverflow();
    masm.lshiftPtr(Imm32(ValueShift), scratch);
    masm.subFromStackPtr(scratch);

    // Load arguments data: argsObj is repurposed as the source base pointer,
    // ending its lifetime as an object register.
    Register argvSrcBase = argsObj;
    masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                     argvSrcBase);
    size_t argvSrcOffset = ArgumentsData::offsetOfArgs();
    size_t argvDstOffset = 0;

    Register argvIndex = scratch2;
    masm.move32(tmpArgc, argvIndex);

    // Copy the values.
    emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
                           argvDstOffset);
  }
  masm.bind(&noCopy);

  // Set argc in preparation for calling the native function.
  masm.movePtr(tmpArgc, argc);

  // Push |this|.
  masm.pushValue(ToValue(apply, LApplyArgsObjNative::ThisIndex));
}
// Shared codegen for apply/construct ops with a known native (no-JIT-entry)
// single target: push the arguments and invoke the native directly through
// an exit frame, bypassing InvokeFunction.
template <typename T>
void CodeGenerator::emitApplyNative(T* apply) {
  MOZ_ASSERT(T::isConstructing() == apply->mir()->isConstructing(),
             "isConstructing condition must be consistent");

  WrappedFunction* target = apply->mir()->getSingleTarget();
  MOZ_ASSERT(target->isNativeWithoutJitEntry());

  // Prefer the variant that skips building a return value when the result
  // is unused and the native advertises one via its JitInfo.
  JSNative native = target->native();
  if (apply->mir()->ignoresReturnValue() && target->hasJitInfo()) {
    const JSJitInfo* jitInfo = target->jitInfo();
    if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
      native = jitInfo->ignoresReturnValueMethod;
    }
  }

  // Push arguments, including newTarget and |this|.
  emitPushArguments(apply);

  // Registers used for callWithABI() argument-passing.
  Register argContextReg = ToRegister(apply->getTempObject());
  Register argUintNReg = ToRegister(apply->getArgc());
  Register argVpReg = ToRegister(apply->getTempForArgCopy());
  Register tempReg = ToRegister(apply->getTempExtra());

  // No unused stack for variadic calls.
  uint32_t unusedStack = 0;

  // Pushed arguments don't change the pushed frames amount.
  MOZ_ASSERT(masm.framePushed() == frameSize());

  // Create the exit frame and call the native.
  emitCallNative(apply, native, argContextReg, argUintNReg, argVpReg, tempReg,
                 unusedStack);

  // The exit frame is still on the stack.
  MOZ_ASSERT(masm.framePushed() == frameSize() + NativeExitFrameLayout::Size());

  // The next instruction is removing the exit frame, so there is no need for
  // leaveFakeExitFrame.

  // Pop arguments and continue.
  masm.setFramePushed(frameSize());
  emitRestoreStackPointerFromFP();
}
7122 template <typename T>
7123 void CodeGenerator::emitApplyArgsGuard(T* apply) {
7124 LSnapshot* snapshot = apply->snapshot();
7125 Register argcreg = ToRegister(apply->getArgc());
7127 // Ensure that we have a reasonable number of arguments.
7128 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
// Guard that an ArgumentsObject's length is usable and within the
// JIT-supported range; bail out to the interpreter otherwise.
template <typename T>
void CodeGenerator::emitApplyArgsObjGuard(T* apply) {
  Register argsObj = ToRegister(apply->getArgsObj());
  Register temp = ToRegister(apply->getTempObject());

  Label bail;
  // Fails (and bails) if the length has been overridden.
  masm.loadArgumentsObjectLength(argsObj, temp, &bail);
  masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
  bailoutFrom(&bail, apply->snapshot());
}
7142 template <typename T>
7143 void CodeGenerator::emitApplyArrayGuard(T* apply) {
7144 LSnapshot* snapshot = apply->snapshot();
7145 Register elements = ToRegister(apply->getElements());
7146 Register tmp = ToRegister(apply->getTempObject());
7148 Address length(elements, ObjectElements::offsetOfLength());
7149 masm.load32(length, tmp);
7151 // Ensure that we have a reasonable number of arguments.
7152 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
7154 // Ensure that the array does not contain an uninitialized tail.
7156 Address initializedLength(elements,
7157 ObjectElements::offsetOfInitializedLength());
7158 masm.sub32(initializedLength, tmp);
7159 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
// LIR visitors for the generic (scripted-target) apply/construct ops:
// each runs the appropriate bailout guard, then the shared generic emitter.
void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
  emitApplyArgsGuard(apply);
  emitApplyGeneric(apply);
}

void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
  emitApplyArgsObjGuard(apply);
  emitApplyGeneric(apply);
}

void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
  emitApplyArrayGuard(apply);
  emitApplyGeneric(apply);
}

void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
  emitApplyArgsGuard(lir);
  emitApplyGeneric(lir);
}

void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
  emitApplyArrayGuard(lir);
  emitApplyGeneric(lir);
}
// LIR visitors for the direct-native apply/construct ops: each runs the
// appropriate bailout guard, then the shared native-call emitter.
void CodeGenerator::visitApplyArgsNative(LApplyArgsNative* lir) {
  emitApplyArgsGuard(lir);
  emitApplyNative(lir);
}

void CodeGenerator::visitApplyArgsObjNative(LApplyArgsObjNative* lir) {
  emitApplyArgsObjGuard(lir);
  emitApplyNative(lir);
}

void CodeGenerator::visitApplyArrayNative(LApplyArrayNative* lir) {
  emitApplyArrayGuard(lir);
  emitApplyNative(lir);
}

void CodeGenerator::visitConstructArgsNative(LConstructArgsNative* lir) {
  emitApplyArgsGuard(lir);
  emitApplyNative(lir);
}

void CodeGenerator::visitConstructArrayNative(LConstructArrayNative* lir) {
  emitApplyArrayGuard(lir);
  emitApplyNative(lir);
}
// Unconditional bailout: deoptimize back to Baseline/interpreter here.
void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }

// These ops mark points that MIR analysis proved cannot execute; emit a
// debug-mode trap so reaching them is loudly fatal.
void CodeGenerator::visitUnreachable(LUnreachable* lir) {
  masm.assumeUnreachable("end-of-block assumed unreachable");
}

// Force the snapshot to be encoded into the IonScript's snapshot buffer.
void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) {
  encode(lir->snapshot());
}

void CodeGenerator::visitUnreachableResultV(LUnreachableResultV* lir) {
  masm.assumeUnreachable("must be unreachable");
}

void CodeGenerator::visitUnreachableResultT(LUnreachableResultT* lir) {
  masm.assumeUnreachable("must be unreachable");
}
// Out-of-line path to report over-recursed error and fail.
class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> {
  // The instruction whose live registers must be saved around the VM call.
  LInstruction* lir_;

 public:
  explicit CheckOverRecursedFailure(LInstruction* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitCheckOverRecursedFailure(this);
  }

  LInstruction* lir() const { return lir_; }
};
// Emit the stack-overflow check at function entry: compare the stack pointer
// against the runtime's JIT stack limit and branch to the out-of-line
// failure path if exceeded.
void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) {
  // If we don't push anything on the stack, skip the check.
  if (omitOverRecursedCheck()) {
    return;
  }

  // Ensure that this frame will not cross the stack limit.
  // This is a weak check, justified by Ion using the C stack: we must always
  // be some distance away from the actual limit, since if the limit is
  // crossed, an error must be thrown, which requires more frames.
  //
  // It must always be possible to trespass past the stack limit.
  // Ion may legally place frames very close to the limit. Calling additional
  // C functions may then violate the limit without any checking.
  //
  // Since Ion frames exist on the C stack, the stack limit may be
  // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().

  CheckOverRecursedFailure* ool = new (alloc()) CheckOverRecursedFailure(lir);
  addOutOfLineCode(ool, lir->mir());

  // Conditional forward (unlikely) branch to failure.
  const void* limitAddr = gen->runtime->addressOfJitStackLimit();
  masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
                         ool->entry());
  masm.bind(ool->rejoin());
}
// Out-of-line continuation of visitCheckOverRecursed: call into the VM to
// throw the over-recursion error (or run interrupts), then rejoin.
void CodeGenerator::visitCheckOverRecursedFailure(
    CheckOverRecursedFailure* ool) {
  // The OOL path is hit if the recursion depth has been exceeded.
  // Throw an InternalError for over-recursion.

  // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
  // to save all live registers to avoid crashes if CheckOverRecursed triggers
  // a GC.
  saveLive(ool->lir());

  using Fn = bool (*)(JSContext*);
  callVM<Fn, CheckOverRecursed>(ool->lir());

  restoreLive(ool->lir());
  masm.jump(ool->rejoin());
}
// Create and populate an IonScriptCounts for profiling, with one entry per
// MIR block (hit counters plus successor edges). Returns nullptr when
// profiling is off, when compiling wasm, or on OOM.
IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() {
  // If scripts are being profiled, create a new IonScriptCounts for the
  // profiling data, which will be attached to the associated JSScript or
  // wasm module after code generation finishes.
  if (!gen->hasProfilingScripts()) {
    return nullptr;
  }

  // This test inhibits IonScriptCount creation for wasm code which is
  // currently incompatible with wasm codegen for two reasons: (1) wasm code
  // must be serializable and script count codegen bakes in absolute
  // addresses, (2) wasm code does not have a JSScript with which to associate
  // code coverage data.
  JSScript* script = gen->outerInfo().script();
  if (!script) {
    return nullptr;
  }

  auto counts = MakeUnique<IonScriptCounts>();
  if (!counts || !counts->init(graph.numBlocks())) {
    return nullptr;
  }

  for (size_t i = 0; i < graph.numBlocks(); i++) {
    MBasicBlock* block = graph.getBlock(i)->mir();

    uint32_t offset = 0;
    char* description = nullptr;
    if (MResumePoint* resume = block->entryResumePoint()) {
      // Find a PC offset in the outermost script to use. If this
      // block is from an inlined script, find a location in the
      // outer script to associate information about the inlining
      // with.
      while (resume->caller()) {
        resume = resume->caller();
      }
      offset = script->pcToOffset(resume->pc());

      if (block->entryResumePoint()->caller()) {
        // Get the filename and line number of the inner script.
        JSScript* innerScript = block->info().script();
        description = js_pod_calloc<char>(200);
        if (description) {
          snprintf(description, 200, "%s:%u", innerScript->filename(),
                   innerScript->lineno());
        }
      }
    }

    if (!counts->block(i).init(block->id(), offset, description,
                               block->numSuccessors())) {
      return nullptr;
    }

    for (size_t j = 0; j < block->numSuccessors(); j++) {
      counts->block(i).setSuccessor(
          j, skipTrivialBlocks(block->getSuccessor(j))->id());
    }
  }

  scriptCounts_ = counts.release();
  return scriptCounts_;
}
// Structure for managing the state tracked for a block by script counters.
// RAII: construction + init() start collecting disassembly for the block,
// and the destructor stores the collected text into the block's counts.
struct ScriptCountBlockState {
  IonBlockCounts& block;
  MacroAssembler& masm;

  // Accumulates the human-readable assembly text for this block.
  Sprinter printer;

 public:
  ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
      : block(*block), masm(*masm), printer(GetJitContext()->cx, false) {}

  bool init() {
    if (!printer.init()) {
      return false;
    }

    // Bump the hit count for the block at the start. This code is not
    // included in either the text for the block or the instruction byte
    // counts.
    masm.inc64(AbsoluteAddress(block.addressOfHitCount()));

    // Collect human readable assembly for the code generated in the block.
    masm.setPrinter(&printer);

    return true;
  }

  void visitInstruction(LInstruction* ins) {
#ifdef JS_JITSPEW
    // Prefix stream of assembly instructions with their LIR instruction
    // name and any associated high level info.
    if (const char* extra = ins->getExtraName()) {
      printer.printf("[%s:%s]\n", ins->opName(), extra);
    } else {
      printer.printf("[%s]\n", ins->opName());
    }
#endif
  }

  ~ScriptCountBlockState() {
    // Stop collecting and attach the accumulated text to the block counts.
    masm.setPrinter(nullptr);

    if (JS::UniqueChars str = printer.release()) {
      block.setCode(str.get());
    }
  }
};
// Load this IonScript's address (patched in after linking) into |temp| and
// branch to |invalidated| if the script has since been invalidated.
void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) {
  // The placeholder -1 is patched with the IonScript pointer at link time.
  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
  masm.propagateOOM(ionScriptLabels_.append(label));

  // If IonScript::invalidationCount_ != 0, the script has been invalidated.
  masm.branch32(Assembler::NotEqual,
                Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0),
                invalidated);
}
7411 #ifdef DEBUG
// Debug-only: verify that |input| holds a valid GC pointer of the MIR type
// produced by |mir|, by calling the matching AssertValid*Ptr VM helper.
// Skipped when the script has been invalidated.
void CodeGenerator::emitAssertGCThingResult(Register input,
                                            const MDefinition* mir) {
  MIRType type = mir->type();
  MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
             type == MIRType::Symbol || type == MIRType::BigInt);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);

  Register temp = regs.takeAny();
  masm.push(temp);

  // Don't check if the script has been invalidated. In that case invalid
  // types are expected (until we reach the OsiPoint and bailout).
  Label done;
  branchIfInvalidated(temp, &done);

# ifndef JS_SIMULATOR
  // Check that we have a valid GC pointer.
  // Disable for wasm because we don't have a context on wasm compilation
  // threads and this needs a context.
  // Also disable for simulator builds because the C++ call is a lot slower
  // there than on actual hardware.
  if (JitOptions.fullDebugChecks && !IsCompilingWasm()) {
    saveVolatile();
    masm.setupUnalignedABICall(temp);
    masm.loadJSContext(temp);
    masm.passABIArg(temp);
    masm.passABIArg(input);

    switch (type) {
      case MIRType::Object: {
        using Fn = void (*)(JSContext* cx, JSObject* obj);
        masm.callWithABI<Fn, AssertValidObjectPtr>();
        break;
      }
      case MIRType::String: {
        using Fn = void (*)(JSContext* cx, JSString* str);
        masm.callWithABI<Fn, AssertValidStringPtr>();
        break;
      }
      case MIRType::Symbol: {
        using Fn = void (*)(JSContext* cx, JS::Symbol* sym);
        masm.callWithABI<Fn, AssertValidSymbolPtr>();
        break;
      }
      case MIRType::BigInt: {
        using Fn = void (*)(JSContext* cx, JS::BigInt* bi);
        masm.callWithABI<Fn, AssertValidBigIntPtr>();
        break;
      }
      default:
        MOZ_CRASH();
    }

    restoreVolatile();
  }
# endif

  masm.bind(&done);
  masm.pop(temp);
}
// Debug-only: verify that the boxed Value in |input| is valid by spilling it
// to the stack and calling the AssertValidValue VM helper. Skipped when the
// script has been invalidated.
void CodeGenerator::emitAssertResultV(const ValueOperand input,
                                      const MDefinition* mir) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  masm.push(temp1);
  masm.push(temp2);

  // Don't check if the script has been invalidated. In that case invalid
  // types are expected (until we reach the OsiPoint and bailout).
  Label done;
  branchIfInvalidated(temp1, &done);

  // Check that we have a valid GC pointer.
  if (JitOptions.fullDebugChecks) {
    saveVolatile();

    // Pass a pointer to the on-stack copy of the Value.
    masm.pushValue(input);
    masm.moveStackPtrTo(temp1);

    using Fn = void (*)(JSContext* cx, Value* v);
    masm.setupUnalignedABICall(temp2);
    masm.loadJSContext(temp2);
    masm.passABIArg(temp2);
    masm.passABIArg(temp1);
    masm.callWithABI<Fn, AssertValidValue>();
    masm.popValue(input);
    restoreVolatile();
  }

  masm.bind(&done);
  masm.pop(temp2);
  masm.pop(temp1);
}
7512 void CodeGenerator::emitGCThingResultChecks(LInstruction* lir,
7513 MDefinition* mir) {
7514 if (lir->numDefs() == 0) {
7515 return;
7518 MOZ_ASSERT(lir->numDefs() == 1);
7519 if (lir->getDef(0)->isBogusTemp()) {
7520 return;
7523 Register output = ToRegister(lir->getDef(0));
7524 emitAssertGCThingResult(output, mir);
// Debug-only helper: when |lir| defines a register-allocated boxed Value
// result, emit the Value validity assertion for it.
void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) {
  if (lir->numDefs() == 0) {
    return;
  }

  MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
  // Only check results that the allocator kept in registers.
  if (!lir->getDef(0)->output()->isRegister()) {
    return;
  }

  ValueOperand output = ToOutValue(lir);

  emitAssertResultV(output, mir);
}
7542 void CodeGenerator::emitDebugResultChecks(LInstruction* ins) {
7543 // In debug builds, check that LIR instructions return valid values.
7545 MDefinition* mir = ins->mirRaw();
7546 if (!mir) {
7547 return;
7550 switch (mir->type()) {
7551 case MIRType::Object:
7552 case MIRType::String:
7553 case MIRType::Symbol:
7554 case MIRType::BigInt:
7555 emitGCThingResultChecks(ins, mir);
7556 break;
7557 case MIRType::Value:
7558 emitValueResultChecks(ins, mir);
7559 break;
7560 default:
7561 break;
// Debug-only: when the ion-bail-after testing option is enabled, decrement
// the runtime's countdown at each bailable instruction and force a bailout
// when it reaches zero.
void CodeGenerator::emitDebugForceBailing(LInstruction* lir) {
  if (MOZ_LIKELY(!gen->options.ionBailAfterEnabled())) {
    return;
  }
  // Only instructions with a snapshot can bail.
  if (!lir->snapshot()) {
    return;
  }
  if (lir->isOsiPoint()) {
    return;
  }

  masm.comment("emitDebugForceBailing");
  const void* bailAfterCounterAddr =
      gen->runtime->addressOfIonBailAfterCounter();

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());

  Label done, notBail;
  // Counter == 0 means the feature is currently inactive.
  masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterCounterAddr),
                Imm32(0), &done);
  {
    Register temp = regs.takeAny();

    masm.push(temp);
    masm.load32(AbsoluteAddress(bailAfterCounterAddr), temp);
    masm.sub32(Imm32(1), temp);
    masm.store32(temp, AbsoluteAddress(bailAfterCounterAddr));

    masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
    {
      // Countdown hit zero: restore the register and bail out here.
      masm.pop(temp);
      bailout(lir->snapshot());
    }
    masm.bind(&notBail);
    masm.pop(temp);
  }
  masm.bind(&done);
}
7603 #endif
// Emit machine code for every LIR instruction in every basic block of the
// graph. Returns false on OOM.
bool CodeGenerator::generateBody() {
  JitSpewCont(JitSpew_Codegen, "\n");
  AutoCreatedBy acb(masm, "CodeGenerator::generateBody");

  JitSpew(JitSpew_Codegen, "==== BEGIN CodeGenerator::generateBody ====");
  IonScriptCounts* counts = maybeCreateScriptCounts();

  const bool compilingWasm = gen->compilingWasm();

  for (size_t i = 0; i < graph.numBlocks(); i++) {
    current = graph.getBlock(i);

    // Don't emit any code for trivial blocks, containing just a goto. Such
    // blocks are created to split critical edges, and if we didn't end up
    // putting any instructions in them, we can skip them.
    if (current->isTrivial()) {
      continue;
    }

#ifdef JS_JITSPEW
    const char* filename = nullptr;
    size_t lineNumber = 0;
    JS::LimitedColumnNumberOneOrigin columnNumber;
    if (current->mir()->info().script()) {
      filename = current->mir()->info().script()->filename();
      if (current->mir()->pc()) {
        lineNumber = PCToLineNumber(current->mir()->info().script(),
                                    current->mir()->pc(), &columnNumber);
      }
    }
    JitSpew(JitSpew_Codegen, "--------------------------------");
    JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", i,
            filename ? filename : "?", lineNumber,
            columnNumber.oneOriginValue(),
            current->mir()->isLoopHeader() ? " (loop header)" : "");
#endif

    // Align wasm loop headers for better branch-target performance.
    if (current->mir()->isLoopHeader() && compilingWasm) {
      masm.nopAlign(CodeAlignment);
    }

    masm.bind(current->label());

    mozilla::Maybe<ScriptCountBlockState> blockCounts;
    if (counts) {
      blockCounts.emplace(&counts->block(i), &masm);
      if (!blockCounts->init()) {
        return false;
      }
    }

    for (LInstructionIterator iter = current->begin(); iter != current->end();
         iter++) {
      if (!alloc().ensureBallast()) {
        return false;
      }

      perfSpewer_.recordInstruction(masm, *iter);
#ifdef JS_JITSPEW
      JitSpewStart(JitSpew_Codegen, "  # LIR=%s",
                   iter->opName());
      if (const char* extra = iter->getExtraName()) {
        JitSpewCont(JitSpew_Codegen, ":%s", extra);
      }
      JitSpewFin(JitSpew_Codegen);
#endif

      if (counts) {
        blockCounts->visitInstruction(*iter);
      }

#ifdef CHECK_OSIPOINT_REGISTERS
      if (iter->safepoint() && !compilingWasm) {
        resetOsiPointRegs(iter->safepoint());
      }
#endif

      // Record native-code-to-bytecode mapping (not used for wasm).
      if (!compilingWasm) {
        if (MDefinition* mir = iter->mirRaw()) {
          if (!addNativeToBytecodeEntry(mir->trackedSite())) {
            return false;
          }
        }
      }

      setElement(*iter);  // needed to encode correct snapshot location.

#ifdef DEBUG
      emitDebugForceBailing(*iter);
#endif

      // Dispatch to the per-opcode visit method.
      switch (iter->op()) {
#ifndef JS_CODEGEN_NONE
#  define LIROP(op)              \
    case LNode::Opcode::op:      \
      visit##op(iter->to##op()); \
      break;
        LIR_OPCODE_LIST(LIROP)
#  undef LIROP
#endif
        case LNode::Opcode::Invalid:
        default:
          MOZ_CRASH("Invalid LIR op");
      }

#ifdef DEBUG
      if (!counts) {
        emitDebugResultChecks(*iter);
      }
#endif

      if (masm.oom()) {
        return false;
      }
    }
  }

  JitSpew(JitSpew_Codegen, "==== END CodeGenerator::generateBody ====\n");
  return true;
}
7725 // Out-of-line object allocation for LNewArray.
7726 class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator> {
7727 LNewArray* lir_;
7729 public:
7730 explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) {}
7732 void accept(CodeGenerator* codegen) override {
7733 codegen->visitOutOfLineNewArray(this);
7736 LNewArray* lir() const { return lir_; }
// Allocate an array via a VM call. Chooses between NewArrayWithShape (when a
// template object supplies the shape) and the generic NewArrayOperation.
void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) {
  Register objReg = ToRegister(lir->output());

  MOZ_ASSERT(!lir->isCall());
  saveLive(lir);

  JSObject* templateObject = lir->mir()->templateObject();

  if (templateObject) {
    // VM args are pushed in reverse order of the Fn signature.
    pushArg(ImmGCPtr(templateObject->shape()));
    pushArg(Imm32(lir->mir()->length()));

    using Fn = ArrayObject* (*)(JSContext*, uint32_t, Handle<Shape*>);
    callVM<Fn, NewArrayWithShape>(lir);
  } else {
    pushArg(Imm32(GenericObject));
    pushArg(Imm32(lir->mir()->length()));

    using Fn = ArrayObject* (*)(JSContext*, uint32_t, NewObjectKind);
    callVM<Fn, NewArrayOperation>(lir);
  }

  masm.storeCallPointerResult(objReg);

  MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
  restoreLive(lir);
}
// Emit a call to the C++ atan2 implementation for Math.atan2(y, x).
void CodeGenerator::visitAtan2D(LAtan2D* lir) {
  FloatRegister y = ToFloatRegister(lir->y());
  FloatRegister x = ToFloatRegister(lir->x());

  // NOTE(review): |y| is deliberately passed as the first ABI argument —
  // presumably ecmaAtan2 takes (y, x) in that order; confirm against its
  // declaration.
  using Fn = double (*)(double x, double y);
  masm.setupAlignedABICall();
  masm.passABIArg(y, ABIType::Float64);
  masm.passABIArg(x, ABIType::Float64);
  masm.callWithABI<Fn, ecmaAtan2>(ABIType::Float64);

  MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
}
// Emit a call to the appropriate hypot helper for 2, 3 or 4 arguments.
void CodeGenerator::visitHypot(LHypot* lir) {
  uint32_t numArgs = lir->numArgs();
  masm.setupAlignedABICall();

  // Pass each operand as a Float64 ABI argument, in order.
  for (uint32_t i = 0; i < numArgs; ++i) {
    masm.passABIArg(ToFloatRegister(lir->getOperand(i)), ABIType::Float64);
  }

  switch (numArgs) {
    case 2: {
      using Fn = double (*)(double x, double y);
      masm.callWithABI<Fn, ecmaHypot>(ABIType::Float64);
      break;
    }
    case 3: {
      using Fn = double (*)(double x, double y, double z);
      masm.callWithABI<Fn, hypot3>(ABIType::Float64);
      break;
    }
    case 4: {
      using Fn = double (*)(double x, double y, double z, double w);
      masm.callWithABI<Fn, hypot4>(ABIType::Float64);
      break;
    }
    default:
      MOZ_CRASH("Unexpected number of arguments to hypot function.");
  }
  MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
}
// Allocate a dense array: inline nursery allocation with an out-of-line VM
// fallback, or a direct VM call when the MIR node requires one.
void CodeGenerator::visitNewArray(LNewArray* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());
  DebugOnly<uint32_t> length = lir->mir()->length();

  MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);

  if (lir->mir()->isVMCall()) {
    visitNewArrayCallVM(lir);
    return;
  }

  // Inline path with OOL fallback into the VM on allocation failure.
  OutOfLineNewArray* ool = new (alloc()) OutOfLineNewArray(lir);
  addOutOfLineCode(ool, lir->mir());

  TemplateObject templateObject(lir->mir()->templateObject());
#ifdef DEBUG
  size_t numInlineElements = gc::GetGCKindSlots(templateObject.getAllocKind()) -
                             ObjectElements::VALUES_PER_HEADER;
  MOZ_ASSERT(length <= numInlineElements,
             "Inline allocation only supports inline elements");
#endif
  masm.createGCObject(objReg, tempReg, templateObject,
                      lir->mir()->initialHeap(), ool->entry());

  masm.bind(ool->rejoin());
}
// Out-of-line fallback for LNewArray: do the VM call, then rejoin the
// inline allocation path.
void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) {
  visitNewArrayCallVM(ool->lir());
  masm.jump(ool->rejoin());
}
// Allocate an array whose length is only known at runtime. Allocates inline
// when the template object's fixed elements can hold the dynamic length,
// otherwise calls into the VM.
void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) {
  Register lengthReg = ToRegister(lir->length());
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  JSObject* templateObject = lir->mir()->templateObject();
  gc::Heap initialHeap = lir->mir()->initialHeap();

  using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
  OutOfLineCode* ool = oolCallVM<Fn, ArrayConstructorOneArg>(
      lir, ArgList(ImmGCPtr(templateObject), lengthReg),
      StoreRegisterTo(objReg));

  // Inline allocation requires fixed (inline) elements on the template.
  bool canInline = true;
  size_t inlineLength = 0;
  if (templateObject->as<ArrayObject>().hasFixedElements()) {
    size_t numSlots =
        gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
    inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
  } else {
    canInline = false;
  }

  if (canInline) {
    // Try to do the allocation inline if the template object is big enough
    // for the length in lengthReg. If the length is bigger we could still
    // use the template object and not allocate the elements, but it's more
    // efficient to do a single big allocation than (repeatedly) reallocating
    // the array later on when filling it.
    masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength),
                  ool->entry());

    TemplateObject templateObj(templateObject);
    masm.createGCObject(objReg, tempReg, templateObj, initialHeap,
                        ool->entry());

    // Patch the runtime length into the new object's elements header.
    size_t lengthOffset = NativeObject::offsetOfFixedElements() +
                          ObjectElements::offsetOfLength();
    masm.store32(lengthReg, Address(objReg, lengthOffset));
  } else {
    masm.jump(ool->entry());
  }

  masm.bind(ool->rejoin());
}
// Allocate an iterator object (array/string/regexp-string iterator) inline,
// with a type-specific VM fallback.
void CodeGenerator::visitNewIterator(LNewIterator* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  // Select the VM fallback matching the iterator kind.
  OutOfLineCode* ool;
  switch (lir->mir()->type()) {
    case MNewIterator::ArrayIterator: {
      using Fn = ArrayIteratorObject* (*)(JSContext*);
      ool = oolCallVM<Fn, NewArrayIterator>(lir, ArgList(),
                                            StoreRegisterTo(objReg));
      break;
    }
    case MNewIterator::StringIterator: {
      using Fn = StringIteratorObject* (*)(JSContext*);
      ool = oolCallVM<Fn, NewStringIterator>(lir, ArgList(),
                                             StoreRegisterTo(objReg));
      break;
    }
    case MNewIterator::RegExpStringIterator: {
      using Fn = RegExpStringIteratorObject* (*)(JSContext*);
      ool = oolCallVM<Fn, NewRegExpStringIterator>(lir, ArgList(),
                                                   StoreRegisterTo(objReg));
      break;
    }
    default:
      MOZ_CRASH("unexpected iterator type");
  }

  TemplateObject templateObject(lir->mir()->templateObject());
  masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Allocate a fixed-length typed array with a compile-time-known length,
// inline with a VM fallback.
void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());
  Register lengthReg = ToRegister(lir->temp1());
  LiveRegisterSet liveRegs = liveVolatileRegs(lir);

  JSObject* templateObject = lir->mir()->templateObject();
  gc::Heap initialHeap = lir->mir()->initialHeap();

  auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();

  size_t n = ttemplate->length();
  MOZ_ASSERT(n <= INT32_MAX,
             "Template objects are only created for int32 lengths");

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
  OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
      lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
      StoreRegisterTo(objReg));

  TemplateObject templateObj(templateObject);
  masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());

  // Initialize the typed array's elements/slots; length is fixed.
  masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
                           ttemplate, MacroAssembler::TypedArrayLength::Fixed);

  masm.bind(ool->rejoin());
}
// Allocate a typed array whose length is only known at runtime, inline with
// a VM fallback.
void CodeGenerator::visitNewTypedArrayDynamicLength(
    LNewTypedArrayDynamicLength* lir) {
  Register lengthReg = ToRegister(lir->length());
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());
  LiveRegisterSet liveRegs = liveVolatileRegs(lir);

  JSObject* templateObject = lir->mir()->templateObject();
  gc::Heap initialHeap = lir->mir()->initialHeap();

  auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
  OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
      lir, ArgList(ImmGCPtr(templateObject), lengthReg),
      StoreRegisterTo(objReg));

  // Volatile |lengthReg| is saved across the ABI call in |initTypedArraySlots|.
  MOZ_ASSERT_IF(lengthReg.volatile_(), liveRegs.has(lengthReg));

  TemplateObject templateObj(templateObject);
  masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());

  masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
                           ttemplate,
                           MacroAssembler::TypedArrayLength::Dynamic);

  masm.bind(ool->rejoin());
}
// Construct a typed array from an existing array-like object via a VM call.
void CodeGenerator::visitNewTypedArrayFromArray(LNewTypedArrayFromArray* lir) {
  // VM args are pushed in reverse order of the Fn signature.
  pushArg(ToRegister(lir->array()));
  pushArg(ImmGCPtr(lir->mir()->templateObject()));

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
  callVM<Fn, js::NewTypedArrayWithTemplateAndArray>(lir);
}
// Construct a typed array view over an ArrayBuffer via a VM call.
void CodeGenerator::visitNewTypedArrayFromArrayBuffer(
    LNewTypedArrayFromArrayBuffer* lir) {
  // VM args are pushed in reverse order of the Fn signature.
  pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::LengthIndex));
  pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::ByteOffsetIndex));
  pushArg(ToRegister(lir->arrayBuffer()));
  pushArg(ImmGCPtr(lir->mir()->templateObject()));

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
                                   HandleValue, HandleValue);
  callVM<Fn, js::NewTypedArrayWithTemplateAndBuffer>(lir);
}
// Implement Function.prototype.bind: pre-allocate a BoundFunctionObject when
// possible and hand it (or nullptr) to the VM function that completes it.
void CodeGenerator::visitBindFunction(LBindFunction* lir) {
  Register target = ToRegister(lir->target());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // Try to allocate a new BoundFunctionObject we can pass to the VM function.
  // If this fails, we set temp1 to nullptr so we do the allocation in C++.
  TemplateObject templateObject(lir->mir()->templateObject());
  Label allocOk, allocFailed;
  masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
                      &allocFailed);
  masm.jump(&allocOk);

  masm.bind(&allocFailed);
  masm.movePtr(ImmWord(0), temp1);

  masm.bind(&allocOk);

  // Set temp2 to the address of the first argument on the stack.
  // Note that the Value slots used for arguments are currently aligned for a
  // JIT call, even though that's not strictly necessary for calling into C++.
  uint32_t argc = lir->mir()->numStackArgs();
  if (JitStackValueAlignment > 1) {
    argc = AlignBytes(argc, JitStackValueAlignment);
  }
  uint32_t unusedStack = UnusedStackBytesForCall(argc);
  masm.computeEffectiveAddress(Address(masm.getStackPointer(), unusedStack),
                               temp2);

  // VM args are pushed in reverse order of the Fn signature.
  pushArg(temp1);
  pushArg(Imm32(lir->mir()->numStackArgs()));
  pushArg(temp2);
  pushArg(target);

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
                                      uint32_t, Handle<BoundFunctionObject*>);
  callVM<Fn, js::BoundFunctionObject::functionBindImpl>(lir);
}
// Allocate a BoundFunctionObject from a template, inline with a VM fallback.
void CodeGenerator::visitNewBoundFunction(LNewBoundFunction* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  JSObject* templateObj = lir->mir()->templateObj();

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<BoundFunctionObject*>);
  OutOfLineCode* ool = oolCallVM<Fn, BoundFunctionObject::createWithTemplate>(
      lir, ArgList(ImmGCPtr(templateObj)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
8059 // Out-of-line object allocation for JSOp::NewObject.
8060 class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator> {
8061 LNewObject* lir_;
8063 public:
8064 explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) {}
8066 void accept(CodeGenerator* codegen) override {
8067 codegen->visitOutOfLineNewObject(this);
8070 LNewObject* lir() const { return lir_; }
// Allocate an object via a VM call, dispatching on the MNewObject mode.
void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) {
  Register objReg = ToRegister(lir->output());

  MOZ_ASSERT(!lir->isCall());
  saveLive(lir);

  JSObject* templateObject = lir->mir()->templateObject();

  // If we're making a new object with a class prototype (that is, an object
  // that derives its class from its prototype instead of being
  // PlainObject::class_'d) from self-hosted code, we need a different init
  // function.
  switch (lir->mir()->mode()) {
    case MNewObject::ObjectLiteral: {
      MOZ_ASSERT(!templateObject);
      pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
      pushArg(ImmGCPtr(lir->mir()->block()->info().script()));

      using Fn = JSObject* (*)(JSContext*, HandleScript, const jsbytecode* pc);
      callVM<Fn, NewObjectOperation>(lir);
      break;
    }
    case MNewObject::ObjectCreate: {
      pushArg(ImmGCPtr(templateObject));

      using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
      callVM<Fn, ObjectCreateWithTemplate>(lir);
      break;
    }
  }

  masm.storeCallPointerResult(objReg);

  MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
  restoreLive(lir);
}
// Returns true if the allocation code for |lir| must clear the object's fixed
// slots to |undefined|, and false when subsequent StoreFixedSlot instructions
// provably initialize every fixed slot first. NOTE: as a side effect, this
// disables the pre-barrier on those stores, since they write to a
// freshly-allocated object.
static bool ShouldInitFixedSlots(LNewPlainObject* lir, const Shape* shape,
                                 uint32_t nfixed) {
  // Look for StoreFixedSlot instructions following an object allocation
  // that write to this object before a GC is triggered or this object is
  // passed to a VM call. If all fixed slots will be initialized, the
  // allocation code doesn't need to set the slots to |undefined|.

  if (nfixed == 0) {
    return false;
  }

  // Keep track of the fixed slots that are initialized. initializedSlots is
  // a bit mask with a bit for each slot.
  MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
  static_assert(NativeObject::MAX_FIXED_SLOTS <= 32,
                "Slot bits must fit in 32 bits");
  uint32_t initializedSlots = 0;
  uint32_t numInitialized = 0;

  MInstruction* allocMir = lir->mir();
  MBasicBlock* block = allocMir->block();

  // Skip the allocation instruction.
  MInstructionIterator iter = block->begin(allocMir);
  MOZ_ASSERT(*iter == allocMir);
  iter++;

  // Handle the leading shape guard, if present.
  for (; iter != block->end(); iter++) {
    if (iter->isConstant()) {
      // This instruction won't trigger a GC or read object slots.
      continue;
    }
    if (iter->isGuardShape()) {
      auto* guard = iter->toGuardShape();
      if (guard->object() != allocMir || guard->shape() != shape) {
        return true;
      }
      // Later stores reference the guard instead of the allocation.
      allocMir = guard;
      iter++;
    }
    break;
  }

  for (; iter != block->end(); iter++) {
    if (iter->isConstant() || iter->isPostWriteBarrier()) {
      // These instructions won't trigger a GC or read object slots.
      continue;
    }

    if (iter->isStoreFixedSlot()) {
      MStoreFixedSlot* store = iter->toStoreFixedSlot();
      if (store->object() != allocMir) {
        return true;
      }

      // We may not initialize this object slot on allocation, so the
      // pre-barrier could read uninitialized memory. Simply disable
      // the barrier for this store: the object was just initialized
      // so the barrier is not necessary.
      store->setNeedsBarrier(false);

      uint32_t slot = store->slot();
      MOZ_ASSERT(slot < nfixed);
      if ((initializedSlots & (1 << slot)) == 0) {
        numInitialized++;
        initializedSlots |= (1 << slot);

        if (numInitialized == nfixed) {
          // All fixed slots will be initialized.
          MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
          return false;
        }
      }
      continue;
    }

    // Unhandled instruction, assume it bails or reads object slots.
    return true;
  }

  MOZ_CRASH("Shouldn't get here");
}
// Allocate an object: inline allocation with an out-of-line VM fallback, or
// a direct VM call when the MIR node requires one.
void CodeGenerator::visitNewObject(LNewObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());

  if (lir->mir()->isVMCall()) {
    visitNewObjectVMCall(lir);
    return;
  }

  OutOfLineNewObject* ool = new (alloc()) OutOfLineNewObject(lir);
  addOutOfLineCode(ool, lir->mir());

  TemplateObject templateObject(lir->mir()->templateObject());

  masm.createGCObject(objReg, tempReg, templateObject,
                      lir->mir()->initialHeap(), ool->entry());

  masm.bind(ool->rejoin());
}
// Out-of-line fallback for LNewObject: do the VM call, then rejoin the
// inline allocation path.
void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) {
  visitNewObjectVMCall(ool->lir());
  masm.jump(ool->rejoin());
}
8219 void CodeGenerator::visitNewPlainObject(LNewPlainObject* lir) {
8220 Register objReg = ToRegister(lir->output());
8221 Register temp0Reg = ToRegister(lir->temp0());
8222 Register temp1Reg = ToRegister(lir->temp1());
8223 Register shapeReg = ToRegister(lir->temp2());
8225 auto* mir = lir->mir();
8226 const Shape* shape = mir->shape();
8227 gc::Heap initialHeap = mir->initialHeap();
8228 gc::AllocKind allocKind = mir->allocKind();
8230 using Fn =
8231 JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind, gc::Heap);
8232 OutOfLineCode* ool = oolCallVM<Fn, NewPlainObjectOptimizedFallback>(
8233 lir,
8234 ArgList(ImmGCPtr(shape), Imm32(int32_t(allocKind)),
8235 Imm32(int32_t(initialHeap))),
8236 StoreRegisterTo(objReg));
8238 bool initContents = ShouldInitFixedSlots(lir, shape, mir->numFixedSlots());
8240 masm.movePtr(ImmGCPtr(shape), shapeReg);
8241 masm.createPlainGCObject(
8242 objReg, shapeReg, temp0Reg, temp1Reg, mir->numFixedSlots(),
8243 mir->numDynamicSlots(), allocKind, initialHeap, ool->entry(),
8244 AllocSiteInput(gc::CatchAllAllocSite::Optimized), initContents);
8246 #ifdef DEBUG
8247 // ShouldInitFixedSlots expects that the leading GuardShape will never fail,
8248 // so ensure the newly created object has the correct shape. Should the guard
8249 // ever fail, we may end up with uninitialized fixed slots, which can confuse
8250 // the GC.
8251 Label ok;
8252 masm.branchTestObjShape(Assembler::Equal, objReg, shape, temp0Reg, objReg,
8253 &ok);
8254 masm.assumeUnreachable("Newly created object has the correct shape");
8255 masm.bind(&ok);
8256 #endif
8258 masm.bind(ool->rejoin());
// Allocate an ArrayObject with inline fixed elements, inline with a VM
// fallback.
void CodeGenerator::visitNewArrayObject(LNewArrayObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register temp0Reg = ToRegister(lir->temp0());
  Register shapeReg = ToRegister(lir->temp1());

  auto* mir = lir->mir();
  uint32_t arrayLength = mir->length();

  // Choose an alloc kind large enough for the length, preferring background
  // (nursery-compatible) kinds.
  gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
  MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
  allocKind = ForegroundToBackgroundAllocKind(allocKind);

  uint32_t slotCount = GetGCKindSlots(allocKind);
  MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
  uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;

  const Shape* shape = mir->shape();

  NewObjectKind objectKind =
      mir->initialHeap() == gc::Heap::Tenured ? TenuredObject : GenericObject;

  using Fn =
      ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, NewObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, NewArrayObjectOptimizedFallback>(
      lir,
      ArgList(Imm32(arrayLength), Imm32(int32_t(allocKind)), Imm32(objectKind)),
      StoreRegisterTo(objReg));

  // NOTE(review): the shape is loaded with ImmPtr rather than ImmGCPtr here —
  // presumably it is kept alive by the MIR/IonScript; confirm this is
  // intentional (visitNewPlainObject uses ImmGCPtr).
  masm.movePtr(ImmPtr(shape), shapeReg);
  masm.createArrayWithFixedElements(
      objReg, shapeReg, temp0Reg, InvalidReg, arrayLength, arrayCapacity, 0, 0,
      allocKind, mir->initialHeap(), ool->entry(),
      AllocSiteInput(gc::CatchAllAllocSite::Optimized));
  masm.bind(ool->rejoin());
}
// Allocate a NamedLambdaObject environment from a template, inline with a VM
// fallback.
void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());
  const CompileInfo& info = lir->mir()->block()->info();

  using Fn = js::NamedLambdaObject* (*)(JSContext*, HandleFunction);
  OutOfLineCode* ool = oolCallVM<Fn, NamedLambdaObject::createWithoutEnclosing>(
      lir, ArgList(info.funMaybeLazy()), StoreRegisterTo(objReg));

  TemplateObject templateObject(lir->mir()->templateObj());

  masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Allocate a CallObject environment from a template, inline with a VM
// fallback.
void CodeGenerator::visitNewCallObject(LNewCallObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  CallObject* templateObj = lir->mir()->templateObject();

  using Fn = CallObject* (*)(JSContext*, Handle<SharedShape*>);
  OutOfLineCode* ool = oolCallVM<Fn, CallObject::createWithShape>(
      lir, ArgList(ImmGCPtr(templateObj->sharedShape())),
      StoreRegisterTo(objReg));

  // Inline call object creation, using the OOL path only for tricky cases.
  TemplateObject templateObject(templateObj);
  masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Allocate a String wrapper object (new String(s)) inline with a VM fallback,
// then initialize its primitive-value and length slots.
void CodeGenerator::visitNewStringObject(LNewStringObject* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  StringObject* templateObj = lir->mir()->templateObj();

  using Fn = JSObject* (*)(JSContext*, HandleString);
  OutOfLineCode* ool = oolCallVM<Fn, NewStringObject>(lir, ArgList(input),
                                                      StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.loadStringLength(input, temp);

  // Fill the StringObject's reserved slots with the wrapped string and its
  // length.
  masm.storeValue(JSVAL_TYPE_STRING, input,
                  Address(output, StringObject::offsetOfPrimitiveValue()));
  masm.storeValue(JSVAL_TYPE_INT32, temp,
                  Address(output, StringObject::offsetOfLength()));

  masm.bind(ool->rejoin());
}
// Define a getter/setter on an object for a computed property key, via a VM
// call.
void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) {
  Register obj = ToRegister(lir->object());
  Register value = ToRegister(lir->value());

  // VM args are pushed in reverse order of the Fn signature.
  pushArg(value);
  pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex));
  pushArg(obj);
  pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));

  using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
                      HandleObject);
  callVM<Fn, InitElemGetterSetterOperation>(lir);
}
// Implement __proto__ mutation on an object literal via a VM call.
void CodeGenerator::visitMutateProto(LMutateProto* lir) {
  Register objReg = ToRegister(lir->object());

  // VM args are pushed in reverse order of the Fn signature.
  pushArg(ToValue(lir, LMutateProto::ValueIndex));
  pushArg(objReg);

  using Fn =
      bool (*)(JSContext* cx, Handle<PlainObject*> obj, HandleValue value);
  callVM<Fn, MutatePrototype>(lir);
}
// Define a getter/setter on an object for a named property, via a VM call.
void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) {
  Register obj = ToRegister(lir->object());
  Register value = ToRegister(lir->value());

  // VM args are pushed in reverse order of the Fn signature.
  pushArg(value);
  pushArg(ImmGCPtr(lir->mir()->name()));
  pushArg(obj);
  pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));

  using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject,
                      Handle<PropertyName*>, HandleObject);
  callVM<Fn, InitPropGetterSetterOperation>(lir);
}
8397 void CodeGenerator::visitCreateThis(LCreateThis* lir) {
8398 const LAllocation* callee = lir->callee();
8399 const LAllocation* newTarget = lir->newTarget();
8401 if (newTarget->isConstant()) {
8402 pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
8403 } else {
8404 pushArg(ToRegister(newTarget));
8407 if (callee->isConstant()) {
8408 pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
8409 } else {
8410 pushArg(ToRegister(callee));
8413 using Fn = bool (*)(JSContext* cx, HandleObject callee,
8414 HandleObject newTarget, MutableHandleValue rval);
8415 callVM<Fn, jit::CreateThisFromIon>(lir);
// Create the function's |arguments| object: a pure inline/ABI fast path when
// a template object exists, with a VM-call fallback.
void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) {
  // This should be getting constructed in the first block only, and not any OSR
  // entry blocks.
  MOZ_ASSERT(lir->mir()->block()->id() == 0);

  Register callObj = ToRegister(lir->callObject());
  Register temp0 = ToRegister(lir->temp0());
  Label done;

  if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
    Register objTemp = ToRegister(lir->temp1());
    Register cxTemp = ToRegister(lir->temp2());

    // Save callObj so the VM fallback can reload it after failure.
    masm.Push(callObj);

    // Try to allocate an arguments object. This will leave the reserved
    // slots uninitialized, so it's important we don't GC until we
    // initialize these slots in ArgumentsObject::finishForIonPure.
    Label failure;
    TemplateObject templateObject(templateObj);
    masm.createGCObject(objTemp, temp0, templateObject, gc::Heap::Default,
                        &failure,
                        /* initContents = */ false);

    // Compute the address of this frame's JitFrameLayout.
    masm.moveStackPtrTo(temp0);
    masm.addPtr(Imm32(masm.framePushed()), temp0);

    using Fn = ArgumentsObject* (*)(JSContext* cx, jit::JitFrameLayout* frame,
                                    JSObject* scopeChain, ArgumentsObject* obj);
    masm.setupAlignedABICall();
    masm.loadJSContext(cxTemp);
    masm.passABIArg(cxTemp);
    masm.passABIArg(temp0);
    masm.passABIArg(callObj);
    masm.passABIArg(objTemp);

    masm.callWithABI<Fn, ArgumentsObject::finishForIonPure>();
    // A null result means the pure path failed; fall back to the VM.
    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);

    // Discard saved callObj on the stack.
    masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
    masm.jump(&done);

    masm.bind(&failure);
    masm.Pop(callObj);
  }

  // VM fallback: pass the frame address and the call object.
  masm.moveStackPtrTo(temp0);
  masm.addPtr(Imm32(frameSize()), temp0);

  pushArg(callObj);
  pushArg(temp0);

  using Fn = ArgumentsObject* (*)(JSContext*, JitFrameLayout*, HandleObject);
  callVM<Fn, ArgumentsObject::createForIon>(lir);

  masm.bind(&done);
}
// Create an |arguments| object for an inlined frame: materialize the inlined
// actuals into a contiguous stack array, then try a pure ABI fast path with a
// VM-call fallback.
void CodeGenerator::visitCreateInlinedArgumentsObject(
    LCreateInlinedArgumentsObject* lir) {
  Register callObj = ToRegister(lir->getCallObject());
  Register callee = ToRegister(lir->getCallee());
  Register argsAddress = ToRegister(lir->temp1());
  Register argsObj = ToRegister(lir->temp2());

  // TODO: Do we have to worry about alignment here?

  // Create a contiguous array of values for ArgumentsObject::create
  // by pushing the arguments onto the stack in reverse order.
  uint32_t argc = lir->mir()->numActuals();
  for (uint32_t i = 0; i < argc; i++) {
    uint32_t argNum = argc - i - 1;
    uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(argNum);
    ConstantOrRegister arg =
        toConstantOrRegister(lir, index, lir->mir()->getArg(argNum)->type());
    masm.Push(arg);
  }
  masm.moveStackPtrTo(argsAddress);

  Label done;
  if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
    // Preserve the inputs the VM fallback needs across the fast path.
    LiveRegisterSet liveRegs;
    liveRegs.add(callObj);
    liveRegs.add(callee);

    masm.PushRegsInMask(liveRegs);

    // We are free to clobber all registers, as LCreateInlinedArgumentsObject is
    // a call instruction.
    AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    allRegs.take(callObj);
    allRegs.take(callee);
    allRegs.take(argsObj);
    allRegs.take(argsAddress);

    Register temp3 = allRegs.takeAny();
    Register temp4 = allRegs.takeAny();

    // Try to allocate an arguments object. This will leave the reserved slots
    // uninitialized, so it's important we don't GC until we initialize these
    // slots in ArgumentsObject::finishForIonPure.
    Label failure;
    TemplateObject templateObject(templateObj);
    masm.createGCObject(argsObj, temp3, templateObject, gc::Heap::Default,
                        &failure,
                        /* initContents = */ false);

    Register numActuals = temp3;
    masm.move32(Imm32(argc), numActuals);

    using Fn = ArgumentsObject* (*)(JSContext*, JSObject*, JSFunction*, Value*,
                                    uint32_t, ArgumentsObject*);
    masm.setupAlignedABICall();
    masm.loadJSContext(temp4);
    masm.passABIArg(temp4);
    masm.passABIArg(callObj);
    masm.passABIArg(callee);
    masm.passABIArg(argsAddress);
    masm.passABIArg(numActuals);
    masm.passABIArg(argsObj);

    masm.callWithABI<Fn, ArgumentsObject::finishInlineForIonPure>();
    // A null result means the pure path failed; fall back to the VM.
    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);

    // Discard saved callObj, callee, and values array on the stack.
    masm.addToStackPtr(
        Imm32(MacroAssembler::PushRegsInMaskSizeInBytes(liveRegs) +
              argc * sizeof(Value)));
    masm.jump(&done);

    masm.bind(&failure);
    masm.PopRegsInMask(liveRegs);

    // Reload argsAddress because it may have been overridden.
    masm.moveStackPtrTo(argsAddress);
  }

  // VM fallback. Args are pushed in reverse order of the Fn signature.
  pushArg(Imm32(argc));
  pushArg(callObj);
  pushArg(callee);
  pushArg(argsAddress);

  using Fn = ArgumentsObject* (*)(JSContext*, Value*, HandleFunction,
                                  HandleObject, uint32_t);
  callVM<Fn, ArgumentsObject::createForInlinedIon>(lir);

  // Discard the array of values.
  masm.freeStack(argc * sizeof(Value));

  masm.bind(&done);
}
// Shared helper for LGetInlinedArgument(/Hole): select the inlined actual at
// a dynamic (already bounds-checked) |index| by emitting a compare-and-move
// chain over the compile-time-known actuals.
template <class GetInlinedArgument>
void CodeGenerator::emitGetInlinedArgument(GetInlinedArgument* lir,
                                           Register index,
                                           ValueOperand output) {
  uint32_t numActuals = lir->mir()->numActuals();
  MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);

  // The index has already been bounds-checked, so the code we
  // generate here should be unreachable. We can end up in this
  // situation in self-hosted code using GetArgument(), or in a
  // monomorphically inlined function if we've inlined some CacheIR
  // that was created for a different caller.
  if (numActuals == 0) {
    masm.assumeUnreachable("LGetInlinedArgument: invalid index");
    return;
  }

  // Check the first n-1 possible indices.
  Label done;
  for (uint32_t i = 0; i < numActuals - 1; i++) {
    Label skip;
    ConstantOrRegister arg = toConstantOrRegister(
        lir, GetInlinedArgument::ArgIndex(i), lir->mir()->getArg(i)->type());
    masm.branch32(Assembler::NotEqual, index, Imm32(i), &skip);
    masm.moveValue(arg, output);

    masm.jump(&done);
    masm.bind(&skip);
  }

#ifdef DEBUG
  // In debug builds, verify the index really is the last one.
  Label skip;
  masm.branch32(Assembler::Equal, index, Imm32(numActuals - 1), &skip);
  masm.assumeUnreachable("LGetInlinedArgument: invalid index");
  masm.bind(&skip);
#endif

  // The index has already been bounds-checked, so load the last argument.
  uint32_t lastIdx = numActuals - 1;
  ConstantOrRegister arg =
      toConstantOrRegister(lir, GetInlinedArgument::ArgIndex(lastIdx),
                           lir->mir()->getArg(lastIdx)->type());
  masm.moveValue(arg, output);
  masm.bind(&done);
}
8617 void CodeGenerator::visitGetInlinedArgument(LGetInlinedArgument* lir) {
8618 Register index = ToRegister(lir->getIndex());
8619 ValueOperand output = ToOutValue(lir);
8621 emitGetInlinedArgument(lir, index, output);
// Like visitGetInlinedArgument, but indices at or beyond numActuals produce
// |undefined| instead of being invalid; negative indices still bail out.
void CodeGenerator::visitGetInlinedArgumentHole(LGetInlinedArgumentHole* lir) {
  Register index = ToRegister(lir->getIndex());
  ValueOperand output = ToOutValue(lir);

  uint32_t numActuals = lir->mir()->numActuals();

  if (numActuals == 0) {
    // No actual arguments: bail out for negative indices, otherwise every
    // index is out of bounds and yields undefined.
    bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
    masm.moveValue(UndefinedValue(), output);
    return;
  }

  Label outOfBounds, done;
  masm.branch32(Assembler::AboveOrEqual, index, Imm32(numActuals),
                &outOfBounds);

  emitGetInlinedArgument(lir, index, output);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  // AboveOrEqual is an unsigned comparison, so negative indices also land
  // here; distinguish them with a signed check and bail out.
  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
  masm.moveValue(UndefinedValue(), output);

  masm.bind(&done);
}
8650 void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) {
8651 Register temp = ToRegister(lir->temp0());
8652 Register argsObj = ToRegister(lir->argsObject());
8653 ValueOperand out = ToOutValue(lir);
8655 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
8656 temp);
8657 Address argAddr(temp, ArgumentsData::offsetOfArgs() +
8658 lir->mir()->argno() * sizeof(Value));
8659 masm.loadValue(argAddr, out);
8660 #ifdef DEBUG
8661 Label success;
8662 masm.branchTestMagic(Assembler::NotEqual, out, &success);
8663 masm.assumeUnreachable(
8664 "Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
8665 masm.bind(&success);
8666 #endif
// Store a value into a fixed-index argument slot of an arguments object.
void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->getTemp(0));
  Register argsObj = ToRegister(lir->argsObject());
  ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);

  masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                   temp);
  Address argAddr(temp, ArgumentsData::offsetOfArgs() +
                            lir->mir()->argno() * sizeof(Value));
  // Pre-barrier on the old slot contents before they are overwritten below.
  emitPreBarrier(argAddr);
#ifdef DEBUG
  Label success;
  masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
  masm.assumeUnreachable(
      "Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
  masm.bind(&success);
#endif
  masm.storeValue(value, argAddr);
}
8689 void CodeGenerator::visitLoadArgumentsObjectArg(LLoadArgumentsObjectArg* lir) {
8690 Register temp = ToRegister(lir->temp0());
8691 Register argsObj = ToRegister(lir->argsObject());
8692 Register index = ToRegister(lir->index());
8693 ValueOperand out = ToOutValue(lir);
8695 Label bail;
8696 masm.loadArgumentsObjectElement(argsObj, index, out, temp, &bail);
8697 bailoutFrom(&bail, lir->snapshot());
8700 void CodeGenerator::visitLoadArgumentsObjectArgHole(
8701 LLoadArgumentsObjectArgHole* lir) {
8702 Register temp = ToRegister(lir->temp0());
8703 Register argsObj = ToRegister(lir->argsObject());
8704 Register index = ToRegister(lir->index());
8705 ValueOperand out = ToOutValue(lir);
8707 Label bail;
8708 masm.loadArgumentsObjectElementHole(argsObj, index, out, temp, &bail);
8709 bailoutFrom(&bail, lir->snapshot());
8712 void CodeGenerator::visitInArgumentsObjectArg(LInArgumentsObjectArg* lir) {
8713 Register temp = ToRegister(lir->temp0());
8714 Register argsObj = ToRegister(lir->argsObject());
8715 Register index = ToRegister(lir->index());
8716 Register out = ToRegister(lir->output());
8718 Label bail;
8719 masm.loadArgumentsObjectElementExists(argsObj, index, out, temp, &bail);
8720 bailoutFrom(&bail, lir->snapshot());
8723 void CodeGenerator::visitArgumentsObjectLength(LArgumentsObjectLength* lir) {
8724 Register argsObj = ToRegister(lir->argsObject());
8725 Register out = ToRegister(lir->output());
8727 Label bail;
8728 masm.loadArgumentsObjectLength(argsObj, out, &bail);
8729 bailoutFrom(&bail, lir->snapshot());
8732 void CodeGenerator::visitArrayFromArgumentsObject(
8733 LArrayFromArgumentsObject* lir) {
8734 pushArg(ToRegister(lir->argsObject()));
8736 using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
8737 callVM<Fn, js::ArrayFromArgumentsObject>(lir);
8740 void CodeGenerator::visitGuardArgumentsObjectFlags(
8741 LGuardArgumentsObjectFlags* lir) {
8742 Register argsObj = ToRegister(lir->argsObject());
8743 Register temp = ToRegister(lir->temp0());
8745 Label bail;
8746 masm.branchTestArgumentsObjectFlags(argsObj, temp, lir->mir()->flags(),
8747 Assembler::NonZero, &bail);
8748 bailoutFrom(&bail, lir->snapshot());
8751 void CodeGenerator::visitBoundFunctionNumArgs(LBoundFunctionNumArgs* lir) {
8752 Register obj = ToRegister(lir->object());
8753 Register output = ToRegister(lir->output());
8755 masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
8756 output);
8757 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
8760 void CodeGenerator::visitGuardBoundFunctionIsConstructor(
8761 LGuardBoundFunctionIsConstructor* lir) {
8762 Register obj = ToRegister(lir->object());
8764 Label bail;
8765 Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
8766 masm.branchTest32(Assembler::Zero, flagsSlot,
8767 Imm32(BoundFunctionObject::IsConstructorFlag), &bail);
8768 bailoutFrom(&bail, lir->snapshot());
8771 void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) {
8772 ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
8773 Register obj = ToRegister(lir->object());
8774 Register output = ToRegister(lir->output());
8776 Label valueIsObject, end;
8778 masm.branchTestObject(Assembler::Equal, value, &valueIsObject);
8780 // Value is not an object. Return that other object.
8781 masm.movePtr(obj, output);
8782 masm.jump(&end);
8784 // Value is an object. Return unbox(Value).
8785 masm.bind(&valueIsObject);
8786 Register payload = masm.extractObject(value, output);
8787 if (payload != output) {
8788 masm.movePtr(payload, output);
8791 masm.bind(&end);
// Out-of-line path for LBoxNonStrictThis, taken when the |this| value is not
// already an object (see visitOutOfLineBoxNonStrictThis).
class OutOfLineBoxNonStrictThis : public OutOfLineCodeBase<CodeGenerator> {
  LBoxNonStrictThis* ins_;  // the instruction this OOL path belongs to

 public:
  explicit OutOfLineBoxNonStrictThis(LBoxNonStrictThis* ins) : ins_(ins) {}
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineBoxNonStrictThis(this);
  }
  LBoxNonStrictThis* ins() const { return ins_; }
};
8805 void CodeGenerator::visitBoxNonStrictThis(LBoxNonStrictThis* lir) {
8806 ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
8807 Register output = ToRegister(lir->output());
8809 auto* ool = new (alloc()) OutOfLineBoxNonStrictThis(lir);
8810 addOutOfLineCode(ool, lir->mir());
8812 masm.fallibleUnboxObject(value, output, ool->entry());
8813 masm.bind(ool->rejoin());
// Out-of-line continuation for LBoxNonStrictThis: null/undefined become the
// global |this|; every other non-object value is boxed via a VM call.
void CodeGenerator::visitOutOfLineBoxNonStrictThis(
    OutOfLineBoxNonStrictThis* ool) {
  LBoxNonStrictThis* lir = ool->ins();

  ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
  Register output = ToRegister(lir->output());

  Label notNullOrUndefined;
  {
    Label isNullOrUndefined;
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);
    masm.branchTestUndefined(Assembler::Equal, tag, &isNullOrUndefined);
    masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
    masm.bind(&isNullOrUndefined);
    // null/undefined: substitute the precomputed global |this| object.
    masm.movePtr(ImmGCPtr(lir->mir()->globalThis()), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&notNullOrUndefined);

  // Other primitives: box through the BoxNonStrictThis VM function, saving
  // and restoring live registers around the call.
  saveLive(lir);

  pushArg(value);
  using Fn = JSObject* (*)(JSContext*, HandleValue);
  callVM<Fn, BoxNonStrictThis>(lir);

  StoreRegisterTo(output).generate(this);
  restoreLiveIgnore(lir, StoreRegisterTo(output).clobbered());

  masm.jump(ool->rejoin());
}
8849 void CodeGenerator::visitImplicitThis(LImplicitThis* lir) {
8850 pushArg(ImmGCPtr(lir->mir()->name()));
8851 pushArg(ToRegister(lir->env()));
8853 using Fn = bool (*)(JSContext*, HandleObject, Handle<PropertyName*>,
8854 MutableHandleValue);
8855 callVM<Fn, ImplicitThisOperation>(lir);
8858 void CodeGenerator::visitArrayLength(LArrayLength* lir) {
8859 Register elements = ToRegister(lir->elements());
8860 Register output = ToRegister(lir->output());
8862 Address length(elements, ObjectElements::offsetOfLength());
8863 masm.load32(length, output);
8865 // Bail out if the length doesn't fit in int32.
8866 bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
// Store |index + 1| into the given length address. In the register case the
// index register is temporarily incremented and then restored, because the
// caller still owns the register's original value.
static void SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index,
                               const Address& length) {
  if (index->isConstant()) {
    masm.store32(Imm32(ToInt32(index) + 1), length);
  } else {
    Register newLength = ToRegister(index);
    masm.add32(Imm32(1), newLength);
    masm.store32(newLength, length);
    // Restore the register to its original (index) value.
    masm.sub32(Imm32(1), newLength);
  }
}
8881 void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) {
8882 Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
8883 SetLengthFromIndex(masm, lir->index(), length);
// Load fun.length, bailing out whenever the value cannot be read inline.
void CodeGenerator::visitFunctionLength(LFunctionLength* lir) {
  Register function = ToRegister(lir->function());
  Register output = ToRegister(lir->output());

  Label bail;

  // Get the JSFunction flags.
  masm.load32(Address(function, JSFunction::offsetOfFlagsAndArgCount()),
              output);

  // Functions with a SelfHostedLazyScript must be compiled with the slow-path
  // before the function length is known. If the length was previously resolved,
  // the length property may be shadowed.
  masm.branchTest32(
      Assembler::NonZero, output,
      Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
      &bail);

  // |output| is both the scratch and the destination here.
  masm.loadFunctionLength(function, output, output, &bail);

  bailoutFrom(&bail, lir->snapshot());
}
8909 void CodeGenerator::visitFunctionName(LFunctionName* lir) {
8910 Register function = ToRegister(lir->function());
8911 Register output = ToRegister(lir->output());
8913 Label bail;
8915 const JSAtomState& names = gen->runtime->names();
8916 masm.loadFunctionName(function, output, ImmGCPtr(names.empty_), &bail);
8918 bailoutFrom(&bail, lir->snapshot());
// Loads into the fourth register a pointer to the entry of |range|'s
// underlying hash-table data selected by the current index; specialized
// below for ValueMap and ValueSet.
template <class OrderedHashTable>
static void RangeFront(MacroAssembler&, Register, Register, Register);
// ValueMap specialization: front = ht.data + i * sizeof(Data), with
// sizeof(Data) == 24 computed as (i * 3) << 3. Note: clobbers |i|.
template <>
void RangeFront<ValueMap>(MacroAssembler& masm, Register range, Register i,
                          Register front) {
  masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
  masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);

  MOZ_ASSERT(ValueMap::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
  masm.mulBy3(i, i);
  masm.lshiftPtr(Imm32(3), i);
  masm.addPtr(i, front);
}
// ValueSet specialization: front = ht.data + i * sizeof(Data), with
// sizeof(Data) == 16 computed as i << 4. Note: clobbers |i|.
template <>
void RangeFront<ValueSet>(MacroAssembler& masm, Register range, Register i,
                          Register front) {
  masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
  masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);

  MOZ_ASSERT(ValueSet::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
  masm.lshiftPtr(Imm32(4), i);
  masm.addPtr(i, front);
}
// Advance |range| past the current entry: bump the visit count, then step
// |front| (and the stored index) forward until a non-empty entry is found or
// the end of the data array is reached. |temp| is clobbered.
template <class OrderedHashTable>
static void RangePopFront(MacroAssembler& masm, Register range, Register front,
                          Register dataLength, Register temp) {
  Register i = temp;

  masm.add32(Imm32(1),
             Address(range, OrderedHashTable::Range::offsetOfCount()));

  masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);

  Label done, seek;
  masm.bind(&seek);
  masm.add32(Imm32(1), i);
  masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);

  // We can add sizeof(Data) to |front| to select the next element, because
  // |front| and |range.ht.data[i]| point to the same location.
  MOZ_ASSERT(OrderedHashTable::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);

  // Skip over entries whose key is the hash-table "empty" sentinel magic.
  masm.branchTestMagic(Assembler::Equal,
                       Address(front, OrderedHashTable::offsetOfEntryKey()),
                       JS_HASH_KEY_EMPTY, &seek);

  masm.bind(&done);
  masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
}
// Unlink |range| from the hash table's doubly-linked list of live ranges and
// free its memory — except when |iter| is nursery-allocated, in which case
// the free is skipped. Clobbers |temp0| and |temp1|.
template <class OrderedHashTable>
static inline void RangeDestruct(MacroAssembler& masm, Register iter,
                                 Register range, Register temp0,
                                 Register temp1) {
  Register next = temp0;
  Register prevp = temp1;

  // Splice the range out of the list: *prevp = next.
  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
  masm.storePtr(next, Address(prevp, 0));

  Label hasNoNext;
  masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);

  // Fix up the back-pointer of the following range, if any.
  masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));

  masm.bind(&hasNoNext);

  Label nurseryAllocated;
  masm.branchPtrInNurseryChunk(Assembler::Equal, iter, temp0,
                               &nurseryAllocated);

  masm.callFreeStub(range);

  masm.bind(&nurseryAllocated);
}
// Copy the current map entry's key and value into the first two fixed
// elements of |result|, emitting the required GC pre/post barriers.
template <>
void CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result,
                                                     Register temp,
                                                     Register front) {
  size_t elementsOffset = NativeObject::offsetOfFixedElements();

  Address keyAddress(front, ValueMap::Entry::offsetOfKey());
  Address valueAddress(front, ValueMap::Entry::offsetOfValue());
  Address keyElemAddress(result, elementsOffset);
  Address valueElemAddress(result, elementsOffset + sizeof(Value));
  // Pre-barrier the old element contents before overwriting them.
  masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
  masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
  masm.storeValue(keyAddress, keyElemAddress, temp);
  masm.storeValue(valueAddress, valueElemAddress, temp);

  // Post-write-barrier |result| when either stored value is a nursery cell.
  Label emitBarrier, skipBarrier;
  masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp,
                                &emitBarrier);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp,
                                &skipBarrier);

  masm.bind(&emitBarrier);
  saveVolatile(temp);
  emitPostWriteBarrier(result);
  restoreVolatile(temp);

  masm.bind(&skipBarrier);
}
// Copy the current set entry's key into the first fixed element of |result|,
// emitting the required GC pre/post barriers.
template <>
void CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result,
                                                     Register temp,
                                                     Register front) {
  size_t elementsOffset = NativeObject::offsetOfFixedElements();

  Address keyAddress(front, ValueSet::offsetOfEntryKey());
  Address keyElemAddress(result, elementsOffset);
  // Pre-barrier the old element contents before overwriting them.
  masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
  masm.storeValue(keyAddress, keyElemAddress, temp);

  // Post-write-barrier |result| when the stored key is a nursery cell.
  Label skipBarrier;
  masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp,
                                &skipBarrier);

  saveVolatile(temp);
  emitPostWriteBarrier(result);
  restoreVolatile(temp);

  masm.bind(&skipBarrier);
}
// Emits the body of GetNextEntryForIterator for Map/Set iterators: advance
// the iterator's range, copy the current entry into |result|, and set
// |output| to 0; when the iteration is finished, tear down the range and set
// |output| to 1.
template <class IteratorObject, class OrderedHashTable>
void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) {
  Register iter = ToRegister(lir->iter());
  Register result = ToRegister(lir->result());
  Register temp = ToRegister(lir->temp0());
  Register dataLength = ToRegister(lir->temp1());
  Register range = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
  // only called with the correct iterator class. Assert here all self-
  // hosted callers of GetNextEntryForIterator perform this class check.
  // No Spectre mitigations are needed because this is DEBUG-only code.
  Label success;
  masm.branchTestObjClassNoSpectreMitigations(
      Assembler::Equal, iter, &IteratorObject::class_, temp, &success);
  masm.assumeUnreachable("Iterator object should have the correct class.");
  masm.bind(&success);
#endif

  masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(
                                     IteratorObject::RangeSlot)),
                   range);

  // A null range means the iterator was already torn down.
  Label iterAlreadyDone, iterDone, done;
  masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);

  // Compare the range's current index against the data length.
  masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()),
               dataLength);
  masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()),
              dataLength);
  masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);
  {
    // |iter| is reused as the |front| register below, so preserve it.
    masm.Push(iter);

    Register front = iter;
    RangeFront<OrderedHashTable>(masm, range, temp, front);

    emitLoadIteratorValues<OrderedHashTable>(result, temp, front);

    RangePopFront<OrderedHashTable>(masm, range, front, dataLength, temp);

    masm.Pop(iter);
    masm.move32(Imm32(0), output);
  }
  masm.jump(&done);
  {
    masm.bind(&iterDone);

    RangeDestruct<OrderedHashTable>(masm, iter, range, temp, dataLength);

    // Clear the range slot so subsequent calls take the already-done path.
    masm.storeValue(PrivateValue(nullptr),
                    Address(iter, NativeObject::getFixedSlotOffset(
                                      IteratorObject::RangeSlot)));

    masm.bind(&iterAlreadyDone);

    masm.move32(Imm32(1), output);
  }
  masm.bind(&done);
}
9122 void CodeGenerator::visitGetNextEntryForIterator(
9123 LGetNextEntryForIterator* lir) {
9124 if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
9125 emitGetNextEntryForIterator<MapIteratorObject, ValueMap>(lir);
9126 } else {
9127 MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
9128 emitGetNextEntryForIterator<SetIteratorObject, ValueSet>(lir);
// The point of these is to inform Ion of where these values already are; they
// don't normally generate (much) code, so the visitors are intentionally
// empty.
void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
// Prepare a wasm stack-result area: zero out every anyref slot so the GC
// never sees uninitialized pointers there.
void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
  LAllocation* output = lir->getDef(0)->output();
  MOZ_ASSERT(output->isStackArea());
  bool tempInit = false;
  for (auto iter = output->toStackArea()->results(); iter; iter.next()) {
    // Zero out ref stack results.
    if (iter.isWasmAnyRef()) {
      Register temp = ToRegister(lir->temp0());
      if (!tempInit) {
        // Zero the temp lazily, only once a ref result is actually seen.
        masm.xorPtr(temp, temp);
        tempInit = true;
      }
      masm.storePtr(temp, ToAddress(iter.alloc()));
    }
  }
}
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
#ifdef JS_64BIT
  if (MWasmRegisterResult* mir = lir->mir()) {
    if (mir->type() == MIRType::Int32) {
      // On 64-bit targets, normalize an Int32 result so the upper half of
      // the 64-bit register is well-defined.
      masm.widenInt32(ToRegister(lir->output()));
    }
  }
#endif
}
// Emit a wasm call (direct, import, indirect-through-table, builtin, or
// funcref), including try-note bookkeeping for catchable calls, safepoint
// recording, and post-call instance/realm restoration. Return calls exit
// early since none of the post-call work applies to them.
void CodeGenerator::visitWasmCall(LWasmCall* lir) {
  const MWasmCallBase* callBase = lir->callBase();
  bool isReturnCall = lir->isReturnCall();

  // If this call is in Wasm try code block, initialise a wasm::TryNote for this
  // call.
  bool inTry = callBase->inTry();
  if (inTry) {
    size_t tryNoteIndex = callBase->tryNoteIndex();
    wasm::TryNoteVector& tryNotes = masm.tryNotes();
    wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
    tryNote.setTryBodyBegin(masm.currentOffset());
  }

  MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment ==
             0);
  static_assert(
      WasmStackAlignment >= ABIStackAlignment &&
          WasmStackAlignment % ABIStackAlignment == 0,
      "The wasm stack alignment should subsume the ABI-required alignment");

#ifdef DEBUG
  Label ok;
  masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif

  // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
  // instance and pinned regs. The only case where where we don't have to
  // reload the instance and pinned regs is when the callee preserves them.
  bool reloadRegs = true;
  bool switchRealm = true;

  const wasm::CallSiteDesc& desc = callBase->desc();
  const wasm::CalleeDesc& callee = callBase->callee();
  CodeOffset retOffset;
  CodeOffset secondRetOffset;
  switch (callee.which()) {
    case wasm::CalleeDesc::Func:
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCall(desc, callee.funcIndex(), retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      retOffset = masm.call(desc, callee.funcIndex());
      // Same-module direct calls preserve the instance and realm.
      reloadRegs = false;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::Import:
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallImport(desc, callee, retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      retOffset = masm.wasmCallImport(desc, callee);
      break;
    case wasm::CalleeDesc::AsmJSTable:
      retOffset = masm.asmCallIndirect(desc, callee);
      break;
    case wasm::CalleeDesc::WasmTable: {
      // Out-of-line traps for the table bounds check and (without a heap
      // register) the null-entry check.
      Label* boundsCheckFailed = nullptr;
      if (lir->needsBoundsCheck()) {
        OutOfLineAbortingWasmTrap* ool =
            new (alloc()) OutOfLineAbortingWasmTrap(
                wasm::BytecodeOffset(desc.lineOrBytecode()),
                wasm::Trap::OutOfBounds);
        if (lir->isCatchable()) {
          addOutOfLineCode(ool, lir->mirCatchable());
        } else if (isReturnCall) {
#ifdef ENABLE_WASM_TAIL_CALLS
          addOutOfLineCode(ool, lir->mirReturnCall());
#else
          MOZ_CRASH("Return calls are disabled.");
#endif
        } else {
          addOutOfLineCode(ool, lir->mirUncatchable());
        }
        boundsCheckFailed = ool->entry();
      }
      Label* nullCheckFailed = nullptr;
#ifndef WASM_HAS_HEAPREG
      {
        OutOfLineAbortingWasmTrap* ool =
            new (alloc()) OutOfLineAbortingWasmTrap(
                wasm::BytecodeOffset(desc.lineOrBytecode()),
                wasm::Trap::IndirectCallToNull);
        if (lir->isCatchable()) {
          addOutOfLineCode(ool, lir->mirCatchable());
        } else if (isReturnCall) {
# ifdef ENABLE_WASM_TAIL_CALLS
          addOutOfLineCode(ool, lir->mirReturnCall());
# else
          MOZ_CRASH("Return calls are disabled.");
# endif
        } else {
          addOutOfLineCode(ool, lir->mirUncatchable());
        }
        nullCheckFailed = ool->entry();
      }
#endif
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallIndirect(desc, callee, boundsCheckFailed,
                                    nullCheckFailed, mozilla::Nothing(),
                                    retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      masm.wasmCallIndirect(desc, callee, boundsCheckFailed, nullCheckFailed,
                            lir->tableSize(), &retOffset, &secondRetOffset);
      // Register reloading and realm switching are handled dynamically inside
      // wasmCallIndirect. There are two return offsets, one for each call
      // instruction (fast path and slow path).
      reloadRegs = false;
      switchRealm = false;
      break;
    }
    case wasm::CalleeDesc::Builtin:
      retOffset = masm.call(desc, callee.builtin());
      reloadRegs = false;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::BuiltinInstanceMethod:
      retOffset = masm.wasmCallBuiltinInstanceMethod(
          desc, callBase->instanceArg(), callee.builtin(),
          callBase->builtinMethodFailureMode());
      switchRealm = false;
      break;
    case wasm::CalleeDesc::FuncRef:
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallRef(desc, callee, retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      // Register reloading and realm switching are handled dynamically inside
      // wasmCallRef. There are two return offsets, one for each call
      // instruction (fast path and slow path).
      masm.wasmCallRef(desc, callee, &retOffset, &secondRetOffset);
      reloadRegs = false;
      switchRealm = false;
      break;
  }

  // Note the assembler offset for the associated LSafePoint.
  MOZ_ASSERT(!isReturnCall);
  markSafepointAt(retOffset.offset(), lir);

  // Now that all the outbound in-memory args are on the stack, note the
  // required lower boundary point of the associated StackMap.
  uint32_t framePushedAtStackMapBase =
      masm.framePushed() -
      wasm::AlignStackArgAreaSize(callBase->stackArgAreaSizeUnaligned());
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAtStackMapBase);
  MOZ_ASSERT(lir->safepoint()->wasmSafepointKind() ==
             WasmSafepointKind::LirCall);

  // Note the assembler offset and framePushed for use by the adjunct
  // LSafePoint, see visitor for LWasmCallIndirectAdjunctSafepoint below.
  if (callee.which() == wasm::CalleeDesc::WasmTable) {
    lir->adjunctSafepoint()->recordSafepointInfo(secondRetOffset,
                                                 framePushedAtStackMapBase);
  }

  if (reloadRegs) {
    masm.loadPtr(
        Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
        InstanceReg);
    masm.loadWasmPinnedRegsFromInstance();
    if (switchRealm) {
      masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
    }
  } else {
    MOZ_ASSERT(!switchRealm);
  }

#ifdef ENABLE_WASM_TAIL_CALLS
  switch (callee.which()) {
    case wasm::CalleeDesc::Func:
    case wasm::CalleeDesc::Import:
    case wasm::CalleeDesc::WasmTable:
    case wasm::CalleeDesc::FuncRef:
      // Stack allocation could change during Wasm (return) calls,
      // recover pre-call state.
      masm.freeStackTo(masm.framePushed());
      break;
    default:
      break;
  }
#endif  // ENABLE_WASM_TAIL_CALLS

  if (inTry) {
    // Set the end of the try note range
    size_t tryNoteIndex = callBase->tryNoteIndex();
    wasm::TryNoteVector& tryNotes = masm.tryNotes();
    wasm::TryNote& tryNote = tryNotes[tryNoteIndex];

    // Don't set the end of the try note if we've OOM'ed, as the above
    // instructions may not have been emitted, which will trigger an assert
    // about zero-length try-notes. This is okay as this compilation will be
    // thrown away.
    if (!masm.oom()) {
      tryNote.setTryBodyEnd(masm.currentOffset());
    }

    // This instruction or the adjunct safepoint must be the last instruction
    // in the block. No other instructions may be inserted.
    LBlock* block = lir->block();
    MOZ_RELEASE_ASSERT(*block->rbegin() == lir ||
                       (block->rbegin()->isWasmCallIndirectAdjunctSafepoint() &&
                        *(++block->rbegin()) == lir));

    // Jump to the fallthrough block
    jumpToBlock(lir->mirCatchable()->getSuccessor(
        MWasmCallCatchable::FallthroughBranchIndex));
  }
}
// Record the landing-pad entry point for a catchable wasm call's try note at
// the start of this block.
void CodeGenerator::visitWasmCallLandingPrePad(LWasmCallLandingPrePad* lir) {
  LBlock* block = lir->block();
  MWasmCallLandingPrePad* mir = lir->mir();
  MBasicBlock* mirBlock = mir->block();
  MBasicBlock* callMirBlock = mir->callBlock();

  // This block must be the pre-pad successor of the call block. No blocks may
  // be inserted between us, such as for critical edge splitting.
  MOZ_RELEASE_ASSERT(mirBlock == callMirBlock->getSuccessor(
                                     MWasmCallCatchable::PrePadBranchIndex));

  // This instruction or a move group must be the first instruction in the
  // block. No other instructions may be inserted.
  MOZ_RELEASE_ASSERT(*block->begin() == lir || (block->begin()->isMoveGroup() &&
                                                *(++block->begin()) == lir));

  wasm::TryNoteVector& tryNotes = masm.tryNotes();
  wasm::TryNote& tryNote = tryNotes[mir->tryNoteIndex()];
  // Set the entry point for the call try note to be the beginning of this
  // block. The above assertions (and assertions in visitWasmCall) guarantee
  // that we are not skipping over instructions that should be executed.
  tryNote.setLandingPad(block->label()->offset(), masm.framePushed());
}
// Record the safepoint for the second (slow-path) call instruction emitted
// by an indirect wasm call; the offset and framePushed were captured in
// visitWasmCall above.
void CodeGenerator::visitWasmCallIndirectAdjunctSafepoint(
    LWasmCallIndirectAdjunctSafepoint* lir) {
  markSafepointAt(lir->safepointLocation().offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(
      lir->framePushedAtStackMapBase());
}
9433 template <typename InstructionWithMaybeTrapSite>
9434 void EmitSignalNullCheckTrapSite(MacroAssembler& masm,
9435 InstructionWithMaybeTrapSite* ins,
9436 FaultingCodeOffset fco,
9437 wasm::TrapMachineInsn tmi) {
9438 if (!ins->maybeTrap()) {
9439 return;
9441 wasm::BytecodeOffset trapOffset(ins->maybeTrap()->offset);
9442 masm.append(wasm::Trap::NullPointerDereference,
9443 wasm::TrapSite(tmi, fco, trapOffset));
// Emit a (possibly widening) load of a wasm value from |addr| into |dst|,
// recording a null-check trap site for the faulting instruction when the
// instruction carries trap metadata.
template <typename InstructionWithMaybeTrapSite, class AddressOrBaseIndex>
void CodeGenerator::emitWasmValueLoad(InstructionWithMaybeTrapSite* ins,
                                      MIRType type, MWideningOp wideningOp,
                                      AddressOrBaseIndex addr,
                                      AnyRegister dst) {
  FaultingCodeOffset fco;
  switch (type) {
    case MIRType::Int32:
      // Int32 loads may widen from 8- or 16-bit storage, signed or unsigned.
      switch (wideningOp) {
        case MWideningOp::None:
          fco = masm.load32(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load32);
          break;
        case MWideningOp::FromU16:
          fco = masm.load16ZeroExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load16);
          break;
        case MWideningOp::FromS16:
          fco = masm.load16SignExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load16);
          break;
        case MWideningOp::FromU8:
          fco = masm.load8ZeroExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load8);
          break;
        case MWideningOp::FromS8:
          fco = masm.load8SignExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load8);
          break;
        default:
          MOZ_CRASH("unexpected widening op in ::visitWasmLoadElement");
      }
      break;
    case MIRType::Float32:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadFloat32(addr, dst.fpu());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Load32);
      break;
    case MIRType::Double:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadDouble(addr, dst.fpu());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Load64);
      break;
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
    case MIRType::WasmArrayData:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadPtr(addr, dst.gpr());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsnForLoadWord());
      break;
    default:
      MOZ_CRASH("unexpected type in ::emitWasmValueLoad");
  }
}
9509 template <typename InstructionWithMaybeTrapSite, class AddressOrBaseIndex>
9510 void CodeGenerator::emitWasmValueStore(InstructionWithMaybeTrapSite* ins,
9511 MIRType type, MNarrowingOp narrowingOp,
9512 AnyRegister src,
9513 AddressOrBaseIndex addr) {
9514 FaultingCodeOffset fco;
9515 switch (type) {
9516 case MIRType::Int32:
9517 switch (narrowingOp) {
9518 case MNarrowingOp::None:
9519 fco = masm.store32(src.gpr(), addr);
9520 EmitSignalNullCheckTrapSite(masm, ins, fco,
9521 wasm::TrapMachineInsn::Store32);
9522 break;
9523 case MNarrowingOp::To16:
9524 fco = masm.store16(src.gpr(), addr);
9525 EmitSignalNullCheckTrapSite(masm, ins, fco,
9526 wasm::TrapMachineInsn::Store16);
9527 break;
9528 case MNarrowingOp::To8:
9529 fco = masm.store8(src.gpr(), addr);
9530 EmitSignalNullCheckTrapSite(masm, ins, fco,
9531 wasm::TrapMachineInsn::Store8);
9532 break;
9533 default:
9534 MOZ_CRASH();
9536 break;
9537 case MIRType::Float32:
9538 fco = masm.storeFloat32(src.fpu(), addr);
9539 EmitSignalNullCheckTrapSite(masm, ins, fco,
9540 wasm::TrapMachineInsn::Store32);
9541 break;
9542 case MIRType::Double:
9543 fco = masm.storeDouble(src.fpu(), addr);
9544 EmitSignalNullCheckTrapSite(masm, ins, fco,
9545 wasm::TrapMachineInsn::Store64);
9546 break;
9547 case MIRType::Pointer:
9548 // This could be correct, but it would be a new usage, so check carefully.
9549 MOZ_CRASH("Unexpected type in ::emitWasmValueStore.");
9550 case MIRType::WasmAnyRef:
9551 MOZ_CRASH("Bad type in ::emitWasmValueStore. Use LWasmStoreElementRef.");
9552 default:
9553 MOZ_CRASH("unexpected type in ::emitWasmValueStore");
9557 void CodeGenerator::visitWasmLoadSlot(LWasmLoadSlot* ins) {
9558 MIRType type = ins->type();
9559 MWideningOp wideningOp = ins->wideningOp();
9560 Register container = ToRegister(ins->containerRef());
9561 Address addr(container, ins->offset());
9562 AnyRegister dst = ToAnyRegister(ins->output());
9564 #ifdef ENABLE_WASM_SIMD
9565 if (type == MIRType::Simd128) {
9566 MOZ_ASSERT(wideningOp == MWideningOp::None);
9567 FaultingCodeOffset fco = masm.loadUnalignedSimd128(addr, dst.fpu());
9568 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load128);
9569 return;
9571 #endif
9572 emitWasmValueLoad(ins, type, wideningOp, addr, dst);
9575 void CodeGenerator::visitWasmLoadElement(LWasmLoadElement* ins) {
9576 MIRType type = ins->type();
9577 MWideningOp wideningOp = ins->wideningOp();
9578 Scale scale = ins->scale();
9579 Register base = ToRegister(ins->base());
9580 Register index = ToRegister(ins->index());
9581 AnyRegister dst = ToAnyRegister(ins->output());
9583 #ifdef ENABLE_WASM_SIMD
9584 if (type == MIRType::Simd128) {
9585 MOZ_ASSERT(wideningOp == MWideningOp::None);
9586 FaultingCodeOffset fco;
9587 Register temp = ToRegister(ins->temp0());
9588 masm.movePtr(index, temp);
9589 masm.lshiftPtr(Imm32(4), temp);
9590 fco = masm.loadUnalignedSimd128(BaseIndex(base, temp, Scale::TimesOne),
9591 dst.fpu());
9592 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load128);
9593 return;
9595 #endif
9596 emitWasmValueLoad(ins, type, wideningOp, BaseIndex(base, index, scale), dst);
9599 void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
9600 MIRType type = ins->type();
9601 MNarrowingOp narrowingOp = ins->narrowingOp();
9602 Register container = ToRegister(ins->containerRef());
9603 Address addr(container, ins->offset());
9604 AnyRegister src = ToAnyRegister(ins->value());
9605 if (type != MIRType::Int32) {
9606 MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
9609 #ifdef ENABLE_WASM_SIMD
9610 if (type == MIRType::Simd128) {
9611 FaultingCodeOffset fco = masm.storeUnalignedSimd128(src.fpu(), addr);
9612 EmitSignalNullCheckTrapSite(masm, ins, fco,
9613 wasm::TrapMachineInsn::Store128);
9614 return;
9616 #endif
9617 emitWasmValueStore(ins, type, narrowingOp, src, addr);
9620 void CodeGenerator::visitWasmStoreElement(LWasmStoreElement* ins) {
9621 MIRType type = ins->type();
9622 MNarrowingOp narrowingOp = ins->narrowingOp();
9623 Scale scale = ins->scale();
9624 Register base = ToRegister(ins->base());
9625 Register index = ToRegister(ins->index());
9626 AnyRegister src = ToAnyRegister(ins->value());
9627 if (type != MIRType::Int32) {
9628 MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
9631 #ifdef ENABLE_WASM_SIMD
9632 if (type == MIRType::Simd128) {
9633 Register temp = ToRegister(ins->temp0());
9634 masm.movePtr(index, temp);
9635 masm.lshiftPtr(Imm32(4), temp);
9636 FaultingCodeOffset fco = masm.storeUnalignedSimd128(
9637 src.fpu(), BaseIndex(base, temp, Scale::TimesOne));
9638 EmitSignalNullCheckTrapSite(masm, ins, fco,
9639 wasm::TrapMachineInsn::Store128);
9640 return;
9642 #endif
9643 emitWasmValueStore(ins, type, narrowingOp, src,
9644 BaseIndex(base, index, scale));
9647 void CodeGenerator::visitWasmLoadTableElement(LWasmLoadTableElement* ins) {
9648 Register elements = ToRegister(ins->elements());
9649 Register index = ToRegister(ins->index());
9650 Register output = ToRegister(ins->output());
9651 masm.loadPtr(BaseIndex(elements, index, ScalePointer), output);
9654 void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
9655 masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
9656 masm.addPtr(Imm32(int32_t(ins->offset())), ToRegister(ins->output()));
9659 void CodeGenerator::visitWasmDerivedIndexPointer(
9660 LWasmDerivedIndexPointer* ins) {
9661 Register base = ToRegister(ins->base());
9662 Register index = ToRegister(ins->index());
9663 Register output = ToRegister(ins->output());
9664 masm.computeEffectiveAddress(BaseIndex(base, index, ins->scale()), output);
9667 void CodeGenerator::visitWasmStoreRef(LWasmStoreRef* ins) {
9668 Register instance = ToRegister(ins->instance());
9669 Register valueBase = ToRegister(ins->valueBase());
9670 size_t offset = ins->offset();
9671 Register value = ToRegister(ins->value());
9672 Register temp = ToRegister(ins->temp0());
9674 if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
9675 Label skipPreBarrier;
9676 wasm::EmitWasmPreBarrierGuard(
9677 masm, instance, temp, Address(valueBase, offset), &skipPreBarrier,
9678 ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
9679 wasm::EmitWasmPreBarrierCallImmediate(masm, instance, temp, valueBase,
9680 offset);
9681 masm.bind(&skipPreBarrier);
9684 FaultingCodeOffset fco = masm.storePtr(value, Address(valueBase, offset));
9685 EmitSignalNullCheckTrapSite(masm, ins, fco,
9686 wasm::TrapMachineInsnForStoreWord());
9687 // The postbarrier is handled separately.
9690 void CodeGenerator::visitWasmStoreElementRef(LWasmStoreElementRef* ins) {
9691 Register instance = ToRegister(ins->instance());
9692 Register base = ToRegister(ins->base());
9693 Register index = ToRegister(ins->index());
9694 Register value = ToRegister(ins->value());
9695 Register temp0 = ToTempRegisterOrInvalid(ins->temp0());
9696 Register temp1 = ToTempRegisterOrInvalid(ins->temp1());
9698 BaseIndex addr(base, index, ScalePointer);
9700 if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
9701 Label skipPreBarrier;
9702 wasm::EmitWasmPreBarrierGuard(
9703 masm, instance, temp0, addr, &skipPreBarrier,
9704 ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
9705 wasm::EmitWasmPreBarrierCallIndex(masm, instance, temp0, temp1, addr);
9706 masm.bind(&skipPreBarrier);
9709 FaultingCodeOffset fco = masm.storePtr(value, addr);
9710 EmitSignalNullCheckTrapSite(masm, ins, fco,
9711 wasm::TrapMachineInsnForStoreWord());
9712 // The postbarrier is handled separately.
9715 // Out-of-line path to update the store buffer for wasm references.
9716 class OutOfLineWasmCallPostWriteBarrierImmediate
9717 : public OutOfLineCodeBase<CodeGenerator> {
9718 LInstruction* lir_;
9719 Register valueBase_;
9720 Register temp_;
9721 uint32_t valueOffset_;
9723 public:
9724 OutOfLineWasmCallPostWriteBarrierImmediate(LInstruction* lir,
9725 Register valueBase, Register temp,
9726 uint32_t valueOffset)
9727 : lir_(lir),
9728 valueBase_(valueBase),
9729 temp_(temp),
9730 valueOffset_(valueOffset) {}
9732 void accept(CodeGenerator* codegen) override {
9733 codegen->visitOutOfLineWasmCallPostWriteBarrierImmediate(this);
9736 LInstruction* lir() const { return lir_; }
9737 Register valueBase() const { return valueBase_; }
9738 Register temp() const { return temp_; }
9739 uint32_t valueOffset() const { return valueOffset_; }
9742 void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrierImmediate(
9743 OutOfLineWasmCallPostWriteBarrierImmediate* ool) {
9744 saveLiveVolatile(ool->lir());
9745 masm.Push(InstanceReg);
9746 int32_t framePushedAfterInstance = masm.framePushed();
9748 // Fold the value offset into the value base
9749 Register valueAddr = ool->valueBase();
9750 Register temp = ool->temp();
9751 masm.computeEffectiveAddress(Address(valueAddr, ool->valueOffset()), temp);
9753 // Call Instance::postBarrier
9754 masm.setupWasmABICall();
9755 masm.passABIArg(InstanceReg);
9756 masm.passABIArg(temp);
9757 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
9758 masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
9759 mozilla::Some(instanceOffset), ABIType::General);
9761 masm.Pop(InstanceReg);
9762 restoreLiveVolatile(ool->lir());
9764 masm.jump(ool->rejoin());
9767 void CodeGenerator::visitWasmPostWriteBarrierImmediate(
9768 LWasmPostWriteBarrierImmediate* lir) {
9769 Register object = ToRegister(lir->object());
9770 Register value = ToRegister(lir->value());
9771 Register valueBase = ToRegister(lir->valueBase());
9772 Register temp = ToRegister(lir->temp0());
9773 MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
9774 auto* ool = new (alloc()) OutOfLineWasmCallPostWriteBarrierImmediate(
9775 lir, valueBase, temp, lir->valueOffset());
9776 addOutOfLineCode(ool, lir->mir());
9778 wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
9779 ool->rejoin());
9780 masm.jump(ool->entry());
9781 masm.bind(ool->rejoin());
9784 // Out-of-line path to update the store buffer for wasm references.
9785 class OutOfLineWasmCallPostWriteBarrierIndex
9786 : public OutOfLineCodeBase<CodeGenerator> {
9787 LInstruction* lir_;
9788 Register valueBase_;
9789 Register index_;
9790 Register temp_;
9791 uint32_t elemSize_;
9793 public:
9794 OutOfLineWasmCallPostWriteBarrierIndex(LInstruction* lir, Register valueBase,
9795 Register index, Register temp,
9796 uint32_t elemSize)
9797 : lir_(lir),
9798 valueBase_(valueBase),
9799 index_(index),
9800 temp_(temp),
9801 elemSize_(elemSize) {
9802 MOZ_ASSERT(elemSize == 1 || elemSize == 2 || elemSize == 4 ||
9803 elemSize == 8 || elemSize == 16);
9806 void accept(CodeGenerator* codegen) override {
9807 codegen->visitOutOfLineWasmCallPostWriteBarrierIndex(this);
9810 LInstruction* lir() const { return lir_; }
9811 Register valueBase() const { return valueBase_; }
9812 Register index() const { return index_; }
9813 Register temp() const { return temp_; }
9814 uint32_t elemSize() const { return elemSize_; }
9817 void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrierIndex(
9818 OutOfLineWasmCallPostWriteBarrierIndex* ool) {
9819 saveLiveVolatile(ool->lir());
9820 masm.Push(InstanceReg);
9821 int32_t framePushedAfterInstance = masm.framePushed();
9823 // Fold the value offset into the value base
9824 Register temp = ool->temp();
9825 if (ool->elemSize() == 16) {
9826 masm.movePtr(ool->index(), temp);
9827 masm.lshiftPtr(Imm32(4), temp);
9828 masm.addPtr(ool->valueBase(), temp);
9829 } else {
9830 masm.computeEffectiveAddress(BaseIndex(ool->valueBase(), ool->index(),
9831 ScaleFromElemWidth(ool->elemSize())),
9832 temp);
9835 // Call Instance::postBarrier
9836 masm.setupWasmABICall();
9837 masm.passABIArg(InstanceReg);
9838 masm.passABIArg(temp);
9839 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
9840 masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
9841 mozilla::Some(instanceOffset), ABIType::General);
9843 masm.Pop(InstanceReg);
9844 restoreLiveVolatile(ool->lir());
9846 masm.jump(ool->rejoin());
9849 void CodeGenerator::visitWasmPostWriteBarrierIndex(
9850 LWasmPostWriteBarrierIndex* lir) {
9851 Register object = ToRegister(lir->object());
9852 Register value = ToRegister(lir->value());
9853 Register valueBase = ToRegister(lir->valueBase());
9854 Register index = ToRegister(lir->index());
9855 Register temp = ToRegister(lir->temp0());
9856 MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
9857 auto* ool = new (alloc()) OutOfLineWasmCallPostWriteBarrierIndex(
9858 lir, valueBase, index, temp, lir->elemSize());
9859 addOutOfLineCode(ool, lir->mir());
9861 wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
9862 ool->rejoin());
9863 masm.jump(ool->entry());
9864 masm.bind(ool->rejoin());
9867 void CodeGenerator::visitWasmLoadSlotI64(LWasmLoadSlotI64* ins) {
9868 Register container = ToRegister(ins->containerRef());
9869 Address addr(container, ins->offset());
9870 Register64 output = ToOutRegister64(ins);
9871 // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
9872 // transaction will always trap before the other, so it seems safest to
9873 // register both of them as potentially trapping.
9874 #ifdef JS_64BIT
9875 FaultingCodeOffset fco = masm.load64(addr, output);
9876 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
9877 #else
9878 FaultingCodeOffsetPair fcop = masm.load64(addr, output);
9879 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9880 wasm::TrapMachineInsn::Load32);
9881 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9882 wasm::TrapMachineInsn::Load32);
9883 #endif
9886 void CodeGenerator::visitWasmLoadElementI64(LWasmLoadElementI64* ins) {
9887 Register base = ToRegister(ins->base());
9888 Register index = ToRegister(ins->index());
9889 BaseIndex addr(base, index, Scale::TimesEight);
9890 Register64 output = ToOutRegister64(ins);
9891 // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
9892 // transaction will always trap before the other, so it seems safest to
9893 // register both of them as potentially trapping.
9894 #ifdef JS_64BIT
9895 FaultingCodeOffset fco = masm.load64(addr, output);
9896 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
9897 #else
9898 FaultingCodeOffsetPair fcop = masm.load64(addr, output);
9899 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9900 wasm::TrapMachineInsn::Load32);
9901 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9902 wasm::TrapMachineInsn::Load32);
9903 #endif
9906 void CodeGenerator::visitWasmStoreSlotI64(LWasmStoreSlotI64* ins) {
9907 Register container = ToRegister(ins->containerRef());
9908 Address addr(container, ins->offset());
9909 Register64 value = ToRegister64(ins->value());
9910 // Either 1 or 2 words. As above we register both transactions in the
9911 // 2-word case.
9912 #ifdef JS_64BIT
9913 FaultingCodeOffset fco = masm.store64(value, addr);
9914 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
9915 #else
9916 FaultingCodeOffsetPair fcop = masm.store64(value, addr);
9917 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9918 wasm::TrapMachineInsn::Store32);
9919 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9920 wasm::TrapMachineInsn::Store32);
9921 #endif
9924 void CodeGenerator::visitWasmStoreElementI64(LWasmStoreElementI64* ins) {
9925 Register base = ToRegister(ins->base());
9926 Register index = ToRegister(ins->index());
9927 BaseIndex addr(base, index, Scale::TimesEight);
9928 Register64 value = ToRegister64(ins->value());
9929 // Either 1 or 2 words. As above we register both transactions in the
9930 // 2-word case.
9931 #ifdef JS_64BIT
9932 FaultingCodeOffset fco = masm.store64(value, addr);
9933 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
9934 #else
9935 FaultingCodeOffsetPair fcop = masm.store64(value, addr);
9936 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9937 wasm::TrapMachineInsn::Store32);
9938 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9939 wasm::TrapMachineInsn::Store32);
9940 #endif
9943 void CodeGenerator::visitArrayBufferByteLength(LArrayBufferByteLength* lir) {
9944 Register obj = ToRegister(lir->object());
9945 Register out = ToRegister(lir->output());
9946 masm.loadArrayBufferByteLengthIntPtr(obj, out);
9949 void CodeGenerator::visitArrayBufferViewLength(LArrayBufferViewLength* lir) {
9950 Register obj = ToRegister(lir->object());
9951 Register out = ToRegister(lir->output());
9952 masm.loadArrayBufferViewLengthIntPtr(obj, out);
9955 void CodeGenerator::visitArrayBufferViewByteOffset(
9956 LArrayBufferViewByteOffset* lir) {
9957 Register obj = ToRegister(lir->object());
9958 Register out = ToRegister(lir->output());
9959 masm.loadArrayBufferViewByteOffsetIntPtr(obj, out);
9962 void CodeGenerator::visitArrayBufferViewElements(
9963 LArrayBufferViewElements* lir) {
9964 Register obj = ToRegister(lir->object());
9965 Register out = ToRegister(lir->output());
9966 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), out);
9969 void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
9970 Register obj = ToRegister(lir->object());
9971 Register out = ToRegister(lir->output());
9973 masm.typedArrayElementSize(obj, out);
9976 void CodeGenerator::visitResizableTypedArrayByteOffsetMaybeOutOfBounds(
9977 LResizableTypedArrayByteOffsetMaybeOutOfBounds* lir) {
9978 Register obj = ToRegister(lir->object());
9979 Register out = ToRegister(lir->output());
9980 Register temp = ToRegister(lir->temp0());
9982 masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, out, temp);
9985 void CodeGenerator::visitResizableTypedArrayLength(
9986 LResizableTypedArrayLength* lir) {
9987 Register obj = ToRegister(lir->object());
9988 Register out = ToRegister(lir->output());
9989 Register temp = ToRegister(lir->temp0());
9991 masm.loadResizableTypedArrayLengthIntPtr(lir->synchronization(), obj, out,
9992 temp);
9995 void CodeGenerator::visitResizableDataViewByteLength(
9996 LResizableDataViewByteLength* lir) {
9997 Register obj = ToRegister(lir->object());
9998 Register out = ToRegister(lir->output());
9999 Register temp = ToRegister(lir->temp0());
10001 masm.loadResizableDataViewByteLengthIntPtr(lir->synchronization(), obj, out,
10002 temp);
10005 void CodeGenerator::visitGrowableSharedArrayBufferByteLength(
10006 LGrowableSharedArrayBufferByteLength* lir) {
10007 Register obj = ToRegister(lir->object());
10008 Register out = ToRegister(lir->output());
10010 // Explicit |byteLength| accesses are seq-consistent atomic loads.
10011 auto sync = Synchronization::Load();
10013 masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, out);
10016 void CodeGenerator::visitGuardResizableArrayBufferViewInBounds(
10017 LGuardResizableArrayBufferViewInBounds* lir) {
10018 Register obj = ToRegister(lir->object());
10019 Register temp = ToRegister(lir->temp0());
10021 Label bail;
10022 masm.branchIfResizableArrayBufferViewOutOfBounds(obj, temp, &bail);
10023 bailoutFrom(&bail, lir->snapshot());
10026 void CodeGenerator::visitGuardResizableArrayBufferViewInBoundsOrDetached(
10027 LGuardResizableArrayBufferViewInBoundsOrDetached* lir) {
10028 Register obj = ToRegister(lir->object());
10029 Register temp = ToRegister(lir->temp0());
10031 Label done, bail;
10032 masm.branchIfResizableArrayBufferViewInBounds(obj, temp, &done);
10033 masm.branchIfHasAttachedArrayBuffer(obj, temp, &bail);
10034 masm.bind(&done);
10035 bailoutFrom(&bail, lir->snapshot());
10038 void CodeGenerator::visitGuardHasAttachedArrayBuffer(
10039 LGuardHasAttachedArrayBuffer* lir) {
10040 Register obj = ToRegister(lir->object());
10041 Register temp = ToRegister(lir->temp0());
10043 Label bail;
10044 masm.branchIfHasDetachedArrayBuffer(obj, temp, &bail);
10045 bailoutFrom(&bail, lir->snapshot());
10048 class OutOfLineGuardNumberToIntPtrIndex
10049 : public OutOfLineCodeBase<CodeGenerator> {
10050 LGuardNumberToIntPtrIndex* lir_;
10052 public:
10053 explicit OutOfLineGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex* lir)
10054 : lir_(lir) {}
10056 void accept(CodeGenerator* codegen) override {
10057 codegen->visitOutOfLineGuardNumberToIntPtrIndex(this);
10059 LGuardNumberToIntPtrIndex* lir() const { return lir_; }
10062 void CodeGenerator::visitGuardNumberToIntPtrIndex(
10063 LGuardNumberToIntPtrIndex* lir) {
10064 FloatRegister input = ToFloatRegister(lir->input());
10065 Register output = ToRegister(lir->output());
10067 if (!lir->mir()->supportOOB()) {
10068 Label bail;
10069 masm.convertDoubleToPtr(input, output, &bail, false);
10070 bailoutFrom(&bail, lir->snapshot());
10071 return;
10074 auto* ool = new (alloc()) OutOfLineGuardNumberToIntPtrIndex(lir);
10075 addOutOfLineCode(ool, lir->mir());
10077 masm.convertDoubleToPtr(input, output, ool->entry(), false);
10078 masm.bind(ool->rejoin());
10081 void CodeGenerator::visitOutOfLineGuardNumberToIntPtrIndex(
10082 OutOfLineGuardNumberToIntPtrIndex* ool) {
10083 // Substitute the invalid index with an arbitrary out-of-bounds index.
10084 masm.movePtr(ImmWord(-1), ToRegister(ool->lir()->output()));
10085 masm.jump(ool->rejoin());
10088 void CodeGenerator::visitStringLength(LStringLength* lir) {
10089 Register input = ToRegister(lir->string());
10090 Register output = ToRegister(lir->output());
10092 masm.loadStringLength(input, output);
10095 void CodeGenerator::visitMinMaxI(LMinMaxI* ins) {
10096 Register first = ToRegister(ins->first());
10097 Register output = ToRegister(ins->output());
10099 MOZ_ASSERT(first == output);
10101 Assembler::Condition cond =
10102 ins->mir()->isMax() ? Assembler::GreaterThan : Assembler::LessThan;
10104 if (ins->second()->isConstant()) {
10105 Label done;
10106 masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done);
10107 masm.move32(Imm32(ToInt32(ins->second())), output);
10108 masm.bind(&done);
10109 } else {
10110 Register second = ToRegister(ins->second());
10111 masm.cmp32Move32(cond, second, first, second, output);
10115 void CodeGenerator::visitMinMaxArrayI(LMinMaxArrayI* ins) {
10116 Register array = ToRegister(ins->array());
10117 Register output = ToRegister(ins->output());
10118 Register temp1 = ToRegister(ins->temp1());
10119 Register temp2 = ToRegister(ins->temp2());
10120 Register temp3 = ToRegister(ins->temp3());
10121 bool isMax = ins->isMax();
10123 Label bail;
10124 masm.minMaxArrayInt32(array, output, temp1, temp2, temp3, isMax, &bail);
10125 bailoutFrom(&bail, ins->snapshot());
10128 void CodeGenerator::visitMinMaxArrayD(LMinMaxArrayD* ins) {
10129 Register array = ToRegister(ins->array());
10130 FloatRegister output = ToFloatRegister(ins->output());
10131 Register temp1 = ToRegister(ins->temp1());
10132 Register temp2 = ToRegister(ins->temp2());
10133 FloatRegister floatTemp = ToFloatRegister(ins->floatTemp());
10134 bool isMax = ins->isMax();
10136 Label bail;
10137 masm.minMaxArrayNumber(array, output, floatTemp, temp1, temp2, isMax, &bail);
10138 bailoutFrom(&bail, ins->snapshot());
10141 // For Abs*, lowering will have tied input to output on platforms where that is
10142 // sensible, and otherwise left them untied.
10144 void CodeGenerator::visitAbsI(LAbsI* ins) {
10145 Register input = ToRegister(ins->input());
10146 Register output = ToRegister(ins->output());
10148 if (ins->mir()->fallible()) {
10149 Label positive;
10150 if (input != output) {
10151 masm.move32(input, output);
10153 masm.branchTest32(Assembler::NotSigned, output, output, &positive);
10154 Label bail;
10155 masm.branchNeg32(Assembler::Overflow, output, &bail);
10156 bailoutFrom(&bail, ins->snapshot());
10157 masm.bind(&positive);
10158 } else {
10159 masm.abs32(input, output);
10163 void CodeGenerator::visitAbsD(LAbsD* ins) {
10164 masm.absDouble(ToFloatRegister(ins->input()), ToFloatRegister(ins->output()));
10167 void CodeGenerator::visitAbsF(LAbsF* ins) {
10168 masm.absFloat32(ToFloatRegister(ins->input()),
10169 ToFloatRegister(ins->output()));
10172 void CodeGenerator::visitPowII(LPowII* ins) {
10173 Register value = ToRegister(ins->value());
10174 Register power = ToRegister(ins->power());
10175 Register output = ToRegister(ins->output());
10176 Register temp0 = ToRegister(ins->temp0());
10177 Register temp1 = ToRegister(ins->temp1());
10179 Label bailout;
10180 masm.pow32(value, power, output, temp0, temp1, &bailout);
10181 bailoutFrom(&bailout, ins->snapshot());
10184 void CodeGenerator::visitPowI(LPowI* ins) {
10185 FloatRegister value = ToFloatRegister(ins->value());
10186 Register power = ToRegister(ins->power());
10188 using Fn = double (*)(double x, int32_t y);
10189 masm.setupAlignedABICall();
10190 masm.passABIArg(value, ABIType::Float64);
10191 masm.passABIArg(power);
10193 masm.callWithABI<Fn, js::powi>(ABIType::Float64);
10194 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
10197 void CodeGenerator::visitPowD(LPowD* ins) {
10198 FloatRegister value = ToFloatRegister(ins->value());
10199 FloatRegister power = ToFloatRegister(ins->power());
10201 using Fn = double (*)(double x, double y);
10202 masm.setupAlignedABICall();
10203 masm.passABIArg(value, ABIType::Float64);
10204 masm.passABIArg(power, ABIType::Float64);
10205 masm.callWithABI<Fn, ecmaPow>(ABIType::Float64);
10207 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
10210 void CodeGenerator::visitPowOfTwoI(LPowOfTwoI* ins) {
10211 Register power = ToRegister(ins->power());
10212 Register output = ToRegister(ins->output());
10214 uint32_t base = ins->base();
10215 MOZ_ASSERT(mozilla::IsPowerOfTwo(base));
10217 uint32_t n = mozilla::FloorLog2(base);
10218 MOZ_ASSERT(n != 0);
10220 // Hacker's Delight, 2nd edition, theorem D2.
10221 auto ceilingDiv = [](uint32_t x, uint32_t y) { return (x + y - 1) / y; };
10223 // Take bailout if |power| is greater-or-equals |log_y(2^31)| or is negative.
10224 // |2^(n*y) < 2^31| must hold, hence |n*y < 31| resp. |y < 31/n|.
10226 // Note: it's important for this condition to match the code in CacheIR.cpp
10227 // (CanAttachInt32Pow) to prevent failure loops.
10228 bailoutCmp32(Assembler::AboveOrEqual, power, Imm32(ceilingDiv(31, n)),
10229 ins->snapshot());
10231 // Compute (2^n)^y as 2^(n*y) using repeated shifts. We could directly scale
10232 // |power| and perform a single shift, but due to the lack of necessary
10233 // MacroAssembler functionality, like multiplying a register with an
10234 // immediate, we restrict the number of generated shift instructions when
10235 // lowering this operation.
10236 masm.move32(Imm32(1), output);
10237 do {
10238 masm.lshift32(power, output);
10239 n--;
10240 } while (n > 0);
10243 void CodeGenerator::visitSqrtD(LSqrtD* ins) {
10244 FloatRegister input = ToFloatRegister(ins->input());
10245 FloatRegister output = ToFloatRegister(ins->output());
10246 masm.sqrtDouble(input, output);
10249 void CodeGenerator::visitSqrtF(LSqrtF* ins) {
10250 FloatRegister input = ToFloatRegister(ins->input());
10251 FloatRegister output = ToFloatRegister(ins->output());
10252 masm.sqrtFloat32(input, output);
10255 void CodeGenerator::visitSignI(LSignI* ins) {
10256 Register input = ToRegister(ins->input());
10257 Register output = ToRegister(ins->output());
10258 masm.signInt32(input, output);
10261 void CodeGenerator::visitSignD(LSignD* ins) {
10262 FloatRegister input = ToFloatRegister(ins->input());
10263 FloatRegister output = ToFloatRegister(ins->output());
10264 masm.signDouble(input, output);
10267 void CodeGenerator::visitSignDI(LSignDI* ins) {
10268 FloatRegister input = ToFloatRegister(ins->input());
10269 FloatRegister temp = ToFloatRegister(ins->temp0());
10270 Register output = ToRegister(ins->output());
10272 Label bail;
10273 masm.signDoubleToInt32(input, output, temp, &bail);
10274 bailoutFrom(&bail, ins->snapshot());
10277 void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) {
10278 FloatRegister input = ToFloatRegister(ins->input());
10279 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
10281 UnaryMathFunction fun = ins->mir()->function();
10282 UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);
10284 masm.setupAlignedABICall();
10286 masm.passABIArg(input, ABIType::Float64);
10287 masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
10288 ABIType::Float64);
10291 void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) {
10292 FloatRegister input = ToFloatRegister(ins->input());
10293 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);
10295 masm.setupAlignedABICall();
10296 masm.passABIArg(input, ABIType::Float32);
10298 using Fn = float (*)(float x);
10299 Fn funptr = nullptr;
10300 CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check;
10301 switch (ins->mir()->function()) {
10302 case UnaryMathFunction::Floor:
10303 funptr = floorf;
10304 check = CheckUnsafeCallWithABI::DontCheckOther;
10305 break;
10306 case UnaryMathFunction::Round:
10307 funptr = math_roundf_impl;
10308 break;
10309 case UnaryMathFunction::Trunc:
10310 funptr = math_truncf_impl;
10311 break;
10312 case UnaryMathFunction::Ceil:
10313 funptr = ceilf;
10314 check = CheckUnsafeCallWithABI::DontCheckOther;
10315 break;
10316 default:
10317 MOZ_CRASH("Unknown or unsupported float32 math function");
10320 masm.callWithABI(DynamicFunction<Fn>(funptr), ABIType::Float32, check);
10323 void CodeGenerator::visitModD(LModD* ins) {
10324 MOZ_ASSERT(!gen->compilingWasm());
10326 FloatRegister lhs = ToFloatRegister(ins->lhs());
10327 FloatRegister rhs = ToFloatRegister(ins->rhs());
10329 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
10331 using Fn = double (*)(double a, double b);
10332 masm.setupAlignedABICall();
10333 masm.passABIArg(lhs, ABIType::Float64);
10334 masm.passABIArg(rhs, ABIType::Float64);
10335 masm.callWithABI<Fn, NumberMod>(ABIType::Float64);
// Emit |lhs % divisor| for doubles where |divisor| is a power of two, without
// calling into fmod(). The sign of |lhs| (including -0) is restored at the end
// via copySignDouble, matching the |copysign(..., n)| formula below.
10338 void CodeGenerator::visitModPowTwoD(LModPowTwoD* ins) {
10339 FloatRegister lhs = ToFloatRegister(ins->lhs());
10340 uint32_t divisor = ins->divisor();
10341 MOZ_ASSERT(mozilla::IsPowerOfTwo(divisor));
10343 FloatRegister output = ToFloatRegister(ins->output());
10345 // Compute |n % d| using |copysign(n - (d * trunc(n / d)), n)|.
10347 // This doesn't work if |d| isn't a power of two, because we may lose too much
10348 // precision. For example |Number.MAX_VALUE % 3 == 2|, but
10349 // |3 * trunc(Number.MAX_VALUE / 3) == Infinity|.
10351 Label done;
10353 ScratchDoubleScope scratch(masm);
10355 // Subnormals can lead to performance degradation, which can make calling
10356 // |fmod| faster than this inline implementation. Work around this issue by
10357 // directly returning the input for any value in the interval ]-1, +1[.
// |scratch| holds +1.0 and |output| temporarily holds -1.0 for the interval
// comparison; |output| is overwritten afterwards, so this is safe.
10358 Label notSubnormal;
10359 masm.loadConstantDouble(1.0, scratch);
10360 masm.loadConstantDouble(-1.0, output);
10361 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, lhs, scratch,
10362 &notSubnormal);
10363 masm.branchDouble(Assembler::DoubleLessThanOrEqual, lhs, output,
10364 &notSubnormal);
// |lhs| is in ]-1, +1[ (or NaN, which falls through the ordered branches
// above), so |lhs % d == lhs| — return the input unchanged.
10366 masm.moveDouble(lhs, output);
10367 masm.jump(&done);
10369 masm.bind(&notSubnormal);
10371 if (divisor == 1) {
10372 // The pattern |n % 1 == 0| is used to detect integer numbers. We can skip
10373 // the multiplication by one in this case.
10374 masm.moveDouble(lhs, output);
10375 masm.nearbyIntDouble(RoundingMode::TowardsZero, output, scratch);
10376 masm.subDouble(scratch, output);
10377 } else {
// scratch = trunc(lhs / d) * d; output = lhs - scratch.
10378 masm.loadConstantDouble(1.0 / double(divisor), scratch);
10379 masm.loadConstantDouble(double(divisor), output);
10381 masm.mulDouble(lhs, scratch);
10382 masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
10383 masm.mulDouble(output, scratch);
10385 masm.moveDouble(lhs, output);
10386 masm.subDouble(scratch, output);
// Combine the computed magnitude with the sign of |lhs|, so e.g. a zero
// result keeps the sign of the dividend.
10390 masm.copySignDouble(output, lhs, output);
10391 masm.bind(&done);
// Emit a call to the wasm ModD builtin (double remainder) through the native
// ABI. InstanceReg is spilled around the call; its stack offset is handed to
// callWithABI — presumably so the instance can be located/restored at the
// callsite (TODO confirm against callWithABI's wasm overload).
10394 void CodeGenerator::visitWasmBuiltinModD(LWasmBuiltinModD* ins) {
10395 masm.Push(InstanceReg);
10396 int32_t framePushedAfterInstance = masm.framePushed();
10398 FloatRegister lhs = ToFloatRegister(ins->lhs());
10399 FloatRegister rhs = ToFloatRegister(ins->rhs());
// The builtin returns its result in the ABI double-return register.
10401 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
10403 masm.setupWasmABICall();
10404 masm.passABIArg(lhs, ABIType::Float64);
10405 masm.passABIArg(rhs, ABIType::Float64);
10407 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
10408 masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD,
10409 mozilla::Some(instanceOffset), ABIType::Float64);
10411 masm.Pop(InstanceReg);
// Emit BigInt addition. Fast paths: a zero operand returns the other operand
// unchanged; when both operands fit in one signed pointer-sized digit and the
// sum doesn't overflow, the result is computed inline and a fresh BigInt is
// allocated for it. Everything else falls back to BigInt::add in the VM.
10414 void CodeGenerator::visitBigIntAdd(LBigIntAdd* ins) {
10415 Register lhs = ToRegister(ins->lhs());
10416 Register rhs = ToRegister(ins->rhs());
10417 Register temp1 = ToRegister(ins->temp1());
10418 Register temp2 = ToRegister(ins->temp2());
10419 Register output = ToRegister(ins->output());
10421 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10422 auto* ool = oolCallVM<Fn, BigInt::add>(ins, ArgList(lhs, rhs),
10423 StoreRegisterTo(output));
10425 // 0n + x == x
10426 Label lhsNonZero;
10427 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10428 masm.movePtr(rhs, output);
10429 masm.jump(ool->rejoin());
10430 masm.bind(&lhsNonZero);
10432 // x + 0n == x
10433 Label rhsNonZero;
10434 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10435 masm.movePtr(lhs, output);
10436 masm.jump(ool->rejoin());
10437 masm.bind(&rhsNonZero);
10439 // Call into the VM when either operand can't be loaded into a pointer-sized
10440 // register.
10441 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10442 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
// temp1 += temp2; signed overflow means the sum needs more than one digit.
10444 masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10446 // Create and return the result.
// newGCBigInt uses temp2 as scratch; temp1 still holds the result digit.
10447 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10448 masm.initializeBigInt(output, temp1);
10450 masm.bind(ool->rejoin());
// Emit BigInt subtraction. Fast path: |x - 0n == x|; otherwise compute the
// difference inline when both operands fit in one signed pointer-sized digit
// and the subtraction doesn't overflow, falling back to BigInt::sub in the VM.
// NOTE(review): unlike addition there is no |0n - x| fast path, since that
// would require negating |x|.
10453 void CodeGenerator::visitBigIntSub(LBigIntSub* ins) {
10454 Register lhs = ToRegister(ins->lhs());
10455 Register rhs = ToRegister(ins->rhs());
10456 Register temp1 = ToRegister(ins->temp1());
10457 Register temp2 = ToRegister(ins->temp2());
10458 Register output = ToRegister(ins->output());
10460 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10461 auto* ool = oolCallVM<Fn, BigInt::sub>(ins, ArgList(lhs, rhs),
10462 StoreRegisterTo(output));
10464 // x - 0n == x
10465 Label rhsNonZero;
10466 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10467 masm.movePtr(lhs, output);
10468 masm.jump(ool->rejoin());
10469 masm.bind(&rhsNonZero);
10471 // Call into the VM when either operand can't be loaded into a pointer-sized
10472 // register.
10473 masm.loadBigInt(lhs, temp1, ool->entry());
10474 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
// temp1 -= temp2; overflow means the result needs more than one digit.
10476 masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10478 // Create and return the result.
10479 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10480 masm.initializeBigInt(output, temp1);
10482 masm.bind(ool->rejoin());
// Emit BigInt multiplication. Fast paths: a zero operand returns that zero
// BigInt directly; single-digit operands whose product doesn't overflow are
// multiplied inline. Otherwise call BigInt::mul in the VM.
10485 void CodeGenerator::visitBigIntMul(LBigIntMul* ins) {
10486 Register lhs = ToRegister(ins->lhs());
10487 Register rhs = ToRegister(ins->rhs());
10488 Register temp1 = ToRegister(ins->temp1());
10489 Register temp2 = ToRegister(ins->temp2());
10490 Register output = ToRegister(ins->output());
10492 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10493 auto* ool = oolCallVM<Fn, BigInt::mul>(ins, ArgList(lhs, rhs),
10494 StoreRegisterTo(output));
10496 // 0n * x == 0n
10497 Label lhsNonZero;
10498 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10499 masm.movePtr(lhs, output);
10500 masm.jump(ool->rejoin());
10501 masm.bind(&lhsNonZero);
10503 // x * 0n == 0n
10504 Label rhsNonZero;
10505 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10506 masm.movePtr(rhs, output);
10507 masm.jump(ool->rejoin());
10508 masm.bind(&rhsNonZero);
10510 // Call into the VM when either operand can't be loaded into a pointer-sized
10511 // register.
10512 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10513 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
// temp1 *= temp2; overflow means the product needs more than one digit.
10515 masm.branchMulPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10517 // Create and return the result.
10518 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10519 masm.initializeBigInt(output, temp1);
10521 masm.bind(ool->rejoin());
// Emit BigInt division (truncating). Division by zero throws via the VM call.
// Fast paths: |0n / x == 0n| and |x / 1n == x| (the latter mirrors the
// no-allocation behavior of BigInt::div, see comment below). Single-digit
// operands are divided inline, except for the INT{32,64}_MIN / -1 overflow
// case, which goes to the VM.
10524 void CodeGenerator::visitBigIntDiv(LBigIntDiv* ins) {
10525 Register lhs = ToRegister(ins->lhs());
10526 Register rhs = ToRegister(ins->rhs());
10527 Register temp1 = ToRegister(ins->temp1());
10528 Register temp2 = ToRegister(ins->temp2());
10529 Register output = ToRegister(ins->output());
10531 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10532 auto* ool = oolCallVM<Fn, BigInt::div>(ins, ArgList(lhs, rhs),
10533 StoreRegisterTo(output));
10535 // x / 0 throws an error.
10536 if (ins->mir()->canBeDivideByZero()) {
10537 masm.branchIfBigIntIsZero(rhs, ool->entry());
10540 // 0n / x == 0n
10541 Label lhsNonZero;
10542 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10543 masm.movePtr(lhs, output);
10544 masm.jump(ool->rejoin());
10545 masm.bind(&lhsNonZero);
10547 // Call into the VM when either operand can't be loaded into a pointer-sized
10548 // register.
10549 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10550 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10552 // |BigInt::div()| returns |lhs| for |lhs / 1n|, which means there's no
10553 // allocation which might trigger a minor GC to free up nursery space. This
10554 // requires us to apply the same optimization here, otherwise we'd end up with
10555 // always entering the OOL call, because the nursery is never evicted.
10556 Label notOne;
10557 masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(1), &notOne);
10558 masm.movePtr(lhs, output);
10559 masm.jump(ool->rejoin());
10560 masm.bind(&notOne);
// Most negative single-digit value, e.g. INT64_MIN on 64-bit platforms.
10562 static constexpr auto DigitMin = std::numeric_limits<
10563 mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
10565 // Handle an integer overflow from INT{32,64}_MIN / -1.
10566 Label notOverflow;
10567 masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
10568 masm.branchPtr(Assembler::Equal, temp2, ImmWord(-1), ool->entry());
10569 masm.bind(&notOverflow);
10571 emitBigIntDiv(ins, temp1, temp2, output, ool->entry());
10573 masm.bind(ool->rejoin());
// Emit BigInt remainder. Remainder by zero throws via the VM call. Fast
// paths: |0n % x == 0n|, and |lhs| is returned unchanged when its absolute
// value is below |rhs|'s (mirroring BigInt::mod's no-allocation behavior).
// The INT{32,64}_MIN % -1 case is fixed up inline to produce 0.
10576 void CodeGenerator::visitBigIntMod(LBigIntMod* ins) {
10577 Register lhs = ToRegister(ins->lhs());
10578 Register rhs = ToRegister(ins->rhs());
10579 Register temp1 = ToRegister(ins->temp1());
10580 Register temp2 = ToRegister(ins->temp2());
10581 Register output = ToRegister(ins->output());
10583 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10584 auto* ool = oolCallVM<Fn, BigInt::mod>(ins, ArgList(lhs, rhs),
10585 StoreRegisterTo(output));
10587 // x % 0 throws an error.
10588 if (ins->mir()->canBeDivideByZero()) {
10589 masm.branchIfBigIntIsZero(rhs, ool->entry());
10592 // 0n % x == 0n
10593 Label lhsNonZero;
10594 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10595 masm.movePtr(lhs, output);
10596 masm.jump(ool->rejoin());
10597 masm.bind(&lhsNonZero);
10599 // Call into the VM when either operand can't be loaded into a pointer-sized
10600 // register.
// Absolute values first: the |abs(lhs) < abs(rhs)| fast-path check below is
// an unsigned comparison on the magnitudes.
10601 masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
10602 masm.loadBigIntAbsolute(rhs, temp2, ool->entry());
10604 // Similar to the case for BigInt division, we must apply the same allocation
10605 // optimizations as performed in |BigInt::mod()|.
10606 Label notBelow;
10607 masm.branchPtr(Assembler::AboveOrEqual, temp1, temp2, &notBelow);
10608 masm.movePtr(lhs, output);
10609 masm.jump(ool->rejoin());
10610 masm.bind(&notBelow);
10612 // Convert both digits to signed pointer-sized values.
10613 masm.bigIntDigitToSignedPtr(lhs, temp1, ool->entry());
10614 masm.bigIntDigitToSignedPtr(rhs, temp2, ool->entry());
10616 static constexpr auto DigitMin = std::numeric_limits<
10617 mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
10619 // Handle an integer overflow from INT{32,64}_MIN / -1.
// The hardware remainder would trap/overflow here, but mathematically
// |DigitMin % -1 == 0|, so force the dividend to 0 instead of calling out.
10620 Label notOverflow;
10621 masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
10622 masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(-1), &notOverflow);
10623 masm.movePtr(ImmWord(0), temp1);
10624 masm.bind(&notOverflow);
10626 emitBigIntMod(ins, temp1, temp2, output, ool->entry());
10628 masm.bind(ool->rejoin());
// Emit BigInt exponentiation. A negative exponent throws via the VM call.
// Fast paths: |(±1n) ** y|, |x ** 0n == 1n|, |0n ** y == 0n| (y != 0n), and
// |x ** 1n == x|. The general single-digit case runs an inline
// square-and-multiply loop (MacroAssembler::pow32 adapted to pointer-sized
// registers), bailing to the VM on any multiplication overflow or when the
// exponent is >= BigInt::DigitBits (result can't fit one digit).
10631 void CodeGenerator::visitBigIntPow(LBigIntPow* ins) {
10632 Register lhs = ToRegister(ins->lhs());
10633 Register rhs = ToRegister(ins->rhs());
10634 Register temp1 = ToRegister(ins->temp1());
10635 Register temp2 = ToRegister(ins->temp2());
10636 Register output = ToRegister(ins->output());
10638 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10639 auto* ool = oolCallVM<Fn, BigInt::pow>(ins, ArgList(lhs, rhs),
10640 StoreRegisterTo(output));
10642 // x ** -y throws an error.
10643 if (ins->mir()->canBeNegativeExponent()) {
10644 masm.branchIfBigIntIsNegative(rhs, ool->entry());
// Register roles for the loop below; |output| doubles as the exponent
// until the final result is materialized.
10647 Register dest = temp1;
10648 Register base = temp2;
10649 Register exponent = output;
10651 Label done;
10652 masm.movePtr(ImmWord(1), dest); // p = 1
10654 // 1n ** y == 1n
10655 // -1n ** y == 1n when y is even
10656 // -1n ** y == -1n when y is odd
10657 Label lhsNotOne;
10658 masm.branch32(Assembler::Above, Address(lhs, BigInt::offsetOfLength()),
10659 Imm32(1), &lhsNotOne);
10660 masm.loadFirstBigIntDigitOrZero(lhs, base);
10661 masm.branchPtr(Assembler::NotEqual, base, Imm32(1), &lhsNotOne);
10663 masm.loadFirstBigIntDigitOrZero(rhs, exponent);
10665 Label lhsNonNegative;
10666 masm.branchIfBigIntIsNonNegative(lhs, &lhsNonNegative);
// lhs == -1n and exponent is even: fall through to |done| with p == 1.
10667 masm.branchTestPtr(Assembler::Zero, exponent, Imm32(1), &done);
10668 masm.bind(&lhsNonNegative);
// lhs == 1n, or lhs == -1n with odd exponent: the result is lhs itself.
10669 masm.movePtr(lhs, output);
10670 masm.jump(ool->rejoin());
10672 masm.bind(&lhsNotOne);
10674 // x ** 0n == 1n
10675 masm.branchIfBigIntIsZero(rhs, &done);
10677 // 0n ** y == 0n with y != 0n
10678 Label lhsNonZero;
10679 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10681 masm.movePtr(lhs, output);
10682 masm.jump(ool->rejoin());
10684 masm.bind(&lhsNonZero);
10686 // Call into the VM when the exponent can't be loaded into a pointer-sized
10687 // register.
10688 masm.loadBigIntAbsolute(rhs, exponent, ool->entry());
10690 // x ** y with x > 1 and y >= DigitBits can't be pointer-sized.
10691 masm.branchPtr(Assembler::AboveOrEqual, exponent, Imm32(BigInt::DigitBits),
10692 ool->entry());
10694 // x ** 1n == x
10695 Label rhsNotOne;
10696 masm.branch32(Assembler::NotEqual, exponent, Imm32(1), &rhsNotOne);
10698 masm.movePtr(lhs, output);
10699 masm.jump(ool->rejoin());
10701 masm.bind(&rhsNotOne);
10703 // Call into the VM when the base operand can't be loaded into a pointer-sized
10704 // register.
10705 masm.loadBigIntNonZero(lhs, base, ool->entry());
10707 // MacroAssembler::pow32() adjusted to work on pointer-sized registers.
10709 // m = base
10710 // n = exponent
10712 Label start, loop;
10713 masm.jump(&start);
10714 masm.bind(&loop);
10716 // m *= m
10717 masm.branchMulPtr(Assembler::Overflow, base, base, ool->entry());
10719 masm.bind(&start);
10721 // if ((n & 1) != 0) p *= m
10722 Label even;
10723 masm.branchTest32(Assembler::Zero, exponent, Imm32(1), &even);
10724 masm.branchMulPtr(Assembler::Overflow, base, dest, ool->entry());
10725 masm.bind(&even);
10727 // n >>= 1
10728 // if (n == 0) return p
10729 masm.branchRshift32(Assembler::NonZero, Imm32(1), exponent, &loop);
// The accumulated result digit must be in temp1 for initializeBigInt below.
10732 MOZ_ASSERT(temp1 == dest);
10734 // Create and return the result.
10735 masm.bind(&done);
10736 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10737 masm.initializeBigInt(output, temp1);
10739 masm.bind(ool->rejoin());
// Emit BigInt bitwise AND. Fast paths: a zero operand yields that zero BigInt
// directly; single-digit operands are combined inline with andPtr (which
// can't overflow, so no overflow branch is needed). Heap-digit operands fall
// back to BigInt::bitAnd in the VM.
10742 void CodeGenerator::visitBigIntBitAnd(LBigIntBitAnd* ins) {
10743 Register lhs = ToRegister(ins->lhs());
10744 Register rhs = ToRegister(ins->rhs());
10745 Register temp1 = ToRegister(ins->temp1());
10746 Register temp2 = ToRegister(ins->temp2());
10747 Register output = ToRegister(ins->output());
10749 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10750 auto* ool = oolCallVM<Fn, BigInt::bitAnd>(ins, ArgList(lhs, rhs),
10751 StoreRegisterTo(output));
10753 // 0n & x == 0n
10754 Label lhsNonZero;
10755 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10756 masm.movePtr(lhs, output);
10757 masm.jump(ool->rejoin());
10758 masm.bind(&lhsNonZero);
10760 // x & 0n == 0n
10761 Label rhsNonZero;
10762 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10763 masm.movePtr(rhs, output);
10764 masm.jump(ool->rejoin());
10765 masm.bind(&rhsNonZero);
10767 // Call into the VM when either operand can't be loaded into a pointer-sized
10768 // register.
10769 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10770 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10772 masm.andPtr(temp2, temp1);
10774 // Create and return the result.
10775 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10776 masm.initializeBigInt(output, temp1);
10778 masm.bind(ool->rejoin());
// Emit BigInt bitwise OR. Fast paths: a zero operand returns the *other*
// operand unchanged; single-digit operands are combined inline with orPtr.
// Heap-digit operands fall back to BigInt::bitOr in the VM.
10781 void CodeGenerator::visitBigIntBitOr(LBigIntBitOr* ins) {
10782 Register lhs = ToRegister(ins->lhs());
10783 Register rhs = ToRegister(ins->rhs());
10784 Register temp1 = ToRegister(ins->temp1());
10785 Register temp2 = ToRegister(ins->temp2());
10786 Register output = ToRegister(ins->output());
10788 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10789 auto* ool = oolCallVM<Fn, BigInt::bitOr>(ins, ArgList(lhs, rhs),
10790 StoreRegisterTo(output));
10792 // 0n | x == x
10793 Label lhsNonZero;
10794 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10795 masm.movePtr(rhs, output);
10796 masm.jump(ool->rejoin());
10797 masm.bind(&lhsNonZero);
10799 // x | 0n == x
10800 Label rhsNonZero;
10801 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10802 masm.movePtr(lhs, output);
10803 masm.jump(ool->rejoin());
10804 masm.bind(&rhsNonZero);
10806 // Call into the VM when either operand can't be loaded into a pointer-sized
10807 // register.
10808 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10809 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10811 masm.orPtr(temp2, temp1);
10813 // Create and return the result.
10814 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10815 masm.initializeBigInt(output, temp1);
10817 masm.bind(ool->rejoin());
// Emit BigInt bitwise XOR. Fast paths: a zero operand returns the other
// operand unchanged; single-digit operands are combined inline with xorPtr.
// Heap-digit operands fall back to BigInt::bitXor in the VM.
10820 void CodeGenerator::visitBigIntBitXor(LBigIntBitXor* ins) {
10821 Register lhs = ToRegister(ins->lhs());
10822 Register rhs = ToRegister(ins->rhs());
10823 Register temp1 = ToRegister(ins->temp1());
10824 Register temp2 = ToRegister(ins->temp2());
10825 Register output = ToRegister(ins->output());
10827 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10828 auto* ool = oolCallVM<Fn, BigInt::bitXor>(ins, ArgList(lhs, rhs),
10829 StoreRegisterTo(output));
10831 // 0n ^ x == x
10832 Label lhsNonZero;
10833 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10834 masm.movePtr(rhs, output);
10835 masm.jump(ool->rejoin());
10836 masm.bind(&lhsNonZero);
10838 // x ^ 0n == x
10839 Label rhsNonZero;
10840 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10841 masm.movePtr(lhs, output);
10842 masm.jump(ool->rejoin());
10843 masm.bind(&rhsNonZero);
10845 // Call into the VM when either operand can't be loaded into a pointer-sized
10846 // register.
10847 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10848 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10850 masm.xorPtr(temp2, temp1);
10852 // Create and return the result.
10853 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10854 masm.initializeBigInt(output, temp1);
10856 masm.bind(ool->rejoin());
// Emit BigInt left shift, inlined for a single-digit |lhs|. Shift counts are
// handled by magnitude: a negative |rhs| means an arithmetic right shift
// (rounding toward negative infinity for negative |lhs|), a non-negative
// |rhs| is a left shift that goes to the VM if any bit would be shifted out.
// Shift magnitudes >= DigitBits are resolved to 0n/-1n or sent to the VM.
// The result magnitude is built in temp1 and the sign bit of |lhs| is copied
// onto the freshly allocated result at the end.
10859 void CodeGenerator::visitBigIntLsh(LBigIntLsh* ins) {
10860 Register lhs = ToRegister(ins->lhs());
10861 Register rhs = ToRegister(ins->rhs());
10862 Register temp1 = ToRegister(ins->temp1());
10863 Register temp2 = ToRegister(ins->temp2());
10864 Register temp3 = ToRegister(ins->temp3());
10865 Register output = ToRegister(ins->output());
10867 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10868 auto* ool = oolCallVM<Fn, BigInt::lsh>(ins, ArgList(lhs, rhs),
10869 StoreRegisterTo(output));
10871 // 0n << x == 0n
10872 Label lhsNonZero;
10873 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10874 masm.movePtr(lhs, output);
10875 masm.jump(ool->rejoin());
10876 masm.bind(&lhsNonZero);
10878 // x << 0n == x
10879 Label rhsNonZero;
10880 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10881 masm.movePtr(lhs, output);
10882 masm.jump(ool->rejoin());
10883 masm.bind(&rhsNonZero);
10885 // Inline |BigInt::lsh| for the case when |lhs| contains a single digit.
// temp2 = |abs(rhs)|; jumps to |rhsTooLarge| when the magnitude doesn't fit.
10887 Label rhsTooLarge;
10888 masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);
10890 // Call into the VM when the left-hand side operand can't be loaded into a
10891 // pointer-sized register.
10892 masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
10894 // Handle shifts exceeding |BigInt::DigitBits| first.
10895 Label shift, create;
10896 masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
10898 masm.bind(&rhsTooLarge);
10900 // x << DigitBits with x != 0n always exceeds pointer-sized storage.
10901 masm.branchIfBigIntIsNonNegative(rhs, ool->entry());
10903 // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
// Magnitude 0 for non-negative lhs, 1 for negative lhs (sign re-applied at
// |create| below, giving -1n).
10904 masm.move32(Imm32(0), temp1);
10905 masm.branchIfBigIntIsNonNegative(lhs, &create);
10906 masm.move32(Imm32(1), temp1);
10907 masm.jump(&create);
10909 masm.bind(&shift);
10911 Label nonNegative;
10912 masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
// Negative shift count: save the original digit for the round-down check.
10914 masm.movePtr(temp1, temp3);
10916 // |x << -y| is computed as |x >> y|.
10917 masm.rshiftPtr(temp2, temp1);
10919 // For negative numbers, round down if any bit was shifted out.
10920 masm.branchIfBigIntIsNonNegative(lhs, &create);
10922 // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
10923 masm.movePtr(ImmWord(-1), output);
10924 masm.lshiftPtr(temp2, output);
10925 masm.notPtr(output);
10927 // Add plus one when |(lhs.digit(0) & mask) != 0|.
10928 masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
10929 masm.addPtr(ImmWord(1), temp1);
10930 masm.jump(&create);
10932 masm.bind(&nonNegative);
// Non-negative shift count: stash it, since temp2 is about to be reused to
// compute the overflow check.
10934 masm.movePtr(temp2, temp3);
10936 // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
10937 masm.negPtr(temp2);
10938 masm.addPtr(Imm32(BigInt::DigitBits), temp2);
10939 masm.movePtr(temp1, output);
10940 masm.rshiftPtr(temp2, output);
10942 // Call into the VM when any bit will be shifted out.
10943 masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());
10945 masm.movePtr(temp3, temp2);
10946 masm.lshiftPtr(temp2, temp1);
10948 masm.bind(&create);
10950 // Create and return the result.
10951 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10952 masm.initializeBigIntAbsolute(output, temp1);
10954 // Set the sign bit when the left-hand side is negative.
10955 masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
10956 masm.or32(Imm32(BigInt::signBitMask()),
10957 Address(output, BigInt::offsetOfFlags()));
10959 masm.bind(ool->rejoin());
// Emit BigInt right shift, inlined for a single-digit |lhs|. This is the
// mirror image of visitBigIntLsh: a negative |rhs| means a left shift (to the
// VM if any bit would be shifted out), a non-negative |rhs| is an arithmetic
// right shift rounding toward negative infinity for negative |lhs|. Shift
// magnitudes >= DigitBits resolve to 0n/-1n or go to the VM. The result
// magnitude is built in temp1; |lhs|'s sign bit is copied onto the result.
10962 void CodeGenerator::visitBigIntRsh(LBigIntRsh* ins) {
10963 Register lhs = ToRegister(ins->lhs());
10964 Register rhs = ToRegister(ins->rhs());
10965 Register temp1 = ToRegister(ins->temp1());
10966 Register temp2 = ToRegister(ins->temp2());
10967 Register temp3 = ToRegister(ins->temp3());
10968 Register output = ToRegister(ins->output());
10970 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10971 auto* ool = oolCallVM<Fn, BigInt::rsh>(ins, ArgList(lhs, rhs),
10972 StoreRegisterTo(output));
10974 // 0n >> x == 0n
10975 Label lhsNonZero;
10976 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10977 masm.movePtr(lhs, output);
10978 masm.jump(ool->rejoin());
10979 masm.bind(&lhsNonZero);
10981 // x >> 0n == x
10982 Label rhsNonZero;
10983 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10984 masm.movePtr(lhs, output);
10985 masm.jump(ool->rejoin());
10986 masm.bind(&rhsNonZero);
10988 // Inline |BigInt::rsh| for the case when |lhs| contains a single digit.
// temp2 = |abs(rhs)|; jumps to |rhsTooLarge| when the magnitude doesn't fit.
10990 Label rhsTooLarge;
10991 masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);
10993 // Call into the VM when the left-hand side operand can't be loaded into a
10994 // pointer-sized register.
10995 masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
10997 // Handle shifts exceeding |BigInt::DigitBits| first.
10998 Label shift, create;
10999 masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
11001 masm.bind(&rhsTooLarge);
11003 // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
11004 masm.branchIfBigIntIsNegative(rhs, ool->entry());
11006 // x >> DigitBits is either 0n or -1n.
// Magnitude 0 for non-negative lhs, 1 for negative lhs (sign re-applied at
// |create| below, giving -1n).
11007 masm.move32(Imm32(0), temp1);
11008 masm.branchIfBigIntIsNonNegative(lhs, &create);
11009 masm.move32(Imm32(1), temp1);
11010 masm.jump(&create);
11012 masm.bind(&shift);
11014 Label nonNegative;
11015 masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
// Negative shift count: stash it, since temp2 is about to be reused to
// compute the overflow check.
11017 masm.movePtr(temp2, temp3);
11019 // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
11020 masm.negPtr(temp2);
11021 masm.addPtr(Imm32(BigInt::DigitBits), temp2);
11022 masm.movePtr(temp1, output);
11023 masm.rshiftPtr(temp2, output);
11025 // Call into the VM when any bit will be shifted out.
11026 masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());
11028 // |x >> -y| is computed as |x << y|.
11029 masm.movePtr(temp3, temp2);
11030 masm.lshiftPtr(temp2, temp1);
11031 masm.jump(&create);
11033 masm.bind(&nonNegative);
// Non-negative shift count: save the original digit for the round-down
// check below.
11035 masm.movePtr(temp1, temp3);
11037 masm.rshiftPtr(temp2, temp1);
11039 // For negative numbers, round down if any bit was shifted out.
11040 masm.branchIfBigIntIsNonNegative(lhs, &create);
11042 // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
11043 masm.movePtr(ImmWord(-1), output);
11044 masm.lshiftPtr(temp2, output);
11045 masm.notPtr(output);
11047 // Add plus one when |(lhs.digit(0) & mask) != 0|.
11048 masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
11049 masm.addPtr(ImmWord(1), temp1);
11051 masm.bind(&create);
11053 // Create and return the result.
11054 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
11055 masm.initializeBigIntAbsolute(output, temp1);
11057 // Set the sign bit when the left-hand side is negative.
11058 masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
11059 masm.or32(Imm32(BigInt::signBitMask()),
11060 Address(output, BigInt::offsetOfFlags()));
11062 masm.bind(ool->rejoin());
// Emit BigInt increment (|x + 1n|). Single-digit inputs are incremented
// inline and a fresh BigInt is allocated; heap-digit inputs and signed
// overflow fall back to BigInt::inc in the VM.
11065 void CodeGenerator::visitBigIntIncrement(LBigIntIncrement* ins) {
11066 Register input = ToRegister(ins->input());
11067 Register temp1 = ToRegister(ins->temp1());
11068 Register temp2 = ToRegister(ins->temp2());
11069 Register output = ToRegister(ins->output());
11071 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
11072 auto* ool =
11073 oolCallVM<Fn, BigInt::inc>(ins, ArgList(input), StoreRegisterTo(output));
11075 // Call into the VM when the input can't be loaded into a pointer-sized
11076 // register.
11077 masm.loadBigInt(input, temp1, ool->entry());
11078 masm.movePtr(ImmWord(1), temp2);
// temp1 += 1; signed overflow means the result needs more than one digit.
11080 masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
11082 // Create and return the result.
11083 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
11084 masm.initializeBigInt(output, temp1);
11086 masm.bind(ool->rejoin());
// Emit BigInt decrement (|x - 1n|). Single-digit inputs are decremented
// inline and a fresh BigInt is allocated; heap-digit inputs and signed
// overflow fall back to BigInt::dec in the VM.
11089 void CodeGenerator::visitBigIntDecrement(LBigIntDecrement* ins) {
11090 Register input = ToRegister(ins->input());
11091 Register temp1 = ToRegister(ins->temp1());
11092 Register temp2 = ToRegister(ins->temp2());
11093 Register output = ToRegister(ins->output());
11095 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
11096 auto* ool =
11097 oolCallVM<Fn, BigInt::dec>(ins, ArgList(input), StoreRegisterTo(output));
11099 // Call into the VM when the input can't be loaded into a pointer-sized
11100 // register.
11101 masm.loadBigInt(input, temp1, ool->entry());
11102 masm.movePtr(ImmWord(1), temp2);
// temp1 -= 1; signed overflow means the result needs more than one digit.
11104 masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
11106 // Create and return the result.
11107 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
11108 masm.initializeBigInt(output, temp1);
11110 masm.bind(ool->rejoin());
// Emit BigInt unary negation. |-0n == 0n| returns the input unchanged; for
// inputs whose digits fit inline the BigInt is copied and its sign bit
// flipped directly. Inputs with heap digits fall back to BigInt::neg.
11113 void CodeGenerator::visitBigIntNegate(LBigIntNegate* ins) {
11114 Register input = ToRegister(ins->input());
11115 Register temp = ToRegister(ins->temp());
11116 Register output = ToRegister(ins->output());
11118 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
11119 auto* ool =
11120 oolCallVM<Fn, BigInt::neg>(ins, ArgList(input), StoreRegisterTo(output));
11122 // -0n == 0n
11123 Label lhsNonZero;
11124 masm.branchIfBigIntIsNonZero(input, &lhsNonZero);
11125 masm.movePtr(input, output);
11126 masm.jump(ool->rejoin());
11127 masm.bind(&lhsNonZero);
11129 // Call into the VM when the input uses heap digits.
11130 masm.copyBigIntWithInlineDigits(input, output, temp, initialBigIntHeap(),
11131 ool->entry());
11133 // Flip the sign bit.
// Negation of a non-zero BigInt only changes the sign; the magnitude copied
// above stays as-is.
11134 masm.xor32(Imm32(BigInt::signBitMask()),
11135 Address(output, BigInt::offsetOfFlags()));
11137 masm.bind(ool->rejoin());
// Emit BigInt bitwise NOT (|~x == -x - 1|). Works on the absolute value so
// the full single-digit unsigned range is supported (see comment below); the
// result's sign bit is set when the input was non-negative. Heap-digit inputs
// and magnitude overflow fall back to BigInt::bitNot in the VM.
11140 void CodeGenerator::visitBigIntBitNot(LBigIntBitNot* ins) {
11141 Register input = ToRegister(ins->input());
11142 Register temp1 = ToRegister(ins->temp1());
11143 Register temp2 = ToRegister(ins->temp2());
11144 Register output = ToRegister(ins->output());
11146 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
11147 auto* ool = oolCallVM<Fn, BigInt::bitNot>(ins, ArgList(input),
11148 StoreRegisterTo(output));
11150 masm.loadBigIntAbsolute(input, temp1, ool->entry());
11152 // This follows the C++ implementation because it let's us support the full
11153 // range [-2^64, 2^64 - 1] on 64-bit resp. [-2^32, 2^32 - 1] on 32-bit.
11154 Label nonNegative, done;
11155 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
11157 // ~(-x) == ~(~(x-1)) == x-1
11158 masm.subPtr(Imm32(1), temp1);
11159 masm.jump(&done);
11161 masm.bind(&nonNegative);
11163 // ~x == -x-1 == -(x+1)
// CarrySet catches unsigned wraparound of the magnitude; the VM handles it.
11164 masm.movePtr(ImmWord(1), temp2);
11165 masm.branchAddPtr(Assembler::CarrySet, temp2, temp1, ool->entry());
11167 masm.bind(&done);
11169 // Create and return the result.
11170 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
11171 masm.initializeBigIntAbsolute(output, temp1);
11173 // Set the sign bit when the input is positive.
11174 masm.branchIfBigIntIsNegative(input, ool->rejoin());
11175 masm.or32(Imm32(BigInt::signBitMask()),
11176 Address(output, BigInt::offsetOfFlags()));
11178 masm.bind(ool->rejoin());
// Emit |Number.prototype.toString(base)| for an int32 input. Two variants:
// the base is either a runtime register value or a compile-time constant, and
// each uses the matching MacroAssembler helper with the same VM fallback
// (js::Int32ToStringWithBase). |lowerCase| selects the digit-letter casing.
11181 void CodeGenerator::visitInt32ToStringWithBase(LInt32ToStringWithBase* lir) {
11182 Register input = ToRegister(lir->input());
11183 RegisterOrInt32 base = ToRegisterOrInt32(lir->base());
11184 Register output = ToRegister(lir->output());
11185 Register temp0 = ToRegister(lir->temp0());
11186 Register temp1 = ToRegister(lir->temp1());
11188 bool lowerCase = lir->mir()->lowerCase();
11190 using Fn = JSString* (*)(JSContext*, int32_t, int32_t, bool);
11191 if (base.is<Register>()) {
11192 auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
11193 lir, ArgList(input, base.as<Register>(), Imm32(lowerCase)),
11194 StoreRegisterTo(output));
// The register-base helper takes the set of live volatile registers —
// presumably because it may clobber them internally (TODO confirm against
// MacroAssembler::loadInt32ToStringWithBase).
11196 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
11197 masm.loadInt32ToStringWithBase(input, base.as<Register>(), output, temp0,
11198 temp1, gen->runtime->staticStrings(),
11199 liveRegs, lowerCase, ool->entry());
11200 masm.bind(ool->rejoin());
11201 } else {
11202 auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
11203 lir, ArgList(input, Imm32(base.as<int32_t>()), Imm32(lowerCase)),
11204 StoreRegisterTo(output));
11206 masm.loadInt32ToStringWithBase(input, base.as<int32_t>(), output, temp0,
11207 temp1, gen->runtime->staticStrings(),
11208 lowerCase, ool->entry());
11209 masm.bind(ool->rejoin());
// Emit |parseInt(string, radix)| where the MIR guarantees radix is 0 or 10
// (asserted below). Fast path: strings that cache an index value yield that
// int32 directly; everything else calls js::NumberParseInt in the VM.
11213 void CodeGenerator::visitNumberParseInt(LNumberParseInt* lir) {
11214 Register string = ToRegister(lir->string());
11215 Register radix = ToRegister(lir->radix());
11216 ValueOperand output = ToOutValue(lir);
11217 Register temp = ToRegister(lir->temp0());
11219 #ifdef DEBUG
11220 Label ok;
11221 masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
11222 masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
11223 masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
11224 masm.bind(&ok);
11225 #endif
11227 // Use indexed value as fast path if possible.
11228 Label vmCall, done;
// Branches to |vmCall| when the string has no cached index value.
11229 masm.loadStringIndexValue(string, temp, &vmCall);
11230 masm.tagValue(JSVAL_TYPE_INT32, temp, output);
11231 masm.jump(&done);
11233 masm.bind(&vmCall);
// Arguments are pushed last-to-first for the VM calling convention.
11235 pushArg(radix);
11236 pushArg(string);
11238 using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
11239 callVM<Fn, js::NumberParseInt>(lir);
11241 masm.bind(&done);
// Emit |parseInt(double)| via truncation to int32. Bails out (to recompile /
// take the generic path) on NaN, on doubles that don't truncate to an int32,
// and on non-zero values in (-1, 1.0e-6) — presumably because such values
// stringify in exponential notation, where parseInt differs from truncation
// (TODO confirm against js::NumberParseInt).
11244 void CodeGenerator::visitDoubleParseInt(LDoubleParseInt* lir) {
11245 FloatRegister number = ToFloatRegister(lir->number());
11246 Register output = ToRegister(lir->output());
11247 FloatRegister temp = ToFloatRegister(lir->temp0());
11249 Label bail;
// DoubleUnordered: only NaN compares unordered with itself.
11250 masm.branchDouble(Assembler::DoubleUnordered, number, number, &bail);
11251 masm.branchTruncateDoubleToInt32(number, output, &bail);
11253 Label ok;
11254 masm.branch32(Assembler::NotEqual, output, Imm32(0), &ok);
11256 // Accept both +0 and -0 and return 0.
11257 masm.loadConstantDouble(0.0, temp);
11258 masm.branchDouble(Assembler::DoubleEqual, number, temp, &ok);
11260 // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
// Only values that truncated to 0 reach here, so |number| is already known
// to lie in (-1, 1); the remaining check is against the lower bound 1.0e-6.
11261 masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, temp);
11262 masm.branchDouble(Assembler::DoubleLessThan, number, temp, &bail);
11264 masm.bind(&ok);
11266 bailoutFrom(&bail, lir->snapshot());
11269 void CodeGenerator::visitFloor(LFloor* lir) {
11270 FloatRegister input = ToFloatRegister(lir->input());
11271 Register output = ToRegister(lir->output());
11273 Label bail;
11274 masm.floorDoubleToInt32(input, output, &bail);
11275 bailoutFrom(&bail, lir->snapshot());
11278 void CodeGenerator::visitFloorF(LFloorF* lir) {
11279 FloatRegister input = ToFloatRegister(lir->input());
11280 Register output = ToRegister(lir->output());
11282 Label bail;
11283 masm.floorFloat32ToInt32(input, output, &bail);
11284 bailoutFrom(&bail, lir->snapshot());
11287 void CodeGenerator::visitCeil(LCeil* lir) {
11288 FloatRegister input = ToFloatRegister(lir->input());
11289 Register output = ToRegister(lir->output());
11291 Label bail;
11292 masm.ceilDoubleToInt32(input, output, &bail);
11293 bailoutFrom(&bail, lir->snapshot());
11296 void CodeGenerator::visitCeilF(LCeilF* lir) {
11297 FloatRegister input = ToFloatRegister(lir->input());
11298 Register output = ToRegister(lir->output());
11300 Label bail;
11301 masm.ceilFloat32ToInt32(input, output, &bail);
11302 bailoutFrom(&bail, lir->snapshot());
11305 void CodeGenerator::visitRound(LRound* lir) {
11306 FloatRegister input = ToFloatRegister(lir->input());
11307 FloatRegister temp = ToFloatRegister(lir->temp0());
11308 Register output = ToRegister(lir->output());
11310 Label bail;
11311 masm.roundDoubleToInt32(input, output, temp, &bail);
11312 bailoutFrom(&bail, lir->snapshot());
11315 void CodeGenerator::visitRoundF(LRoundF* lir) {
11316 FloatRegister input = ToFloatRegister(lir->input());
11317 FloatRegister temp = ToFloatRegister(lir->temp0());
11318 Register output = ToRegister(lir->output());
11320 Label bail;
11321 masm.roundFloat32ToInt32(input, output, temp, &bail);
11322 bailoutFrom(&bail, lir->snapshot());
11325 void CodeGenerator::visitTrunc(LTrunc* lir) {
11326 FloatRegister input = ToFloatRegister(lir->input());
11327 Register output = ToRegister(lir->output());
11329 Label bail;
11330 masm.truncDoubleToInt32(input, output, &bail);
11331 bailoutFrom(&bail, lir->snapshot());
11334 void CodeGenerator::visitTruncF(LTruncF* lir) {
11335 FloatRegister input = ToFloatRegister(lir->input());
11336 Register output = ToRegister(lir->output());
11338 Label bail;
11339 masm.truncFloat32ToInt32(input, output, &bail);
11340 bailoutFrom(&bail, lir->snapshot());
// Compare two strings with the comparison operator of the MIR node. The
// inline fast path is masm.compareStrings; when it can't decide it jumps
// to an out-of-line VM call selected per operator below. Only the
// LessThan/GreaterThanOrEqual VM functions exist, so Le and Gt swap their
// operands.
void CodeGenerator::visitCompareS(LCompareS* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register output = ToRegister(lir->output());

  // Out-of-line path: VM call matching |op|, storing the boolean result
  // in |output|.
  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Lt) {
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Le) {
    // Push the operands in reverse order for JSOp::Le:
    // - |left <= right| is implemented as |right >= left|.
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(right, left), StoreRegisterTo(output));
  } else if (op == JSOp::Gt) {
    // Push the operands in reverse order for JSOp::Gt:
    // - |left > right| is implemented as |right < left|.
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(right, left), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ge);
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(left, right), StoreRegisterTo(output));
  }

  // Inline fast path; jumps to ool->entry() when it cannot decide.
  masm.compareStrings(op, left, right, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Equality comparison of a string operand against a known, non-empty
// constant linear string. Fast paths: pointer identity, atom mismatch,
// encoding mismatch, and length mismatch; otherwise compare the
// characters inline, falling back to an out-of-line VM call.
void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsEqualityOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() > 0);

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ne || op == JSOp::StrictNe);
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  }

  Label compareChars;
  {
    Label notPointerEqual;

    // If operands point to the same instance, the strings are trivially equal.
    masm.branchPtr(Assembler::NotEqual, input, ImmGCPtr(str), &notPointerEqual);
    masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
    masm.jump(ool->rejoin());

    masm.bind(&notPointerEqual);

    Label setNotEqualResult;
    if (str->isAtom()) {
      // Atoms cannot be equal to each other if they point to different strings.
      Imm32 atomBit(JSString::ATOM_BIT);
      masm.branchTest32(Assembler::NonZero,
                        Address(input, JSString::offsetOfFlags()), atomBit,
                        &setNotEqualResult);
    }

    if (str->hasTwoByteChars()) {
      // Pure two-byte strings can't be equal to Latin-1 strings.
      JS::AutoCheckCannotGC nogc;
      if (!mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
        masm.branchLatin1String(input, &setNotEqualResult);
      }
    }

    // Strings of different length can never be equal.
    masm.branch32(Assembler::Equal, Address(input, JSString::offsetOfLength()),
                  Imm32(str->length()), &compareChars);

    masm.bind(&setNotEqualResult);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&compareChars);

  // Load the input string's characters.
  Register stringChars = output;
  masm.loadStringCharsForCompare(input, str, stringChars, ool->entry());

  // Start comparing character by character.
  masm.compareStringChars(op, stringChars, str, output);

  masm.bind(ool->rejoin());
}
// Relational comparison (<, <=, >, >=) of a string operand against a
// constant single-character string. Compares the input's first character
// against |ch|; when the first characters are equal the outcome is decided
// by the input's length (empty < single-char < longer strings).
void CodeGenerator::visitCompareSSingle(LCompareSSingle* lir) {
  JSOp op = lir->jsop();
  MOZ_ASSERT(IsRelationalOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() == 1);

  char16_t ch = str->latin1OrTwoByteChar(0);

  masm.movePtr(input, temp);

  // Check if the string is empty.
  Label compareLength;
  masm.branch32(Assembler::Equal, Address(temp, JSString::offsetOfLength()),
                Imm32(0), &compareLength);

  // The first character is in the left-most rope child.
  Label notRope;
  masm.branchIfNotRope(temp, &notRope);
  {
    // Unwind ropes at the start if possible.
    Label unwindRope;
    masm.bind(&unwindRope);
    masm.loadRopeLeftChild(temp, output);
    masm.movePtr(output, temp);

#ifdef DEBUG
    Label notEmpty;
    masm.branch32(Assembler::NotEqual,
                  Address(temp, JSString::offsetOfLength()), Imm32(0),
                  &notEmpty);
    masm.assumeUnreachable("rope children are non-empty");
    masm.bind(&notEmpty);
#endif

    // Otherwise keep unwinding ropes.
    masm.branchIfRope(temp, &unwindRope);
  }
  masm.bind(&notRope);

  // Load the first character into |output|.
  auto loadFirstChar = [&](auto encoding) {
    masm.loadStringChars(temp, output, encoding);
    masm.loadChar(Address(output, 0), output, encoding);
  };

  Label done;
  if (ch <= JSString::MAX_LATIN1_CHAR) {
    // Handle both encodings when the search character is Latin-1.
    Label twoByte, compare;
    masm.branchTwoByteString(temp, &twoByte);

    loadFirstChar(CharEncoding::Latin1);
    masm.jump(&compare);

    masm.bind(&twoByte);
    loadFirstChar(CharEncoding::TwoByte);

    masm.bind(&compare);
  } else {
    // The search character is a two-byte character, so it can't be equal to any
    // character of a Latin-1 string.
    masm.move32(Imm32(int32_t(op == JSOp::Lt || op == JSOp::Le)), output);
    masm.branchLatin1String(temp, &done);

    loadFirstChar(CharEncoding::TwoByte);
  }

  // Compare the string length when the search character is equal to the
  // input's first character.
  masm.branch32(Assembler::Equal, output, Imm32(ch), &compareLength);

  // Otherwise compute the result and jump to the end.
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false), output, Imm32(ch),
                output);
  masm.jump(&done);

  // Compare the string length to compute the overall result.
  masm.bind(&compareLength);
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                Address(temp, JSString::offsetOfLength()), Imm32(1), output);

  masm.bind(&done);
}
// Compare two BigInts. masm.equalBigInts checks sign, digit length, and
// digits in turn; for equality ops any mismatch jumps to a single
// |notSame| label, while for relational ops each mismatch kind jumps to
// its own label so the ordering can be decided from the first difference.
void CodeGenerator::visitCompareBigInt(LCompareBigInt* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label notSame;
  Label compareSign;
  Label compareLength;
  Label compareDigit;

  // Where to jump on the first detected difference; equality ops don't
  // care which part differed.
  Label* notSameSign;
  Label* notSameLength;
  Label* notSameDigit;
  if (IsEqualityOp(op)) {
    notSameSign = &notSame;
    notSameLength = &notSame;
    notSameDigit = &notSame;
  } else {
    notSameSign = &compareSign;
    notSameLength = &compareLength;
    notSameDigit = &compareDigit;
  }

  masm.equalBigInts(left, right, temp0, temp1, temp2, output, notSameSign,
                    notSameLength, notSameDigit);

  // Fall-through: the BigInts are equal, so ==/===/<=/>= are true.
  Label done;
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
                    op == JSOp::Ge),
              output);
  masm.jump(&done);

  if (IsEqualityOp(op)) {
    masm.bind(&notSame);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  } else {
    Label invertWhenNegative;

    // There are two cases when sign(left) != sign(right):
    // 1. sign(left) = positive and sign(right) = negative,
    // 2. or the dual case with reversed signs.
    //
    // For case 1, |left| <cmp> |right| is true for cmp=Gt or cmp=Ge and false
    // for cmp=Lt or cmp=Le. Initialize the result for case 1 and handle case 2
    // with |invertWhenNegative|.
    masm.bind(&compareSign);
    masm.move32(Imm32(op == JSOp::Gt || op == JSOp::Ge), output);
    masm.jump(&invertWhenNegative);

    // For sign(left) = sign(right) and len(digits(left)) != len(digits(right)),
    // we have to consider the two cases:
    // 1. len(digits(left)) < len(digits(right))
    // 2. len(digits(left)) > len(digits(right))
    //
    // For |left| <cmp> |right| with cmp=Lt:
    // Assume both BigInts are positive, then |left < right| is true for case 1
    // and false for case 2. When both are negative, the result is reversed.
    //
    // The other comparison operators can be handled similarly.
    //
    // |temp0| holds the digits length of the right-hand side operand.
    masm.bind(&compareLength);
    masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                  Address(left, BigInt::offsetOfLength()), temp0, output);
    masm.jump(&invertWhenNegative);

    // Similar to the case above, compare the current digit to determine the
    // overall comparison result.
    //
    // |temp1| points to the current digit of the left-hand side operand.
    // |output| holds the current digit of the right-hand side operand.
    masm.bind(&compareDigit);
    masm.cmpPtrSet(JSOpToCondition(op, /* isSigned = */ false),
                   Address(temp1, 0), output, output);

    // Negative operands reverse the relational result computed above.
    Label nonNegative;
    masm.bind(&invertWhenNegative);
    masm.branchIfBigIntIsNonNegative(left, &nonNegative);
    masm.xor32(Imm32(1), output);
    masm.bind(&nonNegative);
  }

  masm.bind(&done);
}
11633 void CodeGenerator::visitCompareBigIntInt32(LCompareBigIntInt32* lir) {
11634 JSOp op = lir->mir()->jsop();
11635 Register left = ToRegister(lir->left());
11636 Register right = ToRegister(lir->right());
11637 Register temp0 = ToRegister(lir->temp0());
11638 Register temp1 = ToRegister(lir->temp1());
11639 Register output = ToRegister(lir->output());
11641 Label ifTrue, ifFalse;
11642 masm.compareBigIntAndInt32(op, left, right, temp0, temp1, &ifTrue, &ifFalse);
11644 Label done;
11645 masm.bind(&ifFalse);
11646 masm.move32(Imm32(0), output);
11647 masm.jump(&done);
11648 masm.bind(&ifTrue);
11649 masm.move32(Imm32(1), output);
11650 masm.bind(&done);
// Compare a BigInt against a double via an ABI call. Only the
// Equal/NotEqual/LessThan/GreaterThanOrEqual helpers exist, so Le and Gt
// swap the arguments and use the (double, BigInt*) variants.
void CodeGenerator::visitCompareBigIntDouble(LCompareBigIntDouble* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  FloatRegister right = ToFloatRegister(lir->right());
  Register output = ToRegister(lir->output());

  masm.setupAlignedABICall();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(right, ABIType::Float64);
    masm.passABIArg(left);
  } else {
    masm.passABIArg(left);
    masm.passABIArg(right, ABIType::Float64);
  }

  // The argument order above must match the function type selected here.
  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(output);
}
// Compare a BigInt against a string via a VM call. Only the
// Equal/NotEqual/LessThan/GreaterThanOrEqual VM functions exist, so Le
// and Gt push the operands in swapped order and use the String-then-BigInt
// variants.
void CodeGenerator::visitCompareBigIntString(LCompareBigIntString* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  // (Arguments are pushed last-first, so the push order below matches the
  // function signatures selected in the switch.)
  if (op == JSOp::Le || op == JSOp::Gt) {
    pushArg(left);
    pushArg(right);
  } else {
    pushArg(right);
    pushArg(left);
  }

  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);

  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callVM<FnBigIntString, BigIntStringEqual<Equal>>(lir);
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callVM<FnBigIntString, BigIntStringEqual<NotEqual>>(lir);
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnBigIntString, BigIntStringCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Gt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnStringBigInt, StringBigIntCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Le: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    default:
      MOZ_CRASH("Unexpected compare op");
  }
}
// Loose equality (==/!=) of a boxed Value against null or undefined.
// Null and undefined compare equal; an object compares equal only if it
// "emulates undefined" (e.g. document.all). When the realm-wide fuse says
// no such object has been seen (|intact|), the object check is skipped
// (and only asserted in DEBUG/FUZZING builds).
void CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::ValueIndex);
  Register output = ToRegister(lir->output());

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  if (!intact) {
    auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
    addOutOfLineCode(ool, lir->mir());

    Label* nullOrLikeUndefined = ool->label1();
    Label* notNullOrLikeUndefined = ool->label2();

    {
      ScratchTagScope tag(masm, value);
      masm.splitTagForTest(value, tag);

      masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
      masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);

      // Check whether it's a truthy object or a falsy object that emulates
      // undefined.
      masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
    }

    Register objreg =
        masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
    branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined,
                                      notNullOrLikeUndefined, output, ool);
    // fall through

    Label done;

    // It's not null or undefined, and if it's an object it doesn't
    // emulate undefined, so it's not like undefined.
    masm.move32(Imm32(op == JSOp::Ne), output);
    masm.jump(&done);

    masm.bind(nullOrLikeUndefined);
    masm.move32(Imm32(op == JSOp::Eq), output);

    // Both branches meet here.
    masm.bind(&done);
  } else {
    Label nullOrUndefined, notNullOrLikeUndefined;
#if defined(DEBUG) || defined(FUZZING)
    Register objreg = Register::Invalid();
#endif
    {
      ScratchTagScope tag(masm, value);
      masm.splitTagForTest(value, tag);

      masm.branchTestNull(Assembler::Equal, tag, &nullOrUndefined);
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);

#if defined(DEBUG) || defined(FUZZING)
      // Check whether it's a truthy object or a falsy object that emulates
      // undefined.
      masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);
      objreg = masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
#endif
    }

#if defined(DEBUG) || defined(FUZZING)
    assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
    masm.bind(&notNullOrLikeUndefined);
#endif

    Label done;

    // It's not null or undefined, and if it's an object it doesn't
    // emulate undefined.
    masm.move32(Imm32(op == JSOp::Ne), output);
    masm.jump(&done);

    masm.bind(&nullOrUndefined);
    masm.move32(Imm32(op == JSOp::Eq), output);

    // Both branches meet here.
    masm.bind(&done);
  }
}
// Branch form of the loose null/undefined comparison on a boxed Value.
// For JSOp::Ne the true/false successors are swapped so only the Eq logic
// is emitted.
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(
    LIsNullOrLikeUndefinedAndBranchV* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  const ValueOperand value =
      ToValue(lir, LIsNullOrLikeUndefinedAndBranchV::Value);

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();

  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
    masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);

    // Only objects can still compare equal to null/undefined.
    masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
  }

  bool extractObject = !intact;
#if defined(DEBUG) || defined(FUZZING)
  // always extract objreg if we're in debug and
  // assertObjectDoesNotEmulateUndefined;
  extractObject = true;
#endif

  Register objreg = Register::Invalid();
  Register scratch = ToRegister(lir->temp());
  if (extractObject) {
    objreg = masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
  }
  if (!intact) {
    // Objects that emulate undefined are loosely equal to null/undefined.
    OutOfLineTestObject* ool = new (alloc()) OutOfLineTestObject();
    addOutOfLineCode(ool, lir->cmpMir());
    testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch,
                                ool);
  } else {
    assertObjectDoesNotEmulateUndefined(objreg, scratch, lir->cmpMir());
    // Bug 1874905. This would be nice to optimize out at the MIR level.
    masm.jump(ifFalseLabel);
  }
}
// Loose equality (==/!=) of a known-object operand against null/undefined.
// An object compares equal only if it emulates undefined; when the fuse
// is intact that case is impossible (asserted in debug), so the result is
// constant.
void CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);
  MOZ_ASSERT(lir->mir()->lhs()->type() == MIRType::Object);

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  JSOp op = lir->mir()->jsop();
  Register output = ToRegister(lir->output());
  Register objreg = ToRegister(lir->input());
  if (!intact) {
    MOZ_ASSERT(IsLooseEqualityOp(op),
               "Strict equality should have been folded");

    auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
    addOutOfLineCode(ool, lir->mir());

    Label* emulatesUndefined = ool->label1();
    Label* doesntEmulateUndefined = ool->label2();

    branchTestObjectEmulatesUndefined(objreg, emulatesUndefined,
                                      doesntEmulateUndefined, output, ool);

    Label done;

    // Fall-through: the object doesn't emulate undefined.
    masm.move32(Imm32(op == JSOp::Ne), output);
    masm.jump(&done);

    masm.bind(emulatesUndefined);
    masm.move32(Imm32(op == JSOp::Eq), output);
    masm.bind(&done);
  } else {
    assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
    masm.move32(Imm32(op == JSOp::Ne), output);
  }
}
// Branch form of the loose null/undefined comparison on a known-object
// operand. For JSOp::Ne the successors are swapped; with the fuse intact
// the object can never emulate undefined, so the false branch is taken
// unconditionally.
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(
    LIsNullOrLikeUndefinedAndBranchT* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);
  MOZ_ASSERT(lir->cmpMir()->lhs()->type() == MIRType::Object);

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  Register input = ToRegister(lir->getOperand(0));
  Register scratch = ToRegister(lir->temp());
  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  if (intact) {
    // Bug 1874905. Ideally branches like this would be optimized out.
    assertObjectDoesNotEmulateUndefined(input, scratch, lir->mir());
    masm.jump(ifFalseLabel);
  } else {
    auto* ool = new (alloc()) OutOfLineTestObject();
    addOutOfLineCode(ool, lir->cmpMir());

    // Objects that emulate undefined are loosely equal to null/undefined.
    testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
  }
}
11991 void CodeGenerator::visitIsNull(LIsNull* lir) {
11992 MCompare::CompareType compareType = lir->mir()->compareType();
11993 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11995 JSOp op = lir->mir()->jsop();
11996 MOZ_ASSERT(IsStrictEqualityOp(op));
11998 const ValueOperand value = ToValue(lir, LIsNull::ValueIndex);
11999 Register output = ToRegister(lir->output());
12001 Assembler::Condition cond = JSOpToCondition(compareType, op);
12002 masm.testNullSet(cond, value, output);
12005 void CodeGenerator::visitIsUndefined(LIsUndefined* lir) {
12006 MCompare::CompareType compareType = lir->mir()->compareType();
12007 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
12009 JSOp op = lir->mir()->jsop();
12010 MOZ_ASSERT(IsStrictEqualityOp(op));
12012 const ValueOperand value = ToValue(lir, LIsUndefined::ValueIndex);
12013 Register output = ToRegister(lir->output());
12015 Assembler::Condition cond = JSOpToCondition(compareType, op);
12016 masm.testUndefinedSet(cond, value, output);
12019 void CodeGenerator::visitIsNullAndBranch(LIsNullAndBranch* lir) {
12020 MCompare::CompareType compareType = lir->cmpMir()->compareType();
12021 MOZ_ASSERT(compareType == MCompare::Compare_Null);
12023 JSOp op = lir->cmpMir()->jsop();
12024 MOZ_ASSERT(IsStrictEqualityOp(op));
12026 const ValueOperand value = ToValue(lir, LIsNullAndBranch::Value);
12028 Assembler::Condition cond = JSOpToCondition(compareType, op);
12029 testNullEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
12032 void CodeGenerator::visitIsUndefinedAndBranch(LIsUndefinedAndBranch* lir) {
12033 MCompare::CompareType compareType = lir->cmpMir()->compareType();
12034 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
12036 JSOp op = lir->cmpMir()->jsop();
12037 MOZ_ASSERT(IsStrictEqualityOp(op));
12039 const ValueOperand value = ToValue(lir, LIsUndefinedAndBranch::Value);
12041 Assembler::Condition cond = JSOpToCondition(compareType, op);
12042 testUndefinedEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
12045 void CodeGenerator::visitSameValueDouble(LSameValueDouble* lir) {
12046 FloatRegister left = ToFloatRegister(lir->left());
12047 FloatRegister right = ToFloatRegister(lir->right());
12048 FloatRegister temp = ToFloatRegister(lir->temp0());
12049 Register output = ToRegister(lir->output());
12051 masm.sameValueDouble(left, right, temp, output);
12054 void CodeGenerator::visitSameValue(LSameValue* lir) {
12055 ValueOperand lhs = ToValue(lir, LSameValue::LhsIndex);
12056 ValueOperand rhs = ToValue(lir, LSameValue::RhsIndex);
12057 Register output = ToRegister(lir->output());
12059 using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
12060 OutOfLineCode* ool =
12061 oolCallVM<Fn, SameValue>(lir, ArgList(lhs, rhs), StoreRegisterTo(output));
12063 // First check to see if the values have identical bits.
12064 // This is correct for SameValue because SameValue(NaN,NaN) is true,
12065 // and SameValue(0,-0) is false.
12066 masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
12067 ool->entry());
12068 masm.move32(Imm32(1), output);
12070 // If this fails, call SameValue.
12071 masm.bind(ool->rejoin());
// Concatenate the strings in |lhs| and |rhs| into |output| by calling the
// per-zone string-concat stub. The stub signals failure by leaving
// nullptr in |output|, in which case an out-of-line ConcatStrings VM call
// is used instead.
void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs,
                               Register output) {
  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
  OutOfLineCode* ool = oolCallVM<Fn, ConcatStrings<CanGC>>(
      lir, ArgList(lhs, rhs, static_cast<Imm32>(int32_t(gc::Heap::Default))),
      StoreRegisterTo(output));

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* stringConcatStub =
      jitZone->stringConcatStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(stringConcatStub);
  // nullptr result => take the VM fallback path.
  masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());

  masm.bind(ool->rejoin());
}
12091 void CodeGenerator::visitConcat(LConcat* lir) {
12092 Register lhs = ToRegister(lir->lhs());
12093 Register rhs = ToRegister(lir->rhs());
12095 Register output = ToRegister(lir->output());
12097 MOZ_ASSERT(lhs == CallTempReg0);
12098 MOZ_ASSERT(rhs == CallTempReg1);
12099 MOZ_ASSERT(ToRegister(lir->temp0()) == CallTempReg0);
12100 MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg1);
12101 MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg2);
12102 MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg3);
12103 MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg4);
12104 MOZ_ASSERT(output == CallTempReg5);
12106 emitConcat(lir, lhs, rhs, output);
// Emit a character-copy loop from |from| to |to|. When source and
// destination encodings match, the copy is widened to pointer-sized
// chunks and small loops (bounded by |maximumLength| or the fat-inline
// string capacity) are fully unrolled; otherwise characters are copied
// (and possibly inflated Latin-1 -> two-byte) one at a time. |from|,
// |to|, and |len| are clobbered; |byteOpScratch| is a scratch register.
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding fromEncoding, CharEncoding toEncoding,
                            size_t maximumLength = SIZE_MAX) {
  // Copy |len| char16_t code units from |from| to |to|. Assumes len > 0
  // (checked below in debug builds), and when done |to| must point to the
  // next available char.

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
  masm.assumeUnreachable("Length should be greater than 0.");
  masm.bind(&ok);

  if (maximumLength != SIZE_MAX) {
    MOZ_ASSERT(maximumLength <= INT32_MAX, "maximum length fits into int32");

    Label ok;
    masm.branchPtr(Assembler::BelowOrEqual, len, Imm32(maximumLength), &ok);
    masm.assumeUnreachable("Length should not exceed maximum length.");
    masm.bind(&ok);
  }
#endif

  // Deflation (two-byte -> Latin-1) is not supported.
  MOZ_ASSERT_IF(toEncoding == CharEncoding::Latin1,
                fromEncoding == CharEncoding::Latin1);

  size_t fromWidth =
      fromEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
  size_t toWidth =
      toEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);

  // Try to copy multiple characters at once when both encoding are equal.
  if (fromEncoding == toEncoding) {
    constexpr size_t ptrWidth = sizeof(uintptr_t);

    // Copy |width| bytes and then adjust |from| and |to|.
    auto copyCharacters = [&](size_t width) {
      static_assert(ptrWidth <= 8, "switch handles only up to eight bytes");

      switch (width) {
        case 1:
          masm.load8ZeroExtend(Address(from, 0), byteOpScratch);
          masm.store8(byteOpScratch, Address(to, 0));
          break;
        case 2:
          masm.load16ZeroExtend(Address(from, 0), byteOpScratch);
          masm.store16(byteOpScratch, Address(to, 0));
          break;
        case 4:
          masm.load32(Address(from, 0), byteOpScratch);
          masm.store32(byteOpScratch, Address(to, 0));
          break;
        case 8:
          MOZ_ASSERT(width == ptrWidth);
          masm.loadPtr(Address(from, 0), byteOpScratch);
          masm.storePtr(byteOpScratch, Address(to, 0));
          break;
      }

      masm.addPtr(Imm32(width), from);
      masm.addPtr(Imm32(width), to);
    };

    // First align |len| to pointer width.
    Label done;
    for (size_t width = fromWidth; width < ptrWidth; width *= 2) {
      // Number of characters which fit into |width| bytes.
      size_t charsPerWidth = width / fromWidth;

      if (charsPerWidth < maximumLength) {
        // Copy this chunk if the corresponding bit is set in |len|.
        Label next;
        masm.branchTest32(Assembler::Zero, len, Imm32(charsPerWidth), &next);

        copyCharacters(width);

        masm.branchSub32(Assembler::Zero, Imm32(charsPerWidth), len, &done);
        masm.bind(&next);
      } else if (charsPerWidth == maximumLength) {
        // |len| is known to be exactly |charsPerWidth|: copy unconditionally.
        copyCharacters(width);
        masm.sub32(Imm32(charsPerWidth), len);
      }
    }

    size_t maxInlineLength;
    if (fromEncoding == CharEncoding::Latin1) {
      maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
    } else {
      maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
    }

    // Number of characters which fit into a single register.
    size_t charsPerPtr = ptrWidth / fromWidth;

    // Unroll small loops.
    constexpr size_t unrollLoopLimit = 3;
    size_t loopCount = std::min(maxInlineLength, maximumLength) / charsPerPtr;

#ifdef JS_64BIT
    static constexpr size_t latin1MaxInlineByteLength =
        JSFatInlineString::MAX_LENGTH_LATIN1 * sizeof(char);
    static constexpr size_t twoByteMaxInlineByteLength =
        JSFatInlineString::MAX_LENGTH_TWO_BYTE * sizeof(char16_t);

    // |unrollLoopLimit| should be large enough to allow loop unrolling on
    // 64-bit targets.
    static_assert(latin1MaxInlineByteLength / ptrWidth == unrollLoopLimit,
                  "Latin-1 loops are unrolled on 64-bit");
    static_assert(twoByteMaxInlineByteLength / ptrWidth == unrollLoopLimit,
                  "Two-byte loops are unrolled on 64-bit");
#endif

    if (loopCount <= unrollLoopLimit) {
      Label labels[unrollLoopLimit];

      // Check up front how many characters can be copied.
      for (size_t i = 1; i < loopCount; i++) {
        masm.branch32(Assembler::Below, len, Imm32((i + 1) * charsPerPtr),
                      &labels[i]);
      }

      // Generate the unrolled loop body.
      for (size_t i = loopCount; i > 0; i--) {
        copyCharacters(ptrWidth);
        masm.sub32(Imm32(charsPerPtr), len);

        // Jump target for the previous length check.
        if (i != 1) {
          masm.bind(&labels[i - 1]);
        }
      }
    } else {
      // Runtime loop: copy a pointer-sized chunk per iteration.
      Label start;
      masm.bind(&start);
      copyCharacters(ptrWidth);
      masm.branchSub32(Assembler::NonZero, Imm32(charsPerPtr), len, &start);
    }

    masm.bind(&done);
  } else {
    // Mixed encodings: copy (and inflate) one character per iteration.
    Label start;
    masm.bind(&start);
    masm.loadChar(Address(from, 0), byteOpScratch, fromEncoding);
    masm.storeChar(byteOpScratch, Address(to, 0), toEncoding);
    masm.addPtr(Imm32(fromWidth), from);
    masm.addPtr(Imm32(toWidth), to);
    masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
  }
}
// Convenience overload of CopyStringChars for the common case where the
// source and destination use the same character encoding.
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding encoding, size_t maximumLength) {
  CopyStringChars(masm, to, from, len, byteOpScratch, encoding, encoding,
                  maximumLength);
}
// Copy all of |input|'s characters into the two-byte buffer |destChars|,
// inflating Latin-1 characters to char16_t when the input is a Latin-1
// string.
//
// Clobbers |input| (repurposed as the source chars pointer), |temp1| (holds
// the string length during the copy) and |temp2|.
static void CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input,
                                        Register destChars, Register temp1,
                                        Register temp2) {
  // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
  // have to inflate.

  Label isLatin1, done;
  masm.loadStringLength(input, temp1);
  masm.branchLatin1String(input, &isLatin1);
  {
    // Two-byte input: plain two-byte to two-byte copy.
    masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
    masm.movePtr(temp2, input);
    CopyStringChars(masm, destChars, input, temp1, temp2,
                    CharEncoding::TwoByte);
    masm.jump(&done);
  }
  masm.bind(&isLatin1);
  {
    // Latin-1 input: copy with inflation to two-byte.
    masm.loadStringChars(input, temp2, CharEncoding::Latin1);
    masm.movePtr(temp2, input);
    CopyStringChars(masm, destChars, input, temp1, temp2, CharEncoding::Latin1,
                    CharEncoding::TwoByte);
  }
  masm.bind(&done);
}
// Allocate a JSThinInlineString when |length| fits in one, otherwise a
// JSFatInlineString, and initialize the new string's flags and length
// fields. Jumps to |failure| when GC allocation fails. The character data is
// NOT written here; callers fill it in afterwards.
//
// |length| must not exceed the fat-inline maximum for |encoding| (asserted
// in debug builds below).
static void AllocateThinOrFatInlineString(MacroAssembler& masm, Register output,
                                          Register length, Register temp,
                                          gc::Heap initialStringHeap,
                                          Label* failure,
                                          CharEncoding encoding) {
#ifdef DEBUG
  size_t maxInlineLength;
  if (encoding == CharEncoding::Latin1) {
    maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
  } else {
    maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
  }

  Label ok;
  masm.branch32(Assembler::BelowOrEqual, length, Imm32(maxInlineLength), &ok);
  masm.assumeUnreachable("string length too large to be allocated as inline");
  masm.bind(&ok);
#endif

  // Pick the thin-inline limit for this encoding; anything larger gets a
  // fat inline string.
  size_t maxThinInlineLength;
  if (encoding == CharEncoding::Latin1) {
    maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
  } else {
    maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;
  }

  Label isFat, allocDone;
  masm.branch32(Assembler::Above, length, Imm32(maxThinInlineLength), &isFat);
  {
    uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
    if (encoding == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }
    masm.newGCString(output, temp, initialStringHeap, failure);
    masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
    masm.jump(&allocDone);
  }
  masm.bind(&isFat);
  {
    uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
    if (encoding == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }
    masm.newGCFatInlineString(output, temp, initialStringHeap, failure);
    masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
  }
  masm.bind(&allocDone);

  // Store length.
  masm.store32(length, Address(output, JSString::offsetOfLength()));
}
// Concatenate the two linear strings |lhs| and |rhs| into a freshly
// allocated inline string in |output|. On entry the combined result length
// must already be in |temp2|. Jumps to |failure| when either operand is a
// rope or when allocation fails. Clobbers |lhs|, |rhs| and all temps.
static void ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs,
                               Register output, Register temp1, Register temp2,
                               Register temp3, gc::Heap initialStringHeap,
                               Label* failure, CharEncoding encoding) {
  JitSpew(JitSpew_Codegen, "# Emitting ConcatInlineString (encoding=%s)",
          (encoding == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // State: result length in temp2.

  // Ensure both strings are linear.
  masm.branchIfRope(lhs, failure);
  masm.branchIfRope(rhs, failure);

  // Allocate a JSThinInlineString or JSFatInlineString.
  AllocateThinOrFatInlineString(masm, output, temp2, temp1, initialStringHeap,
                                failure, encoding);

  // Load chars pointer in temp2.
  masm.loadInlineStringCharsForStore(output, temp2);

  // Helper emitting the character copy for one operand. For two-byte
  // results the source may still be Latin-1 and must be inflated.
  auto copyChars = [&](Register src) {
    if (encoding == CharEncoding::TwoByte) {
      CopyStringCharsMaybeInflate(masm, src, temp2, temp1, temp3);
    } else {
      masm.loadStringLength(src, temp3);
      masm.loadStringChars(src, temp1, CharEncoding::Latin1);
      masm.movePtr(temp1, src);
      CopyStringChars(masm, temp2, src, temp3, temp1, CharEncoding::Latin1);
    }
  };

  // Copy lhs chars. Note that this advances temp2 to point to the next
  // char. This also clobbers the lhs register.
  copyChars(lhs);

  // Copy rhs chars. Clobbers the rhs register.
  copyChars(rhs);
}
// Codegen for substring extraction. Fast paths, in order:
//  - zero length           -> the empty atom
//  - length == str.length  -> the input string itself
//  - length <= 2           -> lookup in the static strings table
//  - short results         -> thin/fat inline string with copied chars
//  - longer results        -> dependent string sharing the base's chars
// Ropes and allocation failures fall back to the SubstringKernel VM call.
void CodeGenerator::visitSubstr(LSubstr* lir) {
  Register string = ToRegister(lir->string());
  Register begin = ToRegister(lir->begin());
  Register length = ToRegister(lir->length());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp2());

  // On x86 there are not enough registers. In that case reuse the string
  // register as temporary.
  Register temp1 =
      lir->temp1()->isBogusTemp() ? string : ToRegister(lir->temp1());

  // Upper bound on the result length, used to statically prune the inline
  // and dependent string paths below when range analysis proves the result
  // is always short.
  size_t maximumLength = SIZE_MAX;

  Range* range = lir->mir()->length()->range();
  if (range && range->hasInt32UpperBound()) {
    MOZ_ASSERT(range->upper() >= 0);
    maximumLength = size_t(range->upper());
  }

  static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <=
                JSThinInlineString::MAX_LENGTH_LATIN1);

  static_assert(JSFatInlineString::MAX_LENGTH_TWO_BYTE <=
                JSFatInlineString::MAX_LENGTH_LATIN1);

  bool tryFatInlineOrDependent =
      maximumLength > JSThinInlineString::MAX_LENGTH_TWO_BYTE;
  bool tryDependent = maximumLength > JSFatInlineString::MAX_LENGTH_TWO_BYTE;

#ifdef DEBUG
  if (maximumLength != SIZE_MAX) {
    Label ok;
    masm.branch32(Assembler::BelowOrEqual, length, Imm32(maximumLength), &ok);
    masm.assumeUnreachable("length should not exceed maximum length");
    masm.bind(&ok);
  }
#endif

  Label nonZero, nonInput;

  // For every edge case use the C++ variant.
  // Note: we also use this upon allocation failure in newGCString and
  // newGCFatInlineString. To squeeze out even more performance those failures
  // can be handled by allocate in ool code and returning to jit code to fill
  // in all data.
  using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
                           int32_t len);
  OutOfLineCode* ool = oolCallVM<Fn, SubstringKernel>(
      lir, ArgList(string, begin, length), StoreRegisterTo(output));
  Label* slowPath = ool->entry();
  Label* done = ool->rejoin();

  // Zero length, return emptystring.
  masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
  const JSAtomState& names = gen->runtime->names();
  masm.movePtr(ImmGCPtr(names.empty_), output);
  masm.jump(done);

  // Substring from 0..|str.length|, return str.
  masm.bind(&nonZero);
  masm.branch32(Assembler::NotEqual,
                Address(string, JSString::offsetOfLength()), length, &nonInput);
#ifdef DEBUG
  {
    Label ok;
    masm.branchTest32(Assembler::Zero, begin, begin, &ok);
    masm.assumeUnreachable("length == str.length implies begin == 0");
    masm.bind(&ok);
  }
#endif
  masm.movePtr(string, output);
  masm.jump(done);

  // Use slow path for ropes.
  masm.bind(&nonInput);
  masm.branchIfRope(string, slowPath);

  // Optimize one and two character strings.
  Label nonStatic;
  masm.branch32(Assembler::Above, length, Imm32(2), &nonStatic);
  {
    Label loadLengthOne, loadLengthTwo;

    // Load the first substring char into temp2 and (for length-two results)
    // the second char into temp0, for the static-strings lookup below.
    auto loadChars = [&](CharEncoding encoding, bool fallthru) {
      size_t size = encoding == CharEncoding::Latin1 ? sizeof(JS::Latin1Char)
                                                     : sizeof(char16_t);

      masm.loadStringChars(string, temp0, encoding);
      masm.loadChar(temp0, begin, temp2, encoding);
      masm.branch32(Assembler::Equal, length, Imm32(1), &loadLengthOne);
      masm.loadChar(temp0, begin, temp0, encoding, int32_t(size));
      if (!fallthru) {
        masm.jump(&loadLengthTwo);
      }
    };

    Label isLatin1;
    masm.branchLatin1String(string, &isLatin1);
    loadChars(CharEncoding::TwoByte, /* fallthru = */ false);

    masm.bind(&isLatin1);
    loadChars(CharEncoding::Latin1, /* fallthru = */ true);

    // Try to load a length-two static string.
    masm.bind(&loadLengthTwo);
    masm.lookupStaticString(temp2, temp0, output, gen->runtime->staticStrings(),
                            &nonStatic);
    masm.jump(done);

    // Try to load a length-one static string.
    masm.bind(&loadLengthOne);
    masm.lookupStaticString(temp2, output, gen->runtime->staticStrings(),
                            &nonStatic);
    masm.jump(done);
  }
  masm.bind(&nonStatic);

  // Allocate either a JSThinInlineString or JSFatInlineString, or jump to
  // notInline if we need a dependent string.
  Label notInline;
  {
    static_assert(JSThinInlineString::MAX_LENGTH_LATIN1 <
                  JSFatInlineString::MAX_LENGTH_LATIN1);
    static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <
                  JSFatInlineString::MAX_LENGTH_TWO_BYTE);

    // Use temp2 to store the JS(Thin|Fat)InlineString flags. This avoids having
    // duplicate newGCString/newGCFatInlineString codegen for Latin1 vs TwoByte
    // strings.

    Label allocFat, allocDone;
    if (tryFatInlineOrDependent) {
      Label isLatin1, allocThin;
      masm.branchLatin1String(string, &isLatin1);
      {
        if (tryDependent) {
          masm.branch32(Assembler::Above, length,
                        Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
                        &notInline);
        }
        masm.move32(Imm32(0), temp2);
        masm.branch32(Assembler::Above, length,
                      Imm32(JSThinInlineString::MAX_LENGTH_TWO_BYTE),
                      &allocFat);
        masm.jump(&allocThin);
      }

      masm.bind(&isLatin1);
      {
        if (tryDependent) {
          masm.branch32(Assembler::Above, length,
                        Imm32(JSFatInlineString::MAX_LENGTH_LATIN1),
                        &notInline);
        }
        masm.move32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
        masm.branch32(Assembler::Above, length,
                      Imm32(JSThinInlineString::MAX_LENGTH_LATIN1), &allocFat);
      }

      masm.bind(&allocThin);
    } else {
      // The result always fits in a thin inline string: just copy the
      // Latin-1 bit from the input's flags.
      masm.load32(Address(string, JSString::offsetOfFlags()), temp2);
      masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
    }

    {
      masm.newGCString(output, temp0, initialStringHeap(), slowPath);
      masm.or32(Imm32(JSString::INIT_THIN_INLINE_FLAGS), temp2);
    }

    if (tryFatInlineOrDependent) {
      masm.jump(&allocDone);

      masm.bind(&allocFat);
      {
        masm.newGCFatInlineString(output, temp0, initialStringHeap(), slowPath);
        masm.or32(Imm32(JSString::INIT_FAT_INLINE_FLAGS), temp2);
      }

      masm.bind(&allocDone);
    }

    masm.store32(temp2, Address(output, JSString::offsetOfFlags()));
    masm.store32(length, Address(output, JSString::offsetOfLength()));

    // Copy |length| chars starting at |begin| into the new inline string.
    // On x86, temp1 aliases |string|, so preserve it across the copy.
    auto initializeInlineString = [&](CharEncoding encoding) {
      masm.loadStringChars(string, temp0, encoding);
      masm.addToCharPtr(temp0, begin, encoding);
      if (temp1 == string) {
        masm.push(string);
      }
      masm.loadInlineStringCharsForStore(output, temp1);
      CopyStringChars(masm, temp1, temp0, length, temp2, encoding,
                      maximumLength);
      masm.loadStringLength(output, length);
      if (temp1 == string) {
        masm.pop(string);
      }
    };

    Label isInlineLatin1;
    masm.branchTest32(Assembler::NonZero, temp2,
                      Imm32(JSString::LATIN1_CHARS_BIT), &isInlineLatin1);
    initializeInlineString(CharEncoding::TwoByte);
    masm.jump(done);

    masm.bind(&isInlineLatin1);
    initializeInlineString(CharEncoding::Latin1);
  }

  // Handle other cases with a DependentString.
  if (tryDependent) {
    masm.jump(done);

    masm.bind(&notInline);
    masm.newGCString(output, temp0, gen->initialStringHeap(), slowPath);
    masm.store32(length, Address(output, JSString::offsetOfLength()));
    masm.storeDependentStringBase(string, output);

    // Point the dependent string at |begin| within the base's chars.
    auto initializeDependentString = [&](CharEncoding encoding) {
      uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
      if (encoding == CharEncoding::Latin1) {
        flags |= JSString::LATIN1_CHARS_BIT;
      }

      masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
      masm.loadNonInlineStringChars(string, temp0, encoding);
      masm.addToCharPtr(temp0, begin, encoding);
      masm.storeNonInlineStringChars(temp0, output);
    };

    Label isLatin1;
    masm.branchLatin1String(string, &isLatin1);
    initializeDependentString(CharEncoding::TwoByte);
    masm.jump(done);

    masm.bind(&isLatin1);
    initializeDependentString(CharEncoding::Latin1);
  }

  masm.bind(done);
}
// Generate the shared string-concatenation stub. Inputs are the two string
// operands in CallTempReg0/1; the result string is returned in CallTempReg5,
// or nullptr on failure. Empty operands return the other operand unchanged;
// short results are built as inline strings; everything else becomes a rope.
JitCode* JitZone::generateStringConcatStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting StringConcat stub");

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateStringConcatStub");

  Register lhs = CallTempReg0;
  Register rhs = CallTempReg1;
  Register temp1 = CallTempReg2;
  Register temp2 = CallTempReg3;
  Register temp3 = CallTempReg4;
  Register output = CallTempReg5;

  Label failure;
#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // If lhs is empty, return rhs.
  Label leftEmpty;
  masm.loadStringLength(lhs, temp1);
  masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);

  // If rhs is empty, return lhs.
  Label rightEmpty;
  masm.loadStringLength(rhs, temp2);
  masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);

  // temp2 = combined result length.
  masm.add32(temp1, temp2);

  // Check if we can use a JSInlineString. The result is a Latin1 string if
  // lhs and rhs are both Latin1, so we AND the flags.
  Label isInlineTwoByte, isInlineLatin1;
  masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
  masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);

  Label isLatin1, notInline;
  masm.branchTest32(Assembler::NonZero, temp1,
                    Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
  {
    masm.branch32(Assembler::BelowOrEqual, temp2,
                  Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
                  &isInlineTwoByte);
    masm.jump(&notInline);
  }
  masm.bind(&isLatin1);
  {
    masm.branch32(Assembler::BelowOrEqual, temp2,
                  Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &isInlineLatin1);
  }
  masm.bind(&notInline);

  // Keep AND'ed flags in temp1.

  // Ensure result length <= JSString::MAX_LENGTH.
  masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);

  // Allocate a new rope, guaranteed to be in the nursery if initialStringHeap
  // == gc::Heap::Default. (As a result, no post barriers are needed below.)
  masm.newGCString(output, temp3, initialStringHeap, &failure);

  // Store rope length and flags. temp1 still holds the result of AND'ing the
  // lhs and rhs flags, so we just have to clear the other flags to get our rope
  // flags (Latin1 if both lhs and rhs are Latin1).
  static_assert(JSString::INIT_ROPE_FLAGS == 0,
                "Rope type flags must have no bits set");
  masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
  masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
  masm.store32(temp2, Address(output, JSString::offsetOfLength()));

  // Store left and right nodes.
  masm.storeRopeChildren(lhs, rhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&leftEmpty);
  masm.mov(rhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&rightEmpty);
  masm.mov(lhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&isInlineTwoByte);
  ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
                     initialStringHeap, &failure, CharEncoding::TwoByte);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&isInlineLatin1);
  ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
                     initialStringHeap, &failure, CharEncoding::Latin1);
  masm.pop(FramePointer);
  masm.ret();

  // Failure: signal the caller with a nullptr result.
  masm.bind(&failure);
  masm.movePtr(ImmPtr(nullptr), output);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);

  CollectPerfSpewerJitCodeProfile(code, "StringConcatStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "StringConcatStub");
#endif

  return code;
}
// Generate a trampoline that calls js_free on the pointer in CallTempReg0,
// preserving all other volatile registers across the call.
void JitRuntime::generateFreeStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateFreeStub");

  const Register regSlots = CallTempReg0;

  freeStubOffset_ = startTrampolineCode(masm);

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  // Save every volatile register except the argument register itself.
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  regs.takeUnchecked(regSlots);
  LiveRegisterSet save(regs.asLiveSet());
  masm.PushRegsInMask(save);

  const Register regTemp = regs.takeAnyGeneral();
  MOZ_ASSERT(regTemp != regSlots);

  using Fn = void (*)(void* p);
  masm.setupUnalignedABICall(regTemp);
  masm.passABIArg(regSlots);
  masm.callWithABI<Fn, js_free>(ABIType::General,
                                CheckUnsafeCallWithABI::DontCheckOther);

  masm.PopRegsInMask(save);

  masm.ret();
}
// Generate the lazy-link trampoline: calls LazyLinkTopActivation under a
// fake exit frame to obtain the linked code entry point, then tail-jumps to
// it (returned in ReturnReg).
void JitRuntime::generateLazyLinkStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateLazyLinkStub");

  lazyLinkStubOffset_ = startTrampolineCode(masm);

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  Register temp0 = regs.takeAny();
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();

  // temp0 = cx, temp1 = pointer to the LazyLinkExitFrameLayout.
  masm.loadJSContext(temp0);
  masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::LazyLink);
  masm.moveStackPtrTo(temp1);

  using Fn = uint8_t* (*)(JSContext* cx, LazyLinkExitFrameLayout* frame);
  masm.setupUnalignedABICall(temp2);
  masm.passABIArg(temp0);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, LazyLinkTopActivation>(
      ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Discard exit frame and restore frame pointer.
  masm.leaveExitFrame(0);
  masm.pop(FramePointer);

#ifdef JS_USE_LINK_REGISTER
  // Restore the return address such that the emitPrologue function of the
  // CodeGenerator can push it back on the stack with pushReturnAddress.
  masm.popReturnAddress();
#endif
  masm.jump(ReturnReg);
}
// Generate the interpreter fallback trampoline: invokes the callee via
// InvokeFromInterpreterStub under a fake exit frame, propagates failure to
// the failure label, and loads the return value (stored by the callee into
// argv[0]) into JSReturnOperand.
void JitRuntime::generateInterpreterStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateInterpreterStub");

  interpreterStubOffset_ = startTrampolineCode(masm);

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  Register temp0 = regs.takeAny();
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();

  // temp0 = cx, temp1 = pointer to the InterpreterStubExitFrameLayout.
  masm.loadJSContext(temp0);
  masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::InterpreterStub);
  masm.moveStackPtrTo(temp1);

  using Fn = bool (*)(JSContext* cx, InterpreterStubExitFrameLayout* frame);
  masm.setupUnalignedABICall(temp2);
  masm.passABIArg(temp0);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, InvokeFromInterpreterStub>(
      ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

  // Discard exit frame and restore frame pointer.
  masm.leaveExitFrame(0);
  masm.pop(FramePointer);

  // InvokeFromInterpreterStub stores the return value in argv[0], where the
  // caller stored |this|. Subtract |sizeof(void*)| for the frame pointer we
  // just popped.
  masm.loadValue(Address(masm.getStackPointer(),
                         JitFrameLayout::offsetOfThis() - sizeof(void*)),
                 JSReturnOperand);
  masm.ret();
}
// Generate a stub that converts the Value in R0 to an Int32 Value in-place
// when it holds a double that is exactly representable as an int32 (negative
// zero excluded from the check); any other input is left untouched.
void JitRuntime::generateDoubleToInt32ValueStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateDoubleToInt32ValueStub");
  doubleToInt32ValueStubOffset_ = startTrampolineCode(masm);

  Label done;
  masm.branchTestDouble(Assembler::NotEqual, R0, &done);

  // Bails to |done| (leaving R0 as-is) when the double has no exact int32
  // representation.
  masm.unboxDouble(R0, FloatReg0);
  masm.convertDoubleToInt32(FloatReg0, R1.scratchReg(), &done,
                            /* negativeZeroCheck = */ false);
  masm.tagValue(JSVAL_TYPE_INT32, R1.scratchReg(), R0);

  masm.bind(&done);
  masm.abiret();
}
12871 void CodeGenerator::visitLinearizeString(LLinearizeString* lir) {
12872 Register str = ToRegister(lir->str());
12873 Register output = ToRegister(lir->output());
12875 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12876 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12877 lir, ArgList(str), StoreRegisterTo(output));
12879 masm.branchIfRope(str, ool->entry());
12881 masm.movePtr(str, output);
12882 masm.bind(ool->rejoin());
12885 void CodeGenerator::visitLinearizeForCharAccess(LLinearizeForCharAccess* lir) {
12886 Register str = ToRegister(lir->str());
12887 Register index = ToRegister(lir->index());
12888 Register output = ToRegister(lir->output());
12890 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12891 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12892 lir, ArgList(str), StoreRegisterTo(output));
12894 masm.branchIfNotCanLoadStringChar(str, index, output, ool->entry());
12896 masm.movePtr(str, output);
12897 masm.bind(ool->rejoin());
12900 void CodeGenerator::visitLinearizeForCodePointAccess(
12901 LLinearizeForCodePointAccess* lir) {
12902 Register str = ToRegister(lir->str());
12903 Register index = ToRegister(lir->index());
12904 Register output = ToRegister(lir->output());
12905 Register temp = ToRegister(lir->temp0());
12907 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12908 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12909 lir, ArgList(str), StoreRegisterTo(output));
12911 masm.branchIfNotCanLoadStringCodePoint(str, index, output, temp,
12912 ool->entry());
12914 masm.movePtr(str, output);
12915 masm.bind(ool->rejoin());
12918 void CodeGenerator::visitToRelativeStringIndex(LToRelativeStringIndex* lir) {
12919 Register index = ToRegister(lir->index());
12920 Register length = ToRegister(lir->length());
12921 Register output = ToRegister(lir->output());
12923 masm.move32(Imm32(0), output);
12924 masm.cmp32Move32(Assembler::LessThan, index, Imm32(0), length, output);
12925 masm.add32(index, output);
// Codegen for charCodeAt: loads the char code at the (in-bounds) index into
// |output|, calling jit::CharCodeAt out-of-line when the char can't be
// loaded inline (e.g. for ropes).
void CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) {
  Register str = ToRegister(lir->str());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);

  if (lir->index()->isBogus()) {
    // No index register was allocated: the index is the constant 0.
    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
                                               StoreRegisterTo(output));
    masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    Register index = ToRegister(lir->index());

    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
                                               StoreRegisterTo(output));
    masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
    masm.bind(ool->rejoin());
  }
}
// Codegen for charCodeAt with explicit bounds handling: produces the char
// code at the index, or -1 when the index is out of bounds.
void CodeGenerator::visitCharCodeAtOrNegative(LCharCodeAtOrNegative* lir) {
  Register str = ToRegister(lir->str());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);

  // Return -1 for out-of-bounds access.
  masm.move32(Imm32(-1), output);

  if (lir->index()->isBogus()) {
    // No index register was allocated: the index is the constant 0, so only
    // the empty string is out of bounds.
    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
                                               StoreRegisterTo(output));

    masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
                  Imm32(0), ool->rejoin());
    masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    Register index = ToRegister(lir->index());

    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
                                               StoreRegisterTo(output));

    // Spectre-safe bounds check: out-of-bounds keeps the -1 and skips the
    // load entirely.
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              temp0, ool->rejoin());
    masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
    masm.bind(ool->rejoin());
  }
}
12983 void CodeGenerator::visitCodePointAt(LCodePointAt* lir) {
12984 Register str = ToRegister(lir->str());
12985 Register index = ToRegister(lir->index());
12986 Register output = ToRegister(lir->output());
12987 Register temp0 = ToRegister(lir->temp0());
12988 Register temp1 = ToRegister(lir->temp1());
12990 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12991 auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
12992 StoreRegisterTo(output));
12994 masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
12995 masm.bind(ool->rejoin());
// Codegen for codePointAt with explicit bounds handling: produces the code
// point at the index, or -1 when the index is out of bounds.
void CodeGenerator::visitCodePointAtOrNegative(LCodePointAtOrNegative* lir) {
  Register str = ToRegister(lir->str());
  Register index = ToRegister(lir->index());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
  auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
                                              StoreRegisterTo(output));

  // Return -1 for out-of-bounds access.
  masm.move32(Imm32(-1), output);

  // Spectre-safe bounds check: out-of-bounds keeps the -1 and skips the
  // load entirely.
  masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                            temp0, ool->rejoin());
  masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
  masm.bind(ool->rejoin());
}
13018 void CodeGenerator::visitNegativeToNaN(LNegativeToNaN* lir) {
13019 Register input = ToRegister(lir->input());
13020 ValueOperand output = ToOutValue(lir);
13022 masm.tagValue(JSVAL_TYPE_INT32, input, output);
13024 Label done;
13025 masm.branchTest32(Assembler::NotSigned, input, input, &done);
13026 masm.moveValue(JS::NaNValue(), output);
13027 masm.bind(&done);
13030 void CodeGenerator::visitNegativeToUndefined(LNegativeToUndefined* lir) {
13031 Register input = ToRegister(lir->input());
13032 ValueOperand output = ToOutValue(lir);
13034 masm.tagValue(JSVAL_TYPE_INT32, input, output);
13036 Label done;
13037 masm.branchTest32(Assembler::NotSigned, input, input, &done);
13038 masm.moveValue(JS::UndefinedValue(), output);
13039 masm.bind(&done);
13042 void CodeGenerator::visitFromCharCode(LFromCharCode* lir) {
13043 Register code = ToRegister(lir->code());
13044 Register output = ToRegister(lir->output());
13046 using Fn = JSLinearString* (*)(JSContext*, int32_t);
13047 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
13048 StoreRegisterTo(output));
13050 // OOL path if code >= UNIT_STATIC_LIMIT.
13051 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
13052 ool->entry());
13054 masm.bind(ool->rejoin());
// Codegen for fromCharCode that maps negative char codes to the empty
// string instead of calling into the VM.
void CodeGenerator::visitFromCharCodeEmptyIfNegative(
    LFromCharCodeEmptyIfNegative* lir) {
  Register code = ToRegister(lir->code());
  Register output = ToRegister(lir->output());

  using Fn = JSLinearString* (*)(JSContext*, int32_t);
  auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
                                                    StoreRegisterTo(output));

  // Return the empty string for negative inputs.
  const JSAtomState& names = gen->runtime->names();
  masm.movePtr(ImmGCPtr(names.empty_), output);
  masm.branchTest32(Assembler::Signed, code, code, ool->rejoin());

  // OOL path if code >= UNIT_STATIC_LIMIT.
  masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
                          ool->entry());

  masm.bind(ool->rejoin());
}
// Codegen for fromCharCode that maps negative char codes to |undefined|
// instead of calling into the VM. The string result is built in the output
// Value's scratch register and then boxed.
void CodeGenerator::visitFromCharCodeUndefinedIfNegative(
    LFromCharCodeUndefinedIfNegative* lir) {
  Register code = ToRegister(lir->code());
  ValueOperand output = ToOutValue(lir);
  Register temp = output.scratchReg();

  using Fn = JSLinearString* (*)(JSContext*, int32_t);
  auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
                                                    StoreRegisterTo(temp));

  // Return |undefined| for negative inputs.
  Label done;
  masm.moveValue(UndefinedValue(), output);
  masm.branchTest32(Assembler::Signed, code, code, &done);

  // OOL path if code >= UNIT_STATIC_LIMIT.
  masm.lookupStaticString(code, temp, gen->runtime->staticStrings(),
                          ool->entry());

  masm.bind(ool->rejoin());
  masm.tagValue(JSVAL_TYPE_STRING, temp, output);

  masm.bind(&done);
}
// Codegen for String.fromCodePoint with a single code point argument.
// Latin-1 code points come from the static strings table; BMP and
// supplementary code points are written into a fresh thin inline string
// (one resp. two UTF-16 code units). Invalid code points bail out.
void CodeGenerator::visitFromCodePoint(LFromCodePoint* lir) {
  Register codePoint = ToRegister(lir->codePoint());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  LSnapshot* snapshot = lir->snapshot();

  // The OOL path is only taken when we can't allocate the inline string.
  using Fn = JSLinearString* (*)(JSContext*, char32_t);
  auto* ool = oolCallVM<Fn, js::StringFromCodePoint>(lir, ArgList(codePoint),
                                                     StoreRegisterTo(output));

  Label isTwoByte;
  Label* done = ool->rejoin();

  static_assert(
      StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
      "Latin-1 strings can be loaded from static strings");

  {
    masm.lookupStaticString(codePoint, output, gen->runtime->staticStrings(),
                            &isTwoByte);
    masm.jump(done);
  }
  masm.bind(&isTwoByte);
  {
    // Use a bailout if the input is not a valid code point, because
    // MFromCodePoint is movable and it'd be observable when a moved
    // fromCodePoint throws an exception before its actual call site.
    bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax),
                 snapshot);

    // Allocate a JSThinInlineString.
    {
      static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE >= 2,
                    "JSThinInlineString can hold a supplementary code point");

      uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
      masm.newGCString(output, temp0, gen->initialStringHeap(), ool->entry());
      masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
    }

    Label isSupplementary;
    masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(unicode::NonBMPMin),
                  &isSupplementary);
    {
      // BMP code point: a single UTF-16 code unit.

      // Store length.
      masm.store32(Imm32(1), Address(output, JSString::offsetOfLength()));

      // Load chars pointer in temp0.
      masm.loadInlineStringCharsForStore(output, temp0);

      masm.store16(codePoint, Address(temp0, 0));

      masm.jump(done);
    }
    masm.bind(&isSupplementary);
    {
      // Supplementary code point: encode as a surrogate pair.

      // Store length.
      masm.store32(Imm32(2), Address(output, JSString::offsetOfLength()));

      // Load chars pointer in temp0.
      masm.loadInlineStringCharsForStore(output, temp0);

      // Inlined unicode::LeadSurrogate(uint32_t).
      masm.move32(codePoint, temp1);
      masm.rshift32(Imm32(10), temp1);
      masm.add32(Imm32(unicode::LeadSurrogateMin - (unicode::NonBMPMin >> 10)),
                 temp1);

      masm.store16(temp1, Address(temp0, 0));

      // Inlined unicode::TrailSurrogate(uint32_t).
      masm.move32(codePoint, temp1);
      masm.and32(Imm32(0x3FF), temp1);
      masm.or32(Imm32(unicode::TrailSurrogateMin), temp1);

      masm.store16(temp1, Address(temp0, sizeof(char16_t)));
    }
  }

  masm.bind(done);
}
13187 void CodeGenerator::visitStringIncludes(LStringIncludes* lir) {
13188 pushArg(ToRegister(lir->searchString()));
13189 pushArg(ToRegister(lir->string()));
13191 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13192 callVM<Fn, js::StringIncludes>(lir);
// Shared emitter for the inline (SIMD) fast paths of String.prototype.includes
// and String.prototype.indexOf when the search string is a constant linear
// string of one or two characters. |LIns| selects the flavor: for
// LStringIncludesSIMD the result is a bool, for LStringIndexOfSIMD an index
// (-1 when not found). |ool| provides the VM fallback (entry) and the join
// point (rejoin).
13195 template <typename LIns>
13196 static void CallStringMatch(MacroAssembler& masm, LIns* lir, OutOfLineCode* ool,
13197 LiveRegisterSet volatileRegs) {
13198 Register string = ToRegister(lir->string());
13199 Register output = ToRegister(lir->output());
13200 Register tempLength = ToRegister(lir->temp0());
13201 Register tempChars = ToRegister(lir->temp1());
13202 Register maybeTempPat = ToTempRegisterOrInvalid(lir->temp2());
13204 const JSLinearString* searchString = lir->searchString();
13205 size_t length = searchString->length();
13206 MOZ_ASSERT(length == 1 || length == 2);
13208 // The additional temp register is only needed when searching for two
13209 // pattern characters.
13210 MOZ_ASSERT_IF(length == 2, maybeTempPat != InvalidReg);
// Preset the "not found" result: false for includes, -1 for indexOf.
13212 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
13213 masm.move32(Imm32(0), output);
13214 } else {
13215 masm.move32(Imm32(-1), output);
13218 masm.loadStringLength(string, tempLength);
13220 // Can't be a substring when the string is smaller than the search string.
13221 Label done;
13222 masm.branch32(Assembler::Below, tempLength, Imm32(length), ool->rejoin());
13224 bool searchStringIsPureTwoByte = false;
13225 if (searchString->hasTwoByteChars()) {
13226 JS::AutoCheckCannotGC nogc;
13227 searchStringIsPureTwoByte =
13228 !mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc));
13231 // Pure two-byte strings can't occur in a Latin-1 string.
13232 if (searchStringIsPureTwoByte) {
13233 masm.branchLatin1String(string, ool->rejoin());
13236 // Slow path when we need to linearize the string.
13237 masm.branchIfRope(string, ool->entry());
13239 Label restoreVolatile;
// Emit the ABI call to the appropriate mozilla::SIMD matcher for the given
// character encoding. The matcher returns a pointer to the first match or
// nullptr; |output| doubles as a scratch for the pattern char(s).
13241 auto callMatcher = [&](CharEncoding encoding) {
13242 masm.loadStringChars(string, tempChars, encoding);
13244 LiveGeneralRegisterSet liveRegs;
13245 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
13246 // Save |tempChars| to compute the result index.
13247 liveRegs.add(tempChars);
13249 #ifdef DEBUG
13250 // Save |tempLength| in debug-mode for assertions.
13251 liveRegs.add(tempLength);
13252 #endif
13254 // Exclude non-volatile registers.
13255 liveRegs.set() = GeneralRegisterSet::Intersect(
13256 liveRegs.set(), GeneralRegisterSet::Volatile());
13258 masm.PushRegsInMask(liveRegs);
13261 if (length == 1) {
13262 char16_t pat = searchString->latin1OrTwoByteChar(0);
13263 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
13264 pat <= JSString::MAX_LATIN1_CHAR);
13266 masm.move32(Imm32(pat), output);
13268 masm.setupAlignedABICall();
13269 masm.passABIArg(tempChars);
13270 masm.passABIArg(output);
13271 masm.passABIArg(tempLength);
13272 if (encoding == CharEncoding::Latin1) {
13273 using Fn = const char* (*)(const char*, char, size_t);
13274 masm.callWithABI<Fn, mozilla::SIMD::memchr8>(
13275 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13276 } else {
13277 using Fn = const char16_t* (*)(const char16_t*, char16_t, size_t);
13278 masm.callWithABI<Fn, mozilla::SIMD::memchr16>(
13279 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13281 } else {
// Two-character pattern: use the memchr2x variants, which search for two
// consecutive characters at once.
13282 char16_t pat0 = searchString->latin1OrTwoByteChar(0);
13283 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
13284 pat0 <= JSString::MAX_LATIN1_CHAR);
13286 char16_t pat1 = searchString->latin1OrTwoByteChar(1);
13287 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
13288 pat1 <= JSString::MAX_LATIN1_CHAR);
13290 masm.move32(Imm32(pat0), output);
13291 masm.move32(Imm32(pat1), maybeTempPat);
13293 masm.setupAlignedABICall();
13294 masm.passABIArg(tempChars);
13295 masm.passABIArg(output);
13296 masm.passABIArg(maybeTempPat);
13297 masm.passABIArg(tempLength);
13298 if (encoding == CharEncoding::Latin1) {
13299 using Fn = const char* (*)(const char*, char, char, size_t);
13300 masm.callWithABI<Fn, mozilla::SIMD::memchr2x8>(
13301 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13302 } else {
13303 using Fn =
13304 const char16_t* (*)(const char16_t*, char16_t, char16_t, size_t);
13305 masm.callWithABI<Fn, mozilla::SIMD::memchr2x16>(
13306 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13310 masm.storeCallPointerResult(output);
13312 // Convert to string index for `indexOf`.
13313 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
13314 // Restore |tempChars|. (And in debug mode |tempLength|.)
13315 masm.PopRegsInMask(liveRegs);
13317 Label found;
13318 masm.branchPtr(Assembler::NotEqual, output, ImmPtr(nullptr), &found);
13320 masm.move32(Imm32(-1), output);
13321 masm.jump(&restoreVolatile);
13323 masm.bind(&found);
13325 #ifdef DEBUG
13326 // Check lower bound.
13327 Label lower;
13328 masm.branchPtr(Assembler::AboveOrEqual, output, tempChars, &lower);
13329 masm.assumeUnreachable("result pointer below string chars");
13330 masm.bind(&lower);
13332 // Compute the end position of the characters.
13333 auto scale = encoding == CharEncoding::Latin1 ? TimesOne : TimesTwo;
13334 masm.computeEffectiveAddress(BaseIndex(tempChars, tempLength, scale),
13335 tempLength);
13337 // Check upper bound.
13338 Label upper;
13339 masm.branchPtr(Assembler::Below, output, tempLength, &upper);
13340 masm.assumeUnreachable("result pointer above string chars");
13341 masm.bind(&upper);
13342 #endif
// Pointer difference -> character index (divide by 2 for two-byte chars).
13344 masm.subPtr(tempChars, output);
13346 if (encoding == CharEncoding::TwoByte) {
13347 masm.rshiftPtr(Imm32(1), output);
// Keep the registers holding our state live across the ABI call; everything
// else volatile is saved/restored around the whole matching sequence.
13352 volatileRegs.takeUnchecked(output);
13353 volatileRegs.takeUnchecked(tempLength);
13354 volatileRegs.takeUnchecked(tempChars);
13355 if (maybeTempPat != InvalidReg) {
13356 volatileRegs.takeUnchecked(maybeTempPat);
13358 masm.PushRegsInMask(volatileRegs);
13360 // Handle the case when the input is a Latin-1 string.
13361 if (!searchStringIsPureTwoByte) {
13362 Label twoByte;
13363 masm.branchTwoByteString(string, &twoByte);
13365 callMatcher(CharEncoding::Latin1);
13366 masm.jump(&restoreVolatile);
13368 masm.bind(&twoByte);
13371 // Handle the case when the input is a two-byte string.
13372 callMatcher(CharEncoding::TwoByte);
13374 masm.bind(&restoreVolatile);
13375 masm.PopRegsInMask(volatileRegs);
13377 // Convert to bool for `includes`.
13378 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
13379 masm.cmpPtrSet(Assembler::NotEqual, output, ImmPtr(nullptr), output);
13382 masm.bind(ool->rejoin());
13385 void CodeGenerator::visitStringIncludesSIMD(LStringIncludesSIMD* lir) {
13386 Register string = ToRegister(lir->string());
13387 Register output = ToRegister(lir->output());
13388 const JSLinearString* searchString = lir->searchString();
13390 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13391 auto* ool = oolCallVM<Fn, js::StringIncludes>(
13392 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
13394 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
13397 void CodeGenerator::visitStringIndexOf(LStringIndexOf* lir) {
13398 pushArg(ToRegister(lir->searchString()));
13399 pushArg(ToRegister(lir->string()));
13401 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
13402 callVM<Fn, js::StringIndexOf>(lir);
13405 void CodeGenerator::visitStringIndexOfSIMD(LStringIndexOfSIMD* lir) {
13406 Register string = ToRegister(lir->string());
13407 Register output = ToRegister(lir->output());
13408 const JSLinearString* searchString = lir->searchString();
13410 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
13411 auto* ool = oolCallVM<Fn, js::StringIndexOf>(
13412 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
13414 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
13417 void CodeGenerator::visitStringLastIndexOf(LStringLastIndexOf* lir) {
13418 pushArg(ToRegister(lir->searchString()));
13419 pushArg(ToRegister(lir->string()));
13421 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
13422 callVM<Fn, js::StringLastIndexOf>(lir);
13425 void CodeGenerator::visitStringStartsWith(LStringStartsWith* lir) {
13426 pushArg(ToRegister(lir->searchString()));
13427 pushArg(ToRegister(lir->string()));
13429 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13430 callVM<Fn, js::StringStartsWith>(lir);
// Inline fast path for String.prototype.startsWith with a constant, non-empty
// linear search string. Ropes are unwound along their left spine; anything
// that can't be handled inline (short left child, non-comparable chars) falls
// back to the js::StringStartsWith VM function out-of-line.
13433 void CodeGenerator::visitStringStartsWithInline(LStringStartsWithInline* lir) {
13434 Register string = ToRegister(lir->string());
13435 Register output = ToRegister(lir->output());
13436 Register temp = ToRegister(lir->temp0());
13438 const JSLinearString* searchString = lir->searchString();
13440 size_t length = searchString->length();
13441 MOZ_ASSERT(length > 0);
13443 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13444 auto* ool = oolCallVM<Fn, js::StringStartsWith>(
13445 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
// Default result: not a prefix.
13447 masm.move32(Imm32(0), output);
13449 // Can't be a prefix when the string is smaller than the search string.
13450 masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
13451 Imm32(length), ool->rejoin());
13453 // Unwind ropes at the start if possible.
13454 Label compare;
13455 masm.movePtr(string, temp);
13456 masm.branchIfNotRope(temp, &compare);
13458 Label unwindRope;
13459 masm.bind(&unwindRope);
13460 masm.loadRopeLeftChild(temp, output);
13461 masm.movePtr(output, temp);
13463 // If the left child is smaller than the search string, jump into the VM to
13464 // linearize the string.
13465 masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
13466 Imm32(length), ool->entry());
13468 // Otherwise keep unwinding ropes.
13469 masm.branchIfRope(temp, &unwindRope);
13471 masm.bind(&compare);
13473 // If operands point to the same instance, it's trivially a prefix.
13474 Label notPointerEqual;
13475 masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
13476 &notPointerEqual);
13477 masm.move32(Imm32(1), output);
13478 masm.jump(ool->rejoin());
13479 masm.bind(&notPointerEqual);
13481 if (searchString->hasTwoByteChars()) {
13482 // Pure two-byte strings can't be a prefix of Latin-1 strings.
13483 JS::AutoCheckCannotGC nogc;
13484 if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
13485 Label compareChars;
13486 masm.branchTwoByteString(temp, &compareChars);
13487 masm.move32(Imm32(0), output);
13488 masm.jump(ool->rejoin());
13489 masm.bind(&compareChars);
// |output| is reused as the chars pointer here; its boolean value is
// rewritten by compareStringChars below.
13493 // Load the input string's characters.
13494 Register stringChars = output;
13495 masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());
13497 // Start comparing character by character.
13498 masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);
13500 masm.bind(ool->rejoin());
13503 void CodeGenerator::visitStringEndsWith(LStringEndsWith* lir) {
13504 pushArg(ToRegister(lir->searchString()));
13505 pushArg(ToRegister(lir->string()));
13507 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13508 callVM<Fn, js::StringEndsWith>(lir);
// Inline fast path for String.prototype.endsWith with a constant, non-empty
// linear search string. Mirror image of the startsWith fast path: ropes are
// unwound along their right spine and the comparison pointer is advanced to
// the would-be suffix before comparing.
13511 void CodeGenerator::visitStringEndsWithInline(LStringEndsWithInline* lir) {
13512 Register string = ToRegister(lir->string());
13513 Register output = ToRegister(lir->output());
13514 Register temp = ToRegister(lir->temp0());
13516 const JSLinearString* searchString = lir->searchString();
13518 size_t length = searchString->length();
13519 MOZ_ASSERT(length > 0);
13521 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13522 auto* ool = oolCallVM<Fn, js::StringEndsWith>(
13523 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
// Default result: not a suffix.
13525 masm.move32(Imm32(0), output);
13527 // Can't be a suffix when the string is smaller than the search string.
13528 masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
13529 Imm32(length), ool->rejoin());
13531 // Unwind ropes at the end if possible.
13532 Label compare;
13533 masm.movePtr(string, temp);
13534 masm.branchIfNotRope(temp, &compare);
13536 Label unwindRope;
13537 masm.bind(&unwindRope);
13538 masm.loadRopeRightChild(temp, output);
13539 masm.movePtr(output, temp);
13541 // If the right child is smaller than the search string, jump into the VM to
13542 // linearize the string.
13543 masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
13544 Imm32(length), ool->entry());
13546 // Otherwise keep unwinding ropes.
13547 masm.branchIfRope(temp, &unwindRope);
13549 masm.bind(&compare);
13551 // If operands point to the same instance, it's trivially a suffix.
13552 Label notPointerEqual;
13553 masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
13554 &notPointerEqual);
13555 masm.move32(Imm32(1), output);
13556 masm.jump(ool->rejoin());
13557 masm.bind(&notPointerEqual);
// The search string's encoding is known at compile time and also selects
// the char-pointer scaling used by addToCharPtr below.
13559 CharEncoding encoding = searchString->hasLatin1Chars()
13560 ? CharEncoding::Latin1
13561 : CharEncoding::TwoByte;
13562 if (encoding == CharEncoding::TwoByte) {
13563 // Pure two-byte strings can't be a suffix of Latin-1 strings.
13564 JS::AutoCheckCannotGC nogc;
13565 if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
13566 Label compareChars;
13567 masm.branchTwoByteString(temp, &compareChars);
13568 masm.move32(Imm32(0), output);
13569 masm.jump(ool->rejoin());
13570 masm.bind(&compareChars);
13574 // Load the input string's characters.
13575 Register stringChars = output;
13576 masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());
13578 // Move string-char pointer to the suffix string.
13579 masm.loadStringLength(temp, temp);
13580 masm.sub32(Imm32(length), temp);
13581 masm.addToCharPtr(stringChars, temp, encoding);
13583 // Start comparing character by character.
13584 masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);
13586 masm.bind(ool->rejoin());
// Inline fast path for String.prototype.toLowerCase on linear Latin-1 strings
// up to a bounded length. Falls back to the js::StringToLowerCase VM function
// for everything else (ropes, two-byte strings, long strings, allocation
// failure). Note the careful register reuse: on x86, |string| itself doubles
// as |temp3| and is saved/restored around the copy loop.
13589 void CodeGenerator::visitStringToLowerCase(LStringToLowerCase* lir) {
13590 Register string = ToRegister(lir->string());
13591 Register output = ToRegister(lir->output());
13592 Register temp0 = ToRegister(lir->temp0());
13593 Register temp1 = ToRegister(lir->temp1());
13594 Register temp2 = ToRegister(lir->temp2());
13596 // On x86 there are not enough registers. In that case reuse the string
13597 // register as a temporary.
13598 Register temp3 =
13599 lir->temp3()->isBogusTemp() ? string : ToRegister(lir->temp3());
13600 Register temp4 = ToRegister(lir->temp4());
13602 using Fn = JSString* (*)(JSContext*, HandleString);
13603 OutOfLineCode* ool = oolCallVM<Fn, js::StringToLowerCase>(
13604 lir, ArgList(string), StoreRegisterTo(output));
13606 // Take the slow path if the string isn't a linear Latin-1 string.
13607 Imm32 linearLatin1Bits(JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT);
13608 Register flags = temp0;
13609 masm.load32(Address(string, JSString::offsetOfFlags()), flags);
13610 masm.and32(linearLatin1Bits, flags);
13611 masm.branch32(Assembler::NotEqual, flags, linearLatin1Bits, ool->entry());
// |temp0| is re-purposed from flags to length from here on.
13613 Register length = temp0;
13614 masm.loadStringLength(string, length);
13616 // Return the input if it's the empty string.
13617 Label notEmptyString;
13618 masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmptyString);
13620 masm.movePtr(string, output);
13621 masm.jump(ool->rejoin());
13623 masm.bind(&notEmptyString);
13625 Register inputChars = temp1;
13626 masm.loadStringChars(string, inputChars, CharEncoding::Latin1);
13628 Register toLowerCaseTable = temp2;
13629 masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), toLowerCaseTable);
13631 // Single element strings can be directly retrieved from static strings cache.
13632 Label notSingleElementString;
13633 masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleElementString);
13635 Register current = temp4;
13637 masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
13638 masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
13639 current);
13640 masm.lookupStaticString(current, output, gen->runtime->staticStrings());
13642 masm.jump(ool->rejoin());
13644 masm.bind(&notSingleElementString);
13646 // Use the OOL-path when the string is too long. This prevents scanning long
13647 // strings which have upper case characters only near the end a second time in
13648 // the VM.
13649 constexpr int32_t MaxInlineLength = 64;
13650 masm.branch32(Assembler::Above, length, Imm32(MaxInlineLength), ool->entry());
13653 // Check if there are any characters which need to be converted.
13655 // This extra loop gives a small performance improvement for strings which
13656 // are already lower cased and lets us avoid calling into the runtime for
13657 // non-inline, all lower case strings. But more importantly it avoids
13658 // repeated inline allocation failures:
13659 // |AllocateThinOrFatInlineString| below takes the OOL-path and calls the
13660 // |js::StringToLowerCase| runtime function when the result string can't be
13661 // allocated inline. And |js::StringToLowerCase| directly returns the input
13662 // string when no characters need to be converted. That means it won't
13663 // trigger GC to clear up the free nursery space, so the next toLowerCase()
13664 // call will again fail to inline allocate the result string.
13665 Label hasUpper;
13667 Register checkInputChars = output;
13668 masm.movePtr(inputChars, checkInputChars);
13670 Register current = temp4;
13672 Label start;
13673 masm.bind(&start);
13674 masm.loadChar(Address(checkInputChars, 0), current, CharEncoding::Latin1);
// A character needs conversion iff the lower-case table maps it to a
// different value.
13675 masm.branch8(Assembler::NotEqual,
13676 BaseIndex(toLowerCaseTable, current, TimesOne), current,
13677 &hasUpper);
13678 masm.addPtr(Imm32(sizeof(Latin1Char)), checkInputChars);
13679 masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
13681 // Input is already in lower case.
13682 masm.movePtr(string, output);
13683 masm.jump(ool->rejoin());
13685 masm.bind(&hasUpper);
13687 // |length| was clobbered above, reload.
13688 masm.loadStringLength(string, length);
13690 // Call into the runtime when we can't create an inline string.
13691 masm.branch32(Assembler::Above, length,
13692 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), ool->entry());
13694 AllocateThinOrFatInlineString(masm, output, length, temp4,
13695 initialStringHeap(), ool->entry(),
13696 CharEncoding::Latin1);
// On x86 |temp3| aliases |string|, so preserve the string pointer across
// the copy loop that clobbers temp3.
13698 if (temp3 == string) {
13699 masm.push(string);
13702 Register outputChars = temp3;
13703 masm.loadInlineStringCharsForStore(output, outputChars);
13706 Register current = temp4;
13708 Label start;
13709 masm.bind(&start);
13710 masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
13711 masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
13712 current);
13713 masm.storeChar(current, Address(outputChars, 0), CharEncoding::Latin1);
13714 masm.addPtr(Imm32(sizeof(Latin1Char)), inputChars);
13715 masm.addPtr(Imm32(sizeof(Latin1Char)), outputChars);
13716 masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
13719 if (temp3 == string) {
13720 masm.pop(string);
13724 masm.bind(ool->rejoin());
13727 void CodeGenerator::visitStringToUpperCase(LStringToUpperCase* lir) {
13728 pushArg(ToRegister(lir->string()));
13730 using Fn = JSString* (*)(JSContext*, HandleString);
13731 callVM<Fn, js::StringToUpperCase>(lir);
// Map a Latin-1 char code to the static string holding its lower-case form
// via the latin1ToLowerCaseTable. Non-Latin-1 codes go out-of-line to the
// jit::CharCodeToLowerCase VM function.
13734 void CodeGenerator::visitCharCodeToLowerCase(LCharCodeToLowerCase* lir) {
13735 Register code = ToRegister(lir->code());
13736 Register output = ToRegister(lir->output());
13737 Register temp = ToRegister(lir->temp0());
13739 using Fn = JSString* (*)(JSContext*, int32_t);
13740 auto* ool = oolCallVM<Fn, jit::CharCodeToLowerCase>(lir, ArgList(code),
13741 StoreRegisterTo(output));
13743 constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;
13745 // OOL path if code >= NonLatin1Min.
13746 masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());
13748 // Convert to lower case.
13749 masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), temp);
13750 masm.load8ZeroExtend(BaseIndex(temp, code, TimesOne), temp);
13752 // Load static string for lower case character.
13753 masm.lookupStaticString(temp, output, gen->runtime->staticStrings());
13755 masm.bind(ool->rejoin());
13758 void CodeGenerator::visitCharCodeToUpperCase(LCharCodeToUpperCase* lir) {
13759 Register code = ToRegister(lir->code());
13760 Register output = ToRegister(lir->output());
13761 Register temp = ToRegister(lir->temp0());
13763 using Fn = JSString* (*)(JSContext*, int32_t);
13764 auto* ool = oolCallVM<Fn, jit::CharCodeToUpperCase>(lir, ArgList(code),
13765 StoreRegisterTo(output));
13767 constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;
13769 // OOL path if code >= NonLatin1Min.
13770 masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());
13772 // Most one element Latin-1 strings can be directly retrieved from the
13773 // static strings cache, except the following three characters:
13775 // 1. ToUpper(U+00B5) = 0+039C
13776 // 2. ToUpper(U+00FF) = 0+0178
13777 // 3. ToUpper(U+00DF) = 0+0053 0+0053
13778 masm.branch32(Assembler::Equal, code, Imm32(unicode::MICRO_SIGN),
13779 ool->entry());
13780 masm.branch32(Assembler::Equal, code,
13781 Imm32(unicode::LATIN_SMALL_LETTER_Y_WITH_DIAERESIS),
13782 ool->entry());
13783 masm.branch32(Assembler::Equal, code,
13784 Imm32(unicode::LATIN_SMALL_LETTER_SHARP_S), ool->entry());
13786 // Inline unicode::ToUpperCase (without the special case for ASCII characters)
13788 constexpr size_t shift = unicode::CharInfoShift;
13790 // code >> shift
13791 masm.move32(code, temp);
13792 masm.rshift32(Imm32(shift), temp);
13794 // index = index1[code >> shift];
13795 masm.movePtr(ImmPtr(unicode::index1), output);
13796 masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);
13798 // (code & ((1 << shift) - 1)
13799 masm.move32(code, output);
13800 masm.and32(Imm32((1 << shift) - 1), output);
13802 // (index << shift) + (code & ((1 << shift) - 1))
13803 masm.lshift32(Imm32(shift), temp);
13804 masm.add32(output, temp);
13806 // index = index2[(index << shift) + (code & ((1 << shift) - 1))]
13807 masm.movePtr(ImmPtr(unicode::index2), output);
13808 masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);
13810 // Compute |index * 6| through |(index * 3) * TimesTwo|.
13811 static_assert(sizeof(unicode::CharacterInfo) == 6);
13812 masm.mulBy3(temp, temp);
13814 // upperCase = js_charinfo[index].upperCase
13815 masm.movePtr(ImmPtr(unicode::js_charinfo), output);
13816 masm.load16ZeroExtend(BaseIndex(output, temp, TimesTwo,
13817 offsetof(unicode::CharacterInfo, upperCase)),
13818 temp);
13820 // uint16_t(ch) + upperCase
13821 masm.add32(code, temp);
13823 // Clear any high bits added when performing the unsigned 16-bit addition
13824 // through a signed 32-bit addition.
13825 masm.move8ZeroExtend(temp, temp);
13827 // Load static string for upper case character.
13828 masm.lookupStaticString(temp, output, gen->runtime->staticStrings());
13830 masm.bind(ool->rejoin());
// Compute the index of the first non-whitespace character of |string| by
// calling the C++ helper jit::StringTrimStartIndex through the ABI. Volatile
// registers (minus the output) are preserved around the call.
13833 void CodeGenerator::visitStringTrimStartIndex(LStringTrimStartIndex* lir) {
13834 Register string = ToRegister(lir->string());
13835 Register output = ToRegister(lir->output());
13837 auto volatileRegs = liveVolatileRegs(lir);
// The output is written after the call, so it doesn't need to be saved.
13838 volatileRegs.takeUnchecked(output);
13840 masm.PushRegsInMask(volatileRegs);
13842 using Fn = int32_t (*)(const JSString*);
13843 masm.setupAlignedABICall();
13844 masm.passABIArg(string);
13845 masm.callWithABI<Fn, jit::StringTrimStartIndex>();
13846 masm.storeCallInt32Result(output);
13848 masm.PopRegsInMask(volatileRegs);
// Compute the end index for trimming trailing whitespace of |string|,
// scanning backwards no further than |start|, via an ABI call to
// jit::StringTrimEndIndex. Volatile registers (minus the output) are
// preserved around the call.
13851 void CodeGenerator::visitStringTrimEndIndex(LStringTrimEndIndex* lir) {
13852 Register string = ToRegister(lir->string());
13853 Register start = ToRegister(lir->start());
13854 Register output = ToRegister(lir->output());
13856 auto volatileRegs = liveVolatileRegs(lir);
// The output is written after the call, so it doesn't need to be saved.
13857 volatileRegs.takeUnchecked(output);
13859 masm.PushRegsInMask(volatileRegs);
13861 using Fn = int32_t (*)(const JSString*, int32_t);
13862 masm.setupAlignedABICall();
13863 masm.passABIArg(string);
13864 masm.passABIArg(start);
13865 masm.callWithABI<Fn, jit::StringTrimEndIndex>();
13866 masm.storeCallInt32Result(output);
13868 masm.PopRegsInMask(volatileRegs);
13871 void CodeGenerator::visitStringSplit(LStringSplit* lir) {
13872 pushArg(Imm32(INT32_MAX));
13873 pushArg(ToRegister(lir->separator()));
13874 pushArg(ToRegister(lir->string()));
13876 using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
13877 callVM<Fn, js::StringSplitString>(lir);
13880 void CodeGenerator::visitInitializedLength(LInitializedLength* lir) {
13881 Address initLength(ToRegister(lir->elements()),
13882 ObjectElements::offsetOfInitializedLength());
13883 masm.load32(initLength, ToRegister(lir->output()));
13886 void CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir) {
13887 Address initLength(ToRegister(lir->elements()),
13888 ObjectElements::offsetOfInitializedLength());
13889 SetLengthFromIndex(masm, lir->index(), initLength);
13892 void CodeGenerator::visitNotBI(LNotBI* lir) {
13893 Register input = ToRegister(lir->input());
13894 Register output = ToRegister(lir->output());
13896 masm.cmp32Set(Assembler::Equal, Address(input, BigInt::offsetOfLength()),
13897 Imm32(0), output);
// Emit !object. When the "objects never emulate undefined" fuse is intact the
// answer is statically false; otherwise emit an emulates-undefined test with
// an out-of-line helper.
13900 void CodeGenerator::visitNotO(LNotO* lir) {
13901 Register objreg = ToRegister(lir->input());
13902 Register output = ToRegister(lir->output());
13904 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
13905 if (intact) {
13906 // Bug 1874905: It would be fantastic if this could be optimized out.
13907 assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
13908 masm.move32(Imm32(0), output);
13909 } else {
13910 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
13911 addOutOfLineCode(ool, lir->mir());
13913 Label* ifEmulatesUndefined = ool->label1();
13914 Label* ifDoesntEmulateUndefined = ool->label2();
13916 branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
13917 ifDoesntEmulateUndefined, output, ool);
// The branch helper may fall through to the "doesn't emulate undefined"
// case instead of jumping, hence the fall-through result write below.
13918 // fall through
13920 Label join;
13922 masm.move32(Imm32(0), output);
13923 masm.jump(&join);
13925 masm.bind(ifEmulatesUndefined);
13926 masm.move32(Imm32(1), output);
13928 masm.bind(&join);
// Emit !value for a boxed Value: test truthiness (with an out-of-line helper
// for objects that may emulate undefined) and materialize the negated boolean
// in |output|.
13932 void CodeGenerator::visitNotV(LNotV* lir) {
13933 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
13934 addOutOfLineCode(ool, lir->mir());
13936 Label* ifTruthy = ool->label1();
13937 Label* ifFalsy = ool->label2();
13939 ValueOperand input = ToValue(lir, LNotV::InputIndex);
13940 Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
13941 FloatRegister floatTemp = ToFloatRegister(lir->temp0());
13942 Register output = ToRegister(lir->output());
13943 const TypeDataList& observedTypes = lir->mir()->observedTypes();
13945 testValueTruthy(input, tempToUnbox, output, floatTemp, observedTypes,
13946 ifTruthy, ifFalsy, ool);
13948 Label join;
13950 // Note that the testValueTruthy call above may choose to fall through
13951 // to ifTruthy instead of branching there.
13952 masm.bind(ifTruthy);
13953 masm.move32(Imm32(0), output);
13954 masm.jump(&join);
13956 masm.bind(ifFalsy);
13957 masm.move32(Imm32(1), output);
13959 // both branches meet here.
13960 masm.bind(&join);
// Emit an index < length bounds check that bails out of Ion code on failure.
// Works for both Int32 and IntPtr indices; constant operands are folded where
// possible. All comparisons are unsigned so a negative index also fails.
13963 void CodeGenerator::visitBoundsCheck(LBoundsCheck* lir) {
13964 const LAllocation* index = lir->index();
13965 const LAllocation* length = lir->length();
13966 LSnapshot* snapshot = lir->snapshot();
13968 MIRType type = lir->mir()->type();
// Width-dispatching helpers: compare as 32-bit or pointer-sized depending
// on the MIR type, bailing out via |snapshot| when the condition holds.
13970 auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
13971 if (type == MIRType::Int32) {
13972 bailoutCmp32(cond, lhs, rhs, snapshot);
13973 } else {
13974 MOZ_ASSERT(type == MIRType::IntPtr);
13975 bailoutCmpPtr(cond, lhs, rhs, snapshot);
13979 auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
13980 int32_t rhs) {
13981 if (type == MIRType::Int32) {
13982 bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
13983 } else {
13984 MOZ_ASSERT(type == MIRType::IntPtr);
13985 bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
13989 if (index->isConstant()) {
13990 // Use uint32 so that the comparison is unsigned.
13991 uint32_t idx = ToInt32(index);
13992 if (length->isConstant()) {
// Both operands constant: the check folds to a compile-time decision —
// either no code at all or an unconditional bailout.
13993 uint32_t len = ToInt32(lir->length());
13994 if (idx < len) {
13995 return;
13997 bailout(snapshot);
13998 return;
14001 if (length->isRegister()) {
14002 bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), idx);
14003 } else {
14004 bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), idx);
14006 return;
14009 Register indexReg = ToRegister(index);
14010 if (length->isConstant()) {
14011 bailoutCmpConstant(Assembler::AboveOrEqual, indexReg, ToInt32(length));
14012 } else if (length->isRegister()) {
14013 bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), indexReg);
14014 } else {
14015 bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), indexReg);
// Emit a hoisted bounds check covering a whole range of accesses
// [index + min, index + max]. Bails out when any access in the range could be
// out of bounds. Handles both Int32 and IntPtr indices and guards the offset
// arithmetic against overflow.
14019 void CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir) {
14020 int32_t min = lir->mir()->minimum();
14021 int32_t max = lir->mir()->maximum();
14022 MOZ_ASSERT(max >= min);
14024 LSnapshot* snapshot = lir->snapshot();
14025 MIRType type = lir->mir()->type();
14027 const LAllocation* length = lir->length();
14028 Register temp = ToRegister(lir->getTemp(0));
// Width-dispatching comparison helpers, as in visitBoundsCheck.
14030 auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
14031 if (type == MIRType::Int32) {
14032 bailoutCmp32(cond, lhs, rhs, snapshot);
14033 } else {
14034 MOZ_ASSERT(type == MIRType::IntPtr);
14035 bailoutCmpPtr(cond, lhs, rhs, snapshot);
14039 auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
14040 int32_t rhs) {
14041 if (type == MIRType::Int32) {
14042 bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
14043 } else {
14044 MOZ_ASSERT(type == MIRType::IntPtr);
14045 bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
14049 if (lir->index()->isConstant()) {
// With a constant index, try to fold the whole range into a single
// compare against length; fall back to materializing the index in
// |temp| when the folded bounds would overflow or be negative.
14050 int32_t nmin, nmax;
14051 int32_t index = ToInt32(lir->index());
14052 if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
14053 if (length->isRegister()) {
14054 bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), nmax);
14055 } else {
14056 bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), nmax);
14058 return;
14060 masm.mov(ImmWord(index), temp);
14061 } else {
14062 masm.mov(ToRegister(lir->index()), temp);
14065 // If the minimum and maximum differ then do an underflow check first.
14066 // If the two are the same then doing an unsigned comparison on the
14067 // length will also catch a negative index.
14068 if (min != max) {
14069 if (min != 0) {
14070 Label bail;
14071 if (type == MIRType::Int32) {
14072 masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
14073 } else {
14074 masm.branchAddPtr(Assembler::Overflow, Imm32(min), temp, &bail);
14076 bailoutFrom(&bail, snapshot);
14079 bailoutCmpConstant(Assembler::LessThan, temp, 0);
// |temp| now holds index + min; rebase |max| so the next addition
// produces index + max (subtracting in the register only when the
// constant fold would overflow).
14081 if (min != 0) {
14082 int32_t diff;
14083 if (SafeSub(max, min, &diff)) {
14084 max = diff;
14085 } else {
14086 if (type == MIRType::Int32) {
14087 masm.sub32(Imm32(min), temp);
14088 } else {
14089 masm.subPtr(Imm32(min), temp);
14095 // Compute the maximum possible index. No overflow check is needed when
14096 // max > 0. We can only wraparound to a negative number, which will test as
14097 // larger than all nonnegative numbers in the unsigned comparison, and the
14098 // length is required to be nonnegative (else testing a negative length
14099 // would succeed on any nonnegative index).
14100 if (max != 0) {
14101 if (max < 0) {
14102 Label bail;
14103 if (type == MIRType::Int32) {
14104 masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
14105 } else {
14106 masm.branchAddPtr(Assembler::Overflow, Imm32(max), temp, &bail);
14108 bailoutFrom(&bail, snapshot);
14109 } else {
14110 if (type == MIRType::Int32) {
14111 masm.add32(Imm32(max), temp);
14112 } else {
14113 masm.addPtr(Imm32(max), temp);
14118 if (length->isRegister()) {
14119 bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), temp);
14120 } else {
14121 bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), temp);
14125 void CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir) {
14126 int32_t min = lir->mir()->minimum();
14127 bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
14128 lir->snapshot());
// Spectre mitigation: mask |index| against |length| so a speculatively
// out-of-bounds index cannot be used to read attacker-controlled memory.
// The masked index is written to |output|; the length operand may be either
// a register or a memory address, and the index type is Int32 or IntPtr.
void CodeGenerator::visitSpectreMaskIndex(LSpectreMaskIndex* lir) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);

  const LAllocation* length = lir->length();
  Register index = ToRegister(lir->index());
  Register output = ToRegister(lir->output());

  if (lir->mir()->type() == MIRType::Int32) {
    if (length->isRegister()) {
      masm.spectreMaskIndex32(index, ToRegister(length), output);
    } else {
      masm.spectreMaskIndex32(index, ToAddress(length), output);
    }
  } else {
    MOZ_ASSERT(lir->mir()->type() == MIRType::IntPtr);
    if (length->isRegister()) {
      masm.spectreMaskIndexPtr(index, ToRegister(length), output);
    } else {
      masm.spectreMaskIndexPtr(index, ToAddress(length), output);
    }
  }
}
// Out-of-line path shared by LStoreElementHoleV and LStoreElementHoleT.
// Taken when the store index is not below the initialized length, i.e. the
// fast in-bounds store cannot be used.
class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator> {
  // The originating LIR instruction; must be one of the two hole-store forms.
  LInstruction* ins_;

 public:
  explicit OutOfLineStoreElementHole(LInstruction* ins) : ins_(ins) {
    MOZ_ASSERT(ins->isStoreElementHoleV() || ins->isStoreElementHoleT());
  }

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineStoreElementHole(this);
  }

  // Both LIR forms share the same MIR node type; dispatch on the LIR kind.
  MStoreElementHole* mir() const {
    return ins_->isStoreElementHoleV() ? ins_->toStoreElementHoleV()->mir()
                                       : ins_->toStoreElementHoleT()->mir();
  }
  LInstruction* ins() const { return ins_; }
};
// Bail out if the dense element at |index| currently holds the magic hole
// value (JS_ELEMENTS_HOLE), since overwriting a hole requires the slow path.
void CodeGenerator::emitStoreHoleCheck(Register elements,
                                       const LAllocation* index,
                                       LSnapshot* snapshot) {
  Label bail;
  if (index->isConstant()) {
    Address dest(elements, ToInt32(index) * sizeof(js::Value));
    masm.branchTestMagic(Assembler::Equal, dest, &bail);
  } else {
    BaseObjectElementIndex dest(elements, ToRegister(index));
    masm.branchTestMagic(Assembler::Equal, dest, &bail);
  }
  bailoutFrom(&bail, snapshot);
}
// Store a value of statically-known type |valueType| into the dense element
// at |index|. |value| may be a constant or a (typed) register payload.
void CodeGenerator::emitStoreElementTyped(const LAllocation* value,
                                          MIRType valueType, Register elements,
                                          const LAllocation* index) {
  // The hole magic value must be written via visitStoreHoleValueElement.
  MOZ_ASSERT(valueType != MIRType::MagicHole);
  ConstantOrRegister v = ToConstantOrRegister(value, valueType);
  if (index->isConstant()) {
    Address dest(elements, ToInt32(index) * sizeof(js::Value));
    masm.storeUnboxedValue(v, valueType, dest);
  } else {
    BaseObjectElementIndex dest(elements, ToRegister(index));
    masm.storeUnboxedValue(v, valueType, dest);
  }
}
// In-bounds dense element store of a value with a known type. Optional
// pre-barrier (for GC) and hole check are emitted as requested by the MIR.
void CodeGenerator::visitStoreElementT(LStoreElementT* store) {
  Register elements = ToRegister(store->elements());
  const LAllocation* index = store->index();

  if (store->mir()->needsBarrier()) {
    emitPreBarrier(elements, index);
  }

  if (store->mir()->needsHoleCheck()) {
    emitStoreHoleCheck(elements, index, store->snapshot());
  }

  emitStoreElementTyped(store->value(), store->mir()->value()->type(), elements,
                        index);
}
// In-bounds dense element store of a boxed Value. Optional pre-barrier and
// hole check are emitted as requested by the MIR.
void CodeGenerator::visitStoreElementV(LStoreElementV* lir) {
  const ValueOperand value = ToValue(lir, LStoreElementV::Value);
  Register elements = ToRegister(lir->elements());
  const LAllocation* index = lir->index();

  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(elements, index);
  }

  if (lir->mir()->needsHoleCheck()) {
    emitStoreHoleCheck(elements, index, lir->snapshot());
  }

  if (lir->index()->isConstant()) {
    Address dest(elements, ToInt32(lir->index()) * sizeof(js::Value));
    masm.storeValue(value, dest);
  } else {
    BaseObjectElementIndex dest(elements, ToRegister(lir->index()));
    masm.storeValue(value, dest);
  }
}
// Write the magic hole value into a dense element. The elements header is
// first marked NON_PACKED, since the array now contains a hole.
void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());

  Address elementsFlags(elements, ObjectElements::offsetOfFlags());
  masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);

  BaseObjectElementIndex element(elements, index);
  masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
}
// Dense element store (typed value) that may grow the array: if |index| is
// not below the initialized length, jump to the out-of-line path which
// handles the index == initLength append case (bailing out otherwise).
void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  // NOTE: the OOL path relies on the condition flags set by this bounds
  // check still being live on entry (see visitOutOfLineStoreElementHole).
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // OOL code rejoins here after growing the array; the store below then
  // writes the value into the (possibly freshly reserved) element.
  masm.bind(ool->rejoin());
  emitStoreElementTyped(lir->value(), lir->mir()->value()->type(), elements,
                        lir->index());

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    ConstantOrRegister val =
        ToConstantOrRegister(lir->value(), lir->mir()->value()->type());
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp, val);
  }
}
// Dense element store (boxed Value) that may grow the array; Value-typed
// counterpart of visitStoreElementHoleT.
void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  const ValueOperand value = ToValue(lir, LStoreElementHoleV::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  // NOTE: the OOL path relies on the condition flags set by this bounds
  // check still being live on entry (see visitOutOfLineStoreElementHole).
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // OOL code rejoins here after growing the array.
  masm.bind(ool->rejoin());
  masm.storeValue(value, BaseObjectElementIndex(elements, index));

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp,
                                ConstantOrRegister(value));
  }
}
// Out-of-line path for StoreElementHole{V,T}: handles the append case
// (index == initializedLength), growing the elements storage via a pure VM
// call if the capacity is exhausted, and bailing out for any other
// out-of-bounds index. Rejoins the inline path before the actual store.
void CodeGenerator::visitOutOfLineStoreElementHole(
    OutOfLineStoreElementHole* ool) {
  Register object, elements, index;
  LInstruction* ins = ool->ins();
  mozilla::Maybe<ConstantOrRegister> value;
  Register temp;

  // Unpack the operands from whichever LIR form created this OOL path.
  if (ins->isStoreElementHoleV()) {
    LStoreElementHoleV* store = ins->toStoreElementHoleV();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    value.emplace(
        TypedOrValueRegister(ToValue(store, LStoreElementHoleV::ValueIndex)));
    temp = ToRegister(store->temp0());
  } else {
    LStoreElementHoleT* store = ins->toStoreElementHoleT();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    if (store->value()->isConstant()) {
      value.emplace(
          ConstantOrRegister(store->value()->toConstant()->toJSValue()));
    } else {
      MIRType valueType = store->mir()->value()->type();
      value.emplace(
          TypedOrValueRegister(valueType, ToAnyRegister(store->value())));
    }
    temp = ToRegister(store->temp0());
  }

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());

  // We're out-of-bounds. We only handle the index == initlength case.
  // If index > initializedLength, bail out. Note that this relies on the
  // condition flags sticking from the incoming branch.
  // Also note: this branch does not need Spectre mitigations, doing that for
  // the capacity check below is sufficient.
  Label allocElement, addNewElement;
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  // Had to reimplement for MIPS because there are no flags.
  bailoutCmp32(Assembler::NotEqual, initLength, index, ins->snapshot());
#else
  bailoutIf(Assembler::NotEqual, ins->snapshot());
#endif

  // If index < capacity, we can add a dense element inline. If not, we need
  // to allocate more elements first.
  masm.spectreBoundsCheck32(
      index, Address(elements, ObjectElements::offsetOfCapacity()), temp,
      &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  // Save all live volatile registers, except |temp|.
  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  liveRegs.takeUnchecked(temp);
  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject*);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  // addDenseElementPure returns false on failure; bail out in that case.
  bailoutIfFalseBool(temp, ins->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);

  masm.bind(&addNewElement);

  // Increment initLength
  masm.add32(Imm32(1), initLength);

  // If length is now <= index, increment length too.
  Label skipIncrementLength;
  Address length(elements, ObjectElements::offsetOfLength());
  masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
  masm.add32(Imm32(1), length);
  masm.bind(&skipIncrementLength);

  // Jump to the inline path where we will store the value.
  // We rejoin after the prebarrier, because the memory is uninitialized.
  masm.jump(ool->rejoin());
}
// Array.prototype.pop/shift on a packed array: remove the last (pop) or
// first (shift) element and box it into |out|, bailing out if the fast-path
// preconditions fail inside the MacroAssembler helpers.
void CodeGenerator::visitArrayPopShift(LArrayPopShift* lir) {
  Register obj = ToRegister(lir->object());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  ValueOperand out = ToOutValue(lir);

  Label bail;
  if (lir->mir()->mode() == MArrayPopShift::Pop) {
    masm.packedArrayPop(obj, out, temp1, temp2, &bail);
  } else {
    MOZ_ASSERT(lir->mir()->mode() == MArrayPopShift::Shift);
    // Shift moves all elements down, which can call into the VM internals,
    // hence the volatile register set.
    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    masm.packedArrayShift(obj, out, temp1, temp2, volatileRegs, &bail);
  }
  bailoutFrom(&bail, lir->snapshot());
}
// Out-of-line path for LArrayPush, taken when the elements capacity is
// exhausted and more storage must be allocated.
class OutOfLineArrayPush : public OutOfLineCodeBase<CodeGenerator> {
  LArrayPush* ins_;

 public:
  explicit OutOfLineArrayPush(LArrayPush* ins) : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineArrayPush(this);
  }

  LArrayPush* ins() const { return ins_; }
};
// Array.prototype.push of a single value: store at index |length|, then bump
// both length and initialized length. Falls back to OOL allocation when the
// capacity is full. The output register holds the new length.
void CodeGenerator::visitArrayPush(LArrayPush* lir) {
  Register obj = ToRegister(lir->object());
  Register elementsTemp = ToRegister(lir->temp0());
  Register length = ToRegister(lir->output());
  ValueOperand value = ToValue(lir, LArrayPush::ValueIndex);
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  auto* ool = new (alloc()) OutOfLineArrayPush(lir);
  addOutOfLineCode(ool, lir->mir());

  // Load obj->elements in elementsTemp.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);

  Address initLengthAddr(elementsTemp,
                         ObjectElements::offsetOfInitializedLength());
  Address lengthAddr(elementsTemp, ObjectElements::offsetOfLength());
  Address capacityAddr(elementsTemp, ObjectElements::offsetOfCapacity());

  // Bail out if length != initLength.
  masm.load32(lengthAddr, length);
  bailoutCmp32(Assembler::NotEqual, initLengthAddr, length, lir->snapshot());

  // If length < capacity, we can add a dense element inline. If not, we
  // need to allocate more elements.
  masm.spectreBoundsCheck32(length, capacityAddr, spectreTemp, ool->entry());
  masm.bind(ool->rejoin());

  // Store the value.
  masm.storeValue(value, BaseObjectElementIndex(elementsTemp, length));

  // Update length and initialized length.
  masm.add32(Imm32(1), length);
  masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
  masm.store32(length, Address(elementsTemp,
                               ObjectElements::offsetOfInitializedLength()));

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    regs.addUnchecked(length);
    // |length| was already incremented, so the element we stored is at
    // length - 1; indexDiff accounts for that.
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->output()->output(),
                                elementsTemp, ConstantOrRegister(value),
                                /* indexDiff = */ -1);
  }
}
// Out-of-line path for visitArrayPush: grow the elements storage via the
// pure VM helper, bail out on failure, then rejoin the inline store.
void CodeGenerator::visitOutOfLineArrayPush(OutOfLineArrayPush* ool) {
  LArrayPush* ins = ool->ins();

  Register object = ToRegister(ins->object());
  Register temp = ToRegister(ins->temp0());

  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  liveRegs.takeUnchecked(temp);
  // The output (length) and the pushed value are still live across the call.
  liveRegs.addUnchecked(ToRegister(ins->output()));
  liveRegs.addUnchecked(ToValue(ins, LArrayPush::ValueIndex));

  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject* obj);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  bailoutIfFalseBool(temp, ins->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  masm.jump(ool->rejoin());
}
// Array.prototype.slice on a packed array. Tries to pre-allocate the result
// object inline (passing it to the VM call); on allocation failure, passes
// nullptr so the VM allocates instead. Bails out if the array is not packed.
void CodeGenerator::visitArraySlice(LArraySlice* lir) {
  Register object = ToRegister(lir->object());
  Register begin = ToRegister(lir->begin());
  Register end = ToRegister(lir->end());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label call, fail;

  Label bail;
  masm.branchArrayIsNotPacked(object, temp0, temp1, &bail);
  bailoutFrom(&bail, lir->snapshot());

  // Try to allocate an object.
  TemplateObject templateObject(lir->mir()->templateObj());
  masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
                      &fail);

  masm.jump(&call);

  masm.bind(&fail);
  // Signal "no pre-allocated result" to the VM function.
  masm.movePtr(ImmPtr(nullptr), temp0);

  masm.bind(&call);

  pushArg(temp0);
  pushArg(end);
  pushArg(begin);
  pushArg(object);

  using Fn =
      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
  callVM<Fn, ArraySliceDense>(lir);
}
// Slice of an arguments object; mirrors visitArraySlice but calls
// ArgumentsSliceDense and needs no packed-array check.
void CodeGenerator::visitArgumentsSlice(LArgumentsSlice* lir) {
  Register object = ToRegister(lir->object());
  Register begin = ToRegister(lir->begin());
  Register end = ToRegister(lir->end());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label call, fail;

  // Try to allocate an object.
  TemplateObject templateObject(lir->mir()->templateObj());
  masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
                      &fail);

  masm.jump(&call);

  masm.bind(&fail);
  // Signal "no pre-allocated result" to the VM function.
  masm.movePtr(ImmPtr(nullptr), temp0);

  masm.bind(&call);

  pushArg(temp0);
  pushArg(end);
  pushArg(begin);
  pushArg(object);

  using Fn =
      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
  callVM<Fn, ArgumentsSliceDense>(lir);
}
#ifdef DEBUG
// Debug-only runtime assertions for the argument-slice invariants:
// 0 <= begin, 0 <= count, begin <= numActualArgs, count <= numActualArgs and
// begin + count <= numActualArgs. |begin| and |count| may each be a register
// or a compile-time constant; |numActualArgs| is clobbered.
void CodeGenerator::emitAssertArgumentsSliceBounds(const RegisterOrInt32& begin,
                                                   const RegisterOrInt32& count,
                                                   Register numActualArgs) {
  // |begin| must be positive or zero.
  if (begin.is<Register>()) {
    Label beginOk;
    masm.branch32(Assembler::GreaterThanOrEqual, begin.as<Register>(), Imm32(0),
                  &beginOk);
    masm.assumeUnreachable("begin < 0");
    masm.bind(&beginOk);
  } else {
    MOZ_ASSERT(begin.as<int32_t>() >= 0);
  }

  // |count| must be positive or zero.
  if (count.is<Register>()) {
    Label countOk;
    masm.branch32(Assembler::GreaterThanOrEqual, count.as<Register>(), Imm32(0),
                  &countOk);
    masm.assumeUnreachable("count < 0");
    masm.bind(&countOk);
  } else {
    MOZ_ASSERT(count.as<int32_t>() >= 0);
  }

  // |begin| must be less-or-equal to |numActualArgs|.
  Label argsBeginOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginOk);
  }
  masm.assumeUnreachable("begin <= numActualArgs");
  masm.bind(&argsBeginOk);

  // |count| must be less-or-equal to |numActualArgs|.
  Label argsCountOk;
  if (count.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, count.as<Register>(),
                   &argsCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(count.as<int32_t>()), &argsCountOk);
  }
  masm.assumeUnreachable("count <= numActualArgs");
  masm.bind(&argsCountOk);

  // |begin| and |count| must be preserved, but |numActualArgs| can be changed.
  //
  // Pre-condition: |count| <= |numActualArgs|
  // Condition to test: |begin + count| <= |numActualArgs|
  // Transform to: |begin| <= |numActualArgs - count|
  if (count.is<Register>()) {
    masm.subPtr(count.as<Register>(), numActualArgs);
  } else {
    masm.subPtr(Imm32(count.as<int32_t>()), numActualArgs);
  }

  // |begin + count| must be less-or-equal to |numActualArgs|.
  Label argsBeginCountOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginCountOk);
  }
  masm.assumeUnreachable("begin + count <= numActualArgs");
  masm.bind(&argsBeginCountOk);
}
#endif
// Allocate the result array for a {Frame,Inline}ArgumentsSlice with room for
// |count| dense elements, leaving the object in |output|. Uses the template
// object's fixed elements when the capacity suffices, otherwise calls into
// the VM. |count| may be a register or a compile-time constant.
template <class ArgumentsSlice>
void CodeGenerator::emitNewArray(ArgumentsSlice* lir,
                                 const RegisterOrInt32& count, Register output,
                                 Register temp) {
  using Fn = ArrayObject* (*)(JSContext*, int32_t);
  auto* ool = count.match(
      [&](Register count) {
        return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
            lir, ArgList(count), StoreRegisterTo(output));
      },
      [&](int32_t count) {
        return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
            lir, ArgList(Imm32(count)), StoreRegisterTo(output));
      });

  TemplateObject templateObject(lir->mir()->templateObj());
  MOZ_ASSERT(templateObject.isArrayObject());

  auto templateNativeObj = templateObject.asTemplateNativeObject();
  MOZ_ASSERT(templateNativeObj.getArrayLength() == 0);
  MOZ_ASSERT(templateNativeObj.getDenseInitializedLength() == 0);
  MOZ_ASSERT(!templateNativeObj.hasDynamicElements());

  // Check array capacity. Call into the VM if the template object's capacity
  // is too small.
  bool tryAllocate = count.match(
      [&](Register count) {
        masm.branch32(Assembler::Above, count,
                      Imm32(templateNativeObj.getDenseCapacity()),
                      ool->entry());
        return true;
      },
      [&](int32_t count) {
        MOZ_ASSERT(count >= 0);
        if (uint32_t(count) > templateNativeObj.getDenseCapacity()) {
          // Statically too large: always take the VM path.
          masm.jump(ool->entry());
          return false;
        }
        return true;
      });

  if (tryAllocate) {
    // Try to allocate an object.
    masm.createGCObject(output, temp, templateObject, lir->mir()->initialHeap(),
                        ool->entry());

    auto setInitializedLengthAndLength = [&](auto count) {
      const int elementsOffset = NativeObject::offsetOfFixedElements();

      // Update initialized length.
      Address initLength(
          output, elementsOffset + ObjectElements::offsetOfInitializedLength());
      masm.store32(count, initLength);

      // Update length.
      Address length(output, elementsOffset + ObjectElements::offsetOfLength());
      masm.store32(count, length);
    };

    // The array object was successfully created. Set the length and initialized
    // length and then proceed to fill the elements.
    count.match([&](Register count) { setInitializedLengthAndLength(count); },
                [&](int32_t count) {
                  if (count > 0) {
                    setInitializedLengthAndLength(Imm32(count));
                  }
                });
  }

  masm.bind(ool->rejoin());
}
// Slice of the frame's actual arguments: allocate a result array, then copy
// |count| Values starting at argument |begin| from the JS frame into the
// array's dense elements, followed by a post-write barrier if needed.
void CodeGenerator::visitFrameArgumentsSlice(LFrameArgumentsSlice* lir) {
  Register begin = ToRegister(lir->begin());
  Register count = ToRegister(lir->count());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  masm.loadNumActualArgs(FramePointer, temp);
  emitAssertArgumentsSliceBounds(RegisterOrInt32(begin), RegisterOrInt32(count),
                                 temp);
#endif

  emitNewArray(lir, RegisterOrInt32(count), output, temp);

  Label done;
  masm.branch32(Assembler::Equal, count, Imm32(0), &done);
  {
    // Grab a free Value register pair not overlapping the fixed operands.
    AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    allRegs.take(begin);
    allRegs.take(count);
    allRegs.take(temp);
    allRegs.take(output);

    ValueOperand value = allRegs.takeAnyValue();

    // |output| and |begin| are clobbered as loop cursors below; save them
    // (plus the scratch Value) and restore after the copy loop.
    LiveRegisterSet liveRegs;
    liveRegs.add(output);
    liveRegs.add(begin);
    liveRegs.add(value);

    masm.PushRegsInMask(liveRegs);

    // Initialize all elements.

    Register elements = output;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    Register argIndex = begin;

    Register index = temp;
    masm.move32(Imm32(0), index);

    size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
    BaseValueIndex argPtr(FramePointer, argIndex, argvOffset);

    Label loop;
    masm.bind(&loop);

    masm.loadValue(argPtr, value);

    // We don't need a pre-barrier, because the element at |index| is guaranteed
    // to be a non-GC thing (either uninitialized memory or the magic hole
    // value).
    masm.storeValue(value, BaseObjectElementIndex(elements, index));

    masm.add32(Imm32(1), index);
    masm.add32(Imm32(1), argIndex);

    masm.branch32(Assembler::LessThan, index, count, &loop);

    masm.PopRegsInMask(liveRegs);

    // Emit a post-write barrier if |output| is tenured.
    //
    // We expect that |output| is nursery allocated, so it isn't worth the
    // trouble to check if no frame argument is a nursery thing, which would
    // allow to omit the post-write barrier.
    masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(temp);
    if (output.volatile_()) {
      volatileRegs.addUnchecked(output);
    }

    masm.PushRegsInMask(volatileRegs);
    emitPostWriteBarrier(output);
    masm.PopRegsInMask(volatileRegs);
  }
  masm.bind(&done);
}
14795 CodeGenerator::RegisterOrInt32 CodeGenerator::ToRegisterOrInt32(
14796 const LAllocation* allocation) {
14797 if (allocation->isConstant()) {
14798 return RegisterOrInt32(allocation->toConstant()->toInt32());
14800 return RegisterOrInt32(ToRegister(allocation));
// Slice of inlined-call arguments: the arguments are LIR operands rather
// than frame slots, so the copy is fully unrolled over the (compile-time
// known) number of actuals. |begin| and |count| may each be a register or a
// compile-time constant, giving four specialization combinations below.
void CodeGenerator::visitInlineArgumentsSlice(LInlineArgumentsSlice* lir) {
  RegisterOrInt32 begin = ToRegisterOrInt32(lir->begin());
  RegisterOrInt32 count = ToRegisterOrInt32(lir->count());
  Register temp = ToRegister(lir->temp());
  Register output = ToRegister(lir->output());

  uint32_t numActuals = lir->mir()->numActuals();

#ifdef DEBUG
  masm.move32(Imm32(numActuals), temp);

  emitAssertArgumentsSliceBounds(begin, count, temp);
#endif

  emitNewArray(lir, count, output, temp);

  // We're done if there are no actual arguments.
  if (numActuals == 0) {
    return;
  }

  // Check if any arguments have to be copied.
  Label done;
  if (count.is<Register>()) {
    masm.branch32(Assembler::Equal, count.as<Register>(), Imm32(0), &done);
  } else if (count.as<int32_t>() == 0) {
    return;
  }

  // Fetch inlined argument |i| as a constant or typed register.
  auto getArg = [&](uint32_t i) {
    return toConstantOrRegister(lir, LInlineArgumentsSlice::ArgIndex(i),
                                lir->mir()->getArg(i)->type());
  };

  auto storeArg = [&](uint32_t i, auto dest) {
    // We don't need a pre-barrier because the element at |index| is guaranteed
    // to be a non-GC thing (either uninitialized memory or the magic hole
    // value).
    masm.storeConstantOrRegister(getArg(i), dest);
  };

  // Initialize all elements.
  if (numActuals == 1) {
    // There's exactly one argument. We've checked that |count| is non-zero,
    // which implies that |begin| must be zero.
    MOZ_ASSERT_IF(begin.is<int32_t>(), begin.as<int32_t>() == 0);

    Register elements = temp;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    storeArg(0, Address(elements, 0));
  } else if (begin.is<Register>()) {
    // There is more than one argument and |begin| isn't a compile-time
    // constant. Iterate through 0..numActuals to search for |begin| and then
    // start copying |count| arguments from that index.

    LiveGeneralRegisterSet liveRegs;
    liveRegs.add(output);
    liveRegs.add(begin.as<Register>());

    masm.PushRegsInMask(liveRegs);

    Register elements = output;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    Register argIndex = begin.as<Register>();

    Register index = temp;
    masm.move32(Imm32(0), index);

    Label doneLoop;
    for (uint32_t i = 0; i < numActuals; ++i) {
      Label next;
      // Skip arguments before |begin| (and, after the copy has started,
      // this compare always matches because argIndex is incremented too).
      masm.branch32(Assembler::NotEqual, argIndex, Imm32(i), &next);

      storeArg(i, BaseObjectElementIndex(elements, index));

      masm.add32(Imm32(1), index);
      masm.add32(Imm32(1), argIndex);

      if (count.is<Register>()) {
        masm.branch32(Assembler::GreaterThanOrEqual, index,
                      count.as<Register>(), &doneLoop);
      } else {
        masm.branch32(Assembler::GreaterThanOrEqual, index,
                      Imm32(count.as<int32_t>()), &doneLoop);
      }

      masm.bind(&next);
    }
    masm.bind(&doneLoop);

    masm.PopRegsInMask(liveRegs);
  } else {
    // There is more than one argument and |begin| is a compile-time constant.

    Register elements = temp;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    int32_t argIndex = begin.as<int32_t>();

    int32_t index = 0;

    Label doneLoop;
    for (uint32_t i = argIndex; i < numActuals; ++i) {
      storeArg(i, Address(elements, index * sizeof(Value)));

      index += 1;

      if (count.is<Register>()) {
        masm.branch32(Assembler::LessThanOrEqual, count.as<Register>(),
                      Imm32(index), &doneLoop);
      } else {
        if (index >= count.as<int32_t>()) {
          break;
        }
      }
    }
    masm.bind(&doneLoop);
  }

  // Determine if we have to emit post-write barrier.
  //
  // If either |begin| or |count| is a constant, use their value directly.
  // Otherwise assume we copy all inline arguments from 0..numActuals.
  bool postWriteBarrier = false;
  uint32_t actualBegin = begin.match([](Register) { return 0; },
                                     [](int32_t value) { return value; });
  uint32_t actualCount =
      count.match([=](Register) { return numActuals; },
                  [](int32_t value) -> uint32_t { return value; });
  for (uint32_t i = 0; i < actualCount; ++i) {
    ConstantOrRegister arg = getArg(actualBegin + i);
    if (arg.constant()) {
      Value v = arg.value();
      if (v.isGCThing() && IsInsideNursery(v.toGCThing())) {
        postWriteBarrier = true;
      }
    } else {
      MIRType type = arg.reg().type();
      if (type == MIRType::Value || NeedsPostBarrier(type)) {
        postWriteBarrier = true;
      }
    }
  }

  // Emit a post-write barrier if |output| is tenured and we couldn't
  // determine at compile-time that no barrier is needed.
  if (postWriteBarrier) {
    masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(temp);
    if (output.volatile_()) {
      volatileRegs.addUnchecked(output);
    }

    masm.PushRegsInMask(volatileRegs);
    emitPostWriteBarrier(output);
    masm.PopRegsInMask(volatileRegs);
  }

  masm.bind(&done);
}
// Normalize a slice endpoint per Array.prototype.slice semantics:
// negative values are offsets from |length| (clamped below at 0), and
// nonnegative values are clamped above at |length|.
void CodeGenerator::visitNormalizeSliceTerm(LNormalizeSliceTerm* lir) {
  Register value = ToRegister(lir->value());
  Register length = ToRegister(lir->length());
  Register output = ToRegister(lir->output());

  masm.move32(value, output);

  Label positive;
  masm.branch32(Assembler::GreaterThanOrEqual, value, Imm32(0), &positive);

  // Negative: output = max(value + length, 0).
  Label done;
  masm.add32(length, output);
  masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(0), &done);
  masm.move32(Imm32(0), output);
  masm.jump(&done);

  // Nonnegative: output = min(value, length).
  masm.bind(&positive);
  masm.cmp32Move32(Assembler::LessThan, length, value, length, output);

  masm.bind(&done);
}
// Array.prototype.join with inline fast paths: an empty array yields the
// empty atom, and a length-1 array whose only element is already a string
// yields that string directly. Everything else calls the VM.
void CodeGenerator::visitArrayJoin(LArrayJoin* lir) {
  Label skipCall;

  Register output = ToRegister(lir->output());
  Register sep = ToRegister(lir->separator());
  Register array = ToRegister(lir->array());
  Register temp = ToRegister(lir->temp0());

  // Fast path for simple length <= 1 cases.
  {
    masm.loadPtr(Address(array, NativeObject::offsetOfElements()), temp);
    Address length(temp, ObjectElements::offsetOfLength());
    Address initLength(temp, ObjectElements::offsetOfInitializedLength());

    // Check for length == 0
    Label notEmpty;
    masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmpty);
    const JSAtomState& names = gen->runtime->names();
    masm.movePtr(ImmGCPtr(names.empty_), output);
    masm.jump(&skipCall);

    masm.bind(&notEmpty);
    Label notSingleString;
    // Check for length == 1, initializedLength >= 1, arr[0].isString()
    masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleString);
    masm.branch32(Assembler::LessThan, initLength, Imm32(1), &notSingleString);

    Address elem0(temp, 0);
    masm.branchTestString(Assembler::NotEqual, elem0, &notSingleString);

    // At this point, 'output' can be used as a scratch register, since we're
    // guaranteed to succeed.
    masm.unboxString(elem0, output);
    masm.jump(&skipCall);
    masm.bind(&notSingleString);
  }

  pushArg(sep);
  pushArg(array);

  using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
  callVM<Fn, jit::ArrayJoin>(lir);
  masm.bind(&skipCall);
}
15035 void CodeGenerator::visitObjectKeys(LObjectKeys* lir) {
15036 Register object = ToRegister(lir->object());
15038 pushArg(object);
15040 using Fn = JSObject* (*)(JSContext*, HandleObject);
15041 callVM<Fn, jit::ObjectKeys>(lir);
15044 void CodeGenerator::visitObjectKeysLength(LObjectKeysLength* lir) {
15045 Register object = ToRegister(lir->object());
15047 pushArg(object);
15049 using Fn = bool (*)(JSContext*, HandleObject, int32_t*);
15050 callVM<Fn, jit::ObjectKeysLength>(lir);
// GetIterator via an inline cache (IonGetIteratorIC handles the stub chain).
void CodeGenerator::visitGetIteratorCache(LGetIteratorCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister val =
      toConstantOrRegister(lir, LGetIteratorCache::ValueIndex,
                           lir->mir()->value()->type())
          .reg();
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  IonGetIteratorIC ic(liveRegs, val, output, temp0, temp1);
  addIC(lir, allocateIC(ic));
}
// OptimizeSpreadCall via an inline cache.
void CodeGenerator::visitOptimizeSpreadCallCache(
    LOptimizeSpreadCallCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  ValueOperand val = ToValue(lir, LOptimizeSpreadCallCache::ValueIndex);
  ValueOperand output = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

  IonOptimizeSpreadCallIC ic(liveRegs, val, output, temp);
  addIC(lir, allocateIC(ic));
}
// CloseIter via an inline cache; the completion kind (normal/throw) comes
// from the MIR node.
void CodeGenerator::visitCloseIterCache(LCloseIterCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  Register iter = ToRegister(lir->iter());
  Register temp = ToRegister(lir->temp0());
  CompletionKind kind = CompletionKind(lir->mir()->completionKind());

  IonCloseIterIC ic(liveRegs, iter, temp, kind);
  addIC(lir, allocateIC(ic));
}
15088 void CodeGenerator::visitOptimizeGetIteratorCache(
15089 LOptimizeGetIteratorCache* lir) {
15090 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
15091 ValueOperand val = ToValue(lir, LOptimizeGetIteratorCache::ValueIndex);
15092 Register output = ToRegister(lir->output());
15093 Register temp = ToRegister(lir->temp0());
15095 IonOptimizeGetIteratorIC ic(liveRegs, val, output, temp);
15096 addIC(lir, allocateIC(ic));
15099 void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
15100 const Register obj = ToRegister(lir->iterator());
15101 const ValueOperand output = ToOutValue(lir);
15102 const Register temp = ToRegister(lir->temp0());
15104 masm.iteratorMore(obj, output, temp);
15107 void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
15108 ValueOperand input = ToValue(lir, LIsNoIterAndBranch::Input);
15109 Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
15110 Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
15112 masm.branchTestMagic(Assembler::Equal, input, ifTrue);
15114 if (!isNextBlock(lir->ifFalse()->lir())) {
15115 masm.jump(ifFalse);
15119 void CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) {
15120 const Register obj = ToRegister(lir->object());
15121 const Register temp0 = ToRegister(lir->temp0());
15122 const Register temp1 = ToRegister(lir->temp1());
15123 const Register temp2 = ToRegister(lir->temp2());
15125 masm.iteratorClose(obj, temp0, temp1, temp2);
15128 void CodeGenerator::visitArgumentsLength(LArgumentsLength* lir) {
15129 // read number of actual arguments from the JS frame.
15130 Register argc = ToRegister(lir->output());
15131 masm.loadNumActualArgs(FramePointer, argc);
// Load one argument Value from the current JS frame, either at a
// compile-time-constant index or at a dynamic register index.
void CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir) {
  ValueOperand result = ToOutValue(lir);
  const LAllocation* index = lir->index();
  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();

  // This instruction is used to access actual arguments and formal arguments.
  // The number of Values on the stack is |max(numFormals, numActuals)|, so we
  // assert |index < numFormals || index < numActuals| in debug builds.
  DebugOnly<size_t> numFormals = gen->outerInfo().script()->function()->nargs();

  if (index->isConstant()) {
    // Constant index: the argument slot offset is known at compile time.
    int32_t i = index->toConstant()->toInt32();
#ifdef DEBUG
    if (uint32_t(i) >= numFormals) {
      // Not covered by the formals; must be within the actuals.
      Label ok;
      Register argc = result.scratchReg();
      masm.loadNumActualArgs(FramePointer, argc);
      masm.branch32(Assembler::Above, argc, Imm32(i), &ok);
      masm.assumeUnreachable("Invalid argument index");
      masm.bind(&ok);
    }
#endif
    Address argPtr(FramePointer, sizeof(Value) * i + argvOffset);
    masm.loadValue(argPtr, result);
  } else {
    // Dynamic index: compute the slot address from the index register.
    Register i = ToRegister(index);
#ifdef DEBUG
    // |i < numFormals || i < numActuals| must hold.
    Label ok;
    Register argc = result.scratchReg();
    masm.branch32(Assembler::Below, i, Imm32(numFormals), &ok);
    masm.loadNumActualArgs(FramePointer, argc);
    masm.branch32(Assembler::Above, argc, i, &ok);
    masm.assumeUnreachable("Invalid argument index");
    masm.bind(&ok);
#endif
    BaseValueIndex argPtr(FramePointer, i, argvOffset);
    masm.loadValue(argPtr, result);
  }
}
// Like visitGetFrameArgument, but for possibly out-of-bounds indices:
// in-bounds loads the argument Value, out-of-bounds (non-negative) yields
// |undefined|, and a negative index bails out.
void CodeGenerator::visitGetFrameArgumentHole(LGetFrameArgumentHole* lir) {
  ValueOperand result = ToOutValue(lir);
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();

  Label outOfBounds, done;
  // Spectre-hardened bounds check: |index < length| or jump out-of-bounds.
  masm.spectreBoundsCheck32(index, length, spectreTemp, &outOfBounds);

  BaseValueIndex argPtr(FramePointer, index, argvOffset);
  masm.loadValue(argPtr, result);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  // Negative indices are not supported on this path; bail out to Baseline.
  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
  masm.moveValue(UndefinedValue(), result);

  masm.bind(&done);
}
// Materialize a rest-parameter array. A small inline fast path handles a
// nursery-allocated array of at most |arrayCapacity| elements; everything
// else (allocation failure, longer rest arrays) falls back to a VM call.
void CodeGenerator::visitRest(LRest* lir) {
  Register numActuals = ToRegister(lir->numActuals());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register temp3 = ToRegister(lir->temp3());
  unsigned numFormals = lir->mir()->numFormals();

  // Maximum number of elements the inline-allocated array can hold.
  constexpr uint32_t arrayCapacity = 2;

  if (Shape* shape = lir->mir()->shape()) {
    // Try to allocate an empty array (length 0, capacity |arrayCapacity|)
    // inline. On failure temp2 is set to nullptr so the VM path is taken.
    uint32_t arrayLength = 0;
    gc::AllocKind allocKind = GuessArrayGCKind(arrayCapacity);
    MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
    allocKind = ForegroundToBackgroundAllocKind(allocKind);
    MOZ_ASSERT(GetGCKindSlots(allocKind) ==
               arrayCapacity + ObjectElements::VALUES_PER_HEADER);

    Label joinAlloc, failAlloc;
    masm.movePtr(ImmGCPtr(shape), temp0);
    masm.createArrayWithFixedElements(temp2, temp0, temp1, InvalidReg,
                                      arrayLength, arrayCapacity, 0, 0,
                                      allocKind, gc::Heap::Default, &failAlloc);
    masm.jump(&joinAlloc);
    {
      masm.bind(&failAlloc);
      masm.movePtr(ImmPtr(nullptr), temp2);
    }
    masm.bind(&joinAlloc);
  } else {
    // No shape: always use the VM call (temp2 == nullptr selects it).
    masm.movePtr(ImmPtr(nullptr), temp2);
  }

  // Set temp1 to the address of the first actual argument.
  size_t actualsOffset = JitFrameLayout::offsetOfActualArgs();
  masm.computeEffectiveAddress(Address(FramePointer, actualsOffset), temp1);

  // Compute array length: max(numActuals - numFormals, 0).
  Register lengthReg;
  if (numFormals) {
    lengthReg = temp0;
    Label emptyLength, joinLength;
    masm.branch32(Assembler::LessThanOrEqual, numActuals, Imm32(numFormals),
                  &emptyLength);
    {
      masm.move32(numActuals, lengthReg);
      masm.sub32(Imm32(numFormals), lengthReg);

      // Skip formal arguments.
      masm.addPtr(Imm32(sizeof(Value) * numFormals), temp1);

      masm.jump(&joinLength);
    }
    masm.bind(&emptyLength);
    {
      masm.move32(Imm32(0), lengthReg);

      // Leave temp1 pointed to the start of actuals() when the rest-array
      // length is zero. We don't use |actuals() + numFormals| because
      // |numFormals| can be any non-negative int32 value when this MRest was
      // created from scalar replacement optimizations. And it seems
      // questionable to compute a Value* pointer which points to who knows
      // where.
    }
    masm.bind(&joinLength);
  } else {
    // Use numActuals directly when there are no formals.
    lengthReg = numActuals;
  }

  // Try to initialize the array elements.
  Label vmCall, done;
  if (lir->mir()->shape()) {
    // Call into C++ if we failed to allocate an array or there are more than
    // |arrayCapacity| elements.
    masm.branchTestPtr(Assembler::Zero, temp2, temp2, &vmCall);
    masm.branch32(Assembler::Above, lengthReg, Imm32(arrayCapacity), &vmCall);

    // The array must be nursery allocated so no post barrier is needed.
#ifdef DEBUG
    Label ok;
    masm.branchPtrInNurseryChunk(Assembler::Equal, temp2, temp3, &ok);
    masm.assumeUnreachable("Unexpected tenured object for LRest");
    masm.bind(&ok);
#endif

    Label initialized;
    masm.branch32(Assembler::Equal, lengthReg, Imm32(0), &initialized);

    // Store length and initializedLength.
    Register elements = temp3;
    masm.loadPtr(Address(temp2, NativeObject::offsetOfElements()), elements);
    Address lengthAddr(elements, ObjectElements::offsetOfLength());
    Address initLengthAddr(elements,
                           ObjectElements::offsetOfInitializedLength());
    masm.store32(lengthReg, lengthAddr);
    masm.store32(lengthReg, initLengthAddr);

    // Store either one or two elements. This may clobber lengthReg (temp0).
    static_assert(arrayCapacity == 2, "code handles 1 or 2 elements");
    Label storeFirst;
    masm.branch32(Assembler::Equal, lengthReg, Imm32(1), &storeFirst);
    masm.storeValue(Address(temp1, sizeof(Value)),
                    Address(elements, sizeof(Value)), temp0);
    masm.bind(&storeFirst);
    masm.storeValue(Address(temp1, 0), Address(elements, 0), temp0);

    // Done.
    masm.bind(&initialized);
    masm.movePtr(temp2, ReturnReg);
    masm.jump(&done);
  }

  masm.bind(&vmCall);

  pushArg(temp2);
  pushArg(temp1);
  pushArg(lengthReg);

  using Fn =
      ArrayObject* (*)(JSContext*, uint32_t, Value*, Handle<ArrayObject*>);
  callVM<Fn, InitRestParameter>(lir);

  masm.bind(&done);
}
// Create a stackmap from the given safepoint, with the structure:
//
//   <reg dump, if any>
//   |       ++ <body (general spill)>
//   |       |       ++ <space for Frame>
//   |       |       |       ++ <inbound args>
//   |       |       |       |
//   Lowest Addr                   Highest Addr
//           |
//           framePushedAtStackMapBase
//
// The caller owns the resulting stackmap. This assumes a grow-down stack.
//
// For non-debug builds, if the stackmap would contain no pointers, no
// stackmap is created, and nullptr is returned. For a debug build, a
// stackmap is always created and returned.
//
// Depending on the type of safepoint, the stackmap may need to account for
// spilled registers. WasmSafepointKind::LirCall corresponds to LIR nodes where
// isCall() == true, for which the register allocator will spill/restore all
// live registers at the LIR level - in this case, the LSafepoint sees only live
// values on the stack, never in registers. WasmSafepointKind::CodegenCall, on
// the other hand, is for LIR nodes which may manually spill/restore live
// registers in codegen, in which case the stackmap must account for this. Traps
// also require tracking of live registers, but spilling is handled by the trap
// mechanism.
static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
                                         const RegisterOffsets& trapExitLayout,
                                         size_t trapExitLayoutNumWords,
                                         size_t nInboundStackArgBytes,
                                         wasm::StackMap** result) {
  // Ensure this is defined on all return paths.
  *result = nullptr;

  // The size of the wasm::Frame itself.
  const size_t nFrameBytes = sizeof(wasm::Frame);

  // This is the number of bytes spilled for live registers, outside of a trap.
  // For traps, trapExitLayout and trapExitLayoutNumWords will be used.
  const size_t nRegisterDumpBytes =
      MacroAssembler::PushRegsInMaskSizeInBytes(safepoint.liveRegs());

  // As mentioned above, for WasmSafepointKind::LirCall, register spills and
  // restores are handled at the LIR level and there should therefore be no live
  // registers to handle here.
  MOZ_ASSERT_IF(safepoint.wasmSafepointKind() == WasmSafepointKind::LirCall,
                nRegisterDumpBytes == 0);
  MOZ_ASSERT(nRegisterDumpBytes % sizeof(void*) == 0);

  // This is the number of bytes in the general spill area, below the Frame.
  const size_t nBodyBytes = safepoint.framePushedAtStackMapBase();

  // The stack map owns any alignment padding around inbound stack args.
  const size_t nInboundStackArgBytesAligned =
      wasm::AlignStackArgAreaSize(nInboundStackArgBytes);

  // This is the number of bytes in the general spill area, the Frame, and the
  // incoming args, but not including any register dump area.
  const size_t nNonRegisterBytes =
      nBodyBytes + nFrameBytes + nInboundStackArgBytesAligned;
  MOZ_ASSERT(nNonRegisterBytes % sizeof(void*) == 0);

  // This is the number of bytes in the register dump area, if any, below the
  // general spill area.
  const size_t nRegisterBytes =
      (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap)
          ? (trapExitLayoutNumWords * sizeof(void*))
          : nRegisterDumpBytes;

  // This is the total number of bytes covered by the map.
  const size_t nTotalBytes = nNonRegisterBytes + nRegisterBytes;

#ifndef DEBUG
  bool needStackMap = !(safepoint.wasmAnyRefRegs().empty() &&
                        safepoint.wasmAnyRefSlots().empty() &&
                        safepoint.slotsOrElementsSlots().empty());

  // There are no references, and this is a non-debug build, so don't bother
  // building the stackmap.
  if (!needStackMap) {
    return true;
  }
#endif

  wasm::StackMap* stackMap =
      wasm::StackMap::create(nTotalBytes / sizeof(void*));
  if (!stackMap) {
    return false;
  }
  if (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap) {
    stackMap->setExitStubWords(trapExitLayoutNumWords);
  }

  // REG DUMP AREA, if any.
  size_t regDumpWords = 0;
  const LiveGeneralRegisterSet wasmAnyRefRegs = safepoint.wasmAnyRefRegs();
  const LiveGeneralRegisterSet slotsOrElementsRegs =
      safepoint.slotsOrElementsRegs();
  // Registers that hold either an anyref or an array-data pointer; each kind
  // gets a different stackmap tag below.
  const LiveGeneralRegisterSet refRegs(GeneralRegisterSet::Union(
      wasmAnyRefRegs.set(), slotsOrElementsRegs.set()));
  GeneralRegisterForwardIterator refRegsIter(refRegs);
  switch (safepoint.wasmSafepointKind()) {
    case WasmSafepointKind::LirCall:
    case WasmSafepointKind::CodegenCall: {
      size_t spilledNumWords = nRegisterDumpBytes / sizeof(void*);
      regDumpWords += spilledNumWords;

      for (; refRegsIter.more(); ++refRegsIter) {
        Register reg = *refRegsIter;
        size_t offsetFromSpillBase =
            safepoint.liveRegs().gprs().offsetOfPushedRegister(reg) /
            sizeof(void*);
        MOZ_ASSERT(0 < offsetFromSpillBase &&
                   offsetFromSpillBase <= spilledNumWords);
        size_t index = spilledNumWords - offsetFromSpillBase;

        if (wasmAnyRefRegs.has(reg)) {
          stackMap->set(index, wasm::StackMap::AnyRef);
        } else {
          MOZ_ASSERT(slotsOrElementsRegs.has(reg));
          stackMap->set(index, wasm::StackMap::ArrayDataPointer);
        }
      }

      // Float and vector registers do not have to be handled; they cannot
      // contain wasm anyrefs, and they are spilled after general-purpose
      // registers. Gprs are therefore closest to the spill base and thus their
      // offset calculation does not need to account for other spills.
    } break;
    case WasmSafepointKind::Trap: {
      regDumpWords += trapExitLayoutNumWords;

      for (; refRegsIter.more(); ++refRegsIter) {
        Register reg = *refRegsIter;
        size_t offsetFromTop = trapExitLayout.getOffset(reg);

        // If this doesn't hold, the associated register wasn't saved by
        // the trap exit stub. Better to crash now than much later, in
        // some obscure place, and possibly with security consequences.
        MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);

        // offsetFromTop is an offset in words down from the highest
        // address in the exit stub save area. Switch it around to be an
        // offset up from the bottom of the (integer register) save area.
        size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;

        if (wasmAnyRefRegs.has(reg)) {
          stackMap->set(offsetFromBottom, wasm::StackMap::AnyRef);
        } else {
          MOZ_ASSERT(slotsOrElementsRegs.has(reg));
          stackMap->set(offsetFromBottom, wasm::StackMap::ArrayDataPointer);
        }
      }
    } break;
    default:
      MOZ_CRASH("unreachable");
  }

  // BODY (GENERAL SPILL) AREA and FRAME and INCOMING ARGS
  // Deal with roots on the stack.
  const LSafepoint::SlotList& wasmAnyRefSlots = safepoint.wasmAnyRefSlots();
  for (SafepointSlotEntry wasmAnyRefSlot : wasmAnyRefSlots) {
    // The following needs to correspond with JitFrameLayout::slotRef
    // wasmAnyRefSlot.stack == 0 means the slot is in the args area
    if (wasmAnyRefSlot.stack) {
      // It's a slot in the body allocation, so .slot is interpreted
      // as an index downwards from the Frame*
      MOZ_ASSERT(wasmAnyRefSlot.slot <= nBodyBytes);
      uint32_t offsetInBytes = nBodyBytes - wasmAnyRefSlot.slot;
      MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
      stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                    wasm::StackMap::AnyRef);
    } else {
      // It's an argument slot
      MOZ_ASSERT(wasmAnyRefSlot.slot < nInboundStackArgBytes);
      uint32_t offsetInBytes = nBodyBytes + nFrameBytes + wasmAnyRefSlot.slot;
      MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
      stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                    wasm::StackMap::AnyRef);
    }
  }

  // Track array data pointers on the stack
  const LSafepoint::SlotList& slots = safepoint.slotsOrElementsSlots();
  for (SafepointSlotEntry slot : slots) {
    MOZ_ASSERT(slot.stack);

    // It's a slot in the body allocation, so .slot is interpreted
    // as an index downwards from the Frame*
    MOZ_ASSERT(slot.slot <= nBodyBytes);
    uint32_t offsetInBytes = nBodyBytes - slot.slot;
    MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
    stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                  wasm::StackMap::Kind::ArrayDataPointer);
  }

  // Record in the map, how far down from the highest address the Frame* is.
  // Take the opportunity to check that we haven't marked any part of the
  // Frame itself as a pointer.
  stackMap->setFrameOffsetFromTop((nInboundStackArgBytesAligned + nFrameBytes) /
                                  sizeof(void*));
#ifdef DEBUG
  for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
    MOZ_ASSERT(stackMap->get(stackMap->header.numMappedWords -
                             stackMap->header.frameOffsetFromTop + i) ==
               wasm::StackMap::Kind::POD);
  }
#endif

  *result = stackMap;
  return true;
}
// Emit machine code for a wasm function compiled through Ion: prologue,
// stack-overflow check, body, epilogue, and out-of-line code. Also converts
// each safepoint into a wasm stackmap registered in |stackMaps|.
bool CodeGenerator::generateWasm(
    wasm::CallIndirectId callIndirectId, wasm::BytecodeOffset trapOffset,
    const wasm::ArgTypeVector& argTypes, const RegisterOffsets& trapExitLayout,
    size_t trapExitLayoutNumWords, wasm::FuncOffsets* offsets,
    wasm::StackMaps* stackMaps, wasm::Decoder* decoder) {
  AutoCreatedBy acb(masm, "CodeGenerator::generateWasm");

  JitSpew(JitSpew_Codegen, "# Emitting wasm code");

  size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(argTypes);
  inboundStackArgBytes_ = nInboundStackArgBytes;

  wasm::GenerateFunctionPrologue(masm, callIndirectId, mozilla::Nothing(),
                                 offsets);

  MOZ_ASSERT(masm.framePushed() == 0);

  // Very large frames are implausible, probably an attack.
  if (frameSize() > wasm::MaxFrameSize) {
    return decoder->fail(decoder->beginOffset(), "stack frame is too large");
  }

  if (omitOverRecursedCheck()) {
    masm.reserveStack(frameSize());
  } else {
    // Reserve the frame with an explicit stack-limit check that can trap;
    // the trap site needs its own stackmap for the function entry state.
    std::pair<CodeOffset, uint32_t> pair =
        masm.wasmReserveStackChecked(frameSize(), trapOffset);
    CodeOffset trapInsnOffset = pair.first;
    size_t nBytesReservedBeforeTrap = pair.second;

    wasm::StackMap* functionEntryStackMap = nullptr;
    if (!CreateStackMapForFunctionEntryTrap(
            argTypes, trapExitLayout, trapExitLayoutNumWords,
            nBytesReservedBeforeTrap, nInboundStackArgBytes,
            &functionEntryStackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map, even if there are no
    // refs to track.
    MOZ_ASSERT(functionEntryStackMap);

    if (functionEntryStackMap &&
        !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
                        functionEntryStackMap)) {
      functionEntryStackMap->destroy();
      return false;
    }
  }

  MOZ_ASSERT(masm.framePushed() == frameSize());

  if (!generateBody()) {
    return false;
  }

  masm.bind(&returnLabel_);
  wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);

  if (!generateOutOfLineCode()) {
    return false;
  }

  masm.flush();
  if (masm.oom()) {
    return false;
  }

  offsets->end = masm.currentOffset();

  // Wasm compilation does not use the Ion-only side tables; verify that
  // nothing was accidentally recorded.
  MOZ_ASSERT(!masm.failureLabel()->used());
  MOZ_ASSERT(snapshots_.listSize() == 0);
  MOZ_ASSERT(snapshots_.RVATableSize() == 0);
  MOZ_ASSERT(recovers_.size() == 0);
  MOZ_ASSERT(graph.numConstants() == 0);
  MOZ_ASSERT(osiIndices_.empty());
  MOZ_ASSERT(icList_.empty());
  MOZ_ASSERT(safepoints_.size() == 0);
  MOZ_ASSERT(!scriptCounts_);

  // Convert the safepoints to stackmaps and add them to our running
  // collection thereof.
  for (CodegenSafepointIndex& index : safepointIndices_) {
    wasm::StackMap* stackMap = nullptr;
    if (!CreateStackMapFromLSafepoint(*index.safepoint(), trapExitLayout,
                                      trapExitLayoutNumWords,
                                      nInboundStackArgBytes, &stackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map.
    MOZ_ASSERT(stackMap);
    if (!stackMap) {
      continue;
    }

    if (!stackMaps->add((uint8_t*)(uintptr_t)index.displacement(), stackMap)) {
      stackMap->destroy();
      return false;
    }
  }

  return true;
}
// Emit machine code for an Ion-compiled JS script: prologue, body, epilogue,
// invalidation epilogue, and out-of-line code, while maintaining the
// native => bytecode mapping and encoding safepoints.
bool CodeGenerator::generate() {
  AutoCreatedBy acb(masm, "CodeGenerator::generate");

  JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
          gen->outerInfo().script()->filename(),
          gen->outerInfo().script()->lineno(),
          gen->outerInfo().script()->column().oneOriginValue());

  // Initialize native code table with an entry to the start of
  // top-level script.
  InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
  jsbytecode* startPC = tree->script()->code();
  BytecodeSite* startSite = new (gen->alloc()) BytecodeSite(tree, startPC);
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  if (!safepoints_.init(gen->alloc())) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "Prologue");
  if (!generatePrologue()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  if (!generateBody()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "Epilogue");
  if (!generateEpilogue()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "InvalidateEpilogue");
  generateInvalidateEpilogue();

  // native => bytecode entries for OOL code will be added
  // by CodeGeneratorShared::generateOutOfLineCode
  perfSpewer_.recordOffset(masm, "OOLCode");
  if (!generateOutOfLineCode()) {
    return false;
  }

  // Add terminal entry.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  // Dump Native to bytecode entries to spew.
  dumpNativeToBytecodeEntries();

  // We encode safepoints after the OSI-point offsets have been determined.
  if (!encodeSafepoints()) {
    return false;
  }

  return !masm.oom();
}
15714 static bool AddInlinedCompilations(JSContext* cx, HandleScript script,
15715 IonCompilationId compilationId,
15716 const WarpSnapshot* snapshot,
15717 bool* isValid) {
15718 MOZ_ASSERT(!*isValid);
15719 RecompileInfo recompileInfo(script, compilationId);
15721 JitZone* jitZone = cx->zone()->jitZone();
15723 for (const auto* scriptSnapshot : snapshot->scripts()) {
15724 JSScript* inlinedScript = scriptSnapshot->script();
15725 if (inlinedScript == script) {
15726 continue;
15729 // TODO(post-Warp): This matches FinishCompilation and is necessary to
15730 // ensure in-progress compilations are canceled when an inlined functon
15731 // becomes a debuggee. See the breakpoint-14.js jit-test.
15732 // When TI is gone, try to clean this up by moving AddInlinedCompilations to
15733 // WarpOracle so that we can handle this as part of addPendingRecompile
15734 // instead of requiring this separate check.
15735 if (inlinedScript->isDebuggee()) {
15736 *isValid = false;
15737 return true;
15740 if (!jitZone->addInlinedCompilation(recompileInfo, inlinedScript)) {
15741 return false;
15745 *isValid = true;
15746 return true;
15749 void CodeGenerator::validateAndRegisterFuseDependencies(JSContext* cx,
15750 HandleScript script,
15751 bool* isValid) {
15752 // No need to validate as we will toss this compilation anyhow.
15753 if (!*isValid) {
15754 return;
15757 for (auto dependency : fuseDependencies) {
15758 switch (dependency) {
15759 case FuseDependencyKind::HasSeenObjectEmulateUndefinedFuse: {
15760 auto& hasSeenObjectEmulateUndefinedFuse =
15761 cx->runtime()->hasSeenObjectEmulateUndefinedFuse.ref();
15763 if (!hasSeenObjectEmulateUndefinedFuse.intact()) {
15764 JitSpew(JitSpew_Codegen,
15765 "tossing compilation; hasSeenObjectEmulateUndefinedFuse fuse "
15766 "dependency no longer valid\n");
15767 *isValid = false;
15768 return;
15771 if (!hasSeenObjectEmulateUndefinedFuse.addFuseDependency(cx, script)) {
15772 JitSpew(JitSpew_Codegen,
15773 "tossing compilation; failed to register "
15774 "hasSeenObjectEmulateUndefinedFuse script dependency\n");
15775 *isValid = false;
15776 return;
15778 break;
15781 case FuseDependencyKind::OptimizeGetIteratorFuse: {
15782 auto& optimizeGetIteratorFuse =
15783 cx->realm()->realmFuses.optimizeGetIteratorFuse;
15784 if (!optimizeGetIteratorFuse.intact()) {
15785 JitSpew(JitSpew_Codegen,
15786 "tossing compilation; optimizeGetIteratorFuse fuse "
15787 "dependency no longer valid\n");
15788 *isValid = false;
15789 return;
15792 if (!optimizeGetIteratorFuse.addFuseDependency(cx, script)) {
15793 JitSpew(JitSpew_Codegen,
15794 "tossing compilation; failed to register "
15795 "optimizeGetIteratorFuse script dependency\n");
15796 *isValid = false;
15797 return;
15799 break;
15802 default:
15803 MOZ_CRASH("Unknown Dependency Kind");
15808 bool CodeGenerator::link(JSContext* cx, const WarpSnapshot* snapshot) {
15809 AutoCreatedBy acb(masm, "CodeGenerator::link");
15811 // We cancel off-thread Ion compilations in a few places during GC, but if
15812 // this compilation was performed off-thread it will already have been
15813 // removed from the relevant lists by this point. Don't allow GC here.
15814 JS::AutoAssertNoGC nogc(cx);
15816 RootedScript script(cx, gen->outerInfo().script());
15817 MOZ_ASSERT(!script->hasIonScript());
15819 // Perform any read barriers which were skipped while compiling the
15820 // script, which may have happened off-thread.
15821 JitZone* jitZone = cx->zone()->jitZone();
15822 jitZone->performStubReadBarriers(zoneStubsToReadBarrier_);
15824 if (scriptCounts_ && !script->hasScriptCounts() &&
15825 !script->initScriptCounts(cx)) {
15826 return false;
15829 IonCompilationId compilationId =
15830 cx->runtime()->jitRuntime()->nextCompilationId();
15831 jitZone->currentCompilationIdRef().emplace(compilationId);
15832 auto resetCurrentId = mozilla::MakeScopeExit(
15833 [jitZone] { jitZone->currentCompilationIdRef().reset(); });
15835 // Record constraints. If an error occured, returns false and potentially
15836 // prevent future compilations. Otherwise, if an invalidation occured, then
15837 // skip the current compilation.
15838 bool isValid = false;
15840 // If an inlined script is invalidated (for example, by attaching
15841 // a debugger), we must also invalidate the parent IonScript.
15842 if (!AddInlinedCompilations(cx, script, compilationId, snapshot, &isValid)) {
15843 return false;
15846 // Validate fuse dependencies here; if a fuse has popped since we registered a
15847 // dependency then we need to toss this compilation as it assumes things which
15848 // are not valid.
15850 // Eagerly register a fuse dependency here too; this way if we OOM we can
15851 // instead simply remove the compilation and move on with our lives.
15852 validateAndRegisterFuseDependencies(cx, script, &isValid);
15854 // This compilation is no longer valid; don't proceed, but return true as this
15855 // isn't an error case either.
15856 if (!isValid) {
15857 return true;
15860 uint32_t argumentSlots = (gen->outerInfo().nargs() + 1) * sizeof(Value);
15862 size_t numNurseryObjects = snapshot->nurseryObjects().length();
15864 IonScript* ionScript = IonScript::New(
15865 cx, compilationId, graph.localSlotsSize(), argumentSlots, frameDepth_,
15866 snapshots_.listSize(), snapshots_.RVATableSize(), recovers_.size(),
15867 graph.numConstants(), numNurseryObjects, safepointIndices_.length(),
15868 osiIndices_.length(), icList_.length(), runtimeData_.length(),
15869 safepoints_.size());
15870 if (!ionScript) {
15871 return false;
15873 #ifdef DEBUG
15874 ionScript->setICHash(snapshot->icHash());
15875 #endif
15877 auto freeIonScript = mozilla::MakeScopeExit([&ionScript] {
15878 // Use js_free instead of IonScript::Destroy: the cache list is still
15879 // uninitialized.
15880 js_free(ionScript);
15883 Linker linker(masm);
15884 JitCode* code = linker.newCode(cx, CodeKind::Ion);
15885 if (!code) {
15886 return false;
15889 // Encode native to bytecode map if profiling is enabled.
15890 if (isProfilerInstrumentationEnabled()) {
15891 // Generate native-to-bytecode main table.
15892 IonEntry::ScriptList scriptList;
15893 if (!generateCompactNativeToBytecodeMap(cx, code, scriptList)) {
15894 return false;
15897 uint8_t* ionTableAddr =
15898 ((uint8_t*)nativeToBytecodeMap_.get()) + nativeToBytecodeTableOffset_;
15899 JitcodeIonTable* ionTable = (JitcodeIonTable*)ionTableAddr;
15901 // Construct the IonEntry that will go into the global table.
15902 auto entry = MakeJitcodeGlobalEntry<IonEntry>(
15903 cx, code, code->raw(), code->rawEnd(), std::move(scriptList), ionTable);
15904 if (!entry) {
15905 return false;
15907 (void)nativeToBytecodeMap_.release(); // Table is now owned by |entry|.
15909 // Add entry to the global table.
15910 JitcodeGlobalTable* globalTable =
15911 cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
15912 if (!globalTable->addEntry(std::move(entry))) {
15913 return false;
15916 // Mark the jitcode as having a bytecode map.
15917 code->setHasBytecodeMap();
15918 } else {
    // Add a dummy jitcodeGlobalTable entry.
15920 auto entry = MakeJitcodeGlobalEntry<DummyEntry>(cx, code, code->raw(),
15921 code->rawEnd());
15922 if (!entry) {
15923 return false;
15926 // Add entry to the global table.
15927 JitcodeGlobalTable* globalTable =
15928 cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
15929 if (!globalTable->addEntry(std::move(entry))) {
15930 return false;
15933 // Mark the jitcode as having a bytecode map.
15934 code->setHasBytecodeMap();
15937 ionScript->setMethod(code);
15939 // If the Gecko Profiler is enabled, mark IonScript as having been
15940 // instrumented accordingly.
15941 if (isProfilerInstrumentationEnabled()) {
15942 ionScript->setHasProfilingInstrumentation();
15945 Assembler::PatchDataWithValueCheck(
15946 CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript),
15947 ImmPtr((void*)-1));
15949 for (CodeOffset offset : ionScriptLabels_) {
15950 Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, offset),
15951 ImmPtr(ionScript), ImmPtr((void*)-1));
15954 for (NurseryObjectLabel label : ionNurseryObjectLabels_) {
15955 void* entry = ionScript->addressOfNurseryObject(label.nurseryIndex);
15956 Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
15957 ImmPtr(entry), ImmPtr((void*)-1));
15960 // for generating inline caches during the execution.
15961 if (runtimeData_.length()) {
15962 ionScript->copyRuntimeData(&runtimeData_[0]);
15964 if (icList_.length()) {
15965 ionScript->copyICEntries(&icList_[0]);
15968 for (size_t i = 0; i < icInfo_.length(); i++) {
15969 IonIC& ic = ionScript->getICFromIndex(i);
15970 Assembler::PatchDataWithValueCheck(
15971 CodeLocationLabel(code, icInfo_[i].icOffsetForJump),
15972 ImmPtr(ic.codeRawPtr()), ImmPtr((void*)-1));
15973 Assembler::PatchDataWithValueCheck(
15974 CodeLocationLabel(code, icInfo_[i].icOffsetForPush), ImmPtr(&ic),
15975 ImmPtr((void*)-1));
15978 JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)", (void*)ionScript,
15979 (void*)code->raw());
15981 ionScript->setInvalidationEpilogueDataOffset(
15982 invalidateEpilogueData_.offset());
15983 if (jsbytecode* osrPc = gen->outerInfo().osrPc()) {
15984 ionScript->setOsrPc(osrPc);
15985 ionScript->setOsrEntryOffset(getOsrEntryOffset());
15987 ionScript->setInvalidationEpilogueOffset(invalidate_.offset());
15989 perfSpewer_.saveProfile(cx, script, code);
15991 #ifdef MOZ_VTUNE
15992 vtune::MarkScript(code, script, "ion");
15993 #endif
15995 // Set a Ion counter hint for this script.
15996 if (cx->runtime()->jitRuntime()->hasJitHintsMap()) {
15997 JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
15998 jitHints->recordIonCompilation(script);
16001 // for marking during GC.
16002 if (safepointIndices_.length()) {
16003 ionScript->copySafepointIndices(&safepointIndices_[0]);
16005 if (safepoints_.size()) {
16006 ionScript->copySafepoints(&safepoints_);
16009 // for recovering from an Ion Frame.
16010 if (osiIndices_.length()) {
16011 ionScript->copyOsiIndices(&osiIndices_[0]);
16013 if (snapshots_.listSize()) {
16014 ionScript->copySnapshots(&snapshots_);
16016 MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
16017 if (recovers_.size()) {
16018 ionScript->copyRecovers(&recovers_);
16020 if (graph.numConstants()) {
16021 const Value* vp = graph.constantPool();
16022 ionScript->copyConstants(vp);
16023 for (size_t i = 0; i < graph.numConstants(); i++) {
16024 const Value& v = vp[i];
16025 if (v.isGCThing()) {
16026 if (gc::StoreBuffer* sb = v.toGCThing()->storeBuffer()) {
16027 sb->putWholeCell(script);
16028 break;
16034 // Attach any generated script counts to the script.
16035 if (IonScriptCounts* counts = extractScriptCounts()) {
16036 script->addIonCounts(counts);
16038 // WARNING: Code after this point must be infallible!
16040 // Copy the list of nursery objects. Note that the store buffer can add
16041 // HeapPtr edges that must be cleared in IonScript::Destroy. See the
16042 // infallibility warning above.
16043 const auto& nurseryObjects = snapshot->nurseryObjects();
16044 for (size_t i = 0; i < nurseryObjects.length(); i++) {
16045 ionScript->nurseryObjects()[i].init(nurseryObjects[i]);
16048 // Transfer ownership of the IonScript to the JitScript. At this point enough
16049 // of the IonScript must be initialized for IonScript::Destroy to work.
16050 freeIonScript.release();
16051 script->jitScript()->setIonScript(script, ionScript);
16053 return true;
16056 // An out-of-line path to convert a boxed int32 to either a float or double.
16057 class OutOfLineUnboxFloatingPoint : public OutOfLineCodeBase<CodeGenerator> {
16058 LUnboxFloatingPoint* unboxFloatingPoint_;
16060 public:
16061 explicit OutOfLineUnboxFloatingPoint(LUnboxFloatingPoint* unboxFloatingPoint)
16062 : unboxFloatingPoint_(unboxFloatingPoint) {}
16064 void accept(CodeGenerator* codegen) override {
16065 codegen->visitOutOfLineUnboxFloatingPoint(this);
16068 LUnboxFloatingPoint* unboxFloatingPoint() const {
16069 return unboxFloatingPoint_;
16073 void CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir) {
16074 const ValueOperand box = ToValue(lir, LUnboxFloatingPoint::Input);
16075 const LDefinition* result = lir->output();
16077 // Out-of-line path to convert int32 to double or bailout
16078 // if this instruction is fallible.
16079 OutOfLineUnboxFloatingPoint* ool =
16080 new (alloc()) OutOfLineUnboxFloatingPoint(lir);
16081 addOutOfLineCode(ool, lir->mir());
16083 FloatRegister resultReg = ToFloatRegister(result);
16084 masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
16085 masm.unboxDouble(box, resultReg);
16086 if (lir->type() == MIRType::Float32) {
16087 masm.convertDoubleToFloat32(resultReg, resultReg);
16089 masm.bind(ool->rejoin());
16092 void CodeGenerator::visitOutOfLineUnboxFloatingPoint(
16093 OutOfLineUnboxFloatingPoint* ool) {
16094 LUnboxFloatingPoint* ins = ool->unboxFloatingPoint();
16095 const ValueOperand value = ToValue(ins, LUnboxFloatingPoint::Input);
16097 if (ins->mir()->fallible()) {
16098 Label bail;
16099 masm.branchTestInt32(Assembler::NotEqual, value, &bail);
16100 bailoutFrom(&bail, ins->snapshot());
16102 masm.int32ValueToFloatingPoint(value, ToFloatRegister(ins->output()),
16103 ins->type());
16104 masm.jump(ool->rejoin());
16107 void CodeGenerator::visitCallBindVar(LCallBindVar* lir) {
16108 pushArg(ToRegister(lir->environmentChain()));
16110 using Fn = JSObject* (*)(JSContext*, JSObject*);
16111 callVM<Fn, BindVarOperation>(lir);
16114 void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
16115 Register obj = ToRegister(lir->getOperand(0));
16116 ValueOperand idVal = ToValue(lir, LMegamorphicSetElement::IndexIndex);
16117 ValueOperand value = ToValue(lir, LMegamorphicSetElement::ValueIndex);
16119 Register temp0 = ToRegister(lir->temp0());
16120 // See comment in LIROps.yaml (x86 is short on registers)
16121 #ifndef JS_CODEGEN_X86
16122 Register temp1 = ToRegister(lir->temp1());
16123 Register temp2 = ToRegister(lir->temp2());
16124 #endif
16126 Label cacheHit, done;
16127 #ifdef JS_CODEGEN_X86
16128 masm.emitMegamorphicCachedSetSlot(
16129 idVal, obj, temp0, value, &cacheHit,
16130 [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
16131 EmitPreBarrier(masm, addr, mirType);
16133 #else
16134 masm.emitMegamorphicCachedSetSlot(
16135 idVal, obj, temp0, temp1, temp2, value, &cacheHit,
16136 [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
16137 EmitPreBarrier(masm, addr, mirType);
16139 #endif
16141 pushArg(Imm32(lir->mir()->strict()));
16142 pushArg(ToValue(lir, LMegamorphicSetElement::ValueIndex));
16143 pushArg(ToValue(lir, LMegamorphicSetElement::IndexIndex));
16144 pushArg(obj);
16146 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
16147 callVM<Fn, js::jit::SetElementMegamorphic<true>>(lir);
16149 masm.jump(&done);
16150 masm.bind(&cacheHit);
16152 masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
16153 masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);
16155 saveVolatile(temp0);
16156 emitPostWriteBarrier(obj);
16157 restoreVolatile(temp0);
16159 masm.bind(&done);
16162 void CodeGenerator::visitLoadScriptedProxyHandler(
16163 LLoadScriptedProxyHandler* ins) {
16164 Register obj = ToRegister(ins->getOperand(0));
16165 Register output = ToRegister(ins->output());
16167 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);
16169 Label bail;
16170 Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
16171 ScriptedProxyHandler::HANDLER_EXTRA));
16172 masm.fallibleUnboxObject(handlerAddr, output, &bail);
16173 bailoutFrom(&bail, ins->snapshot());
#ifdef JS_PUNBOX64
void CodeGenerator::visitCheckScriptedProxyGetResult(
    LCheckScriptedProxyGetResult* ins) {
  ValueOperand target = ToValue(ins, LCheckScriptedProxyGetResult::TargetIndex);
  ValueOperand value = ToValue(ins, LCheckScriptedProxyGetResult::ValueIndex);
  ValueOperand id = ToValue(ins, LCheckScriptedProxyGetResult::IdIndex);
  Register scratch = ToRegister(ins->temp0());
  Register scratch2 = ToRegister(ins->temp1());

  // Slow path: validate the trap result against the target's invariants.
  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckProxyGetByValueResult>(
      ins, ArgList(scratch, id, value), StoreValueTo(value));

  // Only objects flagged as needing result validation take the OOL path.
  masm.unboxObject(target, scratch);
  masm.branchTestObjectNeedsProxyResultValidation(Assembler::NonZero, scratch,
                                                  scratch2, ool->entry());
  masm.bind(ool->rejoin());
}
#endif
16197 void CodeGenerator::visitIdToStringOrSymbol(LIdToStringOrSymbol* ins) {
16198 ValueOperand id = ToValue(ins, LIdToStringOrSymbol::IdIndex);
16199 ValueOperand output = ToOutValue(ins);
16200 Register scratch = ToRegister(ins->temp0());
16202 masm.moveValue(id, output);
16204 Label done, callVM;
16205 Label bail;
16207 ScratchTagScope tag(masm, output);
16208 masm.splitTagForTest(output, tag);
16209 masm.branchTestString(Assembler::Equal, tag, &done);
16210 masm.branchTestSymbol(Assembler::Equal, tag, &done);
16211 masm.branchTestInt32(Assembler::NotEqual, tag, &bail);
16214 masm.unboxInt32(output, scratch);
16216 using Fn = JSLinearString* (*)(JSContext*, int);
16217 OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
16218 ins, ArgList(scratch), StoreRegisterTo(output.scratchReg()));
16220 masm.lookupStaticIntString(scratch, output.scratchReg(),
16221 gen->runtime->staticStrings(), ool->entry());
16223 masm.bind(ool->rejoin());
16224 masm.tagValue(JSVAL_TYPE_STRING, output.scratchReg(), output);
16225 masm.bind(&done);
16227 bailoutFrom(&bail, ins->snapshot());
16230 void CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins) {
16231 const Register obj = ToRegister(ins->getOperand(0));
16232 size_t slot = ins->mir()->slot();
16233 ValueOperand result = ToOutValue(ins);
16235 masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
16238 void CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins) {
16239 const Register obj = ToRegister(ins->getOperand(0));
16240 size_t slot = ins->mir()->slot();
16241 AnyRegister result = ToAnyRegister(ins->getDef(0));
16242 MIRType type = ins->mir()->type();
16244 masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)),
16245 type, result);
16248 template <typename T>
16249 static void EmitLoadAndUnbox(MacroAssembler& masm, const T& src, MIRType type,
16250 bool fallible, AnyRegister dest, Label* fail) {
16251 if (type == MIRType::Double) {
16252 MOZ_ASSERT(dest.isFloat());
16253 masm.ensureDouble(src, dest.fpu(), fail);
16254 return;
16256 if (fallible) {
16257 switch (type) {
16258 case MIRType::Int32:
16259 masm.fallibleUnboxInt32(src, dest.gpr(), fail);
16260 break;
16261 case MIRType::Boolean:
16262 masm.fallibleUnboxBoolean(src, dest.gpr(), fail);
16263 break;
16264 case MIRType::Object:
16265 masm.fallibleUnboxObject(src, dest.gpr(), fail);
16266 break;
16267 case MIRType::String:
16268 masm.fallibleUnboxString(src, dest.gpr(), fail);
16269 break;
16270 case MIRType::Symbol:
16271 masm.fallibleUnboxSymbol(src, dest.gpr(), fail);
16272 break;
16273 case MIRType::BigInt:
16274 masm.fallibleUnboxBigInt(src, dest.gpr(), fail);
16275 break;
16276 default:
16277 MOZ_CRASH("Unexpected MIRType");
16279 return;
16281 masm.loadUnboxedValue(src, type, dest);
16284 void CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins) {
16285 const MLoadFixedSlotAndUnbox* mir = ins->mir();
16286 MIRType type = mir->type();
16287 Register input = ToRegister(ins->object());
16288 AnyRegister result = ToAnyRegister(ins->output());
16289 size_t slot = mir->slot();
16291 Address address(input, NativeObject::getFixedSlotOffset(slot));
16293 Label bail;
16294 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
16295 if (mir->fallible()) {
16296 bailoutFrom(&bail, ins->snapshot());
16300 void CodeGenerator::visitLoadDynamicSlotAndUnbox(
16301 LLoadDynamicSlotAndUnbox* ins) {
16302 const MLoadDynamicSlotAndUnbox* mir = ins->mir();
16303 MIRType type = mir->type();
16304 Register input = ToRegister(ins->slots());
16305 AnyRegister result = ToAnyRegister(ins->output());
16306 size_t slot = mir->slot();
16308 Address address(input, slot * sizeof(JS::Value));
16310 Label bail;
16311 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
16312 if (mir->fallible()) {
16313 bailoutFrom(&bail, ins->snapshot());
16317 void CodeGenerator::visitLoadElementAndUnbox(LLoadElementAndUnbox* ins) {
16318 const MLoadElementAndUnbox* mir = ins->mir();
16319 MIRType type = mir->type();
16320 Register elements = ToRegister(ins->elements());
16321 AnyRegister result = ToAnyRegister(ins->output());
16323 Label bail;
16324 if (ins->index()->isConstant()) {
16325 NativeObject::elementsSizeMustNotOverflow();
16326 int32_t offset = ToInt32(ins->index()) * sizeof(Value);
16327 Address address(elements, offset);
16328 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
16329 } else {
16330 BaseObjectElementIndex address(elements, ToRegister(ins->index()));
16331 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
16334 if (mir->fallible()) {
16335 bailoutFrom(&bail, ins->snapshot());
16339 class OutOfLineAtomizeSlot : public OutOfLineCodeBase<CodeGenerator> {
16340 LInstruction* lir_;
16341 Register stringReg_;
16342 Address slotAddr_;
16343 TypedOrValueRegister dest_;
16345 public:
16346 OutOfLineAtomizeSlot(LInstruction* lir, Register stringReg, Address slotAddr,
16347 TypedOrValueRegister dest)
16348 : lir_(lir), stringReg_(stringReg), slotAddr_(slotAddr), dest_(dest) {}
16350 void accept(CodeGenerator* codegen) final {
16351 codegen->visitOutOfLineAtomizeSlot(this);
16353 LInstruction* lir() const { return lir_; }
16354 Register stringReg() const { return stringReg_; }
16355 Address slotAddr() const { return slotAddr_; }
16356 TypedOrValueRegister dest() const { return dest_; }
16359 void CodeGenerator::visitOutOfLineAtomizeSlot(OutOfLineAtomizeSlot* ool) {
16360 LInstruction* lir = ool->lir();
16361 Register stringReg = ool->stringReg();
16362 Address slotAddr = ool->slotAddr();
16363 TypedOrValueRegister dest = ool->dest();
16365 // This code is called with a non-atomic string in |stringReg|.
16366 // When it returns, |stringReg| contains an unboxed pointer to an
16367 // atomized version of that string, and |slotAddr| contains a
16368 // StringValue pointing to that atom. If |dest| is a ValueOperand,
16369 // it contains the same StringValue; otherwise we assert that |dest|
16370 // is |stringReg|.
16372 saveLive(lir);
16373 pushArg(stringReg);
16375 using Fn = JSAtom* (*)(JSContext*, JSString*);
16376 callVM<Fn, js::AtomizeString>(lir);
16377 StoreRegisterTo(stringReg).generate(this);
16378 restoreLiveIgnore(lir, StoreRegisterTo(stringReg).clobbered());
16380 if (dest.hasValue()) {
16381 masm.moveValue(
16382 TypedOrValueRegister(MIRType::String, AnyRegister(stringReg)),
16383 dest.valueReg());
16384 } else {
16385 MOZ_ASSERT(dest.typedReg().gpr() == stringReg);
16388 emitPreBarrier(slotAddr);
16389 masm.storeTypedOrValue(dest, slotAddr);
16391 // We don't need a post-barrier because atoms aren't nursery-allocated.
16392 #ifdef DEBUG
16393 // We need a temp register for the nursery check. Spill something.
16394 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
16395 allRegs.take(stringReg);
16396 Register temp = allRegs.takeAny();
16397 masm.push(temp);
16399 Label tenured;
16400 masm.branchPtrInNurseryChunk(Assembler::NotEqual, stringReg, temp, &tenured);
16401 masm.assumeUnreachable("AtomizeString returned a nursery pointer");
16402 masm.bind(&tenured);
16404 masm.pop(temp);
16405 #endif
16407 masm.jump(ool->rejoin());
16410 void CodeGenerator::emitMaybeAtomizeSlot(LInstruction* ins, Register stringReg,
16411 Address slotAddr,
16412 TypedOrValueRegister dest) {
16413 OutOfLineAtomizeSlot* ool =
16414 new (alloc()) OutOfLineAtomizeSlot(ins, stringReg, slotAddr, dest);
16415 addOutOfLineCode(ool, ins->mirRaw()->toInstruction());
16416 masm.branchTest32(Assembler::Zero,
16417 Address(stringReg, JSString::offsetOfFlags()),
16418 Imm32(JSString::ATOM_BIT), ool->entry());
16419 masm.bind(ool->rejoin());
16422 void CodeGenerator::visitLoadFixedSlotAndAtomize(
16423 LLoadFixedSlotAndAtomize* ins) {
16424 Register obj = ToRegister(ins->getOperand(0));
16425 Register temp = ToRegister(ins->temp0());
16426 size_t slot = ins->mir()->slot();
16427 ValueOperand result = ToOutValue(ins);
16429 Address slotAddr(obj, NativeObject::getFixedSlotOffset(slot));
16430 masm.loadValue(slotAddr, result);
16432 Label notString;
16433 masm.branchTestString(Assembler::NotEqual, result, &notString);
16434 masm.unboxString(result, temp);
16435 emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
16436 masm.bind(&notString);
16439 void CodeGenerator::visitLoadDynamicSlotAndAtomize(
16440 LLoadDynamicSlotAndAtomize* ins) {
16441 ValueOperand result = ToOutValue(ins);
16442 Register temp = ToRegister(ins->temp0());
16443 Register base = ToRegister(ins->input());
16444 int32_t offset = ins->mir()->slot() * sizeof(js::Value);
16446 Address slotAddr(base, offset);
16447 masm.loadValue(slotAddr, result);
16449 Label notString;
16450 masm.branchTestString(Assembler::NotEqual, result, &notString);
16451 masm.unboxString(result, temp);
16452 emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
16453 masm.bind(&notString);
16456 void CodeGenerator::visitLoadFixedSlotUnboxAndAtomize(
16457 LLoadFixedSlotUnboxAndAtomize* ins) {
16458 const MLoadFixedSlotAndUnbox* mir = ins->mir();
16459 MOZ_ASSERT(mir->type() == MIRType::String);
16460 Register input = ToRegister(ins->object());
16461 AnyRegister result = ToAnyRegister(ins->output());
16462 size_t slot = mir->slot();
16464 Address slotAddr(input, NativeObject::getFixedSlotOffset(slot));
16466 Label bail;
16467 EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
16468 &bail);
16469 emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
16470 TypedOrValueRegister(MIRType::String, result));
16472 if (mir->fallible()) {
16473 bailoutFrom(&bail, ins->snapshot());
16477 void CodeGenerator::visitLoadDynamicSlotUnboxAndAtomize(
16478 LLoadDynamicSlotUnboxAndAtomize* ins) {
16479 const MLoadDynamicSlotAndUnbox* mir = ins->mir();
16480 MOZ_ASSERT(mir->type() == MIRType::String);
16481 Register input = ToRegister(ins->slots());
16482 AnyRegister result = ToAnyRegister(ins->output());
16483 size_t slot = mir->slot();
16485 Address slotAddr(input, slot * sizeof(JS::Value));
16487 Label bail;
16488 EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
16489 &bail);
16490 emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
16491 TypedOrValueRegister(MIRType::String, result));
16493 if (mir->fallible()) {
16494 bailoutFrom(&bail, ins->snapshot());
16498 void CodeGenerator::visitAddAndStoreSlot(LAddAndStoreSlot* ins) {
16499 const Register obj = ToRegister(ins->getOperand(0));
16500 const ValueOperand value = ToValue(ins, LAddAndStoreSlot::ValueIndex);
16501 const Register maybeTemp = ToTempRegisterOrInvalid(ins->temp0());
16503 Shape* shape = ins->mir()->shape();
16504 masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
16505 EmitPreBarrier(masm, addr, MIRType::Shape);
16508 // Perform the store. No pre-barrier required since this is a new
16509 // initialization.
16511 uint32_t offset = ins->mir()->slotOffset();
16512 if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
16513 Address slot(obj, offset);
16514 masm.storeValue(value, slot);
16515 } else {
16516 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), maybeTemp);
16517 Address slot(maybeTemp, offset);
16518 masm.storeValue(value, slot);
16522 void CodeGenerator::visitAllocateAndStoreSlot(LAllocateAndStoreSlot* ins) {
16523 const Register obj = ToRegister(ins->getOperand(0));
16524 const ValueOperand value = ToValue(ins, LAllocateAndStoreSlot::ValueIndex);
16525 const Register temp0 = ToRegister(ins->temp0());
16526 const Register temp1 = ToRegister(ins->temp1());
16528 masm.Push(obj);
16529 masm.Push(value);
16531 using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
16532 masm.setupAlignedABICall();
16533 masm.loadJSContext(temp0);
16534 masm.passABIArg(temp0);
16535 masm.passABIArg(obj);
16536 masm.move32(Imm32(ins->mir()->numNewSlots()), temp1);
16537 masm.passABIArg(temp1);
16538 masm.callWithABI<Fn, NativeObject::growSlotsPure>();
16539 masm.storeCallPointerResult(temp0);
16541 masm.Pop(value);
16542 masm.Pop(obj);
16544 bailoutIfFalseBool(temp0, ins->snapshot());
16546 masm.storeObjShape(ins->mir()->shape(), obj,
16547 [](MacroAssembler& masm, const Address& addr) {
16548 EmitPreBarrier(masm, addr, MIRType::Shape);
16551 // Perform the store. No pre-barrier required since this is a new
16552 // initialization.
16553 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp0);
16554 Address slot(temp0, ins->mir()->slotOffset());
16555 masm.storeValue(value, slot);
16558 void CodeGenerator::visitAddSlotAndCallAddPropHook(
16559 LAddSlotAndCallAddPropHook* ins) {
16560 const Register obj = ToRegister(ins->object());
16561 const ValueOperand value =
16562 ToValue(ins, LAddSlotAndCallAddPropHook::ValueIndex);
16564 pushArg(ImmGCPtr(ins->mir()->shape()));
16565 pushArg(value);
16566 pushArg(obj);
16568 using Fn =
16569 bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
16570 callVM<Fn, AddSlotAndCallAddPropHook>(ins);
16573 void CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins) {
16574 const Register obj = ToRegister(ins->getOperand(0));
16575 size_t slot = ins->mir()->slot();
16577 const ValueOperand value = ToValue(ins, LStoreFixedSlotV::ValueIndex);
16579 Address address(obj, NativeObject::getFixedSlotOffset(slot));
16580 if (ins->mir()->needsBarrier()) {
16581 emitPreBarrier(address);
16584 masm.storeValue(value, address);
16587 void CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins) {
16588 const Register obj = ToRegister(ins->getOperand(0));
16589 size_t slot = ins->mir()->slot();
16591 const LAllocation* value = ins->value();
16592 MIRType valueType = ins->mir()->value()->type();
16594 Address address(obj, NativeObject::getFixedSlotOffset(slot));
16595 if (ins->mir()->needsBarrier()) {
16596 emitPreBarrier(address);
16599 ConstantOrRegister nvalue =
16600 value->isConstant()
16601 ? ConstantOrRegister(value->toConstant()->toJSValue())
16602 : TypedOrValueRegister(valueType, ToAnyRegister(value));
16603 masm.storeConstantOrRegister(nvalue, address);
16606 void CodeGenerator::visitGetNameCache(LGetNameCache* ins) {
16607 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16608 Register envChain = ToRegister(ins->envObj());
16609 ValueOperand output = ToOutValue(ins);
16610 Register temp = ToRegister(ins->temp0());
16612 IonGetNameIC ic(liveRegs, envChain, output, temp);
16613 addIC(ins, allocateIC(ic));
16616 void CodeGenerator::addGetPropertyCache(LInstruction* ins,
16617 LiveRegisterSet liveRegs,
16618 TypedOrValueRegister value,
16619 const ConstantOrRegister& id,
16620 ValueOperand output) {
16621 CacheKind kind = CacheKind::GetElem;
16622 if (id.constant() && id.value().isString()) {
16623 JSString* idString = id.value().toString();
16624 if (idString->isAtom() && !idString->asAtom().isIndex()) {
16625 kind = CacheKind::GetProp;
16628 IonGetPropertyIC cache(kind, liveRegs, value, id, output);
16629 addIC(ins, allocateIC(cache));
16632 void CodeGenerator::addSetPropertyCache(LInstruction* ins,
16633 LiveRegisterSet liveRegs,
16634 Register objReg, Register temp,
16635 const ConstantOrRegister& id,
16636 const ConstantOrRegister& value,
16637 bool strict) {
16638 CacheKind kind = CacheKind::SetElem;
16639 if (id.constant() && id.value().isString()) {
16640 JSString* idString = id.value().toString();
16641 if (idString->isAtom() && !idString->asAtom().isIndex()) {
16642 kind = CacheKind::SetProp;
16645 IonSetPropertyIC cache(kind, liveRegs, objReg, temp, id, value, strict);
16646 addIC(ins, allocateIC(cache));
16649 ConstantOrRegister CodeGenerator::toConstantOrRegister(LInstruction* lir,
16650 size_t n, MIRType type) {
16651 if (type == MIRType::Value) {
16652 return TypedOrValueRegister(ToValue(lir, n));
16655 const LAllocation* value = lir->getOperand(n);
16656 if (value->isConstant()) {
16657 return ConstantOrRegister(value->toConstant()->toJSValue());
16660 return TypedOrValueRegister(type, ToAnyRegister(value));
16663 void CodeGenerator::visitGetPropertyCache(LGetPropertyCache* ins) {
16664 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16665 TypedOrValueRegister value =
16666 toConstantOrRegister(ins, LGetPropertyCache::ValueIndex,
16667 ins->mir()->value()->type())
16668 .reg();
16669 ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCache::IdIndex,
16670 ins->mir()->idval()->type());
16671 ValueOperand output = ToOutValue(ins);
16672 addGetPropertyCache(ins, liveRegs, value, id, output);
16675 void CodeGenerator::visitGetPropSuperCache(LGetPropSuperCache* ins) {
16676 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16677 Register obj = ToRegister(ins->obj());
16678 TypedOrValueRegister receiver =
16679 toConstantOrRegister(ins, LGetPropSuperCache::ReceiverIndex,
16680 ins->mir()->receiver()->type())
16681 .reg();
16682 ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCache::IdIndex,
16683 ins->mir()->idval()->type());
16684 ValueOperand output = ToOutValue(ins);
16686 CacheKind kind = CacheKind::GetElemSuper;
16687 if (id.constant() && id.value().isString()) {
16688 JSString* idString = id.value().toString();
16689 if (idString->isAtom() && !idString->asAtom().isIndex()) {
16690 kind = CacheKind::GetPropSuper;
16694 IonGetPropSuperIC cache(kind, liveRegs, obj, receiver, id, output);
16695 addIC(ins, allocateIC(cache));
16698 void CodeGenerator::visitBindNameCache(LBindNameCache* ins) {
16699 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16700 Register envChain = ToRegister(ins->environmentChain());
16701 Register output = ToRegister(ins->output());
16702 Register temp = ToRegister(ins->temp0());
16704 IonBindNameIC ic(liveRegs, envChain, output, temp);
16705 addIC(ins, allocateIC(ic));
16708 void CodeGenerator::visitHasOwnCache(LHasOwnCache* ins) {
16709 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16710 TypedOrValueRegister value =
16711 toConstantOrRegister(ins, LHasOwnCache::ValueIndex,
16712 ins->mir()->value()->type())
16713 .reg();
16714 TypedOrValueRegister id = toConstantOrRegister(ins, LHasOwnCache::IdIndex,
16715 ins->mir()->idval()->type())
16716 .reg();
16717 Register output = ToRegister(ins->output());
16719 IonHasOwnIC cache(liveRegs, value, id, output);
16720 addIC(ins, allocateIC(cache));
16723 void CodeGenerator::visitCheckPrivateFieldCache(LCheckPrivateFieldCache* ins) {
16724 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16725 TypedOrValueRegister value =
16726 toConstantOrRegister(ins, LCheckPrivateFieldCache::ValueIndex,
16727 ins->mir()->value()->type())
16728 .reg();
16729 TypedOrValueRegister id =
16730 toConstantOrRegister(ins, LCheckPrivateFieldCache::IdIndex,
16731 ins->mir()->idval()->type())
16732 .reg();
16733 Register output = ToRegister(ins->output());
16735 IonCheckPrivateFieldIC cache(liveRegs, value, id, output);
16736 addIC(ins, allocateIC(cache));
16739 void CodeGenerator::visitNewPrivateName(LNewPrivateName* ins) {
16740 pushArg(ImmGCPtr(ins->mir()->name()));
16742 using Fn = JS::Symbol* (*)(JSContext*, Handle<JSAtom*>);
16743 callVM<Fn, NewPrivateName>(ins);
16746 void CodeGenerator::visitCallDeleteProperty(LCallDeleteProperty* lir) {
16747 pushArg(ImmGCPtr(lir->mir()->name()));
16748 pushArg(ToValue(lir, LCallDeleteProperty::ValueIndex));
16750 using Fn = bool (*)(JSContext*, HandleValue, Handle<PropertyName*>, bool*);
16751 if (lir->mir()->strict()) {
16752 callVM<Fn, DelPropOperation<true>>(lir);
16753 } else {
16754 callVM<Fn, DelPropOperation<false>>(lir);
16758 void CodeGenerator::visitCallDeleteElement(LCallDeleteElement* lir) {
16759 pushArg(ToValue(lir, LCallDeleteElement::IndexIndex));
16760 pushArg(ToValue(lir, LCallDeleteElement::ValueIndex));
16762 using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
16763 if (lir->mir()->strict()) {
16764 callVM<Fn, DelElemOperation<true>>(lir);
16765 } else {
16766 callVM<Fn, DelElemOperation<false>>(lir);
16770 void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
16771 Register obj = ToRegister(lir->object());
16772 Register iterObj = ToRegister(lir->output());
16773 Register temp = ToRegister(lir->temp0());
16774 Register temp2 = ToRegister(lir->temp1());
16775 Register temp3 = ToRegister(lir->temp2());
16777 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
16778 OutOfLineCode* ool = (lir->mir()->wantsIndices())
16779 ? oolCallVM<Fn, GetIteratorWithIndices>(
16780 lir, ArgList(obj), StoreRegisterTo(iterObj))
16781 : oolCallVM<Fn, GetIterator>(
16782 lir, ArgList(obj), StoreRegisterTo(iterObj));
16784 masm.maybeLoadIteratorFromShape(obj, iterObj, temp, temp2, temp3,
16785 ool->entry());
16787 Register nativeIter = temp;
16788 masm.loadPrivate(
16789 Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
16790 nativeIter);
16792 if (lir->mir()->wantsIndices()) {
16793 // At least one consumer of the output of this iterator has been optimized
16794 // to use iterator indices. If the cached iterator doesn't include indices,
16795 // but it was marked to indicate that we can create them if needed, then we
16796 // do a VM call to replace the cached iterator with a fresh iterator
16797 // including indices.
16798 masm.branchNativeIteratorIndices(Assembler::Equal, nativeIter, temp2,
16799 NativeIteratorIndices::AvailableOnRequest,
16800 ool->entry());
16803 Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
16804 masm.storePtr(
16805 obj, Address(nativeIter, NativeIterator::offsetOfObjectBeingIterated()));
16806 masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
16808 Register enumeratorsAddr = temp2;
16809 masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
16810 masm.registerIterator(enumeratorsAddr, nativeIter, temp3);
16812 // Generate post-write barrier for storing to |iterObj->objectBeingIterated_|.
16813 // We already know that |iterObj| is tenured, so we only have to check |obj|.
16814 Label skipBarrier;
16815 masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);
16817 LiveRegisterSet save = liveVolatileRegs(lir);
16818 save.takeUnchecked(temp);
16819 save.takeUnchecked(temp2);
16820 save.takeUnchecked(temp3);
16821 if (iterObj.volatile_()) {
16822 save.addUnchecked(iterObj);
16825 masm.PushRegsInMask(save);
16826 emitPostWriteBarrier(iterObj);
16827 masm.PopRegsInMask(save);
16829 masm.bind(&skipBarrier);
16831 masm.bind(ool->rejoin());
16834 void CodeGenerator::visitValueToIterator(LValueToIterator* lir) {
16835 pushArg(ToValue(lir, LValueToIterator::ValueIndex));
16837 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
16838 callVM<Fn, ValueToIterator>(lir);
16841 void CodeGenerator::visitIteratorHasIndicesAndBranch(
16842 LIteratorHasIndicesAndBranch* lir) {
16843 Register iterator = ToRegister(lir->iterator());
16844 Register object = ToRegister(lir->object());
16845 Register temp = ToRegister(lir->temp());
16846 Register temp2 = ToRegister(lir->temp2());
16847 Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
16848 Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
16850 // Check that the iterator has indices available.
16851 Address nativeIterAddr(iterator,
16852 PropertyIteratorObject::offsetOfIteratorSlot());
16853 masm.loadPrivate(nativeIterAddr, temp);
16854 masm.branchNativeIteratorIndices(Assembler::NotEqual, temp, temp2,
16855 NativeIteratorIndices::Valid, ifFalse);
16857 // Guard that the first shape stored in the iterator matches the current
16858 // shape of the iterated object.
16859 Address firstShapeAddr(temp, NativeIterator::offsetOfFirstShape());
16860 masm.loadPtr(firstShapeAddr, temp);
16861 masm.branchTestObjShape(Assembler::NotEqual, object, temp, temp2, object,
16862 ifFalse);
16864 if (!isNextBlock(lir->ifTrue()->lir())) {
16865 masm.jump(ifTrue);
// Load the value of the property the iterator currently points at, using the
// iterator's cached PropertyIndex. The index kind selects one of three
// storage locations: dynamic slot, fixed slot, or dense element.
void CodeGenerator::visitLoadSlotByIteratorIndex(
    LLoadSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  ValueOperand result = ToOutValue(lir);

  // temp <- index, temp2 <- PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  // Dynamic slot: load from the object's out-of-line slots array.
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.loadValue(BaseValueIndex(temp2, temp), result);
  masm.jump(&done);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.loadValue(BaseValueIndex(object, temp, sizeof(NativeObject)), result);
  masm.jump(&done);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  // Only Element remains; assert the kind is not corrupted.
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  masm.loadValue(BaseObjectElementIndex(temp2, temp), result);
  masm.bind(&done);
}
// Store a value into the property the iterator currently points at. Mirrors
// visitLoadSlotByIteratorIndex: compute the effective address for the
// dynamic-slot / fixed-slot / dense-element cases, then perform one shared
// store with the required pre/post write barriers.
void CodeGenerator::visitStoreSlotByIteratorIndex(
    LStoreSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  ValueOperand value = ToValue(lir, LStoreSlotByIteratorIndex::ValueIndex);
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // temp <- index, temp2 <- PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done, doStore;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.computeEffectiveAddress(BaseValueIndex(temp2, temp), temp);
  masm.jump(&doStore);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.computeEffectiveAddress(
      BaseValueIndex(object, temp, sizeof(NativeObject)), temp);
  masm.jump(&doStore);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  // Only Element remains; assert the kind is not corrupted.
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  BaseObjectElementIndex elementAddress(temp2, temp);
  masm.computeEffectiveAddress(elementAddress, temp);

  // temp now holds the destination address for all three cases.
  masm.bind(&doStore);
  Address storeAddress(temp, 0);
  emitPreBarrier(storeAddress);
  masm.storeValue(value, storeAddress);

  // Post-barrier: only needed when a tenured object stores a nursery cell.
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp2, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp2, &done);

  saveVolatile(temp2);
  emitPostWriteBarrier(object);
  restoreVolatile(temp2);

  masm.bind(&done);
}
// Lower LSetPropertyCache by attaching a SetProp/SetElem IC. The id and value
// operands may be constants or registers depending on the MIR types.
void CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register objReg = ToRegister(ins->object());
  Register temp = ToRegister(ins->temp0());

  ConstantOrRegister id = toConstantOrRegister(ins, LSetPropertyCache::IdIndex,
                                               ins->mir()->idval()->type());
  ConstantOrRegister value = toConstantOrRegister(
      ins, LSetPropertyCache::ValueIndex, ins->mir()->value()->type());

  addSetPropertyCache(ins, liveRegs, objReg, temp, id, value,
                      ins->mir()->strict());
}
// Lower JSOp::Throw: call into the VM to throw the given value.
void CodeGenerator::visitThrow(LThrow* lir) {
  pushArg(ToValue(lir, LThrow::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue);
  callVM<Fn, js::ThrowOperation>(lir);
}
// Lower ThrowWithStack: throw a value together with an explicit stack value.
// Note: VM-call arguments are pushed in reverse order (stack first).
void CodeGenerator::visitThrowWithStack(LThrowWithStack* lir) {
  pushArg(ToValue(lir, LThrowWithStack::StackIndex));
  pushArg(ToValue(lir, LThrowWithStack::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue);
  callVM<Fn, js::ThrowWithStackOperation>(lir);
}
17005 class OutOfLineTypeOfV : public OutOfLineCodeBase<CodeGenerator> {
17006 LTypeOfV* ins_;
17008 public:
17009 explicit OutOfLineTypeOfV(LTypeOfV* ins) : ins_(ins) {}
17011 void accept(CodeGenerator* codegen) override {
17012 codegen->visitOutOfLineTypeOfV(this);
17014 LTypeOfV* ins() const { return ins_; }
// Materialize the JSType (typeof result) for a statically-known JSValueType
// into |output|.
void CodeGenerator::emitTypeOfJSType(JSValueType type, Register output) {
  switch (type) {
    case JSVAL_TYPE_OBJECT:
      masm.move32(Imm32(JSTYPE_OBJECT), output);
      break;
    case JSVAL_TYPE_DOUBLE:
    case JSVAL_TYPE_INT32:
      // Both numeric value types report "number".
      masm.move32(Imm32(JSTYPE_NUMBER), output);
      break;
    case JSVAL_TYPE_BOOLEAN:
      masm.move32(Imm32(JSTYPE_BOOLEAN), output);
      break;
    case JSVAL_TYPE_UNDEFINED:
      masm.move32(Imm32(JSTYPE_UNDEFINED), output);
      break;
    case JSVAL_TYPE_NULL:
      // Per spec, typeof null is "object".
      masm.move32(Imm32(JSTYPE_OBJECT), output);
      break;
    case JSVAL_TYPE_STRING:
      masm.move32(Imm32(JSTYPE_STRING), output);
      break;
    case JSVAL_TYPE_SYMBOL:
      masm.move32(Imm32(JSTYPE_SYMBOL), output);
      break;
    case JSVAL_TYPE_BIGINT:
      masm.move32(Imm32(JSTYPE_BIGINT), output);
      break;
    default:
      MOZ_CRASH("Unsupported JSValueType");
  }
}
// Emit one typeof type-test: if |tag| matches |type|, set |output| to the
// corresponding JSType and jump to |done|; otherwise fall through. Object
// inputs always branch to |oolObject| because their result depends on the
// object (callable / emulates-undefined).
void CodeGenerator::emitTypeOfCheck(JSValueType type, Register tag,
                                    Register output, Label* done,
                                    Label* oolObject) {
  Label notMatch;
  switch (type) {
    case JSVAL_TYPE_OBJECT:
      // The input may be a callable object (result is "function") or
      // may emulate undefined (result is "undefined"). Use an OOL path.
      masm.branchTestObject(Assembler::Equal, tag, oolObject);
      return;
    case JSVAL_TYPE_DOUBLE:
    case JSVAL_TYPE_INT32:
      // Int32 and Double are unified into a single "number" check.
      masm.branchTestNumber(Assembler::NotEqual, tag, &notMatch);
      break;
    default:
      masm.branchTestType(Assembler::NotEqual, tag, type, &notMatch);
      break;
  }

  emitTypeOfJSType(type, output);
  masm.jump(done);
  masm.bind(&notMatch);
}
// Lower LTypeOfV: compute typeof for a boxed Value. Checks are ordered by
// observed type frequency (from the baseline TypeDataList), falling back to a
// default order for types not yet observed. Object inputs go through an OOL
// path.
void CodeGenerator::visitTypeOfV(LTypeOfV* lir) {
  const ValueOperand value = ToValue(lir, LTypeOfV::InputIndex);
  Register output = ToRegister(lir->output());
  Register tag = masm.extractTag(value, output);

  Label done;

  auto* ool = new (alloc()) OutOfLineTypeOfV(lir);
  addOutOfLineCode(ool, lir->mir());

  const std::initializer_list<JSValueType> defaultOrder = {
      JSVAL_TYPE_OBJECT, JSVAL_TYPE_DOUBLE, JSVAL_TYPE_UNDEFINED,
      JSVAL_TYPE_NULL,   JSVAL_TYPE_BOOLEAN, JSVAL_TYPE_STRING,
      JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};

  mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);

  // Generate checks for previously observed types first.
  // The TypeDataList is sorted by descending frequency.
  for (auto& observed : lir->mir()->observedTypes()) {
    JSValueType type = observed.type();

    // Unify number types.
    if (type == JSVAL_TYPE_INT32) {
      type = JSVAL_TYPE_DOUBLE;
    }

    remaining -= type;

    emitTypeOfCheck(type, tag, output, &done, ool->entry());
  }

  // Generate checks for remaining types.
  for (auto type : defaultOrder) {
    if (!remaining.contains(type)) {
      continue;
    }
    remaining -= type;

    if (remaining.isEmpty() && type != JSVAL_TYPE_OBJECT) {
      // We can skip the check for the last remaining type, unless the type is
      // JSVAL_TYPE_OBJECT, which may have to go through the OOL path.
#ifdef DEBUG
      emitTypeOfCheck(type, tag, output, &done, ool->entry());
      masm.assumeUnreachable("Unexpected Value type in visitTypeOfV");
#else
      emitTypeOfJSType(type, output);
#endif
    } else {
      emitTypeOfCheck(type, tag, output, &done, ool->entry());
    }
  }
  MOZ_ASSERT(remaining.isEmpty());

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Compute typeof for an object in |obj|: "function" for callables,
// "undefined" for objects emulating undefined, "object" otherwise. Falls back
// to a js::TypeOfObject VM helper (via ABI call) for the slow cases
// typeOfObject cannot decide inline.
void CodeGenerator::emitTypeOfObject(Register obj, Register output,
                                     Label* done) {
  Label slowCheck, isObject, isCallable, isUndefined;
  masm.typeOfObject(obj, output, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.move32(Imm32(JSTYPE_FUNCTION), output);
  masm.jump(done);

  masm.bind(&isUndefined);
  masm.move32(Imm32(JSTYPE_UNDEFINED), output);
  masm.jump(done);

  masm.bind(&isObject);
  masm.move32(Imm32(JSTYPE_OBJECT), output);
  masm.jump(done);

  masm.bind(&slowCheck);

  // Slow path: call js::TypeOfObject. Volatile registers must be saved
  // around the ABI call.
  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);
}
// OOL path for LTypeOfV: the input is known to be an object; unbox it and
// dispatch to emitTypeOfObject, then rejoin the inline path.
void CodeGenerator::visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool) {
  LTypeOfV* ins = ool->ins();

  ValueOperand input = ToValue(ins, LTypeOfV::InputIndex);
  Register temp = ToTempUnboxRegister(ins->temp0());
  Register output = ToRegister(ins->output());

  Register obj = masm.extractObject(input, temp);
  emitTypeOfObject(obj, output, ool->rejoin());
  masm.jump(ool->rejoin());
}
// Lower LTypeOfO: typeof for an input statically known to be an object.
void CodeGenerator::visitTypeOfO(LTypeOfO* lir) {
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  Label done;
  emitTypeOfObject(obj, output, &done);
  masm.bind(&done);
}
// Lower LTypeOfName: map a JSType enum value to its interned name string by
// indexing into the runtime's names table.
void CodeGenerator::visitTypeOfName(LTypeOfName* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::Below, input, Imm32(JSTYPE_LIMIT), &ok);
  masm.assumeUnreachable("bad JSType");
  masm.bind(&ok);
#endif

  // The typeof names are laid out contiguously starting at
  // names().undefined, so we can index from its address.
  static_assert(JSTYPE_UNDEFINED == 0);

  masm.movePtr(ImmPtr(&gen->runtime->names().undefined), output);
  masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
}
17198 class OutOfLineTypeOfIsNonPrimitiveV : public OutOfLineCodeBase<CodeGenerator> {
17199 LTypeOfIsNonPrimitiveV* ins_;
17201 public:
17202 explicit OutOfLineTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* ins)
17203 : ins_(ins) {}
17205 void accept(CodeGenerator* codegen) override {
17206 codegen->visitOutOfLineTypeOfIsNonPrimitiveV(this);
17208 auto* ins() const { return ins_; }
17211 class OutOfLineTypeOfIsNonPrimitiveO : public OutOfLineCodeBase<CodeGenerator> {
17212 LTypeOfIsNonPrimitiveO* ins_;
17214 public:
17215 explicit OutOfLineTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* ins)
17216 : ins_(ins) {}
17218 void accept(CodeGenerator* codegen) override {
17219 codegen->visitOutOfLineTypeOfIsNonPrimitiveO(this);
17221 auto* ins() const { return ins_; }
// Slow path shared by the typeof-is OOL stubs: call js::TypeOfObject for
// |obj|, then compare the resulting JSType against the expected one and
// store the boolean result in |output|.
void CodeGenerator::emitTypeOfIsObjectOOL(MTypeOfIs* mir, Register obj,
                                          Register output) {
  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);

  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
  masm.cmp32Set(cond, output, Imm32(mir->jstype()), output);
}
// OOL path for LTypeOfIsNonPrimitiveV: unbox the object and run the slow
// typeof-is check, then rejoin.
void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV(
    OutOfLineTypeOfIsNonPrimitiveV* ool) {
  auto* ins = ool->ins();
  ValueOperand input = ToValue(ins, LTypeOfIsNonPrimitiveV::InputIndex);
  Register output = ToRegister(ins->output());
  Register temp = ToTempUnboxRegister(ins->temp0());

  Register obj = masm.extractObject(input, temp);

  emitTypeOfIsObjectOOL(ins->mir(), obj, output);

  masm.jump(ool->rejoin());
}
// OOL path for LTypeOfIsNonPrimitiveO: run the slow typeof-is check on the
// object input, then rejoin.
void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO(
    OutOfLineTypeOfIsNonPrimitiveO* ool) {
  auto* ins = ool->ins();
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  emitTypeOfIsObjectOOL(ins->mir(), input, output);

  masm.jump(ool->rejoin());
}
// Emit the inline fast path for a typeof-is comparison on an object input.
// Routes typeOfObject's three outcomes (object / function / undefined) to
// |success| or |fail| depending on which JSType the comparison expects, then
// materializes the boolean result honoring the (strict) (in)equality op.
// |slowCheck| is taken when typeOfObject cannot decide inline.
void CodeGenerator::emitTypeOfIsObject(MTypeOfIs* mir, Register obj,
                                       Register output, Label* success,
                                       Label* fail, Label* slowCheck) {
  // By default every outcome fails; the expected type's label is redirected
  // to |success| below.
  Label* isObject = fail;
  Label* isFunction = fail;
  Label* isUndefined = fail;

  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED:
      isUndefined = success;
      break;

    case JSTYPE_OBJECT:
      isObject = success;
      break;

    case JSTYPE_FUNCTION:
      isFunction = success;
      break;

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Primitive type");
  }

  masm.typeOfObject(obj, output, slowCheck, isObject, isFunction, isUndefined);

  auto op = mir->jsop();

  // Convert the success/fail labels into the boolean the comparison op
  // demands (Eq/StrictEq: success => true; Ne/StrictNe: success => false).
  Label done;
  masm.bind(fail);
  masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  masm.jump(&done);
  masm.bind(success);
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
  masm.bind(&done);
}
// Lower LTypeOfIsNonPrimitiveV: |typeof val == "undefined"/"object"/
// "function"| for a boxed Value. Primitive tags are resolved directly from
// the value tag; object inputs fall through to emitTypeOfIsObject with an
// OOL slow path.
void CodeGenerator::visitTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* lir) {
  ValueOperand input = ToValue(lir, LTypeOfIsNonPrimitiveV::InputIndex);
  Register output = ToRegister(lir->output());
  Register temp = ToTempUnboxRegister(lir->temp0());

  auto* mir = lir->mir();

  auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveV(lir);
  addOutOfLineCode(ool, mir);

  Label success, fail;

  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED: {
      // "undefined" matches the undefined tag directly; objects may emulate
      // undefined, so they need the object check below.
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      masm.branchTestUndefined(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_OBJECT: {
      // Per spec, typeof null is "object".
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      masm.branchTestNull(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_FUNCTION: {
      masm.branchTestObject(Assembler::NotEqual, input, &fail);
      break;
    }

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Primitive type");
  }

  // The remaining input is an object.
  Register obj = masm.extractObject(input, temp);

  emitTypeOfIsObject(mir, obj, output, &success, &fail, ool->entry());

  masm.bind(ool->rejoin());
}
// Lower LTypeOfIsNonPrimitiveO: typeof-is comparison for an input statically
// known to be an object.
void CodeGenerator::visitTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  auto* mir = lir->mir();

  auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveO(lir);
  addOutOfLineCode(ool, mir);

  Label success, fail;
  emitTypeOfIsObject(mir, input, output, &success, &fail, ool->entry());

  masm.bind(ool->rejoin());
}
// Lower LTypeOfIsPrimitive: typeof-is comparison against a primitive type
// name. Each primitive can be decided with a single tag test on the boxed
// value; the condition encodes whether (Strict)Eq or (Strict)Ne is wanted.
void CodeGenerator::visitTypeOfIsPrimitive(LTypeOfIsPrimitive* lir) {
  ValueOperand input = ToValue(lir, LTypeOfIsPrimitive::InputIndex);
  Register output = ToRegister(lir->output());

  auto* mir = lir->mir();
  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);

  switch (mir->jstype()) {
    case JSTYPE_STRING:
      masm.testStringSet(cond, input, output);
      break;
    case JSTYPE_NUMBER:
      masm.testNumberSet(cond, input, output);
      break;
    case JSTYPE_BOOLEAN:
      masm.testBooleanSet(cond, input, output);
      break;
    case JSTYPE_SYMBOL:
      masm.testSymbolSet(cond, input, output);
      break;
    case JSTYPE_BIGINT:
      masm.testBigIntSet(cond, input, output);
      break;

    case JSTYPE_UNDEFINED:
    case JSTYPE_OBJECT:
    case JSTYPE_FUNCTION:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Non-primitive type");
  }
}
// Lower LToAsyncIter: wrap a sync iterator (plus its next method) in an
// async-from-sync iterator via a VM call. Arguments are pushed in reverse.
void CodeGenerator::visitToAsyncIter(LToAsyncIter* lir) {
  pushArg(ToValue(lir, LToAsyncIter::NextMethodIndex));
  pushArg(ToRegister(lir->iterator()));

  using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
  callVM<Fn, js::CreateAsyncFromSyncIterator>(lir);
}
// Lower LToPropertyKeyCache: attach an IC that converts a value to a
// property key.
void CodeGenerator::visitToPropertyKeyCache(LToPropertyKeyCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  ValueOperand input = ToValue(lir, LToPropertyKeyCache::InputIndex);
  ValueOperand output = ToOutValue(lir);

  IonToPropertyKeyIC ic(liveRegs, input, output);
  addIC(lir, allocateIC(ic));
}
// Load a boxed Value from a dense-elements array. Bails out if the loaded
// value is the magic hole value.
void CodeGenerator::visitLoadElementV(LLoadElementV* load) {
  Register elements = ToRegister(load->elements());
  const ValueOperand out = ToOutValue(load);

  if (load->index()->isConstant()) {
    // Constant index: fold the offset; the static assert guards overflow.
    NativeObject::elementsSizeMustNotOverflow();
    int32_t offset = ToInt32(load->index()) * sizeof(Value);
    masm.loadValue(Address(elements, offset), out);
  } else {
    masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index())),
                   out);
  }

  // A magic value marks a hole; deoptimize in that case.
  Label testMagic;
  masm.branchTestMagic(Assembler::Equal, out, &testMagic);
  bailoutFrom(&testMagic, load->snapshot());
}
// Load a dense element, producing |undefined| for out-of-bounds indices and
// holes. Negative indices bail out when the MIR requires the check (they may
// hit a named property on the prototype chain).
void CodeGenerator::visitLoadElementHole(LLoadElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register initLength = ToRegister(lir->initLength());
  const ValueOperand out = ToOutValue(lir);

  const MLoadElementHole* mir = lir->mir();

  // If the index is out of bounds, load |undefined|. Otherwise, load the
  // value.
  Label outOfBounds, done;
  masm.spectreBoundsCheck32(index, initLength, out.scratchReg(), &outOfBounds);

  masm.loadValue(BaseObjectElementIndex(elements, index), out);

  // If the value wasn't a hole, we're done. Otherwise, we'll load undefined.
  masm.branchTestMagic(Assembler::NotEqual, out, &done);

  if (mir->needsNegativeIntCheck()) {
    Label loadUndefined;
    masm.jump(&loadUndefined);

    masm.bind(&outOfBounds);

    // Negative indices can't be handled here; deoptimize.
    bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

    masm.bind(&loadUndefined);
  } else {
    masm.bind(&outOfBounds);
  }
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
// Load a scalar element from typed-array-style storage. The |fail| label is
// only used for loads that can't be represented in the output register
// (loadFromTypedArray binds it as needed) and triggers a bailout.
void CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToTempRegisterOrInvalid(lir->temp0());
  AnyRegister out = ToAnyRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  Label fail;
  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.loadFromTypedArray(storageType, source, out, temp, &fail);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.loadFromTypedArray(storageType, source, out, temp, &fail);
  }

  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }
}
// Load a 64-bit element from BigInt64/BigUint64 typed-array storage and box
// it as a BigInt object.
void CodeGenerator::visitLoadUnboxedBigInt(LLoadUnboxedBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToRegister(lir->temp());
  Register64 temp64 = ToRegister64(lir->temp64());
  Register out = ToRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.load64(source, temp64);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.load64(source, temp64);
  }

  // Allocate/initialize the BigInt from the raw 64-bit value.
  emitCreateBigInt(lir, storageType, temp64, out, temp);
}
// Load an element from a DataView. Unlike typed arrays, DataView accesses
// are byte-addressed (TimesOne), may be unaligned, and may require a byte
// swap depending on the littleEndian argument vs. the host endianness.
void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp = ToTempRegisterOrInvalid(lir->temp());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
  AnyRegister out = ToAnyRegister(lir->output());

  const MLoadDataViewElement* mir = lir->mir();
  Scalar::Type storageType = mir->storageType();

  BaseIndex source(elements, ToRegister(lir->index()), TimesOne);

  // No swap needed when the requested endianness is a constant matching the
  // host's.
  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly load if no byte swap is needed and the platform supports unaligned
  // accesses for the access. (Such support is assumed for integer types.)
  if (noSwap && (!Scalar::isFloatingType(storageType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    if (!Scalar::isBigIntType(storageType)) {
      Label fail;
      masm.loadFromTypedArray(storageType, source, out, temp, &fail);

      if (fail.used()) {
        bailoutFrom(&fail, lir->snapshot());
      }
    } else {
      masm.load64(source, temp64);

      emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
    }
    return;
  }

  // Load the value into a gpr register.
  switch (storageType) {
    case Scalar::Int16:
      masm.load16UnalignedSignExtend(source, out.gpr());
      break;
    case Scalar::Uint16:
      masm.load16UnalignedZeroExtend(source, out.gpr());
      break;
    case Scalar::Int32:
      masm.load32Unaligned(source, out.gpr());
      break;
    case Scalar::Uint32:
      masm.load32Unaligned(source, out.isFloat() ? temp : out.gpr());
      break;
    case Scalar::Float32:
      masm.load32Unaligned(source, temp);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.load64Unaligned(source, temp64);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Runtime endianness: skip the swap when it matches the host.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (storageType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(out.gpr());
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(out.gpr());
        break;
      case Scalar::Int32:
        masm.byteSwap32(out.gpr());
        break;
      case Scalar::Uint32:
        masm.byteSwap32(out.isFloat() ? temp : out.gpr());
        break;
      case Scalar::Float32:
        masm.byteSwap32(temp);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Move the value into the output register.
  switch (storageType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      break;
    case Scalar::Uint32:
      if (out.isFloat()) {
        masm.convertUInt32ToDouble(temp, out.fpu());
      } else {
        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadDataViewElement to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        bailoutTest32(Assembler::Signed, out.gpr(), out.gpr(), lir->snapshot());
      }
      break;
    case Scalar::Float32:
      masm.moveGPRToFloat32(temp, out.fpu());
      masm.canonicalizeFloat(out.fpu());
      break;
    case Scalar::Float64:
      masm.moveGPR64ToDouble(temp64, out.fpu());
      masm.canonicalizeDouble(out.fpu());
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Load a typed-array element as a boxed Value, producing |undefined| when
// the index is out of bounds (no exception, matching OOB typed-array reads).
void CodeGenerator::visitLoadTypedArrayElementHole(
    LLoadTypedArrayElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  const ValueOperand out = ToOutValue(lir);

  Register scratch = out.scratchReg();

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, length, scratch, &outOfBounds);

  Scalar::Type arrayType = lir->mir()->arrayType();
  Label fail;
  BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
  // Uint32 values above INT32_MAX either force a double result or bail out,
  // depending on what the MIR expects.
  MacroAssembler::Uint32Mode uint32Mode =
      lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
                                : MacroAssembler::Uint32Mode::FailOnDouble;
  masm.loadFromTypedArray(arrayType, source, out, uint32Mode, out.scratchReg(),
                          &fail);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }

  masm.bind(&done);
}
// Load a BigInt64/BigUint64 typed-array element as a boxed Value, producing
// |undefined| for out-of-bounds indices. On x86 the output Value registers
// double as temporaries due to register pressure.
void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
    LLoadTypedArrayElementHoleBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  const ValueOperand out = ToOutValue(lir);

  Register temp = ToRegister(lir->temp());

  // On x86 there are not enough registers. In that case reuse the output
  // registers as temporaries.
#ifdef JS_CODEGEN_X86
  MOZ_ASSERT(lir->temp64().isBogusTemp());
  Register64 temp64 = out.toRegister64();
#else
  Register64 temp64 = ToRegister64(lir->temp64());
#endif

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, length, temp, &outOfBounds);

  Scalar::Type arrayType = lir->mir()->arrayType();
  BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
  masm.load64(source, temp64);

#ifdef JS_CODEGEN_X86
  Register bigInt = temp;
  Register maybeTemp = InvalidReg;
#else
  Register bigInt = out.scratchReg();
  Register maybeTemp = temp;
#endif
  emitCreateBigInt(lir, arrayType, temp64, bigInt, maybeTemp);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
// Helper for emitting a jump table for table-switches. The table of code
// pointers is emitted either inline next to the dispatch (ARM) or in an
// out-of-line section (other platforms); |tableType| selects which, and
// |isOutOfLine_| tracks whether we are currently emitting OOL code so table
// entries are only written in the matching phase.
template <SwitchTableType tableType>
class OutOfLineSwitch : public OutOfLineCodeBase<CodeGenerator> {
  using LabelsVector = Vector<Label, 0, JitAllocPolicy>;
  using CodeLabelsVector = Vector<CodeLabel, 0, JitAllocPolicy>;
  // Labels of the case-body entry points, in table order.
  LabelsVector labels_;
  // Code pointers reserved in the table, patched to labels_ after codegen.
  CodeLabelsVector codeLabels_;
  // Address of the start of the jump table.
  CodeLabel start_;
  bool isOutOfLine_;

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineSwitch(this);
  }

 public:
  explicit OutOfLineSwitch(TempAllocator& alloc)
      : labels_(alloc), codeLabels_(alloc), isOutOfLine_(false) {}

  CodeLabel* start() { return &start_; }

  CodeLabelsVector& codeLabels() { return codeLabels_; }
  LabelsVector& labels() { return labels_; }

  // Emit the indirect jump through the table at |index|.
  void jumpToCodeEntries(MacroAssembler& masm, Register index, Register temp) {
    Register base;
    if (tableType == SwitchTableType::Inline) {
#if defined(JS_CODEGEN_ARM)
      // On ARM the table immediately follows the dispatch; use pc as base.
      base = ::js::jit::pc;
#else
      MOZ_CRASH("NYI: SwitchTableType::Inline");
#endif
    } else {
#if defined(JS_CODEGEN_ARM)
      MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
#else
      masm.mov(start(), temp);
      base = temp;
#endif
    }
    BaseIndex jumpTarget(base, index, ScalePointer);
    masm.branchToComputedAddress(jumpTarget);
  }

  // Register an entry in the switch table.
  void addTableEntry(MacroAssembler& masm) {
    if ((!isOutOfLine_ && tableType == SwitchTableType::Inline) ||
        (isOutOfLine_ && tableType == SwitchTableType::OutOfLine)) {
      CodeLabel cl;
      masm.writeCodePointer(&cl);
      masm.propagateOOM(codeLabels_.append(std::move(cl)));
    }
  }

  // Register the code, to which the table will jump to.
  void addCodeEntry(MacroAssembler& masm) {
    Label entry;
    masm.bind(&entry);
    masm.propagateOOM(labels_.append(std::move(entry)));
  }

  void setOutOfLine() { isOutOfLine_ = true; }
};
// Emit the out-of-line portion of a switch jump table: reserve the table's
// code-pointer slots (OutOfLine tables only), then bind each reserved
// pointer to its case-body label.
template <SwitchTableType tableType>
void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<tableType>* jumpTable) {
  jumpTable->setOutOfLine();
  auto& labels = jumpTable->labels();

  if (tableType == SwitchTableType::OutOfLine) {
#if defined(JS_CODEGEN_ARM)
    MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
#elif defined(JS_CODEGEN_NONE)
    MOZ_CRASH();
#else

#  if defined(JS_CODEGEN_ARM64)
    // Keep the table contiguous: forbid constant-pool/nop insertion while
    // emitting the (labels + start) pointers.
    AutoForbidPoolsAndNops afp(
        &masm,
        (labels.length() + 1) * (sizeof(void*) / vixl::kInstructionSize));
#  endif

    masm.haltingAlign(sizeof(void*));

    // Bind the address of the jump table and reserve the space for code
    // pointers to jump in the newly generated code.
    masm.bind(jumpTable->start());
    masm.addCodeLabel(*jumpTable->start());
    for (size_t i = 0, e = labels.length(); i < e; i++) {
      jumpTable->addTableEntry(masm);
    }
#endif
  }

  // Register all reserved pointers of the jump table to target labels. The
  // entries of the jump table need to be absolute addresses and thus must be
  // patched after codegen is finished.
  auto& codeLabels = jumpTable->codeLabels();
  for (size_t i = 0, e = codeLabels.length(); i < e; i++) {
    auto& cl = codeLabels[i];
    cl.target()->bind(labels[i].offset());
    masm.addCodeLabel(cl);
  }
}
// Explicit instantiations for both table placements.
template void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<SwitchTableType::Inline>* jumpTable);
template void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<SwitchTableType::OutOfLine>* jumpTable);
// Store |value| (float register, constant, or gpr) into typed-array storage
// at |dest|, dispatching on the element write type.
template <typename T>
static inline void StoreToTypedArray(MacroAssembler& masm,
                                     Scalar::Type writeType,
                                     const LAllocation* value, const T& dest) {
  if (writeType == Scalar::Float32 || writeType == Scalar::Float64) {
    masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
  } else {
    if (value->isConstant()) {
      masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
    } else {
      masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
    }
  }
}
// Store a scalar element into typed-array-style storage, with either a
// constant or register index.
void CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();

  const MStoreUnboxedScalar* mir = lir->mir();

  Scalar::Type writeType = mir->writeType();

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), writeType);
    StoreToTypedArray(masm, writeType, value, dest);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(writeType));
    StoreToTypedArray(masm, writeType, value, dest);
  }
}
17893 void CodeGenerator::visitStoreUnboxedBigInt(LStoreUnboxedBigInt* lir) {
17894 Register elements = ToRegister(lir->elements());
17895 Register value = ToRegister(lir->value());
17896 Register64 temp = ToRegister64(lir->temp());
17898 Scalar::Type writeType = lir->mir()->writeType();
17900 masm.loadBigInt64(value, temp);
17902 if (lir->index()->isConstant()) {
17903 Address dest = ToAddress(elements, lir->index(), writeType);
17904 masm.storeToTypedBigIntArray(writeType, temp, dest);
17905 } else {
17906 BaseIndex dest(elements, ToRegister(lir->index()),
17907 ScaleFromScalarType(writeType));
17908 masm.storeToTypedBigIntArray(writeType, temp, dest);
// Store an element through a DataView, honoring the (possibly dynamic)
// littleEndian flag. Fast path: when no byte swap is needed and unaligned
// access is supported, store directly. Slow path: move the value into a
// GPR (or 64-bit GPR pair), byte-swap it if required, then store unaligned.
void CodeGenerator::visitStoreDataViewElement(LStoreDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp = ToTempRegisterOrInvalid(lir->temp());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());

  const MStoreDataViewElement* mir = lir->mir();
  Scalar::Type writeType = mir->writeType();

  // DataView accesses are byte-addressed, hence TimesOne.
  BaseIndex dest(elements, ToRegister(lir->index()), TimesOne);

  // No swap is needed only when the flag is a compile-time constant that
  // matches the host endianness.
  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly store if no byte swap is needed and the platform supports
  // unaligned accesses for the access.  (Such support is assumed for integer
  // types.)
  if (noSwap && (!Scalar::isFloatingType(writeType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    if (!Scalar::isBigIntType(writeType)) {
      StoreToTypedArray(masm, writeType, value, dest);
    } else {
      masm.loadBigInt64(ToRegister(value), temp64);
      masm.storeToTypedBigIntArray(writeType, temp64, dest);
    }
    return;
  }

  // Load the value into a gpr register (temp for <= 32 bits, temp64 for
  // 64-bit payloads, including Float64 reinterpreted as bits).
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      if (value->isConstant()) {
        masm.move32(Imm32(ToInt32(value)), temp);
      } else {
        masm.move32(ToRegister(value), temp);
      }
      break;
    case Scalar::Float32: {
      FloatRegister fvalue = ToFloatRegister(value);
      masm.canonicalizeFloatIfDeterministic(fvalue);
      masm.moveFloat32ToGPR(fvalue, temp);
      break;
    }
    case Scalar::Float64: {
      FloatRegister fvalue = ToFloatRegister(value);
      masm.canonicalizeDoubleIfDeterministic(fvalue);
      masm.moveDoubleToGPR64(fvalue, temp64);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.loadBigInt64(ToRegister(value), temp64);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      // 8-bit types never reach here: they need neither swapping nor
      // alignment handling.
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Dynamic flag: skip the swap when the requested endianness already
      // matches the host's.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (writeType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(temp);
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(temp);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
        masm.byteSwap32(temp);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Store the value into the destination, using unaligned stores since a
  // DataView imposes no alignment on the byte offset.
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
      masm.store16Unaligned(temp, dest);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.store32Unaligned(temp, dest);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.store64Unaligned(temp64, dest);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
18038 void CodeGenerator::visitStoreTypedArrayElementHole(
18039 LStoreTypedArrayElementHole* lir) {
18040 Register elements = ToRegister(lir->elements());
18041 const LAllocation* value = lir->value();
18043 Scalar::Type arrayType = lir->mir()->arrayType();
18045 Register index = ToRegister(lir->index());
18046 const LAllocation* length = lir->length();
18047 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
18049 Label skip;
18050 if (length->isRegister()) {
18051 masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
18052 } else {
18053 masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
18056 BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
18057 StoreToTypedArray(masm, arrayType, value, dest);
18059 masm.bind(&skip);
18062 void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
18063 LStoreTypedArrayElementHoleBigInt* lir) {
18064 Register elements = ToRegister(lir->elements());
18065 Register value = ToRegister(lir->value());
18066 Register64 temp = ToRegister64(lir->temp());
18068 Scalar::Type arrayType = lir->mir()->arrayType();
18070 Register index = ToRegister(lir->index());
18071 const LAllocation* length = lir->length();
18072 Register spectreTemp = temp.scratchReg();
18074 Label skip;
18075 if (length->isRegister()) {
18076 masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
18077 } else {
18078 masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
18081 masm.loadBigInt64(value, temp);
18083 BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
18084 masm.storeToTypedBigIntArray(arrayType, temp, dest);
18086 masm.bind(&skip);
18089 void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
18090 masm.memoryBarrier(ins->type());
18093 void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
18094 Register value = ToRegister(lir->value());
18095 Register output = ToRegister(lir->output());
18097 masm.atomicIsLockFreeJS(value, output);
18100 void CodeGenerator::visitClampIToUint8(LClampIToUint8* lir) {
18101 Register output = ToRegister(lir->output());
18102 MOZ_ASSERT(output == ToRegister(lir->input()));
18103 masm.clampIntToUint8(output);
18106 void CodeGenerator::visitClampDToUint8(LClampDToUint8* lir) {
18107 FloatRegister input = ToFloatRegister(lir->input());
18108 Register output = ToRegister(lir->output());
18109 masm.clampDoubleToUint8(input, output);
// Clamp an arbitrary boxed Value to uint8 range. Most value types are
// handled inline by clampValueToUint8; strings go through an out-of-line
// StringToNumber VM call, and values the inline path can't handle bail out.
void CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) {
  ValueOperand operand = ToValue(lir, LClampVToUint8::InputIndex);
  FloatRegister tempFloat = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  // OOL string path: |output| carries the string argument at entry
  // (clampValueToUint8 arranges this), and the numeric result lands in
  // tempFloat.
  using Fn = bool (*)(JSContext*, JSString*, double*);
  OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
      lir, ArgList(output), StoreFloatRegisterTo(tempFloat));
  Label* stringEntry = oolString->entry();
  Label* stringRejoin = oolString->rejoin();

  Label fails;
  masm.clampValueToUint8(operand, stringEntry, stringRejoin, output, tempFloat,
                         output, &fails);

  // Unhandled inputs (e.g. types with no numeric conversion here) bail out.
  bailoutFrom(&fails, lir->snapshot());
}
18130 void CodeGenerator::visitInCache(LInCache* ins) {
18131 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
18133 ConstantOrRegister key =
18134 toConstantOrRegister(ins, LInCache::LhsIndex, ins->mir()->key()->type());
18135 Register object = ToRegister(ins->rhs());
18136 Register output = ToRegister(ins->output());
18137 Register temp = ToRegister(ins->temp0());
18139 IonInIC cache(liveRegs, key, object, output, temp);
18140 addIC(ins, allocateIC(cache));
// Implements |index in array| against dense elements: the result is true iff
// the index is below the initialized length and the element is not a hole.
// Negative indices cannot be answered here (they may be properties on the
// object or its prototypes), so they bail out when the MIR says a check is
// needed.
void CodeGenerator::visitInArray(LInArray* lir) {
  const MInArray* mir = lir->mir();
  Register elements = ToRegister(lir->elements());
  Register initLength = ToRegister(lir->initLength());
  Register output = ToRegister(lir->output());

  Label falseBranch, done, trueBranch;

  if (lir->index()->isConstant()) {
    int32_t index = ToInt32(lir->index());

    // A constant negative index always needs the bailout.
    if (index < 0) {
      MOZ_ASSERT(mir->needsNegativeIntCheck());
      bailout(lir->snapshot());
      return;
    }

    masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index),
                  &falseBranch);

    NativeObject::elementsSizeMustNotOverflow();
    Address address = Address(elements, index * sizeof(Value));
    masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
  } else {
    Register index = ToRegister(lir->index());

    // The unsigned BelowOrEqual compare below sends negative indices (which
    // look like huge unsigned values) to failedInitLength; route them to a
    // dedicated check when the MIR requires distinguishing them.
    Label negativeIntCheck;
    Label* failedInitLength = &falseBranch;
    if (mir->needsNegativeIntCheck()) {
      failedInitLength = &negativeIntCheck;
    }

    masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);

    BaseObjectElementIndex address(elements, index);
    masm.branchTestMagic(Assembler::Equal, address, &falseBranch);

    if (mir->needsNegativeIntCheck()) {
      masm.jump(&trueBranch);
      masm.bind(&negativeIntCheck);

      // Bail out on genuinely negative indices; non-negative out-of-bounds
      // indices fall through to the false result.
      bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

      masm.jump(&falseBranch);
    }
  }

  masm.bind(&trueBranch);
  masm.move32(Imm32(1), output);
  masm.jump(&done);

  masm.bind(&falseBranch);
  masm.move32(Imm32(0), output);
  masm.bind(&done);
}
18199 void CodeGenerator::visitGuardElementNotHole(LGuardElementNotHole* lir) {
18200 Register elements = ToRegister(lir->elements());
18201 const LAllocation* index = lir->index();
18203 Label testMagic;
18204 if (index->isConstant()) {
18205 Address address(elements, ToInt32(index) * sizeof(js::Value));
18206 masm.branchTestMagic(Assembler::Equal, address, &testMagic);
18207 } else {
18208 BaseObjectElementIndex address(elements, ToRegister(index));
18209 masm.branchTestMagic(Assembler::Equal, address, &testMagic);
18211 bailoutFrom(&testMagic, lir->snapshot());
18214 void CodeGenerator::visitInstanceOfO(LInstanceOfO* ins) {
18215 Register protoReg = ToRegister(ins->rhs());
18216 emitInstanceOf(ins, protoReg);
18219 void CodeGenerator::visitInstanceOfV(LInstanceOfV* ins) {
18220 Register protoReg = ToRegister(ins->rhs());
18221 emitInstanceOf(ins, protoReg);
// Shared emission for InstanceOfO/InstanceOfV when the function's .prototype
// is known to be the object in |protoReg|. Walks the lhs's prototype chain
// inline; a lazy proto (cross-compartment wrapper) falls back to an
// IsPrototypeOf VM call. |output| doubles as the chain-walk cursor.
void CodeGenerator::emitInstanceOf(LInstruction* ins, Register protoReg) {
  // This path implements fun_hasInstance when the function's prototype is
  // known to be the object in protoReg

  Label done;
  Register output = ToRegister(ins->getDef(0));

  // If the lhs is a primitive, the result is false.
  Register objReg;
  if (ins->isInstanceOfV()) {
    Label isObject;
    ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
    masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
    masm.mov(ImmWord(0), output);
    masm.jump(&done);
    masm.bind(&isObject);
    // NOTE: unboxing into |output| means the original lhs object may need to
    // be regenerated before the VM call below.
    objReg = masm.extractObject(lhsValue, output);
  } else {
    objReg = ToRegister(ins->toInstanceOfO()->lhs());
  }

  // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
  // This follows the main loop of js::IsPrototypeOf, though additionally breaks
  // out of the loop on Proxy::LazyProto.

  // Load the lhs's prototype.
  masm.loadObjProto(objReg, output);

  Label testLazy;
  {
    Label loopPrototypeChain;
    masm.bind(&loopPrototypeChain);

    // Test for the target prototype object.
    Label notPrototypeObject;
    masm.branchPtr(Assembler::NotEqual, output, protoReg, &notPrototypeObject);
    masm.mov(ImmWord(1), output);
    masm.jump(&done);
    masm.bind(&notPrototypeObject);

    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

    // Test for nullptr or Proxy::LazyProto
    masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);

    // Load the current object's prototype.
    masm.loadObjProto(output, output);

    masm.jump(&loopPrototypeChain);
  }

  // Make a VM call if an object with a lazy proto was found on the prototype
  // chain. This currently occurs only for cross compartment wrappers, which
  // we do not expect to be compared with non-wrapper functions from this
  // compartment. Otherwise, we stopped on a nullptr prototype and the output
  // register is already correct.

  using Fn = bool (*)(JSContext*, HandleObject, JSObject*, bool*);
  auto* ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(protoReg, objReg),
                                           StoreRegisterTo(output));

  // Regenerate the original lhs object for the VM call.
  Label regenerate, *lazyEntry;
  if (objReg != output) {
    // The lhs object survived the chain walk; jump straight to the OOL call.
    lazyEntry = ool->entry();
  } else {
    // The chain walk clobbered the lhs (it shared |output|); re-extract it
    // before entering the OOL call.
    masm.bind(&regenerate);
    lazyEntry = &regenerate;
    if (ins->isInstanceOfV()) {
      ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
      objReg = masm.extractObject(lhsValue, output);
    } else {
      objReg = ToRegister(ins->toInstanceOfO()->lhs());
    }
    MOZ_ASSERT(objReg == output);
    masm.jump(ool->entry());
  }

  masm.bind(&testLazy);
  masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
18309 void CodeGenerator::visitInstanceOfCache(LInstanceOfCache* ins) {
18310 // The Lowering ensures that RHS is an object, and that LHS is a value.
18311 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
18312 TypedOrValueRegister lhs =
18313 TypedOrValueRegister(ToValue(ins, LInstanceOfCache::LHS));
18314 Register rhs = ToRegister(ins->rhs());
18315 Register output = ToRegister(ins->output());
18317 IonInstanceOfIC ic(liveRegs, lhs, rhs, output);
18318 addIC(ins, allocateIC(ic));
// Call a DOM getter through its JSJitGetterOp. Optionally fast-paths a
// cached slot value; otherwise builds a fake IonDOMGetter exit frame,
// switches realms if required, and performs an aligned ABI call. The result
// is left in JSReturnOperand.
void CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins) {
  const Register JSContextReg = ToRegister(ins->getJSContextReg());
  const Register ObjectReg = ToRegister(ins->getObjectReg());
  const Register PrivateReg = ToRegister(ins->getPrivReg());
  const Register ValueReg = ToRegister(ins->getValueReg());

  Label haveValue;
  if (ins->mir()->valueMayBeInSlot()) {
    size_t slot = ins->mir()->domMemberSlotIndex();
    // It's a bit annoying to redo these slot calculations, which duplcate
    // LSlots and a few other things like that, but I'm not sure there's a
    // way to reuse those here.
    //
    // If this ever gets fixed to work with proxies (by not assuming that
    // reserved slot indices, which is what domMemberSlotIndex() returns,
    // match fixed slot indices), we can reenable MGetDOMProperty for
    // proxies in IonBuilder.
    if (slot < NativeObject::MAX_FIXED_SLOTS) {
      masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
                     JSReturnOperand);
    } else {
      // It's a dynamic slot.
      slot -= NativeObject::MAX_FIXED_SLOTS;
      // Use PrivateReg as a scratch register for the slots pointer.
      masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
                   PrivateReg);
      masm.loadValue(Address(PrivateReg, slot * sizeof(js::Value)),
                     JSReturnOperand);
    }
    // A non-undefined slot value can be returned directly; undefined means
    // the getter must be invoked.
    masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
  }

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Make space for the outparam. Pre-initialize it to UndefinedValue so we
  // can trace it at GC time.
  masm.Push(UndefinedValue());
  // We pass the pointer to our out param as an instance of
  // JSJitGetterCallArgs, since on the binary level it's the same thing.
  static_assert(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
  masm.moveStackPtrTo(ValueReg);

  masm.Push(ObjectReg);

  LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());

  // Rooting will happen at GC time.
  masm.moveStackPtrTo(ObjectReg);

  Realm* getterRealm = ins->mir()->getterRealm();
  if (gen->realm->realmPtr() != getterRealm) {
    // We use JSContextReg as scratch register here.
    masm.switchToRealm(getterRealm, JSContextReg);
  }

  uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
  masm.loadJSContext(JSContextReg);
  masm.enterFakeExitFrame(JSContextReg, JSContextReg,
                          ExitFrameType::IonDOMGetter);

  markSafepointAt(safepointOffset, ins);

  masm.setupAlignedABICall();
  masm.loadJSContext(JSContextReg);
  masm.passABIArg(JSContextReg);
  masm.passABIArg(ObjectReg);
  masm.passABIArg(PrivateReg);
  masm.passABIArg(ValueReg);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitGetterOp>(ins->mir()->fun()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (ins->mir()->isInfallible()) {
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    // Fallible getter: a false return means an exception is pending.
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  // Switch back to the current realm if needed. Note: if the getter threw an
  // exception, the exception handler will do this.
  if (gen->realm->realmPtr() != getterRealm) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "Clobbering ReturnReg should not affect the return value");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && ins->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  masm.adjustStack(IonDOMExitFrameLayout::Size());

  masm.bind(&haveValue);

  MOZ_ASSERT(masm.framePushed() == initialStack);
}
18429 void CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins) {
18430 // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
18431 // use an LLoadFixedSlotV or some subclass of it for this case: that would
18432 // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
18433 // we'd have to duplicate a bunch of stuff we now get for free from
18434 // MGetDOMProperty.
18436 // If this ever gets fixed to work with proxies (by not assuming that
18437 // reserved slot indices, which is what domMemberSlotIndex() returns,
18438 // match fixed slot indices), we can reenable MGetDOMMember for
18439 // proxies in IonBuilder.
18440 Register object = ToRegister(ins->object());
18441 size_t slot = ins->mir()->domMemberSlotIndex();
18442 ValueOperand result = ToOutValue(ins);
18444 masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
18445 result);
18448 void CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins) {
18449 // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
18450 // use an LLoadFixedSlotT or some subclass of it for this case: that would
18451 // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
18452 // we'd have to duplicate a bunch of stuff we now get for free from
18453 // MGetDOMProperty.
18455 // If this ever gets fixed to work with proxies (by not assuming that
18456 // reserved slot indices, which is what domMemberSlotIndex() returns,
18457 // match fixed slot indices), we can reenable MGetDOMMember for
18458 // proxies in IonBuilder.
18459 Register object = ToRegister(ins->object());
18460 size_t slot = ins->mir()->domMemberSlotIndex();
18461 AnyRegister result = ToAnyRegister(ins->getDef(0));
18462 MIRType type = ins->mir()->type();
18464 masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
18465 type, result);
// Call a DOM setter through its JSJitSetterOp: push the argument Value,
// build a fake IonDOMSetter exit frame, switch realms if required, and
// perform an aligned ABI call. A false return from the setter routes to the
// exception label.
void CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins) {
  const Register JSContextReg = ToRegister(ins->getJSContextReg());
  const Register ObjectReg = ToRegister(ins->getObjectReg());
  const Register PrivateReg = ToRegister(ins->getPrivReg());
  const Register ValueReg = ToRegister(ins->getValueReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Push the argument. Rooting will happen at GC time.
  ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
  masm.Push(argVal);
  // We pass the pointer to our out param as an instance of
  // JSJitGetterCallArgs, since on the binary level it's the same thing.
  static_assert(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
  masm.moveStackPtrTo(ValueReg);

  masm.Push(ObjectReg);

  LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());

  // Rooting will happen at GC time.
  masm.moveStackPtrTo(ObjectReg);

  Realm* setterRealm = ins->mir()->setterRealm();
  if (gen->realm->realmPtr() != setterRealm) {
    // We use JSContextReg as scratch register here.
    masm.switchToRealm(setterRealm, JSContextReg);
  }

  uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
  masm.loadJSContext(JSContextReg);
  masm.enterFakeExitFrame(JSContextReg, JSContextReg,
                          ExitFrameType::IonDOMSetter);

  markSafepointAt(safepointOffset, ins);

  masm.setupAlignedABICall();
  masm.loadJSContext(JSContextReg);
  masm.passABIArg(JSContextReg);
  masm.passABIArg(ObjectReg);
  masm.passABIArg(PrivateReg);
  masm.passABIArg(ValueReg);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitSetterOp>(ins->mir()->fun()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // A false return means an exception is pending.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Switch back to the current realm if needed. Note: if the setter threw an
  // exception, the exception handler will do this.
  if (gen->realm->realmPtr() != setterRealm) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  masm.adjustStack(IonDOMExitFrameLayout::Size());

  MOZ_ASSERT(masm.framePushed() == initialStack);
}
18530 void CodeGenerator::visitLoadDOMExpandoValue(LLoadDOMExpandoValue* ins) {
18531 Register proxy = ToRegister(ins->proxy());
18532 ValueOperand out = ToOutValue(ins);
18534 masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
18535 out.scratchReg());
18536 masm.loadValue(Address(out.scratchReg(),
18537 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
18538 out);
18541 void CodeGenerator::visitLoadDOMExpandoValueGuardGeneration(
18542 LLoadDOMExpandoValueGuardGeneration* ins) {
18543 Register proxy = ToRegister(ins->proxy());
18544 ValueOperand out = ToOutValue(ins);
18546 Label bail;
18547 masm.loadDOMExpandoValueGuardGeneration(proxy, out,
18548 ins->mir()->expandoAndGeneration(),
18549 ins->mir()->generation(), &bail);
18550 bailoutFrom(&bail, ins->snapshot());
18553 void CodeGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
18554 LLoadDOMExpandoValueIgnoreGeneration* ins) {
18555 Register proxy = ToRegister(ins->proxy());
18556 ValueOperand out = ToOutValue(ins);
18558 masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
18559 out.scratchReg());
18561 // Load the ExpandoAndGeneration* from the PrivateValue.
18562 masm.loadPrivate(
18563 Address(out.scratchReg(),
18564 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
18565 out.scratchReg());
18567 // Load expandoAndGeneration->expando into the output Value register.
18568 masm.loadValue(
18569 Address(out.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), out);
18572 void CodeGenerator::visitGuardDOMExpandoMissingOrGuardShape(
18573 LGuardDOMExpandoMissingOrGuardShape* ins) {
18574 Register temp = ToRegister(ins->temp0());
18575 ValueOperand input =
18576 ToValue(ins, LGuardDOMExpandoMissingOrGuardShape::InputIndex);
18578 Label done;
18579 masm.branchTestUndefined(Assembler::Equal, input, &done);
18581 masm.debugAssertIsObject(input);
18582 masm.unboxObject(input, temp);
18583 // The expando object is not used in this case, so we don't need Spectre
18584 // mitigations.
18585 Label bail;
18586 masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, temp,
18587 ins->mir()->shape(), &bail);
18588 bailoutFrom(&bail, ins->snapshot());
18590 masm.bind(&done);
18593 class OutOfLineIsCallable : public OutOfLineCodeBase<CodeGenerator> {
18594 Register object_;
18595 Register output_;
18597 public:
18598 OutOfLineIsCallable(Register object, Register output)
18599 : object_(object), output_(output) {}
18601 void accept(CodeGenerator* codegen) override {
18602 codegen->visitOutOfLineIsCallable(this);
18604 Register object() const { return object_; }
18605 Register output() const { return output_; }
18608 void CodeGenerator::visitIsCallableO(LIsCallableO* ins) {
18609 Register object = ToRegister(ins->object());
18610 Register output = ToRegister(ins->output());
18612 OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(object, output);
18613 addOutOfLineCode(ool, ins->mir());
18615 masm.isCallable(object, output, ool->entry());
18617 masm.bind(ool->rejoin());
18620 void CodeGenerator::visitIsCallableV(LIsCallableV* ins) {
18621 ValueOperand val = ToValue(ins, LIsCallableV::ObjectIndex);
18622 Register output = ToRegister(ins->output());
18623 Register temp = ToRegister(ins->temp0());
18625 Label notObject;
18626 masm.fallibleUnboxObject(val, temp, &notObject);
18628 OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(temp, output);
18629 addOutOfLineCode(ool, ins->mir());
18631 masm.isCallable(temp, output, ool->entry());
18632 masm.jump(ool->rejoin());
18634 masm.bind(&notObject);
18635 masm.move32(Imm32(0), output);
18637 masm.bind(ool->rejoin());
18640 void CodeGenerator::visitOutOfLineIsCallable(OutOfLineIsCallable* ool) {
18641 Register object = ool->object();
18642 Register output = ool->output();
18644 saveVolatile(output);
18645 using Fn = bool (*)(JSObject* obj);
18646 masm.setupAlignedABICall();
18647 masm.passABIArg(object);
18648 masm.callWithABI<Fn, ObjectIsCallable>();
18649 masm.storeCallBoolResult(output);
18650 restoreVolatile(output);
18651 masm.jump(ool->rejoin());
18654 class OutOfLineIsConstructor : public OutOfLineCodeBase<CodeGenerator> {
18655 LIsConstructor* ins_;
18657 public:
18658 explicit OutOfLineIsConstructor(LIsConstructor* ins) : ins_(ins) {}
18660 void accept(CodeGenerator* codegen) override {
18661 codegen->visitOutOfLineIsConstructor(this);
18663 LIsConstructor* ins() const { return ins_; }
18666 void CodeGenerator::visitIsConstructor(LIsConstructor* ins) {
18667 Register object = ToRegister(ins->object());
18668 Register output = ToRegister(ins->output());
18670 OutOfLineIsConstructor* ool = new (alloc()) OutOfLineIsConstructor(ins);
18671 addOutOfLineCode(ool, ins->mir());
18673 masm.isConstructor(object, output, ool->entry());
18675 masm.bind(ool->rejoin());
18678 void CodeGenerator::visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool) {
18679 LIsConstructor* ins = ool->ins();
18680 Register object = ToRegister(ins->object());
18681 Register output = ToRegister(ins->output());
18683 saveVolatile(output);
18684 using Fn = bool (*)(JSObject* obj);
18685 masm.setupAlignedABICall();
18686 masm.passABIArg(object);
18687 masm.callWithABI<Fn, ObjectIsConstructor>();
18688 masm.storeCallBoolResult(output);
18689 restoreVolatile(output);
18690 masm.jump(ool->rejoin());
18693 void CodeGenerator::visitIsCrossRealmArrayConstructor(
18694 LIsCrossRealmArrayConstructor* ins) {
18695 Register object = ToRegister(ins->object());
18696 Register output = ToRegister(ins->output());
18698 masm.setIsCrossRealmArrayConstructor(object, output);
// Shared IsArray emission: sets |output| to 1 iff |obj|'s class is
// ArrayObject. Proxies defer to |ool| (a VM call that can unwrap). The
// optional |notArray| label is an extra external entry point that also
// produces a false result. |output| doubles as the class-pointer scratch.
static void EmitObjectIsArray(MacroAssembler& masm, OutOfLineCode* ool,
                              Register obj, Register output,
                              Label* notArray = nullptr) {
  masm.loadObjClassUnsafe(obj, output);

  Label isArray;
  masm.branchPtr(Assembler::Equal, output, ImmPtr(&ArrayObject::class_),
                 &isArray);

  // Branch to OOL path if it's a proxy.
  masm.branchTestClassIsProxy(true, output, ool->entry());

  if (notArray) {
    masm.bind(notArray);
  }
  masm.move32(Imm32(0), output);
  masm.jump(ool->rejoin());

  masm.bind(&isArray);
  masm.move32(Imm32(1), output);

  masm.bind(ool->rejoin());
}
18725 void CodeGenerator::visitIsArrayO(LIsArrayO* lir) {
18726 Register object = ToRegister(lir->object());
18727 Register output = ToRegister(lir->output());
18729 using Fn = bool (*)(JSContext*, HandleObject, bool*);
18730 OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
18731 lir, ArgList(object), StoreRegisterTo(output));
18732 EmitObjectIsArray(masm, ool, object, output);
18735 void CodeGenerator::visitIsArrayV(LIsArrayV* lir) {
18736 ValueOperand val = ToValue(lir, LIsArrayV::ValueIndex);
18737 Register output = ToRegister(lir->output());
18738 Register temp = ToRegister(lir->temp0());
18740 Label notArray;
18741 masm.fallibleUnboxObject(val, temp, &notArray);
18743 using Fn = bool (*)(JSContext*, HandleObject, bool*);
18744 OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
18745 lir, ArgList(temp), StoreRegisterTo(output));
18746 EmitObjectIsArray(masm, ool, temp, output, &notArray);
// Sets output to 1 iff the object's class is a TypedArray class. When the
// MIR says the input may be a wrapper, proxies defer to an OOL VM call
// (IsPossiblyWrappedTypedArray); otherwise proxies simply produce false.
// |output| doubles as the class-pointer scratch.
void CodeGenerator::visitIsTypedArray(LIsTypedArray* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;
  if (lir->mir()->isPossiblyWrapped()) {
    using Fn = bool (*)(JSContext*, JSObject*, bool*);
    ool = oolCallVM<Fn, jit::IsPossiblyWrappedTypedArray>(
        lir, ArgList(object), StoreRegisterTo(output));
  }

  Label notTypedArray;
  Label done;

  masm.loadObjClassUnsafe(object, output);
  masm.branchIfClassIsNotTypedArray(output, &notTypedArray);

  masm.move32(Imm32(1), output);
  masm.jump(&done);
  masm.bind(&notTypedArray);
  if (ool) {
    // Only proxies can hide a wrapped typed array; everything else is false.
    masm.branchTestClassIsProxy(true, output, ool->entry());
  }
  masm.move32(Imm32(0), output);
  masm.bind(&done);
  if (ool) {
    masm.bind(ool->rejoin());
  }
}
18779 void CodeGenerator::visitIsObject(LIsObject* ins) {
18780 Register output = ToRegister(ins->output());
18781 ValueOperand value = ToValue(ins, LIsObject::ObjectIndex);
18782 masm.testObjectSet(Assembler::Equal, value, output);
18785 void CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins) {
18786 ValueOperand value = ToValue(ins, LIsObjectAndBranch::Input);
18787 testObjectEmitBranch(Assembler::Equal, value, ins->ifTrue(), ins->ifFalse());
18790 void CodeGenerator::visitIsNullOrUndefined(LIsNullOrUndefined* ins) {
18791 Register output = ToRegister(ins->output());
18792 ValueOperand value = ToValue(ins, LIsNullOrUndefined::InputIndex);
18794 Label isNotNull, done;
18795 masm.branchTestNull(Assembler::NotEqual, value, &isNotNull);
18797 masm.move32(Imm32(1), output);
18798 masm.jump(&done);
18800 masm.bind(&isNotNull);
18801 masm.testUndefinedSet(Assembler::Equal, value, output);
18803 masm.bind(&done);
// Branch form of IsNullOrUndefined: split the value's tag once, then branch
// to the true target on either singleton tag. Falls through to the false
// block when it is the next one.
void CodeGenerator::visitIsNullOrUndefinedAndBranch(
    LIsNullOrUndefinedAndBranch* ins) {
  Label* ifTrue = getJumpLabelForBranch(ins->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(ins->ifFalse());
  ValueOperand value = ToValue(ins, LIsNullOrUndefinedAndBranch::Input);

  // Extract the tag once so both tests share it.
  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  masm.branchTestNull(Assembler::Equal, tag, ifTrue);
  masm.branchTestUndefined(Assembler::Equal, tag, ifTrue);

  // Elide the jump when control falls through to the false block anyway.
  if (!isNextBlock(ins->ifFalse()->lir())) {
    masm.jump(ifFalse);
  }
}
18823 void CodeGenerator::loadOutermostJSScript(Register reg) {
18824 // The "outermost" JSScript means the script that we are compiling
18825 // basically; this is not always the script associated with the
18826 // current basic block, which might be an inlined script.
18828 MIRGraph& graph = current->mir()->graph();
18829 MBasicBlock* entryBlock = graph.entryBlock();
18830 masm.movePtr(ImmGCPtr(entryBlock->info().script()), reg);
18833 void CodeGenerator::loadJSScriptForBlock(MBasicBlock* block, Register reg) {
18834 // The current JSScript means the script for the current
18835 // basic block. This may be an inlined script.
18837 JSScript* script = block->info().script();
18838 masm.movePtr(ImmGCPtr(script), reg);
18841 void CodeGenerator::visitHasClass(LHasClass* ins) {
18842 Register lhs = ToRegister(ins->lhs());
18843 Register output = ToRegister(ins->output());
18845 masm.loadObjClassUnsafe(lhs, output);
18846 masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()),
18847 output);
18850 void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
18851 Register lhs = ToRegister(ins->lhs());
18852 Register temp = ToRegister(ins->temp0());
18854 // branchTestObjClass may zero the object register on speculative paths
18855 // (we should have a defineReuseInput allocation in this case).
18856 Register spectreRegToZero = lhs;
18858 Label notEqual;
18860 masm.branchTestObjClass(Assembler::NotEqual, lhs, ins->mir()->getClass(),
18861 temp, spectreRegToZero, &notEqual);
18863 // Can't return null-return here, so bail.
18864 bailoutFrom(&notEqual, ins->snapshot());
18867 void CodeGenerator::visitGuardToEitherClass(LGuardToEitherClass* ins) {
18868 Register lhs = ToRegister(ins->lhs());
18869 Register temp = ToRegister(ins->temp0());
18871 // branchTestObjClass may zero the object register on speculative paths
18872 // (we should have a defineReuseInput allocation in this case).
18873 Register spectreRegToZero = lhs;
18875 Label notEqual;
18877 masm.branchTestObjClass(Assembler::NotEqual, lhs,
18878 {ins->mir()->getClass1(), ins->mir()->getClass2()},
18879 temp, spectreRegToZero, &notEqual);
18881 // Can't return null-return here, so bail.
18882 bailoutFrom(&notEqual, ins->snapshot());
18885 void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
18886 Register lhs = ToRegister(ins->lhs());
18887 Register temp = ToRegister(ins->temp0());
18889 // branchTestObjClass may zero the object register on speculative paths
18890 // (we should have a defineReuseInput allocation in this case).
18891 Register spectreRegToZero = lhs;
18893 Label notEqual;
18895 masm.branchTestObjIsFunction(Assembler::NotEqual, lhs, temp, spectreRegToZero,
18896 &notEqual);
18898 // Can't return null-return here, so bail.
18899 bailoutFrom(&notEqual, ins->snapshot());
18902 void CodeGenerator::visitObjectClassToString(LObjectClassToString* lir) {
18903 Register obj = ToRegister(lir->lhs());
18904 Register temp = ToRegister(lir->temp0());
18906 using Fn = JSString* (*)(JSContext*, JSObject*);
18907 masm.setupAlignedABICall();
18908 masm.loadJSContext(temp);
18909 masm.passABIArg(temp);
18910 masm.passABIArg(obj);
18911 masm.callWithABI<Fn, js::ObjectClassToString>();
18913 bailoutCmpPtr(Assembler::Equal, ReturnReg, ImmWord(0), lir->snapshot());
18916 void CodeGenerator::visitWasmParameter(LWasmParameter* lir) {}
18918 void CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir) {}
18920 void CodeGenerator::visitWasmReturn(LWasmReturn* lir) {
18921 // Don't emit a jump to the return label if this is the last block.
18922 if (current->mir() != *gen->graph().poBegin()) {
18923 masm.jump(&returnLabel_);
18927 void CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir) {
18928 // Don't emit a jump to the return label if this is the last block.
18929 if (current->mir() != *gen->graph().poBegin()) {
18930 masm.jump(&returnLabel_);
18934 void CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir) {
18935 // Don't emit a jump to the return label if this is the last block.
18936 if (current->mir() != *gen->graph().poBegin()) {
18937 masm.jump(&returnLabel_);
18941 void CodeGenerator::emitAssertRangeI(MIRType type, const Range* r,
18942 Register input) {
18943 // Check the lower bound.
18944 if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
18945 Label success;
18946 if (type == MIRType::Int32 || type == MIRType::Boolean) {
18947 masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
18948 &success);
18949 } else {
18950 MOZ_ASSERT(type == MIRType::IntPtr);
18951 masm.branchPtr(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
18952 &success);
18954 masm.assumeUnreachable(
18955 "Integer input should be equal or higher than Lowerbound.");
18956 masm.bind(&success);
18959 // Check the upper bound.
18960 if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
18961 Label success;
18962 if (type == MIRType::Int32 || type == MIRType::Boolean) {
18963 masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
18964 &success);
18965 } else {
18966 MOZ_ASSERT(type == MIRType::IntPtr);
18967 masm.branchPtr(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
18968 &success);
18970 masm.assumeUnreachable(
18971 "Integer input should be lower or equal than Upperbound.");
18972 masm.bind(&success);
18975 // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
18976 // r->exponent(), there's nothing to check, because if we ended up in the
18977 // integer range checking code, the value is already in an integer register
18978 // in the integer range.
18981 void CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input,
18982 FloatRegister temp) {
18983 // Check the lower bound.
18984 if (r->hasInt32LowerBound()) {
18985 Label success;
18986 masm.loadConstantDouble(r->lower(), temp);
18987 if (r->canBeNaN()) {
18988 masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
18990 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
18991 &success);
18992 masm.assumeUnreachable(
18993 "Double input should be equal or higher than Lowerbound.");
18994 masm.bind(&success);
18996 // Check the upper bound.
18997 if (r->hasInt32UpperBound()) {
18998 Label success;
18999 masm.loadConstantDouble(r->upper(), temp);
19000 if (r->canBeNaN()) {
19001 masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
19003 masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
19004 masm.assumeUnreachable(
19005 "Double input should be lower or equal than Upperbound.");
19006 masm.bind(&success);
19009 // This code does not yet check r->canHaveFractionalPart(). This would require
19010 // new assembler interfaces to make rounding instructions available.
19012 if (!r->canBeNegativeZero()) {
19013 Label success;
19015 // First, test for being equal to 0.0, which also includes -0.0.
19016 masm.loadConstantDouble(0.0, temp);
19017 masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp,
19018 &success);
19020 // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
19021 // -Infinity instead of Infinity.
19022 masm.loadConstantDouble(1.0, temp);
19023 masm.divDouble(input, temp);
19024 masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);
19026 masm.assumeUnreachable("Input shouldn't be negative zero.");
19028 masm.bind(&success);
19031 if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
19032 r->exponent() < FloatingPoint<double>::kExponentBias) {
19033 // Check the bounds implied by the maximum exponent.
19034 Label exponentLoOk;
19035 masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
19036 masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
19037 masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp,
19038 &exponentLoOk);
19039 masm.assumeUnreachable("Check for exponent failed.");
19040 masm.bind(&exponentLoOk);
19042 Label exponentHiOk;
19043 masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
19044 masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
19045 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
19046 &exponentHiOk);
19047 masm.assumeUnreachable("Check for exponent failed.");
19048 masm.bind(&exponentHiOk);
19049 } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
19050 // If we think the value can't be NaN, check that it isn't.
19051 Label notnan;
19052 masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
19053 masm.assumeUnreachable("Input shouldn't be NaN.");
19054 masm.bind(&notnan);
19056 // If we think the value also can't be an infinity, check that it isn't.
19057 if (!r->canBeInfiniteOrNaN()) {
19058 Label notposinf;
19059 masm.loadConstantDouble(PositiveInfinity<double>(), temp);
19060 masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
19061 masm.assumeUnreachable("Input shouldn't be +Inf.");
19062 masm.bind(&notposinf);
19064 Label notneginf;
19065 masm.loadConstantDouble(NegativeInfinity<double>(), temp);
19066 masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
19067 masm.assumeUnreachable("Input shouldn't be -Inf.");
19068 masm.bind(&notneginf);
19073 void CodeGenerator::visitAssertClass(LAssertClass* ins) {
19074 Register obj = ToRegister(ins->input());
19075 Register temp = ToRegister(ins->getTemp(0));
19077 Label success;
19078 if (ins->mir()->getClass() == &FunctionClass) {
19079 // Allow both possible function classes here.
19080 masm.branchTestObjIsFunctionNoSpectreMitigations(Assembler::Equal, obj,
19081 temp, &success);
19082 } else {
19083 masm.branchTestObjClassNoSpectreMitigations(
19084 Assembler::Equal, obj, ins->mir()->getClass(), temp, &success);
19086 masm.assumeUnreachable("Wrong KnownClass during run-time");
19087 masm.bind(&success);
19090 void CodeGenerator::visitAssertShape(LAssertShape* ins) {
19091 Register obj = ToRegister(ins->input());
19093 Label success;
19094 masm.branchTestObjShapeNoSpectreMitigations(Assembler::Equal, obj,
19095 ins->mir()->shape(), &success);
19096 masm.assumeUnreachable("Wrong Shape during run-time");
19097 masm.bind(&success);
19100 void CodeGenerator::visitAssertRangeI(LAssertRangeI* ins) {
19101 Register input = ToRegister(ins->input());
19102 const Range* r = ins->range();
19104 emitAssertRangeI(ins->mir()->input()->type(), r, input);
19107 void CodeGenerator::visitAssertRangeD(LAssertRangeD* ins) {
19108 FloatRegister input = ToFloatRegister(ins->input());
19109 FloatRegister temp = ToFloatRegister(ins->temp());
19110 const Range* r = ins->range();
19112 emitAssertRangeD(r, input, temp);
19115 void CodeGenerator::visitAssertRangeF(LAssertRangeF* ins) {
19116 FloatRegister input = ToFloatRegister(ins->input());
19117 FloatRegister temp = ToFloatRegister(ins->temp());
19118 FloatRegister temp2 = ToFloatRegister(ins->temp2());
19120 const Range* r = ins->range();
19122 masm.convertFloat32ToDouble(input, temp);
19123 emitAssertRangeD(r, temp, temp2);
19126 void CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) {
19127 const Range* r = ins->range();
19128 const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
19129 Label done;
19132 ScratchTagScope tag(masm, value);
19133 masm.splitTagForTest(value, tag);
19136 Label isNotInt32;
19137 masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
19139 ScratchTagScopeRelease _(&tag);
19140 Register unboxInt32 = ToTempUnboxRegister(ins->temp());
19141 Register input = masm.extractInt32(value, unboxInt32);
19142 emitAssertRangeI(MIRType::Int32, r, input);
19143 masm.jump(&done);
19145 masm.bind(&isNotInt32);
19149 Label isNotDouble;
19150 masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
19152 ScratchTagScopeRelease _(&tag);
19153 FloatRegister input = ToFloatRegister(ins->floatTemp1());
19154 FloatRegister temp = ToFloatRegister(ins->floatTemp2());
19155 masm.unboxDouble(value, input);
19156 emitAssertRangeD(r, input, temp);
19157 masm.jump(&done);
19159 masm.bind(&isNotDouble);
19163 masm.assumeUnreachable("Incorrect range for Value.");
19164 masm.bind(&done);
19167 void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) {
19168 using Fn = bool (*)(JSContext*);
19169 OutOfLineCode* ool =
19170 oolCallVM<Fn, InterruptCheck>(lir, ArgList(), StoreNothing());
19172 const void* interruptAddr = gen->runtime->addressOfInterruptBits();
19173 masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0),
19174 ool->entry());
19175 masm.bind(ool->rejoin());
19178 void CodeGenerator::visitOutOfLineResumableWasmTrap(
19179 OutOfLineResumableWasmTrap* ool) {
19180 LInstruction* lir = ool->lir();
19181 masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
19183 markSafepointAt(masm.currentOffset(), lir);
19185 // Note that masm.framePushed() doesn't include the register dump area.
19186 // That will be taken into account when the StackMap is created from the
19187 // LSafepoint.
19188 lir->safepoint()->setFramePushedAtStackMapBase(ool->framePushed());
19189 lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::Trap);
19191 masm.jump(ool->rejoin());
19194 void CodeGenerator::visitOutOfLineAbortingWasmTrap(
19195 OutOfLineAbortingWasmTrap* ool) {
19196 masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
19199 void CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir) {
19200 MOZ_ASSERT(gen->compilingWasm());
19202 OutOfLineResumableWasmTrap* ool = new (alloc()) OutOfLineResumableWasmTrap(
19203 lir, masm.framePushed(), lir->mir()->bytecodeOffset(),
19204 wasm::Trap::CheckInterrupt);
19205 addOutOfLineCode(ool, lir->mir());
19206 masm.branch32(
19207 Assembler::NotEqual,
19208 Address(ToRegister(lir->instance()), wasm::Instance::offsetOfInterrupt()),
19209 Imm32(0), ool->entry());
19210 masm.bind(ool->rejoin());
19213 void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
19214 MOZ_ASSERT(gen->compilingWasm());
19215 const MWasmTrap* mir = lir->mir();
19217 masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
19220 void CodeGenerator::visitWasmTrapIfNull(LWasmTrapIfNull* lir) {
19221 MOZ_ASSERT(gen->compilingWasm());
19222 const MWasmTrapIfNull* mir = lir->mir();
19223 Label nonNull;
19224 Register ref = ToRegister(lir->ref());
19226 masm.branchWasmAnyRefIsNull(false, ref, &nonNull);
19227 masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
19228 masm.bind(&nonNull);
19231 void CodeGenerator::visitWasmRefIsSubtypeOfAbstract(
19232 LWasmRefIsSubtypeOfAbstract* ins) {
19233 MOZ_ASSERT(gen->compilingWasm());
19235 const MWasmRefIsSubtypeOfAbstract* mir = ins->mir();
19236 MOZ_ASSERT(!mir->destType().isTypeRef());
19238 Register ref = ToRegister(ins->ref());
19239 Register superSTV = Register::Invalid();
19240 Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
19241 Register scratch2 = Register::Invalid();
19242 Register result = ToRegister(ins->output());
19243 Label onSuccess;
19244 Label onFail;
19245 Label join;
19246 masm.branchWasmRefIsSubtype(ref, mir->sourceType(), mir->destType(),
19247 &onSuccess, /*onSuccess=*/true, superSTV,
19248 scratch1, scratch2);
19249 masm.bind(&onFail);
19250 masm.xor32(result, result);
19251 masm.jump(&join);
19252 masm.bind(&onSuccess);
19253 masm.move32(Imm32(1), result);
19254 masm.bind(&join);
19257 void CodeGenerator::visitWasmRefIsSubtypeOfConcrete(
19258 LWasmRefIsSubtypeOfConcrete* ins) {
19259 MOZ_ASSERT(gen->compilingWasm());
19261 const MWasmRefIsSubtypeOfConcrete* mir = ins->mir();
19262 MOZ_ASSERT(mir->destType().isTypeRef());
19264 Register ref = ToRegister(ins->ref());
19265 Register superSTV = ToRegister(ins->superSTV());
19266 Register scratch1 = ToRegister(ins->temp0());
19267 Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
19268 Register result = ToRegister(ins->output());
19269 Label onSuccess;
19270 Label join;
19271 masm.branchWasmRefIsSubtype(ref, mir->sourceType(), mir->destType(),
19272 &onSuccess, /*onSuccess=*/true, superSTV,
19273 scratch1, scratch2);
19274 masm.move32(Imm32(0), result);
19275 masm.jump(&join);
19276 masm.bind(&onSuccess);
19277 masm.move32(Imm32(1), result);
19278 masm.bind(&join);
19281 void CodeGenerator::visitWasmRefIsSubtypeOfAbstractAndBranch(
19282 LWasmRefIsSubtypeOfAbstractAndBranch* ins) {
19283 MOZ_ASSERT(gen->compilingWasm());
19284 Register ref = ToRegister(ins->ref());
19285 Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
19286 Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
19287 Label* onFail = getJumpLabelForBranch(ins->ifFalse());
19288 masm.branchWasmRefIsSubtype(
19289 ref, ins->sourceType(), ins->destType(), onSuccess, /*onSuccess=*/true,
19290 Register::Invalid(), scratch1, Register::Invalid());
19291 masm.jump(onFail);
19294 void CodeGenerator::visitWasmRefIsSubtypeOfConcreteAndBranch(
19295 LWasmRefIsSubtypeOfConcreteAndBranch* ins) {
19296 MOZ_ASSERT(gen->compilingWasm());
19297 Register ref = ToRegister(ins->ref());
19298 Register superSTV = ToRegister(ins->superSTV());
19299 Register scratch1 = ToRegister(ins->temp0());
19300 Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
19301 Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
19302 Label* onFail = getJumpLabelForBranch(ins->ifFalse());
19303 masm.branchWasmRefIsSubtype(ref, ins->sourceType(), ins->destType(),
19304 onSuccess, /*onSuccess=*/true, superSTV, scratch1,
19305 scratch2);
19306 masm.jump(onFail);
19309 void CodeGenerator::callWasmStructAllocFun(LInstruction* lir,
19310 wasm::SymbolicAddress fun,
19311 Register typeDefData,
19312 Register output) {
19313 masm.Push(InstanceReg);
19314 int32_t framePushedAfterInstance = masm.framePushed();
19315 saveLive(lir);
19317 masm.setupWasmABICall();
19318 masm.passABIArg(InstanceReg);
19319 masm.passABIArg(typeDefData);
19320 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
19321 CodeOffset offset =
19322 masm.callWithABI(wasm::BytecodeOffset(0), fun,
19323 mozilla::Some(instanceOffset), ABIType::General);
19324 masm.storeCallPointerResult(output);
19326 markSafepointAt(offset.offset(), lir);
19327 lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
19328 lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);
19330 restoreLive(lir);
19331 masm.Pop(InstanceReg);
19332 #if JS_CODEGEN_ARM64
19333 masm.syncStackPtr();
19334 #endif
19337 // Out-of-line path to allocate wasm GC structs
19338 class OutOfLineWasmNewStruct : public OutOfLineCodeBase<CodeGenerator> {
19339 LInstruction* lir_;
19340 wasm::SymbolicAddress fun_;
19341 Register typeDefData_;
19342 Register output_;
19344 public:
19345 OutOfLineWasmNewStruct(LInstruction* lir, wasm::SymbolicAddress fun,
19346 Register typeDefData, Register output)
19347 : lir_(lir), fun_(fun), typeDefData_(typeDefData), output_(output) {}
19349 void accept(CodeGenerator* codegen) override {
19350 codegen->visitOutOfLineWasmNewStruct(this);
19353 LInstruction* lir() const { return lir_; }
19354 wasm::SymbolicAddress fun() const { return fun_; }
19355 Register typeDefData() const { return typeDefData_; }
19356 Register output() const { return output_; }
19359 void CodeGenerator::visitOutOfLineWasmNewStruct(OutOfLineWasmNewStruct* ool) {
19360 callWasmStructAllocFun(ool->lir(), ool->fun(), ool->typeDefData(),
19361 ool->output());
19362 masm.jump(ool->rejoin());
19365 void CodeGenerator::visitWasmNewStructObject(LWasmNewStructObject* lir) {
19366 MOZ_ASSERT(gen->compilingWasm());
19368 MWasmNewStructObject* mir = lir->mir();
19370 Register typeDefData = ToRegister(lir->typeDefData());
19371 Register output = ToRegister(lir->output());
19373 if (mir->isOutline()) {
19374 wasm::SymbolicAddress fun = mir->zeroFields()
19375 ? wasm::SymbolicAddress::StructNewOOL_true
19376 : wasm::SymbolicAddress::StructNewOOL_false;
19377 callWasmStructAllocFun(lir, fun, typeDefData, output);
19378 } else {
19379 wasm::SymbolicAddress fun = mir->zeroFields()
19380 ? wasm::SymbolicAddress::StructNewIL_true
19381 : wasm::SymbolicAddress::StructNewIL_false;
19383 Register instance = ToRegister(lir->instance());
19384 MOZ_ASSERT(instance == InstanceReg);
19386 auto ool =
19387 new (alloc()) OutOfLineWasmNewStruct(lir, fun, typeDefData, output);
19388 addOutOfLineCode(ool, lir->mir());
19390 Register temp1 = ToRegister(lir->temp0());
19391 Register temp2 = ToRegister(lir->temp1());
19392 masm.wasmNewStructObject(instance, output, typeDefData, temp1, temp2,
19393 ool->entry(), mir->allocKind(), mir->zeroFields());
19395 masm.bind(ool->rejoin());
19399 void CodeGenerator::callWasmArrayAllocFun(LInstruction* lir,
19400 wasm::SymbolicAddress fun,
19401 Register numElements,
19402 Register typeDefData, Register output,
19403 wasm::BytecodeOffset bytecodeOffset) {
19404 masm.Push(InstanceReg);
19405 int32_t framePushedAfterInstance = masm.framePushed();
19406 saveLive(lir);
19408 masm.setupWasmABICall();
19409 masm.passABIArg(InstanceReg);
19410 masm.passABIArg(numElements);
19411 masm.passABIArg(typeDefData);
19412 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
19413 CodeOffset offset = masm.callWithABI(
19414 bytecodeOffset, fun, mozilla::Some(instanceOffset), ABIType::General);
19415 masm.storeCallPointerResult(output);
19417 markSafepointAt(offset.offset(), lir);
19418 lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
19419 lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);
19421 restoreLive(lir);
19422 masm.Pop(InstanceReg);
19423 #if JS_CODEGEN_ARM64
19424 masm.syncStackPtr();
19425 #endif
19427 Label ok;
19428 masm.branchPtr(Assembler::NonZero, output, ImmWord(0), &ok);
19429 masm.wasmTrap(wasm::Trap::ThrowReported, bytecodeOffset);
19430 masm.bind(&ok);
19433 // Out-of-line path to allocate wasm GC arrays
19434 class OutOfLineWasmNewArray : public OutOfLineCodeBase<CodeGenerator> {
19435 LInstruction* lir_;
19436 wasm::SymbolicAddress fun_;
19437 Register numElementsReg_;
19438 mozilla::Maybe<uint32_t> numElements_;
19439 Register typeDefData_;
19440 Register output_;
19441 wasm::BytecodeOffset bytecodeOffset_;
19443 public:
19444 OutOfLineWasmNewArray(LInstruction* lir, wasm::SymbolicAddress fun,
19445 Register numElementsReg,
19446 mozilla::Maybe<uint32_t> numElements,
19447 Register typeDefData, Register output,
19448 wasm::BytecodeOffset bytecodeOffset)
19449 : lir_(lir),
19450 fun_(fun),
19451 numElementsReg_(numElementsReg),
19452 numElements_(numElements),
19453 typeDefData_(typeDefData),
19454 output_(output),
19455 bytecodeOffset_(bytecodeOffset) {}
19457 void accept(CodeGenerator* codegen) override {
19458 codegen->visitOutOfLineWasmNewArray(this);
19461 LInstruction* lir() const { return lir_; }
19462 wasm::SymbolicAddress fun() const { return fun_; }
19463 Register numElementsReg() const { return numElementsReg_; }
19464 mozilla::Maybe<uint32_t> numElements() const { return numElements_; }
19465 Register typeDefData() const { return typeDefData_; }
19466 Register output() const { return output_; }
19467 wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
19470 void CodeGenerator::visitOutOfLineWasmNewArray(OutOfLineWasmNewArray* ool) {
19471 if (ool->numElements().isSome()) {
19472 masm.move32(Imm32(ool->numElements().value()), ool->numElementsReg());
19474 callWasmArrayAllocFun(ool->lir(), ool->fun(), ool->numElementsReg(),
19475 ool->typeDefData(), ool->output(),
19476 ool->bytecodeOffset());
19477 masm.jump(ool->rejoin());
19480 void CodeGenerator::visitWasmNewArrayObject(LWasmNewArrayObject* lir) {
19481 MOZ_ASSERT(gen->compilingWasm());
19483 MWasmNewArrayObject* mir = lir->mir();
19485 Register typeDefData = ToRegister(lir->typeDefData());
19486 Register output = ToRegister(lir->output());
19487 Register temp1 = ToRegister(lir->temp0());
19488 Register temp2 = ToRegister(lir->temp1());
19490 wasm::SymbolicAddress fun = mir->zeroFields()
19491 ? wasm::SymbolicAddress::ArrayNew_true
19492 : wasm::SymbolicAddress::ArrayNew_false;
19494 if (lir->numElements()->isConstant()) {
19495 // numElements is constant, so we can do optimized code generation.
19496 uint32_t numElements = lir->numElements()->toConstant()->toInt32();
19497 CheckedUint32 storageBytes =
19498 WasmArrayObject::calcStorageBytesChecked(mir->elemSize(), numElements);
19499 if (!storageBytes.isValid() ||
19500 storageBytes.value() > WasmArrayObject_MaxInlineBytes) {
19501 // Too much array data to store inline. Immediately perform an instance
19502 // call to handle the out-of-line storage.
19503 masm.move32(Imm32(numElements), temp1);
19504 callWasmArrayAllocFun(lir, fun, temp1, typeDefData, output,
19505 mir->bytecodeOffset());
19506 } else {
19507 // storageBytes is small enough to be stored inline in WasmArrayObject.
19508 // Attempt a nursery allocation and fall back to an instance call if it
19509 // fails.
19510 Register instance = ToRegister(lir->instance());
19511 MOZ_ASSERT(instance == InstanceReg);
19513 auto ool = new (alloc())
19514 OutOfLineWasmNewArray(lir, fun, temp1, mozilla::Some(numElements),
19515 typeDefData, output, mir->bytecodeOffset());
19516 addOutOfLineCode(ool, lir->mir());
19518 masm.wasmNewArrayObjectFixed(instance, output, typeDefData, temp1, temp2,
19519 ool->entry(), numElements,
19520 storageBytes.value(), mir->zeroFields());
19522 masm.bind(ool->rejoin());
19524 } else {
19525 // numElements is dynamic. Attempt a dynamic inline-storage nursery
19526 // allocation and fall back to an instance call if it fails.
19527 Register instance = ToRegister(lir->instance());
19528 MOZ_ASSERT(instance == InstanceReg);
19529 Register numElements = ToRegister(lir->numElements());
19531 auto ool = new (alloc())
19532 OutOfLineWasmNewArray(lir, fun, numElements, mozilla::Nothing(),
19533 typeDefData, output, mir->bytecodeOffset());
19534 addOutOfLineCode(ool, lir->mir());
19536 masm.wasmNewArrayObject(instance, output, numElements, typeDefData, temp1,
19537 ool->entry(), mir->elemSize(), mir->zeroFields());
19539 masm.bind(ool->rejoin());
19543 void CodeGenerator::visitWasmHeapReg(LWasmHeapReg* ins) {
19544 #ifdef WASM_HAS_HEAPREG
19545 masm.movePtr(HeapReg, ToRegister(ins->output()));
19546 #else
19547 MOZ_CRASH();
19548 #endif
19551 void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
19552 const MWasmBoundsCheck* mir = ins->mir();
19553 Register ptr = ToRegister(ins->ptr());
19554 Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
19555 // When there are no spectre mitigations in place, branching out-of-line to
19556 // the trap is a big performance win, but with mitigations it's trickier. See
19557 // bug 1680243.
19558 if (JitOptions.spectreIndexMasking) {
19559 Label ok;
19560 masm.wasmBoundsCheck32(Assembler::Below, ptr, boundsCheckLimit, &ok);
19561 masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
19562 masm.bind(&ok);
19563 } else {
19564 OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
19565 mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
19566 addOutOfLineCode(ool, mir);
19567 masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
19568 ool->entry());
19572 void CodeGenerator::visitWasmBoundsCheck64(LWasmBoundsCheck64* ins) {
19573 const MWasmBoundsCheck* mir = ins->mir();
19574 Register64 ptr = ToRegister64(ins->ptr());
19575 Register64 boundsCheckLimit = ToRegister64(ins->boundsCheckLimit());
19576 // See above.
19577 if (JitOptions.spectreIndexMasking) {
19578 Label ok;
19579 masm.wasmBoundsCheck64(Assembler::Below, ptr, boundsCheckLimit, &ok);
19580 masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
19581 masm.bind(&ok);
19582 } else {
19583 OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
19584 mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
19585 addOutOfLineCode(ool, mir);
19586 masm.wasmBoundsCheck64(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
19587 ool->entry());
19591 void CodeGenerator::visitWasmBoundsCheckRange32(LWasmBoundsCheckRange32* ins) {
19592 const MWasmBoundsCheckRange32* mir = ins->mir();
19593 Register index = ToRegister(ins->index());
19594 Register length = ToRegister(ins->length());
19595 Register limit = ToRegister(ins->limit());
19596 Register tmp = ToRegister(ins->temp0());
19598 masm.wasmBoundsCheckRange32(index, length, limit, tmp, mir->bytecodeOffset());
19601 void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
19602 const MWasmAlignmentCheck* mir = ins->mir();
19603 Register ptr = ToRegister(ins->ptr());
19604 OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
19605 mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
19606 addOutOfLineCode(ool, mir);
19607 masm.branchTest32(Assembler::NonZero, ptr, Imm32(mir->byteSize() - 1),
19608 ool->entry());
19611 void CodeGenerator::visitWasmAlignmentCheck64(LWasmAlignmentCheck64* ins) {
19612 const MWasmAlignmentCheck* mir = ins->mir();
19613 Register64 ptr = ToRegister64(ins->ptr());
19614 #ifdef JS_64BIT
19615 Register r = ptr.reg;
19616 #else
19617 Register r = ptr.low;
19618 #endif
19619 OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
19620 mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
19621 addOutOfLineCode(ool, mir);
19622 masm.branchTestPtr(Assembler::NonZero, r, Imm32(mir->byteSize() - 1),
19623 ool->entry());
19626 void CodeGenerator::visitWasmLoadInstance(LWasmLoadInstance* ins) {
19627 switch (ins->mir()->type()) {
19628 case MIRType::WasmAnyRef:
19629 case MIRType::Pointer:
19630 masm.loadPtr(Address(ToRegister(ins->instance()), ins->mir()->offset()),
19631 ToRegister(ins->output()));
19632 break;
19633 case MIRType::Int32:
19634 masm.load32(Address(ToRegister(ins->instance()), ins->mir()->offset()),
19635 ToRegister(ins->output()));
19636 break;
19637 default:
19638 MOZ_CRASH("MIRType not supported in WasmLoadInstance");
19642 void CodeGenerator::visitWasmLoadInstance64(LWasmLoadInstance64* ins) {
19643 MOZ_ASSERT(ins->mir()->type() == MIRType::Int64);
19644 masm.load64(Address(ToRegister(ins->instance()), ins->mir()->offset()),
19645 ToOutRegister64(ins));
19648 void CodeGenerator::incrementWarmUpCounter(AbsoluteAddress warmUpCount,
19649 JSScript* script, Register tmp) {
19650 // The code depends on the JitScript* not being discarded without also
19651 // invalidating Ion code. Assert this.
19652 #ifdef DEBUG
19653 Label ok;
19654 masm.movePtr(ImmGCPtr(script), tmp);
19655 masm.loadJitScript(tmp, tmp);
19656 masm.branchPtr(Assembler::Equal, tmp, ImmPtr(script->jitScript()), &ok);
19657 masm.assumeUnreachable("Didn't find JitScript?");
19658 masm.bind(&ok);
19659 #endif
19661 masm.load32(warmUpCount, tmp);
19662 masm.add32(Imm32(1), tmp);
19663 masm.store32(tmp, warmUpCount);
19666 void CodeGenerator::visitIncrementWarmUpCounter(LIncrementWarmUpCounter* ins) {
19667 Register tmp = ToRegister(ins->temp0());
19669 AbsoluteAddress warmUpCount =
19670 AbsoluteAddress(ins->mir()->script()->jitScript())
19671 .offset(JitScript::offsetOfWarmUpCount());
19672 incrementWarmUpCounter(warmUpCount, ins->mir()->script(), tmp);
19675 void CodeGenerator::visitLexicalCheck(LLexicalCheck* ins) {
19676 ValueOperand inputValue = ToValue(ins, LLexicalCheck::InputIndex);
19677 Label bail;
19678 masm.branchTestMagicValue(Assembler::Equal, inputValue,
19679 JS_UNINITIALIZED_LEXICAL, &bail);
19680 bailoutFrom(&bail, ins->snapshot());
19683 void CodeGenerator::visitThrowRuntimeLexicalError(
19684 LThrowRuntimeLexicalError* ins) {
19685 pushArg(Imm32(ins->mir()->errorNumber()));
19687 using Fn = bool (*)(JSContext*, unsigned);
19688 callVM<Fn, jit::ThrowRuntimeLexicalError>(ins);
19691 void CodeGenerator::visitThrowMsg(LThrowMsg* ins) {
19692 pushArg(Imm32(static_cast<int32_t>(ins->mir()->throwMsgKind())));
19694 using Fn = bool (*)(JSContext*, unsigned);
19695 callVM<Fn, js::ThrowMsgOperation>(ins);
19698 void CodeGenerator::visitGlobalDeclInstantiation(
19699 LGlobalDeclInstantiation* ins) {
19700 pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
19701 pushArg(ImmGCPtr(ins->mir()->block()->info().script()));
19703 using Fn = bool (*)(JSContext*, HandleScript, const jsbytecode*);
19704 callVM<Fn, GlobalDeclInstantiationFromIon>(ins);
19707 void CodeGenerator::visitDebugger(LDebugger* ins) {
19708 Register cx = ToRegister(ins->temp0());
19710 masm.loadJSContext(cx);
19711 using Fn = bool (*)(JSContext* cx);
19712 masm.setupAlignedABICall();
19713 masm.passABIArg(cx);
19714 masm.callWithABI<Fn, GlobalHasLiveOnDebuggerStatement>();
19716 Label bail;
19717 masm.branchIfTrueBool(ReturnReg, &bail);
19718 bailoutFrom(&bail, ins->snapshot());
19721 void CodeGenerator::visitNewTarget(LNewTarget* ins) {
19722 ValueOperand output = ToOutValue(ins);
19724 // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
19725 Label notConstructing, done;
19726 Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
19727 masm.branchTestPtr(Assembler::Zero, calleeToken,
19728 Imm32(CalleeToken_FunctionConstructing), &notConstructing);
19730 Register argvLen = output.scratchReg();
19731 masm.loadNumActualArgs(FramePointer, argvLen);
19733 Label useNFormals;
19735 size_t numFormalArgs = ins->mirRaw()->block()->info().nargs();
19736 masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs), &useNFormals);
19738 size_t argsOffset = JitFrameLayout::offsetOfActualArgs();
19740 BaseValueIndex newTarget(FramePointer, argvLen, argsOffset);
19741 masm.loadValue(newTarget, output);
19742 masm.jump(&done);
19745 masm.bind(&useNFormals);
19748 Address newTarget(FramePointer,
19749 argsOffset + (numFormalArgs * sizeof(Value)));
19750 masm.loadValue(newTarget, output);
19751 masm.jump(&done);
19754 // else output = undefined
19755 masm.bind(&notConstructing);
19756 masm.moveValue(UndefinedValue(), output);
19757 masm.bind(&done);
19760 void CodeGenerator::visitCheckReturn(LCheckReturn* ins) {
19761 ValueOperand returnValue = ToValue(ins, LCheckReturn::ReturnValueIndex);
19762 ValueOperand thisValue = ToValue(ins, LCheckReturn::ThisValueIndex);
19763 ValueOperand output = ToOutValue(ins);
19765 using Fn = bool (*)(JSContext*, HandleValue);
19766 OutOfLineCode* ool = oolCallVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>(
19767 ins, ArgList(returnValue), StoreNothing());
19769 Label noChecks;
19770 masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
19771 masm.branchTestUndefined(Assembler::NotEqual, returnValue, ool->entry());
19772 masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
19773 masm.moveValue(thisValue, output);
19774 masm.jump(ool->rejoin());
19775 masm.bind(&noChecks);
19776 masm.moveValue(returnValue, output);
19777 masm.bind(ool->rejoin());
19780 void CodeGenerator::visitCheckIsObj(LCheckIsObj* ins) {
19781 ValueOperand value = ToValue(ins, LCheckIsObj::ValueIndex);
19782 Register output = ToRegister(ins->output());
19784 using Fn = bool (*)(JSContext*, CheckIsObjectKind);
19785 OutOfLineCode* ool = oolCallVM<Fn, ThrowCheckIsObject>(
19786 ins, ArgList(Imm32(ins->mir()->checkKind())), StoreNothing());
19788 masm.fallibleUnboxObject(value, output, ool->entry());
19789 masm.bind(ool->rejoin());
19792 void CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins) {
19793 ValueOperand checkValue = ToValue(ins, LCheckObjCoercible::ValueIndex);
19795 using Fn = bool (*)(JSContext*, HandleValue);
19796 OutOfLineCode* ool = oolCallVM<Fn, ThrowObjectCoercible>(
19797 ins, ArgList(checkValue), StoreNothing());
19798 masm.branchTestNull(Assembler::Equal, checkValue, ool->entry());
19799 masm.branchTestUndefined(Assembler::Equal, checkValue, ool->entry());
19800 masm.bind(ool->rejoin());
19803 void CodeGenerator::visitCheckClassHeritage(LCheckClassHeritage* ins) {
19804 ValueOperand heritage = ToValue(ins, LCheckClassHeritage::HeritageIndex);
19805 Register temp0 = ToRegister(ins->temp0());
19806 Register temp1 = ToRegister(ins->temp1());
19808 using Fn = bool (*)(JSContext*, HandleValue);
19809 OutOfLineCode* ool = oolCallVM<Fn, CheckClassHeritageOperation>(
19810 ins, ArgList(heritage), StoreNothing());
19812 masm.branchTestNull(Assembler::Equal, heritage, ool->rejoin());
19813 masm.fallibleUnboxObject(heritage, temp0, ool->entry());
19815 masm.isConstructor(temp0, temp1, ool->entry());
19816 masm.branchTest32(Assembler::Zero, temp1, temp1, ool->entry());
19818 masm.bind(ool->rejoin());
19821 void CodeGenerator::visitCheckThis(LCheckThis* ins) {
19822 ValueOperand thisValue = ToValue(ins, LCheckThis::ValueIndex);
19824 using Fn = bool (*)(JSContext*);
19825 OutOfLineCode* ool =
19826 oolCallVM<Fn, ThrowUninitializedThis>(ins, ArgList(), StoreNothing());
19827 masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
19828 masm.bind(ool->rejoin());
19831 void CodeGenerator::visitCheckThisReinit(LCheckThisReinit* ins) {
19832 ValueOperand thisValue = ToValue(ins, LCheckThisReinit::ThisValueIndex);
19834 using Fn = bool (*)(JSContext*);
19835 OutOfLineCode* ool =
19836 oolCallVM<Fn, ThrowInitializedThis>(ins, ArgList(), StoreNothing());
19837 masm.branchTestMagic(Assembler::NotEqual, thisValue, ool->entry());
19838 masm.bind(ool->rejoin());
19841 void CodeGenerator::visitGenerator(LGenerator* lir) {
19842 Register callee = ToRegister(lir->callee());
19843 Register environmentChain = ToRegister(lir->environmentChain());
19844 Register argsObject = ToRegister(lir->argsObject());
19846 pushArg(argsObject);
19847 pushArg(environmentChain);
19848 pushArg(ImmGCPtr(current->mir()->info().script()));
19849 pushArg(callee);
19851 using Fn = JSObject* (*)(JSContext* cx, HandleFunction, HandleScript,
19852 HandleObject, HandleObject);
19853 callVM<Fn, CreateGenerator>(lir);
19856 void CodeGenerator::visitAsyncResolve(LAsyncResolve* lir) {
19857 Register generator = ToRegister(lir->generator());
19858 ValueOperand value = ToValue(lir, LAsyncResolve::ValueIndex);
19860 pushArg(value);
19861 pushArg(generator);
19863 using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
19864 HandleValue);
19865 callVM<Fn, js::AsyncFunctionResolve>(lir);
19868 void CodeGenerator::visitAsyncReject(LAsyncReject* lir) {
19869 Register generator = ToRegister(lir->generator());
19870 ValueOperand reason = ToValue(lir, LAsyncReject::ReasonIndex);
19871 ValueOperand stack = ToValue(lir, LAsyncReject::StackIndex);
19873 pushArg(stack);
19874 pushArg(reason);
19875 pushArg(generator);
19877 using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
19878 HandleValue, HandleValue);
19879 callVM<Fn, js::AsyncFunctionReject>(lir);
19882 void CodeGenerator::visitAsyncAwait(LAsyncAwait* lir) {
19883 ValueOperand value = ToValue(lir, LAsyncAwait::ValueIndex);
19884 Register generator = ToRegister(lir->generator());
19886 pushArg(value);
19887 pushArg(generator);
19889 using Fn =
19890 JSObject* (*)(JSContext* cx, Handle<AsyncFunctionGeneratorObject*> genObj,
19891 HandleValue value);
19892 callVM<Fn, js::AsyncFunctionAwait>(lir);
19895 void CodeGenerator::visitCanSkipAwait(LCanSkipAwait* lir) {
19896 ValueOperand value = ToValue(lir, LCanSkipAwait::ValueIndex);
19898 pushArg(value);
19900 using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
19901 callVM<Fn, js::CanSkipAwait>(lir);
19904 void CodeGenerator::visitMaybeExtractAwaitValue(LMaybeExtractAwaitValue* lir) {
19905 ValueOperand value = ToValue(lir, LMaybeExtractAwaitValue::ValueIndex);
19906 ValueOperand output = ToOutValue(lir);
19907 Register canSkip = ToRegister(lir->canSkip());
19909 Label cantExtract, finished;
19910 masm.branchIfFalseBool(canSkip, &cantExtract);
19912 pushArg(value);
19914 using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
19915 callVM<Fn, js::ExtractAwaitValue>(lir);
19916 masm.jump(&finished);
19917 masm.bind(&cantExtract);
19919 masm.moveValue(value, output);
19921 masm.bind(&finished);
19924 void CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins) {
19925 ValueOperand checkValue = ToValue(ins, LDebugCheckSelfHosted::ValueIndex);
19926 pushArg(checkValue);
19927 using Fn = bool (*)(JSContext*, HandleValue);
19928 callVM<Fn, js::Debug_CheckSelfHosted>(ins);
19931 void CodeGenerator::visitRandom(LRandom* ins) {
19932 using mozilla::non_crypto::XorShift128PlusRNG;
19934 FloatRegister output = ToFloatRegister(ins->output());
19935 Register rngReg = ToRegister(ins->temp0());
19937 Register64 temp1 = ToRegister64(ins->temp1());
19938 Register64 temp2 = ToRegister64(ins->temp2());
19940 const XorShift128PlusRNG* rng = gen->realm->addressOfRandomNumberGenerator();
19941 masm.movePtr(ImmPtr(rng), rngReg);
19943 masm.randomDouble(rngReg, output, temp1, temp2);
19944 if (js::SupportDifferentialTesting()) {
19945 masm.loadConstantDouble(0.0, output);
19949 void CodeGenerator::visitSignExtendInt32(LSignExtendInt32* ins) {
19950 Register input = ToRegister(ins->input());
19951 Register output = ToRegister(ins->output());
19953 switch (ins->mode()) {
19954 case MSignExtendInt32::Byte:
19955 masm.move8SignExtend(input, output);
19956 break;
19957 case MSignExtendInt32::Half:
19958 masm.move16SignExtend(input, output);
19959 break;
19963 void CodeGenerator::visitRotate(LRotate* ins) {
19964 MRotate* mir = ins->mir();
19965 Register input = ToRegister(ins->input());
19966 Register dest = ToRegister(ins->output());
19968 const LAllocation* count = ins->count();
19969 if (count->isConstant()) {
19970 int32_t c = ToInt32(count) & 0x1F;
19971 if (mir->isLeftRotate()) {
19972 masm.rotateLeft(Imm32(c), input, dest);
19973 } else {
19974 masm.rotateRight(Imm32(c), input, dest);
19976 } else {
19977 Register creg = ToRegister(count);
19978 if (mir->isLeftRotate()) {
19979 masm.rotateLeft(creg, input, dest);
19980 } else {
19981 masm.rotateRight(creg, input, dest);
19986 class OutOfLineNaNToZero : public OutOfLineCodeBase<CodeGenerator> {
19987 LNaNToZero* lir_;
19989 public:
19990 explicit OutOfLineNaNToZero(LNaNToZero* lir) : lir_(lir) {}
19992 void accept(CodeGenerator* codegen) override {
19993 codegen->visitOutOfLineNaNToZero(this);
19995 LNaNToZero* lir() const { return lir_; }
19998 void CodeGenerator::visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool) {
19999 FloatRegister output = ToFloatRegister(ool->lir()->output());
20000 masm.loadConstantDouble(0.0, output);
20001 masm.jump(ool->rejoin());
20004 void CodeGenerator::visitNaNToZero(LNaNToZero* lir) {
20005 FloatRegister input = ToFloatRegister(lir->input());
20007 OutOfLineNaNToZero* ool = new (alloc()) OutOfLineNaNToZero(lir);
20008 addOutOfLineCode(ool, lir->mir());
20010 if (lir->mir()->operandIsNeverNegativeZero()) {
20011 masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
20012 } else {
20013 FloatRegister scratch = ToFloatRegister(lir->temp0());
20014 masm.loadConstantDouble(0.0, scratch);
20015 masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch,
20016 ool->entry());
20018 masm.bind(ool->rejoin());
20021 void CodeGenerator::visitIsPackedArray(LIsPackedArray* lir) {
20022 Register obj = ToRegister(lir->object());
20023 Register output = ToRegister(lir->output());
20024 Register temp = ToRegister(lir->temp0());
20026 masm.setIsPackedArray(obj, output, temp);
20029 void CodeGenerator::visitGuardArrayIsPacked(LGuardArrayIsPacked* lir) {
20030 Register array = ToRegister(lir->array());
20031 Register temp0 = ToRegister(lir->temp0());
20032 Register temp1 = ToRegister(lir->temp1());
20034 Label bail;
20035 masm.branchArrayIsNotPacked(array, temp0, temp1, &bail);
20036 bailoutFrom(&bail, lir->snapshot());
20039 void CodeGenerator::visitGetPrototypeOf(LGetPrototypeOf* lir) {
20040 Register target = ToRegister(lir->target());
20041 ValueOperand out = ToOutValue(lir);
20042 Register scratch = out.scratchReg();
20044 using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
20045 OutOfLineCode* ool = oolCallVM<Fn, jit::GetPrototypeOf>(lir, ArgList(target),
20046 StoreValueTo(out));
20048 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
20050 masm.loadObjProto(target, scratch);
20052 Label hasProto;
20053 masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);
20055 // Call into the VM for lazy prototypes.
20056 masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), ool->entry());
20058 masm.moveValue(NullValue(), out);
20059 masm.jump(ool->rejoin());
20061 masm.bind(&hasProto);
20062 masm.tagValue(JSVAL_TYPE_OBJECT, scratch, out);
20064 masm.bind(ool->rejoin());
20067 void CodeGenerator::visitObjectWithProto(LObjectWithProto* lir) {
20068 pushArg(ToValue(lir, LObjectWithProto::PrototypeIndex));
20070 using Fn = PlainObject* (*)(JSContext*, HandleValue);
20071 callVM<Fn, js::ObjectWithProtoOperation>(lir);
20074 void CodeGenerator::visitObjectStaticProto(LObjectStaticProto* lir) {
20075 Register obj = ToRegister(lir->input());
20076 Register output = ToRegister(lir->output());
20078 masm.loadObjProto(obj, output);
20080 #ifdef DEBUG
20081 // We shouldn't encounter a null or lazy proto.
20082 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
20084 Label done;
20085 masm.branchPtr(Assembler::Above, output, ImmWord(1), &done);
20086 masm.assumeUnreachable("Unexpected null or lazy proto in MObjectStaticProto");
20087 masm.bind(&done);
20088 #endif
20091 void CodeGenerator::visitBuiltinObject(LBuiltinObject* lir) {
20092 pushArg(Imm32(static_cast<int32_t>(lir->mir()->builtinObjectKind())));
20094 using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
20095 callVM<Fn, js::BuiltinObjectOperation>(lir);
20098 void CodeGenerator::visitSuperFunction(LSuperFunction* lir) {
20099 Register callee = ToRegister(lir->callee());
20100 ValueOperand out = ToOutValue(lir);
20101 Register temp = ToRegister(lir->temp0());
20103 #ifdef DEBUG
20104 Label classCheckDone;
20105 masm.branchTestObjIsFunction(Assembler::Equal, callee, temp, callee,
20106 &classCheckDone);
20107 masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
20108 masm.bind(&classCheckDone);
20109 #endif
20111 // Load prototype of callee
20112 masm.loadObjProto(callee, temp);
20114 #ifdef DEBUG
20115 // We won't encounter a lazy proto, because |callee| is guaranteed to be a
20116 // JSFunction and only proxy objects can have a lazy proto.
20117 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
20119 Label proxyCheckDone;
20120 masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
20121 masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
20122 masm.bind(&proxyCheckDone);
20123 #endif
20125 Label nullProto, done;
20126 masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
20128 // Box prototype and return
20129 masm.tagValue(JSVAL_TYPE_OBJECT, temp, out);
20130 masm.jump(&done);
20132 masm.bind(&nullProto);
20133 masm.moveValue(NullValue(), out);
20135 masm.bind(&done);
20138 void CodeGenerator::visitInitHomeObject(LInitHomeObject* lir) {
20139 Register func = ToRegister(lir->function());
20140 ValueOperand homeObject = ToValue(lir, LInitHomeObject::HomeObjectIndex);
20142 masm.assertFunctionIsExtended(func);
20144 Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
20146 emitPreBarrier(addr);
20147 masm.storeValue(homeObject, addr);
20150 void CodeGenerator::visitIsTypedArrayConstructor(
20151 LIsTypedArrayConstructor* lir) {
20152 Register object = ToRegister(lir->object());
20153 Register output = ToRegister(lir->output());
20155 masm.setIsDefinitelyTypedArrayConstructor(object, output);
20158 void CodeGenerator::visitLoadValueTag(LLoadValueTag* lir) {
20159 ValueOperand value = ToValue(lir, LLoadValueTag::ValueIndex);
20160 Register output = ToRegister(lir->output());
20162 Register tag = masm.extractTag(value, output);
20163 if (tag != output) {
20164 masm.mov(tag, output);
20168 void CodeGenerator::visitGuardTagNotEqual(LGuardTagNotEqual* lir) {
20169 Register lhs = ToRegister(lir->lhs());
20170 Register rhs = ToRegister(lir->rhs());
20172 bailoutCmp32(Assembler::Equal, lhs, rhs, lir->snapshot());
20174 // If both lhs and rhs are numbers, can't use tag comparison to do inequality
20175 // comparison
20176 Label done;
20177 masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
20178 masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
20179 bailout(lir->snapshot());
20181 masm.bind(&done);
20184 void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
20185 Register object = ToRegister(lir->object());
20186 Register output = ToRegister(lir->output());
20188 masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);
20190 // Bail for revoked proxies.
20191 Label bail;
20192 Address targetAddr(output,
20193 js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
20194 if (lir->mir()->fallible()) {
20195 masm.fallibleUnboxObject(targetAddr, output, &bail);
20196 bailoutFrom(&bail, lir->snapshot());
20197 } else {
20198 masm.unboxObject(targetAddr, output);
20202 void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
20203 Register object = ToRegister(lir->object());
20204 Register temp0 = ToRegister(lir->temp0());
20205 Register temp1 = ToRegister(lir->temp1());
20206 Register temp2 = ToRegister(lir->temp2());
20208 masm.movePropertyKey(lir->mir()->propId(), temp1);
20209 masm.movePtr(ImmGCPtr(lir->mir()->getterSetter()), temp2);
20211 using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
20212 GetterSetter* getterSetter);
20213 masm.setupAlignedABICall();
20214 masm.loadJSContext(temp0);
20215 masm.passABIArg(temp0);
20216 masm.passABIArg(object);
20217 masm.passABIArg(temp1);
20218 masm.passABIArg(temp2);
20219 masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
20221 bailoutIfFalseBool(ReturnReg, lir->snapshot());
20224 void CodeGenerator::visitGuardIsExtensible(LGuardIsExtensible* lir) {
20225 Register object = ToRegister(lir->object());
20226 Register temp = ToRegister(lir->temp0());
20228 Label bail;
20229 masm.branchIfObjectNotExtensible(object, temp, &bail);
20230 bailoutFrom(&bail, lir->snapshot());
20233 void CodeGenerator::visitGuardInt32IsNonNegative(
20234 LGuardInt32IsNonNegative* lir) {
20235 Register index = ToRegister(lir->index());
20237 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
20240 void CodeGenerator::visitGuardInt32Range(LGuardInt32Range* lir) {
20241 Register input = ToRegister(lir->input());
20243 bailoutCmp32(Assembler::LessThan, input, Imm32(lir->mir()->minimum()),
20244 lir->snapshot());
20245 bailoutCmp32(Assembler::GreaterThan, input, Imm32(lir->mir()->maximum()),
20246 lir->snapshot());
20249 void CodeGenerator::visitGuardIndexIsNotDenseElement(
20250 LGuardIndexIsNotDenseElement* lir) {
20251 Register object = ToRegister(lir->object());
20252 Register index = ToRegister(lir->index());
20253 Register temp = ToRegister(lir->temp0());
20254 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
20256 // Load obj->elements.
20257 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
20259 // Ensure index >= initLength or the element is a hole.
20260 Label notDense;
20261 Address capacity(temp, ObjectElements::offsetOfInitializedLength());
20262 masm.spectreBoundsCheck32(index, capacity, spectreTemp, &notDense);
20264 BaseValueIndex element(temp, index);
20265 masm.branchTestMagic(Assembler::Equal, element, &notDense);
20267 bailout(lir->snapshot());
20269 masm.bind(&notDense);
20272 void CodeGenerator::visitGuardIndexIsValidUpdateOrAdd(
20273 LGuardIndexIsValidUpdateOrAdd* lir) {
20274 Register object = ToRegister(lir->object());
20275 Register index = ToRegister(lir->index());
20276 Register temp = ToRegister(lir->temp0());
20277 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
20279 // Load obj->elements.
20280 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
20282 Label success;
20284 // If length is writable, branch to &success. All indices are writable.
20285 Address flags(temp, ObjectElements::offsetOfFlags());
20286 masm.branchTest32(Assembler::Zero, flags,
20287 Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
20288 &success);
20290 // Otherwise, ensure index is in bounds.
20291 Label bail;
20292 Address length(temp, ObjectElements::offsetOfLength());
20293 masm.spectreBoundsCheck32(index, length, spectreTemp, &bail);
20294 masm.bind(&success);
20296 bailoutFrom(&bail, lir->snapshot());
20299 void CodeGenerator::visitCallAddOrUpdateSparseElement(
20300 LCallAddOrUpdateSparseElement* lir) {
20301 Register object = ToRegister(lir->object());
20302 Register index = ToRegister(lir->index());
20303 ValueOperand value = ToValue(lir, LCallAddOrUpdateSparseElement::ValueIndex);
20305 pushArg(Imm32(lir->mir()->strict()));
20306 pushArg(value);
20307 pushArg(index);
20308 pushArg(object);
20310 using Fn =
20311 bool (*)(JSContext*, Handle<NativeObject*>, int32_t, HandleValue, bool);
20312 callVM<Fn, js::AddOrUpdateSparseElementHelper>(lir);
20315 void CodeGenerator::visitCallGetSparseElement(LCallGetSparseElement* lir) {
20316 Register object = ToRegister(lir->object());
20317 Register index = ToRegister(lir->index());
20319 pushArg(index);
20320 pushArg(object);
20322 using Fn =
20323 bool (*)(JSContext*, Handle<NativeObject*>, int32_t, MutableHandleValue);
20324 callVM<Fn, js::GetSparseElementHelper>(lir);
20327 void CodeGenerator::visitCallNativeGetElement(LCallNativeGetElement* lir) {
20328 Register object = ToRegister(lir->object());
20329 Register index = ToRegister(lir->index());
20331 pushArg(index);
20332 pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
20333 pushArg(object);
20335 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
20336 MutableHandleValue);
20337 callVM<Fn, js::NativeGetElement>(lir);
20340 void CodeGenerator::visitCallNativeGetElementSuper(
20341 LCallNativeGetElementSuper* lir) {
20342 Register object = ToRegister(lir->object());
20343 Register index = ToRegister(lir->index());
20344 ValueOperand receiver =
20345 ToValue(lir, LCallNativeGetElementSuper::ReceiverIndex);
20347 pushArg(index);
20348 pushArg(receiver);
20349 pushArg(object);
20351 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
20352 MutableHandleValue);
20353 callVM<Fn, js::NativeGetElement>(lir);
20356 void CodeGenerator::visitCallObjectHasSparseElement(
20357 LCallObjectHasSparseElement* lir) {
20358 Register object = ToRegister(lir->object());
20359 Register index = ToRegister(lir->index());
20360 Register temp0 = ToRegister(lir->temp0());
20361 Register temp1 = ToRegister(lir->temp1());
20362 Register output = ToRegister(lir->output());
20364 masm.reserveStack(sizeof(Value));
20365 masm.moveStackPtrTo(temp1);
20367 using Fn = bool (*)(JSContext*, NativeObject*, int32_t, Value*);
20368 masm.setupAlignedABICall();
20369 masm.loadJSContext(temp0);
20370 masm.passABIArg(temp0);
20371 masm.passABIArg(object);
20372 masm.passABIArg(index);
20373 masm.passABIArg(temp1);
20374 masm.callWithABI<Fn, HasNativeElementPure>();
20375 masm.storeCallPointerResult(temp0);
20377 Label bail, ok;
20378 uint32_t framePushed = masm.framePushed();
20379 masm.branchIfTrueBool(temp0, &ok);
20380 masm.adjustStack(sizeof(Value));
20381 masm.jump(&bail);
20383 masm.bind(&ok);
20384 masm.setFramePushed(framePushed);
20385 masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
20386 masm.adjustStack(sizeof(Value));
20388 bailoutFrom(&bail, lir->snapshot());
20391 void CodeGenerator::visitBigIntAsIntN(LBigIntAsIntN* ins) {
20392 Register bits = ToRegister(ins->bits());
20393 Register input = ToRegister(ins->input());
20395 pushArg(bits);
20396 pushArg(input);
20398 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
20399 callVM<Fn, jit::BigIntAsIntN>(ins);
20402 void CodeGenerator::visitBigIntAsIntN64(LBigIntAsIntN64* ins) {
20403 Register input = ToRegister(ins->input());
20404 Register temp = ToRegister(ins->temp());
20405 Register64 temp64 = ToRegister64(ins->temp64());
20406 Register output = ToRegister(ins->output());
20408 Label done, create;
20410 masm.movePtr(input, output);
20412 // Load the BigInt value as an int64.
20413 masm.loadBigInt64(input, temp64);
20415 // Create a new BigInt when the input exceeds the int64 range.
20416 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
20417 Imm32(64 / BigInt::DigitBits), &create);
20419 // And create a new BigInt when the value and the BigInt have different signs.
20420 Label nonNegative;
20421 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
20422 masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &create);
20423 masm.jump(&done);
20425 masm.bind(&nonNegative);
20426 masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &done);
20428 masm.bind(&create);
20429 emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
20431 masm.bind(&done);
20434 void CodeGenerator::visitBigIntAsIntN32(LBigIntAsIntN32* ins) {
20435 Register input = ToRegister(ins->input());
20436 Register temp = ToRegister(ins->temp());
20437 Register64 temp64 = ToRegister64(ins->temp64());
20438 Register output = ToRegister(ins->output());
20440 Label done, create;
20442 masm.movePtr(input, output);
20444 // Load the absolute value of the first digit.
20445 masm.loadFirstBigIntDigitOrZero(input, temp);
20447 // If the absolute value exceeds the int32 range, create a new BigInt.
20448 masm.branchPtr(Assembler::Above, temp, Imm32(INT32_MAX), &create);
20450 // Also create a new BigInt if we have more than one digit.
20451 masm.branch32(Assembler::BelowOrEqual,
20452 Address(input, BigInt::offsetOfLength()), Imm32(1), &done);
20454 masm.bind(&create);
20456 // |temp| stores the absolute value, negate it when the sign flag is set.
20457 Label nonNegative;
20458 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
20459 masm.negPtr(temp);
20460 masm.bind(&nonNegative);
20462 masm.move32To64SignExtend(temp, temp64);
20463 emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
20465 masm.bind(&done);
20468 void CodeGenerator::visitBigIntAsUintN(LBigIntAsUintN* ins) {
20469 Register bits = ToRegister(ins->bits());
20470 Register input = ToRegister(ins->input());
20472 pushArg(bits);
20473 pushArg(input);
20475 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
20476 callVM<Fn, jit::BigIntAsUintN>(ins);
20479 void CodeGenerator::visitBigIntAsUintN64(LBigIntAsUintN64* ins) {
20480 Register input = ToRegister(ins->input());
20481 Register temp = ToRegister(ins->temp());
20482 Register64 temp64 = ToRegister64(ins->temp64());
20483 Register output = ToRegister(ins->output());
20485 Label done, create;
20487 masm.movePtr(input, output);
20489 // Load the BigInt value as an uint64.
20490 masm.loadBigInt64(input, temp64);
20492 // Create a new BigInt when the input exceeds the uint64 range.
20493 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
20494 Imm32(64 / BigInt::DigitBits), &create);
20496 // And create a new BigInt when the input has the sign flag set.
20497 masm.branchIfBigIntIsNonNegative(input, &done);
20499 masm.bind(&create);
20500 emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
20502 masm.bind(&done);
20505 void CodeGenerator::visitBigIntAsUintN32(LBigIntAsUintN32* ins) {
20506 Register input = ToRegister(ins->input());
20507 Register temp = ToRegister(ins->temp());
20508 Register64 temp64 = ToRegister64(ins->temp64());
20509 Register output = ToRegister(ins->output());
20511 Label done, create;
20513 masm.movePtr(input, output);
20515 // Load the absolute value of the first digit.
20516 masm.loadFirstBigIntDigitOrZero(input, temp);
20518 // If the absolute value exceeds the uint32 range, create a new BigInt.
20519 #if JS_PUNBOX64
20520 masm.branchPtr(Assembler::Above, temp, ImmWord(UINT32_MAX), &create);
20521 #endif
20523 // Also create a new BigInt if we have more than one digit.
20524 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
20525 Imm32(1), &create);
20527 // And create a new BigInt when the input has the sign flag set.
20528 masm.branchIfBigIntIsNonNegative(input, &done);
20530 masm.bind(&create);
20532 // |temp| stores the absolute value, negate it when the sign flag is set.
20533 Label nonNegative;
20534 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
20535 masm.negPtr(temp);
20536 masm.bind(&nonNegative);
20538 masm.move32To64ZeroExtend(temp, temp64);
20539 emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
20541 masm.bind(&done);
20544 void CodeGenerator::visitGuardNonGCThing(LGuardNonGCThing* ins) {
20545 ValueOperand input = ToValue(ins, LGuardNonGCThing::InputIndex);
20547 Label bail;
20548 masm.branchTestGCThing(Assembler::Equal, input, &bail);
20549 bailoutFrom(&bail, ins->snapshot());
20552 void CodeGenerator::visitToHashableNonGCThing(LToHashableNonGCThing* ins) {
20553 ValueOperand input = ToValue(ins, LToHashableNonGCThing::InputIndex);
20554 FloatRegister tempFloat = ToFloatRegister(ins->temp0());
20555 ValueOperand output = ToOutValue(ins);
20557 masm.toHashableNonGCThing(input, output, tempFloat);
20560 void CodeGenerator::visitToHashableString(LToHashableString* ins) {
20561 Register input = ToRegister(ins->input());
20562 Register output = ToRegister(ins->output());
20564 using Fn = JSAtom* (*)(JSContext*, JSString*);
20565 auto* ool = oolCallVM<Fn, js::AtomizeString>(ins, ArgList(input),
20566 StoreRegisterTo(output));
20568 Label isAtom;
20569 masm.branchTest32(Assembler::NonZero,
20570 Address(input, JSString::offsetOfFlags()),
20571 Imm32(JSString::ATOM_BIT), &isAtom);
20573 masm.lookupStringInAtomCacheLastLookups(input, output, output, ool->entry());
20574 masm.jump(ool->rejoin());
20575 masm.bind(&isAtom);
20576 masm.movePtr(input, output);
20577 masm.bind(ool->rejoin());
20580 void CodeGenerator::visitToHashableValue(LToHashableValue* ins) {
20581 ValueOperand input = ToValue(ins, LToHashableValue::InputIndex);
20582 FloatRegister tempFloat = ToFloatRegister(ins->temp0());
20583 ValueOperand output = ToOutValue(ins);
20585 Register str = output.scratchReg();
20587 using Fn = JSAtom* (*)(JSContext*, JSString*);
20588 auto* ool =
20589 oolCallVM<Fn, js::AtomizeString>(ins, ArgList(str), StoreRegisterTo(str));
20591 masm.toHashableValue(input, output, tempFloat, ool->entry(), ool->rejoin());
20594 void CodeGenerator::visitHashNonGCThing(LHashNonGCThing* ins) {
20595 ValueOperand input = ToValue(ins, LHashNonGCThing::InputIndex);
20596 Register temp = ToRegister(ins->temp0());
20597 Register output = ToRegister(ins->output());
20599 masm.prepareHashNonGCThing(input, output, temp);
20602 void CodeGenerator::visitHashString(LHashString* ins) {
20603 Register input = ToRegister(ins->input());
20604 Register temp = ToRegister(ins->temp0());
20605 Register output = ToRegister(ins->output());
20607 masm.prepareHashString(input, output, temp);
20610 void CodeGenerator::visitHashSymbol(LHashSymbol* ins) {
20611 Register input = ToRegister(ins->input());
20612 Register output = ToRegister(ins->output());
20614 masm.prepareHashSymbol(input, output);
20617 void CodeGenerator::visitHashBigInt(LHashBigInt* ins) {
20618 Register input = ToRegister(ins->input());
20619 Register temp0 = ToRegister(ins->temp0());
20620 Register temp1 = ToRegister(ins->temp1());
20621 Register temp2 = ToRegister(ins->temp2());
20622 Register output = ToRegister(ins->output());
20624 masm.prepareHashBigInt(input, output, temp0, temp1, temp2);
20627 void CodeGenerator::visitHashObject(LHashObject* ins) {
20628 Register setObj = ToRegister(ins->setObject());
20629 ValueOperand input = ToValue(ins, LHashObject::InputIndex);
20630 Register temp0 = ToRegister(ins->temp0());
20631 Register temp1 = ToRegister(ins->temp1());
20632 Register temp2 = ToRegister(ins->temp2());
20633 Register temp3 = ToRegister(ins->temp3());
20634 Register output = ToRegister(ins->output());
20636 masm.prepareHashObject(setObj, input, output, temp0, temp1, temp2, temp3);
20639 void CodeGenerator::visitHashValue(LHashValue* ins) {
20640 Register setObj = ToRegister(ins->setObject());
20641 ValueOperand input = ToValue(ins, LHashValue::InputIndex);
20642 Register temp0 = ToRegister(ins->temp0());
20643 Register temp1 = ToRegister(ins->temp1());
20644 Register temp2 = ToRegister(ins->temp2());
20645 Register temp3 = ToRegister(ins->temp3());
20646 Register output = ToRegister(ins->output());
20648 masm.prepareHashValue(setObj, input, output, temp0, temp1, temp2, temp3);
20651 void CodeGenerator::visitSetObjectHasNonBigInt(LSetObjectHasNonBigInt* ins) {
20652 Register setObj = ToRegister(ins->setObject());
20653 ValueOperand input = ToValue(ins, LSetObjectHasNonBigInt::InputIndex);
20654 Register hash = ToRegister(ins->hash());
20655 Register temp0 = ToRegister(ins->temp0());
20656 Register temp1 = ToRegister(ins->temp1());
20657 Register output = ToRegister(ins->output());
20659 masm.setObjectHasNonBigInt(setObj, input, hash, output, temp0, temp1);
20662 void CodeGenerator::visitSetObjectHasBigInt(LSetObjectHasBigInt* ins) {
20663 Register setObj = ToRegister(ins->setObject());
20664 ValueOperand input = ToValue(ins, LSetObjectHasBigInt::InputIndex);
20665 Register hash = ToRegister(ins->hash());
20666 Register temp0 = ToRegister(ins->temp0());
20667 Register temp1 = ToRegister(ins->temp1());
20668 Register temp2 = ToRegister(ins->temp2());
20669 Register temp3 = ToRegister(ins->temp3());
20670 Register output = ToRegister(ins->output());
20672 masm.setObjectHasBigInt(setObj, input, hash, output, temp0, temp1, temp2,
20673 temp3);
20676 void CodeGenerator::visitSetObjectHasValue(LSetObjectHasValue* ins) {
20677 Register setObj = ToRegister(ins->setObject());
20678 ValueOperand input = ToValue(ins, LSetObjectHasValue::InputIndex);
20679 Register hash = ToRegister(ins->hash());
20680 Register temp0 = ToRegister(ins->temp0());
20681 Register temp1 = ToRegister(ins->temp1());
20682 Register temp2 = ToRegister(ins->temp2());
20683 Register temp3 = ToRegister(ins->temp3());
20684 Register output = ToRegister(ins->output());
20686 masm.setObjectHasValue(setObj, input, hash, output, temp0, temp1, temp2,
20687 temp3);
20690 void CodeGenerator::visitSetObjectHasValueVMCall(
20691 LSetObjectHasValueVMCall* ins) {
20692 pushArg(ToValue(ins, LSetObjectHasValueVMCall::InputIndex));
20693 pushArg(ToRegister(ins->setObject()));
20695 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
20696 callVM<Fn, jit::SetObjectHas>(ins);
20699 void CodeGenerator::visitSetObjectSize(LSetObjectSize* ins) {
20700 Register setObj = ToRegister(ins->setObject());
20701 Register output = ToRegister(ins->output());
20703 masm.loadSetObjectSize(setObj, output);
20706 void CodeGenerator::visitMapObjectHasNonBigInt(LMapObjectHasNonBigInt* ins) {
20707 Register mapObj = ToRegister(ins->mapObject());
20708 ValueOperand input = ToValue(ins, LMapObjectHasNonBigInt::InputIndex);
20709 Register hash = ToRegister(ins->hash());
20710 Register temp0 = ToRegister(ins->temp0());
20711 Register temp1 = ToRegister(ins->temp1());
20712 Register output = ToRegister(ins->output());
20714 masm.mapObjectHasNonBigInt(mapObj, input, hash, output, temp0, temp1);
20717 void CodeGenerator::visitMapObjectHasBigInt(LMapObjectHasBigInt* ins) {
20718 Register mapObj = ToRegister(ins->mapObject());
20719 ValueOperand input = ToValue(ins, LMapObjectHasBigInt::InputIndex);
20720 Register hash = ToRegister(ins->hash());
20721 Register temp0 = ToRegister(ins->temp0());
20722 Register temp1 = ToRegister(ins->temp1());
20723 Register temp2 = ToRegister(ins->temp2());
20724 Register temp3 = ToRegister(ins->temp3());
20725 Register output = ToRegister(ins->output());
20727 masm.mapObjectHasBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
20728 temp3);
20731 void CodeGenerator::visitMapObjectHasValue(LMapObjectHasValue* ins) {
20732 Register mapObj = ToRegister(ins->mapObject());
20733 ValueOperand input = ToValue(ins, LMapObjectHasValue::InputIndex);
20734 Register hash = ToRegister(ins->hash());
20735 Register temp0 = ToRegister(ins->temp0());
20736 Register temp1 = ToRegister(ins->temp1());
20737 Register temp2 = ToRegister(ins->temp2());
20738 Register temp3 = ToRegister(ins->temp3());
20739 Register output = ToRegister(ins->output());
20741 masm.mapObjectHasValue(mapObj, input, hash, output, temp0, temp1, temp2,
20742 temp3);
20745 void CodeGenerator::visitMapObjectHasValueVMCall(
20746 LMapObjectHasValueVMCall* ins) {
20747 pushArg(ToValue(ins, LMapObjectHasValueVMCall::InputIndex));
20748 pushArg(ToRegister(ins->mapObject()));
20750 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
20751 callVM<Fn, jit::MapObjectHas>(ins);
20754 void CodeGenerator::visitMapObjectGetNonBigInt(LMapObjectGetNonBigInt* ins) {
20755 Register mapObj = ToRegister(ins->mapObject());
20756 ValueOperand input = ToValue(ins, LMapObjectGetNonBigInt::InputIndex);
20757 Register hash = ToRegister(ins->hash());
20758 Register temp0 = ToRegister(ins->temp0());
20759 Register temp1 = ToRegister(ins->temp1());
20760 ValueOperand output = ToOutValue(ins);
20762 masm.mapObjectGetNonBigInt(mapObj, input, hash, output, temp0, temp1,
20763 output.scratchReg());
20766 void CodeGenerator::visitMapObjectGetBigInt(LMapObjectGetBigInt* ins) {
20767 Register mapObj = ToRegister(ins->mapObject());
20768 ValueOperand input = ToValue(ins, LMapObjectGetBigInt::InputIndex);
20769 Register hash = ToRegister(ins->hash());
20770 Register temp0 = ToRegister(ins->temp0());
20771 Register temp1 = ToRegister(ins->temp1());
20772 Register temp2 = ToRegister(ins->temp2());
20773 Register temp3 = ToRegister(ins->temp3());
20774 ValueOperand output = ToOutValue(ins);
20776 masm.mapObjectGetBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
20777 temp3, output.scratchReg());
20780 void CodeGenerator::visitMapObjectGetValue(LMapObjectGetValue* ins) {
20781 Register mapObj = ToRegister(ins->mapObject());
20782 ValueOperand input = ToValue(ins, LMapObjectGetValue::InputIndex);
20783 Register hash = ToRegister(ins->hash());
20784 Register temp0 = ToRegister(ins->temp0());
20785 Register temp1 = ToRegister(ins->temp1());
20786 Register temp2 = ToRegister(ins->temp2());
20787 Register temp3 = ToRegister(ins->temp3());
20788 ValueOperand output = ToOutValue(ins);
20790 masm.mapObjectGetValue(mapObj, input, hash, output, temp0, temp1, temp2,
20791 temp3, output.scratchReg());
20794 void CodeGenerator::visitMapObjectGetValueVMCall(
20795 LMapObjectGetValueVMCall* ins) {
20796 pushArg(ToValue(ins, LMapObjectGetValueVMCall::InputIndex));
20797 pushArg(ToRegister(ins->mapObject()));
20799 using Fn =
20800 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
20801 callVM<Fn, jit::MapObjectGet>(ins);
20804 void CodeGenerator::visitMapObjectSize(LMapObjectSize* ins) {
20805 Register mapObj = ToRegister(ins->mapObject());
20806 Register output = ToRegister(ins->output());
20808 masm.loadMapObjectSize(mapObj, output);
20811 template <size_t NumDefs>
20812 void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
20813 wasm::JitCallStackArgVector stackArgs;
20814 masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
20815 if (masm.oom()) {
20816 return;
20819 MIonToWasmCall* mir = lir->mir();
20820 const wasm::FuncExport& funcExport = mir->funcExport();
20821 const wasm::FuncType& sig =
20822 mir->instance()->metadata().getFuncExportType(funcExport);
20824 WasmABIArgGenerator abi;
20825 for (size_t i = 0; i < lir->numOperands(); i++) {
20826 MIRType argMir;
20827 switch (sig.args()[i].kind()) {
20828 case wasm::ValType::I32:
20829 case wasm::ValType::I64:
20830 case wasm::ValType::F32:
20831 case wasm::ValType::F64:
20832 argMir = sig.args()[i].toMIRType();
20833 break;
20834 case wasm::ValType::V128:
20835 MOZ_CRASH("unexpected argument type when calling from ion to wasm");
20836 case wasm::ValType::Ref:
20837 // temporarilyUnsupportedReftypeForEntry() restricts args to externref
20838 MOZ_RELEASE_ASSERT(sig.args()[i].refType().isExtern());
20839 // Argument is boxed on the JS side to an anyref, so passed as a
20840 // pointer here.
20841 argMir = sig.args()[i].toMIRType();
20842 break;
20845 ABIArg arg = abi.next(argMir);
20846 switch (arg.kind()) {
20847 case ABIArg::GPR:
20848 case ABIArg::FPU: {
20849 MOZ_ASSERT(ToAnyRegister(lir->getOperand(i)) == arg.reg());
20850 stackArgs.infallibleEmplaceBack(wasm::JitCallStackArg());
20851 break;
20853 case ABIArg::Stack: {
20854 const LAllocation* larg = lir->getOperand(i);
20855 if (larg->isConstant()) {
20856 stackArgs.infallibleEmplaceBack(ToInt32(larg));
20857 } else if (larg->isGeneralReg()) {
20858 stackArgs.infallibleEmplaceBack(ToRegister(larg));
20859 } else if (larg->isFloatReg()) {
20860 stackArgs.infallibleEmplaceBack(ToFloatRegister(larg));
20861 } else {
20862 // Always use the stack pointer here because GenerateDirectCallFromJit
20863 // depends on this.
20864 Address addr = ToAddress<BaseRegForAddress::SP>(larg);
20865 stackArgs.infallibleEmplaceBack(addr);
20867 break;
20869 #ifdef JS_CODEGEN_REGISTER_PAIR
20870 case ABIArg::GPR_PAIR: {
20871 MOZ_CRASH(
20872 "no way to pass i64, and wasm uses hardfp for function calls");
20874 #endif
20875 case ABIArg::Uninitialized: {
20876 MOZ_CRASH("Uninitialized ABIArg kind");
20881 const wasm::ValTypeVector& results = sig.results();
20882 if (results.length() == 0) {
20883 MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
20884 } else {
20885 MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
20886 switch (results[0].kind()) {
20887 case wasm::ValType::I32:
20888 MOZ_ASSERT(lir->mir()->type() == MIRType::Int32);
20889 MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
20890 break;
20891 case wasm::ValType::I64:
20892 MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
20893 MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
20894 break;
20895 case wasm::ValType::F32:
20896 MOZ_ASSERT(lir->mir()->type() == MIRType::Float32);
20897 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnFloat32Reg);
20898 break;
20899 case wasm::ValType::F64:
20900 MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
20901 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
20902 break;
20903 case wasm::ValType::V128:
20904 MOZ_CRASH("unexpected return type when calling from ion to wasm");
20905 case wasm::ValType::Ref:
20906 // The wasm stubs layer unboxes anything that needs to be unboxed
20907 // and leaves it in a Value. A FuncRef/EqRef we could in principle
20908 // leave it as a raw object pointer but for now it complicates the
20909 // API to do so.
20910 MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
20911 break;
20915 WasmInstanceObject* instObj = lir->mir()->instanceObject();
20917 Register scratch = ToRegister(lir->temp());
20919 uint32_t callOffset;
20920 ensureOsiSpace();
20921 GenerateDirectCallFromJit(masm, funcExport, instObj->instance(), stackArgs,
20922 scratch, &callOffset);
20924 // Add the instance object to the constant pool, so it is transferred to
20925 // the owning IonScript and so that it gets traced as long as the IonScript
20926 // lives.
20928 uint32_t unused;
20929 masm.propagateOOM(graph.addConstantToPool(ObjectValue(*instObj), &unused));
20931 markSafepointAt(callOffset, lir);
20934 void CodeGenerator::visitIonToWasmCall(LIonToWasmCall* lir) {
20935 emitIonToWasmCallBase(lir);
20937 void CodeGenerator::visitIonToWasmCallV(LIonToWasmCallV* lir) {
20938 emitIonToWasmCallBase(lir);
20940 void CodeGenerator::visitIonToWasmCallI64(LIonToWasmCallI64* lir) {
20941 emitIonToWasmCallBase(lir);
20944 void CodeGenerator::visitWasmNullConstant(LWasmNullConstant* lir) {
20945 masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
20948 void CodeGenerator::visitWasmFence(LWasmFence* lir) {
20949 MOZ_ASSERT(gen->compilingWasm());
20950 masm.memoryBarrier(MembarFull);
20953 void CodeGenerator::visitWasmAnyRefFromJSValue(LWasmAnyRefFromJSValue* lir) {
20954 ValueOperand input = ToValue(lir, LWasmAnyRefFromJSValue::InputIndex);
20955 Register output = ToRegister(lir->output());
20956 FloatRegister tempFloat = ToFloatRegister(lir->temp0());
20958 using Fn = JSObject* (*)(JSContext* cx, HandleValue value);
20959 OutOfLineCode* oolBoxValue = oolCallVM<Fn, wasm::AnyRef::boxValue>(
20960 lir, ArgList(input), StoreRegisterTo(output));
20961 masm.convertValueToWasmAnyRef(input, output, tempFloat, oolBoxValue->entry());
20962 masm.bind(oolBoxValue->rejoin());
20965 void CodeGenerator::visitWasmAnyRefFromJSObject(LWasmAnyRefFromJSObject* lir) {
20966 Register input = ToRegister(lir->input());
20967 Register output = ToRegister(lir->output());
20968 masm.convertObjectToWasmAnyRef(input, output);
20971 void CodeGenerator::visitWasmAnyRefFromJSString(LWasmAnyRefFromJSString* lir) {
20972 Register input = ToRegister(lir->input());
20973 Register output = ToRegister(lir->output());
20974 masm.convertStringToWasmAnyRef(input, output);
20977 void CodeGenerator::visitWasmNewI31Ref(LWasmNewI31Ref* lir) {
20978 if (lir->value()->isConstant()) {
20979 // i31ref are often created with constants. If that's the case we will
20980 // do the operation statically here. This is similar to what is done
20981 // in masm.truncate32ToWasmI31Ref.
20982 Register output = ToRegister(lir->output());
20983 uint32_t value =
20984 static_cast<uint32_t>(lir->value()->toConstant()->toInt32());
20985 uintptr_t ptr = wasm::AnyRef::fromUint32Truncate(value).rawValue();
20986 masm.movePtr(ImmWord(ptr), output);
20987 } else {
20988 Register value = ToRegister(lir->value());
20989 Register output = ToRegister(lir->output());
20990 masm.truncate32ToWasmI31Ref(value, output);
20994 void CodeGenerator::visitWasmI31RefGet(LWasmI31RefGet* lir) {
20995 Register value = ToRegister(lir->value());
20996 Register output = ToRegister(lir->output());
20997 if (lir->mir()->wideningOp() == wasm::FieldWideningOp::Signed) {
20998 masm.convertWasmI31RefTo32Signed(value, output);
20999 } else {
21000 masm.convertWasmI31RefTo32Unsigned(value, output);
21004 #ifdef FUZZING_JS_FUZZILLI
21005 void CodeGenerator::emitFuzzilliHashDouble(FloatRegister floatDouble,
21006 Register scratch, Register output) {
21007 # ifdef JS_PUNBOX64
21008 Register64 reg64_1(scratch);
21009 Register64 reg64_2(output);
21010 masm.moveDoubleToGPR64(floatDouble, reg64_1);
21011 masm.move64(reg64_1, reg64_2);
21012 masm.rshift64(Imm32(32), reg64_2);
21013 masm.add32(scratch, output);
21014 # else
21015 Register64 reg64(scratch, output);
21016 masm.moveDoubleToGPR64(floatDouble, reg64);
21017 masm.add32(scratch, output);
21018 # endif
21021 void CodeGenerator::emitFuzzilliHashObject(LInstruction* lir, Register obj,
21022 Register output) {
21023 using Fn = void (*)(JSContext* cx, JSObject* obj, uint32_t* out);
21024 OutOfLineCode* ool = oolCallVM<Fn, FuzzilliHashObjectInl>(
21025 lir, ArgList(obj), StoreRegisterTo(output));
21027 masm.jump(ool->entry());
21028 masm.bind(ool->rejoin());
21031 void CodeGenerator::emitFuzzilliHashBigInt(Register bigInt, Register output) {
21032 LiveRegisterSet volatileRegs(GeneralRegisterSet::All(),
21033 FloatRegisterSet::All());
21034 volatileRegs.takeUnchecked(output);
21035 masm.PushRegsInMask(volatileRegs);
21037 using Fn = uint32_t (*)(BigInt* bigInt);
21038 masm.setupUnalignedABICall(output);
21039 masm.passABIArg(bigInt);
21040 masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
21041 masm.storeCallInt32Result(output);
21043 masm.PopRegsInMask(volatileRegs);
21046 void CodeGenerator::visitFuzzilliHashV(LFuzzilliHashV* ins) {
21047 MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Value);
21049 ValueOperand value = ToValue(ins, 0);
21051 Label isDouble, isObject, isBigInt, done;
21053 FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
21054 Register scratch = ToRegister(ins->getTemp(0));
21055 Register output = ToRegister(ins->output());
21056 MOZ_ASSERT(scratch != output);
21058 # ifdef JS_PUNBOX64
21059 Register tagReg = ToRegister(ins->getTemp(0));
21060 masm.splitTag(value, tagReg);
21061 # else
21062 Register tagReg = value.typeReg();
21063 # endif
21065 Label noBigInt;
21066 masm.branchTestBigInt(Assembler::NotEqual, tagReg, &noBigInt);
21067 masm.unboxBigInt(value, scratch);
21068 masm.jump(&isBigInt);
21069 masm.bind(&noBigInt);
21071 Label noObject;
21072 masm.branchTestObject(Assembler::NotEqual, tagReg, &noObject);
21073 masm.unboxObject(value, scratch);
21074 masm.jump(&isObject);
21075 masm.bind(&noObject);
21077 Label noInt32;
21078 masm.branchTestInt32(Assembler::NotEqual, tagReg, &noInt32);
21079 masm.unboxInt32(value, scratch);
21080 masm.convertInt32ToDouble(scratch, scratchFloat);
21081 masm.jump(&isDouble);
21082 masm.bind(&noInt32);
21084 Label noNull;
21085 masm.branchTestNull(Assembler::NotEqual, tagReg, &noNull);
21086 masm.move32(Imm32(1), scratch);
21087 masm.convertInt32ToDouble(scratch, scratchFloat);
21088 masm.jump(&isDouble);
21089 masm.bind(&noNull);
21091 Label noUndefined;
21092 masm.branchTestUndefined(Assembler::NotEqual, tagReg, &noUndefined);
21093 masm.move32(Imm32(2), scratch);
21094 masm.convertInt32ToDouble(scratch, scratchFloat);
21095 masm.jump(&isDouble);
21096 masm.bind(&noUndefined);
21098 Label noBoolean;
21099 masm.branchTestBoolean(Assembler::NotEqual, tagReg, &noBoolean);
21100 masm.unboxBoolean(value, scratch);
21101 masm.add32(Imm32(3), scratch);
21102 masm.convertInt32ToDouble(scratch, scratchFloat);
21103 masm.jump(&isDouble);
21104 masm.bind(&noBoolean);
21106 Label noDouble;
21107 masm.branchTestDouble(Assembler::NotEqual, tagReg, &noDouble);
21108 masm.unboxDouble(value, scratchFloat);
21109 masm.canonicalizeDoubleIfDeterministic(scratchFloat);
21111 masm.jump(&isDouble);
21112 masm.bind(&noDouble);
21113 masm.move32(Imm32(0), output);
21114 masm.jump(&done);
21116 masm.bind(&isBigInt);
21117 emitFuzzilliHashBigInt(scratch, output);
21118 masm.jump(&done);
21120 masm.bind(&isObject);
21121 emitFuzzilliHashObject(ins, scratch, output);
21122 masm.jump(&done);
21124 masm.bind(&isDouble);
21125 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21127 masm.bind(&done);
21130 void CodeGenerator::visitFuzzilliHashT(LFuzzilliHashT* ins) {
21131 const LAllocation* value = ins->value();
21132 MIRType mirType = ins->mir()->getOperand(0)->type();
21134 FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
21135 Register scratch = ToRegister(ins->getTemp(0));
21136 Register output = ToRegister(ins->output());
21137 MOZ_ASSERT(scratch != output);
21139 if (mirType == MIRType::Object) {
21140 MOZ_ASSERT(value->isGeneralReg());
21141 masm.mov(value->toGeneralReg()->reg(), scratch);
21142 emitFuzzilliHashObject(ins, scratch, output);
21143 } else if (mirType == MIRType::BigInt) {
21144 MOZ_ASSERT(value->isGeneralReg());
21145 masm.mov(value->toGeneralReg()->reg(), scratch);
21146 emitFuzzilliHashBigInt(scratch, output);
21147 } else if (mirType == MIRType::Double) {
21148 MOZ_ASSERT(value->isFloatReg());
21149 masm.moveDouble(value->toFloatReg()->reg(), scratchFloat);
21150 masm.canonicalizeDoubleIfDeterministic(scratchFloat);
21151 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21152 } else if (mirType == MIRType::Float32) {
21153 MOZ_ASSERT(value->isFloatReg());
21154 masm.convertFloat32ToDouble(value->toFloatReg()->reg(), scratchFloat);
21155 masm.canonicalizeDoubleIfDeterministic(scratchFloat);
21156 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21157 } else if (mirType == MIRType::Int32) {
21158 MOZ_ASSERT(value->isGeneralReg());
21159 masm.mov(value->toGeneralReg()->reg(), scratch);
21160 masm.convertInt32ToDouble(scratch, scratchFloat);
21161 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21162 } else if (mirType == MIRType::Null) {
21163 MOZ_ASSERT(value->isBogus());
21164 masm.move32(Imm32(1), scratch);
21165 masm.convertInt32ToDouble(scratch, scratchFloat);
21166 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21167 } else if (mirType == MIRType::Undefined) {
21168 MOZ_ASSERT(value->isBogus());
21169 masm.move32(Imm32(2), scratch);
21170 masm.convertInt32ToDouble(scratch, scratchFloat);
21171 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21172 } else if (mirType == MIRType::Boolean) {
21173 MOZ_ASSERT(value->isGeneralReg());
21174 masm.mov(value->toGeneralReg()->reg(), scratch);
21175 masm.add32(Imm32(3), scratch);
21176 masm.convertInt32ToDouble(scratch, scratchFloat);
21177 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21178 } else {
21179 MOZ_CRASH("unexpected type");
21183 void CodeGenerator::visitFuzzilliHashStore(LFuzzilliHashStore* ins) {
21184 const LAllocation* value = ins->value();
21185 MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Int32);
21186 MOZ_ASSERT(value->isGeneralReg());
21188 Register scratchJSContext = ToRegister(ins->getTemp(0));
21189 Register scratch = ToRegister(ins->getTemp(1));
21191 masm.loadJSContext(scratchJSContext);
21193 // stats
21194 Address addrExecHashInputs(scratchJSContext,
21195 offsetof(JSContext, executionHashInputs));
21196 masm.load32(addrExecHashInputs, scratch);
21197 masm.add32(Imm32(1), scratch);
21198 masm.store32(scratch, addrExecHashInputs);
21200 Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
21201 masm.load32(addrExecHash, scratch);
21202 masm.add32(value->toGeneralReg()->reg(), scratch);
21203 masm.rotateLeft(Imm32(1), scratch, scratch);
21204 masm.store32(scratch, addrExecHash);
21206 #endif
21208 static_assert(!std::is_polymorphic_v<CodeGenerator>,
21209 "CodeGenerator should not have any virtual methods");
21211 } // namespace jit
21212 } // namespace js