Bug 1889091 - Part 3: Specialize calls to native functions with variadic parameters...
[gecko.git] / js / src / jit / CodeGenerator.cpp
blob0ced9bd4af01f71530f35058889393a328bdb4ac
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/CodeGenerator.h"
9 #include "mozilla/Assertions.h"
10 #include "mozilla/Casting.h"
11 #include "mozilla/DebugOnly.h"
12 #include "mozilla/EndianUtils.h"
13 #include "mozilla/EnumeratedArray.h"
14 #include "mozilla/EnumeratedRange.h"
15 #include "mozilla/EnumSet.h"
16 #include "mozilla/IntegerTypeTraits.h"
17 #include "mozilla/Latin1.h"
18 #include "mozilla/MathAlgorithms.h"
19 #include "mozilla/ScopeExit.h"
20 #include "mozilla/SIMD.h"
22 #include <limits>
23 #include <type_traits>
24 #include <utility>
26 #include "jslibmath.h"
27 #include "jsmath.h"
28 #include "jsnum.h"
30 #include "builtin/MapObject.h"
31 #include "builtin/RegExp.h"
32 #include "builtin/String.h"
33 #include "irregexp/RegExpTypes.h"
34 #include "jit/ABIArgGenerator.h"
35 #include "jit/CompileInfo.h"
36 #include "jit/InlineScriptTree.h"
37 #include "jit/Invalidation.h"
38 #include "jit/IonGenericCallStub.h"
39 #include "jit/IonIC.h"
40 #include "jit/IonScript.h"
41 #include "jit/JitcodeMap.h"
42 #include "jit/JitFrames.h"
43 #include "jit/JitRuntime.h"
44 #include "jit/JitSpewer.h"
45 #include "jit/JitZone.h"
46 #include "jit/Linker.h"
47 #include "jit/MIRGenerator.h"
48 #include "jit/MoveEmitter.h"
49 #include "jit/RangeAnalysis.h"
50 #include "jit/RegExpStubConstants.h"
51 #include "jit/SafepointIndex.h"
52 #include "jit/SharedICHelpers.h"
53 #include "jit/SharedICRegisters.h"
54 #include "jit/VMFunctions.h"
55 #include "jit/WarpSnapshot.h"
56 #include "js/ColumnNumber.h" // JS::LimitedColumnNumberOneOrigin
57 #include "js/experimental/JitInfo.h" // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
58 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
59 #include "js/RegExpFlags.h" // JS::RegExpFlag
60 #include "js/ScalarType.h" // js::Scalar::Type
61 #include "proxy/DOMProxy.h"
62 #include "proxy/ScriptedProxyHandler.h"
63 #include "util/CheckedArithmetic.h"
64 #include "util/Unicode.h"
65 #include "vm/ArrayBufferViewObject.h"
66 #include "vm/AsyncFunction.h"
67 #include "vm/AsyncIteration.h"
68 #include "vm/BuiltinObjectKind.h"
69 #include "vm/FunctionFlags.h" // js::FunctionFlags
70 #include "vm/Interpreter.h"
71 #include "vm/JSAtomUtils.h" // AtomizeString
72 #include "vm/MatchPairs.h"
73 #include "vm/RegExpObject.h"
74 #include "vm/RegExpStatics.h"
75 #include "vm/StaticStrings.h"
76 #include "vm/StringObject.h"
77 #include "vm/StringType.h"
78 #include "vm/TypedArrayObject.h"
79 #include "wasm/WasmCodegenConstants.h"
80 #include "wasm/WasmValType.h"
81 #ifdef MOZ_VTUNE
82 # include "vtune/VTuneWrapper.h"
83 #endif
84 #include "wasm/WasmBinary.h"
85 #include "wasm/WasmGC.h"
86 #include "wasm/WasmGcObject.h"
87 #include "wasm/WasmStubs.h"
89 #include "builtin/Boolean-inl.h"
90 #include "jit/MacroAssembler-inl.h"
91 #include "jit/shared/CodeGenerator-shared-inl.h"
92 #include "jit/TemplateObject-inl.h"
93 #include "jit/VMFunctionList-inl.h"
94 #include "vm/JSScript-inl.h"
95 #include "wasm/WasmInstance-inl.h"
97 using namespace js;
98 using namespace js::jit;
100 using JS::GenericNaN;
101 using mozilla::AssertedCast;
102 using mozilla::CheckedUint32;
103 using mozilla::DebugOnly;
104 using mozilla::FloatingPoint;
105 using mozilla::Maybe;
106 using mozilla::NegativeInfinity;
107 using mozilla::PositiveInfinity;
109 using JS::ExpandoAndGeneration;
111 namespace js {
112 namespace jit {
114 #ifdef CHECK_OSIPOINT_REGISTERS
115 template <class Op>
116 static void HandleRegisterDump(Op op, MacroAssembler& masm,
117 LiveRegisterSet liveRegs, Register activation,
118 Register scratch) {
119 const size_t baseOffset = JitActivation::offsetOfRegs();
121 // Handle live GPRs.
122 for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
123 Register reg = *iter;
124 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
126 if (reg == activation) {
127 // To use the original value of the activation register (that's
128 // now on top of the stack), we need the scratch register.
129 masm.push(scratch);
130 masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
131 op(scratch, dump);
132 masm.pop(scratch);
133 } else {
134 op(reg, dump);
138 // Handle live FPRs.
139 for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
140 FloatRegister reg = *iter;
141 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
142 op(reg, dump);
146 class StoreOp {
147 MacroAssembler& masm;
149 public:
150 explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
152 void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
153 void operator()(FloatRegister reg, Address dump) {
154 if (reg.isDouble()) {
155 masm.storeDouble(reg, dump);
156 } else if (reg.isSingle()) {
157 masm.storeFloat32(reg, dump);
158 } else if (reg.isSimd128()) {
159 MOZ_CRASH("Unexpected case for SIMD");
160 } else {
161 MOZ_CRASH("Unexpected register type.");
166 class VerifyOp {
167 MacroAssembler& masm;
168 Label* failure_;
170 public:
171 VerifyOp(MacroAssembler& masm, Label* failure)
172 : masm(masm), failure_(failure) {}
174 void operator()(Register reg, Address dump) {
175 masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
177 void operator()(FloatRegister reg, Address dump) {
178 if (reg.isDouble()) {
179 ScratchDoubleScope scratch(masm);
180 masm.loadDouble(dump, scratch);
181 masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
182 } else if (reg.isSingle()) {
183 ScratchFloat32Scope scratch(masm);
184 masm.loadFloat32(dump, scratch);
185 masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
186 } else if (reg.isSimd128()) {
187 MOZ_CRASH("Unexpected case for SIMD");
188 } else {
189 MOZ_CRASH("Unexpected register type.");
194 void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
195 // Ensure the live registers stored by callVM did not change between
196 // the call and this OsiPoint. Try-catch relies on this invariant.
198 // Load pointer to the JitActivation in a scratch register.
199 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
200 Register scratch = allRegs.takeAny();
201 masm.push(scratch);
202 masm.loadJitActivation(scratch);
204 // If we should not check registers (because the instruction did not call
205 // into the VM, or a GC happened), we're done.
206 Label failure, done;
207 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
208 masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
210 // Having more than one VM function call made in one visit function at
211 // runtime is a sec-ciritcal error, because if we conservatively assume that
212 // one of the function call can re-enter Ion, then the invalidation process
213 // will potentially add a call at a random location, by patching the code
214 // before the return address.
215 masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
217 // Set checkRegs to 0, so that we don't try to verify registers after we
218 // return from this script to the caller.
219 masm.store32(Imm32(0), checkRegs);
221 // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
222 // temps after calling into the VM. This is fine because no other
223 // instructions (including this OsiPoint) will depend on them. Also
224 // backtracking can also use the same register for an input and an output.
225 // These are marked as clobbered and shouldn't get checked.
226 LiveRegisterSet liveRegs;
227 liveRegs.set() = RegisterSet::Intersect(
228 safepoint->liveRegs().set(),
229 RegisterSet::Not(safepoint->clobberedRegs().set()));
231 VerifyOp op(masm, &failure);
232 HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
234 masm.jump(&done);
236 // Do not profile the callWithABI that occurs below. This is to avoid a
237 // rare corner case that occurs when profiling interacts with itself:
239 // When slow profiling assertions are turned on, FunctionBoundary ops
240 // (which update the profiler pseudo-stack) may emit a callVM, which
241 // forces them to have an osi point associated with them. The
242 // FunctionBoundary for inline function entry is added to the caller's
243 // graph with a PC from the caller's code, but during codegen it modifies
244 // Gecko Profiler instrumentation to add the callee as the current top-most
245 // script. When codegen gets to the OSIPoint, and the callWithABI below is
246 // emitted, the codegen thinks that the current frame is the callee, but
247 // the PC it's using from the OSIPoint refers to the caller. This causes
248 // the profiler instrumentation of the callWithABI below to ASSERT, since
249 // the script and pc are mismatched. To avoid this, we simply omit
250 // instrumentation for these callWithABIs.
252 // Any live register captured by a safepoint (other than temp registers)
253 // must remain unchanged between the call and the OsiPoint instruction.
254 masm.bind(&failure);
255 masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
257 masm.bind(&done);
258 masm.pop(scratch);
261 bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
262 if (!checkOsiPointRegisters) {
263 return false;
266 if (safepoint->liveRegs().emptyGeneral() &&
267 safepoint->liveRegs().emptyFloat()) {
268 return false; // No registers to check.
271 return true;
274 void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
275 if (!shouldVerifyOsiPointRegs(safepoint)) {
276 return;
279 // Set checkRegs to 0. If we perform a VM call, the instruction
280 // will set it to 1.
281 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
282 Register scratch = allRegs.takeAny();
283 masm.push(scratch);
284 masm.loadJitActivation(scratch);
285 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
286 masm.store32(Imm32(0), checkRegs);
287 masm.pop(scratch);
290 static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
291 // Store a copy of all live registers before performing the call.
292 // When we reach the OsiPoint, we can use this to check nothing
293 // modified them in the meantime.
295 // Load pointer to the JitActivation in a scratch register.
296 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
297 Register scratch = allRegs.takeAny();
298 masm.push(scratch);
299 masm.loadJitActivation(scratch);
301 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
302 masm.add32(Imm32(1), checkRegs);
304 StoreOp op(masm);
305 HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
307 masm.pop(scratch);
309 #endif // CHECK_OSIPOINT_REGISTERS
311 // Before doing any call to Cpp, you should ensure that volatile
312 // registers are evicted by the register allocator.
313 void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins) {
314 TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
315 const VMFunctionData& fun = GetVMFunction(id);
317 // Stack is:
318 // ... frame ...
319 // [args]
320 #ifdef DEBUG
321 MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
322 pushedArgs_ = 0;
323 #endif
325 #ifdef CHECK_OSIPOINT_REGISTERS
326 if (shouldVerifyOsiPointRegs(ins->safepoint())) {
327 StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
329 #endif
331 #ifdef DEBUG
332 if (ins->mirRaw()) {
333 MOZ_ASSERT(ins->mirRaw()->isInstruction());
334 MInstruction* mir = ins->mirRaw()->toInstruction();
335 MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
337 // If this MIR instruction has an overridden AliasSet, set the JitRuntime's
338 // disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
339 // RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
340 // interrupt callbacks can call JS (chrome JS or shell testing functions).
341 bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
342 if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
343 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
344 masm.move32(Imm32(1), ReturnReg);
345 masm.store32(ReturnReg, AbsoluteAddress(addr));
348 #endif
350 // Push an exit frame descriptor.
351 masm.PushFrameDescriptor(FrameType::IonJS);
353 // Call the wrapper function. The wrapper is in charge to unwind the stack
354 // when returning from the call. Failures are handled with exceptions based
355 // on the return value of the C functions. To guard the outcome of the
356 // returned value, use another LIR instruction.
357 ensureOsiSpace();
358 uint32_t callOffset = masm.callJit(code);
359 markSafepointAt(callOffset, ins);
361 #ifdef DEBUG
362 // Reset the disallowArbitraryCode flag after the call.
364 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
365 masm.push(ReturnReg);
366 masm.move32(Imm32(0), ReturnReg);
367 masm.store32(ReturnReg, AbsoluteAddress(addr));
368 masm.pop(ReturnReg);
370 #endif
372 // Pop rest of the exit frame and the arguments left on the stack.
373 int framePop =
374 sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
375 masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
377 // Stack is:
378 // ... frame ...
381 template <typename Fn, Fn fn>
382 void CodeGenerator::callVM(LInstruction* ins) {
383 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
384 callVMInternal(id, ins);
387 // ArgSeq store arguments for OutOfLineCallVM.
389 // OutOfLineCallVM are created with "oolCallVM" function. The third argument of
390 // this function is an instance of a class which provides a "generate" in charge
391 // of pushing the argument, with "pushArg", for a VMFunction.
393 // Such list of arguments can be created by using the "ArgList" function which
394 // creates one instance of "ArgSeq", where the type of the arguments are
395 // inferred from the type of the arguments.
397 // The list of arguments must be written in the same order as if you were
398 // calling the function in C++.
400 // Example:
401 // ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
403 template <typename... ArgTypes>
404 class ArgSeq {
405 std::tuple<std::remove_reference_t<ArgTypes>...> args_;
407 template <std::size_t... ISeq>
408 inline void generate(CodeGenerator* codegen,
409 std::index_sequence<ISeq...>) const {
410 // Arguments are pushed in reverse order, from last argument to first
411 // argument.
412 (codegen->pushArg(std::get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
415 public:
416 explicit ArgSeq(ArgTypes&&... args)
417 : args_(std::forward<ArgTypes>(args)...) {}
419 inline void generate(CodeGenerator* codegen) const {
420 generate(codegen, std::index_sequence_for<ArgTypes...>{});
423 #ifdef DEBUG
424 static constexpr size_t numArgs = sizeof...(ArgTypes);
425 #endif
428 template <typename... ArgTypes>
429 inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
430 return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
433 // Store wrappers, to generate the right move of data after the VM call.
435 struct StoreNothing {
436 inline void generate(CodeGenerator* codegen) const {}
437 inline LiveRegisterSet clobbered() const {
438 return LiveRegisterSet(); // No register gets clobbered
442 class StoreRegisterTo {
443 private:
444 Register out_;
446 public:
447 explicit StoreRegisterTo(Register out) : out_(out) {}
449 inline void generate(CodeGenerator* codegen) const {
450 // It's okay to use storePointerResultTo here - the VMFunction wrapper
451 // ensures the upper bytes are zero for bool/int32 return values.
452 codegen->storePointerResultTo(out_);
454 inline LiveRegisterSet clobbered() const {
455 LiveRegisterSet set;
456 set.add(out_);
457 return set;
461 class StoreFloatRegisterTo {
462 private:
463 FloatRegister out_;
465 public:
466 explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
468 inline void generate(CodeGenerator* codegen) const {
469 codegen->storeFloatResultTo(out_);
471 inline LiveRegisterSet clobbered() const {
472 LiveRegisterSet set;
473 set.add(out_);
474 return set;
478 template <typename Output>
479 class StoreValueTo_ {
480 private:
481 Output out_;
483 public:
484 explicit StoreValueTo_(const Output& out) : out_(out) {}
486 inline void generate(CodeGenerator* codegen) const {
487 codegen->storeResultValueTo(out_);
489 inline LiveRegisterSet clobbered() const {
490 LiveRegisterSet set;
491 set.add(out_);
492 return set;
496 template <typename Output>
497 StoreValueTo_<Output> StoreValueTo(const Output& out) {
498 return StoreValueTo_<Output>(out);
501 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
502 class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
503 private:
504 LInstruction* lir_;
505 ArgSeq args_;
506 StoreOutputTo out_;
508 public:
509 OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
510 const StoreOutputTo& out)
511 : lir_(lir), args_(args), out_(out) {}
513 void accept(CodeGenerator* codegen) override {
514 codegen->visitOutOfLineCallVM(this);
517 LInstruction* lir() const { return lir_; }
518 const ArgSeq& args() const { return args_; }
519 const StoreOutputTo& out() const { return out_; }
522 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
523 OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
524 const StoreOutputTo& out) {
525 MOZ_ASSERT(lir->mirRaw());
526 MOZ_ASSERT(lir->mirRaw()->isInstruction());
528 #ifdef DEBUG
529 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
530 const VMFunctionData& fun = GetVMFunction(id);
531 MOZ_ASSERT(fun.explicitArgs == args.numArgs);
532 MOZ_ASSERT(fun.returnsData() !=
533 (std::is_same_v<StoreOutputTo, StoreNothing>));
534 #endif
536 OutOfLineCode* ool = new (alloc())
537 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
538 addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
539 return ool;
542 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
543 void CodeGenerator::visitOutOfLineCallVM(
544 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
545 LInstruction* lir = ool->lir();
547 #ifdef JS_JITSPEW
548 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
549 lir->opName());
550 if (const char* extra = lir->getExtraName()) {
551 JitSpewCont(JitSpew_Codegen, ":%s", extra);
553 JitSpewFin(JitSpew_Codegen);
554 #endif
555 perfSpewer_.recordInstruction(masm, lir);
556 saveLive(lir);
557 ool->args().generate(this);
558 callVM<Fn, fn>(lir);
559 ool->out().generate(this);
560 restoreLiveIgnore(lir, ool->out().clobbered());
561 masm.jump(ool->rejoin());
564 class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
565 private:
566 LInstruction* lir_;
567 size_t cacheIndex_;
568 size_t cacheInfoIndex_;
570 public:
571 OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
572 size_t cacheInfoIndex)
573 : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
575 void bind(MacroAssembler* masm) override {
576 // The binding of the initial jump is done in
577 // CodeGenerator::visitOutOfLineICFallback.
580 size_t cacheIndex() const { return cacheIndex_; }
581 size_t cacheInfoIndex() const { return cacheInfoIndex_; }
582 LInstruction* lir() const { return lir_; }
584 void accept(CodeGenerator* codegen) override {
585 codegen->visitOutOfLineICFallback(this);
589 void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
590 if (cacheIndex == SIZE_MAX) {
591 masm.setOOM();
592 return;
595 DataPtr<IonIC> cache(this, cacheIndex);
596 MInstruction* mir = lir->mirRaw()->toInstruction();
597 cache->setScriptedLocation(mir->block()->info().script(),
598 mir->resumePoint()->pc());
600 Register temp = cache->scratchRegisterForEntryJump();
601 icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
602 masm.jump(Address(temp, 0));
604 MOZ_ASSERT(!icInfo_.empty());
606 OutOfLineICFallback* ool =
607 new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
608 addOutOfLineCode(ool, mir);
610 masm.bind(ool->rejoin());
611 cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
614 void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
615 LInstruction* lir = ool->lir();
616 size_t cacheIndex = ool->cacheIndex();
617 size_t cacheInfoIndex = ool->cacheInfoIndex();
619 DataPtr<IonIC> ic(this, cacheIndex);
621 // Register the location of the OOL path in the IC.
622 ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
624 switch (ic->kind()) {
625 case CacheKind::GetProp:
626 case CacheKind::GetElem: {
627 IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
629 saveLive(lir);
631 pushArg(getPropIC->id());
632 pushArg(getPropIC->value());
633 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
634 pushArg(ImmGCPtr(gen->outerInfo().script()));
636 using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
637 HandleValue, HandleValue, MutableHandleValue);
638 callVM<Fn, IonGetPropertyIC::update>(lir);
640 StoreValueTo(getPropIC->output()).generate(this);
641 restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
643 masm.jump(ool->rejoin());
644 return;
646 case CacheKind::GetPropSuper:
647 case CacheKind::GetElemSuper: {
648 IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
650 saveLive(lir);
652 pushArg(getPropSuperIC->id());
653 pushArg(getPropSuperIC->receiver());
654 pushArg(getPropSuperIC->object());
655 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
656 pushArg(ImmGCPtr(gen->outerInfo().script()));
658 using Fn =
659 bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
660 HandleValue, HandleValue, MutableHandleValue);
661 callVM<Fn, IonGetPropSuperIC::update>(lir);
663 StoreValueTo(getPropSuperIC->output()).generate(this);
664 restoreLiveIgnore(lir,
665 StoreValueTo(getPropSuperIC->output()).clobbered());
667 masm.jump(ool->rejoin());
668 return;
670 case CacheKind::SetProp:
671 case CacheKind::SetElem: {
672 IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
674 saveLive(lir);
676 pushArg(setPropIC->rhs());
677 pushArg(setPropIC->id());
678 pushArg(setPropIC->object());
679 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
680 pushArg(ImmGCPtr(gen->outerInfo().script()));
682 using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
683 HandleObject, HandleValue, HandleValue);
684 callVM<Fn, IonSetPropertyIC::update>(lir);
686 restoreLive(lir);
688 masm.jump(ool->rejoin());
689 return;
691 case CacheKind::GetName: {
692 IonGetNameIC* getNameIC = ic->asGetNameIC();
694 saveLive(lir);
696 pushArg(getNameIC->environment());
697 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
698 pushArg(ImmGCPtr(gen->outerInfo().script()));
700 using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
701 MutableHandleValue);
702 callVM<Fn, IonGetNameIC::update>(lir);
704 StoreValueTo(getNameIC->output()).generate(this);
705 restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
707 masm.jump(ool->rejoin());
708 return;
710 case CacheKind::BindName: {
711 IonBindNameIC* bindNameIC = ic->asBindNameIC();
713 saveLive(lir);
715 pushArg(bindNameIC->environment());
716 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
717 pushArg(ImmGCPtr(gen->outerInfo().script()));
719 using Fn =
720 JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
721 callVM<Fn, IonBindNameIC::update>(lir);
723 StoreRegisterTo(bindNameIC->output()).generate(this);
724 restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
726 masm.jump(ool->rejoin());
727 return;
729 case CacheKind::GetIterator: {
730 IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
732 saveLive(lir);
734 pushArg(getIteratorIC->value());
735 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
736 pushArg(ImmGCPtr(gen->outerInfo().script()));
738 using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
739 HandleValue);
740 callVM<Fn, IonGetIteratorIC::update>(lir);
742 StoreRegisterTo(getIteratorIC->output()).generate(this);
743 restoreLiveIgnore(lir,
744 StoreRegisterTo(getIteratorIC->output()).clobbered());
746 masm.jump(ool->rejoin());
747 return;
749 case CacheKind::OptimizeSpreadCall: {
750 auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();
752 saveLive(lir);
754 pushArg(optimizeSpreadCallIC->value());
755 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
756 pushArg(ImmGCPtr(gen->outerInfo().script()));
758 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
759 HandleValue, MutableHandleValue);
760 callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);
762 StoreValueTo(optimizeSpreadCallIC->output()).generate(this);
763 restoreLiveIgnore(
764 lir, StoreValueTo(optimizeSpreadCallIC->output()).clobbered());
766 masm.jump(ool->rejoin());
767 return;
769 case CacheKind::In: {
770 IonInIC* inIC = ic->asInIC();
772 saveLive(lir);
774 pushArg(inIC->object());
775 pushArg(inIC->key());
776 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
777 pushArg(ImmGCPtr(gen->outerInfo().script()));
779 using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
780 HandleObject, bool*);
781 callVM<Fn, IonInIC::update>(lir);
783 StoreRegisterTo(inIC->output()).generate(this);
784 restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
786 masm.jump(ool->rejoin());
787 return;
789 case CacheKind::HasOwn: {
790 IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
792 saveLive(lir);
794 pushArg(hasOwnIC->id());
795 pushArg(hasOwnIC->value());
796 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
797 pushArg(ImmGCPtr(gen->outerInfo().script()));
799 using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
800 HandleValue, int32_t*);
801 callVM<Fn, IonHasOwnIC::update>(lir);
803 StoreRegisterTo(hasOwnIC->output()).generate(this);
804 restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
806 masm.jump(ool->rejoin());
807 return;
809 case CacheKind::CheckPrivateField: {
810 IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
812 saveLive(lir);
814 pushArg(checkPrivateFieldIC->id());
815 pushArg(checkPrivateFieldIC->value());
817 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
818 pushArg(ImmGCPtr(gen->outerInfo().script()));
820 using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
821 HandleValue, HandleValue, bool*);
822 callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
824 StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
825 restoreLiveIgnore(
826 lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
828 masm.jump(ool->rejoin());
829 return;
831 case CacheKind::InstanceOf: {
832 IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
834 saveLive(lir);
836 pushArg(hasInstanceOfIC->rhs());
837 pushArg(hasInstanceOfIC->lhs());
838 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
839 pushArg(ImmGCPtr(gen->outerInfo().script()));
841 using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
842 HandleValue lhs, HandleObject rhs, bool* res);
843 callVM<Fn, IonInstanceOfIC::update>(lir);
845 StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
846 restoreLiveIgnore(lir,
847 StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
849 masm.jump(ool->rejoin());
850 return;
852 case CacheKind::UnaryArith: {
853 IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
855 saveLive(lir);
857 pushArg(unaryArithIC->input());
858 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
859 pushArg(ImmGCPtr(gen->outerInfo().script()));
861 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
862 IonUnaryArithIC* stub, HandleValue val,
863 MutableHandleValue res);
864 callVM<Fn, IonUnaryArithIC::update>(lir);
866 StoreValueTo(unaryArithIC->output()).generate(this);
867 restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
869 masm.jump(ool->rejoin());
870 return;
872 case CacheKind::ToPropertyKey: {
873 IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
875 saveLive(lir);
877 pushArg(toPropertyKeyIC->input());
878 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
879 pushArg(ImmGCPtr(gen->outerInfo().script()));
881 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
882 IonToPropertyKeyIC* ic, HandleValue val,
883 MutableHandleValue res);
884 callVM<Fn, IonToPropertyKeyIC::update>(lir);
886 StoreValueTo(toPropertyKeyIC->output()).generate(this);
887 restoreLiveIgnore(lir,
888 StoreValueTo(toPropertyKeyIC->output()).clobbered());
890 masm.jump(ool->rejoin());
891 return;
893 case CacheKind::BinaryArith: {
894 IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
896 saveLive(lir);
898 pushArg(binaryArithIC->rhs());
899 pushArg(binaryArithIC->lhs());
900 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
901 pushArg(ImmGCPtr(gen->outerInfo().script()));
903 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
904 IonBinaryArithIC* stub, HandleValue lhs,
905 HandleValue rhs, MutableHandleValue res);
906 callVM<Fn, IonBinaryArithIC::update>(lir);
908 StoreValueTo(binaryArithIC->output()).generate(this);
909 restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
911 masm.jump(ool->rejoin());
912 return;
914 case CacheKind::Compare: {
915 IonCompareIC* compareIC = ic->asCompareIC();
917 saveLive(lir);
919 pushArg(compareIC->rhs());
920 pushArg(compareIC->lhs());
921 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
922 pushArg(ImmGCPtr(gen->outerInfo().script()));
924 using Fn =
925 bool (*)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub,
926 HandleValue lhs, HandleValue rhs, bool* res);
927 callVM<Fn, IonCompareIC::update>(lir);
929 StoreRegisterTo(compareIC->output()).generate(this);
930 restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
932 masm.jump(ool->rejoin());
933 return;
935 case CacheKind::CloseIter: {
936 IonCloseIterIC* closeIterIC = ic->asCloseIterIC();
938 saveLive(lir);
940 pushArg(closeIterIC->iter());
941 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
942 pushArg(ImmGCPtr(gen->outerInfo().script()));
944 using Fn =
945 bool (*)(JSContext*, HandleScript, IonCloseIterIC*, HandleObject);
946 callVM<Fn, IonCloseIterIC::update>(lir);
948 restoreLive(lir);
950 masm.jump(ool->rejoin());
951 return;
953 case CacheKind::OptimizeGetIterator: {
954 auto* optimizeGetIteratorIC = ic->asOptimizeGetIteratorIC();
956 saveLive(lir);
958 pushArg(optimizeGetIteratorIC->value());
959 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
960 pushArg(ImmGCPtr(gen->outerInfo().script()));
962 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeGetIteratorIC*,
963 HandleValue, bool* res);
964 callVM<Fn, IonOptimizeGetIteratorIC::update>(lir);
966 StoreRegisterTo(optimizeGetIteratorIC->output()).generate(this);
967 restoreLiveIgnore(
968 lir, StoreRegisterTo(optimizeGetIteratorIC->output()).clobbered());
970 masm.jump(ool->rejoin());
971 return;
973 case CacheKind::Call:
974 case CacheKind::TypeOf:
975 case CacheKind::ToBool:
976 case CacheKind::GetIntrinsic:
977 case CacheKind::NewArray:
978 case CacheKind::NewObject:
979 MOZ_CRASH("Unsupported IC");
981 MOZ_CRASH();
984 StringObject* MNewStringObject::templateObj() const {
985 return &templateObj_->as<StringObject>();
// Construct a code generator for the given MIR generator, LIR graph and
// macro-assembler. Label lists are allocated from the MIR generator's
// allocator; scriptCounts_ starts out null and is owned by this object.
CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
                             MacroAssembler* masm)
    : CodeGeneratorSpecific(gen, graph, masm),
      ionScriptLabels_(gen->alloc()),
      ionNurseryObjectLabels_(gen->alloc()),
      scriptCounts_(nullptr),
      zoneStubsToReadBarrier_(0) {}

// Release the lazily-created script counts, if any were allocated.
CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
// Convert a boxed Value to an int32. In TRUNCATE mode strings and doubles
// are handled via out-of-line paths; in NORMAL mode only int32-convertible
// inputs succeed. Unconvertible inputs bail out.
void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
  ValueOperand operand = ToValue(lir, LValueToInt32::Input);
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->tempFloat());

  Label fails;
  if (lir->mode() == LValueToInt32::TRUNCATE) {
    // Out-of-line path for doubles that don't fit the inline truncation.
    OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());

    // We can only handle strings in truncation contexts, like bitwise
    // operations.
    Register stringReg = ToRegister(lir->temp());
    using Fn = bool (*)(JSContext*, JSString*, double*);
    auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
                                                    StoreFloatRegisterTo(temp));
    Label* stringEntry = oolString->entry();
    Label* stringRejoin = oolString->rejoin();

    masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
                              oolDouble->entry(), stringReg, temp, output,
                              &fails);
    masm.bind(oolDouble->rejoin());
  } else {
    MOZ_ASSERT(lir->mode() == LValueToInt32::NORMAL);
    masm.convertValueToInt32(operand, temp, output, &fails,
                             lir->mirNormal()->needsNegativeZeroCheck(),
                             lir->mirNormal()->conversion());
  }

  // Any failed conversion deoptimizes via the snapshot.
  bailoutFrom(&fails, lir->snapshot());
}
// Convert a boxed Value to a double. Which primitive types are accepted
// depends on the MIR conversion mode; anything else bails out.
void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
  ValueOperand operand = ToValue(lir, LValueToDouble::InputIndex);
  FloatRegister output = ToFloatRegister(lir->output());

  // Set if we can handle other primitives beside strings, as long as they're
  // guaranteed to never throw. This rules out symbols and BigInts, but allows
  // booleans, undefined, and null.
  bool hasNonStringPrimitives =
      lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;

  Label isDouble, isInt32, isBool, isNull, isUndefined, done;

  {
    // Scope the scratch tag register to the type-dispatch branches only.
    ScratchTagScope tag(masm, operand);
    masm.splitTagForTest(operand, tag);

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (hasNonStringPrimitives) {
      masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
      masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &isNull);
    }
  }

  // No supported tag matched: deoptimize.
  bailout(lir->snapshot());

  if (hasNonStringPrimitives) {
    // ToNumber(null) is +0.
    masm.bind(&isNull);
    masm.loadConstantDouble(0.0, output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    // ToNumber(undefined) is NaN.
    masm.bind(&isUndefined);
    masm.loadConstantDouble(GenericNaN(), output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    masm.bind(&isBool);
    masm.boolValueToDouble(operand, output);
    masm.jump(&done);
  }

  masm.bind(&isInt32);
  masm.int32ValueToDouble(operand, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  masm.unboxDouble(operand, output);
  masm.bind(&done);
}
// Convert a boxed Value to a float32; mirrors visitValueToDouble but
// produces single precision. Unsupported types bail out.
void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
  ValueOperand operand = ToValue(lir, LValueToFloat32::InputIndex);
  FloatRegister output = ToFloatRegister(lir->output());

  // Set if we can handle other primitives beside strings, as long as they're
  // guaranteed to never throw. This rules out symbols and BigInts, but allows
  // booleans, undefined, and null.
  bool hasNonStringPrimitives =
      lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;

  Label isDouble, isInt32, isBool, isNull, isUndefined, done;

  {
    // Scope the scratch tag register to the type-dispatch branches only.
    ScratchTagScope tag(masm, operand);
    masm.splitTagForTest(operand, tag);

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (hasNonStringPrimitives) {
      masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
      masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &isNull);
    }
  }

  // No supported tag matched: deoptimize.
  bailout(lir->snapshot());

  if (hasNonStringPrimitives) {
    // ToNumber(null) is +0.
    masm.bind(&isNull);
    masm.loadConstantFloat32(0.0f, output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    // ToNumber(undefined) is NaN.
    masm.bind(&isUndefined);
    masm.loadConstantFloat32(float(GenericNaN()), output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    masm.bind(&isBool);
    masm.boolValueToFloat32(operand, output);
    masm.jump(&done);
  }

  masm.bind(&isInt32);
  masm.int32ValueToFloat32(operand, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  // ARM and MIPS may not have a double register available if we've
  // allocated output as a float32.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
  ScratchDoubleScope fpscratch(masm);
  masm.unboxDouble(operand, fpscratch);
  masm.convertDoubleToFloat32(fpscratch, output);
#else
  masm.unboxDouble(operand, output);
  masm.convertDoubleToFloat32(output, output);
#endif
  masm.bind(&done);
}
// Convert a boxed Value to a BigInt. BigInts unbox inline; booleans and
// strings go through the ToBigInt VM call; everything else bails out.
void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
  ValueOperand operand = ToValue(lir, LValueToBigInt::InputIndex);
  Register output = ToRegister(lir->output());

  using Fn = BigInt* (*)(JSContext*, HandleValue);
  auto* ool =
      oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));

  Register tag = masm.extractTag(operand, output);

  Label notBigInt, done;
  masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
  masm.unboxBigInt(operand, output);
  masm.jump(&done);
  masm.bind(&notBigInt);

  // Booleans and strings are converted by the out-of-line VM call.
  masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
  masm.branchTestString(Assembler::Equal, tag, ool->entry());

  // ToBigInt(object) can have side-effects; all other types throw a TypeError.
  bailout(lir->snapshot());

  masm.bind(ool->rejoin());
  masm.bind(&done);
}
1175 void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
1176 masm.convertInt32ToDouble(ToRegister(lir->input()),
1177 ToFloatRegister(lir->output()));
1180 void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
1181 masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
1182 ToFloatRegister(lir->output()));
1185 void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
1186 masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
1187 ToFloatRegister(lir->output()));
1190 void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
1191 masm.convertInt32ToFloat32(ToRegister(lir->input()),
1192 ToFloatRegister(lir->output()));
1195 void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
1196 Label fail;
1197 FloatRegister input = ToFloatRegister(lir->input());
1198 Register output = ToRegister(lir->output());
1199 masm.convertDoubleToInt32(input, output, &fail,
1200 lir->mir()->needsNegativeZeroCheck());
1201 bailoutFrom(&fail, lir->snapshot());
1204 void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
1205 Label fail;
1206 FloatRegister input = ToFloatRegister(lir->input());
1207 Register output = ToRegister(lir->output());
1208 masm.convertFloat32ToInt32(input, output, &fail,
1209 lir->mir()->needsNegativeZeroCheck());
1210 bailoutFrom(&fail, lir->snapshot());
// Sign-extend an int32 to pointer width (64-bit platforms only).
void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
#ifdef JS_64BIT
  // This LIR instruction is only used if the input can be negative.
  MOZ_ASSERT(lir->mir()->canBeNegative());

  Register output = ToRegister(lir->output());
  const LAllocation* input = lir->input();
  if (input->isRegister()) {
    masm.move32SignExtendToPtr(ToRegister(input), output);
  } else {
    // Input was spilled; sign-extend directly from memory.
    masm.load32SignExtendToPtr(ToAddress(input), output);
  }
#else
  MOZ_CRASH("Not used on 32-bit platforms");
#endif
}
// Narrow a known-non-negative IntPtr to int32 in place, bailing out if the
// value does not fit (64-bit platforms only).
void CodeGenerator::visitNonNegativeIntPtrToInt32(
    LNonNegativeIntPtrToInt32* lir) {
#ifdef JS_64BIT
  // In-place conversion: input and output share a register.
  Register output = ToRegister(lir->output());
  MOZ_ASSERT(ToRegister(lir->input()) == output);

  Label bail;
  masm.guardNonNegativeIntPtrToInt32(output, &bail);
  bailoutFrom(&bail, lir->snapshot());
#else
  MOZ_CRASH("Not used on 32-bit platforms");
#endif
}
1244 void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
1245 Register input = ToRegister(lir->input());
1246 FloatRegister output = ToFloatRegister(lir->output());
1247 masm.convertIntPtrToDouble(input, output);
// Adjust a DataView length for an element access of |byteSize| bytes by
// subtracting byteSize - 1 in place, bailing out if the result underflows.
void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
  // In-place adjustment: input and output share a register.
  Register output = ToRegister(lir->output());
  MOZ_ASSERT(ToRegister(lir->input()) == output);

  uint32_t byteSize = lir->mir()->byteSize();

#ifdef DEBUG
  // The incoming length must never be negative.
  Label ok;
  masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
  masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
  masm.bind(&ok);
#endif

  Label bail;
  masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Out-of-line truthiness test for an object: call into C++ to determine
// whether the object emulates undefined, then branch to the matching label.
// Volatile registers are preserved around the ABI call; |scratch| receives
// the boolean result.
void CodeGenerator::emitOOLTestObject(Register objreg,
                                      Label* ifEmulatesUndefined,
                                      Label* ifDoesntEmulateUndefined,
                                      Register scratch) {
  saveVolatile(scratch);
#if defined(DEBUG) || defined(FUZZING)
  // Debug/fuzzing builds also pass the emulate-undefined fuse value so the
  // callee can cross-check it.
  masm.loadPtr(AbsoluteAddress(
                   gen->runtime->addressOfHasSeenObjectEmulateUndefinedFuse()),
               scratch);
  using Fn = bool (*)(JSObject* obj, size_t fuseValue);
  masm.setupAlignedABICall();
  masm.passABIArg(objreg);
  masm.passABIArg(scratch);
  masm.callWithABI<Fn, js::EmulatesUndefinedCheckFuse>();
#else
  using Fn = bool (*)(JSObject* obj);
  masm.setupAlignedABICall();
  masm.passABIArg(objreg);
  masm.callWithABI<Fn, js::EmulatesUndefined>();
#endif
  masm.storeCallPointerResult(scratch);
  restoreVolatile(scratch);

  masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
  masm.jump(ifDoesntEmulateUndefined);
}
// Base out-of-line code generator for all tests of the truthiness of an
// object, where the object might not be truthy. (Recall that per spec all
// objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
// flag to permit objects to look like |undefined| in certain contexts,
// including in object truthiness testing.) We check truthiness inline except
// when we're testing it on a proxy, in which case out-of-line code will call
// EmulatesUndefined for a conclusive answer.
class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
  Register objreg_;
  Register scratch_;

  Label* ifEmulatesUndefined_;
  Label* ifDoesntEmulateUndefined_;

#ifdef DEBUG
  // True once setInputAndTargets has been called.
  bool initialized() { return ifEmulatesUndefined_ != nullptr; }
#endif

 public:
  OutOfLineTestObject()
      : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}

  void accept(CodeGenerator* codegen) final {
    MOZ_ASSERT(initialized());
    codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
                               ifDoesntEmulateUndefined_, scratch_);
  }

  // Specify the register where the object to be tested is found, labels to
  // jump to if the object is truthy or falsy, and a scratch register for
  // use in the out-of-line path.
  void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
                          Label* ifDoesntEmulateUndefined, Register scratch) {
    MOZ_ASSERT(!initialized());
    MOZ_ASSERT(ifEmulatesUndefined);
    objreg_ = objreg;
    scratch_ = scratch;
    ifEmulatesUndefined_ = ifEmulatesUndefined;
    ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
  }
};
// A subclass of OutOfLineTestObject containing two extra labels, for use when
// the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
// code. The user should bind these labels in inline code, and specify them as
// targets via setInputAndTargets, as appropriate.
class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
  Label label1_;
  Label label2_;

 public:
  OutOfLineTestObjectWithLabels() = default;

  Label* label1() { return &label1_; }
  Label* label2() { return &label2_; }
};
// Emit the emulates-undefined test without a trailing jump/bind; callers
// decide how the ifDoesntEmulateUndefined path is reached.
void CodeGenerator::testObjectEmulatesUndefinedKernel(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
                          scratch);

  // Perform a fast-path check of the object's class flags if the object's
  // not a proxy. Let out-of-line code handle the slow cases that require
  // saving registers, making a function call, and restoring registers.
  masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
                                       ifEmulatesUndefined);
}
// Variant of the emulates-undefined test where the "doesn't emulate" case
// falls through: ifDoesntEmulateUndefined is bound here, immediately after
// the test.
void CodeGenerator::branchTestObjectEmulatesUndefined(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
             "ifDoesntEmulateUndefined will be bound to the fallthrough path");

  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.bind(ifDoesntEmulateUndefined);
}
// Variant of the emulates-undefined test that explicitly jumps to
// ifDoesntEmulateUndefined rather than falling through, for callers whose
// label is bound elsewhere.
void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
                                                Label* ifEmulatesUndefined,
                                                Label* ifDoesntEmulateUndefined,
                                                Register scratch,
                                                OutOfLineTestObject* ool) {
  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.jump(ifDoesntEmulateUndefined);
}
// Emit the truthiness test for one possible JSValueType of |value|, branching
// to ifTruthy/ifFalsy as appropriate. When |skipTypeTest| is set, the value
// is already known to have this type (asserted in debug builds), so no tag
// check is emitted and control never falls through to a later type.
void CodeGenerator::testValueTruthyForType(
    JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
    Register tempToUnbox, Register temp, FloatRegister floatTemp,
    Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool,
    bool skipTypeTest) {
#ifdef DEBUG
  if (skipTypeTest) {
    Label expected;
    masm.branchTestType(Assembler::Equal, tag, type, &expected);
    masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
    masm.bind(&expected);
  }
#endif

  // Handle irregular types first.
  switch (type) {
    case JSVAL_TYPE_UNDEFINED:
    case JSVAL_TYPE_NULL:
      // Undefined and null are falsy.
      if (!skipTypeTest) {
        masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
      } else {
        masm.jump(ifFalsy);
      }
      return;
    case JSVAL_TYPE_SYMBOL:
      // Symbols are truthy.
      if (!skipTypeTest) {
        masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
      } else {
        masm.jump(ifTruthy);
      }
      return;
    case JSVAL_TYPE_OBJECT: {
      Label notObject;
      if (!skipTypeTest) {
        masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
      }
      // Release the scratch tag while unboxing and testing the object.
      ScratchTagScopeRelease _(&tag);
      Register objreg = masm.extractObject(value, tempToUnbox);
      testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, temp, ool);
      masm.bind(&notObject);
      return;
    }
    default:
      break;
  }

  // Check the type of the value (unless this is the last possible type).
  Label differentType;
  if (!skipTypeTest) {
    masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
  }

  // Branch if the value is falsy.
  ScratchTagScopeRelease _(&tag);
  switch (type) {
    case JSVAL_TYPE_BOOLEAN: {
      masm.branchTestBooleanTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_INT32: {
      masm.branchTestInt32Truthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_STRING: {
      masm.branchTestStringTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_BIGINT: {
      masm.branchTestBigIntTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_DOUBLE: {
      masm.unboxDouble(value, floatTemp);
      masm.branchTestDoubleTruthy(false, floatTemp, ifFalsy);
      break;
    }
    default:
      MOZ_CRASH("Unexpected value type");
  }

  // If we reach this point, the value is truthy. We fall through for
  // truthy on the last test; otherwise, branch.
  if (!skipTypeTest) {
    masm.jump(ifTruthy);
  }

  masm.bind(&differentType);
}
// Emit a full truthiness test for |value| over all possible value types,
// testing previously observed types first (most frequent first) and then the
// remaining types in a fixed default order. Falls through when the value is
// truthy and the final type test matched.
void CodeGenerator::testValueTruthy(const ValueOperand& value,
                                    Register tempToUnbox, Register temp,
                                    FloatRegister floatTemp,
                                    const TypeDataList& observedTypes,
                                    Label* ifTruthy, Label* ifFalsy,
                                    OutOfLineTestObject* ool) {
  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  const std::initializer_list<JSValueType> defaultOrder = {
      JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL,   JSVAL_TYPE_BOOLEAN,
      JSVAL_TYPE_INT32,     JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
      JSVAL_TYPE_DOUBLE,    JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};

  mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);

  // Generate tests for previously observed types first.
  // The TypeDataList is sorted by descending frequency.
  for (auto& observed : observedTypes) {
    JSValueType type = observed.type();
    remaining -= type;

    testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
                           ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
  }

  // Generate tests for remaining types.
  for (auto type : defaultOrder) {
    if (!remaining.contains(type)) {
      continue;
    }
    remaining -= type;

    // We don't need a type test for the last possible type.
    bool skipTypeTest = remaining.isEmpty();
    testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
                           ifTruthy, ifFalsy, ool, skipTypeTest);
  }
  MOZ_ASSERT(remaining.isEmpty());

  // We fall through if the final test is truthy.
}
// Branch on the truthiness of a BigInt (zero is falsy). The branch polarity
// is chosen so the successor laid out next can fall through.
void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
  Label* ifTrueLabel = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalseLabel = getJumpLabelForBranch(lir->ifFalse());
  Register input = ToRegister(lir->input());

  if (isNextBlock(lir->ifFalse()->lir())) {
    masm.branchIfBigIntIsNonZero(input, ifTrueLabel);
  } else if (isNextBlock(lir->ifTrue()->lir())) {
    masm.branchIfBigIntIsZero(input, ifFalseLabel);
  } else {
    // Neither successor is next: branch to false, then jump to true.
    masm.branchIfBigIntIsZero(input, ifFalseLabel);
    jumpToBlock(lir->ifTrue());
  }
}
// Debug/fuzzing-only assertion that |input| does not emulate undefined;
// compiles to nothing in other builds.
void CodeGenerator::assertObjectDoesNotEmulateUndefined(
    Register input, Register temp, const MInstruction* mir) {
#if defined(DEBUG) || defined(FUZZING)
  // Validate that the object indeed doesn't have the emulates undefined flag.
  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, mir);

  Label* doesNotEmulateUndefined = ool->label1();
  Label* emulatesUndefined = ool->label2();

  testObjectEmulatesUndefined(input, emulatesUndefined, doesNotEmulateUndefined,
                              temp, ool);
  masm.bind(emulatesUndefined);
  masm.assumeUnreachable(
      "Found an object emulating undefined while the fuse is intact");
  masm.bind(doesNotEmulateUndefined);
#endif
}
// Branch on the truthiness of an object. If the emulate-undefined fuse is
// intact, every object is truthy and only a debug assertion is emitted;
// otherwise the emulates-undefined test is required.
void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
  Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
  Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
  Register input = ToRegister(lir->input());

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  if (intact) {
    assertObjectDoesNotEmulateUndefined(input, ToRegister(lir->temp()),
                                        lir->mir());
    // Bug 1874905: It would be fantastic if this could be optimized out
    masm.jump(truthy);
  } else {
    auto* ool = new (alloc()) OutOfLineTestObject();
    addOutOfLineCode(ool, lir->mir());

    testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
                                ool);
  }
}
// Branch on the truthiness of an arbitrary boxed Value.
void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
  auto* ool = new (alloc()) OutOfLineTestObject();
  addOutOfLineCode(ool, lir->mir());

  Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
  Label* falsy = getJumpLabelForBranch(lir->ifFalsy());

  ValueOperand input = ToValue(lir, LTestVAndBranch::Input);
  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
  Register temp = ToRegister(lir->temp2());
  FloatRegister floatTemp = ToFloatRegister(lir->tempFloat());
  const TypeDataList& observedTypes = lir->mir()->observedTypes();

  testValueTruthy(input, tempToUnbox, temp, floatTemp, observedTypes, truthy,
                  falsy, ool);
  // testValueTruthy falls through when the value is truthy.
  masm.jump(truthy);
}
1594 void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
1595 Register input = ToRegister(lir->input());
1596 Register output = ToRegister(lir->output());
1597 const JSAtomState& names = gen->runtime->names();
1598 Label true_, done;
1600 masm.branchTest32(Assembler::NonZero, input, input, &true_);
1601 masm.movePtr(ImmGCPtr(names.false_), output);
1602 masm.jump(&done);
1604 masm.bind(&true_);
1605 masm.movePtr(ImmGCPtr(names.true_), output);
1607 masm.bind(&done);
// Convert an int32 to a string. Fast path: look the value up in the static
// strings table; fall back to the Int32ToString VM call otherwise.
void CodeGenerator::visitIntToString(LIntToString* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  using Fn = JSLinearString* (*)(JSContext*, int);
  OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  masm.lookupStaticIntString(input, output, gen->runtime->staticStrings(),
                             ool->entry());

  masm.bind(ool->rejoin());
}
// Convert a double to a string. Fast path: if the double is exactly an
// int32, reuse the static-strings lookup; otherwise call NumberToString.
void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, double);
  OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  // Try double to integer conversion and run integer to string code.
  masm.convertDoubleToInt32(input, temp, ool->entry(), false);
  masm.lookupStaticIntString(temp, output, gen->runtime->staticStrings(),
                             ool->entry());

  masm.bind(ool->rejoin());
}
// Convert an arbitrary boxed Value to a string, dispatching on the tag.
// Strings unbox inline; int32 uses the static-strings fast path; constants
// (undefined/null/booleans) load interned atoms; doubles, BigInts, and —
// when side effects are allowed — objects and symbols fall back to the
// ToStringSlow VM call; otherwise objects/symbols bail out.
void CodeGenerator::visitValueToString(LValueToString* lir) {
  ValueOperand input = ToValue(lir, LValueToString::InputIndex);
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  Label done;
  Register tag = masm.extractTag(input, output);
  const JSAtomState& names = gen->runtime->names();

  // String
  {
    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    masm.unboxString(input, output);
    masm.jump(&done);
    masm.bind(&notString);
  }

  // Integer
  {
    Label notInteger;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
    Register unboxed = ToTempUnboxRegister(lir->temp0());
    unboxed = masm.extractInt32(input, unboxed);
    masm.lookupStaticIntString(unboxed, output, gen->runtime->staticStrings(),
                               ool->entry());
    masm.jump(&done);
    masm.bind(&notInteger);
  }

  // Double
  {
    // Note: no fastpath. Need two extra registers and can only convert doubles
    // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
    masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
  }

  // Undefined
  {
    Label notUndefined;
    masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
    masm.movePtr(ImmGCPtr(names.undefined), output);
    masm.jump(&done);
    masm.bind(&notUndefined);
  }

  // Null
  {
    Label notNull;
    masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
    masm.movePtr(ImmGCPtr(names.null), output);
    masm.jump(&done);
    masm.bind(&notNull);
  }

  // Boolean
  {
    Label notBoolean, true_;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    masm.branchTestBooleanTruthy(true, input, &true_);
    masm.movePtr(ImmGCPtr(names.false_), output);
    masm.jump(&done);
    masm.bind(&true_);
    masm.movePtr(ImmGCPtr(names.true_), output);
    masm.jump(&done);
    masm.bind(&notBoolean);
  }

  // Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
  if (lir->mir()->mightHaveSideEffects()) {
    // Object
    if (lir->mir()->supportSideEffects()) {
      masm.branchTestObject(Assembler::Equal, tag, ool->entry());
    } else {
      // Bail.
      MOZ_ASSERT(lir->mir()->needsSnapshot());
      Label bail;
      masm.branchTestObject(Assembler::Equal, tag, &bail);
      bailoutFrom(&bail, lir->snapshot());
    }

    // Symbol
    if (lir->mir()->supportSideEffects()) {
      masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
    } else {
      // Bail.
      MOZ_ASSERT(lir->mir()->needsSnapshot());
      Label bail;
      masm.branchTestSymbol(Assembler::Equal, tag, &bail);
      bailoutFrom(&bail, lir->snapshot());
    }
  }

  // BigInt
  {
    // No fastpath currently implemented.
    masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
  }

  masm.assumeUnreachable("Unexpected type for LValueToString.");

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Signature of the store-buffer add/remove helpers invoked below.
using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);

// Call |fun| with |buffer| and the address of the cell slot at
// |holder + offset|, preserving the registers in |liveVolatiles| across the
// ABI call.
static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
                                    size_t offset, Register buffer,
                                    LiveGeneralRegisterSet& liveVolatiles,
                                    StoreBufferMutationFn fun) {
  Label callVM;
  Label exit;

  // Call into the VM to barrier the write. The only registers that need to
  // be preserved are those in liveVolatiles, so once they are saved on the
  // stack all volatile registers are available for use.
  masm.bind(&callVM);
  masm.PushRegsInMask(liveVolatiles);

  // Pick a volatile scratch register distinct from |buffer| and |holder| to
  // hold the effective address of the barriered slot.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(buffer);
  regs.takeUnchecked(holder);
  Register addrReg = regs.takeAny();

  masm.computeEffectiveAddress(Address(holder, offset), addrReg);

  // If no volatile register is left for the ABI-call scratch, temporarily
  // spill |holder| and reuse it.
  bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
  if (needExtraReg) {
    masm.push(holder);
    masm.setupUnalignedABICall(holder);
  } else {
    masm.setupUnalignedABICall(regs.takeAny());
  }
  masm.passABIArg(buffer);
  masm.passABIArg(addrReg);
  masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun),
                   ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);

  if (needExtraReg) {
    masm.pop(holder);
  }
  masm.PopRegsInMask(liveVolatiles);
  masm.bind(&exit);
}
// Emit a post-write barrier for a string slot at |holder + offset| that is
// changing from |prev| to |next|: add the slot address to the store buffer
// when the new value is nursery-allocated, or remove it when only the old
// value was.
// Warning: this function modifies prev and next.
static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
                                  size_t offset, Register prev, Register next,
                                  LiveGeneralRegisterSet& liveVolatiles) {
  Label exit;
  Label checkRemove, putCell;

  // if (next && (buffer = next->storeBuffer()))
  // but we never pass in nullptr for next.
  Register storebuffer = next;
  masm.loadStoreBuffer(next, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);

  // if (prev && prev->storeBuffer())
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
  masm.loadStoreBuffer(prev, prev);
  masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);

  // buffer->putCell(cellp)
  masm.bind(&putCell);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::addCellAddressToStoreBuffer);
  masm.jump(&exit);

  // if (prev && (buffer = prev->storeBuffer()))
  masm.bind(&checkRemove);
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
  masm.loadStoreBuffer(prev, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::removeCellAddressFromStoreBuffer);

  masm.bind(&exit);
}
// Materialize a regexp literal by cloning its source RegExpObject. When the
// source already has a shared (hasShared()), allocate inline from a template
// object; otherwise always take the CloneRegExpObject VM call.
void CodeGenerator::visitRegExp(LRegExp* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());
  JSObject* source = lir->mir()->source();

  using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
  OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
      lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
  if (lir->mir()->hasShared()) {
    TemplateObject templateObject(source);
    masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                        ool->entry());
  } else {
    masm.jump(ool->entry());
  }
  masm.bind(ool->rejoin());
}
1843 static constexpr int32_t RegExpPairsVectorStartOffset(
1844 int32_t inputOutputDataStartOffset) {
1845 return inputOutputDataStartOffset + int32_t(InputOutputDataSize) +
1846 int32_t(sizeof(MatchPairs));
// Frame-pointer-relative address of the MatchPairs::count field in the
// stack-allocated regexp scratch area.
static Address RegExpPairCountAddress(MacroAssembler& masm,
                                      int32_t inputOutputDataStartOffset) {
  return Address(FramePointer, inputOutputDataStartOffset +
                                   int32_t(InputOutputDataSize) +
                                   MatchPairs::offsetOfPairCount());
}
// Update the RegExpStatics after a match: store the pending/matches input
// strings (with the required GC pre/post barriers), the last index, and the
// regexp's source and flags into the lazy fields, and set the
// pendingLazyEvaluation flag.
static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
                                Register input, Register lastIndex,
                                Register staticsReg, Register temp1,
                                Register temp2, gc::Heap initialStringHeap,
                                LiveGeneralRegisterSet& volatileRegs) {
  Address pendingInputAddress(staticsReg,
                              RegExpStatics::offsetOfPendingInput());
  Address matchesInputAddress(staticsReg,
                              RegExpStatics::offsetOfMatchesInput());
  Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
  Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());

  // Pre-barrier the string pointers we are about to overwrite.
  masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);

  if (initialStringHeap == gc::Heap::Default) {
    // Writing into RegExpStatics tenured memory; must post-barrier.
    if (staticsReg.volatile_()) {
      volatileRegs.add(staticsReg);
    }

    masm.loadPtr(pendingInputAddress, temp1);
    masm.storePtr(input, pendingInputAddress);
    masm.movePtr(input, temp2);
    EmitPostWriteBarrierS(masm, staticsReg,
                          RegExpStatics::offsetOfPendingInput(),
                          temp1 /* prev */, temp2 /* next */, volatileRegs);

    masm.loadPtr(matchesInputAddress, temp1);
    masm.storePtr(input, matchesInputAddress);
    masm.movePtr(input, temp2);
    EmitPostWriteBarrierS(masm, staticsReg,
                          RegExpStatics::offsetOfMatchesInput(),
                          temp1 /* prev */, temp2 /* next */, volatileRegs);
  } else {
    // The input string is tenured; no post-barrier is needed.
    masm.debugAssertGCThingIsTenured(input, temp1);
    masm.storePtr(input, pendingInputAddress);
    masm.storePtr(input, matchesInputAddress);
  }

  masm.storePtr(lastIndex,
                Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
  masm.store32(
      Imm32(1),
      Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));

  // Copy the regexp's source string and flags into the lazy fields, read via
  // the RegExpShared stored in the regexp object's SHARED_SLOT.
  masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
                                          RegExpObject::SHARED_SLOT)),
                      temp1, JSVAL_TYPE_PRIVATE_GCTHING);
  masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
  masm.storePtr(temp2, lazySourceAddress);
  static_assert(sizeof(JS::RegExpFlags) == 1, "load size must match flag size");
  masm.load8ZeroExtend(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
  masm.store8(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
}
1913 // Prepare an InputOutputData and optional MatchPairs which space has been
1914 // allocated for on the stack, and try to execute a RegExp on a string input.
1915 // If the RegExp was successfully executed and matched the input, fallthrough.
1916 // Otherwise, jump to notFound or failure.
1918 // inputOutputDataStartOffset is the offset relative to the frame pointer
1919 // register. This offset is negative for the RegExpExecTest stub.
// On success the match results are left in the stack-allocated MatchPairs and
// temp1 holds RegExpRunStatus::Success; temp2/temp3 are clobbered.
1920 static bool PrepareAndExecuteRegExp(MacroAssembler& masm, Register regexp,
1921 Register input, Register lastIndex,
1922 Register temp1, Register temp2,
1923 Register temp3,
1924 int32_t inputOutputDataStartOffset,
1925 gc::Heap initialStringHeap, Label* notFound,
1926 Label* failure) {
1927 JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");
1929 using irregexp::InputOutputData;
1932 * [SMDOC] Stack layout for PrepareAndExecuteRegExp
1934 * Before this function is called, the caller is responsible for
1935 * allocating enough stack space for the following data:
1937 * inputOutputDataStartOffset +-----> +---------------+
1938 * |InputOutputData|
1939 * inputStartAddress +----------> inputStart|
1940 * inputEndAddress +----------> inputEnd|
1941 * startIndexAddress +----------> startIndex|
1942 * matchesAddress +----------> matches|-----+
1943 * +---------------+ |
1944 * matchPairs(Address|Offset) +-----> +---------------+ <--+
1945 * | MatchPairs |
1946 * pairCountAddress +----------> count |
1947 * pairsPointerAddress +----------> pairs |-----+
1948 * +---------------+ |
1949 * pairsArray(Address|Offset) +-----> +---------------+ <--+
1950 * | MatchPair |
1951 * firstMatchStartAddress +----------> start | <--+
1952 * | limit | |
1953 * +---------------+ |
1954 * . |
1955 * . Reserved space for
1956 * . RegExpObject::MaxPairCount
1957 * . MatchPair objects
1958 * . |
1959 * +---------------+ |
1960 * | MatchPair | |
1961 * | start | |
1962 * | limit | <--+
1963 * +---------------+
// Frame-pointer-relative offsets for the three stack regions laid out in the
// diagram above: InputOutputData, then MatchPairs, then the MatchPair array.
1966 int32_t ioOffset = inputOutputDataStartOffset;
1967 int32_t matchPairsOffset = ioOffset + int32_t(sizeof(InputOutputData));
1968 int32_t pairsArrayOffset = matchPairsOffset + int32_t(sizeof(MatchPairs));
1970 Address inputStartAddress(FramePointer,
1971 ioOffset + InputOutputData::offsetOfInputStart());
1972 Address inputEndAddress(FramePointer,
1973 ioOffset + InputOutputData::offsetOfInputEnd());
1974 Address startIndexAddress(FramePointer,
1975 ioOffset + InputOutputData::offsetOfStartIndex());
1976 Address matchesAddress(FramePointer,
1977 ioOffset + InputOutputData::offsetOfMatches());
1979 Address matchPairsAddress(FramePointer, matchPairsOffset);
1980 Address pairCountAddress(FramePointer,
1981 matchPairsOffset + MatchPairs::offsetOfPairCount());
1982 Address pairsPointerAddress(FramePointer,
1983 matchPairsOffset + MatchPairs::offsetOfPairs());
1985 Address pairsArrayAddress(FramePointer, pairsArrayOffset);
1986 Address firstMatchStartAddress(FramePointer,
1987 pairsArrayOffset + MatchPair::offsetOfStart());
1989 // First, fill in a skeletal MatchPairs instance on the stack. This will be
1990 // passed to the OOL stub in the caller if we aren't able to execute the
1991 // RegExp inline, and that stub needs to be able to determine whether the
1992 // execution finished successfully.
1994 // Initialize MatchPairs::pairCount to 1. The correct value can only
1995 // be determined after loading the RegExpShared. If the RegExpShared
1996 // has Kind::Atom, this is the correct pairCount.
1997 masm.store32(Imm32(1), pairCountAddress);
1999 // Initialize MatchPairs::pairs pointer
2000 masm.computeEffectiveAddress(pairsArrayAddress, temp1);
2001 masm.storePtr(temp1, pairsPointerAddress);
2003 // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch
2004 masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);
2006 // Determine the set of volatile inputs to save when calling into C++ or
2007 // regexp code.
2008 LiveGeneralRegisterSet volatileRegs;
2009 if (lastIndex.volatile_()) {
2010 volatileRegs.add(lastIndex);
2012 if (input.volatile_()) {
2013 volatileRegs.add(input);
2015 if (regexp.volatile_()) {
2016 volatileRegs.add(regexp);
2019 // Ensure the input string is not a rope.
2020 Label isLinear;
2021 masm.branchIfNotRope(input, &isLinear);
2023 masm.PushRegsInMask(volatileRegs);
2025 using Fn = JSLinearString* (*)(JSString*);
2026 masm.setupUnalignedABICall(temp1);
2027 masm.passABIArg(input);
2028 masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
2030 MOZ_ASSERT(!volatileRegs.has(temp1));
2031 masm.storeCallPointerResult(temp1);
2032 masm.PopRegsInMask(volatileRegs);
// LinearizeForCharAccessPure returns nullptr on failure; in that case bail
// out to the (slower) fallback path.
2034 masm.branchTestPtr(Assembler::Zero, temp1, temp1, failure);
2036 masm.bind(&isLinear);
2038 // Load the RegExpShared.
2039 Register regexpReg = temp1;
2040 Address sharedSlot = Address(
2041 regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
2042 masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
2043 masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);
2045 // Handle Atom matches
2046 Label notAtom, checkSuccess;
2047 masm.branchPtr(Assembler::Equal,
2048 Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
2049 ImmWord(0), &notAtom);
// Atom patterns are matched with a direct C++ call (ExecuteRegExpAtomRaw)
// instead of running compiled regexp JIT code.
2051 masm.computeEffectiveAddress(matchPairsAddress, temp3);
2053 masm.PushRegsInMask(volatileRegs);
2054 using Fn = RegExpRunStatus (*)(RegExpShared* re, JSLinearString* input,
2055 size_t start, MatchPairs* matchPairs);
2056 masm.setupUnalignedABICall(temp2);
2057 masm.passABIArg(regexpReg);
2058 masm.passABIArg(input);
2059 masm.passABIArg(lastIndex);
2060 masm.passABIArg(temp3);
2061 masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();
2063 MOZ_ASSERT(!volatileRegs.has(temp1));
2064 masm.storeCallInt32Result(temp1);
2065 masm.PopRegsInMask(volatileRegs);
2067 masm.jump(&checkSuccess);
2069 masm.bind(&notAtom);
2071 // Don't handle regexps with too many capture pairs.
2072 masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
2073 masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
2074 failure);
2076 // Fill in the pair count in the MatchPairs on the stack.
2077 masm.store32(temp2, pairCountAddress);
2079 // Load code pointer and length of input (in bytes).
2080 // Store the input start in the InputOutputData.
2081 Register codePointer = temp1; // Note: temp1 was previously regexpReg.
2082 Register byteLength = temp3;
2084 Label isLatin1, done;
2085 masm.loadStringLength(input, byteLength);
2087 masm.branchLatin1String(input, &isLatin1);
2089 // Two-byte input
2090 masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
2091 masm.storePtr(temp2, inputStartAddress);
2092 masm.loadPtr(
2093 Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
2094 codePointer);
// Two-byte: byte length is char count * 2.
2095 masm.lshiftPtr(Imm32(1), byteLength);
2096 masm.jump(&done);
2098 // Latin1 input
2099 masm.bind(&isLatin1);
2100 masm.loadStringChars(input, temp2, CharEncoding::Latin1);
2101 masm.storePtr(temp2, inputStartAddress);
2102 masm.loadPtr(
2103 Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
2104 codePointer);
2106 masm.bind(&done);
2108 // Store end pointer
2109 masm.addPtr(byteLength, temp2);
2110 masm.storePtr(temp2, inputEndAddress);
2113 // Guard that the RegExpShared has been compiled for this type of input.
2114 // If it has not been compiled, we fall back to the OOL case, which will
2115 // do a VM call into the interpreter.
2116 // TODO: add an interpreter trampoline?
2117 masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
2118 masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);
2120 // Finish filling in the InputOutputData instance on the stack
2121 masm.computeEffectiveAddress(matchPairsAddress, temp2);
2122 masm.storePtr(temp2, matchesAddress);
2123 masm.storePtr(lastIndex, startIndexAddress);
2125 // Execute the RegExp.
2126 masm.computeEffectiveAddress(
2127 Address(FramePointer, inputOutputDataStartOffset), temp2);
2128 masm.PushRegsInMask(volatileRegs);
2129 masm.setupUnalignedABICall(temp3);
2130 masm.passABIArg(temp2);
2131 masm.callWithABI(codePointer);
2132 masm.storeCallInt32Result(temp1);
2133 masm.PopRegsInMask(volatileRegs);
// temp1 now holds the RegExpRunStatus from either the atom path or the JIT
// code call; dispatch on it.
2135 masm.bind(&checkSuccess);
2136 masm.branch32(Assembler::Equal, temp1,
2137 Imm32(int32_t(RegExpRunStatus::Success_NotFound)), notFound);
2138 masm.branch32(Assembler::Equal, temp1, Imm32(int32_t(RegExpRunStatus::Error)),
2139 failure);
2141 // Lazily update the RegExpStatics.
2142 size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
2143 RegExpRealm::offsetOfRegExpStatics();
2144 masm.loadGlobalObjectData(temp1);
2145 masm.loadPtr(Address(temp1, offset), temp1);
2146 UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
2147 initialStringHeap, volatileRegs);
2149 return true;
// Forward declaration; used by CreateDependentString::generate below to copy
// the matched characters into a newly allocated inline string.
2152 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
2153 Register len, Register byteOpScratch,
2154 CharEncoding encoding,
2155 size_t maximumLength = SIZE_MAX);
// Helper that emits code to create the result string for one capture group:
// either the empty atom, the base string itself, a new (thin/fat) inline
// string, or a dependent string sharing the base string's characters.
2157 class CreateDependentString {
2158 CharEncoding encoding_;
// Output register that receives the created string.
2159 Register string_;
2160 Register temp1_;
2161 Register temp2_;
// Jump target when even the OOL allocation fallback fails.
2162 Label* failure_;
// One fallback/join label pair per allocation kind; the fallback path is
// emitted separately via generateFallback().
2164 enum class FallbackKind : uint8_t {
2165 InlineString,
2166 FatInlineString,
2167 NotInlineString,
2168 Count
2170 mozilla::EnumeratedArray<FallbackKind, Label, size_t(FallbackKind::Count)>
2171 fallbacks_, joins_;
2173 public:
2174 CreateDependentString(CharEncoding encoding, Register string, Register temp1,
2175 Register temp2, Label* failure)
2176 : encoding_(encoding),
2177 string_(string),
2178 temp1_(temp1),
2179 temp2_(temp2),
2180 failure_(failure) {}
2182 Register string() const { return string_; }
2183 CharEncoding encoding() const { return encoding_; }
2185 // Generate code that creates DependentString.
2186 // Caller should call generateFallback after masm.ret(), to generate
2187 // fallback path.
2188 void generate(MacroAssembler& masm, const JSAtomState& names,
2189 CompileRuntime* runtime, Register base,
2190 BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
2191 gc::Heap initialStringHeap);
2193 // Generate fallback path for creating DependentString.
2194 void generateFallback(MacroAssembler& masm);
// Emit the inline fast paths for creating a capture-group result string from
// |base| and the [startIndexAddress, limitIndexAddress) character range.
// Allocation failures jump to the per-kind fallbacks_ labels (see
// generateFallback) and rejoin at joins_.
2197 void CreateDependentString::generate(MacroAssembler& masm,
2198 const JSAtomState& names,
2199 CompileRuntime* runtime, Register base,
2200 BaseIndex startIndexAddress,
2201 BaseIndex limitIndexAddress,
2202 gc::Heap initialStringHeap) {
2203 JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
2204 (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
// Allocate a GC string of the requested kind, then (after the fallback
// rejoins) store the appropriate initial flags word.
2206 auto newGCString = [&](FallbackKind kind) {
2207 uint32_t flags = kind == FallbackKind::InlineString
2208 ? JSString::INIT_THIN_INLINE_FLAGS
2209 : kind == FallbackKind::FatInlineString
2210 ? JSString::INIT_FAT_INLINE_FLAGS
2211 : JSString::INIT_DEPENDENT_FLAGS;
2212 if (encoding_ == CharEncoding::Latin1) {
2213 flags |= JSString::LATIN1_CHARS_BIT;
2216 if (kind != FallbackKind::FatInlineString) {
2217 masm.newGCString(string_, temp2_, initialStringHeap, &fallbacks_[kind]);
2218 } else {
2219 masm.newGCFatInlineString(string_, temp2_, initialStringHeap,
2220 &fallbacks_[kind]);
2222 masm.bind(&joins_[kind]);
2223 masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
2226 // Compute the string length.
2227 masm.load32(startIndexAddress, temp2_);
2228 masm.load32(limitIndexAddress, temp1_);
2229 masm.sub32(temp2_, temp1_);
2231 Label done, nonEmpty;
2233 // Zero length matches use the empty string.
2234 masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
2235 masm.movePtr(ImmGCPtr(names.empty_), string_);
2236 masm.jump(&done);
2238 masm.bind(&nonEmpty);
2240 // Complete matches use the base string.
2241 Label nonBaseStringMatch;
2242 masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
2243 masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
2244 temp1_, &nonBaseStringMatch);
2245 masm.movePtr(base, string_);
2246 masm.jump(&done);
2248 masm.bind(&nonBaseStringMatch);
2250 Label notInline;
2252 int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
2253 ? JSFatInlineString::MAX_LENGTH_LATIN1
2254 : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
2255 masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
2257 // Make a thin or fat inline string.
2258 Label stringAllocated, fatInline;
2260 int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
2261 ? JSThinInlineString::MAX_LENGTH_LATIN1
2262 : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
2263 masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
2264 &fatInline);
2265 if (encoding_ == CharEncoding::Latin1) {
2266 // One character Latin-1 strings can be loaded directly from the
2267 // static strings table.
2268 Label thinInline;
2269 masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
2271 static_assert(
2272 StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
2273 "Latin-1 strings can be loaded from static strings");
2275 masm.loadStringChars(base, temp1_, encoding_);
2276 masm.loadChar(temp1_, temp2_, temp1_, encoding_);
2278 masm.lookupStaticString(temp1_, string_, runtime->staticStrings());
2280 masm.jump(&done);
2282 masm.bind(&thinInline);
2285 newGCString(FallbackKind::InlineString);
2286 masm.jump(&stringAllocated);
2288 masm.bind(&fatInline);
2289 { newGCString(FallbackKind::FatInlineString); }
2290 masm.bind(&stringAllocated);
2292 masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
// Free up |string_| and |base| as scratch registers for the copy below;
// they are restored afterwards.
2294 masm.push(string_);
2295 masm.push(base);
2297 MOZ_ASSERT(startIndexAddress.base == FramePointer,
2298 "startIndexAddress is still valid after stack pushes");
2300 // Load chars pointer for the new string.
2301 masm.loadInlineStringCharsForStore(string_, string_);
2303 // Load the source characters pointer.
2304 masm.loadStringChars(base, temp2_, encoding_);
2305 masm.load32(startIndexAddress, base);
2306 masm.addToCharPtr(temp2_, base, encoding_);
2308 CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);
2310 masm.pop(base);
2311 masm.pop(string_);
2313 masm.jump(&done);
2316 masm.bind(&notInline);
2319 // Make a dependent string.
2320 // Warning: string may be tenured (if the fallback case is hit), so
2321 // stores into it must be post barriered.
2322 newGCString(FallbackKind::NotInlineString);
2324 masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
2326 masm.loadNonInlineStringChars(base, temp1_, encoding_);
2327 masm.load32(startIndexAddress, temp2_);
2328 masm.addToCharPtr(temp1_, temp2_, encoding_);
2329 masm.storeNonInlineStringChars(temp1_, string_);
2330 masm.storeDependentStringBase(base, string_);
2331 masm.movePtr(base, temp1_);
2333 // Follow any base pointer if the input is itself a dependent string.
2334 // Watch for undepended strings, which have a base pointer but don't
2335 // actually share their characters with it.
2336 Label noBase;
2337 masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
2338 masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
2339 masm.branchTest32(Assembler::Zero, temp2_, Imm32(JSString::DEPENDENT_BIT),
2340 &noBase);
2341 masm.loadDependentStringBase(base, temp1_);
2342 masm.storeDependentStringBase(temp1_, string_);
2343 masm.bind(&noBase);
2345 // Post-barrier the base store, whether it was the direct or indirect
2346 // base (both will end up in temp1 here).
2347 masm.branchPtrInNurseryChunk(Assembler::Equal, string_, temp2_, &done);
2348 masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp1_, temp2_, &done);
2350 LiveRegisterSet regsToSave(RegisterSet::Volatile());
2351 regsToSave.takeUnchecked(temp1_);
2352 regsToSave.takeUnchecked(temp2_);
2354 masm.PushRegsInMask(regsToSave);
2356 masm.mov(ImmPtr(runtime), temp1_);
2358 using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
2359 masm.setupUnalignedABICall(temp2_);
2360 masm.passABIArg(temp1_);
2361 masm.passABIArg(string_);
2362 masm.callWithABI<Fn, PostWriteBarrier>();
2364 masm.PopRegsInMask(regsToSave);
2367 masm.bind(&done);
// Emit the out-of-line allocation fallbacks for generate(): one C++ call per
// FallbackKind, each rejoining the fast path at the matching joins_ label.
// Must be emitted after the stub's masm.ret() (see class comment above).
2370 void CreateDependentString::generateFallback(MacroAssembler& masm) {
2371 JitSpew(JitSpew_Codegen,
2372 "# Emitting CreateDependentString fallback (encoding=%s)",
2373 (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
2375 LiveRegisterSet regsToSave(RegisterSet::Volatile());
2376 regsToSave.takeUnchecked(string_);
2377 regsToSave.takeUnchecked(temp2_);
2379 for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
2380 masm.bind(&fallbacks_[kind]);
2382 masm.PushRegsInMask(regsToSave);
2384 using Fn = void* (*)(JSContext* cx);
// |string_| doubles as the cx scratch here; its old value is not needed
// because the call's result overwrites it below.
2385 masm.setupUnalignedABICall(string_);
2386 masm.loadJSContext(string_);
2387 masm.passABIArg(string_);
2388 if (kind == FallbackKind::FatInlineString) {
2389 masm.callWithABI<Fn, AllocateFatInlineString>();
2390 } else {
2391 masm.callWithABI<Fn, AllocateDependentString>();
2393 masm.storeCallPointerResult(string_);
2395 masm.PopRegsInMask(regsToSave);
// A null result means allocation failed even in C++; bail out entirely.
2397 masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);
2399 masm.jump(&joins_[kind]);
2403 // Generate the RegExpMatcher and RegExpExecMatch stubs. These are very similar,
2404 // but RegExpExecMatch also has to load and update .lastIndex for global/sticky
2405 // regular expressions.
// Returns the match-result array object in JSReturnOperand, |null| on no
// match, or |undefined| to signal that the caller must take the OOL VM-call
// path. Returns nullptr (C++) on linking failure.
2406 static JitCode* GenerateRegExpMatchStubShared(JSContext* cx,
2407 gc::Heap initialStringHeap,
2408 bool isExecMatch) {
2409 if (isExecMatch) {
2410 JitSpew(JitSpew_Codegen, "# Emitting RegExpExecMatch stub");
2411 } else {
2412 JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
2415 // |initialStringHeap| could be stale after a GC.
2416 JS::AutoCheckCannotGC nogc(cx);
2418 Register regexp = RegExpMatcherRegExpReg;
2419 Register input = RegExpMatcherStringReg;
2420 Register lastIndex = RegExpMatcherLastIndexReg;
2421 ValueOperand result = JSReturnOperand;
2423 // We are free to clobber all registers, as LRegExpMatcher is a call
2424 // instruction.
2425 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2426 regs.take(input);
2427 regs.take(regexp);
2428 regs.take(lastIndex);
2430 Register temp1 = regs.takeAny();
2431 Register temp2 = regs.takeAny();
2432 Register temp3 = regs.takeAny();
2433 Register maybeTemp4 = InvalidReg;
2434 if (!regs.empty()) {
2435 // There are not enough registers on x86.
2436 maybeTemp4 = regs.takeAny();
2438 Register maybeTemp5 = InvalidReg;
2439 if (!regs.empty()) {
2440 // There are not enough registers on x86.
2441 maybeTemp5 = regs.takeAny();
2444 Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
2445 Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
2447 TempAllocator temp(&cx->tempLifoAlloc());
2448 JitContext jcx(cx);
2449 StackMacroAssembler masm(cx, temp);
2450 AutoCreatedBy acb(masm, "GenerateRegExpMatchStubShared");
2452 #ifdef JS_USE_LINK_REGISTER
2453 masm.pushReturnAddress();
2454 #endif
2455 masm.push(FramePointer);
2456 masm.moveStackPtrTo(FramePointer);
2458 Label notFoundZeroLastIndex;
2459 if (isExecMatch) {
2460 masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
2463 // The InputOutputData is placed above the frame pointer and return address on
2464 // the stack.
2465 int32_t inputOutputDataStartOffset = 2 * sizeof(void*);
2467 Label notFound, oolEntry;
2468 if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
2469 temp3, inputOutputDataStartOffset,
2470 initialStringHeap, &notFound, &oolEntry)) {
2471 return nullptr;
2474 // If a regexp has named captures, fall back to the OOL stub, which
2475 // will end up calling CreateRegExpMatchResults.
2476 Register shared = temp2;
2477 masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
2478 RegExpObject::SHARED_SLOT)),
2479 shared, JSVAL_TYPE_PRIVATE_GCTHING);
2480 masm.branchPtr(Assembler::NotEqual,
2481 Address(shared, RegExpShared::offsetOfGroupsTemplate()),
2482 ImmWord(0), &oolEntry);
2484 // Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
2485 masm.branchTest32(Assembler::NonZero,
2486 Address(shared, RegExpShared::offsetOfFlags()),
2487 Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);
2489 Address pairCountAddress =
2490 RegExpPairCountAddress(masm, inputOutputDataStartOffset);
2492 // Construct the result.
2493 Register object = temp1;
2495 // In most cases, the array will have just 1-2 elements, so we optimize for
2496 // that by emitting separate code paths for capacity 2/6/14 (= 4/8/16 slots
2497 // because two slots are used for the elements header).
2499 // Load the array length in temp2 and the shape in temp3.
2500 Label allocated;
2501 masm.load32(pairCountAddress, temp2);
2502 size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
2503 RegExpRealm::offsetOfNormalMatchResultShape();
2504 masm.loadGlobalObjectData(temp3);
2505 masm.loadPtr(Address(temp3, offset), temp3);
2507 auto emitAllocObject = [&](size_t elementCapacity) {
2508 gc::AllocKind kind = GuessArrayGCKind(elementCapacity);
2509 MOZ_ASSERT(CanChangeToBackgroundAllocKind(kind, &ArrayObject::class_));
2510 kind = ForegroundToBackgroundAllocKind(kind);
2512 #ifdef DEBUG
2513 // Assert all of the available slots are used for |elementCapacity|
2514 // elements.
2515 size_t usedSlots = ObjectElements::VALUES_PER_HEADER + elementCapacity;
2516 MOZ_ASSERT(usedSlots == GetGCKindSlots(kind));
2517 #endif
2519 constexpr size_t numUsedDynamicSlots =
2520 RegExpRealm::MatchResultObjectSlotSpan;
2521 constexpr size_t numDynamicSlots =
2522 RegExpRealm::MatchResultObjectNumDynamicSlots;
2523 constexpr size_t arrayLength = 1;
2524 masm.createArrayWithFixedElements(object, temp3, temp2, temp3,
2525 arrayLength, elementCapacity,
2526 numUsedDynamicSlots, numDynamicSlots,
2527 kind, gc::Heap::Default, &oolEntry);
2530 Label moreThan2;
2531 masm.branch32(Assembler::Above, temp2, Imm32(2), &moreThan2);
2532 emitAllocObject(2);
2533 masm.jump(&allocated);
2535 Label moreThan6;
2536 masm.bind(&moreThan2);
2537 masm.branch32(Assembler::Above, temp2, Imm32(6), &moreThan6);
2538 emitAllocObject(6);
2539 masm.jump(&allocated);
2541 masm.bind(&moreThan6);
2542 static_assert(RegExpObject::MaxPairCount == 14);
2543 emitAllocObject(RegExpObject::MaxPairCount);
2545 masm.bind(&allocated);
2548 // clang-format off
2550 * [SMDOC] Stack layout for the RegExpMatcher stub
2552 * +---------------+
2553 * FramePointer +-----> |Caller-FramePtr|
2554 * +---------------+
2555 * |Return-Address |
2556 * +---------------+
2557 * inputOutputDataStartOffset +-----> +---------------+
2558 * |InputOutputData|
2559 * +---------------+
2560 * +---------------+
2561 * | MatchPairs |
2562 * pairsCountAddress +-----------> count |
2563 * | pairs |
2564 * | |
2565 * +---------------+
2566 * pairsVectorStartOffset +-----> +---------------+
2567 * | MatchPair |
2568 * matchPairStart +------------> start | <-------+
2569 * matchPairLimit +------------> limit | | Reserved space for
2570 * +---------------+ | `RegExpObject::MaxPairCount`
2571 * . | MatchPair objects.
2572 * . |
2573 * . | `count` objects will be
2574 * +---------------+ | initialized and can be
2575 * | MatchPair | | accessed below.
2576 * | start | <-------+
2577 * | limit |
2578 * +---------------+
2580 // clang-format on
2582 static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
2583 "MatchPair consists of two int32 values representing the start"
2584 "and the end offset of the match");
2586 int32_t pairsVectorStartOffset =
2587 RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
2589 // Incremented by one below for each match pair.
2590 Register matchIndex = temp2;
2591 masm.move32(Imm32(0), matchIndex);
2593 // The element in which to store the result of the current match.
2594 size_t elementsOffset = NativeObject::offsetOfFixedElements();
2595 BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);
2597 // The current match pair's "start" and "limit" member.
2598 BaseIndex matchPairStart(FramePointer, matchIndex, TimesEight,
2599 pairsVectorStartOffset + MatchPair::offsetOfStart());
2600 BaseIndex matchPairLimit(FramePointer, matchIndex, TimesEight,
2601 pairsVectorStartOffset + MatchPair::offsetOfLimit());
// On register-starved targets |regexp| and/or |lastIndex| are pushed and
// reused as string temps below; dependent-string failures must then restore
// them before falling through to the OOL entry.
2603 Label* depStrFailure = &oolEntry;
2604 Label restoreRegExpAndLastIndex;
2606 Register temp4;
2607 if (maybeTemp4 == InvalidReg) {
2608 depStrFailure = &restoreRegExpAndLastIndex;
2610 // We don't have enough registers for a fourth temporary. Reuse |regexp|
2611 // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
2612 masm.push(regexp);
2613 temp4 = regexp;
2614 } else {
2615 temp4 = maybeTemp4;
2618 Register temp5;
2619 if (maybeTemp5 == InvalidReg) {
2620 depStrFailure = &restoreRegExpAndLastIndex;
2622 // We don't have enough registers for a fifth temporary. Reuse |lastIndex|
2623 // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
2624 masm.push(lastIndex);
2625 temp5 = lastIndex;
2626 } else {
2627 temp5 = maybeTemp5;
2630 auto maybeRestoreRegExpAndLastIndex = [&]() {
2631 if (maybeTemp5 == InvalidReg) {
2632 masm.pop(lastIndex);
2634 if (maybeTemp4 == InvalidReg) {
2635 masm.pop(regexp);
2639 // Loop to construct the match strings. There are two different loops,
2640 // depending on whether the input is a Two-Byte or a Latin-1 string.
2641 CreateDependentString depStrs[]{
2642 {CharEncoding::TwoByte, temp3, temp4, temp5, depStrFailure},
2643 {CharEncoding::Latin1, temp3, temp4, temp5, depStrFailure},
2647 Label isLatin1, done;
2648 masm.branchLatin1String(input, &isLatin1);
2650 for (auto& depStr : depStrs) {
2651 if (depStr.encoding() == CharEncoding::Latin1) {
2652 masm.bind(&isLatin1);
2655 Label matchLoop;
2656 masm.bind(&matchLoop);
2658 static_assert(MatchPair::NoMatch == -1,
2659 "MatchPair::start is negative if no match was found");
2661 Label isUndefined, storeDone;
2662 masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
2663 &isUndefined);
2665 depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
2666 input, matchPairStart, matchPairLimit,
2667 initialStringHeap);
2669 // Storing into nursery-allocated results object's elements; no post
2670 // barrier.
2671 masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
2672 masm.jump(&storeDone);
2674 masm.bind(&isUndefined);
2675 { masm.storeValue(UndefinedValue(), objectMatchElement); }
2676 masm.bind(&storeDone);
2678 masm.add32(Imm32(1), matchIndex);
2679 masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
2680 &done);
2681 masm.jump(&matchLoop);
2684 #ifdef DEBUG
2685 masm.assumeUnreachable("The match string loop doesn't fall through.");
2686 #endif
2688 masm.bind(&done);
2691 maybeRestoreRegExpAndLastIndex();
2693 // Fill in the rest of the output object.
2694 masm.store32(
2695 matchIndex,
2696 Address(object,
2697 elementsOffset + ObjectElements::offsetOfInitializedLength()));
2698 masm.store32(
2699 matchIndex,
2700 Address(object, elementsOffset + ObjectElements::offsetOfLength()));
2702 Address firstMatchPairStartAddress(
2703 FramePointer, pairsVectorStartOffset + MatchPair::offsetOfStart());
2704 Address firstMatchPairLimitAddress(
2705 FramePointer, pairsVectorStartOffset + MatchPair::offsetOfLimit());
2707 static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
2708 "First slot holds the 'index' property");
2709 static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
2710 "Second slot holds the 'input' property");
2712 masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
2714 masm.load32(firstMatchPairStartAddress, temp3);
2715 masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));
2717 // No post barrier needed (address is within nursery object.)
2718 masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));
2720 // For the ExecMatch stub, if the regular expression is global or sticky, we
2721 // have to update its .lastIndex slot.
2722 if (isExecMatch) {
2723 MOZ_ASSERT(object != lastIndex);
2724 Label notGlobalOrSticky;
2725 masm.branchTest32(Assembler::Zero, flagsSlot,
2726 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
2727 &notGlobalOrSticky);
2728 masm.load32(firstMatchPairLimitAddress, lastIndex);
2729 masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
2730 masm.bind(&notGlobalOrSticky);
2733 // All done!
2734 masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
2735 masm.pop(FramePointer);
2736 masm.ret();
// No-match path: return |null| (and, for ExecMatch on global/sticky
// regexps, reset .lastIndex to 0).
2738 masm.bind(&notFound);
2739 if (isExecMatch) {
2740 Label notGlobalOrSticky;
2741 masm.branchTest32(Assembler::Zero, flagsSlot,
2742 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
2743 &notGlobalOrSticky);
2744 masm.bind(&notFoundZeroLastIndex);
2745 masm.storeValue(Int32Value(0), lastIndexSlot);
2746 masm.bind(&notGlobalOrSticky);
2748 masm.moveValue(NullValue(), result);
2749 masm.pop(FramePointer);
2750 masm.ret();
2752 // Fallback paths for CreateDependentString.
2753 for (auto& depStr : depStrs) {
2754 depStr.generateFallback(masm);
2757 // Fall-through to the ool entry after restoring the registers.
2758 masm.bind(&restoreRegExpAndLastIndex);
2759 maybeRestoreRegExpAndLastIndex();
2761 // Use an undefined value to signal to the caller that the OOL stub needs to
2762 // be called.
2763 masm.bind(&oolEntry);
2764 masm.moveValue(UndefinedValue(), result);
2765 masm.pop(FramePointer);
2766 masm.ret();
2768 Linker linker(masm);
2769 JitCode* code = linker.newCode(cx, CodeKind::Other);
2770 if (!code) {
2771 return nullptr;
2774 const char* name = isExecMatch ? "RegExpExecMatchStub" : "RegExpMatcherStub";
2775 CollectPerfSpewerJitCodeProfile(code, name);
2776 #ifdef MOZ_VTUNE
2777 vtune::MarkStub(code, name);
2778 #endif
2780 return code;
// Thin wrapper: build the RegExpMatcher flavor of the shared match stub.
2783 JitCode* JitZone::generateRegExpMatcherStub(JSContext* cx) {
2784 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2785 /* isExecMatch = */ false);
// Thin wrapper: build the RegExpExecMatch flavor of the shared match stub
// (additionally maintains .lastIndex; see GenerateRegExpMatchStubShared).
2788 JitCode* JitZone::generateRegExpExecMatchStub(JSContext* cx) {
2789 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2790 /* isExecMatch = */ true);
// OOL path for LRegExpMatcher: entered when the inline stub returns
// |undefined|; performs the full VM call (see visitOutOfLineRegExpMatcher).
2793 class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator> {
2794 LRegExpMatcher* lir_;
2796 public:
2797 explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) {}
2799 void accept(CodeGenerator* codegen) override {
2800 codegen->visitOutOfLineRegExpMatcher(this);
2803 LRegExpMatcher* lir() const { return lir_; }
// Emit the OOL VM call for RegExpMatcher: re-uses the MatchPairs that the
// inline stub already laid out on the stack, then jumps back to the rejoin
// point.
2806 void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) {
2807 LRegExpMatcher* lir = ool->lir();
2808 Register lastIndex = ToRegister(lir->lastIndex());
2809 Register input = ToRegister(lir->string());
2810 Register regexp = ToRegister(lir->regexp());
2812 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2813 regs.take(lastIndex);
2814 regs.take(input);
2815 regs.take(regexp);
2816 Register temp = regs.takeAny();
// The MatchPairs sit immediately above the InputOutputData in the reserved
// stack area; pass their address to the VM function.
2818 masm.computeEffectiveAddress(
2819 Address(masm.getStackPointer(), InputOutputDataSize), temp);
2821 pushArg(temp);
2822 pushArg(lastIndex);
2823 pushArg(input);
2824 pushArg(regexp);
2826 // We are not using oolCallVM because we are in a Call, and that live
2827 // registers are already saved by the the register allocator.
2828 using Fn =
2829 bool (*)(JSContext*, HandleObject regexp, HandleString input,
2830 int32_t lastIndex, MatchPairs* pairs, MutableHandleValue output);
2831 callVM<Fn, RegExpMatcherRaw>(lir);
2833 masm.jump(ool->rejoin());
// Emit the inline RegExpMatcher fast path: reserve the stub's stack area,
// call the shared stub, and divert to the OOL VM call when the stub signals
// failure by returning |undefined|.
2836 void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
2837 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
2838 MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
2839 MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
2840 MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
// The stub's fixed input registers must not alias the return Value
// registers, since the result overwrites them.
2842 #if defined(JS_NUNBOX32)
2843 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
2844 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
2845 static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
2846 static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
2847 static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
2848 static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
2849 #elif defined(JS_PUNBOX64)
2850 static_assert(RegExpMatcherRegExpReg != JSReturnReg);
2851 static_assert(RegExpMatcherStringReg != JSReturnReg);
2852 static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
2853 #endif
2855 masm.reserveStack(RegExpReservedStack);
2857 OutOfLineRegExpMatcher* ool = new (alloc()) OutOfLineRegExpMatcher(lir);
2858 addOutOfLineCode(ool, lir->mir());
2860 const JitZone* jitZone = gen->realm->zone()->jitZone();
2861 JitCode* regExpMatcherStub =
2862 jitZone->regExpMatcherStubNoBarrier(&zoneStubsToReadBarrier_);
2863 masm.call(regExpMatcherStub);
// |undefined| is the stub's "take the OOL path" sentinel.
2864 masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
2865 masm.bind(ool->rejoin());
2867 masm.freeStack(RegExpReservedStack);
// Out-of-line fallback for LRegExpExecMatch; dispatches back to
// CodeGenerator::visitOutOfLineRegExpExecMatch, which performs the VM call.
class OutOfLineRegExpExecMatch : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpExecMatch* lir_;

 public:
  explicit OutOfLineRegExpExecMatch(LRegExpExecMatch* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpExecMatch(this);
  }

  // The LIR instruction this OOL path belongs to.
  LRegExpExecMatch* lir() const { return lir_; }
};
// Slow path for RegExpExecMatch: call into the VM when the inline stub
// signalled failure.
void CodeGenerator::visitOutOfLineRegExpExecMatch(
    OutOfLineRegExpExecMatch* ool) {
  LRegExpExecMatch* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Grab any free register as a temp for the MatchPairs pointer.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // The MatchPairs live on the stack just above the InputOutputData
  // reserved by visitRegExpExecMatch.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  // Arguments are pushed in reverse order of the Fn signature.
  pushArg(temp);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
               MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpBuiltinExecMatchFromJit>(lir);
  masm.jump(ool->rejoin());
}
// Emit the inline fast path for RegExp.prototype.exec-style matching via the
// per-zone RegExpExecMatch stub; |undefined| result falls back to the VM.
void CodeGenerator::visitRegExpExecMatch(LRegExpExecMatch* lir) {
  // The stub expects its operands in these fixed registers.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // Input registers must not alias the JS return registers.
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
#endif

  // Reserve stack for the InputOutputData/MatchPairs used by the stub.
  masm.reserveStack(RegExpReservedStack);

  auto* ool = new (alloc()) OutOfLineRegExpExecMatch(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpExecMatchStub =
      jitZone->regExpExecMatchStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpExecMatchStub);
  // |undefined| result means the stub could not handle the match.
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());

  masm.bind(ool->rejoin());
  masm.freeStack(RegExpReservedStack);
}
// Generate the per-zone RegExpSearcher stub. On success it returns the match
// start index in ReturnReg and stashes the match limit in
// cx->regExpSearcherLastLimit; RegExpSearcherResultNotFound /
// RegExpSearcherResultFailed are returned for no-match / retry-in-C++.
JitCode* JitZone::generateRegExpSearcherStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");

  Register regexp = RegExpSearcherRegExpReg;
  Register input = RegExpSearcherStringReg;
  Register lastIndex = RegExpSearcherLastIndexReg;
  Register result = ReturnReg;

  // We are free to clobber all registers, as LRegExpSearcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpSearcherStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

#ifdef DEBUG
  // Store sentinel value to cx->regExpSearcherLastLimit.
  // See comment in RegExpSearcherImpl.
  masm.loadJSContext(temp1);
  masm.store32(Imm32(RegExpSearcherLastLimitSentinel),
               Address(temp1, JSContext::offsetOfRegExpSearcherLastLimit()));
#endif

  // The InputOutputData is placed above the frame pointer and return address on
  // the stack.
  int32_t inputOutputDataStartOffset = 2 * sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // clang-format off
  /*
   * [SMDOC] Stack layout for the RegExpSearcher stub
   *
   *                                    +---------------+
   *               FramePointer +-----> |Caller-FramePtr|
   *                                    +---------------+
   *                                    |Return-Address |
   *                                    +---------------+
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *                                    +---------------+
   *                                    +---------------+
   *                                    |  MatchPairs   |
   *                                    |       count   |
   *                                    |       pairs   |
   *                                    |               |
   *                                    +---------------+
   *     pairsVectorStartOffset +-----> +---------------+
   *                                    |   MatchPair   |
   *             matchPairStart +------------>  start   |  <-------+
   *             matchPairLimit +------------>  limit   |          | Reserved space for
   *                                    +---------------+          | `RegExpObject::MaxPairCount`
   *                                           .                   | MatchPair objects.
   *                                           .                   |
   *                                           .                   | Only a single object will
   *                                    +---------------+          | be initialized and can be
   *                                    |   MatchPair   |          | accessed below.
   *                                    |     start     |  <-------+
   *                                    |     limit     |
   *                                    +---------------+
   */
  // clang-format on

  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairStart(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfStart());
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Store match limit to cx->regExpSearcherLastLimit and return the index.
  // |input| is dead at this point and can be clobbered with the context.
  masm.load32(matchPairLimit, result);
  masm.loadJSContext(input);
  masm.store32(result,
               Address(input, JSContext::offsetOfRegExpSearcherLastLimit()));
  masm.load32(matchPairStart, result);
  masm.pop(FramePointer);
  masm.ret();

  // No match: return the dedicated not-found sentinel.
  masm.bind(&notFound);
  masm.move32(Imm32(RegExpSearcherResultNotFound), result);
  masm.pop(FramePointer);
  masm.ret();

  // Execution failed: caller must retry via the VM.
  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpSearcherResultFailed), result);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpSearcherStub");
#endif

  return code;
}
// Out-of-line fallback for LRegExpSearcher; dispatches back to
// CodeGenerator::visitOutOfLineRegExpSearcher, which performs the VM call.
class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpSearcher* lir_;

 public:
  explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpSearcher(this);
  }

  // The LIR instruction this OOL path belongs to.
  LRegExpSearcher* lir() const { return lir_; }
};
// Slow path for RegExpSearcher: call into the VM when the stub returned
// RegExpSearcherResultFailed.
void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) {
  LRegExpSearcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Grab any free register as a temp for the MatchPairs pointer.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // The MatchPairs live on the stack just above the InputOutputData
  // reserved by visitRegExpSearcher.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  // Arguments are pushed in reverse order of the Fn signature.
  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call, and live
  // registers are already saved by the register allocator.
  using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
                      int32_t lastIndex, MatchPairs* pairs, int32_t* result);
  callVM<Fn, RegExpSearcherRaw>(lir);

  masm.jump(ool->rejoin());
}
// Emit the inline fast path for RegExp searching: call the per-zone
// RegExpSearcher stub; RegExpSearcherResultFailed falls back to the VM.
void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
  // The stub expects its operands in these fixed registers.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpSearcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpSearcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpSearcherLastIndexReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // Input registers must not alias the result register.
  static_assert(RegExpSearcherRegExpReg != ReturnReg);
  static_assert(RegExpSearcherStringReg != ReturnReg);
  static_assert(RegExpSearcherLastIndexReg != ReturnReg);

  // Reserve stack for the InputOutputData/MatchPairs used by the stub.
  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpSearcher* ool = new (alloc()) OutOfLineRegExpSearcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpSearcherStub =
      jitZone->regExpSearcherStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpSearcherStub);
  // Failure sentinel means the stub could not handle the search.
  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
                ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
3131 void CodeGenerator::visitRegExpSearcherLastLimit(
3132 LRegExpSearcherLastLimit* lir) {
3133 Register result = ToRegister(lir->output());
3134 Register scratch = ToRegister(lir->temp0());
3136 masm.loadAndClearRegExpSearcherLastLimit(result, scratch);
// Generate the per-zone RegExpExecTest stub. Returns 1/0 in ReturnReg for
// found/not-found, or RegExpExecTestResultFailed when the caller must retry
// in C++. Global/sticky regexps also have their .lastIndex slot updated.
JitCode* JitZone::generateRegExpExecTestStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpExecTest stub");

  Register regexp = RegExpExecTestRegExpReg;
  Register input = RegExpExecTestStringReg;
  Register result = ReturnReg;

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpExecTestStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // We are free to clobber all registers, as LRegExpExecTest is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);

  // Ensure lastIndex != result.
  regs.take(result);
  Register lastIndex = regs.takeAny();
  regs.add(result);
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());

  masm.reserveStack(RegExpReservedStack);

  // Load lastIndex and skip RegExp execution if needed.
  Label notFoundZeroLastIndex;
  masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);

  // In visitRegExpMatcher and visitRegExpSearcher, we reserve stack space
  // before calling the stub. For RegExpExecTest we call the stub before
  // reserving stack space, so the offset of the InputOutputData relative to the
  // frame pointer is negative.
  constexpr int32_t inputOutputDataStartOffset = -int32_t(RegExpReservedStack);

  // On ARM64, load/store instructions can encode an immediate offset in the
  // range [-256, 4095]. If we ever fail this assertion, it would be more
  // efficient to store the data above the frame pointer similar to
  // RegExpMatcher and RegExpSearcher.
  static_assert(inputOutputDataStartOffset >= -256);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // Set `result` to true/false to indicate found/not-found, or to
  // RegExpExecTestResultFailed if we have to retry in C++. If the regular
  // expression is global or sticky, we also have to update its .lastIndex slot.

  Label done;
  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Found: result = 1; global/sticky regexps store the match limit in
  // .lastIndex.
  masm.move32(Imm32(1), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.load32(matchPairLimit, lastIndex);
  masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
  masm.jump(&done);

  // Not found: result = 0; global/sticky regexps reset .lastIndex to 0.
  masm.bind(&notFound);
  masm.move32(Imm32(0), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  // Execution was skipped because lastIndex was out of range: not found,
  // and .lastIndex is unconditionally reset to 0.
  masm.bind(&notFoundZeroLastIndex);
  masm.move32(Imm32(0), result);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  // Failure: caller must retry via the VM.
  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpExecTestResultFailed), result);

  masm.bind(&done);
  masm.freeStack(RegExpReservedStack);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpExecTestStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpExecTestStub");
#endif

  return code;
}
// Out-of-line fallback for LRegExpExecTest; dispatches back to
// CodeGenerator::visitOutOfLineRegExpExecTest, which performs the VM call.
class OutOfLineRegExpExecTest : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpExecTest* lir_;

 public:
  explicit OutOfLineRegExpExecTest(LRegExpExecTest* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpExecTest(this);
  }

  // The LIR instruction this OOL path belongs to.
  LRegExpExecTest* lir() const { return lir_; }
};
// Slow path for RegExpExecTest: call into the VM when the stub returned
// RegExpExecTestResultFailed.
void CodeGenerator::visitOutOfLineRegExpExecTest(OutOfLineRegExpExecTest* ool) {
  LRegExpExecTest* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Arguments are pushed in reverse order of the Fn signature.
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn = bool (*)(JSContext* cx, Handle<RegExpObject*> regexp,
                      HandleString input, bool* result);
  callVM<Fn, RegExpBuiltinExecTestFromJit>(lir);

  masm.jump(ool->rejoin());
}
// Emit the inline fast path for RegExp.prototype.test via the per-zone
// RegExpExecTest stub; the failure sentinel falls back to the VM.
void CodeGenerator::visitRegExpExecTest(LRegExpExecTest* lir) {
  // The stub expects its operands in these fixed registers.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpExecTestRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpExecTestStringReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // Input registers must not alias the result register.
  static_assert(RegExpExecTestRegExpReg != ReturnReg);
  static_assert(RegExpExecTestStringReg != ReturnReg);

  auto* ool = new (alloc()) OutOfLineRegExpExecTest(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpExecTestStub =
      jitZone->regExpExecTestStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpExecTestStub);

  // Failure sentinel means the stub could not handle the test.
  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpExecTestResultFailed),
                ool->entry());

  masm.bind(ool->rejoin());
}
3304 void CodeGenerator::visitRegExpHasCaptureGroups(LRegExpHasCaptureGroups* ins) {
3305 Register regexp = ToRegister(ins->regexp());
3306 Register input = ToRegister(ins->input());
3307 Register output = ToRegister(ins->output());
3309 using Fn =
3310 bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
3311 auto* ool = oolCallVM<Fn, js::RegExpHasCaptureGroups>(
3312 ins, ArgList(regexp, input), StoreRegisterTo(output));
3314 // Load RegExpShared in |output|.
3315 Label vmCall;
3316 masm.loadParsedRegExpShared(regexp, output, ool->entry());
3318 // Return true iff pairCount > 1.
3319 Label returnTrue;
3320 masm.branch32(Assembler::Above,
3321 Address(output, RegExpShared::offsetOfPairCount()), Imm32(1),
3322 &returnTrue);
3323 masm.move32(Imm32(0), output);
3324 masm.jump(ool->rejoin());
3326 masm.bind(&returnTrue);
3327 masm.move32(Imm32(1), output);
3329 masm.bind(ool->rejoin());
// Out-of-line fallback for LRegExpPrototypeOptimizable; dispatches back to
// CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable.
class OutOfLineRegExpPrototypeOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpPrototypeOptimizable* ins_;

 public:
  explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
  }

  // The LIR instruction this OOL path belongs to.
  LRegExpPrototypeOptimizable* ins() const { return ins_; }
};
// Fast-path check whether RegExp.prototype is in its optimizable (unmodified)
// state; falls back to an ABI call in the OOL path.
void CodeGenerator::visitRegExpPrototypeOptimizable(
    LRegExpPrototypeOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  OutOfLineRegExpPrototypeOptimizable* ool =
      new (alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  const GlobalObject* global = gen->realm->maybeGlobal();
  MOZ_ASSERT(global);
  // The inline check either proves optimizability (output = 1) or defers to
  // the OOL slow path.
  masm.branchIfNotRegExpPrototypeOptimizable(object, temp, global,
                                             ool->entry());
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}
// Slow path: call RegExpPrototypeOptimizableRaw via the C ABI, saving and
// restoring volatile registers around the call.
void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(
    OutOfLineRegExpPrototypeOptimizable* ool) {
  LRegExpPrototypeOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  // |output| holds the result, so it is excluded from the volatile save set.
  saveVolatile(output);

  using Fn = bool (*)(JSContext* cx, JSObject* proto);
  masm.setupAlignedABICall();
  // |output| doubles as a scratch register for the JSContext argument.
  masm.loadJSContext(output);
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}
// Out-of-line fallback for LRegExpInstanceOptimizable; dispatches back to
// CodeGenerator::visitOutOfLineRegExpInstanceOptimizable.
class OutOfLineRegExpInstanceOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpInstanceOptimizable* ins_;

 public:
  explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpInstanceOptimizable(this);
  }

  // The LIR instruction this OOL path belongs to.
  LRegExpInstanceOptimizable* ins() const { return ins_; }
};
// Fast-path check whether a RegExp instance is in its optimizable state;
// falls back to an ABI call in the OOL path.
void CodeGenerator::visitRegExpInstanceOptimizable(
    LRegExpInstanceOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  OutOfLineRegExpInstanceOptimizable* ool =
      new (alloc()) OutOfLineRegExpInstanceOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  const GlobalObject* global = gen->realm->maybeGlobal();
  MOZ_ASSERT(global);
  // The inline check either proves optimizability (output = 1) or defers to
  // the OOL slow path.
  masm.branchIfNotRegExpInstanceOptimizable(object, temp, global, ool->entry());
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}
// Slow path: call RegExpInstanceOptimizableRaw via the C ABI, saving and
// restoring volatile registers around the call.
void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(
    OutOfLineRegExpInstanceOptimizable* ool) {
  LRegExpInstanceOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register proto = ToRegister(ins->proto());
  Register output = ToRegister(ins->output());

  // |output| holds the result, so it is excluded from the volatile save set.
  saveVolatile(output);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
  masm.setupAlignedABICall();
  // |output| doubles as a scratch register for the JSContext argument.
  masm.loadJSContext(output);
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.passABIArg(proto);
  masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}
// Emit a scan over |str|'s characters (of the given encoding) that leaves in
// |output| the index of the first '$' character, or -1 if none is found.
// |len| must be the (non-zero) string length; |temp0|/|temp1| are clobbered.
static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
                                 Register len, Register temp0, Register temp1,
                                 Register output, CharEncoding encoding) {
#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
  masm.assumeUnreachable("Length should be greater than 0.");
  masm.bind(&ok);
#endif

  Register chars = temp0;
  masm.loadStringChars(str, chars, encoding);

  // |output| is the loop index, starting at 0.
  masm.move32(Imm32(0), output);

  Label start, done;
  masm.bind(&start);

  Register currentChar = temp1;
  masm.loadChar(chars, output, currentChar, encoding);
  // Found a '$': |output| already holds its index.
  masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);

  masm.add32(Imm32(1), output);
  masm.branch32(Assembler::NotEqual, output, len, &start);

  // Scanned the whole string without finding '$'.
  masm.move32(Imm32(-1), output);

  masm.bind(&done);
}
// Find the index of the first '$' in a (linear) string, dispatching on the
// string's character encoding; ropes take the VM fallback.
void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
  Register str = ToRegister(ins->str());
  Register output = ToRegister(ins->output());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register len = ToRegister(ins->temp2());

  using Fn = bool (*)(JSContext*, JSString*, int32_t*);
  OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
      ins, ArgList(str), StoreRegisterTo(output));

  // Ropes don't have contiguous chars; use the VM call for them.
  masm.branchIfRope(str, ool->entry());
  masm.loadStringLength(str, len);

  Label isLatin1, done;
  masm.branchLatin1String(str, &isLatin1);
  {
    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                         CharEncoding::TwoByte);
    masm.jump(&done);
  }
  masm.bind(&isLatin1);
  {
    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                         CharEncoding::Latin1);
  }
  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Emit a VM call for string replacement. Each of the three operands may be a
// constant string (pushed as an immediate GC pointer) or a register; they are
// pushed in reverse order of the Fn signature.
void CodeGenerator::visitStringReplace(LStringReplace* lir) {
  if (lir->replacement()->isConstant()) {
    pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->replacement()));
  }

  if (lir->pattern()->isConstant()) {
    pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->pattern()));
  }

  if (lir->string()->isConstant()) {
    pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->string()));
  }

  // Flat replacement (no '$' substitutions) uses a distinct VM function.
  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
  if (lir->mir()->isFlatReplacement()) {
    callVM<Fn, StringFlatReplaceString>(lir);
  } else {
    callVM<Fn, StringReplace>(lir);
  }
}
// Attach an IonBinaryArithIC for a Value-producing binary operation. The
// bytecode op is recovered from the instruction's resume point pc.
void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::LhsIndex));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::RhsIndex));
  ValueOperand output = ToOutValue(lir);

  // Which arithmetic/bitwise op this cache services.
  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOp::Add:
    case JSOp::Sub:
    case JSOp::Mul:
    case JSOp::Div:
    case JSOp::Mod:
    case JSOp::Pow:
    case JSOp::BitAnd:
    case JSOp::BitOr:
    case JSOp::BitXor:
    case JSOp::Lsh:
    case JSOp::Rsh:
    case JSOp::Ursh: {
      IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }

    default:
      MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
  }
}
// Attach an IonCompareIC for a boolean-producing comparison. The bytecode op
// is recovered from the instruction's resume point pc.
void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::LhsIndex));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::RhsIndex));
  Register output = ToRegister(lir->output());

  // Which comparison op this cache services.
  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOp::Lt:
    case JSOp::Le:
    case JSOp::Gt:
    case JSOp::Ge:
    case JSOp::Eq:
    case JSOp::Ne:
    case JSOp::StrictEq:
    case JSOp::StrictNe: {
      IonCompareIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }

    default:
      MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
  }
}
3589 void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
3590 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3591 TypedOrValueRegister input =
3592 TypedOrValueRegister(ToValue(lir, LUnaryCache::InputIndex));
3593 ValueOperand output = ToOutValue(lir);
3595 IonUnaryArithIC ic(liveRegs, input, output);
3596 addIC(lir, allocateIC(ic));
3599 void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
3600 pushArg(ImmPtr(lir->mir()->module()));
3602 using Fn = JSObject* (*)(JSContext*, HandleObject);
3603 callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
// Dynamic import(): call into the VM to start a module import. Arguments are
// pushed in reverse order of the Fn signature.
void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
  pushArg(ToValue(lir, LDynamicImport::OptionsIndex));
  pushArg(ToValue(lir, LDynamicImport::SpecifierIndex));
  pushArg(ImmGCPtr(current->mir()->info().script()));

  using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
  callVM<Fn, js::StartDynamicModuleImport>(lir);
}
// Create a function clone from a template function: inline nursery allocation
// with a VM fallback, then store the environment chain into the new function.
void CodeGenerator::visitLambda(LLambda* lir) {
  Register envChain = ToRegister(lir->environmentChain());
  Register output = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  JSFunction* fun = lir->mir()->templateFunction();

  using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
  OutOfLineCode* ool = oolCallVM<Fn, js::Lambda>(
      lir, ArgList(ImmGCPtr(fun), envChain), StoreRegisterTo(output));

  // Allocate the clone inline from the template; bail to the VM on failure.
  TemplateObject templateObject(fun);
  masm.createGCObject(output, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.storeValue(JSVAL_TYPE_OBJECT, envChain,
                  Address(output, JSFunction::offsetOfEnvironment()));
  // No post barrier needed because output is guaranteed to be allocated in
  // the nursery.

  masm.bind(ool->rejoin());
}
// Create a function with an explicit prototype via a VM call. Arguments are
// pushed in reverse order of the Fn signature.
void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
  Register envChain = ToRegister(lir->envChain());
  Register prototype = ToRegister(lir->prototype());

  pushArg(prototype);
  pushArg(envChain);
  pushArg(ImmGCPtr(lir->mir()->function()));

  using Fn =
      JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
  callVM<Fn, js::FunWithProtoOperation>(lir);
}
// Set a function's inferred name (with optional get/set prefix) via a VM
// call. Arguments are pushed in reverse order of the Fn signature.
void CodeGenerator::visitSetFunName(LSetFunName* lir) {
  pushArg(Imm32(lir->mir()->prefixKind()));
  pushArg(ToValue(lir, LSetFunName::NameIndex));
  pushArg(ToRegister(lir->fun()));

  using Fn =
      bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
  callVM<Fn, js::SetFunctionName>(lir);
}
// Record an OSI (on-stack invalidation) point and attach its code offset to
// the associated safepoint.
void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
  // Note: markOsiPoint ensures enough space exists between the last
  // LOsiPoint and this one to patch adjacent call instructions.

  MOZ_ASSERT(masm.framePushed() == frameSize());

  uint32_t osiCallPointOffset = markOsiPoint(lir);

  LSafepoint* safepoint = lir->associatedSafepoint();
  MOZ_ASSERT(!safepoint->osiCallPointOffset());
  safepoint->setOsiCallPointOffset(osiCallPointOffset);

#ifdef DEBUG
  // There should be no movegroups or other instructions between
  // an instruction and its OsiPoint. This is necessary because
  // we use the OsiPoint's snapshot from within VM calls.
  for (LInstructionReverseIterator iter(current->rbegin(lir));
       iter != current->rend(); iter++) {
    if (*iter == lir) {
      continue;
    }
    MOZ_ASSERT(!iter->isMoveGroup());
    MOZ_ASSERT(iter->safepoint() == safepoint);
    break;
  }
#endif

#ifdef CHECK_OSIPOINT_REGISTERS
  if (shouldVerifyOsiPointRegs(safepoint)) {
    verifyOsiPointRegs(safepoint);
  }
#endif
}
// LPhi is pseudo-code only; it must be eliminated before code generation.
void CodeGenerator::visitPhi(LPhi* lir) {
  MOZ_CRASH("Unexpected LPhi in CodeGenerator");
}
// Unconditional control-flow edge: emit a jump to the target block.
void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }
// Emit a table switch over an int32 (or double-convertible) index; indices
// outside the table take the default case.
void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
  const LAllocation* temp;

  if (mir->getOperand(0)->type() != MIRType::Int32) {
    temp = ins->tempInt()->output();

    // The input is a double, so try and convert it to an integer.
    // If it does not fit in an integer, take the default case.
    masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp),
                              defaultcase, false);
  } else {
    temp = ins->index();
  }

  emitTableSwitchDispatch(mir, ToRegister(temp),
                          ToRegisterOrInvalid(ins->tempPointer()));
}
// Emit a table switch over a boxed Value index: non-numbers take the default
// case; doubles are converted to int32 (default case if inexact).
void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  Register index = ToRegister(ins->tempInt());
  ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
  Register tag = masm.extractTag(value, index);
  masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);

  Label unboxInt, isInt;
  masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
  {
    // Double path: unbox and convert; inexact conversion takes the default.
    FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
    masm.unboxDouble(value, floatIndex);
    masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
    masm.jump(&isInt);
  }

  masm.bind(&unboxInt);
  masm.unboxInt32(value, index);

  masm.bind(&isInt);

  emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
}
// Intentionally empty: formal parameters require no generated code here.
void CodeGenerator::visitParameter(LParameter* lir) {}
// Load the callee JSFunction from the frame's callee token slot.
void CodeGenerator::visitCallee(LCallee* lir) {
  Register callee = ToRegister(lir->output());
  Address ptr(FramePointer, JitFrameLayout::offsetOfCalleeToken());

  masm.loadFunctionFromCalleeToken(ptr, callee);
}
// Compute whether the current frame is a constructing call by extracting the
// constructing bit from the callee token.
void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
  Register output = ToRegister(lir->output());
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.loadPtr(calleeToken, output);

  // We must be inside a function.
  MOZ_ASSERT(current->mir()->info().script()->function());

  // The low bit indicates whether this call is constructing, just clear the
  // other bits.
  static_assert(CalleeToken_Function == 0x0,
                "CalleeTokenTag value should match");
  static_assert(CalleeToken_FunctionConstructing == 0x1,
                "CalleeTokenTag value should match");
  masm.andPtr(Imm32(0x1), output);
}
// Emit a function return: assert the return value is in the JS return
// registers and jump to the shared epilogue label when needed.
void CodeGenerator::visitReturn(LReturn* lir) {
#if defined(JS_NUNBOX32)
  DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
  DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
  MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
  MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  DebugOnly<LAllocation*> result = lir->getOperand(0);
  MOZ_ASSERT(ToRegister(result) == JSReturnReg);
#endif
  // Don't emit a jump to the return label if this is the last block, as
  // it'll fall through to the epilogue.
  //
  // This is -not- true however for a Generator-return, which may appear in the
  // middle of the last block, so we should always emit the jump there.
  if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
    masm.jump(&returnLabel_);
  }
}
// Emit the on-stack-replacement entry point: record its code offset and set
// up the Ion frame on top of the Baseline-provided JitFrameLayout.
void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
  Register temp = ToRegister(lir->temp());

  // Remember the OSR entry offset into the code buffer.
  masm.flushBuffer();
  setOsrEntryOffset(masm.size());

  // Allocate the full frame for this function
  // Note we have a new entry here. So we reset MacroAssembler::framePushed()
  // to 0, before reserving the stack.
  MOZ_ASSERT(masm.framePushed() == frameSize());
  masm.setFramePushed(0);

  // The Baseline code ensured both the frame pointer and stack pointer point to
  // the JitFrameLayout on the stack.

  // If profiling, save the current frame pointer to a per-thread global field.
  if (isProfilerInstrumentationEnabled()) {
    masm.profilerEnterFrame(FramePointer, temp);
  }

  masm.reserveStack(frameSize());
  MOZ_ASSERT(masm.framePushed() == frameSize());

  // Ensure that the Ion frames is properly aligned.
  masm.assertStackAlignment(JitStackAlignment, 0);
}
// Load the environment chain out of the Baseline frame at OSR entry.
void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
  const LAllocation* frame = lir->getOperand(0);
  const LDefinition* object = lir->getDef(0);

  // Offset is relative to the BaselineFrame, itself addressed via |frame|.
  const ptrdiff_t frameOffset =
      BaselineFrame::reverseOffsetOfEnvironmentChain();

  masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
}
3831 void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
3832 const LAllocation* frame = lir->getOperand(0);
3833 const LDefinition* object = lir->getDef(0);
3835 const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();
3837 masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
3840 void CodeGenerator::visitOsrValue(LOsrValue* value) {
3841 const LAllocation* frame = value->getOperand(0);
3842 const ValueOperand out = ToOutValue(value);
3844 const ptrdiff_t frameOffset = value->mir()->frameOffset();
3846 masm.loadValue(Address(ToRegister(frame), frameOffset), out);
3849 void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
3850 const LAllocation* frame = lir->getOperand(0);
3851 const ValueOperand out = ToOutValue(lir);
3853 Address flags =
3854 Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
3855 Address retval =
3856 Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());
3858 masm.moveValue(UndefinedValue(), out);
3860 Label done;
3861 masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
3862 &done);
3863 masm.loadValue(retval, out);
3864 masm.bind(&done);
3867 void CodeGenerator::visitStackArgT(LStackArgT* lir) {
3868 const LAllocation* arg = lir->arg();
3869 MIRType argType = lir->type();
3870 uint32_t argslot = lir->argslot();
3871 MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3873 Address dest = AddressOfPassedArg(argslot);
3875 if (arg->isFloatReg()) {
3876 masm.boxDouble(ToFloatRegister(arg), dest);
3877 } else if (arg->isRegister()) {
3878 masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
3879 } else {
3880 masm.storeValue(arg->toConstant()->toJSValue(), dest);
3884 void CodeGenerator::visitStackArgV(LStackArgV* lir) {
3885 ValueOperand val = ToValue(lir, 0);
3886 uint32_t argslot = lir->argslot();
3887 MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3889 masm.storeValue(val, AddressOfPassedArg(argslot));
3892 void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
3893 if (!group->numMoves()) {
3894 return;
3897 MoveResolver& resolver = masm.moveResolver();
3899 for (size_t i = 0; i < group->numMoves(); i++) {
3900 const LMove& move = group->getMove(i);
3902 LAllocation from = move.from();
3903 LAllocation to = move.to();
3904 LDefinition::Type type = move.type();
3906 // No bogus moves.
3907 MOZ_ASSERT(from != to);
3908 MOZ_ASSERT(!from.isConstant());
3909 MoveOp::Type moveType;
3910 switch (type) {
3911 case LDefinition::OBJECT:
3912 case LDefinition::SLOTS:
3913 case LDefinition::WASM_ANYREF:
3914 #ifdef JS_NUNBOX32
3915 case LDefinition::TYPE:
3916 case LDefinition::PAYLOAD:
3917 #else
3918 case LDefinition::BOX:
3919 #endif
3920 case LDefinition::GENERAL:
3921 case LDefinition::STACKRESULTS:
3922 moveType = MoveOp::GENERAL;
3923 break;
3924 case LDefinition::INT32:
3925 moveType = MoveOp::INT32;
3926 break;
3927 case LDefinition::FLOAT32:
3928 moveType = MoveOp::FLOAT32;
3929 break;
3930 case LDefinition::DOUBLE:
3931 moveType = MoveOp::DOUBLE;
3932 break;
3933 case LDefinition::SIMD128:
3934 moveType = MoveOp::SIMD128;
3935 break;
3936 default:
3937 MOZ_CRASH("Unexpected move type");
3940 masm.propagateOOM(
3941 resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
3944 masm.propagateOOM(resolver.resolve());
3945 if (masm.oom()) {
3946 return;
3949 MoveEmitter emitter(masm);
3951 #ifdef JS_CODEGEN_X86
3952 if (group->maybeScratchRegister().isGeneralReg()) {
3953 emitter.setScratchRegister(
3954 group->maybeScratchRegister().toGeneralReg()->reg());
3955 } else {
3956 resolver.sortMemoryToMemoryMoves();
3958 #endif
3960 emitter.emit(resolver);
3961 emitter.finish();
3964 void CodeGenerator::visitInteger(LInteger* lir) {
3965 masm.move32(Imm32(lir->i32()), ToRegister(lir->output()));
3968 void CodeGenerator::visitInteger64(LInteger64* lir) {
3969 masm.move64(Imm64(lir->i64()), ToOutRegister64(lir));
3972 void CodeGenerator::visitPointer(LPointer* lir) {
3973 masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
3976 void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
3977 Register output = ToRegister(lir->output());
3978 uint32_t nurseryIndex = lir->mir()->nurseryIndex();
3980 // Load a pointer to the entry in IonScript's nursery objects list.
3981 CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
3982 masm.propagateOOM(ionNurseryObjectLabels_.emplaceBack(label, nurseryIndex));
3984 // Load the JSObject*.
3985 masm.loadPtr(Address(output, 0), output);
3988 void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
3989 // No-op.
3992 void CodeGenerator::visitDebugEnterGCUnsafeRegion(
3993 LDebugEnterGCUnsafeRegion* lir) {
3994 Register temp = ToRegister(lir->temp0());
3996 masm.loadJSContext(temp);
3998 Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
3999 masm.add32(Imm32(1), inUnsafeRegion);
4001 Label ok;
4002 masm.branch32(Assembler::GreaterThan, inUnsafeRegion, Imm32(0), &ok);
4003 masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
4004 masm.bind(&ok);
4007 void CodeGenerator::visitDebugLeaveGCUnsafeRegion(
4008 LDebugLeaveGCUnsafeRegion* lir) {
4009 Register temp = ToRegister(lir->temp0());
4011 masm.loadJSContext(temp);
4013 Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
4014 masm.add32(Imm32(-1), inUnsafeRegion);
4016 Label ok;
4017 masm.branch32(Assembler::GreaterThanOrEqual, inUnsafeRegion, Imm32(0), &ok);
4018 masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
4019 masm.bind(&ok);
4022 void CodeGenerator::visitSlots(LSlots* lir) {
4023 Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
4024 masm.loadPtr(slots, ToRegister(lir->output()));
4027 void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
4028 ValueOperand dest = ToOutValue(lir);
4029 Register base = ToRegister(lir->input());
4030 int32_t offset = lir->mir()->slot() * sizeof(js::Value);
4032 masm.loadValue(Address(base, offset), dest);
4035 static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
4036 MIRType valueType) {
4037 if (value->isConstant()) {
4038 return ConstantOrRegister(value->toConstant()->toJSValue());
4040 return TypedOrValueRegister(valueType, ToAnyRegister(value));
4043 void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
4044 Register base = ToRegister(lir->slots());
4045 int32_t offset = lir->mir()->slot() * sizeof(js::Value);
4046 Address dest(base, offset);
4048 if (lir->mir()->needsBarrier()) {
4049 emitPreBarrier(dest);
4052 MIRType valueType = lir->mir()->value()->type();
4053 ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
4054 masm.storeUnboxedValue(value, valueType, dest);
4057 void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
4058 Register base = ToRegister(lir->slots());
4059 int32_t offset = lir->mir()->slot() * sizeof(Value);
4061 const ValueOperand value = ToValue(lir, LStoreDynamicSlotV::ValueIndex);
4063 if (lir->mir()->needsBarrier()) {
4064 emitPreBarrier(Address(base, offset));
4067 masm.storeValue(value, Address(base, offset));
4070 void CodeGenerator::visitElements(LElements* lir) {
4071 Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
4072 masm.loadPtr(elements, ToRegister(lir->output()));
4075 void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
4076 Address environment(ToRegister(lir->function()),
4077 JSFunction::offsetOfEnvironment());
4078 masm.unboxObject(environment, ToRegister(lir->output()));
4081 void CodeGenerator::visitHomeObject(LHomeObject* lir) {
4082 Register func = ToRegister(lir->function());
4083 Address homeObject(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
4085 masm.assertFunctionIsExtended(func);
4086 #ifdef DEBUG
4087 Label isObject;
4088 masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
4089 masm.assumeUnreachable("[[HomeObject]] must be Object");
4090 masm.bind(&isObject);
4091 #endif
4093 masm.unboxObject(homeObject, ToRegister(lir->output()));
4096 void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
4097 Register homeObject = ToRegister(lir->homeObject());
4098 ValueOperand output = ToOutValue(lir);
4099 Register temp = output.scratchReg();
4101 masm.loadObjProto(homeObject, temp);
4103 #ifdef DEBUG
4104 // We won't encounter a lazy proto, because the prototype is guaranteed to
4105 // either be a JSFunction or a PlainObject, and only proxy objects can have a
4106 // lazy proto.
4107 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
4109 Label proxyCheckDone;
4110 masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
4111 masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
4112 masm.bind(&proxyCheckDone);
4113 #endif
4115 Label nullProto, done;
4116 masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
4118 // Box prototype and return
4119 masm.tagValue(JSVAL_TYPE_OBJECT, temp, output);
4120 masm.jump(&done);
4122 masm.bind(&nullProto);
4123 masm.moveValue(NullValue(), output);
4125 masm.bind(&done);
4128 template <class T>
4129 static T* ToConstantObject(MDefinition* def) {
4130 MOZ_ASSERT(def->isConstant());
4131 return &def->toConstant()->toObject().as<T>();
4134 void CodeGenerator::visitNewLexicalEnvironmentObject(
4135 LNewLexicalEnvironmentObject* lir) {
4136 Register output = ToRegister(lir->output());
4137 Register temp = ToRegister(lir->temp0());
4139 auto* templateObj = ToConstantObject<BlockLexicalEnvironmentObject>(
4140 lir->mir()->templateObj());
4141 auto* scope = &templateObj->scope();
4142 gc::Heap initialHeap = gc::Heap::Default;
4144 using Fn =
4145 BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>);
4146 auto* ool =
4147 oolCallVM<Fn, BlockLexicalEnvironmentObject::createWithoutEnclosing>(
4148 lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4150 TemplateObject templateObject(templateObj);
4151 masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4153 masm.bind(ool->rejoin());
4156 void CodeGenerator::visitNewClassBodyEnvironmentObject(
4157 LNewClassBodyEnvironmentObject* lir) {
4158 Register output = ToRegister(lir->output());
4159 Register temp = ToRegister(lir->temp0());
4161 auto* templateObj = ToConstantObject<ClassBodyLexicalEnvironmentObject>(
4162 lir->mir()->templateObj());
4163 auto* scope = &templateObj->scope();
4164 gc::Heap initialHeap = gc::Heap::Default;
4166 using Fn = ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
4167 Handle<ClassBodyScope*>);
4168 auto* ool =
4169 oolCallVM<Fn, ClassBodyLexicalEnvironmentObject::createWithoutEnclosing>(
4170 lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4172 TemplateObject templateObject(templateObj);
4173 masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4175 masm.bind(ool->rejoin());
4178 void CodeGenerator::visitNewVarEnvironmentObject(
4179 LNewVarEnvironmentObject* lir) {
4180 Register output = ToRegister(lir->output());
4181 Register temp = ToRegister(lir->temp0());
4183 auto* templateObj =
4184 ToConstantObject<VarEnvironmentObject>(lir->mir()->templateObj());
4185 auto* scope = &templateObj->scope().as<VarScope>();
4186 gc::Heap initialHeap = gc::Heap::Default;
4188 using Fn = VarEnvironmentObject* (*)(JSContext*, Handle<VarScope*>);
4189 auto* ool = oolCallVM<Fn, VarEnvironmentObject::createWithoutEnclosing>(
4190 lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4192 TemplateObject templateObject(templateObj);
4193 masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4195 masm.bind(ool->rejoin());
4198 void CodeGenerator::visitGuardShape(LGuardShape* guard) {
4199 Register obj = ToRegister(guard->input());
4200 Register temp = ToTempRegisterOrInvalid(guard->temp0());
4201 Label bail;
4202 masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
4203 obj, &bail);
4204 bailoutFrom(&bail, guard->snapshot());
4207 void CodeGenerator::visitGuardFuse(LGuardFuse* guard) {
4208 auto fuseIndex = guard->mir()->fuseIndex();
4209 switch (fuseIndex) {
4210 case RealmFuses::FuseIndex::OptimizeGetIteratorFuse:
4211 addOptimizeGetIteratorFuseDependency();
4212 return;
4213 default:
4214 // validateAndRegisterFuseDependencies doesn't have
4215 // handling for this yet, actively check fuse instead.
4216 break;
4219 Register temp = ToRegister(guard->temp0());
4220 Label bail;
4222 // Bake specific fuse address for Ion code, because we won't share this code
4223 // across realms.
4224 GuardFuse* fuse = mirGen().realm->realmFuses().getFuseByIndex(fuseIndex);
4225 masm.loadPtr(AbsoluteAddress(fuse->fuseRef()), temp);
4226 masm.branchPtr(Assembler::NotEqual, temp, ImmPtr(nullptr), &bail);
4228 bailoutFrom(&bail, guard->snapshot());
4231 void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {
4232 Register obj = ToRegister(guard->object());
4233 Register shapeList = ToRegister(guard->shapeList());
4234 Register temp = ToRegister(guard->temp0());
4235 Register temp2 = ToRegister(guard->temp1());
4236 Register temp3 = ToRegister(guard->temp2());
4237 Register spectre = ToTempRegisterOrInvalid(guard->temp3());
4239 Label bail;
4240 masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
4241 masm.branchTestObjShapeList(Assembler::NotEqual, obj, temp, temp2, temp3,
4242 spectre, &bail);
4243 bailoutFrom(&bail, guard->snapshot());
4246 void CodeGenerator::visitGuardProto(LGuardProto* guard) {
4247 Register obj = ToRegister(guard->object());
4248 Register expected = ToRegister(guard->expected());
4249 Register temp = ToRegister(guard->temp0());
4251 masm.loadObjProto(obj, temp);
4253 Label bail;
4254 masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
4255 bailoutFrom(&bail, guard->snapshot());
4258 void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
4259 Register obj = ToRegister(guard->input());
4260 Register temp = ToRegister(guard->temp0());
4262 masm.loadObjProto(obj, temp);
4264 Label bail;
4265 masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
4266 bailoutFrom(&bail, guard->snapshot());
4269 void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
4270 Register obj = ToRegister(guard->input());
4271 Register temp = ToRegister(guard->temp0());
4273 Label bail;
4274 masm.branchIfNonNativeObj(obj, temp, &bail);
4275 bailoutFrom(&bail, guard->snapshot());
4278 void CodeGenerator::visitGuardGlobalGeneration(LGuardGlobalGeneration* guard) {
4279 Register temp = ToRegister(guard->temp0());
4280 Label bail;
4282 masm.load32(AbsoluteAddress(guard->mir()->generationAddr()), temp);
4283 masm.branch32(Assembler::NotEqual, temp, Imm32(guard->mir()->expected()),
4284 &bail);
4285 bailoutFrom(&bail, guard->snapshot());
4288 void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
4289 Register obj = ToRegister(guard->input());
4290 Register temp = ToRegister(guard->temp0());
4292 Label bail;
4293 masm.branchTestObjectIsProxy(false, obj, temp, &bail);
4294 bailoutFrom(&bail, guard->snapshot());
4297 void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
4298 Register obj = ToRegister(guard->input());
4299 Register temp = ToRegister(guard->temp0());
4301 Label bail;
4302 masm.branchTestObjectIsProxy(true, obj, temp, &bail);
4303 bailoutFrom(&bail, guard->snapshot());
4306 void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
4307 Register proxy = ToRegister(guard->proxy());
4308 Register temp = ToRegister(guard->temp0());
4310 Label bail;
4311 masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
4312 GetDOMProxyHandlerFamily(), &bail);
4313 bailoutFrom(&bail, guard->snapshot());
4316 void CodeGenerator::visitProxyGet(LProxyGet* lir) {
4317 Register proxy = ToRegister(lir->proxy());
4318 Register temp = ToRegister(lir->temp0());
4320 pushArg(lir->mir()->id(), temp);
4321 pushArg(proxy);
4323 using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
4324 callVM<Fn, ProxyGetProperty>(lir);
4327 void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
4328 Register proxy = ToRegister(lir->proxy());
4329 ValueOperand idVal = ToValue(lir, LProxyGetByValue::IdIndex);
4331 pushArg(idVal);
4332 pushArg(proxy);
4334 using Fn =
4335 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
4336 callVM<Fn, ProxyGetPropertyByValue>(lir);
4339 void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
4340 Register proxy = ToRegister(lir->proxy());
4341 ValueOperand idVal = ToValue(lir, LProxyHasProp::IdIndex);
4343 pushArg(idVal);
4344 pushArg(proxy);
4346 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
4347 if (lir->mir()->hasOwn()) {
4348 callVM<Fn, ProxyHasOwn>(lir);
4349 } else {
4350 callVM<Fn, ProxyHas>(lir);
4354 void CodeGenerator::visitProxySet(LProxySet* lir) {
4355 Register proxy = ToRegister(lir->proxy());
4356 ValueOperand rhs = ToValue(lir, LProxySet::RhsIndex);
4357 Register temp = ToRegister(lir->temp0());
4359 pushArg(Imm32(lir->mir()->strict()));
4360 pushArg(rhs);
4361 pushArg(lir->mir()->id(), temp);
4362 pushArg(proxy);
4364 using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
4365 callVM<Fn, ProxySetProperty>(lir);
4368 void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
4369 Register proxy = ToRegister(lir->proxy());
4370 ValueOperand idVal = ToValue(lir, LProxySetByValue::IdIndex);
4371 ValueOperand rhs = ToValue(lir, LProxySetByValue::RhsIndex);
4373 pushArg(Imm32(lir->mir()->strict()));
4374 pushArg(rhs);
4375 pushArg(idVal);
4376 pushArg(proxy);
4378 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
4379 callVM<Fn, ProxySetPropertyByValue>(lir);
4382 void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
4383 Register obj = ToRegister(lir->obj());
4384 ValueOperand rhs = ToValue(lir, LCallSetArrayLength::RhsIndex);
4386 pushArg(Imm32(lir->mir()->strict()));
4387 pushArg(rhs);
4388 pushArg(obj);
4390 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
4391 callVM<Fn, jit::SetArrayLength>(lir);
4394 void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
4395 Register obj = ToRegister(lir->object());
4396 Register temp0 = ToRegister(lir->temp0());
4397 Register temp1 = ToRegister(lir->temp1());
4398 Register temp2 = ToRegister(lir->temp2());
4399 Register temp3 = ToRegister(lir->temp3());
4400 ValueOperand output = ToOutValue(lir);
4402 Label bail, cacheHit;
4403 masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
4404 output, &cacheHit);
4406 masm.branchIfNonNativeObj(obj, temp0, &bail);
4408 masm.Push(UndefinedValue());
4409 masm.moveStackPtrTo(temp3);
4411 using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
4412 MegamorphicCache::Entry* cacheEntry, Value* vp);
4413 masm.setupAlignedABICall();
4414 masm.loadJSContext(temp0);
4415 masm.passABIArg(temp0);
4416 masm.passABIArg(obj);
4417 masm.movePropertyKey(lir->mir()->name(), temp1);
4418 masm.passABIArg(temp1);
4419 masm.passABIArg(temp2);
4420 masm.passABIArg(temp3);
4422 masm.callWithABI<Fn, GetNativeDataPropertyPure>();
4424 MOZ_ASSERT(!output.aliases(ReturnReg));
4425 masm.Pop(output);
4427 masm.branchIfFalseBool(ReturnReg, &bail);
4429 masm.bind(&cacheHit);
4430 bailoutFrom(&bail, lir->snapshot());
4433 void CodeGenerator::visitMegamorphicLoadSlotByValue(
4434 LMegamorphicLoadSlotByValue* lir) {
4435 Register obj = ToRegister(lir->object());
4436 ValueOperand idVal = ToValue(lir, LMegamorphicLoadSlotByValue::IdIndex);
4437 Register temp0 = ToRegister(lir->temp0());
4438 Register temp1 = ToRegister(lir->temp1());
4439 Register temp2 = ToRegister(lir->temp2());
4440 ValueOperand output = ToOutValue(lir);
4442 Label bail, cacheHit;
4443 masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
4444 output, &cacheHit);
4446 masm.branchIfNonNativeObj(obj, temp0, &bail);
4448 // idVal will be in vp[0], result will be stored in vp[1].
4449 masm.reserveStack(sizeof(Value));
4450 masm.Push(idVal);
4451 masm.moveStackPtrTo(temp0);
4453 using Fn = bool (*)(JSContext* cx, JSObject* obj,
4454 MegamorphicCache::Entry* cacheEntry, Value* vp);
4455 masm.setupAlignedABICall();
4456 masm.loadJSContext(temp1);
4457 masm.passABIArg(temp1);
4458 masm.passABIArg(obj);
4459 masm.passABIArg(temp2);
4460 masm.passABIArg(temp0);
4461 masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();
4463 MOZ_ASSERT(!idVal.aliases(temp0));
4464 masm.storeCallPointerResult(temp0);
4465 masm.Pop(idVal);
4467 uint32_t framePushed = masm.framePushed();
4468 Label ok;
4469 masm.branchIfTrueBool(temp0, &ok);
4470 masm.freeStack(sizeof(Value)); // Discard result Value.
4471 masm.jump(&bail);
4473 masm.bind(&ok);
4474 masm.setFramePushed(framePushed);
4475 masm.Pop(output);
4477 masm.bind(&cacheHit);
4478 bailoutFrom(&bail, lir->snapshot());
4481 void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
4482 Register obj = ToRegister(lir->object());
4483 ValueOperand value = ToValue(lir, LMegamorphicStoreSlot::RhsIndex);
4485 Register temp0 = ToRegister(lir->temp0());
4486 #ifndef JS_CODEGEN_X86
4487 Register temp1 = ToRegister(lir->temp1());
4488 Register temp2 = ToRegister(lir->temp2());
4489 #endif
4491 Label cacheHit, done;
4492 #ifdef JS_CODEGEN_X86
4493 masm.emitMegamorphicCachedSetSlot(
4494 lir->mir()->name(), obj, temp0, value, &cacheHit,
4495 [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
4496 EmitPreBarrier(masm, addr, mirType);
4498 #else
4499 masm.emitMegamorphicCachedSetSlot(
4500 lir->mir()->name(), obj, temp0, temp1, temp2, value, &cacheHit,
4501 [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
4502 EmitPreBarrier(masm, addr, mirType);
4504 #endif
4506 pushArg(Imm32(lir->mir()->strict()));
4507 pushArg(value);
4508 pushArg(lir->mir()->name(), temp0);
4509 pushArg(obj);
4511 using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
4512 callVM<Fn, SetPropertyMegamorphic<true>>(lir);
4514 masm.jump(&done);
4515 masm.bind(&cacheHit);
4517 masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
4518 masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);
4520 saveVolatile(temp0);
4521 emitPostWriteBarrier(obj);
4522 restoreVolatile(temp0);
4524 masm.bind(&done);
4527 void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
4528 Register obj = ToRegister(lir->object());
4529 ValueOperand idVal = ToValue(lir, LMegamorphicHasProp::IdIndex);
4530 Register temp0 = ToRegister(lir->temp0());
4531 Register temp1 = ToRegister(lir->temp1());
4532 Register temp2 = ToRegister(lir->temp2());
4533 Register output = ToRegister(lir->output());
4535 Label bail, cacheHit;
4536 masm.emitMegamorphicCacheLookupExists(idVal, obj, temp0, temp1, temp2, output,
4537 &cacheHit, lir->mir()->hasOwn());
4539 masm.branchIfNonNativeObj(obj, temp0, &bail);
4541 // idVal will be in vp[0], result will be stored in vp[1].
4542 masm.reserveStack(sizeof(Value));
4543 masm.Push(idVal);
4544 masm.moveStackPtrTo(temp0);
4546 using Fn = bool (*)(JSContext* cx, JSObject* obj,
4547 MegamorphicCache::Entry* cacheEntry, Value* vp);
4548 masm.setupAlignedABICall();
4549 masm.loadJSContext(temp1);
4550 masm.passABIArg(temp1);
4551 masm.passABIArg(obj);
4552 masm.passABIArg(temp2);
4553 masm.passABIArg(temp0);
4554 if (lir->mir()->hasOwn()) {
4555 masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
4556 } else {
4557 masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
4560 MOZ_ASSERT(!idVal.aliases(temp0));
4561 masm.storeCallPointerResult(temp0);
4562 masm.Pop(idVal);
4564 uint32_t framePushed = masm.framePushed();
4565 Label ok;
4566 masm.branchIfTrueBool(temp0, &ok);
4567 masm.freeStack(sizeof(Value)); // Discard result Value.
4568 masm.jump(&bail);
4570 masm.bind(&ok);
4571 masm.setFramePushed(framePushed);
4572 masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
4573 masm.freeStack(sizeof(Value));
4574 masm.bind(&cacheHit);
4576 bailoutFrom(&bail, lir->snapshot());
4579 void CodeGenerator::visitSmallObjectVariableKeyHasProp(
4580 LSmallObjectVariableKeyHasProp* lir) {
4581 Register id = ToRegister(lir->id());
4582 Register output = ToRegister(lir->output());
4584 #ifdef DEBUG
4585 Label isAtom;
4586 masm.branchTest32(Assembler::NonZero, Address(id, JSString::offsetOfFlags()),
4587 Imm32(JSString::ATOM_BIT), &isAtom);
4588 masm.assumeUnreachable("Expected atom input");
4589 masm.bind(&isAtom);
4590 #endif
4592 SharedShape* shape = &lir->mir()->shape()->asShared();
4594 Label done, success;
4595 for (SharedShapePropertyIter<NoGC> iter(shape); !iter.done(); iter++) {
4596 masm.branchPtr(Assembler::Equal, id, ImmGCPtr(iter->key().toAtom()),
4597 &success);
4599 masm.move32(Imm32(0), output);
4600 masm.jump(&done);
4601 masm.bind(&success);
4602 masm.move32(Imm32(1), output);
4603 masm.bind(&done);
4606 void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
4607 LGuardIsNotArrayBufferMaybeShared* guard) {
4608 Register obj = ToRegister(guard->input());
4609 Register temp = ToRegister(guard->temp0());
4611 Label bail;
4612 masm.loadObjClassUnsafe(obj, temp);
4613 masm.branchPtr(Assembler::Equal, temp,
4614 ImmPtr(&FixedLengthArrayBufferObject::class_), &bail);
4615 masm.branchPtr(Assembler::Equal, temp,
4616 ImmPtr(&FixedLengthSharedArrayBufferObject::class_), &bail);
4617 masm.branchPtr(Assembler::Equal, temp,
4618 ImmPtr(&ResizableArrayBufferObject::class_), &bail);
4619 masm.branchPtr(Assembler::Equal, temp,
4620 ImmPtr(&GrowableSharedArrayBufferObject::class_), &bail);
4621 bailoutFrom(&bail, guard->snapshot());
4624 void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
4625 Register obj = ToRegister(guard->input());
4626 Register temp = ToRegister(guard->temp0());
4628 Label bail;
4629 masm.loadObjClassUnsafe(obj, temp);
4630 masm.branchIfClassIsNotTypedArray(temp, &bail);
4631 bailoutFrom(&bail, guard->snapshot());
4634 void CodeGenerator::visitGuardIsFixedLengthTypedArray(
4635 LGuardIsFixedLengthTypedArray* guard) {
4636 Register obj = ToRegister(guard->input());
4637 Register temp = ToRegister(guard->temp0());
4639 Label bail;
4640 masm.loadObjClassUnsafe(obj, temp);
4641 masm.branchIfClassIsNotFixedLengthTypedArray(temp, &bail);
4642 bailoutFrom(&bail, guard->snapshot());
4645 void CodeGenerator::visitGuardIsResizableTypedArray(
4646 LGuardIsResizableTypedArray* guard) {
4647 Register obj = ToRegister(guard->input());
4648 Register temp = ToRegister(guard->temp0());
4650 Label bail;
4651 masm.loadObjClassUnsafe(obj, temp);
4652 masm.branchIfClassIsNotResizableTypedArray(temp, &bail);
4653 bailoutFrom(&bail, guard->snapshot());
4656 void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
4657 Register obj = ToRegister(guard->input());
4659 Label bail;
4661 Address handlerAddr(obj, ProxyObject::offsetOfHandler());
4662 masm.branchPtr(Assembler::NotEqual, handlerAddr,
4663 ImmPtr(guard->mir()->handler()), &bail);
4665 bailoutFrom(&bail, guard->snapshot());
4668 void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
4669 Register input = ToRegister(guard->input());
4670 Register expected = ToRegister(guard->expected());
4672 Assembler::Condition cond =
4673 guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
4674 bailoutCmpPtr(cond, input, expected, guard->snapshot());
4677 void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
4678 Register input = ToRegister(guard->input());
4679 Register expected = ToRegister(guard->expected());
4681 bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
4684 void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
4685 Register str = ToRegister(guard->str());
4686 Register scratch = ToRegister(guard->temp0());
4688 LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
4689 volatileRegs.takeUnchecked(scratch);
4691 Label bail;
4692 masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
4693 &bail);
4694 bailoutFrom(&bail, guard->snapshot());
4697 void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
4698 Register symbol = ToRegister(guard->symbol());
4700 bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
4701 guard->snapshot());
4704 void CodeGenerator::visitGuardSpecificInt32(LGuardSpecificInt32* guard) {
4705 Register num = ToRegister(guard->num());
4707 bailoutCmp32(Assembler::NotEqual, num, Imm32(guard->mir()->expected()),
4708 guard->snapshot());
4711 void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
4712 Register str = ToRegister(lir->string());
4713 Register output = ToRegister(lir->output());
4715 Label vmCall, done;
4716 masm.loadStringIndexValue(str, output, &vmCall);
4717 masm.jump(&done);
4720 masm.bind(&vmCall);
4722 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4723 volatileRegs.takeUnchecked(output);
4724 masm.PushRegsInMask(volatileRegs);
4726 using Fn = int32_t (*)(JSString* str);
4727 masm.setupAlignedABICall();
4728 masm.passABIArg(str);
4729 masm.callWithABI<Fn, GetIndexFromString>();
4730 masm.storeCallInt32Result(output);
4732 masm.PopRegsInMask(volatileRegs);
4734 // GetIndexFromString returns a negative value on failure.
4735 bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
4738 masm.bind(&done);
4741 void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
4742 Register str = ToRegister(lir->string());
4743 Register output = ToRegister(lir->output());
4744 Register temp = ToRegister(lir->temp0());
4746 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4748 Label bail;
4749 masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
4750 bailoutFrom(&bail, lir->snapshot());
4753 void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
4754 Register str = ToRegister(lir->string());
4755 FloatRegister output = ToFloatRegister(lir->output());
4756 Register temp0 = ToRegister(lir->temp0());
4757 Register temp1 = ToRegister(lir->temp1());
4759 Label vmCall, done;
4760 // Use indexed value as fast path if possible.
4761 masm.loadStringIndexValue(str, temp0, &vmCall);
4762 masm.convertInt32ToDouble(temp0, output);
4763 masm.jump(&done);
4765 masm.bind(&vmCall);
4767 // Reserve stack for holding the result value of the call.
4768 masm.reserveStack(sizeof(double));
4769 masm.moveStackPtrTo(temp0);
4771 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4772 volatileRegs.takeUnchecked(temp0);
4773 volatileRegs.takeUnchecked(temp1);
4774 masm.PushRegsInMask(volatileRegs);
4776 using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
4777 masm.setupAlignedABICall();
4778 masm.loadJSContext(temp1);
4779 masm.passABIArg(temp1);
4780 masm.passABIArg(str);
4781 masm.passABIArg(temp0);
4782 masm.callWithABI<Fn, StringToNumberPure>();
4783 masm.storeCallPointerResult(temp0);
4785 masm.PopRegsInMask(volatileRegs);
4787 Label ok;
4788 masm.branchIfTrueBool(temp0, &ok);
4790 // OOM path, recovered by StringToNumberPure.
4792 // Use addToStackPtr instead of freeStack as freeStack tracks stack height
4793 // flow-insensitively, and using it here would confuse the stack height
4794 // tracking.
4795 masm.addToStackPtr(Imm32(sizeof(double)));
4796 bailout(lir->snapshot());
4798 masm.bind(&ok);
4799 masm.Pop(output);
4801 masm.bind(&done);
// Guard that |obj| has no dense elements: bail out unless the initialized
// length of its elements header is zero.
void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);

  // Make sure there are no dense elements.
  Address initLength(temp, ObjectElements::offsetOfInitializedLength());
  bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
}
// Widen a boolean (0 or 1) to a 64-bit integer by zero-extension.
void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
  Register input = ToRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  masm.move32To64ZeroExtend(input, output);
}
// Shared helper: convert the JSString in |input| to an int64 in |output| by
// calling DoStringToInt64 in the VM. The result is returned through a
// temporary stack slot used as the out-param.
void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
                                      Register64 output) {
  Register temp = output.scratchReg();

  saveLive(lir);

  // Reserve a stack slot for the uint64_t out-param and pass its address.
  masm.reserveStack(sizeof(uint64_t));
  masm.moveStackPtrTo(temp);
  pushArg(temp);
  pushArg(input);

  using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
  callVM<Fn, DoStringToInt64>(lir);

  // Read the result back and release the slot.
  masm.load64(Address(masm.getStackPointer(), 0), output);
  masm.freeStack(sizeof(uint64_t));

  // Don't restore the registers that now hold the result.
  restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
}
// Convert a string operand to int64 via the shared VM-call helper.
void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
  Register input = ToRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  emitStringToInt64(lir, input, output);
}
// Convert a boxed Value to int64. Accepts BigInt, Boolean, and String
// inputs (tested in that order); any other type bails out.
void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
  ValueOperand input = ToValue(lir, LValueToInt64::InputIndex);
  Register temp = ToRegister(lir->temp0());
  Register64 output = ToOutRegister64(lir);

  // Number of type tests still to emit; the last one branches straight to
  // |fail| instead of the next test.
  int checks = 3;

  Label fail, done;
  // Jump to fail if this is the last check and we fail it,
  // otherwise to the next test.
  auto emitTestAndUnbox = [&](auto testAndUnbox) {
    MOZ_ASSERT(checks > 0);

    checks--;
    Label notType;
    Label* target = checks ? &notType : &fail;

    testAndUnbox(target);

    if (checks) {
      masm.jump(&done);
      masm.bind(&notType);
    }
  };

  Register tag = masm.extractTag(input, temp);

  // BigInt.
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestBigInt(Assembler::NotEqual, tag, target);
    masm.unboxBigInt(input, temp);
    masm.loadBigInt64(temp, output);
  });

  // Boolean
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestBoolean(Assembler::NotEqual, tag, target);
    masm.unboxBoolean(input, temp);
    masm.move32To64ZeroExtend(temp, output);
  });

  // String
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestString(Assembler::NotEqual, tag, target);
    masm.unboxString(input, temp);
    emitStringToInt64(lir, temp, output);
  });

  MOZ_ASSERT(checks == 0);

  bailoutFrom(&fail, lir->snapshot());
  masm.bind(&done);
}
// Truncate a BigInt to its low 64 bits.
void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
  Register operand = ToRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  masm.loadBigInt64(operand, output);
}
// Create the out-of-line VM-call path used when inline BigInt allocation
// fails. |type| selects between signed (BigInt64) and unsigned (BigUint64)
// interpretation of the 64-bit |input|.
OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
                                                    Scalar::Type type,
                                                    Register64 input,
                                                    Register output) {
#if JS_BITS_PER_WORD == 32
  // On 32-bit platforms the int64 is passed as two 32-bit halves.
  using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
  auto args = ArgList(input.low, input.high);
#else
  using Fn = BigInt* (*)(JSContext*, uint64_t);
  auto args = ArgList(input);
#endif

  if (type == Scalar::BigInt64) {
    return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
                                                     StoreRegisterTo(output));
  }

  MOZ_ASSERT(type == Scalar::BigUint64);
  return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
                                                    StoreRegisterTo(output));
}
// Allocate a BigInt in |output| and initialize it from the 64-bit |input|.
// Tries an inline nursery allocation first and falls back to a VM call.
// When no dedicated temp is available, a register is borrowed and preserved
// around the inline allocation attempt.
void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
                                     Register64 input, Register output,
                                     Register maybeTemp) {
  OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);

  if (maybeTemp != InvalidReg) {
    masm.newGCBigInt(output, maybeTemp, initialBigIntHeap(), ool->entry());
  } else {
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(input);
    regs.take(output);

    Register temp = regs.takeAny();

    // The borrowed register may be live, so save/restore it around the
    // allocation on both the success and failure paths.
    masm.push(temp);

    Label fail, ok;
    masm.newGCBigInt(output, temp, initialBigIntHeap(), &fail);
    masm.pop(temp);
    masm.jump(&ok);
    masm.bind(&fail);
    masm.pop(temp);
    masm.jump(ool->entry());
    masm.bind(&ok);
  }
  masm.initializeBigInt64(type, output, input);
  masm.bind(ool->rejoin());
}
// Box a 64-bit integer into a freshly allocated BigInt.
void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
  Register64 input = ToRegister64(lir->input());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp);
}
// Guard that the input Value is exactly the expected constant; bail out
// otherwise.
void CodeGenerator::visitGuardValue(LGuardValue* lir) {
  ValueOperand input = ToValue(lir, LGuardValue::InputIndex);
  Value expected = lir->mir()->expected();
  Label bail;
  masm.branchTestValue(Assembler::NotEqual, input, expected, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard that the input Value is null or undefined; bail out for any other
// type tag.
void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
  ValueOperand input = ToValue(lir, LGuardNullOrUndefined::InputIndex);

  ScratchTagScope tag(masm, input);
  masm.splitTagForTest(input, tag);

  Label done;
  masm.branchTestNull(Assembler::Equal, tag, &done);

  Label bail;
  masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
  bailoutFrom(&bail, lir->snapshot());

  masm.bind(&done);
}
// Guard that the input Value is not an object; bail out if it is.
void CodeGenerator::visitGuardIsNotObject(LGuardIsNotObject* lir) {
  ValueOperand input = ToValue(lir, LGuardIsNotObject::InputIndex);

  Label bail;
  masm.branchTestObject(Assembler::Equal, input, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard on JSFunction flag bits: all |expectedFlags| must be set and none of
// the |unexpectedFlags| may be set, else bail out.
void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
  Register function = ToRegister(lir->function());

  Label bail;
  if (uint16_t flags = lir->mir()->expectedFlags()) {
    masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
  }
  if (uint16_t flags = lir->mir()->unexpectedFlags()) {
    masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
  }
  bailoutFrom(&bail, lir->snapshot());
}
// Guard that |function| is a constructor and not a built-in one; bail out
// otherwise.
void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
    LGuardFunctionIsNonBuiltinCtor* lir) {
  Register function = ToRegister(lir->function());
  Register temp = ToRegister(lir->temp0());

  Label bail;
  masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard on the function's kind. Depending on bailOnEquality(), bail either
// when the kind matches the expected one or when it does not.
void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
  Register function = ToRegister(lir->function());
  Register temp = ToRegister(lir->temp0());

  Assembler::Condition cond =
      lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;

  Label bail;
  masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard that the function's script slot holds the expected BaseScript; bail
// out on mismatch.
void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
  Register function = ToRegister(lir->function());

  Address scriptAddr(function, JSFunction::offsetOfJitInfoOrScript());
  bailoutCmpPtr(Assembler::NotEqual, scriptAddr,
                ImmGCPtr(lir->mir()->expected()), lir->snapshot());
}
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;          // Instruction whose store needs the barrier.
  const LAllocation* object_;  // The object written into (constant or reg).

 public:
  OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
      : lir_(lir), object_(object) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteBarrier(this);
  }

  LInstruction* lir() const { return lir_; }
  const LAllocation* object() const { return object_; }
};
// For a known tenured cell, check the arena's buffered-cells bitmap
// directly: jump to |exit| if the cell is already recorded, to |callVM| if
// the arena still uses the shared sentinel set (so one must be allocated),
// and otherwise set the bit inline and jump to |exit|.
static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
                                            const gc::TenuredCell* cell,
                                            AllocatableGeneralRegisterSet& regs,
                                            Label* exit, Label* callVM) {
  Register temp = regs.takeAny();

  gc::Arena* arena = cell->arena();

  Register cells = temp;
  masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);

  // Compute the bit position of this cell in the arena's cell set.
  size_t index = gc::ArenaCellSet::getCellIndex(cell);
  size_t word;
  uint32_t mask;
  gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
  size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);

  // Already buffered: nothing to do.
  masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
                    exit);

  // Check whether this is the sentinel set and if so call the VM to allocate
  // one for this arena.
  masm.branchPtr(Assembler::Equal,
                 Address(cells, gc::ArenaCellSet::offsetOfArena()),
                 ImmPtr(nullptr), callVM);

  // Add the cell to the set.
  masm.or32(Imm32(mask), Address(cells, offset));
  masm.jump(exit);

  regs.add(temp);
}
// Emit a post-write barrier for a write into |objreg|. Tries cheap inline
// checks first (arena bitmap for known constants, the last-buffered-cell
// cache otherwise) before falling back to an ABI call into the VM.
// |maybeConstant| is the object if known at compile time; |isGlobal| selects
// the GlobalObject-specific VM entry point.
static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
                                 Register objreg, JSObject* maybeConstant,
                                 bool isGlobal,
                                 AllocatableGeneralRegisterSet& regs) {
  MOZ_ASSERT_IF(isGlobal, maybeConstant);

  Label callVM;
  Label exit;

  Register temp = regs.takeAny();

  // We already have a fast path to check whether a global is in the store
  // buffer.
  if (!isGlobal) {
    if (maybeConstant) {
      // Check store buffer bitmap directly for known object.
      EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
                                      &exit, &callVM);
    } else {
      // Check one element cache to avoid VM call.
      masm.branchPtr(Assembler::Equal,
                     AbsoluteAddress(runtime->addressOfLastBufferedWholeCell()),
                     objreg, &exit);
    }
  }

  // Call into the VM to barrier the write.
  masm.bind(&callVM);

  Register runtimereg = temp;
  masm.mov(ImmPtr(runtime), runtimereg);

  masm.setupAlignedABICall();
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  if (isGlobal) {
    using Fn = void (*)(JSRuntime* rt, GlobalObject* obj);
    masm.callWithABI<Fn, PostGlobalWriteBarrier>();
  } else {
    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* obj);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }

  masm.bind(&exit);
}
// Emit a post-write barrier for the object in |obj|, which may be either a
// constant or a register allocation.
void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());

  Register objreg;
  JSObject* object = nullptr;
  bool isGlobal = false;
  if (obj->isConstant()) {
    // Known object: materialize it and use the constant fast path.
    object = &obj->toConstant()->toObject();
    isGlobal = isGlobalObject(object);
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(object), objreg);
  } else {
    objreg = ToRegister(obj);
    regs.takeUnchecked(objreg);
  }

  EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
}
5159 // Returns true if `def` might be allocated in the nursery.
5160 static bool ValueNeedsPostBarrier(MDefinition* def) {
5161 if (def->isBox()) {
5162 def = def->toBox()->input();
5164 if (def->type() == MIRType::Value) {
5165 return true;
5167 return NeedsPostBarrier(def->type());
// Out-of-line path that calls PostWriteElementBarrier for a write into a
// dense element, carrying the element index (plus a compile-time diff).
class OutOfLineElementPostWriteBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  LiveRegisterSet liveVolatileRegs_;  // Volatile regs to save around the call.
  const LAllocation* index_;          // Element index (constant or register).
  int32_t indexDiff_;                 // Constant adjustment added to index.
  Register obj_;
  Register scratch_;

 public:
  OutOfLineElementPostWriteBarrier(const LiveRegisterSet& liveVolatileRegs,
                                   Register obj, const LAllocation* index,
                                   Register scratch, int32_t indexDiff)
      : liveVolatileRegs_(liveVolatileRegs),
        index_(index),
        indexDiff_(indexDiff),
        obj_(obj),
        scratch_(scratch) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineElementPostWriteBarrier(this);
  }

  const LiveRegisterSet& liveVolatileRegs() const { return liveVolatileRegs_; }
  const LAllocation* index() const { return index_; }
  int32_t indexDiff() const { return indexDiff_; }

  Register object() const { return obj_; }
  Register scratch() const { return scratch_; }
};
// Emit the inline portion of an element post-write barrier: skip entirely
// when the stored value provably needs no barrier, otherwise branch to the
// out-of-line VM call when the object is tenured and the value is in the
// nursery.
void CodeGenerator::emitElementPostWriteBarrier(
    MInstruction* mir, const LiveRegisterSet& liveVolatileRegs, Register obj,
    const LAllocation* index, Register scratch, const ConstantOrRegister& val,
    int32_t indexDiff) {
  if (val.constant()) {
    // Constant GC things must be tenured, so no barrier is needed.
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    return;
  }

  auto* ool = new (alloc()) OutOfLineElementPostWriteBarrier(
      liveVolatileRegs, obj, index, scratch, indexDiff);
  addOutOfLineCode(ool, mir);

  // A nursery object never needs a post-write barrier.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, ool->rejoin());

  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::Equal, reg.valueReg(), scratch,
                                  ool->entry());
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, reg.typedReg().gpr(),
                                 scratch, ool->entry());
  }

  masm.bind(ool->rejoin());
}
// Out-of-line slow path: call PostWriteElementBarrier(rt, obj, index) in the
// VM, preserving live volatile registers across the ABI call.
void CodeGenerator::visitOutOfLineElementPostWriteBarrier(
    OutOfLineElementPostWriteBarrier* ool) {
  Register obj = ool->object();
  Register scratch = ool->scratch();
  const LAllocation* index = ool->index();
  int32_t indexDiff = ool->indexDiff();

  masm.PushRegsInMask(ool->liveVolatileRegs());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(obj);
  regs.takeUnchecked(scratch);

  Register indexReg;
  if (index->isConstant()) {
    // Fold the diff into the constant.
    indexReg = regs.takeAny();
    masm.move32(Imm32(ToInt32(index) + indexDiff), indexReg);
  } else {
    indexReg = ToRegister(index);
    regs.takeUnchecked(indexReg);
    if (indexDiff != 0) {
      masm.add32(Imm32(indexDiff), indexReg);
    }
  }

  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(gen->runtime), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.passABIArg(indexReg);
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  // We don't need a sub32 here because indexReg must be in liveVolatileRegs
  // if indexDiff is not zero, so it will be restored below.
  MOZ_ASSERT_IF(indexDiff != 0, ool->liveVolatileRegs().has(indexReg));

  masm.PopRegsInMask(ool->liveVolatileRegs());

  masm.jump(ool->rejoin());
}
// Emit a post-write barrier for the (non-constant) object in |objreg|.
void CodeGenerator::emitPostWriteBarrier(Register objreg) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(objreg);
  EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
}
// Out-of-line slow path: save volatile registers, run the post-write
// barrier, and rejoin.
void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
    OutOfLineCallPostWriteBarrier* ool) {
  saveLiveVolatile(ool->lir());
  const LAllocation* obj = ool->object();
  emitPostWriteBarrier(obj);
  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
// If |maybeGlobal| is this script's global, skip the barrier when the realm
// records that the global has already been barriered.
void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
                                                OutOfLineCode* ool) {
  // Check whether an object is a global that we have already barriered before
  // calling into the VM.
  //
  // We only check for the script's global, not other globals within the same
  // compartment, because we bake in a pointer to realm->globalWriteBarriered
  // and doing that would be invalid for other realms because they could be
  // collected before the Ion code is discarded.

  if (!maybeGlobal->isConstant()) {
    return;
  }

  JSObject* obj = &maybeGlobal->toConstant()->toObject();
  if (gen->realm->maybeGlobal() != obj) {
    return;
  }

  const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
                ool->rejoin());
}
// Shared inline path for typed post-write barriers: skip when the object is
// in the nursery or an already-barriered global; take the OOL path when the
// stored value (of statically-known type |nurseryType|) is a nursery cell.
template <class LPostBarrierType, MIRType nurseryType>
void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
                                                OutOfLineCode* ool) {
  static_assert(NeedsPostBarrier(nurseryType));

  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  Register value = ToRegister(lir->value());
  // Debug-check that the MIR value type matches the template parameter.
  if constexpr (nurseryType == MIRType::Object) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
  } else if constexpr (nurseryType == MIRType::String) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
  } else {
    static_assert(nurseryType == MIRType::BigInt);
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
// Shared inline path for boxed-Value post-write barriers; like the typed
// variant but tests the Value for being a nursery cell at runtime.
template <class LPostBarrierType>
void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
                                                 OutOfLineCode* ool) {
  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  ValueOperand value = ToValue(lir, LPostBarrierType::ValueIndex);
  masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
// Post-write barrier for an object-typed store.
void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
}
// Post-write barrier for a string-typed store.
void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
}
// Post-write barrier for a BigInt-typed store.
void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
}
// Post-write barrier for a boxed-Value store.
void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommonV(lir, ool);
}
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteElementBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;          // Instruction whose store needs the barrier.
  const LAllocation* object_;  // Object written into (constant or register).
  const LAllocation* index_;   // Element index (register).

 public:
  OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
                                       const LAllocation* object,
                                       const LAllocation* index)
      : lir_(lir), object_(object), index_(index) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteElementBarrier(this);
  }

  LInstruction* lir() const { return lir_; }

  const LAllocation* object() const { return object_; }

  const LAllocation* index() const { return index_; }
};
// Out-of-line slow path: call PostWriteElementBarrier(rt, obj, index) in the
// VM, saving and restoring live volatile registers.
void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
    OutOfLineCallPostWriteElementBarrier* ool) {
  saveLiveVolatile(ool->lir());

  const LAllocation* obj = ool->object();
  const LAllocation* index = ool->index();

  Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
  Register indexreg = ToRegister(index);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(indexreg);

  if (obj->isConstant()) {
    // Materialize a constant object into a borrowed register.
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
  } else {
    regs.takeUnchecked(objreg);
  }

  Register runtimereg = regs.takeAny();
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.setupAlignedABICall();
  masm.mov(ImmPtr(gen->runtime), runtimereg);
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  masm.passABIArg(indexreg);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
// Element post-write barrier for an object-typed store.
void CodeGenerator::visitPostWriteElementBarrierO(
    LPostWriteElementBarrierO* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
                                                                          ool);
}
// Element post-write barrier for a string-typed store.
void CodeGenerator::visitPostWriteElementBarrierS(
    LPostWriteElementBarrierS* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
                                                                          ool);
}
// Element post-write barrier for a BigInt-typed store.
void CodeGenerator::visitPostWriteElementBarrierBI(
    LPostWriteElementBarrierBI* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
                                                                           ool);
}
// Element post-write barrier for a boxed-Value store.
void CodeGenerator::visitPostWriteElementBarrierV(
    LPostWriteElementBarrierV* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommonV(lir, ool);
}
// Debug assertion: the compiler elided a post-write barrier here, so verify
// at runtime that none was needed (object in nursery, or value not a
// nursery cell).
void CodeGenerator::visitAssertCanElidePostWriteBarrier(
    LAssertCanElidePostWriteBarrier* lir) {
  Register object = ToRegister(lir->object());
  ValueOperand value =
      ToValue(lir, LAssertCanElidePostWriteBarrier::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  Label ok;
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, &ok);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp, &ok);

  masm.assumeUnreachable("Unexpected missing post write barrier");

  masm.bind(&ok);
}
// Shared code to call a native function from Ion, used for both LCallNative
// (known single target) and LCallClassHook (callee only known at runtime).
// Builds a fake native exit frame, invokes |native| through the ABI, and
// loads the result Value from vp[0] into JSReturnOperand.
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
  MCallBase* mir = call->mir();

  uint32_t unusedStack = UnusedStackBytesForCall(mir->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argContextReg = ToRegister(call->getArgContextReg());
  const Register argUintNReg = ToRegister(call->getArgUintNReg());
  const Register argVpReg = ToRegister(call->getArgVpReg());

  // Misc. temporary registers.
  const Register tempReg = ToRegister(call->getTempReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Allocate space for the outparam, moving the StackPointer to what will be
  // &vp[1].
  masm.adjustStack(unusedStack);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. The StackPointer is moved
  // to &vp[0].
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    Register calleeReg = ToRegister(call->getCallee());
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));

    if (call->mir()->maybeCrossRealm()) {
      masm.switchToObjectRealm(calleeReg, tempReg);
    }
  } else {
    WrappedFunction* target = call->getSingleTarget();
    masm.Push(ObjectValue(*target->rawNativeJSFunction()));

    if (call->mir()->maybeCrossRealm()) {
      masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
      masm.switchToObjectRealm(tempReg, tempReg);
    }
  }

  // Preload arguments into registers.
  masm.loadJSContext(argContextReg);
  masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
  masm.moveStackPtrTo(argVpReg);

  masm.Push(argUintNReg);

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
  masm.enterFakeExitFrameForNative(argContextReg, tempReg,
                                   call->mir()->isConstructing());

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.passABIArg(argContextReg);
  masm.passABIArg(argUintNReg);
  masm.passABIArg(argVpReg);

  ensureOsiSpace();
  // If we're using a simulator build, `native` will already point to the
  // simulator's call-redirection code for LCallClassHook. Load the address in
  // a register first so that we don't try to redirect it a second time.
  bool emittedCall = false;
#ifdef JS_SIMULATOR
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    masm.movePtr(ImmPtr(native), tempReg);
    masm.callWithABI(tempReg);
    emittedCall = true;
  }
#endif
  if (!emittedCall) {
    masm.callWithABI(DynamicFunction<JSNative>(native), ABIType::General,
                     CheckUnsafeCallWithABI::DontCheckHasExitFrame);
  }

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

  if (call->mir()->maybeCrossRealm()) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  masm.loadValue(
      Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
      JSReturnOperand);

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
      mir->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Call a known native function. If the call result is unused and the target
// provides a dedicated ignores-return-value entry point, use that instead.
void CodeGenerator::visitCallNative(LCallNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithoutJitEntry());

  JSNative native = target->native();
  if (call->ignoresReturnValue() && target->hasJitInfo()) {
    const JSJitInfo* jitInfo = target->jitInfo();
    if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
      native = jitInfo->ignoresReturnValueMethod;
    }
  }
  emitCallNative(call, native);
}
// Call a class call/construct hook; the callee is only known at runtime.
void CodeGenerator::visitCallClassHook(LCallClassHook* call) {
  emitCallNative(call, call->mir()->target());
}
// Load the private pointer of a DOM object (native or proxy) into |priv|.
static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
                           DOMObjectKind kind) {
  // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
  // will be in the first slot but may be fixed or non-fixed.
  MOZ_ASSERT(obj != priv);

  switch (kind) {
    case DOMObjectKind::Native:
      // If it's a native object, the value must be in a fixed slot.
      // See CanAttachDOMCall in CacheIR.cpp.
      masm.debugAssertObjHasFixedSlots(obj, priv);
      masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
      break;
    case DOMObjectKind::Proxy: {
#ifdef DEBUG
      // Sanity check: it must be a DOM proxy.
      Label isDOMProxy;
      masm.branchTestProxyHandlerFamily(
          Assembler::Equal, obj, priv, GetDOMProxyHandlerFamily(), &isDOMProxy);
      masm.assumeUnreachable("Expected a DOM proxy");
      masm.bind(&isDOMProxy);
#endif
      // Proxies keep their private value in reserved slot 0.
      masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
      masm.loadPrivate(
          Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
      break;
    }
  }
}
// Call a DOM method through its JSJitInfo entry point. Builds an
// IonDOMMethod exit frame, passes (cx, obj, private, args), and loads the
// result Value; infallible methods skip the failure check.
void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithoutJitEntry());
  MOZ_ASSERT(target->hasJitInfo());
  MOZ_ASSERT(call->mir()->isCallDOMNative());

  int unusedStack = UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argJSContext = ToRegister(call->getArgJSContext());
  const Register argObj = ToRegister(call->getArgObj());
  const Register argPrivate = ToRegister(call->getArgPrivate());
  const Register argArgs = ToRegister(call->getArgArgs());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // DOM methods have the signature:
  //  bool (*)(JSContext*, HandleObject, void* private, const
  //  JSJitMethodCallArgs& args)
  // Where args is initialized from an argc and a vp, vp[0] is space for an
  // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
  // function arguments. Note that args stores the argv, not the vp, and
  // argv == vp + 2.

  // Nestle the stack up against the pushed arguments, leaving StackPointer at
  // &vp[1]
  masm.adjustStack(unusedStack);
  // argObj is filled with the extracted object, then returned.
  Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
  MOZ_ASSERT(obj == argObj);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. After this the StackPointer
  // points to &vp[0].
  masm.Push(ObjectValue(*target->rawNativeJSFunction()));

  // Now compute the argv value. Since StackPointer is pointing to &vp[0] and
  // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
  // StackPointer.
  static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
  static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
                IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);

  LoadDOMPrivate(masm, obj, argPrivate,
                 static_cast<MCallDOMNative*>(call->mir())->objectKind());

  // Push argc from the call instruction into what will become the IonExitFrame
  masm.Push(Imm32(call->numActualArgs()));

  // Push our argv onto the stack
  masm.Push(argArgs);
  // And store our JSJitMethodCallArgs* in argArgs.
  masm.moveStackPtrTo(argArgs);

  // Push |this| object for passing HandleObject. We push after argc to
  // maintain the same sp-relative location of the object pointer with other
  // DOMExitFrames.
  masm.Push(argObj);
  masm.moveStackPtrTo(argObj);

  if (call->mir()->maybeCrossRealm()) {
    // We use argJSContext as scratch register here.
    masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
    masm.switchToObjectRealm(argJSContext, argJSContext);
  }

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
  masm.loadJSContext(argJSContext);
  masm.enterFakeExitFrame(argJSContext, argJSContext,
                          ExitFrameType::IonDOMMethod);

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.loadJSContext(argJSContext);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argObj);
  masm.passABIArg(argPrivate);
  masm.passABIArg(argArgs);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (target->jitInfo()->isInfallible) {
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    // Test for failure.
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    // Load the outparam vp[0] into output register(s).
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  // Switch back to the current realm if needed. Note: if the DOM method threw
  // an exception, the exception handler will do this.
  if (call->mir()->maybeCrossRealm()) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "Clobbering ReturnReg should not affect the return value");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Emit a VM call that looks up an intrinsic value by name and stores it in
// this instruction's output Value. The property name is the only argument.
// NOTE(review): presumably this resolves a self-hosted intrinsic of the
// runtime — confirm against the GetIntrinsicValue VM function definition.
5786 void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
5787 pushArg(ImmGCPtr(lir->mir()->name()));
5789 using Fn = bool (*)(JSContext* cx, Handle<PropertyName*>, MutableHandleValue);
5790 callVM<Fn, GetIntrinsicValue>(lir);
// Slow path for a JS call: invoke |calleereg| through the generic
// InvokeFunction VM wrapper instead of jumping to jitcode directly.
// |unusedStack| bytes are freed first so the stack pointer coincides with the
// argument vector (argv), and reserved again afterwards so framePushed is
// unchanged on exit. Arguments are pushed in reverse of the Fn signature, as
// callVM expects.
5793 void CodeGenerator::emitCallInvokeFunction(
5794 LInstruction* call, Register calleereg, bool constructing,
5795 bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
5796 // Nestle %esp up to the argument vector.
5797 // Each path must account for framePushed_ separately, for callVM to be valid.
5798 masm.freeStack(unusedStack);
5800 pushArg(masm.getStackPointer()); // argv.
5801 pushArg(Imm32(argc)); // argc.
5802 pushArg(Imm32(ignoresReturnValue));
5803 pushArg(Imm32(constructing)); // constructing.
5804 pushArg(calleereg); // JSFunction*.
5806 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5807 MutableHandleValue);
5808 callVM<Fn, jit::InvokeFunction>(call);
5810 // Un-nestle %esp from the argument vector. No prefix was pushed.
5811 masm.reserveStack(unusedStack);
// Emit a call to an unknown (multi-target) callee by calling the shared
// IonGenericCall trampoline, which dispatches on the callee kind at runtime.
// The trampoline expects the callee in IonGenericCallCalleeReg and argc in
// the argc register; the caller (this code) is responsible for restoring the
// stack and switching back to its own realm afterwards.
5814 void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
5815 // The callee is passed straight through to the trampoline.
5816 MOZ_ASSERT(ToRegister(call->getCallee()) == IonGenericCallCalleeReg);
5818 Register argcReg = ToRegister(call->getArgc());
5819 uint32_t unusedStack =
5820 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5822 // Known-target case is handled by LCallKnown.
5823 MOZ_ASSERT(!call->hasSingleTarget());
5825 masm.checkStackAlignment();
5827 masm.move32(Imm32(call->numActualArgs()), argcReg);
5829 // Nestle the StackPointer up to the argument vector.
5830 masm.freeStack(unusedStack);
5831 ensureOsiSpace();
5833 auto kind = call->mir()->isConstructing() ? IonGenericCallKind::Construct
5834 : IonGenericCallKind::Call;
5836 TrampolinePtr genericCallStub =
5837 gen->jitRuntime()->getIonGenericCallStub(kind);
5838 uint32_t callOffset = masm.callJit(genericCallStub);
5839 markSafepointAt(callOffset, call);
5841 if (call->mir()->maybeCrossRealm()) {
5842 static_assert(!JSReturnOperand.aliases(ReturnReg),
5843 "ReturnReg available as scratch after scripted calls");
5844 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5847 // Restore stack pointer.
5848 masm.setFramePushed(frameSize());
5849 emitRestoreStackPointerFromFP();
5851 // If the return value of the constructing function is Primitive,
5852 // replace the return value with the Object from CreateThis.
5853 if (call->mir()->isConstructing()) {
5854 Label notPrimitive;
5855 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5856 &notPrimitive);
// The |this| object produced by CreateThis is still on the stack at offset
// |unusedStack| at this point; reload it as the call's result.
5857 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
5858 JSReturnOperand);
5859 #ifdef DEBUG
5860 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5861 &notPrimitive);
5862 masm.assumeUnreachable("CreateThis creates an object");
5863 #endif
5864 masm.bind(&notPrimitive);
// Emit a loop that shifts the |argc| Values currently on the stack down by
// one word (8 bytes), overwriting the bottom value. Clobbers |curr|, |end|
// and |scratch|; control jumps to |done| when the copy is finished.
5868 void JitRuntime::generateIonGenericCallArgumentsShift(
5869 MacroAssembler& masm, Register argc, Register curr, Register end,
5870 Register scratch, Label* done) {
5871 static_assert(sizeof(Value) == 8);
5872 // There are |argc| Values on the stack. Shift them all down by 8 bytes,
5873 // overwriting the first value.
5875 // Initialize `curr` to the destination of the first copy, and `end` to the
5876 // final value of curr.
5877 masm.moveStackPtrTo(curr);
5878 masm.computeEffectiveAddress(BaseValueIndex(curr, argc), end);
// Each iteration copies one 8-byte word from one slot above |curr| down into
// |curr|, then advances; the loop exits (to |done|) once curr == end.
5880 Label loop;
5881 masm.bind(&loop);
5882 masm.branchPtr(Assembler::Equal, curr, end, done);
5883 masm.loadPtr(Address(curr, 8), scratch);
5884 masm.storePtr(scratch, Address(curr, 0));
5885 masm.addPtr(Imm32(sizeof(uintptr_t)), curr);
5886 masm.jump(&loop);
// Generate the shared generic-call trampoline used by visitCallGeneric.
// Dispatches at runtime on the callee kind: jit-entry functions (with an
// arguments-rectifier when argc is too small), native functions, fun_call,
// bound functions, and finally a fallback VM call for everything else.
5889 void JitRuntime::generateIonGenericCallStub(MacroAssembler& masm,
5890 IonGenericCallKind kind) {
5891 AutoCreatedBy acb(masm, "JitRuntime::generateIonGenericCallStub");
5892 ionGenericCallStubOffset_[kind] = startTrampolineCode(masm);
5894 // This code is tightly coupled with visitCallGeneric.
5896 // Upon entry:
5897 // IonGenericCallCalleeReg contains a pointer to the callee object.
5898 // IonGenericCallArgcReg contains the number of actual args.
5899 // The arguments have been pushed onto the stack:
5900 // [newTarget] (iff isConstructing)
5901 // [argN]
5902 // ...
5903 // [arg1]
5904 // [arg0]
5905 // [this]
5906 // <return address> (if not JS_USE_LINK_REGISTER)
5908 // This trampoline is responsible for entering the callee's realm,
5909 // massaging the stack into the right shape, and then performing a
5910 // tail call. We will return directly to the Ion code from the
5911 // callee.
5913 // To do a tail call, we keep the return address in a register, even
5914 // on platforms that don't normally use a link register, and push it
5915 // just before jumping to the callee, after we are done setting up
5916 // the stack.
5918 // The caller is responsible for switching back to the caller's
5919 // realm and cleaning up the stack.
5921 Register calleeReg = IonGenericCallCalleeReg;
5922 Register argcReg = IonGenericCallArgcReg;
5923 Register scratch = IonGenericCallScratch;
5924 Register scratch2 = IonGenericCallScratch2;
5926 #ifndef JS_USE_LINK_REGISTER
5927 Register returnAddrReg = IonGenericCallReturnAddrReg;
5928 masm.pop(returnAddrReg);
5929 #endif
5931 #ifdef JS_CODEGEN_ARM
5932 // The default second scratch register on arm is lr, which we need
5933 // preserved for tail calls.
5934 AutoNonDefaultSecondScratchRegister andssr(masm, IonGenericSecondScratchReg);
5935 #endif
5937 bool isConstructing = kind == IonGenericCallKind::Construct;
// |entry| is also the re-entry point used by the fun_call and bound-function
// paths after they have rewritten the callee/arguments.
5939 Label entry, notFunction, noJitEntry, vmCall;
5940 masm.bind(&entry);
5942 // Guard that the callee is actually a function.
5943 masm.branchTestObjIsFunction(Assembler::NotEqual, calleeReg, scratch,
5944 calleeReg, &notFunction);
5946 // Guard that the callee supports the [[Call]] or [[Construct]] operation.
5947 // If these tests fail, we will call into the VM to throw an exception.
5948 if (isConstructing) {
5949 masm.branchTestFunctionFlags(calleeReg, FunctionFlags::CONSTRUCTOR,
5950 Assembler::Zero, &vmCall);
5951 } else {
5952 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
5953 calleeReg, scratch, &vmCall);
5956 if (isConstructing) {
5957 // Use the slow path if CreateThis was unable to create the |this| object.
5958 Address thisAddr(masm.getStackPointer(), 0);
5959 masm.branchTestNull(Assembler::Equal, thisAddr, &vmCall);
5962 masm.switchToObjectRealm(calleeReg, scratch);
5964 // Load jitCodeRaw for callee if it exists.
5965 masm.branchIfFunctionHasNoJitEntry(calleeReg, isConstructing, &noJitEntry);
5967 // ****************************
5968 // * Functions with jit entry *
5969 // ****************************
5970 masm.loadJitCodeRaw(calleeReg, scratch2);
5972 // Construct the JitFrameLayout.
5973 masm.PushCalleeToken(calleeReg, isConstructing);
5974 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcReg, scratch);
5975 #ifndef JS_USE_LINK_REGISTER
5976 masm.push(returnAddrReg);
5977 #endif
5979 // Check whether we need a rectifier frame.
5980 Label noRectifier;
5981 masm.loadFunctionArgCount(calleeReg, scratch);
5982 masm.branch32(Assembler::BelowOrEqual, scratch, argcReg, &noRectifier);
5984 // Tail-call the arguments rectifier.
5985 // Because all trampolines are created at the same time,
5986 // we can't create a TrampolinePtr for the arguments rectifier,
5987 // because it hasn't been linked yet. We can, however, directly
5988 // encode its offset.
5989 Label rectifier;
5990 bindLabelToOffset(&rectifier, argumentsRectifierOffset_);
5992 masm.jump(&rectifier);
5995 // Tail call the jit entry.
5996 masm.bind(&noRectifier);
5997 masm.jump(scratch2);
5999 // ********************
6000 // * Native functions *
6001 // ********************
6002 masm.bind(&noJitEntry);
6003 if (!isConstructing) {
6004 generateIonGenericCallFunCall(masm, &entry, &vmCall);
6006 generateIonGenericCallNativeFunction(masm, isConstructing);
6008 // *******************
6009 // * Bound functions *
6010 // *******************
6011 // TODO: support class hooks?
6012 masm.bind(&notFunction);
6013 if (!isConstructing) {
6014 // TODO: support generic bound constructors?
6015 generateIonGenericCallBoundFunction(masm, &entry, &vmCall);
6018 // ********************
6019 // * Fallback VM call *
6020 // ********************
6021 masm.bind(&vmCall);
6023 masm.push(masm.getStackPointer()); // argv
6024 masm.push(argcReg); // argc
6025 masm.push(Imm32(false)); // ignores return value
6026 masm.push(Imm32(isConstructing)); // constructing
6027 masm.push(calleeReg); // callee
6029 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6030 MutableHandleValue);
// As with the rectifier above, the InvokeFunction VM wrapper lives in the
// same trampoline blob and is not yet linked, so we jump to its recorded
// offset rather than through a TrampolinePtr.
6031 VMFunctionId id = VMFunctionToId<Fn, jit::InvokeFunction>::id;
6032 uint32_t invokeFunctionOffset = functionWrapperOffsets_[size_t(id)];
6033 Label invokeFunctionVMEntry;
6034 bindLabelToOffset(&invokeFunctionVMEntry, invokeFunctionOffset);
6036 masm.pushFrameDescriptor(FrameType::IonJS);
6037 #ifndef JS_USE_LINK_REGISTER
6038 masm.push(returnAddrReg);
6039 #endif
6040 masm.jump(&invokeFunctionVMEntry);
// Generate the native-function path of the generic call trampoline: push the
// callee Value as argv[0], build a native exit frame, and call the C++ native
// with the (cx, argc, argv) ABI. Unlike the jit-entry paths this is a real
// call followed by masm.ret(), not a tail call.
6043 void JitRuntime::generateIonGenericCallNativeFunction(MacroAssembler& masm,
6044 bool isConstructing) {
6045 Register calleeReg = IonGenericCallCalleeReg;
6046 Register argcReg = IonGenericCallArgcReg;
6047 Register scratch = IonGenericCallScratch;
6048 Register scratch2 = IonGenericCallScratch2;
6049 Register contextReg = IonGenericCallScratch3;
6050 #ifndef JS_USE_LINK_REGISTER
6051 Register returnAddrReg = IonGenericCallReturnAddrReg;
6052 #endif
6054 // Push a value containing the callee, which will become argv[0].
6055 masm.pushValue(JSVAL_TYPE_OBJECT, calleeReg);
// Under the simulator, all natives go through a single redirected entry
// point instead of loading the native pointer from the function object.
6057 // Load the callee address into calleeReg.
6058 #ifdef JS_SIMULATOR
6059 masm.movePtr(ImmPtr(RedirectedCallAnyNative()), calleeReg);
6060 #else
6061 masm.loadPrivate(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6062 calleeReg);
6063 #endif
6065 // Load argv into scratch2.
6066 masm.moveStackPtrTo(scratch2);
6068 // Push argc.
6069 masm.push(argcReg);
6071 masm.loadJSContext(contextReg);
6073 // Construct native exit frame. Note that unlike other cases in this
6074 // trampoline, this code does not use a tail call.
6075 masm.pushFrameDescriptor(FrameType::IonJS);
6076 #ifdef JS_USE_LINK_REGISTER
6077 masm.pushReturnAddress();
6078 #else
6079 masm.push(returnAddrReg);
6080 #endif
6082 masm.push(FramePointer);
6083 masm.moveStackPtrTo(FramePointer);
6084 masm.enterFakeExitFrameForNative(contextReg, scratch, isConstructing);
6086 masm.setupUnalignedABICall(scratch);
6087 masm.passABIArg(contextReg); // cx
6088 masm.passABIArg(argcReg); // argc
6089 masm.passABIArg(scratch2); // argv
6091 masm.callWithABI(calleeReg);
6093 // Test for failure.
6094 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
// On success the native has stored its result in the exit frame's vp slot.
6096 masm.loadValue(
6097 Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
6098 JSReturnOperand);
6100 // Leave the exit frame.
6101 masm.moveToStackPtr(FramePointer);
6102 masm.pop(FramePointer);
6104 // Return.
6105 masm.ret();
// Specialize the case where the callee's native is js::fun_call: make |this|
// the new callee, shift the remaining arguments down one slot, decrement
// argc, and restart the trampoline at |entry|. Bails out to |vmCall| if
// |this| is not an object; falls through past |notFunCall| for any other
// native.
6108 void JitRuntime::generateIonGenericCallFunCall(MacroAssembler& masm,
6109 Label* entry, Label* vmCall) {
6110 Register calleeReg = IonGenericCallCalleeReg;
6111 Register argcReg = IonGenericCallArgcReg;
6112 Register scratch = IonGenericCallScratch;
6113 Register scratch2 = IonGenericCallScratch2;
6114 Register scratch3 = IonGenericCallScratch3;
6116 Label notFunCall;
6117 masm.branchPtr(Assembler::NotEqual,
6118 Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6119 ImmPtr(js::fun_call), &notFunCall);
6121 // In general, we can implement fun_call by replacing calleeReg with
6122 // |this|, sliding all the other arguments down, and decrementing argc.
6124 // *BEFORE* *AFTER*
6125 // [argN] argc = N+1 <padding>
6126 // ... [argN] argc = N
6127 // [arg1] ...
6128 // [arg0] [arg1] <- now arg0
6129 // [this] <- top of stack (aligned) [arg0] <- now this
6131 // The only exception is when argc is already 0, in which case instead
6132 // of shifting arguments down we replace [this] with UndefinedValue():
6134 // *BEFORE* *AFTER*
6135 // [this] argc = 0 [undef] argc = 0
6137 // After making this transformation, we can jump back to the beginning
6138 // of this trampoline to handle the inner call.
6140 // Guard that |this| is an object. If it is, replace calleeReg.
6141 masm.fallibleUnboxObject(Address(masm.getStackPointer(), 0), scratch, vmCall);
6142 masm.movePtr(scratch, calleeReg);
6144 Label hasArgs;
6145 masm.branch32(Assembler::NotEqual, argcReg, Imm32(0), &hasArgs);
6147 // No arguments. Replace |this| with |undefined| and start from the top.
6148 masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), 0));
6149 masm.jump(entry);
6151 masm.bind(&hasArgs);
6153 Label doneSliding;
6154 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6155 scratch3, &doneSliding);
6156 masm.bind(&doneSliding);
6157 masm.sub32(Imm32(1), argcReg);
6159 masm.jump(entry);
6161 masm.bind(&notFunCall);
// Handle calls where the callee is a BoundFunctionObject: verify the combined
// argument count, fix up stack alignment for an odd number of bound args,
// replace |this| with the bound this, push the bound arguments (inline or
// out-of-line storage), load the bound target into the callee register, and
// loop back to |entry| to call the target. Non-bound objects and overlong
// argument lists go to |vmCall|.
6164 void JitRuntime::generateIonGenericCallBoundFunction(MacroAssembler& masm,
6165 Label* entry,
6166 Label* vmCall) {
6167 Register calleeReg = IonGenericCallCalleeReg;
6168 Register argcReg = IonGenericCallArgcReg;
6169 Register scratch = IonGenericCallScratch;
6170 Register scratch2 = IonGenericCallScratch2;
6171 Register scratch3 = IonGenericCallScratch3;
6173 masm.branchTestObjClass(Assembler::NotEqual, calleeReg,
6174 &BoundFunctionObject::class_, scratch, calleeReg,
6175 vmCall);
6177 Address targetSlot(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
6178 Address flagsSlot(calleeReg, BoundFunctionObject::offsetOfFlagsSlot());
6179 Address thisSlot(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
6180 Address firstInlineArgSlot(
6181 calleeReg, BoundFunctionObject::offsetOfFirstInlineBoundArg());
// The bound-arg count is stored in the upper bits of the flags slot, above
// NumBoundArgsShift.
6183 // Check that we won't be pushing too many arguments.
6184 masm.load32(flagsSlot, scratch);
6185 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6186 masm.add32(argcReg, scratch);
6187 masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX), vmCall);
6189 // The stack is currently correctly aligned for a jit call. We will
6190 // be updating the `this` value and potentially adding additional
6191 // arguments. On platforms with 16-byte alignment, if the number of
6192 // bound arguments is odd, we have to move the arguments that are
6193 // currently on the stack. For example, with one bound argument:
6195 // *BEFORE* *AFTER*
6196 // [argN] <padding>
6197 // ... [argN] |
6198 // [arg1] ... | These arguments have been
6199 // [arg0] [arg1] | shifted down 8 bytes.
6200 // [this] <- top of stack (aligned) [arg0] v
6201 // [bound0] <- one bound argument (odd)
6202 // [boundThis] <- top of stack (aligned)
6204 Label poppedThis;
6205 if (JitStackValueAlignment > 1) {
6206 Label alreadyAligned;
// Testing the lowest bit of the shifted-count field determines the parity of
// the number of bound arguments.
6207 masm.branchTest32(Assembler::Zero, flagsSlot,
6208 Imm32(1 << BoundFunctionObject::NumBoundArgsShift),
6209 &alreadyAligned);
6211 // We have an odd number of bound arguments. Shift the existing arguments
6212 // down by 8 bytes.
6213 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6214 scratch3, &poppedThis);
6215 masm.bind(&alreadyAligned);
6218 // Pop the current `this`. It will be replaced with the bound `this`.
6219 masm.freeStack(sizeof(Value));
6220 masm.bind(&poppedThis);
6222 // Load the number of bound arguments in scratch
6223 masm.load32(flagsSlot, scratch);
6224 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6226 Label donePushingBoundArguments;
6227 masm.branch32(Assembler::Equal, scratch, Imm32(0),
6228 &donePushingBoundArguments);
6230 // Update argc to include bound arguments.
6231 masm.add32(scratch, argcReg);
// Bound args are stored inline in the object up to MaxInlineBoundArgs;
// beyond that, the slot holds an array object whose elements are the args.
6233 // Load &boundArgs[0] in scratch2.
6234 Label outOfLineBoundArguments, haveBoundArguments;
6235 masm.branch32(Assembler::Above, scratch,
6236 Imm32(BoundFunctionObject::MaxInlineBoundArgs),
6237 &outOfLineBoundArguments);
6238 masm.computeEffectiveAddress(firstInlineArgSlot, scratch2);
6239 masm.jump(&haveBoundArguments);
6241 masm.bind(&outOfLineBoundArguments);
6242 masm.unboxObject(firstInlineArgSlot, scratch2);
6243 masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2);
6245 masm.bind(&haveBoundArguments);
6247 // Load &boundArgs[numBoundArgs] in scratch.
6248 BaseObjectElementIndex lastBoundArg(scratch2, scratch);
6249 masm.computeEffectiveAddress(lastBoundArg, scratch);
6251 // Push the bound arguments, starting with the last one.
6252 // Copying pre-decrements scratch until scratch2 is reached.
6253 Label boundArgumentsLoop;
6254 masm.bind(&boundArgumentsLoop);
6255 masm.subPtr(Imm32(sizeof(Value)), scratch);
6256 masm.pushValue(Address(scratch, 0));
6257 masm.branchPtr(Assembler::Above, scratch, scratch2, &boundArgumentsLoop);
6258 masm.bind(&donePushingBoundArguments);
6260 // Push the bound `this`.
6261 masm.pushValue(thisSlot);
6263 // Load the target in calleeReg.
6264 masm.unboxObject(targetSlot, calleeReg);
6266 // At this point, all preconditions for entering the trampoline are met:
6267 // - calleeReg contains a pointer to the callee object
6268 // - argcReg contains the number of actual args (now including bound args)
6269 // - the arguments are on the stack with the correct alignment.
6270 // Instead of generating more code, we can jump back to the entry point
6271 // of the trampoline to call the bound target.
6272 masm.jump(entry);
// Emit a direct call to a known single-target function that has a jit entry.
// Builds the JitFrameLayout inline and calls the target's jitCodeRaw. Class
// constructors invoked without |new| are routed through the InvokeFunction
// VM path instead (presumably so the VM can raise the proper error — confirm
// against InvokeFunction).
6275 void CodeGenerator::visitCallKnown(LCallKnown* call) {
6276 Register calleereg = ToRegister(call->getFunction());
6277 Register objreg = ToRegister(call->getTempObject());
6278 uint32_t unusedStack =
6279 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
6280 WrappedFunction* target = call->getSingleTarget();
6282 // Native single targets (except Wasm and TrampolineNative functions) are
6283 // handled by LCallNative.
6284 MOZ_ASSERT(target->hasJitEntry());
6286 // Missing arguments must have been explicitly appended by WarpBuilder.
6287 DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
6288 MOZ_ASSERT(target->nargs() <=
6289 call->mir()->numStackArgs() - numNonArgsOnStack);
6291 MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());
6293 masm.checkStackAlignment();
6295 if (target->isClassConstructor() && !call->isConstructing()) {
6296 emitCallInvokeFunction(call, calleereg, call->isConstructing(),
6297 call->ignoresReturnValue(), call->numActualArgs(),
6298 unusedStack);
6299 return;
6302 MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());
6304 MOZ_ASSERT(!call->mir()->needsThisCheck());
6306 if (call->mir()->maybeCrossRealm()) {
6307 masm.switchToObjectRealm(calleereg, objreg);
6310 masm.loadJitCodeRaw(calleereg, objreg);
6312 // Nestle the StackPointer up to the argument vector.
6313 masm.freeStack(unusedStack);
6315 // Construct the JitFrameLayout.
6316 masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
6317 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, call->numActualArgs());
6319 // Finally call the function in objreg.
6320 ensureOsiSpace();
6321 uint32_t callOffset = masm.callJit(objreg);
6322 markSafepointAt(callOffset, call);
6324 if (call->mir()->maybeCrossRealm()) {
6325 static_assert(!JSReturnOperand.aliases(ReturnReg),
6326 "ReturnReg available as scratch after scripted calls");
6327 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6330 // Restore stack pointer: pop JitFrameLayout fields still left on the stack
6331 // and undo the earlier |freeStack(unusedStack)|.
6332 int prefixGarbage =
6333 sizeof(JitFrameLayout) - JitFrameLayout::bytesPoppedAfterCall();
6334 masm.adjustStack(prefixGarbage - unusedStack);
6336 // If the return value of the constructing function is Primitive,
6337 // replace the return value with the Object from CreateThis.
6338 if (call->mir()->isConstructing()) {
6339 Label notPrimitive;
6340 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6341 &notPrimitive);
6342 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
6343 JSReturnOperand);
6344 #ifdef DEBUG
6345 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6346 &notPrimitive);
6347 masm.assumeUnreachable("CreateThis creates an object");
6348 #endif
6349 masm.bind(&notPrimitive);
// Shared slow path for apply-like LIR instructions (LApply*/LConstruct*):
// the arguments have already been copied onto the stack, so the current
// stack pointer is argv. Calls InvokeFunction in the VM with the
// instruction's argc/constructing/ignoresReturnValue operands.
6353 template <typename T>
6354 void CodeGenerator::emitCallInvokeFunction(T* apply) {
6355 Register objreg = ToRegister(apply->getTempObject());
6357 // Push the space used by the arguments.
6358 masm.moveStackPtrTo(objreg);
6360 pushArg(objreg); // argv.
6361 pushArg(ToRegister(apply->getArgc())); // argc.
6362 pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
6363 pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
6364 pushArg(ToRegister(apply->getFunction())); // JSFunction*.
6366 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6367 MutableHandleValue);
6368 callVM<Fn, jit::InvokeFunction>(apply);
6371 // Do not bailout after the execution of this function since the stack no longer
6372 // correspond to what is expected by the snapshots.
// Reserve stack space for |argcreg| Values plus any alignment padding needed
// to keep the eventual JitFrameLayout on the JitStackAlignment. |scratch| is
// clobbered; |argcreg| is preserved.
6373 void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
6374 Register scratch) {
6375 // Use scratch register to calculate stack space (including padding).
6376 masm.movePtr(argcreg, scratch);
6378 // Align the JitFrameLayout on the JitStackAlignment.
6379 if (JitStackValueAlignment > 1) {
6380 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6381 "Stack padding assumes that the frameSize is correct");
6382 MOZ_ASSERT(JitStackValueAlignment == 2);
6383 Label noPaddingNeeded;
6384 // if the number of arguments is odd, then we do not need any padding.
6385 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6386 masm.addPtr(Imm32(1), scratch);
6387 masm.bind(&noPaddingNeeded);
6390 // Reserve space for copying the arguments.
6391 NativeObject::elementsSizeMustNotOverflow();
6392 masm.lshiftPtr(Imm32(ValueShift), scratch);
6393 masm.subFromStackPtr(scratch);
6395 #ifdef DEBUG
6396 // Put a magic value in the space reserved for padding. Note, this code
6397 // cannot be merged with the previous test, as not all architectures can
6398 // write below their stack pointers.
6399 if (JitStackValueAlignment > 1) {
6400 MOZ_ASSERT(JitStackValueAlignment == 2);
6401 Label noPaddingNeeded;
6402 // if the number of arguments is odd, then we do not need any padding.
6403 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6404 BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
6405 masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
6406 masm.bind(&noPaddingNeeded);
6408 #endif
6411 // Do not bailout after the execution of this function since the stack no longer
6412 // correspond to what is expected by the snapshots.
// Like emitAllocateSpaceForApply, but additionally pushes |new.target| (held
// in |newTargetAndScratch|) before the argument space; the register is then
// reused as scratch for the size computation.
6413 void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
6414 Register argcreg, Register newTargetAndScratch) {
6415 // Align the JitFrameLayout on the JitStackAlignment. Contrary to
6416 // |emitAllocateSpaceForApply()|, we're always pushing a magic value, because
6417 // we can't write to |newTargetAndScratch| before |new.target| has
6418 // been pushed onto the stack.
6419 if (JitStackValueAlignment > 1) {
6420 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6421 "Stack padding assumes that the frameSize is correct");
6422 MOZ_ASSERT(JitStackValueAlignment == 2);
6424 Label noPaddingNeeded;
6425 // If the number of arguments is even, then we do not need any padding.
// Note the parity test is inverted relative to emitAllocateSpaceForApply
// because the pushed |new.target| slot itself changes the alignment.
6426 masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
6427 masm.pushValue(MagicValue(JS_ARG_POISON));
6428 masm.bind(&noPaddingNeeded);
6431 // Push |new.target| after the padding value, but before any arguments.
6432 masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndScratch);
6434 // Use newTargetAndScratch to calculate stack space (including padding).
6435 masm.movePtr(argcreg, newTargetAndScratch);
6437 // Reserve space for copying the arguments.
6438 NativeObject::elementsSizeMustNotOverflow();
6439 masm.lshiftPtr(Imm32(ValueShift), newTargetAndScratch);
6440 masm.subFromStackPtr(newTargetAndScratch);
6443 // Destroys argvIndex and copyreg.
// Copy |argvIndex| Values from argvSrcBase+argvSrcOffset to the stack at
// argvDstOffset, one machine word at a time, counting argvIndex down to zero.
// Precondition: |argvIndex| is non-zero — callers branch around this code
// when there are no arguments, since the loop body runs before the test.
6444 void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
6445 Register argvIndex, Register copyreg,
6446 size_t argvSrcOffset,
6447 size_t argvDstOffset) {
6448 Label loop;
6449 masm.bind(&loop);
6451 // As argvIndex is off by 1, and we use the decBranchPtr instruction
6452 // to loop back, we have to substract the size of the word which are
6453 // copied.
6454 BaseValueIndex srcPtr(argvSrcBase, argvIndex,
6455 int32_t(argvSrcOffset) - sizeof(void*));
6456 BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
6457 int32_t(argvDstOffset) - sizeof(void*));
6458 masm.loadPtr(srcPtr, copyreg);
6459 masm.storePtr(copyreg, dstPtr);
6461 // Handle 32 bits architectures.
// On 32-bit targets a Value spans two words, so copy the low word too.
6462 if (sizeof(Value) == 2 * sizeof(void*)) {
6463 BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
6464 int32_t(argvSrcOffset) - 2 * sizeof(void*));
6465 BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
6466 int32_t(argvDstOffset) - 2 * sizeof(void*));
6467 masm.loadPtr(srcPtrLow, copyreg);
6468 masm.storePtr(copyreg, dstPtrLow);
6471 masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
// Recompute the stack pointer as FramePointer - frameSize(), i.e. sp =
// fp - frameSize.
6474 void CodeGenerator::emitRestoreStackPointerFromFP() {
6475 // This is used to restore the stack pointer after a call with a dynamic
6476 // number of arguments.
6478 MOZ_ASSERT(masm.framePushed() == frameSize());
6480 int32_t offset = -int32_t(frameSize());
6481 masm.computeEffectiveAddress(Address(FramePointer, offset),
6482 masm.getStackPointer());
// Copy |argcreg| argument Values from this frame's actual-args area (skipping
// |extraFormals| leading formals) into the space already reserved at the top
// of the stack. No-op when argc is zero. Clobbers |scratch| and |copyreg|.
6485 void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
6486 Register copyreg, uint32_t extraFormals) {
6487 Label end;
6489 // Skip the copy of arguments if there are none.
6490 masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);
6492 // clang-format off
6494 // We are making a copy of the arguments which are above the JitFrameLayout
6495 // of the current Ion frame.
6497 // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
6499 // clang-format on
6501 // Compute the source and destination offsets into the stack.
6502 Register argvSrcBase = FramePointer;
6503 size_t argvSrcOffset =
6504 JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
6505 size_t argvDstOffset = 0;
6507 Register argvIndex = scratch;
6508 masm.move32(argcreg, argvIndex);
6510 // Copy arguments.
6511 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6512 argvDstOffset);
6514 // Join with all arguments copied and the extra stack usage computed.
6515 masm.bind(&end);
// Push arguments for fun.apply(thisArg, arguments-from-this-frame): allocate
// space, copy the caller's actual args, then push |this|.
6518 void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
6519 Register scratch) {
6520 // Holds the function nargs. Initially the number of args to the caller.
6521 Register argcreg = ToRegister(apply->getArgc());
6522 Register copyreg = ToRegister(apply->getTempObject());
6523 uint32_t extraFormals = apply->numExtraFormals();
6525 emitAllocateSpaceForApply(argcreg, scratch);
6527 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6529 // Push |this|.
6530 masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
// Push arguments for fun.apply(thisArg, argumentsObject): read argc from the
// ArgumentsObject's length, allocate space, copy its argument data onto the
// stack, then push |this|. The argsObj register is repurposed to hold argc.
6533 void CodeGenerator::emitPushArguments(LApplyArgsObj* apply, Register scratch) {
6534 // argc and argsObj are mapped to the same calltemp register.
6535 MOZ_ASSERT(apply->getArgsObj() == apply->getArgc());
6537 Register tmpArgc = ToRegister(apply->getTempObject());
6538 Register argsObj = ToRegister(apply->getArgsObj());
6540 // Load argc into tmpArgc.
6541 masm.loadArgumentsObjectLength(argsObj, tmpArgc);
6543 // Allocate space on the stack for arguments. This modifies scratch.
6544 emitAllocateSpaceForApply(tmpArgc, scratch);
6546 // Load arguments data
6547 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
6548 argsObj);
6549 size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
6551 // This is the end of the lifetime of argsObj.
6552 // After this call, the argsObj register holds the argument count instead.
6553 emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);
6555 masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
// Copy |tmpArgc| Values from |srcBaseAndArgc|+|argvSrcOffset| into the space
// pre-allocated at the top of the stack, then leave the argument count in
// |srcBaseAndArgc| (zero if nothing was copied). Clobbers |scratch| and
// |tmpArgc|.
6558 void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
6559 Register srcBaseAndArgc,
6560 Register scratch,
6561 size_t argvSrcOffset) {
6562 // Preconditions:
6563 // 1. |tmpArgc| * sizeof(Value) bytes have been allocated at the top of
6564 // the stack to hold arguments.
6565 // 2. |srcBaseAndArgc| + |srcOffset| points to an array of |tmpArgc| values.
6567 // Postconditions:
6568 // 1. The arguments at |srcBaseAndArgc| + |srcOffset| have been copied into
6569 // the allocated space.
6570 // 2. |srcBaseAndArgc| now contains the original value of |tmpArgc|.
6572 // |scratch| is used as a temp register within this function and clobbered.
6574 Label noCopy, epilogue;
6576 // Skip the copy of arguments if there are none.
6577 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
6579 // Copy the values. This code is skipped entirely if there are
6580 // no values.
6581 size_t argvDstOffset = 0;
6583 Register argvSrcBase = srcBaseAndArgc;
6584 Register copyreg = scratch;
// Save argc across the copy (the loop consumes tmpArgc as its counter); the
// destination offset is bumped to skip the just-pushed word.
6586 masm.push(tmpArgc);
6587 Register argvIndex = tmpArgc;
6588 argvDstOffset += sizeof(void*);
6590 // Copy
6591 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6592 argvDstOffset);
6594 // Restore.
6595 masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
6596 masm.jump(&epilogue);
6598 // Clear argc if we skipped the copy step.
6599 masm.bind(&noCopy);
6600 masm.movePtr(ImmWord(0), srcBaseAndArgc);
6602 // Join with all arguments copied and the extra stack usage computed.
6603 // Note, "srcBase" has become "argc".
6604 masm.bind(&epilogue);
// Push arguments for fun.apply(thisArg, array): argc is the array's length,
// and the array's elements are copied onto the stack, followed by |this|.
6607 void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply,
6608 Register scratch) {
6609 Register tmpArgc = ToRegister(apply->getTempObject());
6610 Register elementsAndArgc = ToRegister(apply->getElements());
6612 // Invariants guarded in the caller:
6613 // - the array is not too long
6614 // - the array length equals its initialized length
6616 // The array length is our argc for the purposes of allocating space.
6617 Address length(ToRegister(apply->getElements()),
6618 ObjectElements::offsetOfLength());
6619 masm.load32(length, tmpArgc);
6621 // Allocate space for the values.
6622 emitAllocateSpaceForApply(tmpArgc, scratch);
6624 // After this call "elements" has become "argc".
6625 size_t elementsOffset = 0;
6626 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6628 // Push |this|.
6629 masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
// Push arguments for Reflect.construct-style calls taking this frame's
// actual args: push |new.target| plus padding, copy the args, then push
// |this|. |scratch| aliases the new.target register and is consumed by the
// allocation step.
6632 void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct,
6633 Register scratch) {
6634 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6636 // Holds the function nargs. Initially the number of args to the caller.
6637 Register argcreg = ToRegister(construct->getArgc());
6638 Register copyreg = ToRegister(construct->getTempObject());
6639 uint32_t extraFormals = construct->numExtraFormals();
6641 // Allocate space for the values.
6642 // After this call "newTarget" has become "scratch".
6643 emitAllocateSpaceForConstructAndPushNewTarget(argcreg, scratch);
6645 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6647 // Push |this|.
6648 masm.pushValue(ToValue(construct, LConstructArgsGeneric::ThisIndex));
// Push the call arguments for constructing with an array of arguments
// (new-with-spread). Mirrors the LApplyArrayGeneric path but also pushes
// newTarget. After emitPushArrayAsArguments() the elements register holds
// argc, and after space allocation the newTarget register is plain scratch.
6651 void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct,
6652 Register scratch) {
6653 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6655 Register tmpArgc = ToRegister(construct->getTempObject());
6656 Register elementsAndArgc = ToRegister(construct->getElements());
6658 // Invariants guarded in the caller:
6659 // - the array is not too long
6660 // - the array length equals its initialized length
6662 // The array length is our argc for the purposes of allocating space.
6663 Address length(ToRegister(construct->getElements()),
6664 ObjectElements::offsetOfLength());
6665 masm.load32(length, tmpArgc);
6667 // Allocate space for the values.
6668 emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, scratch);
6670 // After this call "elements" has become "argc" and "newTarget" has become
6671 // "scratch".
6672 size_t elementsOffset = 0;
6673 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6675 // Push |this|.
6676 masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
// Shared code generation for all generic apply/construct LIR ops
// (LApplyArgsGeneric, LApplyArrayGeneric, LApplyArgsObj, LConstructArgsGeneric,
// LConstructArrayGeneric). Emits the argument push, then a fast path that
// calls JIT code directly (through the arguments rectifier on underflow) and
// a slow path (&invoke) that falls back to a VM invoke for non-function
// callees, functions without a JIT entry, or when CreateThis failed.
6679 template <typename T>
6680 void CodeGenerator::emitApplyGeneric(T* apply) {
6681 // Holds the function object.
6682 Register calleereg = ToRegister(apply->getFunction());
6684 // Temporary register for modifying the function object.
6685 Register objreg = ToRegister(apply->getTempObject());
6686 Register scratch = ToRegister(apply->getTempForArgCopy());
6688 // Holds the function nargs, computed in the invoker or (for ApplyArray,
6689 // ConstructArray, or ApplyArgsObj) in the argument pusher.
6690 Register argcreg = ToRegister(apply->getArgc());
6692 // Copy the arguments of the current function.
6694 // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also
6695 // compute argc. The argc register and the elements/argsObj register
6696 // are the same; argc must not be referenced before the call to
6697 // emitPushArguments() and elements/argsObj must not be referenced
6698 // after it returns.
6700 // In the case of ConstructArray or ConstructArgs, also overwrite newTarget
6701 // with scratch; newTarget must not be referenced after this point.
6703 // objreg is dead across this call.
6704 emitPushArguments(apply, scratch);
6706 masm.checkStackAlignment();
6708 bool constructing = apply->mir()->isConstructing();
6710 // If the function is native, the call is compiled through emitApplyNative.
6711 MOZ_ASSERT_IF(apply->hasSingleTarget(),
6712 !apply->getSingleTarget()->isNativeWithoutJitEntry());
6714 Label end, invoke;
6716 // Unless already known, guard that calleereg is actually a function object.
6717 if (!apply->hasSingleTarget()) {
6718 masm.branchTestObjIsFunction(Assembler::NotEqual, calleereg, objreg,
6719 calleereg, &invoke);
6722 // Guard that calleereg is an interpreted function with a JSScript.
6723 masm.branchIfFunctionHasNoJitEntry(calleereg, constructing, &invoke);
6725 // Guard that callee allows the [[Call]] or [[Construct]] operation required.
6726 if (constructing) {
6727 masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
6728 Assembler::Zero, &invoke);
6729 } else {
6730 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
6731 calleereg, objreg, &invoke);
6734 // Use the slow path if CreateThis was unable to create the |this| object.
6735 if (constructing) {
6736 Address thisAddr(masm.getStackPointer(), 0);
6737 masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
6740 // Call with an Ion frame or a rectifier frame.
6742 if (apply->mir()->maybeCrossRealm()) {
6743 masm.switchToObjectRealm(calleereg, objreg);
6746 // Knowing that calleereg is a non-native function, load jitcode.
6747 masm.loadJitCodeRaw(calleereg, objreg);
6749 masm.PushCalleeToken(calleereg, constructing);
6750 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcreg, scratch);
6752 Label underflow, rejoin;
6754 // Check whether the provided arguments satisfy target argc.
6755 if (!apply->hasSingleTarget()) {
6756 Register nformals = scratch;
6757 masm.loadFunctionArgCount(calleereg, nformals);
6758 masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
6759 } else {
6760 masm.branch32(Assembler::Below, argcreg,
6761 Imm32(apply->getSingleTarget()->nargs()), &underflow);
6764 // Skip the construction of the rectifier frame because we have no
6765 // underflow.
6766 masm.jump(&rejoin);
6768 // Argument fixup needed. Get ready to call the argumentsRectifier.
6770 masm.bind(&underflow);
6772 // Hardcode the address of the argumentsRectifier code.
6773 TrampolinePtr argumentsRectifier =
6774 gen->jitRuntime()->getArgumentsRectifier();
6775 masm.movePtr(argumentsRectifier, objreg);
6778 masm.bind(&rejoin);
6780 // Finally call the function in objreg, as assigned by one of the paths
6781 // above.
6782 ensureOsiSpace();
6783 uint32_t callOffset = masm.callJit(objreg);
6784 markSafepointAt(callOffset, apply);
6786 if (apply->mir()->maybeCrossRealm()) {
6787 static_assert(!JSReturnOperand.aliases(ReturnReg),
6788 "ReturnReg available as scratch after scripted calls");
6789 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6792 // Discard JitFrameLayout fields still left on the stack.
6793 masm.freeStack(sizeof(JitFrameLayout) -
6794 JitFrameLayout::bytesPoppedAfterCall());
6795 masm.jump(&end);
6798 // Handle uncompiled or native functions.
6800 masm.bind(&invoke);
6801 emitCallInvokeFunction(apply);
6804 masm.bind(&end);
6806 // If the return value of the constructing function is Primitive,
6807 // replace the return value with the Object from CreateThis.
6808 if (constructing) {
6809 Label notPrimitive;
6810 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6811 &notPrimitive);
6812 masm.loadValue(Address(masm.getStackPointer(), 0), JSReturnOperand);
6814 #ifdef DEBUG
6815 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6816 &notPrimitive);
6817 masm.assumeUnreachable("CreateThis creates an object");
6818 #endif
6820 masm.bind(&notPrimitive);
6823 // Pop arguments and continue.
6824 emitRestoreStackPointerFromFP();
// Call jit::InvokeNativeFunction through the VM wrapper. The arguments were
// already pushed onto the stack by the caller; we only capture the current
// stack pointer as argv and pass argc plus the construct/ignore-rv flags.
6827 template <typename T>
6828 void CodeGenerator::emitCallInvokeNativeFunction(T* apply) {
6829 Register argv = ToRegister(apply->getTempObject());
6831 // Push the space used by the arguments.
6832 masm.moveStackPtrTo(argv);
6834 pushArg(argv); // argv.
6835 pushArg(ToRegister(apply->getArgc())); // argc.
6836 pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
6837 pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
6839 using Fn =
6840 bool (*)(JSContext*, bool, bool, uint32_t, Value*, MutableHandleValue);
6841 callVM<Fn, jit::InvokeNativeFunction>(apply);
// Push the caller's actual arguments for a native apply/construct target.
// Unlike the generic path, natives need no stack-alignment padding, so the
// reserved space is exactly argc * sizeof(Value). The copy is skipped
// entirely when argc is zero.
6844 template <typename T>
6845 void CodeGenerator::emitPushNativeArguments(T* apply) {
6846 Register argc = ToRegister(apply->getArgc());
6847 Register tmpArgc = ToRegister(apply->getTempObject());
6848 Register scratch = ToRegister(apply->getTempForArgCopy());
6849 uint32_t extraFormals = apply->numExtraFormals();
6851 // Push arguments.
6852 Label noCopy;
6853 masm.branchTestPtr(Assembler::Zero, argc, argc, &noCopy);
6855 // Use scratch register to calculate stack space (no padding needed).
6856 masm.movePtr(argc, scratch);
6858 // Reserve space for copying the arguments.
6859 NativeObject::elementsSizeMustNotOverflow();
6860 masm.lshiftPtr(Imm32(ValueShift), scratch);
6861 masm.subFromStackPtr(scratch);
6863 // Compute the source and destination offsets into the stack.
6864 Register argvSrcBase = FramePointer;
6865 size_t argvSrcOffset =
6866 JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
6867 size_t argvDstOffset = 0;
6869 Register argvIndex = tmpArgc;
6870 masm.move32(argc, argvIndex);
6872 // Copy arguments.
6873 emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
6874 argvDstOffset);
6876 masm.bind(&noCopy);
// Push the elements of an array as the arguments of a native apply/construct
// call. Elements are pushed back-to-front with a decrementing-count loop so
// they end up in stack order; argc is (re)loaded from the array length at the
// end because argc and elements share a register.
6879 template <typename T>
6880 void CodeGenerator::emitPushArrayAsNativeArguments(T* apply) {
6881 Register argc = ToRegister(apply->getArgc());
6882 Register elements = ToRegister(apply->getElements());
6883 Register tmpArgc = ToRegister(apply->getTempObject());
6884 Register scratch = ToRegister(apply->getTempForArgCopy());
6886 // NB: argc and elements are mapped to the same register.
6887 MOZ_ASSERT(argc == elements);
6889 // Invariants guarded in the caller:
6890 // - the array is not too long
6891 // - the array length equals its initialized length
6893 // The array length is our argc.
6894 masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);
6896 // Skip the copy of arguments if there are none.
6897 Label noCopy;
6898 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
6900 // |tmpArgc| is off-by-one, so adjust the offset accordingly.
6901 BaseObjectElementIndex srcPtr(elements, tmpArgc,
6902 -int32_t(sizeof(JS::Value)));
6904 Label loop;
6905 masm.bind(&loop);
6906 masm.pushValue(srcPtr, scratch);
6907 masm.decBranchPtr(Assembler::NonZero, tmpArgc, Imm32(1), &loop);
6909 masm.bind(&noCopy);
6911 // Set argc in preparation for emitCallInvokeNativeFunction.
6912 masm.load32(Address(elements, ObjectElements::offsetOfLength()), argc);
// Thin per-LIR-op overloads: dispatch the four native apply/construct
// variants to the shared args/array argument pushers above.
6915 void CodeGenerator::emitPushArguments(LApplyArgsNative* apply) {
6916 emitPushNativeArguments(apply);
6919 void CodeGenerator::emitPushArguments(LApplyArrayNative* apply) {
6920 emitPushArrayAsNativeArguments(apply);
6923 void CodeGenerator::emitPushArguments(LConstructArgsNative* construct) {
6924 emitPushNativeArguments(construct);
6927 void CodeGenerator::emitPushArguments(LConstructArrayNative* construct) {
6928 emitPushArrayAsNativeArguments(construct);
// Push the entries of an ArgumentsObject as arguments for a native call.
// argc and argsObj share a register, so the length is first kept in tmpArgc
// (stashed on the stack across the copy, hence the argvDstOffset bump) and
// only materialized into argc at the end.
6931 void CodeGenerator::emitPushArguments(LApplyArgsObjNative* apply) {
6932 Register argc = ToRegister(apply->getArgc());
6933 Register argsObj = ToRegister(apply->getArgsObj());
6934 Register tmpArgc = ToRegister(apply->getTempObject());
6935 Register scratch = ToRegister(apply->getTempForArgCopy());
6937 // NB: argc and argsObj are mapped to the same register.
6938 MOZ_ASSERT(argc == argsObj);
6940 // Load argc into tmpArgc.
6941 masm.loadArgumentsObjectLength(argsObj, tmpArgc);
6943 // Push arguments.
6944 Label noCopy, epilogue;
6945 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
6947 // Use scratch register to calculate stack space (no padding needed).
6948 masm.movePtr(tmpArgc, scratch);
6950 // Reserve space for copying the arguments.
6951 NativeObject::elementsSizeMustNotOverflow();
6952 masm.lshiftPtr(Imm32(ValueShift), scratch);
6953 masm.subFromStackPtr(scratch);
6955 // Load arguments data.
6956 Register argvSrcBase = argsObj;
6957 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
6958 argvSrcBase);
6959 size_t argvSrcOffset = ArgumentsData::offsetOfArgs();
6960 size_t argvDstOffset = 0;
6962 // Stash away |tmpArgc| and adjust argvDstOffset accordingly.
6963 masm.push(tmpArgc);
6964 argvDstOffset += sizeof(void*);
6966 // Copy the values.
6967 emitCopyValuesForApply(argvSrcBase, tmpArgc, scratch, argvSrcOffset,
6968 argvDstOffset);
6970 // Set argc in preparation for emitCallInvokeNativeFunction.
6971 masm.pop(argc);
6972 masm.jump(&epilogue);
6974 masm.bind(&noCopy);
6976 // Set argc in preparation for emitCallInvokeNativeFunction.
6977 masm.movePtr(ImmWord(0), argc);
6979 masm.bind(&epilogue);
// Shared code generation for apply/construct of a known native function
// (the subject of this patch: specialized calls to variadic natives).
// Builds the native call frame directly on the stack — [newTarget?] args
// |this| callee — then invokes the native through the VM wrapper.
6982 template <typename T>
6983 void CodeGenerator::emitApplyNative(T* apply) {
6984 MOZ_ASSERT(apply->mir()->getSingleTarget()->isNativeWithoutJitEntry());
6986 constexpr bool isConstructing = T::isConstructing();
6987 MOZ_ASSERT(isConstructing == apply->mir()->isConstructing(),
6988 "isConstructing condition must be consistent");
6990 // Push newTarget.
6991 if constexpr (isConstructing) {
6992 masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
6995 // Push arguments.
6996 emitPushArguments(apply);
6998 // Push |this|.
6999 if constexpr (isConstructing) {
7000 masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
7001 } else {
7002 masm.pushValue(ToValue(apply, T::ThisIndex));
7005 // Push callee.
7006 masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getFunction()));
7008 // Call the native function.
7009 emitCallInvokeNativeFunction(apply);
7011 // Pop arguments and continue.
7012 emitRestoreStackPointerFromFP();
// Bail out if the forwarded argument count exceeds the JIT's supported
// maximum (JIT_ARGS_LENGTH_MAX).
7015 template <typename T>
7016 void CodeGenerator::emitApplyArgsGuard(T* apply) {
7017 LSnapshot* snapshot = apply->snapshot();
7018 Register argcreg = ToRegister(apply->getArgc());
7020 // Ensure that we have a reasonable number of arguments.
7021 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
// Bail out if the ArgumentsObject's length cannot be loaded (the load itself
// can branch to |bail|) or exceeds JIT_ARGS_LENGTH_MAX.
7024 template <typename T>
7025 void CodeGenerator::emitApplyArgsObjGuard(T* apply) {
7026 Register argsObj = ToRegister(apply->getArgsObj());
7027 Register temp = ToRegister(apply->getTempObject());
7029 Label bail;
7030 masm.loadArgumentsObjectLength(argsObj, temp, &bail);
7031 masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
7032 bailoutFrom(&bail, apply->snapshot());
// Bail out unless the argument array is usable: length within
// JIT_ARGS_LENGTH_MAX and length == initializedLength (checked by
// subtracting and comparing against zero), i.e. no holes in the tail.
7035 template <typename T>
7036 void CodeGenerator::emitApplyArrayGuard(T* apply) {
7037 LSnapshot* snapshot = apply->snapshot();
7038 Register elements = ToRegister(apply->getElements());
7039 Register tmp = ToRegister(apply->getTempObject());
7041 Address length(elements, ObjectElements::offsetOfLength());
7042 masm.load32(length, tmp);
7044 // Ensure that we have a reasonable number of arguments.
7045 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
7047 // Ensure that the array does not contain an uninitialized tail.
7049 Address initializedLength(elements,
7050 ObjectElements::offsetOfInitializedLength());
7051 masm.sub32(initializedLength, tmp);
7052 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
// LIR visitors for the apply/construct family: each runs the appropriate
// guard (args / argsObj / array) and then dispatches to the generic or
// native-specialized emitter.
7055 void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
7056 emitApplyArgsGuard(apply);
7057 emitApplyGeneric(apply);
7060 void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
7061 emitApplyArgsObjGuard(apply);
7062 emitApplyGeneric(apply);
7065 void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
7066 emitApplyArrayGuard(apply);
7067 emitApplyGeneric(apply);
7070 void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
7071 emitApplyArgsGuard(lir);
7072 emitApplyGeneric(lir);
7075 void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
7076 emitApplyArrayGuard(lir);
7077 emitApplyGeneric(lir);
7080 void CodeGenerator::visitApplyArgsNative(LApplyArgsNative* lir) {
7081 emitApplyArgsGuard(lir);
7082 emitApplyNative(lir);
7085 void CodeGenerator::visitApplyArgsObjNative(LApplyArgsObjNative* lir) {
7086 emitApplyArgsObjGuard(lir);
7087 emitApplyNative(lir);
7090 void CodeGenerator::visitApplyArrayNative(LApplyArrayNative* lir) {
7091 emitApplyArrayGuard(lir);
7092 emitApplyNative(lir);
7095 void CodeGenerator::visitConstructArgsNative(LConstructArgsNative* lir) {
7096 emitApplyArgsGuard(lir);
7097 emitApplyNative(lir);
7100 void CodeGenerator::visitConstructArrayNative(LConstructArrayNative* lir) {
7101 emitApplyArrayGuard(lir);
7102 emitApplyNative(lir);
// Trivial visitors: unconditional bailout, debug-only unreachable markers,
// and snapshot encoding.
7105 void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
7107 void CodeGenerator::visitUnreachable(LUnreachable* lir) {
7108 masm.assumeUnreachable("end-of-block assumed unreachable");
7111 void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) {
7112 encode(lir->snapshot());
7115 void CodeGenerator::visitUnreachableResultV(LUnreachableResultV* lir) {
7116 masm.assumeUnreachable("must be unreachable");
7119 void CodeGenerator::visitUnreachableResultT(LUnreachableResultT* lir) {
7120 masm.assumeUnreachable("must be unreachable");
7123 // Out-of-line path to report over-recursed error and fail.
// Carries the originating LIR instruction so the OOL visitor can save/restore
// its live registers around the VM call.
7124 class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> {
7125 LInstruction* lir_;
7127 public:
7128 explicit CheckOverRecursedFailure(LInstruction* lir) : lir_(lir) {}
7130 void accept(CodeGenerator* codegen) override {
7131 codegen->visitCheckOverRecursedFailure(this);
7134 LInstruction* lir() const { return lir_; }
// Emit the stack-overflow guard: compare the stack pointer against the
// runtime's JIT stack limit and branch to the out-of-line failure path when
// the limit is reached. Elided entirely when this frame pushes nothing.
7137 void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) {
7138 // If we don't push anything on the stack, skip the check.
7139 if (omitOverRecursedCheck()) {
7140 return;
7143 // Ensure that this frame will not cross the stack limit.
7144 // This is a weak check, justified by Ion using the C stack: we must always
7145 // be some distance away from the actual limit, since if the limit is
7146 // crossed, an error must be thrown, which requires more frames.
7148 // It must always be possible to trespass past the stack limit.
7149 // Ion may legally place frames very close to the limit. Calling additional
7150 // C functions may then violate the limit without any checking.
7152 // Since Ion frames exist on the C stack, the stack limit may be
7153 // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().
7155 CheckOverRecursedFailure* ool = new (alloc()) CheckOverRecursedFailure(lir);
7156 addOutOfLineCode(ool, lir->mir());
7158 // Conditional forward (unlikely) branch to failure.
7159 const void* limitAddr = gen->runtime->addressOfJitStackLimit();
7160 masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
7161 ool->entry());
7162 masm.bind(ool->rejoin());
// Out-of-line continuation of the over-recursion check: save live registers,
// call the CheckOverRecursed VM function (which throws), restore, and rejoin.
7165 void CodeGenerator::visitCheckOverRecursedFailure(
7166 CheckOverRecursedFailure* ool) {
7167 // The OOL path is hit if the recursion depth has been exceeded.
7168 // Throw an InternalError for over-recursion.
7170 // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
7171 // to save all live registers to avoid crashes if CheckOverRecursed triggers
7172 // a GC.
7173 saveLive(ool->lir());
7175 using Fn = bool (*)(JSContext*);
7176 callVM<Fn, CheckOverRecursed>(ool->lir());
7178 restoreLive(ool->lir());
7179 masm.jump(ool->rejoin());
// Create and populate an IonScriptCounts for profiling, or return nullptr
// when profiling is off, when compiling wasm (no JSScript), or on OOM.
// Ownership note: on success the raw pointer is stored in scriptCounts_ and
// also returned; on mid-way failure the UniquePtr cleans up.
7182 IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() {
7183 // If scripts are being profiled, create a new IonScriptCounts for the
7184 // profiling data, which will be attached to the associated JSScript or
7185 // wasm module after code generation finishes.
7186 if (!gen->hasProfilingScripts()) {
7187 return nullptr;
7190 // This test inhibits IonScriptCount creation for wasm code which is
7191 // currently incompatible with wasm codegen for two reasons: (1) wasm code
7192 // must be serializable and script count codegen bakes in absolute
7193 // addresses, (2) wasm code does not have a JSScript with which to associate
7194 // code coverage data.
7195 JSScript* script = gen->outerInfo().script();
7196 if (!script) {
7197 return nullptr;
7200 auto counts = MakeUnique<IonScriptCounts>();
7201 if (!counts || !counts->init(graph.numBlocks())) {
7202 return nullptr;
7205 for (size_t i = 0; i < graph.numBlocks(); i++) {
7206 MBasicBlock* block = graph.getBlock(i)->mir();
7208 uint32_t offset = 0;
7209 char* description = nullptr;
7210 if (MResumePoint* resume = block->entryResumePoint()) {
7211 // Find a PC offset in the outermost script to use. If this
7212 // block is from an inlined script, find a location in the
7213 // outer script to associate information about the inlining
7214 // with.
7215 while (resume->caller()) {
7216 resume = resume->caller();
7218 offset = script->pcToOffset(resume->pc());
7220 if (block->entryResumePoint()->caller()) {
7221 // Get the filename and line number of the inner script.
7222 JSScript* innerScript = block->info().script();
7223 description = js_pod_calloc<char>(200);
7224 if (description) {
7225 snprintf(description, 200, "%s:%u", innerScript->filename(),
7226 innerScript->lineno());
7231 if (!counts->block(i).init(block->id(), offset, description,
7232 block->numSuccessors())) {
7233 return nullptr;
7236 for (size_t j = 0; j < block->numSuccessors(); j++) {
7237 counts->block(i).setSuccessor(
7238 j, skipTrivialBlocks(block->getSuccessor(j))->id());
7242 scriptCounts_ = counts.release();
7243 return scriptCounts_;
7246 // Structure for managing the state tracked for a block by script counters.
// RAII helper: on init() it bumps the block's hit counter and attaches a
// Sprinter to the MacroAssembler so generated assembly text is captured;
// the destructor detaches the printer and stores the captured text on the
// block's counts.
7247 struct ScriptCountBlockState {
7248 IonBlockCounts& block;
7249 MacroAssembler& masm;
7251 Sprinter printer;
7253 public:
7254 ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
7255 : block(*block), masm(*masm), printer(GetJitContext()->cx, false) {}
7257 bool init() {
7258 if (!printer.init()) {
7259 return false;
7262 // Bump the hit count for the block at the start. This code is not
7263 // included in either the text for the block or the instruction byte
7264 // counts.
7265 masm.inc64(AbsoluteAddress(block.addressOfHitCount()));
7267 // Collect human readable assembly for the code generated in the block.
7268 masm.setPrinter(&printer);
7270 return true;
7273 void visitInstruction(LInstruction* ins) {
7274 #ifdef JS_JITSPEW
7275 // Prefix stream of assembly instructions with their LIR instruction
7276 // name and any associated high level info.
7277 if (const char* extra = ins->getExtraName()) {
7278 printer.printf("[%s:%s]\n", ins->opName(), extra);
7279 } else {
7280 printer.printf("[%s]\n", ins->opName());
7282 #endif
7285 ~ScriptCountBlockState() {
7286 masm.setPrinter(nullptr);
7288 if (JS::UniqueChars str = printer.release()) {
7289 block.setCode(str.get());
7294 void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) {
7295 CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
7296 masm.propagateOOM(ionScriptLabels_.append(label));
7298 // If IonScript::invalidationCount_ != 0, the script has been invalidated.
7299 masm.branch32(Assembler::NotEqual,
7300 Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0),
7301 invalidated);
7304 #ifdef DEBUG
// Debug-only: verify that |input| holds a valid GC thing pointer of the
// MIR-declared type by calling the matching AssertValid*Ptr C++ helper.
// Skipped when the script has been invalidated (stale values are expected
// until the bailout at the next OsiPoint).
7305 void CodeGenerator::emitAssertGCThingResult(Register input,
7306 const MDefinition* mir) {
7307 MIRType type = mir->type();
7308 MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
7309 type == MIRType::Symbol || type == MIRType::BigInt);
7311 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7312 regs.take(input);
7314 Register temp = regs.takeAny();
7315 masm.push(temp);
7317 // Don't check if the script has been invalidated. In that case invalid
7318 // types are expected (until we reach the OsiPoint and bailout).
7319 Label done;
7320 branchIfInvalidated(temp, &done);
7322 # ifndef JS_SIMULATOR
7323 // Check that we have a valid GC pointer.
7324 // Disable for wasm because we don't have a context on wasm compilation
7325 // threads and this needs a context.
7326 // Also disable for simulator builds because the C++ call is a lot slower
7327 // there than on actual hardware.
7328 if (JitOptions.fullDebugChecks && !IsCompilingWasm()) {
7329 saveVolatile();
7330 masm.setupUnalignedABICall(temp);
7331 masm.loadJSContext(temp);
7332 masm.passABIArg(temp);
7333 masm.passABIArg(input);
7335 switch (type) {
7336 case MIRType::Object: {
7337 using Fn = void (*)(JSContext* cx, JSObject* obj);
7338 masm.callWithABI<Fn, AssertValidObjectPtr>();
7339 break;
7341 case MIRType::String: {
7342 using Fn = void (*)(JSContext* cx, JSString* str);
7343 masm.callWithABI<Fn, AssertValidStringPtr>();
7344 break;
7346 case MIRType::Symbol: {
7347 using Fn = void (*)(JSContext* cx, JS::Symbol* sym);
7348 masm.callWithABI<Fn, AssertValidSymbolPtr>();
7349 break;
7351 case MIRType::BigInt: {
7352 using Fn = void (*)(JSContext* cx, JS::BigInt* bi);
7353 masm.callWithABI<Fn, AssertValidBigIntPtr>();
7354 break;
7356 default:
7357 MOZ_CRASH();
7360 restoreVolatile();
7362 # endif
7364 masm.bind(&done);
7365 masm.pop(temp);
// Debug-only: verify that the boxed Value in |input| is valid by spilling it
// to the stack and calling AssertValidValue with its address. Skipped when
// the script has been invalidated.
7368 void CodeGenerator::emitAssertResultV(const ValueOperand input,
7369 const MDefinition* mir) {
7370 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7371 regs.take(input);
7373 Register temp1 = regs.takeAny();
7374 Register temp2 = regs.takeAny();
7375 masm.push(temp1);
7376 masm.push(temp2);
7378 // Don't check if the script has been invalidated. In that case invalid
7379 // types are expected (until we reach the OsiPoint and bailout).
7380 Label done;
7381 branchIfInvalidated(temp1, &done);
7383 // Check that we have a valid GC pointer.
7384 if (JitOptions.fullDebugChecks) {
7385 saveVolatile();
7387 masm.pushValue(input);
7388 masm.moveStackPtrTo(temp1);
7390 using Fn = void (*)(JSContext* cx, Value* v);
7391 masm.setupUnalignedABICall(temp2);
7392 masm.loadJSContext(temp2);
7393 masm.passABIArg(temp2);
7394 masm.passABIArg(temp1);
7395 masm.callWithABI<Fn, AssertValidValue>();
7396 masm.popValue(input);
7397 restoreVolatile();
7400 masm.bind(&done);
7401 masm.pop(temp2);
7402 masm.pop(temp1);
// Debug-only result checkers: dispatch an instruction's definition(s) to the
// GC-thing or boxed-Value assertion helpers above, skipping instructions with
// no defs, bogus temps, or non-register outputs.
7405 void CodeGenerator::emitGCThingResultChecks(LInstruction* lir,
7406 MDefinition* mir) {
7407 if (lir->numDefs() == 0) {
7408 return;
7411 MOZ_ASSERT(lir->numDefs() == 1);
7412 if (lir->getDef(0)->isBogusTemp()) {
7413 return;
7416 Register output = ToRegister(lir->getDef(0));
7417 emitAssertGCThingResult(output, mir);
7420 void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) {
7421 if (lir->numDefs() == 0) {
7422 return;
7425 MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
7426 if (!lir->getDef(0)->output()->isRegister()) {
7427 return;
7430 ValueOperand output = ToOutValue(lir);
7432 emitAssertResultV(output, mir);
// Debug-only: route an instruction's result to the appropriate checker based
// on its MIR type (GC things vs. boxed Values); other types are unchecked.
7435 void CodeGenerator::emitDebugResultChecks(LInstruction* ins) {
7436 // In debug builds, check that LIR instructions return valid values.
7438 MDefinition* mir = ins->mirRaw();
7439 if (!mir) {
7440 return;
7443 switch (mir->type()) {
7444 case MIRType::Object:
7445 case MIRType::String:
7446 case MIRType::Symbol:
7447 case MIRType::BigInt:
7448 emitGCThingResultChecks(ins, mir);
7449 break;
7450 case MIRType::Value:
7451 emitValueResultChecks(ins, mir);
7452 break;
7453 default:
7454 break;
// Debug-only --ion-bail-after support: decrement the runtime's bail-after
// counter at each snapshot-bearing instruction (except OsiPoints) and force
// a bailout when it hits zero.
7458 void CodeGenerator::emitDebugForceBailing(LInstruction* lir) {
7459 if (MOZ_LIKELY(!gen->options.ionBailAfterEnabled())) {
7460 return;
7462 if (!lir->snapshot()) {
7463 return;
7465 if (lir->isOsiPoint()) {
7466 return;
7469 masm.comment("emitDebugForceBailing");
7470 const void* bailAfterCounterAddr =
7471 gen->runtime->addressOfIonBailAfterCounter();
7473 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7475 Label done, notBail;
7476 masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterCounterAddr),
7477 Imm32(0), &done);
7479 Register temp = regs.takeAny();
7481 masm.push(temp);
7482 masm.load32(AbsoluteAddress(bailAfterCounterAddr), temp);
7483 masm.sub32(Imm32(1), temp);
7484 masm.store32(temp, AbsoluteAddress(bailAfterCounterAddr));
7486 masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
7488 masm.pop(temp);
7489 bailout(lir->snapshot());
7491 masm.bind(&notBail);
7492 masm.pop(temp);
7494 masm.bind(&done);
7496 #endif
// Main code-generation loop: iterate every (non-trivial) basic block and
// every LIR instruction in it, dispatching to the per-op visit* method via
// the LIROP macro switch. Also handles per-block bookkeeping: script counts,
// JitSpew logging, loop-header alignment for wasm, native-to-bytecode
// mapping, OSI-point register tracking, forced bailing, and debug result
// checks. Returns false on OOM or any per-step failure.
7498 bool CodeGenerator::generateBody() {
7499 JitSpewCont(JitSpew_Codegen, "\n");
7500 AutoCreatedBy acb(masm, "CodeGenerator::generateBody");
7502 JitSpew(JitSpew_Codegen, "==== BEGIN CodeGenerator::generateBody ====");
7503 IonScriptCounts* counts = maybeCreateScriptCounts();
7505 const bool compilingWasm = gen->compilingWasm();
7507 for (size_t i = 0; i < graph.numBlocks(); i++) {
7508 current = graph.getBlock(i);
7510 // Don't emit any code for trivial blocks, containing just a goto. Such
7511 // blocks are created to split critical edges, and if we didn't end up
7512 // putting any instructions in them, we can skip them.
7513 if (current->isTrivial()) {
7514 continue;
7517 #ifdef JS_JITSPEW
7518 const char* filename = nullptr;
7519 size_t lineNumber = 0;
7520 JS::LimitedColumnNumberOneOrigin columnNumber;
7521 if (current->mir()->info().script()) {
7522 filename = current->mir()->info().script()->filename();
7523 if (current->mir()->pc()) {
7524 lineNumber = PCToLineNumber(current->mir()->info().script(),
7525 current->mir()->pc(), &columnNumber);
7528 JitSpew(JitSpew_Codegen, "--------------------------------");
7529 JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", i,
7530 filename ? filename : "?", lineNumber,
7531 columnNumber.oneOriginValue(),
7532 current->mir()->isLoopHeader() ? " (loop header)" : "");
7533 #endif
7535 if (current->mir()->isLoopHeader() && compilingWasm) {
7536 masm.nopAlign(CodeAlignment);
7539 masm.bind(current->label());
7541 mozilla::Maybe<ScriptCountBlockState> blockCounts;
7542 if (counts) {
7543 blockCounts.emplace(&counts->block(i), &masm);
7544 if (!blockCounts->init()) {
7545 return false;
7549 for (LInstructionIterator iter = current->begin(); iter != current->end();
7550 iter++) {
7551 if (!alloc().ensureBallast()) {
7552 return false;
7555 perfSpewer_.recordInstruction(masm, *iter);
7556 #ifdef JS_JITSPEW
7557 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
7558 iter->opName());
7559 if (const char* extra = iter->getExtraName()) {
7560 JitSpewCont(JitSpew_Codegen, ":%s", extra);
7562 JitSpewFin(JitSpew_Codegen);
7563 #endif
7565 if (counts) {
7566 blockCounts->visitInstruction(*iter);
7569 #ifdef CHECK_OSIPOINT_REGISTERS
7570 if (iter->safepoint() && !compilingWasm) {
7571 resetOsiPointRegs(iter->safepoint());
7573 #endif
7575 if (!compilingWasm) {
7576 if (MDefinition* mir = iter->mirRaw()) {
7577 if (!addNativeToBytecodeEntry(mir->trackedSite())) {
7578 return false;
7583 setElement(*iter); // needed to encode correct snapshot location.
7585 #ifdef DEBUG
7586 emitDebugForceBailing(*iter);
7587 #endif
7589 switch (iter->op()) {
7590 #ifndef JS_CODEGEN_NONE
7591 # define LIROP(op) \
7592 case LNode::Opcode::op: \
7593 visit##op(iter->to##op()); \
7594 break;
7595 LIR_OPCODE_LIST(LIROP)
7596 # undef LIROP
7597 #endif
7598 case LNode::Opcode::Invalid:
7599 default:
7600 MOZ_CRASH("Invalid LIR op");
7603 #ifdef DEBUG
7604 if (!counts) {
7605 emitDebugResultChecks(*iter);
7607 #endif
7609 if (masm.oom()) {
7610 return false;
7614 JitSpew(JitSpew_Codegen, "==== END CodeGenerator::generateBody ====\n");
7615 return true;
7618 // Out-of-line object allocation for LNewArray.
// Fallback when inline GC allocation fails; visitOutOfLineNewArray performs
// the VM call and jumps back to the rejoin point.
7619 class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator> {
7620 LNewArray* lir_;
7622 public:
7623 explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) {}
7625 void accept(CodeGenerator* codegen) override {
7626 codegen->visitOutOfLineNewArray(this);
7629 LNewArray* lir() const { return lir_; }
// Allocate the array through a VM call, choosing NewArrayWithShape when a
// template object (and thus a shape) is available, else NewArrayOperation.
// Live registers are saved/restored manually because this runs from both the
// main path and the out-of-line path.
7632 void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) {
7633 Register objReg = ToRegister(lir->output());
7635 MOZ_ASSERT(!lir->isCall());
7636 saveLive(lir);
7638 JSObject* templateObject = lir->mir()->templateObject();
7640 if (templateObject) {
7641 pushArg(ImmGCPtr(templateObject->shape()));
7642 pushArg(Imm32(lir->mir()->length()));
7644 using Fn = ArrayObject* (*)(JSContext*, uint32_t, Handle<Shape*>);
7645 callVM<Fn, NewArrayWithShape>(lir);
7646 } else {
7647 pushArg(Imm32(GenericObject));
7648 pushArg(Imm32(lir->mir()->length()));
7650 using Fn = ArrayObject* (*)(JSContext*, uint32_t, NewObjectKind);
7651 callVM<Fn, NewArrayOperation>(lir);
7654 masm.storeCallPointerResult(objReg);
7656 MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
7657 restoreLive(lir);
// Math.atan2: forward both double operands to the C++ ecmaAtan2 helper via
// an aligned ABI call; the result lands in ReturnDoubleReg.
7660 void CodeGenerator::visitAtan2D(LAtan2D* lir) {
7661 FloatRegister y = ToFloatRegister(lir->y());
7662 FloatRegister x = ToFloatRegister(lir->x());
7664 using Fn = double (*)(double x, double y);
7665 masm.setupAlignedABICall();
7666 masm.passABIArg(y, ABIType::Float64);
7667 masm.passABIArg(x, ABIType::Float64);
7668 masm.callWithABI<Fn, ecmaAtan2>(ABIType::Float64);
7670 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
// Math.hypot with 2-4 arguments: pass each double operand through the ABI
// and call the arity-matched C++ helper (ecmaHypot / hypot3 / hypot4).
7673 void CodeGenerator::visitHypot(LHypot* lir) {
7674 uint32_t numArgs = lir->numArgs();
7675 masm.setupAlignedABICall();
7677 for (uint32_t i = 0; i < numArgs; ++i) {
7678 masm.passABIArg(ToFloatRegister(lir->getOperand(i)), ABIType::Float64);
7681 switch (numArgs) {
7682 case 2: {
7683 using Fn = double (*)(double x, double y);
7684 masm.callWithABI<Fn, ecmaHypot>(ABIType::Float64);
7685 break;
7687 case 3: {
7688 using Fn = double (*)(double x, double y, double z);
7689 masm.callWithABI<Fn, hypot3>(ABIType::Float64);
7690 break;
7692 case 4: {
7693 using Fn = double (*)(double x, double y, double z, double w);
7694 masm.callWithABI<Fn, hypot4>(ABIType::Float64);
7695 break;
7697 default:
7698 MOZ_CRASH("Unexpected number of arguments to hypot function.");
7700 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
// LNewArray: inline nursery allocation of an array from its template object,
// with an out-of-line VM-call fallback. If MIR already decided on a VM call,
// skip the inline path entirely.
7703 void CodeGenerator::visitNewArray(LNewArray* lir) {
7704 Register objReg = ToRegister(lir->output());
7705 Register tempReg = ToRegister(lir->temp());
7706 DebugOnly<uint32_t> length = lir->mir()->length();
7708 MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);
7710 if (lir->mir()->isVMCall()) {
7711 visitNewArrayCallVM(lir);
7712 return;
7715 OutOfLineNewArray* ool = new (alloc()) OutOfLineNewArray(lir);
7716 addOutOfLineCode(ool, lir->mir());
7718 TemplateObject templateObject(lir->mir()->templateObject());
7719 #ifdef DEBUG
// The inline path can only initialize elements stored inside the object's
// GC cell; verify the requested length fits the template's alloc kind.
7720 size_t numInlineElements = gc::GetGCKindSlots(templateObject.getAllocKind()) -
7721 ObjectElements::VALUES_PER_HEADER;
7722 MOZ_ASSERT(length <= numInlineElements,
7723 "Inline allocation only supports inline elements");
7724 #endif
7725 masm.createGCObject(objReg, tempReg, templateObject,
7726 lir->mir()->initialHeap(), ool->entry());
7728 masm.bind(ool->rejoin());
// OOL continuation for LNewArray: do the VM-call allocation, then jump back
// to the inline path's rejoin point.
7731 void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) {
7732 visitNewArrayCallVM(ool->lir());
7733 masm.jump(ool->rejoin());
// new Array(n) with a dynamic length: allocate inline from the template
// object when the runtime length fits its fixed-element capacity, otherwise
// fall back to the ArrayConstructorOneArg VM call.
7736 void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) {
7737 Register lengthReg = ToRegister(lir->length());
7738 Register objReg = ToRegister(lir->output());
7739 Register tempReg = ToRegister(lir->temp0());
7741 JSObject* templateObject = lir->mir()->templateObject();
7742 gc::Heap initialHeap = lir->mir()->initialHeap();
7744 using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
7745 OutOfLineCode* ool = oolCallVM<Fn, ArrayConstructorOneArg>(
7746 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
7747 StoreRegisterTo(objReg));
// Inline allocation is only possible when the template stores its elements
// in the fixed (in-cell) slots; compute that capacity up front.
7749 bool canInline = true;
7750 size_t inlineLength = 0;
7751 if (templateObject->as<ArrayObject>().hasFixedElements()) {
7752 size_t numSlots =
7753 gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
7754 inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
7755 } else {
7756 canInline = false;
7759 if (canInline) {
7760 // Try to do the allocation inline if the template object is big enough
7761 // for the length in lengthReg. If the length is bigger we could still
7762 // use the template object and not allocate the elements, but it's more
7763 // efficient to do a single big allocation than (repeatedly) reallocating
7764 // the array later on when filling it.
7765 masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength),
7766 ool->entry());
7768 TemplateObject templateObj(templateObject);
7769 masm.createGCObject(objReg, tempReg, templateObj, initialHeap,
7770 ool->entry());
// Patch the runtime length into the freshly copied elements header.
7772 size_t lengthOffset = NativeObject::offsetOfFixedElements() +
7773 ObjectElements::offsetOfLength();
7774 masm.store32(lengthReg, Address(objReg, lengthOffset));
7775 } else {
7776 masm.jump(ool->entry());
7779 masm.bind(ool->rejoin());
// Allocate an iterator object (array/string/regexp-string) inline from its
// template object, with a type-specific VM-call fallback.
7782 void CodeGenerator::visitNewIterator(LNewIterator* lir) {
7783 Register objReg = ToRegister(lir->output());
7784 Register tempReg = ToRegister(lir->temp0());
// Select the fallback VM function matching the iterator kind.
7786 OutOfLineCode* ool;
7787 switch (lir->mir()->type()) {
7788 case MNewIterator::ArrayIterator: {
7789 using Fn = ArrayIteratorObject* (*)(JSContext*);
7790 ool = oolCallVM<Fn, NewArrayIterator>(lir, ArgList(),
7791 StoreRegisterTo(objReg));
7792 break;
7794 case MNewIterator::StringIterator: {
7795 using Fn = StringIteratorObject* (*)(JSContext*);
7796 ool = oolCallVM<Fn, NewStringIterator>(lir, ArgList(),
7797 StoreRegisterTo(objReg));
7798 break;
7800 case MNewIterator::RegExpStringIterator: {
7801 using Fn = RegExpStringIteratorObject* (*)(JSContext*);
7802 ool = oolCallVM<Fn, NewRegExpStringIterator>(lir, ArgList(),
7803 StoreRegisterTo(objReg));
7804 break;
7806 default:
7807 MOZ_CRASH("unexpected iterator type");
7810 TemplateObject templateObject(lir->mir()->templateObject());
7811 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
7812 ool->entry());
7814 masm.bind(ool->rejoin());
// Allocate a fixed-length typed array from its template object: inline GC
// allocation plus slot/elements initialization, falling back to
// NewTypedArrayWithTemplateAndLength on failure.
7817 void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) {
7818 Register objReg = ToRegister(lir->output());
7819 Register tempReg = ToRegister(lir->temp0());
7820 Register lengthReg = ToRegister(lir->temp1());
// Volatile registers live across the ABI call inside initTypedArraySlots.
7821 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
7823 JSObject* templateObject = lir->mir()->templateObject();
7824 gc::Heap initialHeap = lir->mir()->initialHeap();
7826 auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();
7828 size_t n = ttemplate->length();
7829 MOZ_ASSERT(n <= INT32_MAX,
7830 "Template objects are only created for int32 lengths");
7832 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
7833 OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
7834 lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
7835 StoreRegisterTo(objReg));
7837 TemplateObject templateObj(templateObject);
7838 masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
7840 masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
7841 ttemplate, MacroAssembler::TypedArrayLength::Fixed);
7843 masm.bind(ool->rejoin());
// Same as visitNewTypedArray but the element count comes from a register at
// runtime (TypedArrayLength::Dynamic) instead of the template's length.
7846 void CodeGenerator::visitNewTypedArrayDynamicLength(
7847 LNewTypedArrayDynamicLength* lir) {
7848 Register lengthReg = ToRegister(lir->length());
7849 Register objReg = ToRegister(lir->output());
7850 Register tempReg = ToRegister(lir->temp0());
7851 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
7853 JSObject* templateObject = lir->mir()->templateObject();
7854 gc::Heap initialHeap = lir->mir()->initialHeap();
7856 auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();
7858 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
7859 OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
7860 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
7861 StoreRegisterTo(objReg));
7863 // Volatile |lengthReg| is saved across the ABI call in |initTypedArraySlots|.
7864 MOZ_ASSERT_IF(lengthReg.volatile_(), liveRegs.has(lengthReg));
7866 TemplateObject templateObj(templateObject);
7867 masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
7869 masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
7870 ttemplate,
7871 MacroAssembler::TypedArrayLength::Dynamic);
7873 masm.bind(ool->rejoin());
// new TypedArray(array): pure VM call; arguments pushed in reverse of the
// Fn signature (template object, then source array).
7876 void CodeGenerator::visitNewTypedArrayFromArray(LNewTypedArrayFromArray* lir) {
7877 pushArg(ToRegister(lir->array()));
7878 pushArg(ImmGCPtr(lir->mir()->templateObject()));
7880 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
7881 callVM<Fn, js::NewTypedArrayWithTemplateAndArray>(lir);
// new TypedArray(buffer, byteOffset, length): pure VM call; byteOffset and
// length are boxed Values since they may be any JS value.
7884 void CodeGenerator::visitNewTypedArrayFromArrayBuffer(
7885 LNewTypedArrayFromArrayBuffer* lir) {
7886 pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::LengthIndex));
7887 pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::ByteOffsetIndex));
7888 pushArg(ToRegister(lir->arrayBuffer()));
7889 pushArg(ImmGCPtr(lir->mir()->templateObject()));
7891 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
7892 HandleValue, HandleValue);
7893 callVM<Fn, js::NewTypedArrayWithTemplateAndBuffer>(lir);
// Function.prototype.bind: pre-allocate the BoundFunctionObject inline when
// possible (nullptr in temp1 signals the VM to allocate instead), then call
// functionBindImpl with a pointer to the stack-resident bound arguments.
7896 void CodeGenerator::visitBindFunction(LBindFunction* lir) {
7897 Register target = ToRegister(lir->target());
7898 Register temp1 = ToRegister(lir->temp0());
7899 Register temp2 = ToRegister(lir->temp1());
7901 // Try to allocate a new BoundFunctionObject we can pass to the VM function.
7902 // If this fails, we set temp1 to nullptr so we do the allocation in C++.
7903 TemplateObject templateObject(lir->mir()->templateObject());
7904 Label allocOk, allocFailed;
7905 masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
7906 &allocFailed);
7907 masm.jump(&allocOk);
7909 masm.bind(&allocFailed);
7910 masm.movePtr(ImmWord(0), temp1);
7912 masm.bind(&allocOk);
7914 // Set temp2 to the address of the first argument on the stack.
7915 // Note that the Value slots used for arguments are currently aligned for a
7916 // JIT call, even though that's not strictly necessary for calling into C++.
7917 uint32_t argc = lir->mir()->numStackArgs();
7918 if (JitStackValueAlignment > 1) {
7919 argc = AlignBytes(argc, JitStackValueAlignment);
7921 uint32_t unusedStack = UnusedStackBytesForCall(argc);
7922 masm.computeEffectiveAddress(Address(masm.getStackPointer(), unusedStack),
7923 temp2);
// VM-call arguments, pushed in reverse of the Fn signature below.
7925 pushArg(temp1);
7926 pushArg(Imm32(lir->mir()->numStackArgs()));
7927 pushArg(temp2);
7928 pushArg(target);
7930 using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
7931 uint32_t, Handle<BoundFunctionObject*>);
7932 callVM<Fn, js::BoundFunctionObject::functionBindImpl>(lir);
// Allocate a BoundFunctionObject from a template inline, falling back to
// BoundFunctionObject::createWithTemplate in the VM.
7935 void CodeGenerator::visitNewBoundFunction(LNewBoundFunction* lir) {
7936 Register output = ToRegister(lir->output());
7937 Register temp = ToRegister(lir->temp0());
7939 JSObject* templateObj = lir->mir()->templateObj();
7941 using Fn = BoundFunctionObject* (*)(JSContext*, Handle<BoundFunctionObject*>);
7942 OutOfLineCode* ool = oolCallVM<Fn, BoundFunctionObject::createWithTemplate>(
7943 lir, ArgList(ImmGCPtr(templateObj)), StoreRegisterTo(output));
7945 TemplateObject templateObject(templateObj);
7946 masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
7947 ool->entry());
7949 masm.bind(ool->rejoin());
7952 // Out-of-line object allocation for JSOp::NewObject.
// Mirrors OutOfLineNewArray: on inline-allocation failure, dispatches to
// visitOutOfLineNewObject for the VM-call fallback.
7953 class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator> {
7954   LNewObject* lir_;
7956  public:
7957   explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) {}
7959   void accept(CodeGenerator* codegen) override {
7960     codegen->visitOutOfLineNewObject(this);
// Accessor for the LIR node this OOL path services.
7963   LNewObject* lir() const { return lir_; }
// VM-call fallback for object allocation. ObjectLiteral re-executes the
// bytecode op in the VM (no template object); ObjectCreate clones the
// template via ObjectCreateWithTemplate.
7966 void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) {
7967 Register objReg = ToRegister(lir->output());
// Not an LIR call instruction; save/restore live registers manually.
7969 MOZ_ASSERT(!lir->isCall());
7970 saveLive(lir);
7972 JSObject* templateObject = lir->mir()->templateObject();
7974 // If we're making a new object with a class prototype (that is, an object
7975 // that derives its class from its prototype instead of being
7976 // PlainObject::class_'d) from self-hosted code, we need a different init
7977 // function.
7978 switch (lir->mir()->mode()) {
7979 case MNewObject::ObjectLiteral: {
7980 MOZ_ASSERT(!templateObject);
7981 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
7982 pushArg(ImmGCPtr(lir->mir()->block()->info().script()));
7984 using Fn = JSObject* (*)(JSContext*, HandleScript, const jsbytecode* pc);
7985 callVM<Fn, NewObjectOperation>(lir);
7986 break;
7988 case MNewObject::ObjectCreate: {
7989 pushArg(ImmGCPtr(templateObject));
7991 using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
7992 callVM<Fn, ObjectCreateWithTemplate>(lir);
7993 break;
7997 masm.storeCallPointerResult(objReg);
7999 MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
8000 restoreLive(lir);
// Returns whether the allocation emitted for |lir| must clear its fixed
// slots to |undefined|. Scans the instructions following the allocation in
// the same block: if every fixed slot is provably written by StoreFixedSlot
// before anything can GC or observe the object, initialization is skipped
// (returns false). Conservatively returns true on any unhandled instruction.
// Side effect: disables the pre-barrier on the stores it proves redundant.
8003 static bool ShouldInitFixedSlots(LNewPlainObject* lir, const Shape* shape,
8004 uint32_t nfixed) {
8005 // Look for StoreFixedSlot instructions following an object allocation
8006 // that write to this object before a GC is triggered or this object is
8007 // passed to a VM call. If all fixed slots will be initialized, the
8008 // allocation code doesn't need to set the slots to |undefined|.
// No fixed slots means there is nothing to initialize.
8010 if (nfixed == 0) {
8011 return false;
8014 // Keep track of the fixed slots that are initialized. initializedSlots is
8015 // a bit mask with a bit for each slot.
8016 MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
8017 static_assert(NativeObject::MAX_FIXED_SLOTS <= 32,
8018 "Slot bits must fit in 32 bits");
8019 uint32_t initializedSlots = 0;
8020 uint32_t numInitialized = 0;
8022 MInstruction* allocMir = lir->mir();
8023 MBasicBlock* block = allocMir->block();
8025 // Skip the allocation instruction.
8026 MInstructionIterator iter = block->begin(allocMir);
8027 MOZ_ASSERT(*iter == allocMir);
8028 iter++;
8030 // Handle the leading shape guard, if present.
// If a GuardShape on this allocation follows, later stores reference the
// guard instead of the allocation itself; track it as the object's def.
8031 for (; iter != block->end(); iter++) {
8032 if (iter->isConstant()) {
8033 // This instruction won't trigger a GC or read object slots.
8034 continue;
8036 if (iter->isGuardShape()) {
8037 auto* guard = iter->toGuardShape();
8038 if (guard->object() != allocMir || guard->shape() != shape) {
8039 return true;
8041 allocMir = guard;
8042 iter++;
8044 break;
8047 for (; iter != block->end(); iter++) {
8048 if (iter->isConstant() || iter->isPostWriteBarrier()) {
8049 // These instructions won't trigger a GC or read object slots.
8050 continue;
8053 if (iter->isStoreFixedSlot()) {
8054 MStoreFixedSlot* store = iter->toStoreFixedSlot();
8055 if (store->object() != allocMir) {
8056 return true;
8059 // We may not initialize this object slot on allocation, so the
8060 // pre-barrier could read uninitialized memory. Simply disable
8061 // the barrier for this store: the object was just initialized
8062 // so the barrier is not necessary.
8063 store->setNeedsBarrier(false);
8065 uint32_t slot = store->slot();
8066 MOZ_ASSERT(slot < nfixed);
8067 if ((initializedSlots & (1 << slot)) == 0) {
8068 numInitialized++;
8069 initializedSlots |= (1 << slot);
8071 if (numInitialized == nfixed) {
8072 // All fixed slots will be initialized.
8073 MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
8074 return false;
8077 continue;
8080 // Unhandled instruction, assume it bails or reads object slots.
8081 return true;
8084 MOZ_CRASH("Shouldn't get here");
// LNewObject: inline allocation from the template object with an OOL VM-call
// fallback; delegates directly to the VM when MIR decided so.
8087 void CodeGenerator::visitNewObject(LNewObject* lir) {
8088 Register objReg = ToRegister(lir->output());
8089 Register tempReg = ToRegister(lir->temp());
8091 if (lir->mir()->isVMCall()) {
8092 visitNewObjectVMCall(lir);
8093 return;
8096 OutOfLineNewObject* ool = new (alloc()) OutOfLineNewObject(lir);
8097 addOutOfLineCode(ool, lir->mir());
8099 TemplateObject templateObject(lir->mir()->templateObject());
8101 masm.createGCObject(objReg, tempReg, templateObject,
8102 lir->mir()->initialHeap(), ool->entry());
8104 masm.bind(ool->rejoin());
// OOL continuation for LNewObject: VM-call allocation, then rejoin.
8107 void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) {
8108 visitNewObjectVMCall(ool->lir());
8109 masm.jump(ool->rejoin());
// LNewPlainObject: inline allocation of a plain object with a known shape,
// alloc kind, and heap, falling back to NewPlainObjectOptimizedFallback in
// the VM. Slot initialization is elided when ShouldInitFixedSlots proves
// every fixed slot is written before the object can be observed.
8112 void CodeGenerator::visitNewPlainObject(LNewPlainObject* lir) {
8113 Register objReg = ToRegister(lir->output());
8114 Register temp0Reg = ToRegister(lir->temp0());
8115 Register temp1Reg = ToRegister(lir->temp1());
8116 Register shapeReg = ToRegister(lir->temp2());
8118 auto* mir = lir->mir();
8119 const Shape* shape = mir->shape();
8120 gc::Heap initialHeap = mir->initialHeap();
8121 gc::AllocKind allocKind = mir->allocKind();
8123 using Fn =
8124 JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind, gc::Heap);
8125 OutOfLineCode* ool = oolCallVM<Fn, NewPlainObjectOptimizedFallback>(
8126 lir,
8127 ArgList(ImmGCPtr(shape), Imm32(int32_t(allocKind)),
8128 Imm32(int32_t(initialHeap))),
8129 StoreRegisterTo(objReg));
// Skip the |undefined| fill when all fixed slots are provably initialized
// by the stores that immediately follow the allocation.
8131 bool initContents = ShouldInitFixedSlots(lir, shape, mir->numFixedSlots());
8133 masm.movePtr(ImmGCPtr(shape), shapeReg);
8134 masm.createPlainGCObject(
8135 objReg, shapeReg, temp0Reg, temp1Reg, mir->numFixedSlots(),
8136 mir->numDynamicSlots(), allocKind, initialHeap, ool->entry(),
8137 AllocSiteInput(gc::CatchAllAllocSite::Optimized), initContents);
8139 #ifdef DEBUG
8140 // ShouldInitFixedSlots expects that the leading GuardShape will never fail,
8141 // so ensure the newly created object has the correct shape. Should the guard
8142 // ever fail, we may end up with uninitialized fixed slots, which can confuse
8143 // the GC.
8144 Label ok;
8145 masm.branchTestObjShape(Assembler::Equal, objReg, shape, temp0Reg, objReg,
8146 &ok);
// Fix: this crash message is emitted only when the shape check FAILS, so it
// must describe the failure, not the success condition.
8147 masm.assumeUnreachable("Newly created object has an unexpected shape");
8148 masm.bind(&ok);
8149 #endif
8151 masm.bind(ool->rejoin());
// LNewArrayObject: inline allocation of an array with fixed elements sized
// from a guessed GC alloc kind, falling back to
// NewArrayObjectOptimizedFallback in the VM.
8154 void CodeGenerator::visitNewArrayObject(LNewArrayObject* lir) {
8155 Register objReg = ToRegister(lir->output());
8156 Register temp0Reg = ToRegister(lir->temp0());
8157 Register shapeReg = ToRegister(lir->temp1());
8159 auto* mir = lir->mir();
8160 uint32_t arrayLength = mir->length();
// Pick an alloc kind big enough for the length, then switch to the
// background (off-thread-sweepable) variant.
8162 gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
8163 MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
8164 allocKind = ForegroundToBackgroundAllocKind(allocKind);
8166 uint32_t slotCount = GetGCKindSlots(allocKind);
8167 MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
8168 uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;
8170 const Shape* shape = mir->shape();
8172 NewObjectKind objectKind =
8173 mir->initialHeap() == gc::Heap::Tenured ? TenuredObject : GenericObject;
8175 using Fn =
8176 ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, NewObjectKind);
8177 OutOfLineCode* ool = oolCallVM<Fn, NewArrayObjectOptimizedFallback>(
8178 lir,
8179 ArgList(Imm32(arrayLength), Imm32(int32_t(allocKind)), Imm32(objectKind)),
8180 StoreRegisterTo(objReg));
// NOTE(review): sibling visitNewPlainObject embeds its shape with ImmGCPtr;
// confirm ImmPtr is intentional here (i.e. the shape is kept alive/traced
// through some other mechanism).
8182 masm.movePtr(ImmPtr(shape), shapeReg);
8183 masm.createArrayWithFixedElements(
8184 objReg, shapeReg, temp0Reg, InvalidReg, arrayLength, arrayCapacity, 0, 0,
8185 allocKind, mir->initialHeap(), ool->entry(),
8186 AllocSiteInput(gc::CatchAllAllocSite::Optimized));
8187 masm.bind(ool->rejoin());
// Allocate a NamedLambdaObject environment from its template inline, falling
// back to NamedLambdaObject::createWithoutEnclosing in the VM.
8190 void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) {
8191 Register objReg = ToRegister(lir->output());
8192 Register tempReg = ToRegister(lir->temp0());
8193 const CompileInfo& info = lir->mir()->block()->info();
8195 using Fn = js::NamedLambdaObject* (*)(JSContext*, HandleFunction);
8196 OutOfLineCode* ool = oolCallVM<Fn, NamedLambdaObject::createWithoutEnclosing>(
8197 lir, ArgList(info.funMaybeLazy()), StoreRegisterTo(objReg));
8199 TemplateObject templateObject(lir->mir()->templateObj());
8201 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
8202 ool->entry());
8204 masm.bind(ool->rejoin());
// Allocate a CallObject environment from its template inline, falling back
// to CallObject::createWithShape in the VM.
8207 void CodeGenerator::visitNewCallObject(LNewCallObject* lir) {
8208 Register objReg = ToRegister(lir->output());
8209 Register tempReg = ToRegister(lir->temp0());
8211 CallObject* templateObj = lir->mir()->templateObject();
8213 using Fn = CallObject* (*)(JSContext*, Handle<SharedShape*>);
8214 OutOfLineCode* ool = oolCallVM<Fn, CallObject::createWithShape>(
8215 lir, ArgList(ImmGCPtr(templateObj->sharedShape())),
8216 StoreRegisterTo(objReg));
8218 // Inline call object creation, using the OOL path only for tricky cases.
8219 TemplateObject templateObject(templateObj);
8220 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
8221 ool->entry());
8223 masm.bind(ool->rejoin());
// new String(s): allocate a StringObject from its template inline, then
// store the primitive string and its length into the reserved slots; falls
// back to the NewStringObject VM call.
8226 void CodeGenerator::visitNewStringObject(LNewStringObject* lir) {
8227 Register input = ToRegister(lir->input());
8228 Register output = ToRegister(lir->output());
8229 Register temp = ToRegister(lir->temp0());
8231 StringObject* templateObj = lir->mir()->templateObj();
8233 using Fn = JSObject* (*)(JSContext*, HandleString);
8234 OutOfLineCode* ool = oolCallVM<Fn, NewStringObject>(lir, ArgList(input),
8235 StoreRegisterTo(output));
8237 TemplateObject templateObject(templateObj);
8238 masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
8239 ool->entry());
// The template was cloned with placeholder slots; fill in the actual
// primitive value and length for |input|.
8241 masm.loadStringLength(input, temp);
8243 masm.storeValue(JSVAL_TYPE_STRING, input,
8244 Address(output, StringObject::offsetOfPrimitiveValue()));
8245 masm.storeValue(JSVAL_TYPE_INT32, temp,
8246 Address(output, StringObject::offsetOfLength()));
8248 masm.bind(ool->rejoin());
// Defines a computed-key accessor property: pure VM call to
// InitElemGetterSetterOperation; args pushed in reverse of the Fn signature.
8251 void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) {
8252 Register obj = ToRegister(lir->object());
8253 Register value = ToRegister(lir->value());
8255 pushArg(value);
8256 pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex));
8257 pushArg(obj);
8258 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
8260 using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
8261 HandleObject);
8262 callVM<Fn, InitElemGetterSetterOperation>(lir);
// __proto__ mutation in an object literal: pure VM call to MutatePrototype.
8265 void CodeGenerator::visitMutateProto(LMutateProto* lir) {
8266 Register objReg = ToRegister(lir->object());
8268 pushArg(ToValue(lir, LMutateProto::ValueIndex));
8269 pushArg(objReg);
8271 using Fn =
8272 bool (*)(JSContext* cx, Handle<PlainObject*> obj, HandleValue value);
8273 callVM<Fn, MutatePrototype>(lir);
// Defines a named accessor property: pure VM call to
// InitPropGetterSetterOperation; args pushed in reverse of the Fn signature.
8276 void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) {
8277 Register obj = ToRegister(lir->object());
8278 Register value = ToRegister(lir->value());
8280 pushArg(value);
8281 pushArg(ImmGCPtr(lir->mir()->name()));
8282 pushArg(obj);
8283 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
8285 using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject,
8286 Handle<PropertyName*>, HandleObject);
8287 callVM<Fn, InitPropGetterSetterOperation>(lir);
// Creates the |this| object for a constructor call via CreateThisFromIon.
// Callee and new.target may each be a compile-time constant or a register.
8290 void CodeGenerator::visitCreateThis(LCreateThis* lir) {
8291 const LAllocation* callee = lir->callee();
8292 const LAllocation* newTarget = lir->newTarget();
8294 if (newTarget->isConstant()) {
8295 pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
8296 } else {
8297 pushArg(ToRegister(newTarget));
8300 if (callee->isConstant()) {
8301 pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
8302 } else {
8303 pushArg(ToRegister(callee));
8306 using Fn = bool (*)(JSContext* cx, HandleObject callee,
8307 HandleObject newTarget, MutableHandleValue rval);
8308 callVM<Fn, jit::CreateThisFromIon>(lir);
// Materializes the |arguments| object for the current frame. Fast path:
// allocate from the template with uninitialized reserved slots, then let
// ArgumentsObject::finishForIonPure fill them from the frame via an ABI
// call. On any failure, falls back to ArgumentsObject::createForIon (VM).
8311 void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) {
8312 // This should be getting constructed in the first block only, and not any OSR
8313 // entry blocks.
8314 MOZ_ASSERT(lir->mir()->block()->id() == 0);
8316 Register callObj = ToRegister(lir->callObject());
8317 Register temp0 = ToRegister(lir->temp0());
8318 Label done;
8320 if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
8321 Register objTemp = ToRegister(lir->temp1());
8322 Register cxTemp = ToRegister(lir->temp2());
// Preserve callObj across the pure path; restored on failure below.
8324 masm.Push(callObj);
8326 // Try to allocate an arguments object. This will leave the reserved
8327 // slots uninitialized, so it's important we don't GC until we
8328 // initialize these slots in ArgumentsObject::finishForIonPure.
8329 Label failure;
8330 TemplateObject templateObject(templateObj);
8331 masm.createGCObject(objTemp, temp0, templateObject, gc::Heap::Default,
8332 &failure,
8333 /* initContents = */ false);
// Compute the JitFrameLayout* for this frame (stack pointer plus pushed
// bytes, including the callObj we just pushed).
8335 masm.moveStackPtrTo(temp0);
8336 masm.addPtr(Imm32(masm.framePushed()), temp0);
8338 using Fn = ArgumentsObject* (*)(JSContext* cx, jit::JitFrameLayout* frame,
8339 JSObject* scopeChain, ArgumentsObject* obj);
8340 masm.setupAlignedABICall();
8341 masm.loadJSContext(cxTemp);
8342 masm.passABIArg(cxTemp);
8343 masm.passABIArg(temp0);
8344 masm.passABIArg(callObj);
8345 masm.passABIArg(objTemp);
8347 masm.callWithABI<Fn, ArgumentsObject::finishForIonPure>();
// finishForIonPure returns nullptr on failure.
8348 masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
8350 // Discard saved callObj on the stack.
8351 masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
8352 masm.jump(&done);
8354 masm.bind(&failure);
8355 masm.Pop(callObj);
// Slow path: recompute the frame pointer and call into the VM.
8358 masm.moveStackPtrTo(temp0);
8359 masm.addPtr(Imm32(frameSize()), temp0);
8361 pushArg(callObj);
8362 pushArg(temp0);
8364 using Fn = ArgumentsObject* (*)(JSContext*, JitFrameLayout*, HandleObject);
8365 callVM<Fn, ArgumentsObject::createForIon>(lir);
8367 masm.bind(&done);
// Materializes |arguments| for an inlined frame. The actual argument Values
// are first copied onto the stack (in reverse, so they read in order), then
// either finished pure via finishInlineForIonPure or created through the
// createForInlinedIon VM call.
8370 void CodeGenerator::visitCreateInlinedArgumentsObject(
8371 LCreateInlinedArgumentsObject* lir) {
8372 Register callObj = ToRegister(lir->getCallObject());
8373 Register callee = ToRegister(lir->getCallee());
8374 Register argsAddress = ToRegister(lir->temp1());
8375 Register argsObj = ToRegister(lir->temp2());
8377 // TODO: Do we have to worry about alignment here?
8379 // Create a contiguous array of values for ArgumentsObject::create
8380 // by pushing the arguments onto the stack in reverse order.
8381 uint32_t argc = lir->mir()->numActuals();
8382 for (uint32_t i = 0; i < argc; i++) {
8383 uint32_t argNum = argc - i - 1;
8384 uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(argNum);
8385 ConstantOrRegister arg =
8386 toConstantOrRegister(lir, index, lir->mir()->getArg(argNum)->type());
8387 masm.Push(arg);
8389 masm.moveStackPtrTo(argsAddress);
8391 Label done;
8392 if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
8393 LiveRegisterSet liveRegs;
8394 liveRegs.add(callObj);
8395 liveRegs.add(callee);
8397 masm.PushRegsInMask(liveRegs);
8399 // We are free to clobber all registers, as LCreateInlinedArgumentsObject is
8400 // a call instruction.
8401 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
8402 allRegs.take(callObj);
8403 allRegs.take(callee);
8404 allRegs.take(argsObj);
8405 allRegs.take(argsAddress);
8407 Register temp3 = allRegs.takeAny();
8408 Register temp4 = allRegs.takeAny();
8410 // Try to allocate an arguments object. This will leave the reserved slots
8411 // uninitialized, so it's important we don't GC until we initialize these
8412 // slots in ArgumentsObject::finishForIonPure.
8413 Label failure;
8414 TemplateObject templateObject(templateObj);
8415 masm.createGCObject(argsObj, temp3, templateObject, gc::Heap::Default,
8416 &failure,
8417 /* initContents = */ false);
8419 Register numActuals = temp3;
8420 masm.move32(Imm32(argc), numActuals);
8422 using Fn = ArgumentsObject* (*)(JSContext*, JSObject*, JSFunction*, Value*,
8423 uint32_t, ArgumentsObject*);
8424 masm.setupAlignedABICall();
8425 masm.loadJSContext(temp4);
8426 masm.passABIArg(temp4);
8427 masm.passABIArg(callObj);
8428 masm.passABIArg(callee);
8429 masm.passABIArg(argsAddress);
8430 masm.passABIArg(numActuals);
8431 masm.passABIArg(argsObj);
8433 masm.callWithABI<Fn, ArgumentsObject::finishInlineForIonPure>();
// finishInlineForIonPure returns nullptr on failure.
8434 masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
8436 // Discard saved callObj, callee, and values array on the stack.
8437 masm.addToStackPtr(
8438 Imm32(MacroAssembler::PushRegsInMaskSizeInBytes(liveRegs) +
8439 argc * sizeof(Value)));
8440 masm.jump(&done);
8442 masm.bind(&failure);
8443 masm.PopRegsInMask(liveRegs);
8445 // Reload argsAddress because it may have been overridden.
8446 masm.moveStackPtrTo(argsAddress);
// Slow path: hand the stack-resident argument array to the VM.
8449 pushArg(Imm32(argc));
8450 pushArg(callObj);
8451 pushArg(callee);
8452 pushArg(argsAddress);
8454 using Fn = ArgumentsObject* (*)(JSContext*, Value*, HandleFunction,
8455 HandleObject, uint32_t);
8456 callVM<Fn, ArgumentsObject::createForInlinedIon>(lir);
8458 // Discard the array of values.
8459 masm.freeStack(argc * sizeof(Value));
8461 masm.bind(&done);
// Shared emitter for LGetInlinedArgument / LGetInlinedArgumentHole: selects
// the argument Value for a dynamic |index| among the inlined call's actuals
// by emitting a compare-and-move chain (the index is already bounds-checked
// by the caller, so the final argument is loaded unconditionally).
8464 template <class GetInlinedArgument>
8465 void CodeGenerator::emitGetInlinedArgument(GetInlinedArgument* lir,
8466 Register index,
8467 ValueOperand output) {
8468 uint32_t numActuals = lir->mir()->numActuals();
8469 MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);
8471 // The index has already been bounds-checked, so the code we
8472 // generate here should be unreachable. We can end up in this
8473 // situation in self-hosted code using GetArgument(), or in a
8474 // monomorphically inlined function if we've inlined some CacheIR
8475 // that was created for a different caller.
8476 if (numActuals == 0) {
8477 masm.assumeUnreachable("LGetInlinedArgument: invalid index");
8478 return;
8481 // Check the first n-1 possible indices.
8482 Label done;
8483 for (uint32_t i = 0; i < numActuals - 1; i++) {
8484 Label skip;
8485 ConstantOrRegister arg = toConstantOrRegister(
8486 lir, GetInlinedArgument::ArgIndex(i), lir->mir()->getArg(i)->type());
8487 masm.branch32(Assembler::NotEqual, index, Imm32(i), &skip);
8488 masm.moveValue(arg, output);
8490 masm.jump(&done);
8491 masm.bind(&skip);
8494 #ifdef DEBUG
// In debug builds, verify the fall-through case really is the last index.
8495 Label skip;
8496 masm.branch32(Assembler::Equal, index, Imm32(numActuals - 1), &skip);
8497 masm.assumeUnreachable("LGetInlinedArgument: invalid index");
8498 masm.bind(&skip);
8499 #endif
8501 // The index has already been bounds-checked, so load the last argument.
8502 uint32_t lastIdx = numActuals - 1;
8503 ConstantOrRegister arg =
8504 toConstantOrRegister(lir, GetInlinedArgument::ArgIndex(lastIdx),
8505 lir->mir()->getArg(lastIdx)->type());
8506 masm.moveValue(arg, output);
8507 masm.bind(&done);
// LGetInlinedArgument: thin wrapper over emitGetInlinedArgument.
8510 void CodeGenerator::visitGetInlinedArgument(LGetInlinedArgument* lir) {
8511 Register index = ToRegister(lir->getIndex());
8512 ValueOperand output = ToOutValue(lir);
8514 emitGetInlinedArgument(lir, index, output);
// LGetInlinedArgumentHole: like LGetInlinedArgument but out-of-bounds
// indices yield |undefined| (negative indices bail out).
8517 void CodeGenerator::visitGetInlinedArgumentHole(LGetInlinedArgumentHole* lir) {
8518 Register index = ToRegister(lir->getIndex());
8519 ValueOperand output = ToOutValue(lir);
8521 uint32_t numActuals = lir->mir()->numActuals();
// With no actuals every non-negative index is a hole.
8523 if (numActuals == 0) {
8524 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
8525 masm.moveValue(UndefinedValue(), output);
8526 return;
8529 Label outOfBounds, done;
8530 masm.branch32(Assembler::AboveOrEqual, index, Imm32(numActuals),
8531 &outOfBounds);
8533 emitGetInlinedArgument(lir, index, output);
8534 masm.jump(&done);
8536 masm.bind(&outOfBounds);
8537 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
8538 masm.moveValue(UndefinedValue(), output);
8540 masm.bind(&done);
// Loads argument |argno| (compile-time constant) out of an arguments
// object's ArgumentsData. In debug builds, asserts the slot was not the
// forwarded/deleted magic value.
8543 void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) {
8544 Register temp = ToRegister(lir->temp0());
8545 Register argsObj = ToRegister(lir->argsObject());
8546 ValueOperand out = ToOutValue(lir);
8548 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
8549 temp);
8550 Address argAddr(temp, ArgumentsData::offsetOfArgs() +
8551 lir->mir()->argno() * sizeof(Value));
8552 masm.loadValue(argAddr, out);
8553 #ifdef DEBUG
8554 Label success;
8555 masm.branchTestMagic(Assembler::NotEqual, out, &success);
8556 masm.assumeUnreachable(
8557 "Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
8558 masm.bind(&success);
8559 #endif
// Stores a Value into argument |argno| of an arguments object's
// ArgumentsData, emitting the pre-barrier on the overwritten slot first.
8562 void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) {
8563 Register temp = ToRegister(lir->getTemp(0));
8564 Register argsObj = ToRegister(lir->argsObject());
8565 ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);
8567 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
8568 temp);
8569 Address argAddr(temp, ArgumentsData::offsetOfArgs() +
8570 lir->mir()->argno() * sizeof(Value));
// GC pre-barrier on the old slot value before it is overwritten.
8571 emitPreBarrier(argAddr);
8572 #ifdef DEBUG
8573 Label success;
8574 masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
8575 masm.assumeUnreachable(
8576 "Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
8577 masm.bind(&success);
8578 #endif
8579 masm.storeValue(value, argAddr);
// Loads arguments[index] for a dynamic index; the MacroAssembler helper
// branches to |bail| on any case it cannot handle, triggering a bailout.
8582 void CodeGenerator::visitLoadArgumentsObjectArg(LLoadArgumentsObjectArg* lir) {
8583 Register temp = ToRegister(lir->temp0());
8584 Register argsObj = ToRegister(lir->argsObject());
8585 Register index = ToRegister(lir->index());
8586 ValueOperand out = ToOutValue(lir);
8588 Label bail;
8589 masm.loadArgumentsObjectElement(argsObj, index, out, temp, &bail);
8590 bailoutFrom(&bail, lir->snapshot());
// Hole-tolerant variant of visitLoadArgumentsObjectArg; uses the
// ...ElementHole helper, bailing out on unhandled cases.
8593 void CodeGenerator::visitLoadArgumentsObjectArgHole(
8594 LLoadArgumentsObjectArgHole* lir) {
8595 Register temp = ToRegister(lir->temp0());
8596 Register argsObj = ToRegister(lir->argsObject());
8597 Register index = ToRegister(lir->index());
8598 ValueOperand out = ToOutValue(lir);
8600 Label bail;
8601 masm.loadArgumentsObjectElementHole(argsObj, index, out, temp, &bail);
8602 bailoutFrom(&bail, lir->snapshot());
// |index in arguments| test: boolean result of element existence, bailing
// out on cases the helper cannot decide inline.
8605 void CodeGenerator::visitInArgumentsObjectArg(LInArgumentsObjectArg* lir) {
8606 Register temp = ToRegister(lir->temp0());
8607 Register argsObj = ToRegister(lir->argsObject());
8608 Register index = ToRegister(lir->index());
8609 Register out = ToRegister(lir->output());
8611 Label bail;
8612 masm.loadArgumentsObjectElementExists(argsObj, index, out, temp, &bail);
8613 bailoutFrom(&bail, lir->snapshot());
// Loads argsObj.length into an int32 output; the MacroAssembler helper
// jumps to |bail| when the length cannot be read directly (Ion bailout).
8616 void CodeGenerator::visitArgumentsObjectLength(LArgumentsObjectLength* lir) {
8617   Register argsObj = ToRegister(lir->argsObject());
8618   Register out = ToRegister(lir->output());
8620   Label bail;
8621   masm.loadArgumentsObjectLength(argsObj, out, &bail);
8622   bailoutFrom(&bail, lir->snapshot());
// Materializes an ArrayObject from an ArgumentsObject via a VM call; no
// inline fast path is emitted here.
8625 void CodeGenerator::visitArrayFromArgumentsObject(
8626     LArrayFromArgumentsObject* lir) {
8627   pushArg(ToRegister(lir->argsObject()));
8629   using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
8630   callVM<Fn, js::ArrayFromArgumentsObject>(lir);
// Guard that none of the given ArgumentsObject flags are set: the NonZero
// condition branches to |bail| if any flag bit in mir()->flags() is set,
// invalidating assumptions made during compilation.
8633 void CodeGenerator::visitGuardArgumentsObjectFlags(
8634     LGuardArgumentsObjectFlags* lir) {
8635   Register argsObj = ToRegister(lir->argsObject());
8636   Register temp = ToRegister(lir->temp0());
8638   Label bail;
8639   masm.branchTestArgumentsObjectFlags(argsObj, temp, lir->mir()->flags(),
8640                                       Assembler::NonZero, &bail);
8641   bailoutFrom(&bail, lir->snapshot());
// Extracts the number of bound arguments from a BoundFunctionObject: the
// count is stored in the upper bits of the int32 flags slot, so unbox the
// slot and shift right by NumBoundArgsShift.
8644 void CodeGenerator::visitBoundFunctionNumArgs(LBoundFunctionNumArgs* lir) {
8645   Register obj = ToRegister(lir->object());
8646   Register output = ToRegister(lir->output());
8648   masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
8649                   output);
8650   masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
// Guard that a BoundFunctionObject is a constructor: bail out of Ion code
// if the IsConstructorFlag bit is clear in the object's flags slot.
8653 void CodeGenerator::visitGuardBoundFunctionIsConstructor(
8654     LGuardBoundFunctionIsConstructor* lir) {
8655   Register obj = ToRegister(lir->object());
8657   Label bail;
8658   Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
8659   masm.branchTest32(Assembler::Zero, flagsSlot,
8660                     Imm32(BoundFunctionObject::IsConstructorFlag), &bail);
8661   bailoutFrom(&bail, lir->snapshot());
// Implements the [[Construct]] return-value rule: if the constructor's
// return |value| is an object, return that object; otherwise return the
// freshly created |obj| (the |this| object).
8664 void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) {
8665   ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
8666   Register obj = ToRegister(lir->object());
8667   Register output = ToRegister(lir->output());
8669   Label valueIsObject, end;
8671   masm.branchTestObject(Assembler::Equal, value, &valueIsObject);
8673   // Value is not an object. Return that other object.
8674   masm.movePtr(obj, output);
8675   masm.jump(&end);
8677   // Value is an object. Return unbox(Value).
8678   masm.bind(&valueIsObject);
8679   Register payload = masm.extractObject(value, output);
// extractObject may already place the payload in |output|; only move when
// it chose a different register.
8680   if (payload != output) {
8681     masm.movePtr(payload, output);
8684   masm.bind(&end);
// Out-of-line code holder for the slow path of LBoxNonStrictThis; simply
// stores the LIR node and dispatches back into the CodeGenerator visitor.
8687 class OutOfLineBoxNonStrictThis : public OutOfLineCodeBase<CodeGenerator> {
8688   LBoxNonStrictThis* ins_;
8690  public:
8691   explicit OutOfLineBoxNonStrictThis(LBoxNonStrictThis* ins) : ins_(ins) {}
8692   void accept(CodeGenerator* codegen) override {
8693     codegen->visitOutOfLineBoxNonStrictThis(this);
8695   LBoxNonStrictThis* ins() const { return ins_; }
// Fast path for boxing |this| in non-strict functions: if the value is
// already an object, unbox it inline; otherwise jump to the out-of-line
// slow path (visitOutOfLineBoxNonStrictThis).
8698 void CodeGenerator::visitBoxNonStrictThis(LBoxNonStrictThis* lir) {
8699   ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
8700   Register output = ToRegister(lir->output());
8702   auto* ool = new (alloc()) OutOfLineBoxNonStrictThis(lir);
8703   addOutOfLineCode(ool, lir->mir());
8705   masm.fallibleUnboxObject(value, output, ool->entry());
8706   masm.bind(ool->rejoin());
// Slow path for LBoxNonStrictThis: null/undefined |this| maps to the
// compile-time-known globalThis object; any other primitive goes through
// the BoxNonStrictThis VM call.
8709 void CodeGenerator::visitOutOfLineBoxNonStrictThis(
8710     OutOfLineBoxNonStrictThis* ool) {
8711   LBoxNonStrictThis* lir = ool->ins();
8713   ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
8714   Register output = ToRegister(lir->output());
8716   Label notNullOrUndefined;
// Scoped block so the ScratchTagScope is released before the VM call below.
8718     Label isNullOrUndefined;
8719     ScratchTagScope tag(masm, value);
8720     masm.splitTagForTest(value, tag);
8721     masm.branchTestUndefined(Assembler::Equal, tag, &isNullOrUndefined);
8722     masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
8723     masm.bind(&isNullOrUndefined);
8724     masm.movePtr(ImmGCPtr(lir->mir()->globalThis()), output);
8725     masm.jump(ool->rejoin());
8728   masm.bind(&notNullOrUndefined);
// VM-call path: save live registers, call BoxNonStrictThis, store the
// returned object into |output|, and restore everything else.
8730   saveLive(lir);
8732   pushArg(value);
8733   using Fn = JSObject* (*)(JSContext*, HandleValue);
8734   callVM<Fn, BoxNonStrictThis>(lir);
8736   StoreRegisterTo(output).generate(this);
8737   restoreLiveIgnore(lir, StoreRegisterTo(output).clobbered());
8739   masm.jump(ool->rejoin());
// Computes the implicit |this| for a named call via the
// ImplicitThisOperation VM call; arguments are pushed in reverse order.
8742 void CodeGenerator::visitImplicitThis(LImplicitThis* lir) {
8743   pushArg(ImmGCPtr(lir->mir()->name()));
8744   pushArg(ToRegister(lir->env()));
8746   using Fn = bool (*)(JSContext*, HandleObject, Handle<PropertyName*>,
8747                       MutableHandleValue);
8748   callVM<Fn, ImplicitThisOperation>(lir);
// Loads the elements header's length as int32. The length is stored as
// uint32, so bail out if the sign bit is set (length does not fit in int32).
8751 void CodeGenerator::visitArrayLength(LArrayLength* lir) {
8752   Register elements = ToRegister(lir->elements());
8753   Register output = ToRegister(lir->output());
8755   Address length(elements, ObjectElements::offsetOfLength());
8756   masm.load32(length, output);
8758   // Bail out if the length doesn't fit in int32.
8759   bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
// Stores |index + 1| into |length|. When |index| lives in a register, the
// register is incremented, stored, and then decremented again so its
// original value is preserved for later uses.
8762 static void SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index,
8763                                const Address& length) {
8764   if (index->isConstant()) {
8765     masm.store32(Imm32(ToInt32(index) + 1), length);
8766   } else {
8767     Register newLength = ToRegister(index);
8768     masm.add32(Imm32(1), newLength);
8769     masm.store32(newLength, length);
// Restore the register to its original value (the index).
8770     masm.sub32(Imm32(1), newLength);
// Sets the elements header's length field to index + 1.
8774 void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) {
8775   Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
8776   SetLengthFromIndex(masm, lir->index(), length);
// Loads fun.length as int32, bailing out whenever the inline path cannot
// produce the correct answer (self-hosted lazy scripts, or a previously
// resolved - and thus possibly shadowed - length property).
8779 void CodeGenerator::visitFunctionLength(LFunctionLength* lir) {
8780   Register function = ToRegister(lir->function());
8781   Register output = ToRegister(lir->output());
8783   Label bail;
8785   // Get the JSFunction flags.
8786   masm.load32(Address(function, JSFunction::offsetOfFlagsAndArgCount()),
8787               output);
8789   // Functions with a SelfHostedLazyScript must be compiled with the slow-path
8790   // before the function length is known. If the length was previously resolved,
8791   // the length property may be shadowed.
8792   masm.branchTest32(
8793       Assembler::NonZero, output,
8794       Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
8795       &bail);
// |output| doubles as a temp here: loadFunctionLength overwrites it with
// the actual length value.
8797   masm.loadFunctionLength(function, output, output, &bail);
8799   bailoutFrom(&bail, lir->snapshot());
// Loads fun.name, substituting the runtime's empty atom when the function
// has no name; cases the helper cannot handle jump to |bail|.
8802 void CodeGenerator::visitFunctionName(LFunctionName* lir) {
8803   Register function = ToRegister(lir->function());
8804   Register output = ToRegister(lir->output());
8806   Label bail;
8808   const JSAtomState& names = gen->runtime->names();
8809   masm.loadFunctionName(function, output, ImmGCPtr(names.empty_), &bail);
8811   bailoutFrom(&bail, lir->snapshot());
// RangeFront<T>: computes |front| = address of the hash-table entry that
// range.i currently refers to. Only the ValueMap/ValueSet specializations
// below are defined; the primary template is declaration-only.
8814 template <class OrderedHashTable>
8815 static void RangeFront(MacroAssembler&, Register, Register, Register);
// ValueMap specialization: front = &range.ht.data[i]. Each map entry is 24
// bytes, so the byte offset i * 24 is computed as (i * 3) << 3.
8817 template <>
8818 void RangeFront<ValueMap>(MacroAssembler& masm, Register range, Register i,
8819                           Register front) {
8820   masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
8821   masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);
8823   MOZ_ASSERT(ValueMap::offsetOfImplDataElement() == 0,
8824              "offsetof(Data, element) is 0");
8825   static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
8826   masm.mulBy3(i, i);
8827   masm.lshiftPtr(Imm32(3), i);
8828   masm.addPtr(i, front);
// ValueSet specialization: front = &range.ht.data[i]. Each set entry is 16
// bytes, so the byte offset is simply i << 4.
8831 template <>
8832 void RangeFront<ValueSet>(MacroAssembler& masm, Register range, Register i,
8833                           Register front) {
8834   masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
8835   masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);
8837   MOZ_ASSERT(ValueSet::offsetOfImplDataElement() == 0,
8838              "offsetof(Data, element) is 0");
8839   static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
8840   masm.lshiftPtr(Imm32(4), i);
8841   masm.addPtr(i, front);
// Advances the range past the current entry: bumps range.count, then scans
// |i| (and |front| in lockstep) forward until a live entry is found - empty
// entries are marked with the JS_HASH_KEY_EMPTY magic value - or until
// |dataLength| is reached. The final |i| is written back to the range.
8844 template <class OrderedHashTable>
8845 static void RangePopFront(MacroAssembler& masm, Register range, Register front,
8846                           Register dataLength, Register temp) {
8847   Register i = temp;
8849   masm.add32(Imm32(1),
8850              Address(range, OrderedHashTable::Range::offsetOfCount()));
8852   masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);
8854   Label done, seek;
8855   masm.bind(&seek);
8856   masm.add32(Imm32(1), i);
8857   masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);
8859   // We can add sizeof(Data) to |front| to select the next element, because
8860   // |front| and |range.ht.data[i]| point to the same location.
8861   MOZ_ASSERT(OrderedHashTable::offsetOfImplDataElement() == 0,
8862              "offsetof(Data, element) is 0");
8863   masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);
8865   masm.branchTestMagic(Assembler::Equal,
8866                        Address(front, OrderedHashTable::offsetOfEntryKey()),
8867                        JS_HASH_KEY_EMPTY, &seek);
8869   masm.bind(&done);
8870   masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
// Tears down a Range: unlinks it from the table's doubly-linked list of
// ranges (*range.prevp = range.next; next.prevp = range.prevp if non-null),
// then frees the malloc'ed Range - but only when the iterator object is
// tenured; nursery-allocated iterators have their range memory reclaimed by
// the GC instead.
8873 template <class OrderedHashTable>
8874 static inline void RangeDestruct(MacroAssembler& masm, Register iter,
8875                                  Register range, Register temp0,
8876                                  Register temp1) {
8877   Register next = temp0;
8878   Register prevp = temp1;
8880   masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
8881   masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
8882   masm.storePtr(next, Address(prevp, 0));
8884   Label hasNoNext;
8885   masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);
8887   masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));
8889   masm.bind(&hasNoNext);
8891   Label nurseryAllocated;
8892   masm.branchPtrInNurseryChunk(Assembler::Equal, iter, temp0,
8893                                &nurseryAllocated);
8895   masm.callFreeStub(range);
8897   masm.bind(&nurseryAllocated);
// Copies the current map entry's key and value into the first two fixed
// elements of the |result| array. Pre-barriers cover the old element
// values; a post-write barrier is emitted if either the key or value is a
// nursery cell (result may be tenured).
8900 template <>
8901 void CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result,
8902                                                      Register temp,
8903                                                      Register front) {
8904   size_t elementsOffset = NativeObject::offsetOfFixedElements();
8906   Address keyAddress(front, ValueMap::Entry::offsetOfKey());
8907   Address valueAddress(front, ValueMap::Entry::offsetOfValue());
8908   Address keyElemAddress(result, elementsOffset);
8909   Address valueElemAddress(result, elementsOffset + sizeof(Value));
8910   masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
8911   masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
8912   masm.storeValue(keyAddress, keyElemAddress, temp);
8913   masm.storeValue(valueAddress, valueElemAddress, temp);
// Take the barrier path if the key is in the nursery, or skip entirely if
// neither key nor value is a nursery cell.
8915   Label emitBarrier, skipBarrier;
8916   masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp,
8917                                 &emitBarrier);
8918   masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp,
8919                                 &skipBarrier);
8921   masm.bind(&emitBarrier);
8922   saveVolatile(temp);
8923   emitPostWriteBarrier(result);
8924   restoreVolatile(temp);
8926   masm.bind(&skipBarrier);
// Copies the current set entry's key into the first fixed element of the
// |result| array, with a pre-barrier on the old element and a post-write
// barrier when the key is a nursery cell.
8929 template <>
8930 void CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result,
8931                                                      Register temp,
8932                                                      Register front) {
8933   size_t elementsOffset = NativeObject::offsetOfFixedElements();
8935   Address keyAddress(front, ValueSet::offsetOfEntryKey());
8936   Address keyElemAddress(result, elementsOffset);
8937   masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
8938   masm.storeValue(keyAddress, keyElemAddress, temp);
8940   Label skipBarrier;
8941   masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp,
8942                                 &skipBarrier);
8944   saveVolatile(temp);
8945   emitPostWriteBarrier(result);
8946   restoreVolatile(temp);
8948   masm.bind(&skipBarrier);
// Shared implementation of GetNextEntryForIterator for Map/Set iterators.
// Loads the iterator's Range; if the range is null or exhausted the
// iterator is done (output = 1, range destroyed and nulled out); otherwise
// the current entry is copied into |result|, the range is advanced, and
// output = 0.
8951 template <class IteratorObject, class OrderedHashTable>
8952 void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) {
8953   Register iter = ToRegister(lir->iter());
8954   Register result = ToRegister(lir->result());
8955   Register temp = ToRegister(lir->temp0());
8956   Register dataLength = ToRegister(lir->temp1());
8957   Register range = ToRegister(lir->temp2());
8958   Register output = ToRegister(lir->output());
8960 #ifdef DEBUG
8961   // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
8962   // only called with the correct iterator class. Assert here all self-
8963   // hosted callers of GetNextEntryForIterator perform this class check.
8964   // No Spectre mitigations are needed because this is DEBUG-only code.
8965   Label success;
8966   masm.branchTestObjClassNoSpectreMitigations(
8967       Assembler::Equal, iter, &IteratorObject::class_, temp, &success);
8968   masm.assumeUnreachable("Iterator object should have the correct class.");
8969   masm.bind(&success);
8970 #endif
8972   masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(
8973                                      IteratorObject::RangeSlot)),
8974                    range);
8976   Label iterAlreadyDone, iterDone, done;
8977   masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);
// temp = range.i, dataLength = range.ht.dataLength; done when i >= length.
8979   masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
8980   masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()),
8981                dataLength);
8982   masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()),
8983               dataLength);
8984   masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);
// |iter| is spilled to the stack so its register can be reused as |front|.
8986     masm.Push(iter);
8988     Register front = iter;
8989     RangeFront<OrderedHashTable>(masm, range, temp, front);
8991     emitLoadIteratorValues<OrderedHashTable>(result, temp, front);
8993     RangePopFront<OrderedHashTable>(masm, range, front, dataLength, temp);
8995     masm.Pop(iter);
8996     masm.move32(Imm32(0), output);
8998   masm.jump(&done);
9000   masm.bind(&iterDone);
9002   RangeDestruct<OrderedHashTable>(masm, iter, range, temp, dataLength);
// Clear the iterator's range slot so subsequent calls take the
// iterAlreadyDone path.
9004   masm.storeValue(PrivateValue(nullptr),
9005                   Address(iter, NativeObject::getFixedSlotOffset(
9006                                     IteratorObject::RangeSlot)));
9008   masm.bind(&iterAlreadyDone);
9010   masm.move32(Imm32(1), output);
9012   masm.bind(&done);
// Dispatches to the Map or Set instantiation of emitGetNextEntryForIterator
// based on the MIR node's mode.
9015 void CodeGenerator::visitGetNextEntryForIterator(
9016     LGetNextEntryForIterator* lir) {
9017   if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
9018     emitGetNextEntryForIterator<MapIteratorObject, ValueMap>(lir);
9019   } else {
9020     MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
9021     emitGetNextEntryForIterator<SetIteratorObject, ValueSet>(lir);
9025 // The point of these is to inform Ion of where these values already are; they
9026 // don't normally generate (much) code.
9027 void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
9028 void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
9029 void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
// Zero-initializes the reference-typed (anyref) slots of a wasm stack
// result area so the GC never sees uninitialized pointers. The zero
// register is set up lazily, only if at least one ref result exists.
9031 void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
9032   LAllocation* output = lir->getDef(0)->output();
9033   MOZ_ASSERT(output->isStackArea());
9034   bool tempInit = false;
9035   for (auto iter = output->toStackArea()->results(); iter; iter.next()) {
9036     // Zero out ref stack results.
9037     if (iter.isWasmAnyRef()) {
9038       Register temp = ToRegister(lir->temp0());
9039       if (!tempInit) {
9040         masm.xorPtr(temp, temp);
9041         tempInit = true;
9043       masm.storePtr(temp, ToAddress(iter.alloc()));
// On 64-bit targets, ensure an Int32 register result is widened (upper bits
// normalized) before use; a no-op elsewhere.
9048 void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
9049 #ifdef JS_64BIT
9050   if (MWasmRegisterResult* mir = lir->mir()) {
9051     if (mir->type() == MIRType::Int32) {
9052       masm.widenInt32(ToRegister(lir->output()));
9055 #endif
// Emits a wasm call (or tail/return call) for every CalleeDesc kind:
// direct function, import, asm.js table, wasm table, builtin, builtin
// instance method, and funcref. Also records safepoints/stack maps, manages
// try-note ranges for catchable calls, and reloads the instance/pinned
// registers and realm after calls that may clobber them. Return calls
// (ENABLE_WASM_TAIL_CALLS) exit early since no post-call bookkeeping runs.
9058 void CodeGenerator::visitWasmCall(LWasmCall* lir) {
9059   const MWasmCallBase* callBase = lir->callBase();
9060   bool isReturnCall = lir->isReturnCall();
9062   // If this call is in Wasm try code block, initialise a wasm::TryNote for this
9063   // call.
9064   bool inTry = callBase->inTry();
9065   if (inTry) {
9066     size_t tryNoteIndex = callBase->tryNoteIndex();
9067     wasm::TryNoteVector& tryNotes = masm.tryNotes();
9068     wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
9069     tryNote.setTryBodyBegin(masm.currentOffset());
9072   MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment ==
9074   static_assert(
9075       WasmStackAlignment >= ABIStackAlignment &&
9076           WasmStackAlignment % ABIStackAlignment == 0,
9077       "The wasm stack alignment should subsume the ABI-required alignment");
9079 #ifdef DEBUG
9080   Label ok;
9081   masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
9082   masm.breakpoint();
9083   masm.bind(&ok);
9084 #endif
9086   // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
9087   // instance and pinned regs. The only case where we don't have to
9088   // reload the instance and pinned regs is when the callee preserves them.
9089   bool reloadRegs = true;
9090   bool switchRealm = true;
9092   const wasm::CallSiteDesc& desc = callBase->desc();
9093   const wasm::CalleeDesc& callee = callBase->callee();
9094   CodeOffset retOffset;
9095   CodeOffset secondRetOffset;
9096   switch (callee.which()) {
9097     case wasm::CalleeDesc::Func:
9098 #ifdef ENABLE_WASM_TAIL_CALLS
9099       if (isReturnCall) {
9100         ReturnCallAdjustmentInfo retCallInfo(
9101             callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
9102         masm.wasmReturnCall(desc, callee.funcIndex(), retCallInfo);
9103         // The rest of the method is unnecessary for a return call.
9104         return;
9106 #endif
9107       MOZ_ASSERT(!isReturnCall);
// Direct calls within the module preserve instance/pinned regs and realm.
9108       retOffset = masm.call(desc, callee.funcIndex());
9109       reloadRegs = false;
9110       switchRealm = false;
9111       break;
9112     case wasm::CalleeDesc::Import:
9113 #ifdef ENABLE_WASM_TAIL_CALLS
9114       if (isReturnCall) {
9115         ReturnCallAdjustmentInfo retCallInfo(
9116             callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
9117         masm.wasmReturnCallImport(desc, callee, retCallInfo);
9118         // The rest of the method is unnecessary for a return call.
9119         return;
9121 #endif
9122       MOZ_ASSERT(!isReturnCall);
9123       retOffset = masm.wasmCallImport(desc, callee);
9124       break;
9125     case wasm::CalleeDesc::AsmJSTable:
9126       retOffset = masm.asmCallIndirect(desc, callee);
9127       break;
9128     case wasm::CalleeDesc::WasmTable: {
// Table calls need out-of-line traps for the bounds check and (when there
// is no heap register) the null-entry check.
9129       Label* boundsCheckFailed = nullptr;
9130       if (lir->needsBoundsCheck()) {
9131         OutOfLineAbortingWasmTrap* ool =
9132             new (alloc()) OutOfLineAbortingWasmTrap(
9133                 wasm::BytecodeOffset(desc.lineOrBytecode()),
9134                 wasm::Trap::OutOfBounds);
9135         if (lir->isCatchable()) {
9136           addOutOfLineCode(ool, lir->mirCatchable());
9137         } else if (isReturnCall) {
9138 #ifdef ENABLE_WASM_TAIL_CALLS
9139           addOutOfLineCode(ool, lir->mirReturnCall());
9140 #else
9141           MOZ_CRASH("Return calls are disabled.");
9142 #endif
9143         } else {
9144           addOutOfLineCode(ool, lir->mirUncatchable());
9146         boundsCheckFailed = ool->entry();
9148       Label* nullCheckFailed = nullptr;
9149 #ifndef WASM_HAS_HEAPREG
9151       OutOfLineAbortingWasmTrap* ool =
9152           new (alloc()) OutOfLineAbortingWasmTrap(
9153               wasm::BytecodeOffset(desc.lineOrBytecode()),
9154               wasm::Trap::IndirectCallToNull);
9155       if (lir->isCatchable()) {
9156         addOutOfLineCode(ool, lir->mirCatchable());
9157       } else if (isReturnCall) {
9158 #  ifdef ENABLE_WASM_TAIL_CALLS
9159         addOutOfLineCode(ool, lir->mirReturnCall());
9160 #  else
9161         MOZ_CRASH("Return calls are disabled.");
9162 #  endif
9163       } else {
9164         addOutOfLineCode(ool, lir->mirUncatchable());
9166       nullCheckFailed = ool->entry();
9168 #endif
9169 #ifdef ENABLE_WASM_TAIL_CALLS
9170       if (isReturnCall) {
9171         ReturnCallAdjustmentInfo retCallInfo(
9172             callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
9173         masm.wasmReturnCallIndirect(desc, callee, boundsCheckFailed,
9174                                     nullCheckFailed, mozilla::Nothing(),
9175                                     retCallInfo);
9176         // The rest of the method is unnecessary for a return call.
9177         return;
9179 #endif
9180       MOZ_ASSERT(!isReturnCall);
9181       masm.wasmCallIndirect(desc, callee, boundsCheckFailed, nullCheckFailed,
9182                             lir->tableSize(), &retOffset, &secondRetOffset);
9183       // Register reloading and realm switching are handled dynamically inside
9184       // wasmCallIndirect. There are two return offsets, one for each call
9185       // instruction (fast path and slow path).
9186       reloadRegs = false;
9187       switchRealm = false;
9188       break;
9190     case wasm::CalleeDesc::Builtin:
9191       retOffset = masm.call(desc, callee.builtin());
9192       reloadRegs = false;
9193       switchRealm = false;
9194       break;
9195     case wasm::CalleeDesc::BuiltinInstanceMethod:
9196       retOffset = masm.wasmCallBuiltinInstanceMethod(
9197           desc, callBase->instanceArg(), callee.builtin(),
9198           callBase->builtinMethodFailureMode());
9199       switchRealm = false;
9200       break;
9201     case wasm::CalleeDesc::FuncRef:
9202 #ifdef ENABLE_WASM_TAIL_CALLS
9203       if (isReturnCall) {
9204         ReturnCallAdjustmentInfo retCallInfo(
9205             callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
9206         masm.wasmReturnCallRef(desc, callee, retCallInfo);
9207         // The rest of the method is unnecessary for a return call.
9208         return;
9210 #endif
9211       MOZ_ASSERT(!isReturnCall);
9212       // Register reloading and realm switching are handled dynamically inside
9213       // wasmCallRef. There are two return offsets, one for each call
9214       // instruction (fast path and slow path).
9215       masm.wasmCallRef(desc, callee, &retOffset, &secondRetOffset);
9216       reloadRegs = false;
9217       switchRealm = false;
9218       break;
9221   // Note the assembler offset for the associated LSafePoint.
9222   MOZ_ASSERT(!isReturnCall);
9223   markSafepointAt(retOffset.offset(), lir);
9225   // Now that all the outbound in-memory args are on the stack, note the
9226   // required lower boundary point of the associated StackMap.
9227   uint32_t framePushedAtStackMapBase =
9228       masm.framePushed() -
9229       wasm::AlignStackArgAreaSize(callBase->stackArgAreaSizeUnaligned());
9230   lir->safepoint()->setFramePushedAtStackMapBase(framePushedAtStackMapBase);
9231   MOZ_ASSERT(lir->safepoint()->wasmSafepointKind() ==
9232              WasmSafepointKind::LirCall);
9234   // Note the assembler offset and framePushed for use by the adjunct
9235   // LSafePoint, see visitor for LWasmCallIndirectAdjunctSafepoint below.
9236   if (callee.which() == wasm::CalleeDesc::WasmTable) {
9237     lir->adjunctSafepoint()->recordSafepointInfo(secondRetOffset,
9238                                                  framePushedAtStackMapBase);
9241   if (reloadRegs) {
9242     masm.loadPtr(
9243         Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
9244         InstanceReg);
9245     masm.loadWasmPinnedRegsFromInstance();
9246     if (switchRealm) {
9247       masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
9249   } else {
9250     MOZ_ASSERT(!switchRealm);
9253 #ifdef ENABLE_WASM_TAIL_CALLS
9254   switch (callee.which()) {
9255     case wasm::CalleeDesc::Func:
9256     case wasm::CalleeDesc::Import:
9257     case wasm::CalleeDesc::WasmTable:
9258     case wasm::CalleeDesc::FuncRef:
9259       // Stack allocation could change during Wasm (return) calls,
9260       // recover pre-call state.
9261       masm.freeStackTo(masm.framePushed());
9262       break;
9263     default:
9264       break;
9266 #endif  // ENABLE_WASM_TAIL_CALLS
9268   if (inTry) {
9269     // Set the end of the try note range
9270     size_t tryNoteIndex = callBase->tryNoteIndex();
9271     wasm::TryNoteVector& tryNotes = masm.tryNotes();
9272     wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
9274     // Don't set the end of the try note if we've OOM'ed, as the above
9275     // instructions may not have been emitted, which will trigger an assert
9276     // about zero-length try-notes. This is okay as this compilation will be
9277     // thrown away.
9278     if (!masm.oom()) {
9279       tryNote.setTryBodyEnd(masm.currentOffset());
9282     // This instruction or the adjunct safepoint must be the last instruction
9283     // in the block. No other instructions may be inserted.
9284     LBlock* block = lir->block();
9285     MOZ_RELEASE_ASSERT(*block->rbegin() == lir ||
9286                        (block->rbegin()->isWasmCallIndirectAdjunctSafepoint() &&
9287                         *(++block->rbegin()) == lir));
9289     // Jump to the fallthrough block
9290     jumpToBlock(lir->mirCatchable()->getSuccessor(
9291         MWasmCallCatchable::FallthroughBranchIndex));
// Marks the landing-pad entry point for a catchable wasm call: verifies the
// block layout invariants and records this block's label/frame depth in the
// call's try note.
9295 void CodeGenerator::visitWasmCallLandingPrePad(LWasmCallLandingPrePad* lir) {
9296   LBlock* block = lir->block();
9297   MWasmCallLandingPrePad* mir = lir->mir();
9298   MBasicBlock* mirBlock = mir->block();
9299   MBasicBlock* callMirBlock = mir->callBlock();
9301   // This block must be the pre-pad successor of the call block. No blocks may
9302   // be inserted between us, such as for critical edge splitting.
9303   MOZ_RELEASE_ASSERT(mirBlock == callMirBlock->getSuccessor(
9304                                      MWasmCallCatchable::PrePadBranchIndex));
9306   // This instruction or a move group must be the first instruction in the
9307   // block. No other instructions may be inserted.
9308   MOZ_RELEASE_ASSERT(*block->begin() == lir || (block->begin()->isMoveGroup() &&
9309                                                 *(++block->begin()) == lir));
9311   wasm::TryNoteVector& tryNotes = masm.tryNotes();
9312   wasm::TryNote& tryNote = tryNotes[mir->tryNoteIndex()];
9313   // Set the entry point for the call try note to be the beginning of this
9314   // block. The above assertions (and assertions in visitWasmCall) guarantee
9315   // that we are not skipping over instructions that should be executed.
9316   tryNote.setLandingPad(block->label()->offset(), masm.framePushed());
// Records the second safepoint of a wasm indirect call (the slow-path call
// instruction) using the offset/frame depth noted in visitWasmCall.
9319 void CodeGenerator::visitWasmCallIndirectAdjunctSafepoint(
9320     LWasmCallIndirectAdjunctSafepoint* lir) {
9321   markSafepointAt(lir->safepointLocation().offset(), lir);
9322   lir->safepoint()->setFramePushedAtStackMapBase(
9323       lir->framePushedAtStackMapBase());
// If |ins| carries trap metadata, registers a NullPointerDereference trap
// site at the faulting instruction offset |fco| so a hardware fault there
// is turned into a wasm trap. No-op for instructions without a trap.
9326 template <typename InstructionWithMaybeTrapSite>
9327 void EmitSignalNullCheckTrapSite(MacroAssembler& masm,
9328                                  InstructionWithMaybeTrapSite* ins,
9329                                  FaultingCodeOffset fco,
9330                                  wasm::TrapMachineInsn tmi) {
9331   if (!ins->maybeTrap()) {
9332     return;
9334   wasm::BytecodeOffset trapOffset(ins->maybeTrap()->offset);
9335   masm.append(wasm::Trap::NullPointerDereference,
9336               wasm::TrapSite(tmi, fco, trapOffset));
// Emits a load of a wasm value of the given MIRType from |addr| into |dst|,
// applying the requested widening (zero/sign extension from 8/16 bits for
// Int32). Every emitted load registers a null-check trap site matching the
// machine instruction actually used.
9339 template <typename InstructionWithMaybeTrapSite, class AddressOrBaseIndex>
9340 void CodeGenerator::emitWasmValueLoad(InstructionWithMaybeTrapSite* ins,
9341                                       MIRType type, MWideningOp wideningOp,
9342                                       AddressOrBaseIndex addr,
9343                                       AnyRegister dst) {
9344   FaultingCodeOffset fco;
9345   switch (type) {
9346     case MIRType::Int32:
9347       switch (wideningOp) {
9348         case MWideningOp::None:
9349           fco = masm.load32(addr, dst.gpr());
9350           EmitSignalNullCheckTrapSite(masm, ins, fco,
9351                                       wasm::TrapMachineInsn::Load32);
9352           break;
9353         case MWideningOp::FromU16:
9354           fco = masm.load16ZeroExtend(addr, dst.gpr());
9355           EmitSignalNullCheckTrapSite(masm, ins, fco,
9356                                       wasm::TrapMachineInsn::Load16);
9357           break;
9358         case MWideningOp::FromS16:
9359           fco = masm.load16SignExtend(addr, dst.gpr());
9360           EmitSignalNullCheckTrapSite(masm, ins, fco,
9361                                       wasm::TrapMachineInsn::Load16);
9362           break;
9363         case MWideningOp::FromU8:
9364           fco = masm.load8ZeroExtend(addr, dst.gpr());
9365           EmitSignalNullCheckTrapSite(masm, ins, fco,
9366                                       wasm::TrapMachineInsn::Load8);
9367           break;
9368         case MWideningOp::FromS8:
9369           fco = masm.load8SignExtend(addr, dst.gpr());
9370           EmitSignalNullCheckTrapSite(masm, ins, fco,
9371                                       wasm::TrapMachineInsn::Load8);
9372           break;
9373         default:
9374           MOZ_CRASH("unexpected widening op in ::visitWasmLoadElement");
9376       break;
9377     case MIRType::Float32:
9378       MOZ_ASSERT(wideningOp == MWideningOp::None);
9379       fco = masm.loadFloat32(addr, dst.fpu());
9380       EmitSignalNullCheckTrapSite(masm, ins, fco,
9381                                   wasm::TrapMachineInsn::Load32);
9382       break;
9383     case MIRType::Double:
9384       MOZ_ASSERT(wideningOp == MWideningOp::None);
9385       fco = masm.loadDouble(addr, dst.fpu());
9386       EmitSignalNullCheckTrapSite(masm, ins, fco,
9387                                   wasm::TrapMachineInsn::Load64);
9388       break;
9389     case MIRType::Pointer:
9390     case MIRType::WasmAnyRef:
9391     case MIRType::WasmArrayData:
9392       MOZ_ASSERT(wideningOp == MWideningOp::None);
9393       fco = masm.loadPtr(addr, dst.gpr());
9394       EmitSignalNullCheckTrapSite(masm, ins, fco,
9395                                   wasm::TrapMachineInsnForLoadWord());
9396       break;
9397     default:
9398       MOZ_CRASH("unexpected type in ::emitWasmValueLoad");
// Emits a store of a wasm value of the given MIRType from |src| to |addr|,
// applying the requested narrowing (to 8/16 bits for Int32). Each store
// registers a null-check trap site. WasmAnyRef stores are rejected: they
// need barriers and must go through LWasmStoreElementRef instead.
9402 template <typename InstructionWithMaybeTrapSite, class AddressOrBaseIndex>
9403 void CodeGenerator::emitWasmValueStore(InstructionWithMaybeTrapSite* ins,
9404                                        MIRType type, MNarrowingOp narrowingOp,
9405                                        AnyRegister src,
9406                                        AddressOrBaseIndex addr) {
9407   FaultingCodeOffset fco;
9408   switch (type) {
9409     case MIRType::Int32:
9410       switch (narrowingOp) {
9411         case MNarrowingOp::None:
9412           fco = masm.store32(src.gpr(), addr);
9413           EmitSignalNullCheckTrapSite(masm, ins, fco,
9414                                       wasm::TrapMachineInsn::Store32);
9415           break;
9416         case MNarrowingOp::To16:
9417           fco = masm.store16(src.gpr(), addr);
9418           EmitSignalNullCheckTrapSite(masm, ins, fco,
9419                                       wasm::TrapMachineInsn::Store16);
9420           break;
9421         case MNarrowingOp::To8:
9422           fco = masm.store8(src.gpr(), addr);
9423           EmitSignalNullCheckTrapSite(masm, ins, fco,
9424                                       wasm::TrapMachineInsn::Store8);
9425           break;
9426         default:
9427           MOZ_CRASH();
9429       break;
9430     case MIRType::Float32:
9431       fco = masm.storeFloat32(src.fpu(), addr);
9432       EmitSignalNullCheckTrapSite(masm, ins, fco,
9433                                   wasm::TrapMachineInsn::Store32);
9434       break;
9435     case MIRType::Double:
9436       fco = masm.storeDouble(src.fpu(), addr);
9437       EmitSignalNullCheckTrapSite(masm, ins, fco,
9438                                   wasm::TrapMachineInsn::Store64);
9439       break;
9440     case MIRType::Pointer:
9441       // This could be correct, but it would be a new usage, so check carefully.
9442       MOZ_CRASH("Unexpected type in ::emitWasmValueStore.");
9443     case MIRType::WasmAnyRef:
9444       MOZ_CRASH("Bad type in ::emitWasmValueStore. Use LWasmStoreElementRef.");
9445     default:
9446       MOZ_CRASH("unexpected type in ::emitWasmValueStore");
// Loads a wasm value from a fixed offset within a container object.
// Simd128 takes a dedicated unaligned-load path; everything else goes
// through emitWasmValueLoad.
9450 void CodeGenerator::visitWasmLoadSlot(LWasmLoadSlot* ins) {
9451   MIRType type = ins->type();
9452   MWideningOp wideningOp = ins->wideningOp();
9453   Register container = ToRegister(ins->containerRef());
9454   Address addr(container, ins->offset());
9455   AnyRegister dst = ToAnyRegister(ins->output());
9457 #ifdef ENABLE_WASM_SIMD
9458   if (type == MIRType::Simd128) {
9459     MOZ_ASSERT(wideningOp == MWideningOp::None);
9460     FaultingCodeOffset fco = masm.loadUnalignedSimd128(addr, dst.fpu());
9461     EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load128);
9462     return;
9464 #endif
9465   emitWasmValueLoad(ins, type, wideningOp, addr, dst);
// Loads a wasm value from base[index << scale]. Simd128 elements are 16
// bytes, beyond the maximum addressing-mode scale, so the scaled index is
// computed in a temp (index << 4) and a TimesOne BaseIndex is used.
9468 void CodeGenerator::visitWasmLoadElement(LWasmLoadElement* ins) {
9469   MIRType type = ins->type();
9470   MWideningOp wideningOp = ins->wideningOp();
9471   Scale scale = ins->scale();
9472   Register base = ToRegister(ins->base());
9473   Register index = ToRegister(ins->index());
9474   AnyRegister dst = ToAnyRegister(ins->output());
9476 #ifdef ENABLE_WASM_SIMD
9477   if (type == MIRType::Simd128) {
9478     MOZ_ASSERT(wideningOp == MWideningOp::None);
9479     FaultingCodeOffset fco;
9480     Register temp = ToRegister(ins->temp0());
9481     masm.movePtr(index, temp);
9482     masm.lshiftPtr(Imm32(4), temp);
9483     fco = masm.loadUnalignedSimd128(BaseIndex(base, temp, Scale::TimesOne),
9484                                     dst.fpu());
9485     EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load128);
9486     return;
9488 #endif
9489   emitWasmValueLoad(ins, type, wideningOp, BaseIndex(base, index, scale), dst);
// Stores a wasm value at a fixed offset within a container object.
// Narrowing only applies to Int32; Simd128 takes the unaligned-store path.
9492 void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
9493   MIRType type = ins->type();
9494   MNarrowingOp narrowingOp = ins->narrowingOp();
9495   Register container = ToRegister(ins->containerRef());
9496   Address addr(container, ins->offset());
9497   AnyRegister src = ToAnyRegister(ins->value());
9498   if (type != MIRType::Int32) {
9499     MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
9502 #ifdef ENABLE_WASM_SIMD
9503   if (type == MIRType::Simd128) {
9504     FaultingCodeOffset fco = masm.storeUnalignedSimd128(src.fpu(), addr);
9505     EmitSignalNullCheckTrapSite(masm, ins, fco,
9506                                 wasm::TrapMachineInsn::Store128);
9507     return;
9509 #endif
9510   emitWasmValueStore(ins, type, narrowingOp, src, addr);
// Stores a wasm value at base[index << scale]. As in visitWasmLoadElement,
// Simd128 computes the 16-byte-scaled index in a temp because no
// addressing-mode scale covers shift-by-4.
9513 void CodeGenerator::visitWasmStoreElement(LWasmStoreElement* ins) {
9514   MIRType type = ins->type();
9515   MNarrowingOp narrowingOp = ins->narrowingOp();
9516   Scale scale = ins->scale();
9517   Register base = ToRegister(ins->base());
9518   Register index = ToRegister(ins->index());
9519   AnyRegister src = ToAnyRegister(ins->value());
9520   if (type != MIRType::Int32) {
9521     MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
9524 #ifdef ENABLE_WASM_SIMD
9525   if (type == MIRType::Simd128) {
9526     Register temp = ToRegister(ins->temp0());
9527     masm.movePtr(index, temp);
9528     masm.lshiftPtr(Imm32(4), temp);
9529     FaultingCodeOffset fco = masm.storeUnalignedSimd128(
9530         src.fpu(), BaseIndex(base, temp, Scale::TimesOne));
9531     EmitSignalNullCheckTrapSite(masm, ins, fco,
9532                                 wasm::TrapMachineInsn::Store128);
9533     return;
9535 #endif
9536   emitWasmValueStore(ins, type, narrowingOp, src,
9537                      BaseIndex(base, index, scale));
// Load a pointer-sized entry from a wasm table's elements array.
9540 void CodeGenerator::visitWasmLoadTableElement(LWasmLoadTableElement* ins) {
9541 Register elements = ToRegister(ins->elements());
9542 Register index = ToRegister(ins->index());
9543 Register output = ToRegister(ins->output());
9544 masm.loadPtr(BaseIndex(elements, index, ScalePointer), output);
// output = base + constant offset.
9547 void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
9548 masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
9549 masm.addPtr(Imm32(int32_t(ins->offset())), ToRegister(ins->output()));
// output = base + (index << scale), computed without touching memory.
9552 void CodeGenerator::visitWasmDerivedIndexPointer(
9553 LWasmDerivedIndexPointer* ins) {
9554 Register base = ToRegister(ins->base());
9555 Register index = ToRegister(ins->index());
9556 Register output = ToRegister(ins->output());
9557 masm.computeEffectiveAddress(BaseIndex(base, index, ins->scale()), output);
// Store a GC reference at valueBase + offset. Emits the GC pre-barrier
// (guarded so it is skipped when unnecessary) before the store; the
// post-barrier is emitted by a separate instruction.
9560 void CodeGenerator::visitWasmStoreRef(LWasmStoreRef* ins) {
9561 Register instance = ToRegister(ins->instance());
9562 Register valueBase = ToRegister(ins->valueBase());
9563 size_t offset = ins->offset();
9564 Register value = ToRegister(ins->value());
9565 Register temp = ToRegister(ins->temp0());
9567 if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
9568 Label skipPreBarrier;
// The guard may itself fault on a null check, hence the maybeTrap offset.
9569 wasm::EmitWasmPreBarrierGuard(
9570 masm, instance, temp, Address(valueBase, offset), &skipPreBarrier,
9571 ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
9572 wasm::EmitWasmPreBarrierCallImmediate(masm, instance, temp, valueBase,
9573 offset);
9574 masm.bind(&skipPreBarrier);
9577 FaultingCodeOffset fco = masm.storePtr(value, Address(valueBase, offset));
9578 EmitSignalNullCheckTrapSite(masm, ins, fco,
9579 wasm::TrapMachineInsnForStoreWord());
9580 // The postbarrier is handled separately.
// Store a GC reference at base + index * sizeof(void*). Same barrier protocol
// as visitWasmStoreRef, but with an indexed address (and an extra temp for the
// indexed pre-barrier call).
9583 void CodeGenerator::visitWasmStoreElementRef(LWasmStoreElementRef* ins) {
9584 Register instance = ToRegister(ins->instance());
9585 Register base = ToRegister(ins->base());
9586 Register index = ToRegister(ins->index());
9587 Register value = ToRegister(ins->value());
9588 Register temp0 = ToTempRegisterOrInvalid(ins->temp0());
9589 Register temp1 = ToTempRegisterOrInvalid(ins->temp1());
9591 BaseIndex addr(base, index, ScalePointer);
9593 if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
9594 Label skipPreBarrier;
9595 wasm::EmitWasmPreBarrierGuard(
9596 masm, instance, temp0, addr, &skipPreBarrier,
9597 ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
9598 wasm::EmitWasmPreBarrierCallIndex(masm, instance, temp0, temp1, addr);
9599 masm.bind(&skipPreBarrier);
9602 FaultingCodeOffset fco = masm.storePtr(value, addr);
9603 EmitSignalNullCheckTrapSite(masm, ins, fco,
9604 wasm::TrapMachineInsnForStoreWord());
9605 // The postbarrier is handled separately.
9608 // Out-of-line path to update the store buffer for wasm references.
9609 class OutOfLineWasmCallPostWriteBarrierImmediate
9610 : public OutOfLineCodeBase<CodeGenerator> {
// The LIR instruction the OOL path belongs to, plus the registers and
// constant offset needed to recompute the stored-to address.
9611 LInstruction* lir_;
9612 Register valueBase_;
9613 Register temp_;
9614 uint32_t valueOffset_;
9616 public:
9617 OutOfLineWasmCallPostWriteBarrierImmediate(LInstruction* lir,
9618 Register valueBase, Register temp,
9619 uint32_t valueOffset)
9620 : lir_(lir),
9621 valueBase_(valueBase),
9622 temp_(temp),
9623 valueOffset_(valueOffset) {}
9625 void accept(CodeGenerator* codegen) override {
9626 codegen->visitOutOfLineWasmCallPostWriteBarrierImmediate(this);
// Accessors used by the visit method when emitting the OOL code.
9629 LInstruction* lir() const { return lir_; }
9630 Register valueBase() const { return valueBase_; }
9631 Register temp() const { return temp_; }
9632 uint32_t valueOffset() const { return valueOffset_; }
// OOL continuation: call Instance::postBarrier with the address of the cell
// that was written (valueBase + valueOffset). Volatile registers are saved
// around the call and InstanceReg is preserved explicitly.
9635 void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrierImmediate(
9636 OutOfLineWasmCallPostWriteBarrierImmediate* ool) {
9637 saveLiveVolatile(ool->lir());
9638 masm.Push(InstanceReg);
9639 int32_t framePushedAfterInstance = masm.framePushed();
9641 // Fold the value offset into the value base
9642 Register valueAddr = ool->valueBase();
9643 Register temp = ool->temp();
9644 masm.computeEffectiveAddress(Address(valueAddr, ool->valueOffset()), temp);
9646 // Call Instance::postBarrier
9647 masm.setupWasmABICall();
9648 masm.passABIArg(InstanceReg);
9649 masm.passABIArg(temp);
9650 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
9651 masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
9652 mozilla::Some(instanceOffset), ABIType::General);
9654 masm.Pop(InstanceReg);
9655 restoreLiveVolatile(ool->lir());
9657 masm.jump(ool->rejoin());
// Inline part of the wasm post-write barrier (fixed-offset form): a guard
// that falls through to the rejoin when no barrier is needed, otherwise jumps
// to the OOL store-buffer call above.
9660 void CodeGenerator::visitWasmPostWriteBarrierImmediate(
9661 LWasmPostWriteBarrierImmediate* lir) {
9662 Register object = ToRegister(lir->object());
9663 Register value = ToRegister(lir->value());
9664 Register valueBase = ToRegister(lir->valueBase());
9665 Register temp = ToRegister(lir->temp0());
9666 MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
9667 auto* ool = new (alloc()) OutOfLineWasmCallPostWriteBarrierImmediate(
9668 lir, valueBase, temp, lir->valueOffset());
9669 addOutOfLineCode(ool, lir->mir());
// Guard branches to rejoin when the barrier can be skipped.
9671 wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
9672 ool->rejoin());
9673 masm.jump(ool->entry());
9674 masm.bind(ool->rejoin());
9677 // Out-of-line path to update the store buffer for wasm references.
// Indexed variant: the written cell is at valueBase + index * elemSize.
9678 class OutOfLineWasmCallPostWriteBarrierIndex
9679 : public OutOfLineCodeBase<CodeGenerator> {
9680 LInstruction* lir_;
9681 Register valueBase_;
9682 Register index_;
9683 Register temp_;
9684 uint32_t elemSize_;
9686 public:
9687 OutOfLineWasmCallPostWriteBarrierIndex(LInstruction* lir, Register valueBase,
9688 Register index, Register temp,
9689 uint32_t elemSize)
9690 : lir_(lir),
9691 valueBase_(valueBase),
9692 index_(index),
9693 temp_(temp),
9694 elemSize_(elemSize) {
// Only power-of-two element sizes up to a v128 are supported.
9695 MOZ_ASSERT(elemSize == 1 || elemSize == 2 || elemSize == 4 ||
9696 elemSize == 8 || elemSize == 16);
9699 void accept(CodeGenerator* codegen) override {
9700 codegen->visitOutOfLineWasmCallPostWriteBarrierIndex(this);
// Accessors used by the visit method when emitting the OOL code.
9703 LInstruction* lir() const { return lir_; }
9704 Register valueBase() const { return valueBase_; }
9705 Register index() const { return index_; }
9706 Register temp() const { return temp_; }
9707 uint32_t elemSize() const { return elemSize_; }
// OOL continuation for the indexed post-write barrier: compute the cell
// address (elemSize 16 is scaled manually since BaseIndex scales stop at 8)
// and call Instance::postBarrier with it.
9710 void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrierIndex(
9711 OutOfLineWasmCallPostWriteBarrierIndex* ool) {
9712 saveLiveVolatile(ool->lir());
9713 masm.Push(InstanceReg);
9714 int32_t framePushedAfterInstance = masm.framePushed();
9716 // Fold the value offset into the value base
9717 Register temp = ool->temp();
9718 if (ool->elemSize() == 16) {
9719 masm.movePtr(ool->index(), temp);
9720 masm.lshiftPtr(Imm32(4), temp);
9721 masm.addPtr(ool->valueBase(), temp);
9722 } else {
9723 masm.computeEffectiveAddress(BaseIndex(ool->valueBase(), ool->index(),
9724 ScaleFromElemWidth(ool->elemSize())),
9725 temp);
9728 // Call Instance::postBarrier
9729 masm.setupWasmABICall();
9730 masm.passABIArg(InstanceReg);
9731 masm.passABIArg(temp);
9732 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
9733 masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
9734 mozilla::Some(instanceOffset), ABIType::General);
9736 masm.Pop(InstanceReg);
9737 restoreLiveVolatile(ool->lir());
9739 masm.jump(ool->rejoin());
// Inline part of the wasm post-write barrier (indexed form): guard, then OOL
// store-buffer call; mirrors visitWasmPostWriteBarrierImmediate.
9742 void CodeGenerator::visitWasmPostWriteBarrierIndex(
9743 LWasmPostWriteBarrierIndex* lir) {
9744 Register object = ToRegister(lir->object());
9745 Register value = ToRegister(lir->value());
9746 Register valueBase = ToRegister(lir->valueBase());
9747 Register index = ToRegister(lir->index());
9748 Register temp = ToRegister(lir->temp0());
9749 MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
9750 auto* ool = new (alloc()) OutOfLineWasmCallPostWriteBarrierIndex(
9751 lir, valueBase, index, temp, lir->elemSize());
9752 addOutOfLineCode(ool, lir->mir());
// Guard branches to rejoin when the barrier can be skipped.
9754 wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
9755 ool->rejoin());
9756 masm.jump(ool->entry());
9757 masm.bind(ool->rejoin());
// Load a 64-bit slot at container + offset, registering trap sites for the
// one (64-bit targets) or two (32-bit targets) machine loads involved.
9760 void CodeGenerator::visitWasmLoadSlotI64(LWasmLoadSlotI64* ins) {
9761 Register container = ToRegister(ins->containerRef());
9762 Address addr(container, ins->offset());
9763 Register64 output = ToOutRegister64(ins);
9764 // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
9765 // transaction will always trap before the other, so it seems safest to
9766 // register both of them as potentially trapping.
9767 #ifdef JS_64BIT
9768 FaultingCodeOffset fco = masm.load64(addr, output);
9769 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64)
;
9770 #else
9771 FaultingCodeOffsetPair fcop = masm.load64(addr, output);
9772 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9773 wasm::TrapMachineInsn::Load32);
9774 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9775 wasm::TrapMachineInsn::Load32);
9776 #endif
// Load a 64-bit element at base + index * 8; same trap-site protocol as
// visitWasmLoadSlotI64.
9779 void CodeGenerator::visitWasmLoadElementI64(LWasmLoadElementI64* ins) {
9780 Register base = ToRegister(ins->base());
9781 Register index = ToRegister(ins->index());
9782 BaseIndex addr(base, index, Scale::TimesEight);
9783 Register64 output = ToOutRegister64(ins);
9784 // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
9785 // transaction will always trap before the other, so it seems safest to
9786 // register both of them as potentially trapping.
9787 #ifdef JS_64BIT
9788 FaultingCodeOffset fco = masm.load64(addr, output);
9789 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
9790 #else
9791 FaultingCodeOffsetPair fcop = masm.load64(addr, output);
9792 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9793 wasm::TrapMachineInsn::Load32);
9794 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9795 wasm::TrapMachineInsn::Load32);
9796 #endif
// Store a 64-bit value at container + offset, registering trap sites for the
// one or two machine stores involved.
9799 void CodeGenerator::visitWasmStoreSlotI64(LWasmStoreSlotI64* ins) {
9800 Register container = ToRegister(ins->containerRef());
9801 Address addr(container, ins->offset());
9802 Register64 value = ToRegister64(ins->value());
9803 // Either 1 or 2 words. As above we register both transactions in the
9804 // 2-word case.
9805 #ifdef JS_64BIT
9806 FaultingCodeOffset fco = masm.store64(value, addr);
9807 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
9808 #else
9809 FaultingCodeOffsetPair fcop = masm.store64(value, addr);
9810 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9811 wasm::TrapMachineInsn::Store32);
9812 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9813 wasm::TrapMachineInsn::Store32);
9814 #endif
// Store a 64-bit value at base + index * 8; same trap-site protocol as
// visitWasmStoreSlotI64.
9817 void CodeGenerator::visitWasmStoreElementI64(LWasmStoreElementI64* ins) {
9818 Register base = ToRegister(ins->base());
9819 Register index = ToRegister(ins->index());
9820 BaseIndex addr(base, index, Scale::TimesEight);
9821 Register64 value = ToRegister64(ins->value());
9822 // Either 1 or 2 words. As above we register both transactions in the
9823 // 2-word case.
9824 #ifdef JS_64BIT
9825 FaultingCodeOffset fco = masm.store64(value, addr);
9826 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
9827 #else
9828 FaultingCodeOffsetPair fcop = masm.store64(value, addr);
9829 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9830 wasm::TrapMachineInsn::Store32);
9831 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9832 wasm::TrapMachineInsn::Store32);
9833 #endif
// Load an ArrayBuffer's byte length as an intptr into the output register.
9836 void CodeGenerator::visitArrayBufferByteLength(LArrayBufferByteLength* lir) {
9837 Register obj = ToRegister(lir->object());
9838 Register out = ToRegister(lir->output());
9839 masm.loadArrayBufferByteLengthIntPtr(obj, out);
// Load an ArrayBufferView's length as an intptr into the output register.
9842 void CodeGenerator::visitArrayBufferViewLength(LArrayBufferViewLength* lir) {
9843 Register obj = ToRegister(lir->object());
9844 Register out = ToRegister(lir->output());
9845 masm.loadArrayBufferViewLengthIntPtr(obj, out);
// Load an ArrayBufferView's byte offset as an intptr into the output register.
9848 void CodeGenerator::visitArrayBufferViewByteOffset(
9849 LArrayBufferViewByteOffset* lir) {
9850 Register obj = ToRegister(lir->object());
9851 Register out = ToRegister(lir->output());
9852 masm.loadArrayBufferViewByteOffsetIntPtr(obj, out);
// Load an ArrayBufferView's data pointer into the output register.
9855 void CodeGenerator::visitArrayBufferViewElements(
9856 LArrayBufferViewElements* lir) {
9857 Register obj = ToRegister(lir->object());
9858 Register out = ToRegister(lir->output());
9859 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), out);
// Compute a typed array's per-element byte size into the output register.
9862 void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
9863 Register obj = ToRegister(lir->object());
9864 Register out = ToRegister(lir->output());
9866 masm.typedArrayElementSize(obj, out);
// Load the byte offset of a typed array on a resizable buffer; the "maybe
// out of bounds" variant does not assume the view is still in bounds.
9869 void CodeGenerator::visitResizableTypedArrayByteOffsetMaybeOutOfBounds(
9870 LResizableTypedArrayByteOffsetMaybeOutOfBounds* lir) {
9871 Register obj = ToRegister(lir->object());
9872 Register out = ToRegister(lir->output());
9873 Register temp = ToRegister(lir->temp0());
9875 masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, out, temp);
// Load the current length of a typed array on a resizable buffer, using the
// synchronization mode required by the LIR node.
9878 void CodeGenerator::visitResizableTypedArrayLength(
9879 LResizableTypedArrayLength* lir) {
9880 Register obj = ToRegister(lir->object());
9881 Register out = ToRegister(lir->output());
9882 Register temp = ToRegister(lir->temp0());
9884 masm.loadResizableTypedArrayLengthIntPtr(lir->synchronization(), obj, out,
9885 temp);
// Load the current byte length of a DataView on a resizable buffer, using the
// synchronization mode required by the LIR node.
9888 void CodeGenerator::visitResizableDataViewByteLength(
9889 LResizableDataViewByteLength* lir) {
9890 Register obj = ToRegister(lir->object());
9891 Register out = ToRegister(lir->output());
9892 Register temp = ToRegister(lir->temp0());
9894 masm.loadResizableDataViewByteLengthIntPtr(lir->synchronization(), obj, out,
9895 temp);
// Load the byte length of a growable SharedArrayBuffer with a seq-consistent
// atomic load, since it may be grown concurrently.
9898 void CodeGenerator::visitGrowableSharedArrayBufferByteLength(
9899 LGrowableSharedArrayBufferByteLength* lir) {
9900 Register obj = ToRegister(lir->object());
9901 Register out = ToRegister(lir->output());
9903 // Explicit |byteLength| accesses are seq-consistent atomic loads.
9904 auto sync = Synchronization::Load();
9906 masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, out);
// Bail out of Ion code if the view over a resizable buffer is out of bounds.
9909 void CodeGenerator::visitGuardResizableArrayBufferViewInBounds(
9910 LGuardResizableArrayBufferViewInBounds* lir) {
9911 Register obj = ToRegister(lir->object());
9912 Register temp = ToRegister(lir->temp0());
9914 Label bail;
9915 masm.branchIfResizableArrayBufferViewOutOfBounds(obj, temp, &bail);
9916 bailoutFrom(&bail, lir->snapshot());
// Accept a view that is in bounds, or one that is out of bounds only because
// its buffer was detached; bail out otherwise (out of bounds with a still-
// attached buffer).
9919 void CodeGenerator::visitGuardResizableArrayBufferViewInBoundsOrDetached(
9920 LGuardResizableArrayBufferViewInBoundsOrDetached* lir) {
9921 Register obj = ToRegister(lir->object());
9922 Register temp = ToRegister(lir->temp0());
9924 Label done, bail;
9925 masm.branchIfResizableArrayBufferViewInBounds(obj, temp, &done);
9926 masm.branchIfHasAttachedArrayBuffer(obj, temp, &bail);
9927 masm.bind(&done);
9928 bailoutFrom(&bail, lir->snapshot());
// Bail out of Ion code if the view's underlying ArrayBuffer was detached.
9931 void CodeGenerator::visitGuardHasAttachedArrayBuffer(
9932 LGuardHasAttachedArrayBuffer* lir) {
9933 Register obj = ToRegister(lir->object());
9934 Register temp = ToRegister(lir->temp0());
9936 Label bail;
9937 masm.branchIfHasDetachedArrayBuffer(obj, temp, &bail);
9938 bailoutFrom(&bail, lir->snapshot());
// Out-of-line path for LGuardNumberToIntPtrIndex when OOB values are allowed:
// the slow path substitutes a sentinel index instead of bailing out.
9941 class OutOfLineGuardNumberToIntPtrIndex
9942 : public OutOfLineCodeBase<CodeGenerator> {
9943 LGuardNumberToIntPtrIndex* lir_;
9945 public:
9946 explicit OutOfLineGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex* lir)
9947 : lir_(lir) {}
9949 void accept(CodeGenerator* codegen) override {
9950 codegen->visitOutOfLineGuardNumberToIntPtrIndex(this);
9952 LGuardNumberToIntPtrIndex* lir() const { return lir_; }
// Convert a double to an intptr index. Without OOB support, a non-convertible
// input bails out; with OOB support, it takes the OOL path which substitutes
// an out-of-bounds sentinel instead.
9955 void CodeGenerator::visitGuardNumberToIntPtrIndex(
9956 LGuardNumberToIntPtrIndex* lir) {
9957 FloatRegister input = ToFloatRegister(lir->input());
9958 Register output = ToRegister(lir->output());
9960 if (!lir->mir()->supportOOB()) {
9961 Label bail;
9962 masm.convertDoubleToPtr(input, output, &bail, false);
9963 bailoutFrom(&bail, lir->snapshot());
9964 return;
9967 auto* ool = new (alloc()) OutOfLineGuardNumberToIntPtrIndex(lir);
9968 addOutOfLineCode(ool, lir->mir());
9970 masm.convertDoubleToPtr(input, output, ool->entry(), false);
9971 masm.bind(ool->rejoin());
// OOL continuation: replace an unconvertible index with -1 (always OOB).
9974 void CodeGenerator::visitOutOfLineGuardNumberToIntPtrIndex(
9975 OutOfLineGuardNumberToIntPtrIndex* ool) {
9976 // Substitute the invalid index with an arbitrary out-of-bounds index.
9977 masm.movePtr(ImmWord(-1), ToRegister(ool->lir()->output()));
9978 masm.jump(ool->rejoin());
// Load a JSString's length into the output register.
9981 void CodeGenerator::visitStringLength(LStringLength* lir) {
9982 Register input = ToRegister(lir->string());
9983 Register output = ToRegister(lir->output());
9985 masm.loadStringLength(input, output);
// Integer min/max. Lowering ties |first| to the output, so only the second
// operand may need to be moved in. The branch condition selects whether the
// existing value in |first| should be kept (GreaterThan for max, LessThan
// for min).
9988 void CodeGenerator::visitMinMaxI(LMinMaxI* ins) {
9989 Register first = ToRegister(ins->first());
9990 Register output = ToRegister(ins->output());
9992 MOZ_ASSERT(first == output);
9994 Assembler::Condition cond =
9995 ins->mir()->isMax() ? Assembler::GreaterThan : Assembler::LessThan;
9997 if (ins->second()->isConstant()) {
// Keep |first| if it already wins the comparison, else load the constant.
9998 Label done;
9999 masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done);
10000 masm.move32(Imm32(ToInt32(ins->second())), output);
10001 masm.bind(&done);
10002 } else {
10003 Register second = ToRegister(ins->second());
10004 masm.cmp32Move32(cond, second, first, second, output);
// Math.min/max over a dense int32 array; bails out when the inline loop
// cannot handle the array contents.
10008 void CodeGenerator::visitMinMaxArrayI(LMinMaxArrayI* ins) {
10009 Register array = ToRegister(ins->array());
10010 Register output = ToRegister(ins->output());
10011 Register temp1 = ToRegister(ins->temp1());
10012 Register temp2 = ToRegister(ins->temp2());
10013 Register temp3 = ToRegister(ins->temp3());
10014 bool isMax = ins->isMax();
10016 Label bail;
10017 masm.minMaxArrayInt32(array, output, temp1, temp2, temp3, isMax, &bail);
10018 bailoutFrom(&bail, ins->snapshot());
// Math.min/max over a dense number array (double result); bails out when the
// inline loop cannot handle the array contents.
10021 void CodeGenerator::visitMinMaxArrayD(LMinMaxArrayD* ins) {
10022 Register array = ToRegister(ins->array());
10023 FloatRegister output = ToFloatRegister(ins->output());
10024 Register temp1 = ToRegister(ins->temp1());
10025 Register temp2 = ToRegister(ins->temp2());
10026 FloatRegister floatTemp = ToFloatRegister(ins->floatTemp());
10027 bool isMax = ins->isMax();
10029 Label bail;
10030 masm.minMaxArrayNumber(array, output, floatTemp, temp1, temp2, isMax, &bail);
10031 bailoutFrom(&bail, ins->snapshot());
10034 // For Abs*, lowering will have tied input to output on platforms where that is
10035 // sensible, and otherwise left them untied.
// Integer absolute value. The fallible path bails out when negating INT_MIN
// overflows; the infallible path uses the plain abs32 instruction.
10037 void CodeGenerator::visitAbsI(LAbsI* ins) {
10038 Register input = ToRegister(ins->input());
10039 Register output = ToRegister(ins->output());
10041 if (ins->mir()->fallible()) {
10042 Label positive;
10043 if (input != output) {
10044 masm.move32(input, output);
// Non-negative values are already their own absolute value.
10046 masm.branchTest32(Assembler::NotSigned, output, output, &positive);
10047 Label bail;
10048 masm.branchNeg32(Assembler::Overflow, output, &bail);
10049 bailoutFrom(&bail, ins->snapshot());
10050 masm.bind(&positive);
10051 } else {
10052 masm.abs32(input, output);
// Double absolute value.
10056 void CodeGenerator::visitAbsD(LAbsD* ins) {
10057 masm.absDouble(ToFloatRegister(ins->input()), ToFloatRegister(ins->output()));
// Float32 absolute value.
10060 void CodeGenerator::visitAbsF(LAbsF* ins) {
10061 masm.absFloat32(ToFloatRegister(ins->input()),
10062 ToFloatRegister(ins->output()));
// Int32 ** int32 via the inline pow32 helper; bails out when the result does
// not fit (the helper's failure label).
10065 void CodeGenerator::visitPowII(LPowII* ins) {
10066 Register value = ToRegister(ins->value());
10067 Register power = ToRegister(ins->power());
10068 Register output = ToRegister(ins->output());
10069 Register temp0 = ToRegister(ins->temp0());
10070 Register temp1 = ToRegister(ins->temp1());
10072 Label bailout;
10073 masm.pow32(value, power, output, temp0, temp1, &bailout);
10074 bailoutFrom(&bailout, ins->snapshot());
// double ** int32 via an ABI call to js::powi; result lands in ReturnDoubleReg.
10077 void CodeGenerator::visitPowI(LPowI* ins) {
10078 FloatRegister value = ToFloatRegister(ins->value());
10079 Register power = ToRegister(ins->power());
10081 using Fn = double (*)(double x, int32_t y);
10082 masm.setupAlignedABICall();
10083 masm.passABIArg(value, ABIType::Float64);
10084 masm.passABIArg(power);
10086 masm.callWithABI<Fn, js::powi>(ABIType::Float64);
10087 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
// double ** double via an ABI call to ecmaPow; result lands in ReturnDoubleReg.
10090 void CodeGenerator::visitPowD(LPowD* ins) {
10091 FloatRegister value = ToFloatRegister(ins->value());
10092 FloatRegister power = ToFloatRegister(ins->power());
10094 using Fn = double (*)(double x, double y);
10095 masm.setupAlignedABICall();
10096 masm.passABIArg(value, ABIType::Float64);
10097 masm.passABIArg(power, ABIType::Float64);
10098 masm.callWithABI<Fn, ecmaPow>(ABIType::Float64);
10100 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
// (2^n) ** power for a constant power-of-two base: bail out if the result
// would not fit in an int32, otherwise compute 2^(n*power) with n left shifts.
10103 void CodeGenerator::visitPowOfTwoI(LPowOfTwoI* ins) {
10104 Register power = ToRegister(ins->power());
10105 Register output = ToRegister(ins->output());
10107 uint32_t base = ins->base();
10108 MOZ_ASSERT(mozilla::IsPowerOfTwo(base));
10110 uint32_t n = mozilla::FloorLog2(base);
10111 MOZ_ASSERT(n != 0);
10113 // Hacker's Delight, 2nd edition, theorem D2.
10114 auto ceilingDiv = [](uint32_t x, uint32_t y) { return (x + y - 1) / y; };
10116 // Take bailout if |power| is greater-or-equals |log_y(2^31)| or is negative.
10117 // |2^(n*y) < 2^31| must hold, hence |n*y < 31| resp. |y < 31/n|.
10119 // Note: it's important for this condition to match the code in CacheIR.cpp
10120 // (CanAttachInt32Pow) to prevent failure loops.
// AboveOrEqual is an unsigned comparison, so negative powers also bail.
10121 bailoutCmp32(Assembler::AboveOrEqual, power, Imm32(ceilingDiv(31, n)),
10122 ins->snapshot());
10124 // Compute (2^n)^y as 2^(n*y) using repeated shifts. We could directly scale
10125 // |power| and perform a single shift, but due to the lack of necessary
10126 // MacroAssembler functionality, like multiplying a register with an
10127 // immediate, we restrict the number of generated shift instructions when
10128 // lowering this operation.
10129 masm.move32(Imm32(1), output);
10130 do {
10131 masm.lshift32(power, output);
10132 n--;
10133 } while (n > 0);
// Double square root.
10136 void CodeGenerator::visitSqrtD(LSqrtD* ins) {
10137 FloatRegister input = ToFloatRegister(ins->input());
10138 FloatRegister output = ToFloatRegister(ins->output());
10139 masm.sqrtDouble(input, output);
// Float32 square root.
10142 void CodeGenerator::visitSqrtF(LSqrtF* ins) {
10143 FloatRegister input = ToFloatRegister(ins->input());
10144 FloatRegister output = ToFloatRegister(ins->output());
10145 masm.sqrtFloat32(input, output);
// Math.sign for an int32 input.
10148 void CodeGenerator::visitSignI(LSignI* ins) {
10149 Register input = ToRegister(ins->input());
10150 Register output = ToRegister(ins->output());
10151 masm.signInt32(input, output);
// Math.sign for a double input with a double result.
10154 void CodeGenerator::visitSignD(LSignD* ins) {
10155 FloatRegister input = ToFloatRegister(ins->input());
10156 FloatRegister output = ToFloatRegister(ins->output());
10157 masm.signDouble(input, output);
// Math.sign for a double input with an int32 result; bails out when the
// result cannot be represented as an int32 (the helper's failure label).
10160 void CodeGenerator::visitSignDI(LSignDI* ins) {
10161 FloatRegister input = ToFloatRegister(ins->input());
10162 FloatRegister temp = ToFloatRegister(ins->temp0());
10163 Register output = ToRegister(ins->output());
10165 Label bail;
10166 masm.signDoubleToInt32(input, output, temp, &bail);
10167 bailoutFrom(&bail, ins->snapshot());
// Unary double math function (sin, cos, log, ...) via an ABI call to the
// function pointer looked up for the MIR node's UnaryMathFunction.
10170 void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) {
10171 FloatRegister input = ToFloatRegister(ins->input());
10172 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
10174 UnaryMathFunction fun = ins->mir()->function();
10175 UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);
10177 masm.setupAlignedABICall();
10179 masm.passABIArg(input, ABIType::Float64);
10180 masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
10181 ABIType::Float64);
// Unary float32 math function via an ABI call. Only the rounding-family
// functions are supported; libc floorf/ceilf get DontCheckOther since they
// are not annotated for the callWithABI safety check.
10184 void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) {
10185 FloatRegister input = ToFloatRegister(ins->input());
10186 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);
10188 masm.setupAlignedABICall();
10189 masm.passABIArg(input, ABIType::Float32);
10191 using Fn = float (*)(float x);
10192 Fn funptr = nullptr;
10193 CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check;
10194 switch (ins->mir()->function()) {
10195 case UnaryMathFunction::Floor:
10196 funptr = floorf;
10197 check = CheckUnsafeCallWithABI::DontCheckOther;
10198 break;
10199 case UnaryMathFunction::Round:
10200 funptr = math_roundf_impl;
10201 break;
10202 case UnaryMathFunction::Trunc:
10203 funptr = math_truncf_impl;
10204 break;
10205 case UnaryMathFunction::Ceil:
10206 funptr = ceilf;
10207 check = CheckUnsafeCallWithABI::DontCheckOther;
10208 break;
10209 default:
10210 MOZ_CRASH("Unknown or unsupported float32 math function");
10213 masm.callWithABI(DynamicFunction<Fn>(funptr), ABIType::Float32, check);
// Double modulus (non-wasm) via an ABI call to NumberMod; result lands in
// ReturnDoubleReg.
10216 void CodeGenerator::visitModD(LModD* ins) {
10217 MOZ_ASSERT(!gen->compilingWasm());
10219 FloatRegister lhs = ToFloatRegister(ins->lhs());
10220 FloatRegister rhs = ToFloatRegister(ins->rhs());
10222 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
10224 using Fn = double (*)(double a, double b);
10225 masm.setupAlignedABICall();
10226 masm.passABIArg(lhs, ABIType::Float64);
10227 masm.passABIArg(rhs, ABIType::Float64);
10228 masm.callWithABI<Fn, NumberMod>(ABIType::Float64);
// Double modulus by a constant power-of-two divisor, computed inline as
// copysign(n - d * trunc(n / d), n). The power-of-two restriction keeps the
// reciprocal multiplication exact.
10231 void CodeGenerator::visitModPowTwoD(LModPowTwoD* ins) {
10232 FloatRegister lhs = ToFloatRegister(ins->lhs());
10233 uint32_t divisor = ins->divisor();
10234 MOZ_ASSERT(mozilla::IsPowerOfTwo(divisor));
10236 FloatRegister output = ToFloatRegister(ins->output());
10238 // Compute |n % d| using |copysign(n - (d * trunc(n / d)), n)|.
10240 // This doesn't work if |d| isn't a power of two, because we may lose too much
10241 // precision. For example |Number.MAX_VALUE % 3 == 2|, but
10242 // |3 * trunc(Number.MAX_VALUE / 3) == Infinity|.
10244 Label done;
10246 ScratchDoubleScope scratch(masm);
10248 // Subnormals can lead to performance degradation, which can make calling
10249 // |fmod| faster than this inline implementation. Work around this issue by
10250 // directly returning the input for any value in the interval ]-1, +1[.
10251 Label notSubnormal;
10252 masm.loadConstantDouble(1.0, scratch);
10253 masm.loadConstantDouble(-1.0, output);
10254 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, lhs, scratch,
10255 &notSubnormal);
10256 masm.branchDouble(Assembler::DoubleLessThanOrEqual, lhs, output,
10257 &notSubnormal);
// |lhs| in ]-1, +1[: return it unchanged (x % d == x here for d >= 1).
10259 masm.moveDouble(lhs, output);
10260 masm.jump(&done);
10262 masm.bind(&notSubnormal);
10264 if (divisor == 1) {
10265 // The pattern |n % 1 == 0| is used to detect integer numbers. We can skip
10266 // the multiplication by one in this case.
10267 masm.moveDouble(lhs, output);
10268 masm.nearbyIntDouble(RoundingMode::TowardsZero, output, scratch);
10269 masm.subDouble(scratch, output);
10270 } else {
// scratch = trunc(lhs / d) * d, then output = lhs - scratch.
10271 masm.loadConstantDouble(1.0 / double(divisor), scratch);
10272 masm.loadConstantDouble(double(divisor), output);
10274 masm.mulDouble(lhs, scratch);
10275 masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
10276 masm.mulDouble(output, scratch);
10278 masm.moveDouble(lhs, output);
10279 masm.subDouble(scratch, output);
// Transfer the sign of |lhs| so that e.g. -0 results keep their sign.
10283 masm.copySignDouble(output, lhs, output);
10284 masm.bind(&done);
// Wasm double modulus via the ModD builtin thunk; InstanceReg is preserved
// across the call and its stack slot is passed to callWithABI.
10287 void CodeGenerator::visitWasmBuiltinModD(LWasmBuiltinModD* ins) {
10288 masm.Push(InstanceReg);
10289 int32_t framePushedAfterInstance = masm.framePushed();
10291 FloatRegister lhs = ToFloatRegister(ins->lhs());
10292 FloatRegister rhs = ToFloatRegister(ins->rhs());
10294 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
10296 masm.setupWasmABICall();
10297 masm.passABIArg(lhs, ABIType::Float64);
10298 masm.passABIArg(rhs, ABIType::Float64);
10300 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
10301 masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD,
10302 mozilla::Some(instanceOffset), ABIType::Float64);
10304 masm.Pop(InstanceReg);
// BigInt addition: inline fast path for operands that fit in one pointer-
// sized digit, with identity shortcuts for zero operands; everything else
// (multi-digit values, digit overflow, GC failure) goes to the BigInt::add VM
// call.
10307 void CodeGenerator::visitBigIntAdd(LBigIntAdd* ins) {
10308 Register lhs = ToRegister(ins->lhs());
10309 Register rhs = ToRegister(ins->rhs());
10310 Register temp1 = ToRegister(ins->temp1());
10311 Register temp2 = ToRegister(ins->temp2());
10312 Register output = ToRegister(ins->output());
10314 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10315 auto* ool = oolCallVM<Fn, BigInt::add>(ins, ArgList(lhs, rhs),
10316 StoreRegisterTo(output));
10318 // 0n + x == x
10319 Label lhsNonZero;
10320 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10321 masm.movePtr(rhs, output);
10322 masm.jump(ool->rejoin());
10323 masm.bind(&lhsNonZero);
10325 // x + 0n == x
10326 Label rhsNonZero;
10327 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10328 masm.movePtr(lhs, output);
10329 masm.jump(ool->rejoin());
10330 masm.bind(&rhsNonZero);
10332 // Call into the VM when either operand can't be loaded into a pointer-sized
10333 // register.
10334 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10335 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
// Pointer-sized signed addition; overflow means the result needs >1 digit.
10337 masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10339 // Create and return the result.
10340 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10341 masm.initializeBigInt(output, temp1);
10343 masm.bind(ool->rejoin());
// BigInt subtraction: inline single-digit fast path with an |x - 0n == x|
// shortcut; falls back to the BigInt::sub VM call on multi-digit operands,
// overflow, or allocation failure. Note: unlike add, a zero |lhs| has no
// shortcut since |0n - x| still requires negation.
10346 void CodeGenerator::visitBigIntSub(LBigIntSub* ins) {
10347 Register lhs = ToRegister(ins->lhs());
10348 Register rhs = ToRegister(ins->rhs());
10349 Register temp1 = ToRegister(ins->temp1());
10350 Register temp2 = ToRegister(ins->temp2());
10351 Register output = ToRegister(ins->output());
10353 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10354 auto* ool = oolCallVM<Fn, BigInt::sub>(ins, ArgList(lhs, rhs),
10355 StoreRegisterTo(output));
10357 // x - 0n == x
10358 Label rhsNonZero;
10359 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10360 masm.movePtr(lhs, output);
10361 masm.jump(ool->rejoin());
10362 masm.bind(&rhsNonZero);
10364 // Call into the VM when either operand can't be loaded into a pointer-sized
10365 // register.
10366 masm.loadBigInt(lhs, temp1, ool->entry());
10367 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10369 masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10371 // Create and return the result.
10372 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10373 masm.initializeBigInt(output, temp1);
10375 masm.bind(ool->rejoin());
// BigInt multiplication: inline single-digit fast path with zero-operand
// shortcuts (0n * x == 0n either way); falls back to the BigInt::mul VM call
// on multi-digit operands, overflow, or allocation failure.
10378 void CodeGenerator::visitBigIntMul(LBigIntMul* ins) {
10379 Register lhs = ToRegister(ins->lhs());
10380 Register rhs = ToRegister(ins->rhs());
10381 Register temp1 = ToRegister(ins->temp1());
10382 Register temp2 = ToRegister(ins->temp2());
10383 Register output = ToRegister(ins->output());
10385 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10386 auto* ool = oolCallVM<Fn, BigInt::mul>(ins, ArgList(lhs, rhs),
10387 StoreRegisterTo(output));
10389 // 0n * x == 0n
10390 Label lhsNonZero;
10391 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10392 masm.movePtr(lhs, output);
10393 masm.jump(ool->rejoin());
10394 masm.bind(&lhsNonZero);
10396 // x * 0n == 0n
10397 Label rhsNonZero;
10398 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10399 masm.movePtr(rhs, output);
10400 masm.jump(ool->rejoin());
10401 masm.bind(&rhsNonZero);
10403 // Call into the VM when either operand can't be loaded into a pointer-sized
10404 // register.
10405 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10406 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10408 masm.branchMulPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10410 // Create and return the result.
10411 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10412 masm.initializeBigInt(output, temp1);
10414 masm.bind(ool->rejoin());
// Emits the inline fast path for BigInt division on single-digit operands.
// Division by zero, quotients that overflow one digit (DigitMin / -1), and
// operands that don't fit into a pointer-sized register call |BigInt::div| in
// the VM.
void CodeGenerator::visitBigIntDiv(LBigIntDiv* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::div>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x / 0 throws an error.
  if (ins->mir()->canBeDivideByZero()) {
    masm.branchIfBigIntIsZero(rhs, ool->entry());
  }

  // 0n / x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // |BigInt::div()| returns |lhs| for |lhs / 1n|, which means there's no
  // allocation which might trigger a minor GC to free up nursery space. This
  // requires us to apply the same optimization here, otherwise we'd end up with
  // always entering the OOL call, because the nursery is never evicted.
  Label notOne;
  masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(1), &notOne);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&notOne);

  // Smallest value a signed digit can hold; |DigitMin / -1| is the only
  // single-digit division whose quotient overflows a single digit.
  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Handle an integer overflow from INT{32,64}_MIN / -1.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::Equal, temp2, ImmWord(-1), ool->entry());
  masm.bind(&notOverflow);

  emitBigIntDiv(ins, temp1, temp2, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Emits the inline fast path for BigInt modulus on single-digit operands.
// Modulus by zero and operands that don't fit into a pointer-sized register
// call |BigInt::mod| in the VM.
void CodeGenerator::visitBigIntMod(LBigIntMod* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::mod>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x % 0 throws an error.
  if (ins->mir()->canBeDivideByZero()) {
    masm.branchIfBigIntIsZero(rhs, ool->entry());
  }

  // 0n % x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
  masm.loadBigIntAbsolute(rhs, temp2, ool->entry());

  // Similar to the case for BigInt division, we must apply the same allocation
  // optimizations as performed in |BigInt::mod()|.
  // |abs(lhs) < abs(rhs)| implies |lhs % rhs == lhs|, so return |lhs| as-is.
  Label notBelow;
  masm.branchPtr(Assembler::AboveOrEqual, temp1, temp2, &notBelow);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&notBelow);

  // Convert both digits to signed pointer-sized values.
  masm.bigIntDigitToSignedPtr(lhs, temp1, ool->entry());
  masm.bigIntDigitToSignedPtr(rhs, temp2, ool->entry());

  // Smallest value a signed digit can hold; see the overflow check below.
  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Handle an integer overflow from INT{32,64}_MIN / -1.
  // The remainder of |DigitMin % -1n| is 0n, so substitute it directly.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(-1), &notOverflow);
  masm.movePtr(ImmWord(0), temp1);
  masm.bind(&notOverflow);

  emitBigIntMod(ins, temp1, temp2, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Emits the inline fast path for BigInt exponentiation using square-and-
// multiply on pointer-sized registers. Negative exponents, bases or results
// that exceed a single digit, and allocation failures call |BigInt::pow| in
// the VM.
void CodeGenerator::visitBigIntPow(LBigIntPow* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::pow>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x ** -y throws an error.
  if (ins->mir()->canBeNegativeExponent()) {
    masm.branchIfBigIntIsNegative(rhs, ool->entry());
  }

  // Register aliases for the square-and-multiply loop below. Note that
  // |output| temporarily holds the exponent.
  Register dest = temp1;
  Register base = temp2;
  Register exponent = output;

  Label done;
  masm.movePtr(ImmWord(1), dest);  // p = 1

  // 1n ** y == 1n
  // -1n ** y == 1n when y is even
  // -1n ** y == -1n when y is odd
  Label lhsNotOne;
  masm.branch32(Assembler::Above, Address(lhs, BigInt::offsetOfLength()),
                Imm32(1), &lhsNotOne);
  masm.loadFirstBigIntDigitOrZero(lhs, base);
  masm.branchPtr(Assembler::NotEqual, base, Imm32(1), &lhsNotOne);
  {
    masm.loadFirstBigIntDigitOrZero(rhs, exponent);

    Label lhsNonNegative;
    masm.branchIfBigIntIsNonNegative(lhs, &lhsNonNegative);
    // lhs is -1n here: return 1n (|dest| already holds 1) for even exponents.
    masm.branchTestPtr(Assembler::Zero, exponent, Imm32(1), &done);
    masm.bind(&lhsNonNegative);
    masm.movePtr(lhs, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&lhsNotOne);

  // x ** 0n == 1n
  masm.branchIfBigIntIsZero(rhs, &done);

  // 0n ** y == 0n with y != 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  {
    masm.movePtr(lhs, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&lhsNonZero);

  // Call into the VM when the exponent can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntAbsolute(rhs, exponent, ool->entry());

  // x ** y with x > 1 and y >= DigitBits can't be pointer-sized.
  masm.branchPtr(Assembler::AboveOrEqual, exponent, Imm32(BigInt::DigitBits),
                 ool->entry());

  // x ** 1n == x
  Label rhsNotOne;
  masm.branch32(Assembler::NotEqual, exponent, Imm32(1), &rhsNotOne);
  {
    masm.movePtr(lhs, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&rhsNotOne);

  // Call into the VM when the base operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, base, ool->entry());

  // MacroAssembler::pow32() adjusted to work on pointer-sized registers.
  {
    // m = base
    // n = exponent

    Label start, loop;
    masm.jump(&start);
    masm.bind(&loop);

    // m *= m
    masm.branchMulPtr(Assembler::Overflow, base, base, ool->entry());

    masm.bind(&start);

    // if ((n & 1) != 0) p *= m
    Label even;
    masm.branchTest32(Assembler::Zero, exponent, Imm32(1), &even);
    masm.branchMulPtr(Assembler::Overflow, base, dest, ool->entry());
    masm.bind(&even);

    // n >>= 1
    // if (n == 0) return p
    masm.branchRshift32(Assembler::NonZero, Imm32(1), exponent, &loop);
  }

  MOZ_ASSERT(temp1 == dest);

  // Create and return the result.
  masm.bind(&done);
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emits the inline fast path for BigInt bitwise AND on single-digit operands;
// multi-digit operands and allocation failure go through |BigInt::bitAnd| in
// the VM.
void CodeGenerator::visitBigIntBitAnd(LBigIntBitAnd* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitAnd>(ins, ArgList(lhs, rhs),
                                            StoreRegisterTo(output));

  // 0n & x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x & 0n == 0n
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.andPtr(temp2, temp1);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emits the inline fast path for BigInt bitwise OR on single-digit operands;
// multi-digit operands and allocation failure go through |BigInt::bitOr| in
// the VM.
void CodeGenerator::visitBigIntBitOr(LBigIntBitOr* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitOr>(ins, ArgList(lhs, rhs),
                                           StoreRegisterTo(output));

  // 0n | x == x
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x | 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.orPtr(temp2, temp1);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emits the inline fast path for BigInt bitwise XOR on single-digit operands;
// multi-digit operands and allocation failure go through |BigInt::bitXor| in
// the VM.
void CodeGenerator::visitBigIntBitXor(LBigIntBitXor* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitXor>(ins, ArgList(lhs, rhs),
                                            StoreRegisterTo(output));

  // 0n ^ x == x
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x ^ 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, tem2, ool->entry());

  masm.xorPtr(temp2, temp1);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emits the inline fast path for BigInt left-shift when |lhs| fits into a
// single pointer-sized digit. A negative |rhs| shifts right instead (with
// round-toward-negative-infinity semantics for negative |lhs|). Shifts whose
// result needs more than one digit call |BigInt::lsh| in the VM.
void CodeGenerator::visitBigIntLsh(LBigIntLsh* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::lsh>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n << x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x << 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Inline |BigInt::lsh| for the case when |lhs| contains a single digit.

  // |temp2| receives the absolute shift amount; jump to |rhsTooLarge| when it
  // doesn't fit into one digit.
  Label rhsTooLarge;
  masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);

  // Call into the VM when the left-hand side operand can't be loaded into a
  // pointer-sized register.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());

  // Handle shifts exceeding |BigInt::DigitBits| first.
  Label shift, create;
  masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
  {
    masm.bind(&rhsTooLarge);

    // x << DigitBits with x != 0n always exceeds pointer-sized storage.
    masm.branchIfBigIntIsNonNegative(rhs, ool->entry());

    // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
    masm.move32(Imm32(0), temp1);
    masm.branchIfBigIntIsNonNegative(lhs, &create);
    masm.move32(Imm32(1), temp1);
    masm.jump(&create);
  }
  masm.bind(&shift);

  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
  {
    // Save |abs(lhs)| for the rounding check below.
    masm.movePtr(temp1, temp3);

    // |x << -y| is computed as |x >> y|.
    masm.rshiftPtr(temp2, temp1);

    // For negative numbers, round down if any bit was shifted out.
    masm.branchIfBigIntIsNonNegative(lhs, &create);

    // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
    masm.movePtr(ImmWord(-1), output);
    masm.lshiftPtr(temp2, output);
    masm.notPtr(output);

    // Add plus one when |(lhs.digit(0) & mask) != 0|.
    masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
    masm.addPtr(ImmWord(1), temp1);
    masm.jump(&create);
  }
  masm.bind(&nonNegative);
  {
    // Save the shift amount; |temp2| is about to be repurposed.
    masm.movePtr(temp2, temp3);

    // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
    masm.negPtr(temp2);
    masm.addPtr(Imm32(BigInt::DigitBits), temp2);
    masm.movePtr(temp1, output);
    masm.rshiftPtr(temp2, output);

    // Call into the VM when any bit will be shifted out.
    masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());

    masm.movePtr(temp3, temp2);
    masm.lshiftPtr(temp2, temp1);
  }
  masm.bind(&create);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the left-hand side is negative.
  masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emits the inline fast path for BigInt right-shift when |lhs| fits into a
// single pointer-sized digit. A negative |rhs| shifts left instead. Negative
// |lhs| rounds toward negative infinity, matching |BigInt::rsh|. Results
// needing more than one digit call |BigInt::rsh| in the VM.
void CodeGenerator::visitBigIntRsh(LBigIntRsh* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::rsh>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n >> x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x >> 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Inline |BigInt::rsh| for the case when |lhs| contains a single digit.

  // |temp2| receives the absolute shift amount; jump to |rhsTooLarge| when it
  // doesn't fit into one digit.
  Label rhsTooLarge;
  masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);

  // Call into the VM when the left-hand side operand can't be loaded into a
  // pointer-sized register.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());

  // Handle shifts exceeding |BigInt::DigitBits| first.
  Label shift, create;
  masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
  {
    masm.bind(&rhsTooLarge);

    // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
    masm.branchIfBigIntIsNegative(rhs, ool->entry());

    // x >> DigitBits is either 0n or -1n.
    masm.move32(Imm32(0), temp1);
    masm.branchIfBigIntIsNonNegative(lhs, &create);
    masm.move32(Imm32(1), temp1);
    masm.jump(&create);
  }
  masm.bind(&shift);

  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
  {
    // Save the shift amount; |temp2| is about to be repurposed.
    masm.movePtr(temp2, temp3);

    // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
    masm.negPtr(temp2);
    masm.addPtr(Imm32(BigInt::DigitBits), temp2);
    masm.movePtr(temp1, output);
    masm.rshiftPtr(temp2, output);

    // Call into the VM when any bit will be shifted out.
    masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());

    // |x >> -y| is computed as |x << y|.
    masm.movePtr(temp3, temp2);
    masm.lshiftPtr(temp2, temp1);
    masm.jump(&create);
  }
  masm.bind(&nonNegative);
  {
    // Save |abs(lhs)| for the rounding check below.
    masm.movePtr(temp1, temp3);

    masm.rshiftPtr(temp2, temp1);

    // For negative numbers, round down if any bit was shifted out.
    masm.branchIfBigIntIsNonNegative(lhs, &create);

    // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
    masm.movePtr(ImmWord(-1), output);
    masm.lshiftPtr(temp2, output);
    masm.notPtr(output);

    // Add plus one when |(lhs.digit(0) & mask) != 0|.
    masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
    masm.addPtr(ImmWord(1), temp1);
  }
  masm.bind(&create);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the left-hand side is negative.
  masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emits the inline fast path for BigInt increment: load the value into a
// pointer-sized register, add 1, and allocate a new BigInt for the result.
// Multi-digit inputs, overflow, and allocation failure call |BigInt::inc| in
// the VM.
void CodeGenerator::visitBigIntIncrement(LBigIntIncrement* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::inc>(ins, ArgList(input), StoreRegisterTo(output));

  // Call into the VM when the input can't be loaded into a pointer-sized
  // register.
  masm.loadBigInt(input, temp1, ool->entry());
  masm.movePtr(ImmWord(1), temp2);

  // temp1 += 1, with overflow taking the VM path.
  masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emits the inline fast path for BigInt decrement: load the value into a
// pointer-sized register, subtract 1, and allocate a new BigInt for the
// result. Multi-digit inputs, overflow, and allocation failure call
// |BigInt::dec| in the VM.
void CodeGenerator::visitBigIntDecrement(LBigIntDecrement* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::dec>(ins, ArgList(input), StoreRegisterTo(output));

  // Call into the VM when the input can't be loaded into a pointer-sized
  // register.
  masm.loadBigInt(input, temp1, ool->entry());
  masm.movePtr(ImmWord(1), temp2);

  // temp1 -= 1, with overflow taking the VM path.
  masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emits the inline fast path for BigInt negation: copy the BigInt inline and
// flip its sign bit. Inputs with heap digits call |BigInt::neg| in the VM.
void CodeGenerator::visitBigIntNegate(LBigIntNegate* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::neg>(ins, ArgList(input), StoreRegisterTo(output));

  // -0n == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(input, &lhsNonZero);
  masm.movePtr(input, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when the input uses heap digits.
  masm.copyBigIntWithInlineDigits(input, output, temp, initialBigIntHeap(),
                                  ool->entry());

  // Flip the sign bit.
  masm.xor32(Imm32(BigInt::signBitMask()),
             Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emits the inline fast path for BigInt bitwise NOT (~x), working on the
// absolute value plus a separately-tracked sign. Inputs that don't fit into a
// single digit call |BigInt::bitNot| in the VM.
void CodeGenerator::visitBigIntBitNot(LBigIntBitNot* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitNot>(ins, ArgList(input),
                                            StoreRegisterTo(output));

  masm.loadBigIntAbsolute(input, temp1, ool->entry());

  // This follows the C++ implementation because it lets us support the full
  // range [-2^64, 2^64 - 1] on 64-bit resp. [-2^32, 2^32 - 1] on 32-bit.
  Label nonNegative, done;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  {
    // ~(-x) == ~(~(x-1)) == x-1
    masm.subPtr(Imm32(1), temp1);
    masm.jump(&done);
  }
  masm.bind(&nonNegative);
  {
    // ~x == -x-1 == -(x+1)
    masm.movePtr(ImmWord(1), temp2);
    masm.branchAddPtr(Assembler::CarrySet, temp2, temp1, ool->entry());
  }
  masm.bind(&done);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the input is positive.
  masm.branchIfBigIntIsNegative(input, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Converts an int32 to a string in the given base. The base is either a
// compile-time constant or a register value; in both cases the inline
// conversion falls back to |js::Int32ToStringWithBase| in the VM via the OOL
// path.
void CodeGenerator::visitInt32ToStringWithBase(LInt32ToStringWithBase* lir) {
  Register input = ToRegister(lir->input());
  RegisterOrInt32 base = ToRegisterOrInt32(lir->base());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  // Whether to use lowercase digits for bases > 10.
  bool lowerCase = lir->mir()->lowerCase();

  using Fn = JSString* (*)(JSContext*, int32_t, int32_t, bool);
  if (base.is<Register>()) {
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, base.as<Register>(), Imm32(lowerCase)),
        StoreRegisterTo(output));

    // The dynamic-base variant needs the live volatile registers so it can
    // spill around any internal calls.
    LiveRegisterSet liveRegs = liveVolatileRegs(lir);
    masm.loadInt32ToStringWithBase(input, base.as<Register>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   liveRegs, lowerCase, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, Imm32(base.as<int32_t>()), Imm32(lowerCase)),
        StoreRegisterTo(output));

    masm.loadInt32ToStringWithBase(input, base.as<int32_t>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   lowerCase, ool->entry());
    masm.bind(ool->rejoin());
  }
}
// Implements |Number.parseInt| for a string input whose radix is known (in
// debug builds asserted) to be 0 or 10: use the string's cached index value
// when present, otherwise call |js::NumberParseInt| in the VM.
void CodeGenerator::visitNumberParseInt(LNumberParseInt* lir) {
  Register string = ToRegister(lir->string());
  Register radix = ToRegister(lir->radix());
  ValueOperand output = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
  masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
  masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
  masm.bind(&ok);
#endif

  // Use indexed value as fast path if possible.
  Label vmCall, done;
  masm.loadStringIndexValue(string, temp, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, temp, output);
  masm.jump(&done);
  {
    masm.bind(&vmCall);

    // Arguments are pushed in reverse order of the C++ signature.
    pushArg(radix);
    pushArg(string);

    using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
    callVM<Fn, js::NumberParseInt>(lir);
  }
  masm.bind(&done);
}
// Implements the |Number.parseInt| fast path for a double input: truncate to
// an int32 and bail out for NaN, values whose truncation doesn't fit into an
// int32, and non-zero values in the exclusive range (-1, 1.0e-6) where the
// inline truncation wouldn't match parseInt's string-based semantics.
void CodeGenerator::visitDoubleParseInt(LDoubleParseInt* lir) {
  FloatRegister number = ToFloatRegister(lir->number());
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->temp0());

  Label bail;
  masm.branchDouble(Assembler::DoubleUnordered, number, number, &bail);
  masm.branchTruncateDoubleToInt32(number, output, &bail);

  Label ok;
  masm.branch32(Assembler::NotEqual, output, Imm32(0), &ok);
  {
    // Accept both +0 and -0 and return 0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleEqual, number, temp, &ok);

    // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
    masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, temp);
    masm.branchDouble(Assembler::DoubleLessThan, number, temp, &bail);
  }
  masm.bind(&ok);

  bailoutFrom(&bail, lir->snapshot());
}
11162 void CodeGenerator::visitFloor(LFloor* lir) {
11163 FloatRegister input = ToFloatRegister(lir->input());
11164 Register output = ToRegister(lir->output());
11166 Label bail;
11167 masm.floorDoubleToInt32(input, output, &bail);
11168 bailoutFrom(&bail, lir->snapshot());
11171 void CodeGenerator::visitFloorF(LFloorF* lir) {
11172 FloatRegister input = ToFloatRegister(lir->input());
11173 Register output = ToRegister(lir->output());
11175 Label bail;
11176 masm.floorFloat32ToInt32(input, output, &bail);
11177 bailoutFrom(&bail, lir->snapshot());
11180 void CodeGenerator::visitCeil(LCeil* lir) {
11181 FloatRegister input = ToFloatRegister(lir->input());
11182 Register output = ToRegister(lir->output());
11184 Label bail;
11185 masm.ceilDoubleToInt32(input, output, &bail);
11186 bailoutFrom(&bail, lir->snapshot());
11189 void CodeGenerator::visitCeilF(LCeilF* lir) {
11190 FloatRegister input = ToFloatRegister(lir->input());
11191 Register output = ToRegister(lir->output());
11193 Label bail;
11194 masm.ceilFloat32ToInt32(input, output, &bail);
11195 bailoutFrom(&bail, lir->snapshot());
11198 void CodeGenerator::visitRound(LRound* lir) {
11199 FloatRegister input = ToFloatRegister(lir->input());
11200 FloatRegister temp = ToFloatRegister(lir->temp0());
11201 Register output = ToRegister(lir->output());
11203 Label bail;
11204 masm.roundDoubleToInt32(input, output, temp, &bail);
11205 bailoutFrom(&bail, lir->snapshot());
11208 void CodeGenerator::visitRoundF(LRoundF* lir) {
11209 FloatRegister input = ToFloatRegister(lir->input());
11210 FloatRegister temp = ToFloatRegister(lir->temp0());
11211 Register output = ToRegister(lir->output());
11213 Label bail;
11214 masm.roundFloat32ToInt32(input, output, temp, &bail);
11215 bailoutFrom(&bail, lir->snapshot());
11218 void CodeGenerator::visitTrunc(LTrunc* lir) {
11219 FloatRegister input = ToFloatRegister(lir->input());
11220 Register output = ToRegister(lir->output());
11222 Label bail;
11223 masm.truncDoubleToInt32(input, output, &bail);
11224 bailoutFrom(&bail, lir->snapshot());
11227 void CodeGenerator::visitTruncF(LTruncF* lir) {
11228 FloatRegister input = ToFloatRegister(lir->input());
11229 Register output = ToRegister(lir->output());
11231 Label bail;
11232 masm.truncFloat32ToInt32(input, output, &bail);
11233 bailoutFrom(&bail, lir->snapshot());
// Compares two strings with the given JSOp, producing a boolean in |output|.
// The inline path (|compareStrings|) handles the cheap cases; everything else
// calls the matching StringsEqual/StringsCompare VM function. Only two
// relational VM primitives exist (LessThan, GreaterThanOrEqual), so Le and Gt
// are expressed by swapping the operands.
void CodeGenerator::visitCompareS(LCompareS* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Lt) {
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Le) {
    // Push the operands in reverse order for JSOp::Le:
    // - |left <= right| is implemented as |right >= left|.
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(right, left), StoreRegisterTo(output));
  } else if (op == JSOp::Gt) {
    // Push the operands in reverse order for JSOp::Gt:
    // - |left > right| is implemented as |right < left|.
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(right, left), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ge);
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(left, right), StoreRegisterTo(output));
  }

  masm.compareStrings(op, left, right, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Compares |input| against the constant linear string |str| for (strict)
// equality or inequality. Fast paths, in order: pointer identity, atom
// inequality, Latin-1 vs. pure-two-byte mismatch, and length mismatch; only
// then are the characters compared inline, with the VM as fallback when the
// input's characters can't be loaded.
void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsEqualityOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() > 0);

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ne || op == JSOp::StrictNe);
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  }

  Label compareChars;
  {
    Label notPointerEqual;

    // If operands point to the same instance, the strings are trivially equal.
    masm.branchPtr(Assembler::NotEqual, input, ImmGCPtr(str), &notPointerEqual);
    masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
    masm.jump(ool->rejoin());

    masm.bind(&notPointerEqual);

    Label setNotEqualResult;
    if (str->isAtom()) {
      // Atoms cannot be equal to each other if they point to different strings.
      Imm32 atomBit(JSString::ATOM_BIT);
      masm.branchTest32(Assembler::NonZero,
                        Address(input, JSString::offsetOfFlags()), atomBit,
                        &setNotEqualResult);
    }

    if (str->hasTwoByteChars()) {
      // Pure two-byte strings can't be equal to Latin-1 strings.
      JS::AutoCheckCannotGC nogc;
      if (!mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
        masm.branchLatin1String(input, &setNotEqualResult);
      }
    }

    // Strings of different length can never be equal.
    masm.branch32(Assembler::Equal, Address(input, JSString::offsetOfLength()),
                  Imm32(str->length()), &compareChars);

    masm.bind(&setNotEqualResult);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&compareChars);

  // Load the input string's characters.
  Register stringChars = output;
  masm.loadStringCharsForCompare(input, str, stringChars, ool->entry());

  // Start comparing character by character.
  masm.compareStringChars(op, stringChars, str, output);

  masm.bind(ool->rejoin());
}
// Emit a relational comparison (<, <=, >, >=) of a string operand against a
// constant single-character linear string. Strategy: compare the input's
// first character against the constant's only character; on a tie (or for
// the empty string) decide via the string length instead. Ropes are unwound
// through their left-most children to reach the first character without a
// VM call.
11348 void CodeGenerator::visitCompareSSingle(LCompareSSingle* lir) {
11349 JSOp op = lir->jsop();
11350 MOZ_ASSERT(IsRelationalOp(op));
11352 Register input = ToRegister(lir->input());
11353 Register output = ToRegister(lir->output());
11354 Register temp = ToRegister(lir->temp0());
// The constant is guaranteed to be exactly one character long.
11356 const JSLinearString* str = lir->constant();
11357 MOZ_ASSERT(str->length() == 1);
11359 char16_t ch = str->latin1OrTwoByteChar(0);
// Work on a copy of |input| so the original register stays untouched.
11361 masm.movePtr(input, temp);
// An empty input has no first character; its result is decided purely by
// comparing its length (0) against the constant's length (1) below.
11363 // Check if the string is empty.
11364 Label compareLength;
11365 masm.branch32(Assembler::Equal, Address(temp, JSString::offsetOfLength()),
11366 Imm32(0), &compareLength);
11368 // The first character is in the left-most rope child.
11369 Label notRope;
11370 masm.branchIfNotRope(temp, &notRope);
11372 // Unwind ropes at the start if possible.
11373 Label unwindRope;
11374 masm.bind(&unwindRope);
11375 masm.loadRopeLeftChild(temp, output);
11376 masm.movePtr(output, temp);
// Debug-only sanity check: rope children must be non-empty, so the loop
// below terminates at a non-empty linear string.
11378 #ifdef DEBUG
11379 Label notEmpty;
11380 masm.branch32(Assembler::NotEqual,
11381 Address(temp, JSString::offsetOfLength()), Imm32(0),
11382 &notEmpty);
11383 masm.assumeUnreachable("rope children are non-empty");
11384 masm.bind(&notEmpty);
11385 #endif
11387 // Otherwise keep unwinding ropes.
11388 masm.branchIfRope(temp, &unwindRope);
11390 masm.bind(&notRope);
11392 // Load the first character into |output|.
11393 auto loadFirstChar = [&](auto encoding) {
11394 masm.loadStringChars(temp, output, encoding);
11395 masm.loadChar(Address(output, 0), output, encoding);
11398 Label done;
11399 if (ch <= JSString::MAX_LATIN1_CHAR) {
11400 // Handle both encodings when the search character is Latin-1.
11401 Label twoByte, compare;
11402 masm.branchTwoByteString(temp, &twoByte);
11404 loadFirstChar(CharEncoding::Latin1);
11405 masm.jump(&compare);
11407 masm.bind(&twoByte);
11408 loadFirstChar(CharEncoding::TwoByte);
11410 masm.bind(&compare);
11411 } else {
11412 // The search character is a two-byte character, so it can't be equal to any
11413 // character of a Latin-1 string.
// For a Latin-1 input every character is smaller than |ch|, so the result
// only depends on the comparison direction (true for Lt/Le).
11414 masm.move32(Imm32(int32_t(op == JSOp::Lt || op == JSOp::Le)), output);
11415 masm.branchLatin1String(temp, &done);
11417 loadFirstChar(CharEncoding::TwoByte);
11420 // Compare the string length when the search character is equal to the
11421 // input's first character.
11422 masm.branch32(Assembler::Equal, output, Imm32(ch), &compareLength);
11424 // Otherwise compute the result and jump to the end.
11425 masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false), output, Imm32(ch),
11426 output);
11427 masm.jump(&done);
11429 // Compare the string length to compute the overall result.
11430 masm.bind(&compareLength);
11431 masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
11432 Address(temp, JSString::offsetOfLength()), Imm32(1), output);
11434 masm.bind(&done);
// Compare two BigInt operands and materialize a 0/1 boolean into |output|.
// masm.equalBigInts reports the first difference it finds through three out
// labels (sign, digit length, digit value); falling through means the
// operands are equal. For equality ops all three labels collapse into a
// single "not same" path; for relational ops each difference kind computes
// the result separately, inverting it when the (shared) sign is negative.
11437 void CodeGenerator::visitCompareBigInt(LCompareBigInt* lir) {
11438 JSOp op = lir->mir()->jsop();
11439 Register left = ToRegister(lir->left());
11440 Register right = ToRegister(lir->right());
11441 Register temp0 = ToRegister(lir->temp0());
11442 Register temp1 = ToRegister(lir->temp1());
11443 Register temp2 = ToRegister(lir->temp2());
11444 Register output = ToRegister(lir->output());
11446 Label notSame;
11447 Label compareSign;
11448 Label compareLength;
11449 Label compareDigit;
11451 Label* notSameSign;
11452 Label* notSameLength;
11453 Label* notSameDigit;
11454 if (IsEqualityOp(op)) {
11455 notSameSign = &notSame;
11456 notSameLength = &notSame;
11457 notSameDigit = &notSame;
11458 } else {
11459 notSameSign = &compareSign;
11460 notSameLength = &compareLength;
11461 notSameDigit = &compareDigit;
11464 masm.equalBigInts(left, right, temp0, temp1, temp2, output, notSameSign,
11465 notSameLength, notSameDigit);
// Fall-through: the operands are equal. Eq/StrictEq/Le/Ge are satisfied by
// equality; Ne/StrictNe/Lt/Gt are not.
11467 Label done;
11468 masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
11469 op == JSOp::Ge),
11470 output);
11471 masm.jump(&done);
11473 if (IsEqualityOp(op)) {
11474 masm.bind(&notSame);
11475 masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
11476 } else {
11477 Label invertWhenNegative;
11479 // There are two cases when sign(left) != sign(right):
11480 // 1. sign(left) = positive and sign(right) = negative,
11481 // 2. or the dual case with reversed signs.
11483 // For case 1, |left| <cmp> |right| is true for cmp=Gt or cmp=Ge and false
11484 // for cmp=Lt or cmp=Le. Initialize the result for case 1 and handle case 2
11485 // with |invertWhenNegative|.
11486 masm.bind(&compareSign);
11487 masm.move32(Imm32(op == JSOp::Gt || op == JSOp::Ge), output);
11488 masm.jump(&invertWhenNegative);
11490 // For sign(left) = sign(right) and len(digits(left)) != len(digits(right)),
11491 // we have to consider the two cases:
11492 // 1. len(digits(left)) < len(digits(right))
11493 // 2. len(digits(left)) > len(digits(right))
11495 // For |left| <cmp> |right| with cmp=Lt:
11496 // Assume both BigInts are positive, then |left < right| is true for case 1
11497 // and false for case 2. When both are negative, the result is reversed.
11499 // The other comparison operators can be handled similarly.
11501 // |temp0| holds the digits length of the right-hand side operand.
11502 masm.bind(&compareLength);
11503 masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
11504 Address(left, BigInt::offsetOfLength()), temp0, output);
11505 masm.jump(&invertWhenNegative);
11507 // Similar to the case above, compare the current digit to determine the
11508 // overall comparison result.
11510 // |temp1| points to the current digit of the left-hand side operand.
11511 // |output| holds the current digit of the right-hand side operand.
11512 masm.bind(&compareDigit);
11513 masm.cmpPtrSet(JSOpToCondition(op, /* isSigned = */ false),
11514 Address(temp1, 0), output, output);
// When the operands are negative, the magnitude-based result computed
// above has the opposite meaning, so flip the boolean.
11516 Label nonNegative;
11517 masm.bind(&invertWhenNegative);
11518 masm.branchIfBigIntIsNonNegative(left, &nonNegative);
11519 masm.xor32(Imm32(1), output);
11520 masm.bind(&nonNegative);
11523 masm.bind(&done);
11526 void CodeGenerator::visitCompareBigIntInt32(LCompareBigIntInt32* lir) {
11527 JSOp op = lir->mir()->jsop();
11528 Register left = ToRegister(lir->left());
11529 Register right = ToRegister(lir->right());
11530 Register temp0 = ToRegister(lir->temp0());
11531 Register temp1 = ToRegister(lir->temp1());
11532 Register output = ToRegister(lir->output());
11534 Label ifTrue, ifFalse;
11535 masm.compareBigIntAndInt32(op, left, right, temp0, temp1, &ifTrue, &ifFalse);
11537 Label done;
11538 masm.bind(&ifFalse);
11539 masm.move32(Imm32(0), output);
11540 masm.jump(&done);
11541 masm.bind(&ifTrue);
11542 masm.move32(Imm32(1), output);
11543 masm.bind(&done);
// Compare a BigInt operand against a double by calling a C++ helper through
// the native ABI. JSOp::Le and JSOp::Gt have no direct helper; they are
// implemented by swapping the operands and using the dual comparison (see
// the comment below), which is why those cases dispatch to the
// Number-vs-BigInt helpers instead of the BigInt-vs-Number ones.
11546 void CodeGenerator::visitCompareBigIntDouble(LCompareBigIntDouble* lir) {
11547 JSOp op = lir->mir()->jsop();
11548 Register left = ToRegister(lir->left());
11549 FloatRegister right = ToFloatRegister(lir->right());
11550 Register output = ToRegister(lir->output());
11552 masm.setupAlignedABICall();
11554 // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
11555 // - |left <= right| is implemented as |right >= left|.
11556 // - |left > right| is implemented as |right < left|.
11557 if (op == JSOp::Le || op == JSOp::Gt) {
11558 masm.passABIArg(right, ABIType::Float64);
11559 masm.passABIArg(left);
11560 } else {
11561 masm.passABIArg(left);
11562 masm.passABIArg(right, ABIType::Float64);
// Two helper signatures, matching the two possible argument orders above.
11565 using FnBigIntNumber = bool (*)(BigInt*, double);
11566 using FnNumberBigInt = bool (*)(double, BigInt*);
11567 switch (op) {
11568 case JSOp::Eq: {
11569 masm.callWithABI<FnBigIntNumber,
11570 jit::BigIntNumberEqual<EqualityKind::Equal>>();
11571 break;
11573 case JSOp::Ne: {
11574 masm.callWithABI<FnBigIntNumber,
11575 jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
11576 break;
11578 case JSOp::Lt: {
11579 masm.callWithABI<FnBigIntNumber,
11580 jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
11581 break;
// |left > right| computed as |right < left| with swapped arguments.
11583 case JSOp::Gt: {
11584 masm.callWithABI<FnNumberBigInt,
11585 jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
11586 break;
// |left <= right| computed as |right >= left| with swapped arguments.
11588 case JSOp::Le: {
11589 masm.callWithABI<
11590 FnNumberBigInt,
11591 jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
11592 break;
11594 case JSOp::Ge: {
11595 masm.callWithABI<
11596 FnBigIntNumber,
11597 jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
11598 break;
11600 default:
11601 MOZ_CRASH("unhandled op");
11604 masm.storeCallBoolResult(output);
// Compare a BigInt operand against a String operand through a VM call.
// Mirrors visitCompareBigIntDouble: JSOp::Le and JSOp::Gt swap the pushed
// operands and use the String-vs-BigInt variant with the dual comparison
// kind, so only four helper instantiations per direction are needed.
11607 void CodeGenerator::visitCompareBigIntString(LCompareBigIntString* lir) {
11608 JSOp op = lir->mir()->jsop();
11609 Register left = ToRegister(lir->left());
11610 Register right = ToRegister(lir->right());
11612 // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
11613 // - |left <= right| is implemented as |right >= left|.
11614 // - |left > right| is implemented as |right < left|.
// Note: VM call arguments are pushed in reverse, so the "reverse order"
// push sequence here is the one listing |left| first.
11615 if (op == JSOp::Le || op == JSOp::Gt) {
11616 pushArg(left);
11617 pushArg(right);
11618 } else {
11619 pushArg(right);
11620 pushArg(left);
11623 using FnBigIntString =
11624 bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
11625 using FnStringBigInt =
11626 bool (*)(JSContext*, HandleString, HandleBigInt, bool*);
11628 switch (op) {
11629 case JSOp::Eq: {
11630 constexpr auto Equal = EqualityKind::Equal;
11631 callVM<FnBigIntString, BigIntStringEqual<Equal>>(lir);
11632 break;
11634 case JSOp::Ne: {
11635 constexpr auto NotEqual = EqualityKind::NotEqual;
11636 callVM<FnBigIntString, BigIntStringEqual<NotEqual>>(lir);
11637 break;
11639 case JSOp::Lt: {
11640 constexpr auto LessThan = ComparisonKind::LessThan;
11641 callVM<FnBigIntString, BigIntStringCompare<LessThan>>(lir);
11642 break;
// |left > right| computed as |right < left| with swapped operands.
11644 case JSOp::Gt: {
11645 constexpr auto LessThan = ComparisonKind::LessThan;
11646 callVM<FnStringBigInt, StringBigIntCompare<LessThan>>(lir);
11647 break;
// |left <= right| computed as |right >= left| with swapped operands.
11649 case JSOp::Le: {
11650 constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
11651 callVM<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>(lir);
11652 break;
11654 case JSOp::Ge: {
11655 constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
11656 callVM<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>(lir);
11657 break;
11659 default:
11660 MOZ_CRASH("Unexpected compare op");
// Loose equality (==/!=) of a boxed Value against null or undefined,
// materialized as a boolean. null and undefined are loosely equal to each
// other and to objects that emulate undefined. When the "object emulates
// undefined" fuse is intact, the per-object emulates-undefined check is
// skipped entirely (and only asserted in DEBUG/FUZZING builds).
11664 void CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir) {
11665 MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
11666 lir->mir()->compareType() == MCompare::Compare_Null);
11668 JSOp op = lir->mir()->jsop();
11669 MOZ_ASSERT(IsLooseEqualityOp(op));
11671 const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::ValueIndex);
11672 Register output = ToRegister(lir->output());
11674 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
11675 if (!intact) {
// Fuse popped: objects may emulate undefined, so take the OOL check.
11676 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
11677 addOutOfLineCode(ool, lir->mir());
11679 Label* nullOrLikeUndefined = ool->label1();
11680 Label* notNullOrLikeUndefined = ool->label2();
11683 ScratchTagScope tag(masm, value);
11684 masm.splitTagForTest(value, tag);
11686 masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
11687 masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);
11689 // Check whether it's a truthy object or a falsy object that emulates
11690 // undefined.
11691 masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
11694 Register objreg =
11695 masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
11696 branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined,
11697 notNullOrLikeUndefined, output, ool);
11698 // fall through
11700 Label done;
11702 // It's not null or undefined, and if it's an object it doesn't
11703 // emulate undefined, so it's not like undefined.
11704 masm.move32(Imm32(op == JSOp::Ne), output);
11705 masm.jump(&done);
11707 masm.bind(nullOrLikeUndefined);
11708 masm.move32(Imm32(op == JSOp::Eq), output);
11710 // Both branches meet here.
11711 masm.bind(&done);
11712 } else {
// Fuse intact: no object emulates undefined, so only a tag test is needed.
11713 Label nullOrUndefined, notNullOrLikeUndefined;
11714 #if defined(DEBUG) || defined(FUZZING)
11715 Register objreg = Register::Invalid();
11716 #endif
11718 ScratchTagScope tag(masm, value);
11719 masm.splitTagForTest(value, tag);
11721 masm.branchTestNull(Assembler::Equal, tag, &nullOrUndefined);
11722 masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);
11724 #if defined(DEBUG) || defined(FUZZING)
11725 // Check whether it's a truthy object or a falsy object that emulates
11726 // undefined.
11727 masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);
11728 objreg = masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
11729 #endif
// Debug-only: verify the fuse's guarantee actually holds for this object.
11732 #if defined(DEBUG) || defined(FUZZING)
11733 assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
11734 masm.bind(&notNullOrLikeUndefined);
11735 #endif
11737 Label done;
11739 // It's not null or undefined, and if it's an object it doesn't
11740 // emulate undefined.
11741 masm.move32(Imm32(op == JSOp::Ne), output);
11742 masm.jump(&done);
11744 masm.bind(&nullOrUndefined);
11745 masm.move32(Imm32(op == JSOp::Eq), output);
11747 // Both branches meet here.
11748 masm.bind(&done);
// Branching form of the loose null/undefined comparison on a boxed Value:
// instead of materializing a boolean, jump straight to the true/false
// blocks. JSOp::Ne is handled up front by swapping the branch targets so
// the rest of the code only reasons about the "equals" direction.
11752 void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(
11753 LIsNullOrLikeUndefinedAndBranchV* lir) {
11754 MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
11755 lir->cmpMir()->compareType() == MCompare::Compare_Null);
11757 JSOp op = lir->cmpMir()->jsop();
11758 MOZ_ASSERT(IsLooseEqualityOp(op));
11760 const ValueOperand value =
11761 ToValue(lir, LIsNullOrLikeUndefinedAndBranchV::Value);
11763 MBasicBlock* ifTrue = lir->ifTrue();
11764 MBasicBlock* ifFalse = lir->ifFalse();
11766 if (op == JSOp::Ne) {
11767 // Swap branches.
11768 std::swap(ifTrue, ifFalse);
11771 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
11773 Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
11774 Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
11777 ScratchTagScope tag(masm, value);
11778 masm.splitTagForTest(value, tag);
// null and undefined are trivially "like undefined".
11780 masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
11781 masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);
// Any non-object remaining value cannot compare equal.
11783 masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
// With the fuse intact the object payload is only needed for the debug
// assertion; avoid the unbox otherwise.
11786 bool extractObject = !intact;
11787 #if defined(DEBUG) || defined(FUZZING)
11788 // always extract objreg if we're in debug and
11789 // assertObjectDoesNotEmulateUndefined;
11790 extractObject = true;
11791 #endif
11793 Register objreg = Register::Invalid();
11794 Register scratch = ToRegister(lir->temp());
11795 if (extractObject) {
11796 objreg = masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
11798 if (!intact) {
11799 // Objects that emulate undefined are loosely equal to null/undefined.
11800 OutOfLineTestObject* ool = new (alloc()) OutOfLineTestObject();
11801 addOutOfLineCode(ool, lir->cmpMir());
11802 testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch,
11803 ool);
11804 } else {
11805 assertObjectDoesNotEmulateUndefined(objreg, scratch, lir->cmpMir());
11806 // Bug 1874905. This would be nice to optimize out at the MIR level.
11807 masm.jump(ifFalseLabel);
// Loose null/undefined comparison when the operand is statically known to
// be an object (no tag test needed). An object compares loosely equal to
// null/undefined iff it emulates undefined; with the fuse intact the answer
// is a constant (only asserted in debug builds).
11811 void CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir) {
11812 MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
11813 lir->mir()->compareType() == MCompare::Compare_Null);
11814 MOZ_ASSERT(lir->mir()->lhs()->type() == MIRType::Object);
11816 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
11817 JSOp op = lir->mir()->jsop();
11818 Register output = ToRegister(lir->output());
11819 Register objreg = ToRegister(lir->input());
11820 if (!intact) {
11821 MOZ_ASSERT(IsLooseEqualityOp(op),
11822 "Strict equality should have been folded");
11824 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
11825 addOutOfLineCode(ool, lir->mir());
11827 Label* emulatesUndefined = ool->label1();
11828 Label* doesntEmulateUndefined = ool->label2();
11830 branchTestObjectEmulatesUndefined(objreg, emulatesUndefined,
11831 doesntEmulateUndefined, output, ool);
11833 Label done;
// doesntEmulateUndefined falls through here: object is not like undefined.
11835 masm.move32(Imm32(op == JSOp::Ne), output);
11836 masm.jump(&done);
11838 masm.bind(emulatesUndefined);
11839 masm.move32(Imm32(op == JSOp::Eq), output);
11840 masm.bind(&done);
11841 } else {
// Fuse intact: no object emulates undefined, so the result is constant.
11842 assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
11843 masm.move32(Imm32(op == JSOp::Ne), output);
// Branching form of the loose null/undefined comparison for an operand
// statically known to be an object. As in the Value-typed variant,
// JSOp::Ne just swaps the branch targets up front.
11847 void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(
11848 LIsNullOrLikeUndefinedAndBranchT* lir) {
11849 MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
11850 lir->cmpMir()->compareType() == MCompare::Compare_Null);
11851 MOZ_ASSERT(lir->cmpMir()->lhs()->type() == MIRType::Object);
11853 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
11855 JSOp op = lir->cmpMir()->jsop();
11856 MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");
11858 MBasicBlock* ifTrue = lir->ifTrue();
11859 MBasicBlock* ifFalse = lir->ifFalse();
11861 if (op == JSOp::Ne) {
11862 // Swap branches.
11863 std::swap(ifTrue, ifFalse);
11866 Register input = ToRegister(lir->getOperand(0));
11867 Register scratch = ToRegister(lir->temp());
11868 Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
11869 Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
11871 if (intact) {
// Fuse intact: no object emulates undefined, so always take the false edge.
11872 // Bug 1874905. Ideally branches like this would be optimized out.
11873 assertObjectDoesNotEmulateUndefined(input, scratch, lir->mir());
11874 masm.jump(ifFalseLabel);
11875 } else {
11876 auto* ool = new (alloc()) OutOfLineTestObject();
11877 addOutOfLineCode(ool, lir->cmpMir());
11879 // Objects that emulate undefined are loosely equal to null/undefined.
11880 testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
11884 void CodeGenerator::visitIsNull(LIsNull* lir) {
11885 MCompare::CompareType compareType = lir->mir()->compareType();
11886 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11888 JSOp op = lir->mir()->jsop();
11889 MOZ_ASSERT(IsStrictEqualityOp(op));
11891 const ValueOperand value = ToValue(lir, LIsNull::ValueIndex);
11892 Register output = ToRegister(lir->output());
11894 Assembler::Condition cond = JSOpToCondition(compareType, op);
11895 masm.testNullSet(cond, value, output);
11898 void CodeGenerator::visitIsUndefined(LIsUndefined* lir) {
11899 MCompare::CompareType compareType = lir->mir()->compareType();
11900 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
11902 JSOp op = lir->mir()->jsop();
11903 MOZ_ASSERT(IsStrictEqualityOp(op));
11905 const ValueOperand value = ToValue(lir, LIsUndefined::ValueIndex);
11906 Register output = ToRegister(lir->output());
11908 Assembler::Condition cond = JSOpToCondition(compareType, op);
11909 masm.testUndefinedSet(cond, value, output);
11912 void CodeGenerator::visitIsNullAndBranch(LIsNullAndBranch* lir) {
11913 MCompare::CompareType compareType = lir->cmpMir()->compareType();
11914 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11916 JSOp op = lir->cmpMir()->jsop();
11917 MOZ_ASSERT(IsStrictEqualityOp(op));
11919 const ValueOperand value = ToValue(lir, LIsNullAndBranch::Value);
11921 Assembler::Condition cond = JSOpToCondition(compareType, op);
11922 testNullEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
11925 void CodeGenerator::visitIsUndefinedAndBranch(LIsUndefinedAndBranch* lir) {
11926 MCompare::CompareType compareType = lir->cmpMir()->compareType();
11927 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
11929 JSOp op = lir->cmpMir()->jsop();
11930 MOZ_ASSERT(IsStrictEqualityOp(op));
11932 const ValueOperand value = ToValue(lir, LIsUndefinedAndBranch::Value);
11934 Assembler::Condition cond = JSOpToCondition(compareType, op);
11935 testUndefinedEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
11938 void CodeGenerator::visitSameValueDouble(LSameValueDouble* lir) {
11939 FloatRegister left = ToFloatRegister(lir->left());
11940 FloatRegister right = ToFloatRegister(lir->right());
11941 FloatRegister temp = ToFloatRegister(lir->temp0());
11942 Register output = ToRegister(lir->output());
11944 masm.sameValueDouble(left, right, temp, output);
// SameValue comparison of two boxed Values. Fast path: if the raw 64-bit
// representations are identical the values are SameValue-equal; otherwise
// fall back to the SameValue VM call out-of-line.
11947 void CodeGenerator::visitSameValue(LSameValue* lir) {
11948 ValueOperand lhs = ToValue(lir, LSameValue::LhsIndex);
11949 ValueOperand rhs = ToValue(lir, LSameValue::RhsIndex);
11950 Register output = ToRegister(lir->output());
11952 using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
11953 OutOfLineCode* ool =
11954 oolCallVM<Fn, SameValue>(lir, ArgList(lhs, rhs), StoreRegisterTo(output));
11956 // First check to see if the values have identical bits.
11957 // This is correct for SameValue because SameValue(NaN,NaN) is true,
11958 // and SameValue(0,-0) is false.
11959 masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
11960 ool->entry());
11961 masm.move32(Imm32(1), output);
11963 // If this fails, call SameValue.
11964 masm.bind(ool->rejoin());
// Concatenate two strings by calling the per-zone string-concat stub.
// The stub signals failure by returning null in |output|, in which case we
// fall back to the ConcatStrings VM call out-of-line.
11967 void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs,
11968 Register output) {
11969 using Fn =
11970 JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
11971 OutOfLineCode* ool = oolCallVM<Fn, ConcatStrings<CanGC>>(
11972 lir, ArgList(lhs, rhs, static_cast<Imm32>(int32_t(gc::Heap::Default))),
11973 StoreRegisterTo(output));
// The stub pointer is read with a read barrier tracked via
// |zoneStubsToReadBarrier_|.
11975 const JitZone* jitZone = gen->realm->zone()->jitZone();
11976 JitCode* stringConcatStub =
11977 jitZone->stringConcatStubNoBarrier(&zoneStubsToReadBarrier_);
11978 masm.call(stringConcatStub);
// Null result means the stub could not do the concat inline; take the OOL
// VM-call path.
11979 masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());
11981 masm.bind(ool->rejoin());
// LConcat: concatenate two string operands. All operands, temps, and the
// output are pinned to specific call-temp registers (asserted below) —
// presumably the fixed register convention expected by the string-concat
// stub invoked from emitConcat; TODO(review) confirm against the stub's
// generator.
11984 void CodeGenerator::visitConcat(LConcat* lir) {
11985 Register lhs = ToRegister(lir->lhs());
11986 Register rhs = ToRegister(lir->rhs());
11988 Register output = ToRegister(lir->output());
11990 MOZ_ASSERT(lhs == CallTempReg0);
11991 MOZ_ASSERT(rhs == CallTempReg1);
11992 MOZ_ASSERT(ToRegister(lir->temp0()) == CallTempReg0);
11993 MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg1);
11994 MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg2);
11995 MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg3);
11996 MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg4);
11997 MOZ_ASSERT(output == CallTempReg5);
11999 emitConcat(lir, lhs, rhs, output);
// Emit code to copy |len| characters from |from| to |to|, possibly
// converting between character encodings (Latin-1 may be inflated to
// two-byte; deflation is not supported, see the MOZ_ASSERT_IF). When both
// encodings match, characters are copied in pointer-sized chunks, with the
// copy loop fully unrolled for small known maximum lengths. Both |from| and
// |to| are advanced past the copied characters; |len| is clobbered.
12002 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
12003 Register len, Register byteOpScratch,
12004 CharEncoding fromEncoding, CharEncoding toEncoding,
12005 size_t maximumLength = SIZE_MAX) {
12006 // Copy |len| char16_t code units from |from| to |to|. Assumes len > 0
12007 // (checked below in debug builds), and when done |to| must point to the
12008 // next available char.
12010 #ifdef DEBUG
12011 Label ok;
12012 masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
12013 masm.assumeUnreachable("Length should be greater than 0.");
12014 masm.bind(&ok);
12016 if (maximumLength != SIZE_MAX) {
12017 MOZ_ASSERT(maximumLength <= INT32_MAX, "maximum length fits into int32");
12019 Label ok;
12020 masm.branchPtr(Assembler::BelowOrEqual, len, Imm32(maximumLength), &ok);
12021 masm.assumeUnreachable("Length should not exceed maximum length.");
12022 masm.bind(&ok);
12024 #endif
// Deflating two-byte to Latin-1 is not supported by this helper.
12026 MOZ_ASSERT_IF(toEncoding == CharEncoding::Latin1,
12027 fromEncoding == CharEncoding::Latin1);
12029 size_t fromWidth =
12030 fromEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
12031 size_t toWidth =
12032 toEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
12034 // Try to copy multiple characters at once when both encoding are equal.
12035 if (fromEncoding == toEncoding) {
12036 constexpr size_t ptrWidth = sizeof(uintptr_t);
12038 // Copy |width| bytes and then adjust |from| and |to|.
12039 auto copyCharacters = [&](size_t width) {
12040 static_assert(ptrWidth <= 8, "switch handles only up to eight bytes");
12042 switch (width) {
12043 case 1:
12044 masm.load8ZeroExtend(Address(from, 0), byteOpScratch);
12045 masm.store8(byteOpScratch, Address(to, 0));
12046 break;
12047 case 2:
12048 masm.load16ZeroExtend(Address(from, 0), byteOpScratch);
12049 masm.store16(byteOpScratch, Address(to, 0));
12050 break;
12051 case 4:
12052 masm.load32(Address(from, 0), byteOpScratch);
12053 masm.store32(byteOpScratch, Address(to, 0));
12054 break;
12055 case 8:
12056 MOZ_ASSERT(width == ptrWidth);
12057 masm.loadPtr(Address(from, 0), byteOpScratch);
12058 masm.storePtr(byteOpScratch, Address(to, 0));
12059 break;
12062 masm.addPtr(Imm32(width), from);
12063 masm.addPtr(Imm32(width), to);
// Peel off sub-pointer-width copies until the remaining |len| is a
// multiple of the pointer width.
12066 // First align |len| to pointer width.
12067 Label done;
12068 for (size_t width = fromWidth; width < ptrWidth; width *= 2) {
12069 // Number of characters which fit into |width| bytes.
12070 size_t charsPerWidth = width / fromWidth;
12072 if (charsPerWidth < maximumLength) {
12073 Label next;
12074 masm.branchTest32(Assembler::Zero, len, Imm32(charsPerWidth), &next);
12076 copyCharacters(width);
12078 masm.branchSub32(Assembler::Zero, Imm32(charsPerWidth), len, &done);
12079 masm.bind(&next);
12080 } else if (charsPerWidth == maximumLength) {
// The whole string fits in this width; no test or loop needed.
12081 copyCharacters(width);
12082 masm.sub32(Imm32(charsPerWidth), len);
12086 size_t maxInlineLength;
12087 if (fromEncoding == CharEncoding::Latin1) {
12088 maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
12089 } else {
12090 maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
12093 // Number of characters which fit into a single register.
12094 size_t charsPerPtr = ptrWidth / fromWidth;
12096 // Unroll small loops.
12097 constexpr size_t unrollLoopLimit = 3;
12098 size_t loopCount = std::min(maxInlineLength, maximumLength) / charsPerPtr;
12100 #ifdef JS_64BIT
12101 static constexpr size_t latin1MaxInlineByteLength =
12102 JSFatInlineString::MAX_LENGTH_LATIN1 * sizeof(char);
12103 static constexpr size_t twoByteMaxInlineByteLength =
12104 JSFatInlineString::MAX_LENGTH_TWO_BYTE * sizeof(char16_t);
12106 // |unrollLoopLimit| should be large enough to allow loop unrolling on
12107 // 64-bit targets.
12108 static_assert(latin1MaxInlineByteLength / ptrWidth == unrollLoopLimit,
12109 "Latin-1 loops are unrolled on 64-bit");
12110 static_assert(twoByteMaxInlineByteLength / ptrWidth == unrollLoopLimit,
12111 "Two-byte loops are unrolled on 64-bit");
12112 #endif
12114 if (loopCount <= unrollLoopLimit) {
12115 Label labels[unrollLoopLimit];
12117 // Check up front how many characters can be copied.
12118 for (size_t i = 1; i < loopCount; i++) {
12119 masm.branch32(Assembler::Below, len, Imm32((i + 1) * charsPerPtr),
12120 &labels[i]);
12123 // Generate the unrolled loop body.
// Emitted back-to-front so each label skips the right number of copies.
12124 for (size_t i = loopCount; i > 0; i--) {
12125 copyCharacters(ptrWidth);
12126 masm.sub32(Imm32(charsPerPtr), len);
12128 // Jump target for the previous length check.
12129 if (i != 1) {
12130 masm.bind(&labels[i - 1]);
12133 } else {
// General pointer-width copy loop for larger (or unbounded) lengths.
12134 Label start;
12135 masm.bind(&start);
12136 copyCharacters(ptrWidth);
12137 masm.branchSub32(Assembler::NonZero, Imm32(charsPerPtr), len, &start);
12140 masm.bind(&done);
12141 } else {
// Mixed encodings: copy (and inflate) one character at a time.
12142 Label start;
12143 masm.bind(&start);
12144 masm.loadChar(Address(from, 0), byteOpScratch, fromEncoding);
12145 masm.storeChar(byteOpScratch, Address(to, 0), toEncoding);
12146 masm.addPtr(Imm32(fromWidth), from);
12147 masm.addPtr(Imm32(toWidth), to);
12148 masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
// Convenience overload for the common case where source and destination
// use the same character encoding; forwards to the two-encoding variant.
12152 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
12153 Register len, Register byteOpScratch,
12154 CharEncoding encoding, size_t maximumLength) {
12155 CopyStringChars(masm, to, from, len, byteOpScratch, encoding, encoding,
12156 maximumLength);
// Copy all characters of |input| (a linear string) into the two-byte
// buffer |destChars|, inflating Latin-1 sources on the fly. Clobbers
// |input|, |temp1| (string length) and |temp2| (chars pointer / scratch);
// |destChars| is advanced past the copied characters by CopyStringChars.
12159 static void CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input,
12160 Register destChars, Register temp1,
12161 Register temp2) {
12162 // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
12163 // have to inflate.
12165 Label isLatin1, done;
12166 masm.loadStringLength(input, temp1);
12167 masm.branchLatin1String(input, &isLatin1);
// Two-byte source: straight copy, no inflation.
12169 masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
12170 masm.movePtr(temp2, input);
12171 CopyStringChars(masm, destChars, input, temp1, temp2,
12172 CharEncoding::TwoByte);
12173 masm.jump(&done);
12175 masm.bind(&isLatin1);
// Latin-1 source: copy with per-character inflation to two-byte.
12177 masm.loadStringChars(input, temp2, CharEncoding::Latin1);
12178 masm.movePtr(temp2, input);
12179 CopyStringChars(masm, destChars, input, temp1, temp2, CharEncoding::Latin1,
12180 CharEncoding::TwoByte);
12182 masm.bind(&done);
// Allocate an inline string large enough for |length| characters in the
// given |encoding|: a JSThinInlineString when the length fits, otherwise a
// JSFatInlineString. Initializes the flags word (including the Latin-1 bit
// when applicable) and stores the length; jumps to |failure| if GC
// allocation fails. The caller must guarantee |length| fits a fat inline
// string (debug-asserted below).
12185 static void AllocateThinOrFatInlineString(MacroAssembler& masm, Register output,
12186 Register length, Register temp,
12187 gc::Heap initialStringHeap,
12188 Label* failure,
12189 CharEncoding encoding) {
12190 #ifdef DEBUG
12191 size_t maxInlineLength;
12192 if (encoding == CharEncoding::Latin1) {
12193 maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
12194 } else {
12195 maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
12198 Label ok;
12199 masm.branch32(Assembler::BelowOrEqual, length, Imm32(maxInlineLength), &ok);
12200 masm.assumeUnreachable("string length too large to be allocated as inline");
12201 masm.bind(&ok);
12202 #endif
12204 size_t maxThinInlineLength;
12205 if (encoding == CharEncoding::Latin1) {
12206 maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
12207 } else {
12208 maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;
12211 Label isFat, allocDone;
12212 masm.branch32(Assembler::Above, length, Imm32(maxThinInlineLength), &isFat);
// Thin inline string: fits in the smaller GC cell.
12214 uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
12215 if (encoding == CharEncoding::Latin1) {
12216 flags |= JSString::LATIN1_CHARS_BIT;
12218 masm.newGCString(output, temp, initialStringHeap, failure);
12219 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
12220 masm.jump(&allocDone);
12222 masm.bind(&isFat);
// Fat inline string: larger cell for lengths beyond the thin maximum.
12224 uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
12225 if (encoding == CharEncoding::Latin1) {
12226 flags |= JSString::LATIN1_CHARS_BIT;
12228 masm.newGCFatInlineString(output, temp, initialStringHeap, failure);
12229 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
12231 masm.bind(&allocDone);
12233 // Store length.
12234 masm.store32(length, Address(output, JSString::offsetOfLength()));
// Concatenate |lhs| and |rhs| into a freshly allocated thin or fat inline
// string in |output|. On entry the combined result length must already be
// in |temp2| (see the "State" comment). Bails to |failure| if either input
// is a rope or if allocation fails. Clobbers |lhs|, |rhs| and all temps.
12237 static void ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs,
12238 Register output, Register temp1, Register temp2,
12239 Register temp3, gc::Heap initialStringHeap,
12240 Label* failure, CharEncoding encoding) {
12241 JitSpew(JitSpew_Codegen, "# Emitting ConcatInlineString (encoding=%s)",
12242 (encoding == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
12244 // State: result length in temp2.
12246 // Ensure both strings are linear.
12247 masm.branchIfRope(lhs, failure);
12248 masm.branchIfRope(rhs, failure);
12250 // Allocate a JSThinInlineString or JSFatInlineString.
12251 AllocateThinOrFatInlineString(masm, output, temp2, temp1, initialStringHeap,
12252 failure, encoding);
12254 // Load chars pointer in temp2.
12255 masm.loadInlineStringCharsForStore(output, temp2);
// For a two-byte result the source may be Latin-1 and needs inflating;
// for a Latin-1 result both sources are necessarily Latin-1.
12257 auto copyChars = [&](Register src) {
12258 if (encoding == CharEncoding::TwoByte) {
12259 CopyStringCharsMaybeInflate(masm, src, temp2, temp1, temp3);
12260 } else {
12261 masm.loadStringLength(src, temp3);
12262 masm.loadStringChars(src, temp1, CharEncoding::Latin1);
12263 masm.movePtr(temp1, src);
12264 CopyStringChars(masm, temp2, src, temp3, temp1, CharEncoding::Latin1);
12268 // Copy lhs chars. Note that this advances temp2 to point to the next
12269 // char. This also clobbers the lhs register.
12270 copyChars(lhs);
12272 // Copy rhs chars. Clobbers the rhs register.
12273 copyChars(rhs);
// Emits the inline fast paths for substring extraction:
//  - zero length returns the empty atom,
//  - the full range [0, str.length) returns the input string itself,
//  - 1-2 character results are looked up in the static strings table,
//  - short results become thin/fat inline strings,
//  - longer results become dependent strings (when the length range allows).
// Ropes and all remaining edge cases call SubstringKernel via OOL code.
12276 void CodeGenerator::visitSubstr(LSubstr* lir) {
12277 Register string = ToRegister(lir->string());
12278 Register begin = ToRegister(lir->begin());
12279 Register length = ToRegister(lir->length());
12280 Register output = ToRegister(lir->output());
12281 Register temp0 = ToRegister(lir->temp0());
12282 Register temp2 = ToRegister(lir->temp2());
12284 // On x86 there are not enough registers. In that case reuse the string
12285 // register as temporary.
12286 Register temp1 =
12287 lir->temp1()->isBogusTemp() ? string : ToRegister(lir->temp1());
// Upper bound on |length| from range analysis; lets us skip emitting
// allocation paths the result length can never reach.
12289 size_t maximumLength = SIZE_MAX;
12291 Range* range = lir->mir()->length()->range();
12292 if (range && range->hasInt32UpperBound()) {
12293 MOZ_ASSERT(range->upper() >= 0);
12294 maximumLength = size_t(range->upper());
12297 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <=
12298 JSThinInlineString::MAX_LENGTH_LATIN1);
12300 static_assert(JSFatInlineString::MAX_LENGTH_TWO_BYTE <=
12301 JSFatInlineString::MAX_LENGTH_LATIN1);
12303 bool tryFatInlineOrDependent =
12304 maximumLength > JSThinInlineString::MAX_LENGTH_TWO_BYTE;
12305 bool tryDependent = maximumLength > JSFatInlineString::MAX_LENGTH_TWO_BYTE;
12307 #ifdef DEBUG
12308 if (maximumLength != SIZE_MAX) {
12309 Label ok;
12310 masm.branch32(Assembler::BelowOrEqual, length, Imm32(maximumLength), &ok);
12311 masm.assumeUnreachable("length should not exceed maximum length");
12312 masm.bind(&ok);
12314 #endif
12316 Label nonZero, nonInput;
12318 // For every edge case use the C++ variant.
12319 // Note: we also use this upon allocation failure in newGCString and
12320 // newGCFatInlineString. To squeeze out even more performance those failures
12321 // can be handled by allocate in ool code and returning to jit code to fill
12322 // in all data.
12323 using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
12324 int32_t len);
12325 OutOfLineCode* ool = oolCallVM<Fn, SubstringKernel>(
12326 lir, ArgList(string, begin, length), StoreRegisterTo(output));
12327 Label* slowPath = ool->entry();
12328 Label* done = ool->rejoin();
12330 // Zero length, return emptystring.
12331 masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
12332 const JSAtomState& names = gen->runtime->names();
12333 masm.movePtr(ImmGCPtr(names.empty_), output);
12334 masm.jump(done);
12336 // Substring from 0..|str.length|, return str.
12337 masm.bind(&nonZero);
12338 masm.branch32(Assembler::NotEqual,
12339 Address(string, JSString::offsetOfLength()), length, &nonInput);
12340 #ifdef DEBUG
12342 Label ok;
12343 masm.branchTest32(Assembler::Zero, begin, begin, &ok);
12344 masm.assumeUnreachable("length == str.length implies begin == 0");
12345 masm.bind(&ok);
12347 #endif
12348 masm.movePtr(string, output);
12349 masm.jump(done);
12351 // Use slow path for ropes.
12352 masm.bind(&nonInput);
12353 masm.branchIfRope(string, slowPath);
12355 // Optimize one and two character strings.
12356 Label nonStatic;
12357 masm.branch32(Assembler::Above, length, Imm32(2), &nonStatic);
12359 Label loadLengthOne, loadLengthTwo;
// Loads the first character into temp2 and, when length == 2, the second
// character into temp0 for the static-string lookups below.
12361 auto loadChars = [&](CharEncoding encoding, bool fallthru) {
12362 size_t size = encoding == CharEncoding::Latin1 ? sizeof(JS::Latin1Char)
12363 : sizeof(char16_t);
12365 masm.loadStringChars(string, temp0, encoding);
12366 masm.loadChar(temp0, begin, temp2, encoding);
12367 masm.branch32(Assembler::Equal, length, Imm32(1), &loadLengthOne);
12368 masm.loadChar(temp0, begin, temp0, encoding, int32_t(size));
12369 if (!fallthru) {
12370 masm.jump(&loadLengthTwo);
12374 Label isLatin1;
12375 masm.branchLatin1String(string, &isLatin1);
12376 loadChars(CharEncoding::TwoByte, /* fallthru = */ false);
12378 masm.bind(&isLatin1);
12379 loadChars(CharEncoding::Latin1, /* fallthru = */ true);
12381 // Try to load a length-two static string.
12382 masm.bind(&loadLengthTwo);
12383 masm.lookupStaticString(temp2, temp0, output, gen->runtime->staticStrings(),
12384 &nonStatic);
12385 masm.jump(done);
12387 // Try to load a length-one static string.
12388 masm.bind(&loadLengthOne);
12389 masm.lookupStaticString(temp2, output, gen->runtime->staticStrings(),
12390 &nonStatic);
12391 masm.jump(done);
12393 masm.bind(&nonStatic);
12395 // Allocate either a JSThinInlineString or JSFatInlineString, or jump to
12396 // notInline if we need a dependent string.
12397 Label notInline;
12399 static_assert(JSThinInlineString::MAX_LENGTH_LATIN1 <
12400 JSFatInlineString::MAX_LENGTH_LATIN1);
12401 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <
12402 JSFatInlineString::MAX_LENGTH_TWO_BYTE);
12404 // Use temp2 to store the JS(Thin|Fat)InlineString flags. This avoids having
12405 // duplicate newGCString/newGCFatInlineString codegen for Latin1 vs TwoByte
12406 // strings.
12408 Label allocFat, allocDone;
12409 if (tryFatInlineOrDependent) {
12410 Label isLatin1, allocThin;
12411 masm.branchLatin1String(string, &isLatin1);
12413 if (tryDependent) {
12414 masm.branch32(Assembler::Above, length,
12415 Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
12416 &notInline);
12418 masm.move32(Imm32(0), temp2);
12419 masm.branch32(Assembler::Above, length,
12420 Imm32(JSThinInlineString::MAX_LENGTH_TWO_BYTE),
12421 &allocFat);
12422 masm.jump(&allocThin);
12425 masm.bind(&isLatin1);
12427 if (tryDependent) {
12428 masm.branch32(Assembler::Above, length,
12429 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1),
12430 &notInline);
12432 masm.move32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
12433 masm.branch32(Assembler::Above, length,
12434 Imm32(JSThinInlineString::MAX_LENGTH_LATIN1), &allocFat);
12437 masm.bind(&allocThin);
12438 } else {
// Only the thin-inline path is reachable; just propagate the input
// string's Latin-1 bit into temp2.
12439 masm.load32(Address(string, JSString::offsetOfFlags()), temp2);
12440 masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
12444 masm.newGCString(output, temp0, initialStringHeap(), slowPath);
12445 masm.or32(Imm32(JSString::INIT_THIN_INLINE_FLAGS), temp2);
12448 if (tryFatInlineOrDependent) {
12449 masm.jump(&allocDone);
12451 masm.bind(&allocFat);
12453 masm.newGCFatInlineString(output, temp0, initialStringHeap(), slowPath);
12454 masm.or32(Imm32(JSString::INIT_FAT_INLINE_FLAGS), temp2);
12457 masm.bind(&allocDone);
12460 masm.store32(temp2, Address(output, JSString::offsetOfFlags()));
12461 masm.store32(length, Address(output, JSString::offsetOfLength()));
// Copies |length| characters from string[begin..] into the new inline
// string. When temp1 aliases |string| (x86 register shortage, see above),
// the string register is preserved across the copy via a push/pop.
12463 auto initializeInlineString = [&](CharEncoding encoding) {
12464 masm.loadStringChars(string, temp0, encoding);
12465 masm.addToCharPtr(temp0, begin, encoding);
12466 if (temp1 == string) {
12467 masm.push(string);
12469 masm.loadInlineStringCharsForStore(output, temp1);
12470 CopyStringChars(masm, temp1, temp0, length, temp2, encoding,
12471 maximumLength);
12472 masm.loadStringLength(output, length);
12473 if (temp1 == string) {
12474 masm.pop(string);
12478 Label isInlineLatin1;
12479 masm.branchTest32(Assembler::NonZero, temp2,
12480 Imm32(JSString::LATIN1_CHARS_BIT), &isInlineLatin1);
12481 initializeInlineString(CharEncoding::TwoByte);
12482 masm.jump(done);
12484 masm.bind(&isInlineLatin1);
12485 initializeInlineString(CharEncoding::Latin1);
12488 // Handle other cases with a DependentString.
12489 if (tryDependent) {
12490 masm.jump(done);
12492 masm.bind(&notInline);
12493 masm.newGCString(output, temp0, gen->initialStringHeap(), slowPath);
12494 masm.store32(length, Address(output, JSString::offsetOfLength()));
12495 masm.storeDependentStringBase(string, output);
// Sets flags and points the dependent string's chars at |string|'s
// non-inline character buffer, offset by |begin|.
12497 auto initializeDependentString = [&](CharEncoding encoding) {
12498 uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
12499 if (encoding == CharEncoding::Latin1) {
12500 flags |= JSString::LATIN1_CHARS_BIT;
12503 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
12504 masm.loadNonInlineStringChars(string, temp0, encoding);
12505 masm.addToCharPtr(temp0, begin, encoding);
12506 masm.storeNonInlineStringChars(temp0, output);
12509 Label isLatin1;
12510 masm.branchLatin1String(string, &isLatin1);
12511 initializeDependentString(CharEncoding::TwoByte);
12512 masm.jump(done);
12514 masm.bind(&isLatin1);
12515 initializeDependentString(CharEncoding::Latin1);
12518 masm.bind(done);
// Generates the shared string-concatenation stub. Inputs are lhs/rhs in
// CallTempReg0/1; the result string lands in CallTempReg5. Empty operands
// return the other operand unchanged; short results are built as inline
// strings; everything else becomes a rope. On failure the stub returns
// nullptr in the output register (no exception is set here).
12521 JitCode* JitZone::generateStringConcatStub(JSContext* cx) {
12522 JitSpew(JitSpew_Codegen, "# Emitting StringConcat stub");
12524 TempAllocator temp(&cx->tempLifoAlloc());
12525 JitContext jcx(cx);
12526 StackMacroAssembler masm(cx, temp);
12527 AutoCreatedBy acb(masm, "JitZone::generateStringConcatStub");
12529 Register lhs = CallTempReg0;
12530 Register rhs = CallTempReg1;
12531 Register temp1 = CallTempReg2;
12532 Register temp2 = CallTempReg3;
12533 Register temp3 = CallTempReg4;
12534 Register output = CallTempReg5;
12536 Label failure;
12537 #ifdef JS_USE_LINK_REGISTER
12538 masm.pushReturnAddress();
12539 #endif
12540 masm.Push(FramePointer);
12541 masm.moveStackPtrTo(FramePointer);
12543 // If lhs is empty, return rhs.
12544 Label leftEmpty;
12545 masm.loadStringLength(lhs, temp1);
12546 masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);
12548 // If rhs is empty, return lhs.
12549 Label rightEmpty;
12550 masm.loadStringLength(rhs, temp2);
12551 masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);
// temp2 now holds the combined result length (lhs.length + rhs.length).
12553 masm.add32(temp1, temp2);
12555 // Check if we can use a JSInlineString. The result is a Latin1 string if
12556 // lhs and rhs are both Latin1, so we AND the flags.
12557 Label isInlineTwoByte, isInlineLatin1;
12558 masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
12559 masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);
12561 Label isLatin1, notInline;
12562 masm.branchTest32(Assembler::NonZero, temp1,
12563 Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
12565 masm.branch32(Assembler::BelowOrEqual, temp2,
12566 Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
12567 &isInlineTwoByte);
12568 masm.jump(&notInline);
12570 masm.bind(&isLatin1);
12572 masm.branch32(Assembler::BelowOrEqual, temp2,
12573 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &isInlineLatin1);
12575 masm.bind(&notInline);
12577 // Keep AND'ed flags in temp1.
12579 // Ensure result length <= JSString::MAX_LENGTH.
12580 masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);
12582 // Allocate a new rope, guaranteed to be in the nursery if initialStringHeap
12583 // == gc::Heap::Default. (As a result, no post barriers are needed below.)
12584 masm.newGCString(output, temp3, initialStringHeap, &failure);
12586 // Store rope length and flags. temp1 still holds the result of AND'ing the
12587 // lhs and rhs flags, so we just have to clear the other flags to get our rope
12588 // flags (Latin1 if both lhs and rhs are Latin1).
12589 static_assert(JSString::INIT_ROPE_FLAGS == 0,
12590 "Rope type flags must have no bits set");
12591 masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
12592 masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
12593 masm.store32(temp2, Address(output, JSString::offsetOfLength()));
12595 // Store left and right nodes.
12596 masm.storeRopeChildren(lhs, rhs, output);
12597 masm.pop(FramePointer);
12598 masm.ret();
12600 masm.bind(&leftEmpty);
12601 masm.mov(rhs, output);
12602 masm.pop(FramePointer);
12603 masm.ret();
12605 masm.bind(&rightEmpty);
12606 masm.mov(lhs, output);
12607 masm.pop(FramePointer);
12608 masm.ret();
// Inline-string paths: ConcatInlineString expects the combined length in
// temp2 (set above) and jumps to |failure| on any problem.
12610 masm.bind(&isInlineTwoByte);
12611 ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
12612 initialStringHeap, &failure, CharEncoding::TwoByte);
12613 masm.pop(FramePointer);
12614 masm.ret();
12616 masm.bind(&isInlineLatin1);
12617 ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
12618 initialStringHeap, &failure, CharEncoding::Latin1);
12619 masm.pop(FramePointer);
12620 masm.ret();
// Failure: signal via a nullptr result.
12622 masm.bind(&failure);
12623 masm.movePtr(ImmPtr(nullptr), output);
12624 masm.pop(FramePointer);
12625 masm.ret();
12627 Linker linker(masm);
12628 JitCode* code = linker.newCode(cx, CodeKind::Other);
12630 CollectPerfSpewerJitCodeProfile(code, "StringConcatStub");
12631 #ifdef MOZ_VTUNE
12632 vtune::MarkStub(code, "StringConcatStub");
12633 #endif
12635 return code;
12638 void JitRuntime::generateFreeStub(MacroAssembler& masm) {
12639 AutoCreatedBy acb(masm, "JitRuntime::generateFreeStub");
12641 const Register regSlots = CallTempReg0;
12643 freeStubOffset_ = startTrampolineCode(masm);
12645 #ifdef JS_USE_LINK_REGISTER
12646 masm.pushReturnAddress();
12647 #endif
12648 AllocatableRegisterSet regs(RegisterSet::Volatile());
12649 regs.takeUnchecked(regSlots);
12650 LiveRegisterSet save(regs.asLiveSet());
12651 masm.PushRegsInMask(save);
12653 const Register regTemp = regs.takeAnyGeneral();
12654 MOZ_ASSERT(regTemp != regSlots);
12656 using Fn = void (*)(void* p);
12657 masm.setupUnalignedABICall(regTemp);
12658 masm.passABIArg(regSlots);
12659 masm.callWithABI<Fn, js_free>(ABIType::General,
12660 CheckUnsafeCallWithABI::DontCheckOther);
12662 masm.PopRegsInMask(save);
12664 masm.ret();
12667 void JitRuntime::generateLazyLinkStub(MacroAssembler& masm) {
12668 AutoCreatedBy acb(masm, "JitRuntime::generateLazyLinkStub");
12670 lazyLinkStubOffset_ = startTrampolineCode(masm);
12672 #ifdef JS_USE_LINK_REGISTER
12673 masm.pushReturnAddress();
12674 #endif
12675 masm.Push(FramePointer);
12676 masm.moveStackPtrTo(FramePointer);
12678 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
12679 Register temp0 = regs.takeAny();
12680 Register temp1 = regs.takeAny();
12681 Register temp2 = regs.takeAny();
12683 masm.loadJSContext(temp0);
12684 masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::LazyLink);
12685 masm.moveStackPtrTo(temp1);
12687 using Fn = uint8_t* (*)(JSContext* cx, LazyLinkExitFrameLayout* frame);
12688 masm.setupUnalignedABICall(temp2);
12689 masm.passABIArg(temp0);
12690 masm.passABIArg(temp1);
12691 masm.callWithABI<Fn, LazyLinkTopActivation>(
12692 ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
12694 // Discard exit frame and restore frame pointer.
12695 masm.leaveExitFrame(0);
12696 masm.pop(FramePointer);
12698 #ifdef JS_USE_LINK_REGISTER
12699 // Restore the return address such that the emitPrologue function of the
12700 // CodeGenerator can push it back on the stack with pushReturnAddress.
12701 masm.popReturnAddress();
12702 #endif
12703 masm.jump(ReturnReg);
12706 void JitRuntime::generateInterpreterStub(MacroAssembler& masm) {
12707 AutoCreatedBy acb(masm, "JitRuntime::generateInterpreterStub");
12709 interpreterStubOffset_ = startTrampolineCode(masm);
12711 #ifdef JS_USE_LINK_REGISTER
12712 masm.pushReturnAddress();
12713 #endif
12714 masm.Push(FramePointer);
12715 masm.moveStackPtrTo(FramePointer);
12717 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
12718 Register temp0 = regs.takeAny();
12719 Register temp1 = regs.takeAny();
12720 Register temp2 = regs.takeAny();
12722 masm.loadJSContext(temp0);
12723 masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::InterpreterStub);
12724 masm.moveStackPtrTo(temp1);
12726 using Fn = bool (*)(JSContext* cx, InterpreterStubExitFrameLayout* frame);
12727 masm.setupUnalignedABICall(temp2);
12728 masm.passABIArg(temp0);
12729 masm.passABIArg(temp1);
12730 masm.callWithABI<Fn, InvokeFromInterpreterStub>(
12731 ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
12733 masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
12735 // Discard exit frame and restore frame pointer.
12736 masm.leaveExitFrame(0);
12737 masm.pop(FramePointer);
12739 // InvokeFromInterpreterStub stores the return value in argv[0], where the
12740 // caller stored |this|. Subtract |sizeof(void*)| for the frame pointer we
12741 // just popped.
12742 masm.loadValue(Address(masm.getStackPointer(),
12743 JitFrameLayout::offsetOfThis() - sizeof(void*)),
12744 JSReturnOperand);
12745 masm.ret();
12748 void JitRuntime::generateDoubleToInt32ValueStub(MacroAssembler& masm) {
12749 AutoCreatedBy acb(masm, "JitRuntime::generateDoubleToInt32ValueStub");
12750 doubleToInt32ValueStubOffset_ = startTrampolineCode(masm);
12752 Label done;
12753 masm.branchTestDouble(Assembler::NotEqual, R0, &done);
12755 masm.unboxDouble(R0, FloatReg0);
12756 masm.convertDoubleToInt32(FloatReg0, R1.scratchReg(), &done,
12757 /* negativeZeroCheck = */ false);
12758 masm.tagValue(JSVAL_TYPE_INT32, R1.scratchReg(), R0);
12760 masm.bind(&done);
12761 masm.abiret();
12764 void CodeGenerator::visitLinearizeString(LLinearizeString* lir) {
12765 Register str = ToRegister(lir->str());
12766 Register output = ToRegister(lir->output());
12768 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12769 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12770 lir, ArgList(str), StoreRegisterTo(output));
12772 masm.branchIfRope(str, ool->entry());
12774 masm.movePtr(str, output);
12775 masm.bind(ool->rejoin());
12778 void CodeGenerator::visitLinearizeForCharAccess(LLinearizeForCharAccess* lir) {
12779 Register str = ToRegister(lir->str());
12780 Register index = ToRegister(lir->index());
12781 Register output = ToRegister(lir->output());
12783 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12784 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12785 lir, ArgList(str), StoreRegisterTo(output));
12787 masm.branchIfNotCanLoadStringChar(str, index, output, ool->entry());
12789 masm.movePtr(str, output);
12790 masm.bind(ool->rejoin());
12793 void CodeGenerator::visitLinearizeForCodePointAccess(
12794 LLinearizeForCodePointAccess* lir) {
12795 Register str = ToRegister(lir->str());
12796 Register index = ToRegister(lir->index());
12797 Register output = ToRegister(lir->output());
12798 Register temp = ToRegister(lir->temp0());
12800 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12801 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12802 lir, ArgList(str), StoreRegisterTo(output));
12804 masm.branchIfNotCanLoadStringCodePoint(str, index, output, temp,
12805 ool->entry());
12807 masm.movePtr(str, output);
12808 masm.bind(ool->rejoin());
12811 void CodeGenerator::visitToRelativeStringIndex(LToRelativeStringIndex* lir) {
12812 Register index = ToRegister(lir->index());
12813 Register length = ToRegister(lir->length());
12814 Register output = ToRegister(lir->output());
12816 masm.move32(Imm32(0), output);
12817 masm.cmp32Move32(Assembler::LessThan, index, Imm32(0), length, output);
12818 masm.add32(index, output);
12821 void CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) {
12822 Register str = ToRegister(lir->str());
12823 Register output = ToRegister(lir->output());
12824 Register temp0 = ToRegister(lir->temp0());
12825 Register temp1 = ToRegister(lir->temp1());
12827 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12829 if (lir->index()->isBogus()) {
12830 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
12831 StoreRegisterTo(output));
12832 masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
12833 masm.bind(ool->rejoin());
12834 } else {
12835 Register index = ToRegister(lir->index());
12837 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
12838 StoreRegisterTo(output));
12839 masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
12840 masm.bind(ool->rejoin());
12844 void CodeGenerator::visitCharCodeAtOrNegative(LCharCodeAtOrNegative* lir) {
12845 Register str = ToRegister(lir->str());
12846 Register output = ToRegister(lir->output());
12847 Register temp0 = ToRegister(lir->temp0());
12848 Register temp1 = ToRegister(lir->temp1());
12850 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12852 // Return -1 for out-of-bounds access.
12853 masm.move32(Imm32(-1), output);
12855 if (lir->index()->isBogus()) {
12856 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
12857 StoreRegisterTo(output));
12859 masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
12860 Imm32(0), ool->rejoin());
12861 masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
12862 masm.bind(ool->rejoin());
12863 } else {
12864 Register index = ToRegister(lir->index());
12866 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
12867 StoreRegisterTo(output));
12869 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
12870 temp0, ool->rejoin());
12871 masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
12872 masm.bind(ool->rejoin());
12876 void CodeGenerator::visitCodePointAt(LCodePointAt* lir) {
12877 Register str = ToRegister(lir->str());
12878 Register index = ToRegister(lir->index());
12879 Register output = ToRegister(lir->output());
12880 Register temp0 = ToRegister(lir->temp0());
12881 Register temp1 = ToRegister(lir->temp1());
12883 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12884 auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
12885 StoreRegisterTo(output));
12887 masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
12888 masm.bind(ool->rejoin());
12891 void CodeGenerator::visitCodePointAtOrNegative(LCodePointAtOrNegative* lir) {
12892 Register str = ToRegister(lir->str());
12893 Register index = ToRegister(lir->index());
12894 Register output = ToRegister(lir->output());
12895 Register temp0 = ToRegister(lir->temp0());
12896 Register temp1 = ToRegister(lir->temp1());
12898 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12899 auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
12900 StoreRegisterTo(output));
12902 // Return -1 for out-of-bounds access.
12903 masm.move32(Imm32(-1), output);
12905 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
12906 temp0, ool->rejoin());
12907 masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
12908 masm.bind(ool->rejoin());
12911 void CodeGenerator::visitNegativeToNaN(LNegativeToNaN* lir) {
12912 Register input = ToRegister(lir->input());
12913 ValueOperand output = ToOutValue(lir);
12915 masm.tagValue(JSVAL_TYPE_INT32, input, output);
12917 Label done;
12918 masm.branchTest32(Assembler::NotSigned, input, input, &done);
12919 masm.moveValue(JS::NaNValue(), output);
12920 masm.bind(&done);
12923 void CodeGenerator::visitNegativeToUndefined(LNegativeToUndefined* lir) {
12924 Register input = ToRegister(lir->input());
12925 ValueOperand output = ToOutValue(lir);
12927 masm.tagValue(JSVAL_TYPE_INT32, input, output);
12929 Label done;
12930 masm.branchTest32(Assembler::NotSigned, input, input, &done);
12931 masm.moveValue(JS::UndefinedValue(), output);
12932 masm.bind(&done);
12935 void CodeGenerator::visitFromCharCode(LFromCharCode* lir) {
12936 Register code = ToRegister(lir->code());
12937 Register output = ToRegister(lir->output());
12939 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12940 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
12941 StoreRegisterTo(output));
12943 // OOL path if code >= UNIT_STATIC_LIMIT.
12944 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
12945 ool->entry());
12947 masm.bind(ool->rejoin());
12950 void CodeGenerator::visitFromCharCodeEmptyIfNegative(
12951 LFromCharCodeEmptyIfNegative* lir) {
12952 Register code = ToRegister(lir->code());
12953 Register output = ToRegister(lir->output());
12955 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12956 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
12957 StoreRegisterTo(output));
12959 // Return the empty string for negative inputs.
12960 const JSAtomState& names = gen->runtime->names();
12961 masm.movePtr(ImmGCPtr(names.empty_), output);
12962 masm.branchTest32(Assembler::Signed, code, code, ool->rejoin());
12964 // OOL path if code >= UNIT_STATIC_LIMIT.
12965 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
12966 ool->entry());
12968 masm.bind(ool->rejoin());
12971 void CodeGenerator::visitFromCharCodeUndefinedIfNegative(
12972 LFromCharCodeUndefinedIfNegative* lir) {
12973 Register code = ToRegister(lir->code());
12974 ValueOperand output = ToOutValue(lir);
12975 Register temp = output.scratchReg();
12977 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12978 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
12979 StoreRegisterTo(temp));
12981 // Return |undefined| for negative inputs.
12982 Label done;
12983 masm.moveValue(UndefinedValue(), output);
12984 masm.branchTest32(Assembler::Signed, code, code, &done);
12986 // OOL path if code >= UNIT_STATIC_LIMIT.
12987 masm.lookupStaticString(code, temp, gen->runtime->staticStrings(),
12988 ool->entry());
12990 masm.bind(ool->rejoin());
12991 masm.tagValue(JSVAL_TYPE_STRING, temp, output);
12993 masm.bind(&done);
// Emits String.fromCodePoint for a single code point: static string for
// Latin-1 range values, otherwise a freshly allocated two-byte thin inline
// string holding either one BMP char or a surrogate pair. Invalid code
// points (> NonBMPMax) bail out rather than throw, because the MIR node is
// movable (see comment below).
12996 void CodeGenerator::visitFromCodePoint(LFromCodePoint* lir) {
12997 Register codePoint = ToRegister(lir->codePoint());
12998 Register output = ToRegister(lir->output());
12999 Register temp0 = ToRegister(lir->temp0());
13000 Register temp1 = ToRegister(lir->temp1());
13001 LSnapshot* snapshot = lir->snapshot();
13003 // The OOL path is only taken when we can't allocate the inline string.
13004 using Fn = JSLinearString* (*)(JSContext*, char32_t);
13005 auto* ool = oolCallVM<Fn, js::StringFromCodePoint>(lir, ArgList(codePoint),
13006 StoreRegisterTo(output));
13008 Label isTwoByte;
13009 Label* done = ool->rejoin();
13011 static_assert(
13012 StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
13013 "Latin-1 strings can be loaded from static strings");
// Fast path: code points below the unit-static limit come straight from
// the static strings table.
13016 masm.lookupStaticString(codePoint, output, gen->runtime->staticStrings(),
13017 &isTwoByte);
13018 masm.jump(done);
13020 masm.bind(&isTwoByte);
13022 // Use a bailout if the input is not a valid code point, because
13023 // MFromCodePoint is movable and it'd be observable when a moved
13024 // fromCodePoint throws an exception before its actual call site.
13025 bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax),
13026 snapshot);
13028 // Allocate a JSThinInlineString.
13030 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE >= 2,
13031 "JSThinInlineString can hold a supplementary code point");
13033 uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
13034 masm.newGCString(output, temp0, gen->initialStringHeap(), ool->entry());
13035 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
// BMP code points need one char; supplementary ones need a surrogate pair.
13038 Label isSupplementary;
13039 masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(unicode::NonBMPMin),
13040 &isSupplementary);
13042 // Store length.
13043 masm.store32(Imm32(1), Address(output, JSString::offsetOfLength()));
13045 // Load chars pointer in temp0.
13046 masm.loadInlineStringCharsForStore(output, temp0);
13048 masm.store16(codePoint, Address(temp0, 0));
13050 masm.jump(done);
13052 masm.bind(&isSupplementary);
13054 // Store length.
13055 masm.store32(Imm32(2), Address(output, JSString::offsetOfLength()));
13057 // Load chars pointer in temp0.
13058 masm.loadInlineStringCharsForStore(output, temp0);
13060 // Inlined unicode::LeadSurrogate(uint32_t).
13061 masm.move32(codePoint, temp1);
13062 masm.rshift32(Imm32(10), temp1);
13063 masm.add32(Imm32(unicode::LeadSurrogateMin - (unicode::NonBMPMin >> 10)),
13064 temp1);
13066 masm.store16(temp1, Address(temp0, 0));
13068 // Inlined unicode::TrailSurrogate(uint32_t).
13069 masm.move32(codePoint, temp1);
13070 masm.and32(Imm32(0x3FF), temp1);
13071 masm.or32(Imm32(unicode::TrailSurrogateMin), temp1);
13073 masm.store16(temp1, Address(temp0, sizeof(char16_t)));
13077 masm.bind(done);
13080 void CodeGenerator::visitStringIncludes(LStringIncludes* lir) {
13081 pushArg(ToRegister(lir->searchString()));
13082 pushArg(ToRegister(lir->string()));
13084 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13085 callVM<Fn, js::StringIncludes>(lir);
// Shared codegen for LStringIncludesSIMD and LStringIndexOfSIMD: searches a
// string for a constant 1- or 2-character pattern by calling the
// mozilla::SIMD memchr helpers through the ABI. For `includes` the result
// is converted to a bool; for `indexOf` it becomes the character index or
// -1. |ool| handles rope inputs (and is also the rejoin point).
13088 template <typename LIns>
13089 static void CallStringMatch(MacroAssembler& masm, LIns* lir, OutOfLineCode* ool,
13090 LiveRegisterSet volatileRegs) {
13091 Register string = ToRegister(lir->string());
13092 Register output = ToRegister(lir->output());
13093 Register tempLength = ToRegister(lir->temp0());
13094 Register tempChars = ToRegister(lir->temp1());
13095 Register maybeTempPat = ToTempRegisterOrInvalid(lir->temp2());
13097 const JSLinearString* searchString = lir->searchString();
13098 size_t length = searchString->length();
13099 MOZ_ASSERT(length == 1 || length == 2);
13101 // The additional temp register is only needed when searching for two
13102 // pattern characters.
13103 MOZ_ASSERT_IF(length == 2, maybeTempPat != InvalidReg);
// Preload the "not found" result: false for includes, -1 for indexOf.
13105 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
13106 masm.move32(Imm32(0), output);
13107 } else {
13108 masm.move32(Imm32(-1), output);
13111 masm.loadStringLength(string, tempLength);
13113 // Can't be a substring when the string is smaller than the search string.
13114 Label done;
13115 masm.branch32(Assembler::Below, tempLength, Imm32(length), ool->rejoin());
13117 bool searchStringIsPureTwoByte = false;
13118 if (searchString->hasTwoByteChars()) {
13119 JS::AutoCheckCannotGC nogc;
13120 searchStringIsPureTwoByte =
13121 !mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc));
13124 // Pure two-byte strings can't occur in a Latin-1 string.
13125 if (searchStringIsPureTwoByte) {
13126 masm.branchLatin1String(string, ool->rejoin());
13129 // Slow path when we need to linearize the string.
13130 masm.branchIfRope(string, ool->entry());
13132 Label restoreVolatile;
// Emits the ABI call to the matching SIMD helper for |encoding|. On
// return |output| holds the result pointer (nullptr when not found); for
// indexOf it is then converted to a character index.
13134 auto callMatcher = [&](CharEncoding encoding) {
13135 masm.loadStringChars(string, tempChars, encoding);
13137 LiveGeneralRegisterSet liveRegs;
13138 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
13139 // Save |tempChars| to compute the result index.
13140 liveRegs.add(tempChars);
13142 #ifdef DEBUG
13143 // Save |tempLength| in debug-mode for assertions.
13144 liveRegs.add(tempLength);
13145 #endif
13147 // Exclude non-volatile registers.
13148 liveRegs.set() = GeneralRegisterSet::Intersect(
13149 liveRegs.set(), GeneralRegisterSet::Volatile());
13151 masm.PushRegsInMask(liveRegs);
13154 if (length == 1) {
13155 char16_t pat = searchString->latin1OrTwoByteChar(0);
13156 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
13157 pat <= JSString::MAX_LATIN1_CHAR);
13159 masm.move32(Imm32(pat), output);
13161 masm.setupAlignedABICall();
13162 masm.passABIArg(tempChars);
13163 masm.passABIArg(output);
13164 masm.passABIArg(tempLength);
13165 if (encoding == CharEncoding::Latin1) {
13166 using Fn = const char* (*)(const char*, char, size_t);
13167 masm.callWithABI<Fn, mozilla::SIMD::memchr8>(
13168 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13169 } else {
13170 using Fn = const char16_t* (*)(const char16_t*, char16_t, size_t);
13171 masm.callWithABI<Fn, mozilla::SIMD::memchr16>(
13172 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13174 } else {
13175 char16_t pat0 = searchString->latin1OrTwoByteChar(0);
13176 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
13177 pat0 <= JSString::MAX_LATIN1_CHAR);
13179 char16_t pat1 = searchString->latin1OrTwoByteChar(1);
13180 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
13181 pat1 <= JSString::MAX_LATIN1_CHAR);
13183 masm.move32(Imm32(pat0), output);
13184 masm.move32(Imm32(pat1), maybeTempPat);
13186 masm.setupAlignedABICall();
13187 masm.passABIArg(tempChars);
13188 masm.passABIArg(output);
13189 masm.passABIArg(maybeTempPat);
13190 masm.passABIArg(tempLength);
13191 if (encoding == CharEncoding::Latin1) {
13192 using Fn = const char* (*)(const char*, char, char, size_t);
13193 masm.callWithABI<Fn, mozilla::SIMD::memchr2x8>(
13194 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13195 } else {
13196 using Fn =
13197 const char16_t* (*)(const char16_t*, char16_t, char16_t, size_t);
13198 masm.callWithABI<Fn, mozilla::SIMD::memchr2x16>(
13199 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13203 masm.storeCallPointerResult(output);
13205 // Convert to string index for `indexOf`.
13206 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
13207 // Restore |tempChars|. (And in debug mode |tempLength|.)
13208 masm.PopRegsInMask(liveRegs);
13210 Label found;
13211 masm.branchPtr(Assembler::NotEqual, output, ImmPtr(nullptr), &found);
13213 masm.move32(Imm32(-1), output);
13214 masm.jump(&restoreVolatile);
13216 masm.bind(&found);
13218 #ifdef DEBUG
13219 // Check lower bound.
13220 Label lower;
13221 masm.branchPtr(Assembler::AboveOrEqual, output, tempChars, &lower);
13222 masm.assumeUnreachable("result pointer below string chars");
13223 masm.bind(&lower);
13225 // Compute the end position of the characters.
13226 auto scale = encoding == CharEncoding::Latin1 ? TimesOne : TimesTwo;
13227 masm.computeEffectiveAddress(BaseIndex(tempChars, tempLength, scale),
13228 tempLength);
13230 // Check upper bound.
13231 Label upper;
13232 masm.branchPtr(Assembler::Below, output, tempLength, &upper);
13233 masm.assumeUnreachable("result pointer above string chars");
13234 masm.bind(&upper);
13235 #endif
// Pointer difference, scaled down to a char index for two-byte strings.
13237 masm.subPtr(tempChars, output);
13239 if (encoding == CharEncoding::TwoByte) {
13240 masm.rshiftPtr(Imm32(1), output);
// Save the volatile registers the ABI call may clobber, except the ones we
// produce or consume across the call.
13245 volatileRegs.takeUnchecked(output);
13246 volatileRegs.takeUnchecked(tempLength);
13247 volatileRegs.takeUnchecked(tempChars);
13248 if (maybeTempPat != InvalidReg) {
13249 volatileRegs.takeUnchecked(maybeTempPat);
13251 masm.PushRegsInMask(volatileRegs);
13253 // Handle the case when the input is a Latin-1 string.
13254 if (!searchStringIsPureTwoByte) {
13255 Label twoByte;
13256 masm.branchTwoByteString(string, &twoByte);
13258 callMatcher(CharEncoding::Latin1);
13259 masm.jump(&restoreVolatile);
13261 masm.bind(&twoByte);
13264 // Handle the case when the input is a two-byte string.
13265 callMatcher(CharEncoding::TwoByte);
13267 masm.bind(&restoreVolatile);
13268 masm.PopRegsInMask(volatileRegs);
13270 // Convert to bool for `includes`.
13271 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
13272 masm.cmpPtrSet(Assembler::NotEqual, output, ImmPtr(nullptr), output);
13275 masm.bind(ool->rejoin());
13278 void CodeGenerator::visitStringIncludesSIMD(LStringIncludesSIMD* lir) {
13279 Register string = ToRegister(lir->string());
13280 Register output = ToRegister(lir->output());
13281 const JSLinearString* searchString = lir->searchString();
13283 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13284 auto* ool = oolCallVM<Fn, js::StringIncludes>(
13285 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
13287 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
13290 void CodeGenerator::visitStringIndexOf(LStringIndexOf* lir) {
13291 pushArg(ToRegister(lir->searchString()));
13292 pushArg(ToRegister(lir->string()));
13294 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
13295 callVM<Fn, js::StringIndexOf>(lir);
13298 void CodeGenerator::visitStringIndexOfSIMD(LStringIndexOfSIMD* lir) {
13299 Register string = ToRegister(lir->string());
13300 Register output = ToRegister(lir->output());
13301 const JSLinearString* searchString = lir->searchString();
13303 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
13304 auto* ool = oolCallVM<Fn, js::StringIndexOf>(
13305 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
13307 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
13310 void CodeGenerator::visitStringLastIndexOf(LStringLastIndexOf* lir) {
13311 pushArg(ToRegister(lir->searchString()));
13312 pushArg(ToRegister(lir->string()));
13314 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
13315 callVM<Fn, js::StringLastIndexOf>(lir);
13318 void CodeGenerator::visitStringStartsWith(LStringStartsWith* lir) {
13319 pushArg(ToRegister(lir->searchString()));
13320 pushArg(ToRegister(lir->string()));
13322 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13323 callVM<Fn, js::StringStartsWith>(lir);
// Inline fast path for String.prototype.startsWith with a constant,
// non-empty search string. Falls back to the js::StringStartsWith VM call
// (via |ool|) when the input needs linearizing or character loading fails.
void CodeGenerator::visitStringStartsWithInline(LStringStartsWithInline* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* searchString = lir->searchString();

  size_t length = searchString->length();
  MOZ_ASSERT(length > 0);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  auto* ool = oolCallVM<Fn, js::StringStartsWith>(
      lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));

  // Default result is |false|; paths below that jump straight to the rejoin
  // point rely on this.
  masm.move32(Imm32(0), output);

  // Can't be a prefix when the string is smaller than the search string.
  masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
                Imm32(length), ool->rejoin());

  // Unwind ropes at the start if possible.
  Label compare;
  masm.movePtr(string, temp);
  masm.branchIfNotRope(temp, &compare);

  Label unwindRope;
  masm.bind(&unwindRope);
  masm.loadRopeLeftChild(temp, output);
  masm.movePtr(output, temp);

  // If the left child is smaller than the search string, jump into the VM to
  // linearize the string.
  masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
                Imm32(length), ool->entry());

  // Otherwise keep unwinding ropes.
  masm.branchIfRope(temp, &unwindRope);

  masm.bind(&compare);

  // If operands point to the same instance, it's trivially a prefix.
  Label notPointerEqual;
  masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
                 &notPointerEqual);
  masm.move32(Imm32(1), output);
  masm.jump(ool->rejoin());
  masm.bind(&notPointerEqual);

  if (searchString->hasTwoByteChars()) {
    // Pure two-byte strings can't be a prefix of Latin-1 strings.
    JS::AutoCheckCannotGC nogc;
    if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
      Label compareChars;
      masm.branchTwoByteString(temp, &compareChars);
      masm.move32(Imm32(0), output);
      masm.jump(ool->rejoin());
      masm.bind(&compareChars);
    }
  }

  // Load the input string's characters. |output| is reused as a scratch
  // register here; it's unconditionally overwritten below.
  Register stringChars = output;
  masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());

  // Start comparing character by character.
  masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);

  masm.bind(ool->rejoin());
}
13396 void CodeGenerator::visitStringEndsWith(LStringEndsWith* lir) {
13397 pushArg(ToRegister(lir->searchString()));
13398 pushArg(ToRegister(lir->string()));
13400 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13401 callVM<Fn, js::StringEndsWith>(lir);
// Inline fast path for String.prototype.endsWith with a constant, non-empty
// search string. Mirrors visitStringStartsWithInline, but unwinds ropes on
// the right and offsets the character pointer to the suffix before comparing.
void CodeGenerator::visitStringEndsWithInline(LStringEndsWithInline* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* searchString = lir->searchString();

  size_t length = searchString->length();
  MOZ_ASSERT(length > 0);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  auto* ool = oolCallVM<Fn, js::StringEndsWith>(
      lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));

  // Default result is |false|.
  masm.move32(Imm32(0), output);

  // Can't be a suffix when the string is smaller than the search string.
  masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
                Imm32(length), ool->rejoin());

  // Unwind ropes at the end if possible.
  Label compare;
  masm.movePtr(string, temp);
  masm.branchIfNotRope(temp, &compare);

  Label unwindRope;
  masm.bind(&unwindRope);
  masm.loadRopeRightChild(temp, output);
  masm.movePtr(output, temp);

  // If the right child is smaller than the search string, jump into the VM to
  // linearize the string.
  masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
                Imm32(length), ool->entry());

  // Otherwise keep unwinding ropes.
  masm.branchIfRope(temp, &unwindRope);

  masm.bind(&compare);

  // If operands point to the same instance, it's trivially a suffix.
  Label notPointerEqual;
  masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
                 &notPointerEqual);
  masm.move32(Imm32(1), output);
  masm.jump(ool->rejoin());
  masm.bind(&notPointerEqual);

  CharEncoding encoding = searchString->hasLatin1Chars()
                              ? CharEncoding::Latin1
                              : CharEncoding::TwoByte;
  if (encoding == CharEncoding::TwoByte) {
    // Pure two-byte strings can't be a suffix of Latin-1 strings.
    JS::AutoCheckCannotGC nogc;
    if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
      Label compareChars;
      masm.branchTwoByteString(temp, &compareChars);
      masm.move32(Imm32(0), output);
      masm.jump(ool->rejoin());
      masm.bind(&compareChars);
    }
  }

  // Load the input string's characters. |output| is reused as a scratch
  // register here; it's unconditionally overwritten below.
  Register stringChars = output;
  masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());

  // Move string-char pointer to the suffix string.
  masm.loadStringLength(temp, temp);
  masm.sub32(Imm32(length), temp);
  masm.addToCharPtr(stringChars, temp, encoding);

  // Start comparing character by character.
  masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);

  masm.bind(ool->rejoin());
}
// Inline fast path for String.prototype.toLowerCase on linear Latin-1
// strings, using the unicode::latin1ToLowerCaseTable lookup table. Everything
// else (non-linear, non-Latin-1, too long, or allocation failure) takes the
// js::StringToLowerCase VM call via |ool|.
//
// Register usage is tight: |temp0| serves first as the flags scratch, then as
// the (clobbered and reloaded) length; |output| doubles as a scan pointer.
void CodeGenerator::visitStringToLowerCase(LStringToLowerCase* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());

  // On x86 there are not enough registers. In that case reuse the string
  // register as a temporary.
  Register temp3 =
      lir->temp3()->isBogusTemp() ? string : ToRegister(lir->temp3());
  Register temp4 = ToRegister(lir->temp4());

  using Fn = JSString* (*)(JSContext*, HandleString);
  OutOfLineCode* ool = oolCallVM<Fn, js::StringToLowerCase>(
      lir, ArgList(string), StoreRegisterTo(output));

  // Take the slow path if the string isn't a linear Latin-1 string.
  Imm32 linearLatin1Bits(JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT);
  Register flags = temp0;
  masm.load32(Address(string, JSString::offsetOfFlags()), flags);
  masm.and32(linearLatin1Bits, flags);
  masm.branch32(Assembler::NotEqual, flags, linearLatin1Bits, ool->entry());

  Register length = temp0;
  masm.loadStringLength(string, length);

  // Return the input if it's the empty string.
  Label notEmptyString;
  masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmptyString);
  {
    masm.movePtr(string, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&notEmptyString);

  Register inputChars = temp1;
  masm.loadStringChars(string, inputChars, CharEncoding::Latin1);

  Register toLowerCaseTable = temp2;
  masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), toLowerCaseTable);

  // Single element strings can be directly retrieved from static strings cache.
  Label notSingleElementString;
  masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleElementString);
  {
    Register current = temp4;

    masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
    masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
                         current);
    masm.lookupStaticString(current, output, gen->runtime->staticStrings());

    masm.jump(ool->rejoin());
  }
  masm.bind(&notSingleElementString);

  // Use the OOL-path when the string is too long. This prevents scanning long
  // strings which have upper case characters only near the end a second time in
  // the VM.
  constexpr int32_t MaxInlineLength = 64;
  masm.branch32(Assembler::Above, length, Imm32(MaxInlineLength), ool->entry());

  {
    // Check if there are any characters which need to be converted.
    //
    // This extra loop gives a small performance improvement for strings which
    // are already lower cased and lets us avoid calling into the runtime for
    // non-inline, all lower case strings. But more importantly it avoids
    // repeated inline allocation failures:
    // |AllocateThinOrFatInlineString| below takes the OOL-path and calls the
    // |js::StringToLowerCase| runtime function when the result string can't be
    // allocated inline. And |js::StringToLowerCase| directly returns the input
    // string when no characters need to be converted. That means it won't
    // trigger GC to clear up the free nursery space, so the next toLowerCase()
    // call will again fail to inline allocate the result string.
    Label hasUpper;
    {
      Register checkInputChars = output;
      masm.movePtr(inputChars, checkInputChars);

      Register current = temp4;

      Label start;
      masm.bind(&start);
      masm.loadChar(Address(checkInputChars, 0), current, CharEncoding::Latin1);
      masm.branch8(Assembler::NotEqual,
                   BaseIndex(toLowerCaseTable, current, TimesOne), current,
                   &hasUpper);
      masm.addPtr(Imm32(sizeof(Latin1Char)), checkInputChars);
      masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);

      // Input is already in lower case.
      masm.movePtr(string, output);
      masm.jump(ool->rejoin());
    }
    masm.bind(&hasUpper);

    // |length| was clobbered above, reload.
    masm.loadStringLength(string, length);

    // Call into the runtime when we can't create an inline string.
    masm.branch32(Assembler::Above, length,
                  Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), ool->entry());

    AllocateThinOrFatInlineString(masm, output, length, temp4,
                                  initialStringHeap(), ool->entry(),
                                  CharEncoding::Latin1);

    // When |temp3| aliases |string| (x86), preserve |string| across the copy
    // loop below, which uses |temp3| as the output-chars pointer.
    if (temp3 == string) {
      masm.push(string);
    }

    Register outputChars = temp3;
    masm.loadInlineStringCharsForStore(output, outputChars);

    {
      Register current = temp4;

      Label start;
      masm.bind(&start);
      masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
      masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
                           current);
      masm.storeChar(current, Address(outputChars, 0), CharEncoding::Latin1);
      masm.addPtr(Imm32(sizeof(Latin1Char)), inputChars);
      masm.addPtr(Imm32(sizeof(Latin1Char)), outputChars);
      masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
    }

    if (temp3 == string) {
      masm.pop(string);
    }
  }

  masm.bind(ool->rejoin());
}
13620 void CodeGenerator::visitStringToUpperCase(LStringToUpperCase* lir) {
13621 pushArg(ToRegister(lir->string()));
13623 using Fn = JSString* (*)(JSContext*, HandleString);
13624 callVM<Fn, js::StringToUpperCase>(lir);
// Map a Latin-1 char code to the static string of its lower-case form via
// unicode::latin1ToLowerCaseTable. Non-Latin-1 codes take the
// jit::CharCodeToLowerCase VM call.
void CodeGenerator::visitCharCodeToLowerCase(LCharCodeToLowerCase* lir) {
  Register code = ToRegister(lir->code());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  using Fn = JSString* (*)(JSContext*, int32_t);
  auto* ool = oolCallVM<Fn, jit::CharCodeToLowerCase>(lir, ArgList(code),
                                                      StoreRegisterTo(output));

  constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;

  // OOL path if code >= NonLatin1Min.
  masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());

  // Convert to lower case.
  masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), temp);
  masm.load8ZeroExtend(BaseIndex(temp, code, TimesOne), temp);

  // Load static string for lower case character.
  masm.lookupStaticString(temp, output, gen->runtime->staticStrings());

  masm.bind(ool->rejoin());
}
13651 void CodeGenerator::visitCharCodeToUpperCase(LCharCodeToUpperCase* lir) {
13652 Register code = ToRegister(lir->code());
13653 Register output = ToRegister(lir->output());
13654 Register temp = ToRegister(lir->temp0());
13656 using Fn = JSString* (*)(JSContext*, int32_t);
13657 auto* ool = oolCallVM<Fn, jit::CharCodeToUpperCase>(lir, ArgList(code),
13658 StoreRegisterTo(output));
13660 constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;
13662 // OOL path if code >= NonLatin1Min.
13663 masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());
13665 // Most one element Latin-1 strings can be directly retrieved from the
13666 // static strings cache, except the following three characters:
13668 // 1. ToUpper(U+00B5) = 0+039C
13669 // 2. ToUpper(U+00FF) = 0+0178
13670 // 3. ToUpper(U+00DF) = 0+0053 0+0053
13671 masm.branch32(Assembler::Equal, code, Imm32(unicode::MICRO_SIGN),
13672 ool->entry());
13673 masm.branch32(Assembler::Equal, code,
13674 Imm32(unicode::LATIN_SMALL_LETTER_Y_WITH_DIAERESIS),
13675 ool->entry());
13676 masm.branch32(Assembler::Equal, code,
13677 Imm32(unicode::LATIN_SMALL_LETTER_SHARP_S), ool->entry());
13679 // Inline unicode::ToUpperCase (without the special case for ASCII characters)
13681 constexpr size_t shift = unicode::CharInfoShift;
13683 // code >> shift
13684 masm.move32(code, temp);
13685 masm.rshift32(Imm32(shift), temp);
13687 // index = index1[code >> shift];
13688 masm.movePtr(ImmPtr(unicode::index1), output);
13689 masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);
13691 // (code & ((1 << shift) - 1)
13692 masm.move32(code, output);
13693 masm.and32(Imm32((1 << shift) - 1), output);
13695 // (index << shift) + (code & ((1 << shift) - 1))
13696 masm.lshift32(Imm32(shift), temp);
13697 masm.add32(output, temp);
13699 // index = index2[(index << shift) + (code & ((1 << shift) - 1))]
13700 masm.movePtr(ImmPtr(unicode::index2), output);
13701 masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);
13703 // Compute |index * 6| through |(index * 3) * TimesTwo|.
13704 static_assert(sizeof(unicode::CharacterInfo) == 6);
13705 masm.mulBy3(temp, temp);
13707 // upperCase = js_charinfo[index].upperCase
13708 masm.movePtr(ImmPtr(unicode::js_charinfo), output);
13709 masm.load16ZeroExtend(BaseIndex(output, temp, TimesTwo,
13710 offsetof(unicode::CharacterInfo, upperCase)),
13711 temp);
13713 // uint16_t(ch) + upperCase
13714 masm.add32(code, temp);
13716 // Clear any high bits added when performing the unsigned 16-bit addition
13717 // through a signed 32-bit addition.
13718 masm.move8ZeroExtend(temp, temp);
13720 // Load static string for upper case character.
13721 masm.lookupStaticString(temp, output, gen->runtime->staticStrings());
13723 masm.bind(ool->rejoin());
13726 void CodeGenerator::visitStringTrimStartIndex(LStringTrimStartIndex* lir) {
13727 Register string = ToRegister(lir->string());
13728 Register output = ToRegister(lir->output());
13730 auto volatileRegs = liveVolatileRegs(lir);
13731 volatileRegs.takeUnchecked(output);
13733 masm.PushRegsInMask(volatileRegs);
13735 using Fn = int32_t (*)(const JSString*);
13736 masm.setupAlignedABICall();
13737 masm.passABIArg(string);
13738 masm.callWithABI<Fn, jit::StringTrimStartIndex>();
13739 masm.storeCallInt32Result(output);
13741 masm.PopRegsInMask(volatileRegs);
13744 void CodeGenerator::visitStringTrimEndIndex(LStringTrimEndIndex* lir) {
13745 Register string = ToRegister(lir->string());
13746 Register start = ToRegister(lir->start());
13747 Register output = ToRegister(lir->output());
13749 auto volatileRegs = liveVolatileRegs(lir);
13750 volatileRegs.takeUnchecked(output);
13752 masm.PushRegsInMask(volatileRegs);
13754 using Fn = int32_t (*)(const JSString*, int32_t);
13755 masm.setupAlignedABICall();
13756 masm.passABIArg(string);
13757 masm.passABIArg(start);
13758 masm.callWithABI<Fn, jit::StringTrimEndIndex>();
13759 masm.storeCallInt32Result(output);
13761 masm.PopRegsInMask(volatileRegs);
13764 void CodeGenerator::visitStringSplit(LStringSplit* lir) {
13765 pushArg(Imm32(INT32_MAX));
13766 pushArg(ToRegister(lir->separator()));
13767 pushArg(ToRegister(lir->string()));
13769 using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
13770 callVM<Fn, js::StringSplitString>(lir);
13773 void CodeGenerator::visitInitializedLength(LInitializedLength* lir) {
13774 Address initLength(ToRegister(lir->elements()),
13775 ObjectElements::offsetOfInitializedLength());
13776 masm.load32(initLength, ToRegister(lir->output()));
13779 void CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir) {
13780 Address initLength(ToRegister(lir->elements()),
13781 ObjectElements::offsetOfInitializedLength());
13782 SetLengthFromIndex(masm, lir->index(), initLength);
13785 void CodeGenerator::visitNotBI(LNotBI* lir) {
13786 Register input = ToRegister(lir->input());
13787 Register output = ToRegister(lir->output());
13789 masm.cmp32Set(Assembler::Equal, Address(input, BigInt::offsetOfLength()),
13790 Imm32(0), output);
// !object: normally |false|, unless the object emulates undefined (e.g.
// document.all). When the emulates-undefined fuse is intact no object can
// emulate undefined, so the result is unconditionally |false|.
void CodeGenerator::visitNotO(LNotO* lir) {
  Register objreg = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  if (intact) {
    // Bug 1874905: It would be fantastic if this could be optimized out.
    assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
    masm.move32(Imm32(0), output);
  } else {
    auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
    addOutOfLineCode(ool, lir->mir());

    Label* ifEmulatesUndefined = ool->label1();
    Label* ifDoesntEmulateUndefined = ool->label2();

    branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
                                      ifDoesntEmulateUndefined, output, ool);
    // fall through

    Label join;

    // Doesn't emulate undefined: !object is false.
    masm.move32(Imm32(0), output);
    masm.jump(&join);

    // Emulates undefined: !object is true.
    masm.bind(ifEmulatesUndefined);
    masm.move32(Imm32(1), output);

    masm.bind(&join);
  }
}
// !value for a boxed Value: test truthiness, then materialize the negated
// boolean into |output|.
void CodeGenerator::visitNotV(LNotV* lir) {
  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* ifTruthy = ool->label1();
  Label* ifFalsy = ool->label2();

  ValueOperand input = ToValue(lir, LNotV::InputIndex);
  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
  FloatRegister floatTemp = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());
  const TypeDataList& observedTypes = lir->mir()->observedTypes();

  testValueTruthy(input, tempToUnbox, output, floatTemp, observedTypes,
                  ifTruthy, ifFalsy, ool);

  Label join;

  // Note that the testValueTruthy call above may choose to fall through
  // to ifTruthy instead of branching there.
  masm.bind(ifTruthy);
  masm.move32(Imm32(0), output);
  masm.jump(&join);

  masm.bind(ifFalsy);
  masm.move32(Imm32(1), output);

  // both branches meet here.
  masm.bind(&join);
}
// Bail out (invalidate and resume in the interpreter) when |index| is not in
// [0, length). Unsigned comparisons are used throughout so negative indices
// compare as large values and fail the check. Works for both Int32 and
// IntPtr typed checks.
void CodeGenerator::visitBoundsCheck(LBoundsCheck* lir) {
  const LAllocation* index = lir->index();
  const LAllocation* length = lir->length();
  LSnapshot* snapshot = lir->snapshot();

  MIRType type = lir->mir()->type();

  // Emit a 32-bit or pointer-sized compare-and-bail, depending on the check's
  // MIR type.
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (index->isConstant()) {
    // Use uint32 so that the comparison is unsigned.
    uint32_t idx = ToInt32(index);
    if (length->isConstant()) {
      // Fully constant check: either trivially in bounds (emit nothing) or an
      // unconditional bailout.
      uint32_t len = ToInt32(lir->length());
      if (idx < len) {
        return;
      }
      bailout(snapshot);
      return;
    }

    if (length->isRegister()) {
      bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), idx);
    } else {
      bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), idx);
    }
    return;
  }

  Register indexReg = ToRegister(index);
  if (length->isConstant()) {
    bailoutCmpConstant(Assembler::AboveOrEqual, indexReg, ToInt32(length));
  } else if (length->isRegister()) {
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), indexReg);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), indexReg);
  }
}
// Bail out unless every index in [index + minimum, index + maximum] is in
// bounds. The arithmetic on |temp| is overflow-checked wherever a wraparound
// could wrongly pass the final unsigned compare against |length|.
void CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir) {
  int32_t min = lir->mir()->minimum();
  int32_t max = lir->mir()->maximum();
  MOZ_ASSERT(max >= min);

  LSnapshot* snapshot = lir->snapshot();
  MIRType type = lir->mir()->type();

  const LAllocation* length = lir->length();
  Register temp = ToRegister(lir->getTemp(0));

  // Emit a 32-bit or pointer-sized compare-and-bail, depending on the check's
  // MIR type.
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (lir->index()->isConstant()) {
    // Constant index: if both endpoints can be folded without overflow and
    // the lower endpoint is nonnegative, a single compare against the upper
    // endpoint suffices.
    int32_t nmin, nmax;
    int32_t index = ToInt32(lir->index());
    if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
      if (length->isRegister()) {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), nmax);
      } else {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), nmax);
      }
      return;
    }
    masm.mov(ImmWord(index), temp);
  } else {
    masm.mov(ToRegister(lir->index()), temp);
  }

  // If the minimum and maximum differ then do an underflow check first.
  // If the two are the same then doing an unsigned comparison on the
  // length will also catch a negative index.
  if (min != max) {
    if (min != 0) {
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(min), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    }

    bailoutCmpConstant(Assembler::LessThan, temp, 0);

    if (min != 0) {
      // Rebase |max| relative to |min| so a single addition below reaches the
      // upper endpoint; fall back to subtracting |min| from |temp| if the
      // difference itself would overflow.
      int32_t diff;
      if (SafeSub(max, min, &diff)) {
        max = diff;
      } else {
        if (type == MIRType::Int32) {
          masm.sub32(Imm32(min), temp);
        } else {
          masm.subPtr(Imm32(min), temp);
        }
      }
    }
  }

  // Compute the maximum possible index. No overflow check is needed when
  // max > 0. We can only wraparound to a negative number, which will test as
  // larger than all nonnegative numbers in the unsigned comparison, and the
  // length is required to be nonnegative (else testing a negative length
  // would succeed on any nonnegative index).
  if (max != 0) {
    if (max < 0) {
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(max), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    } else {
      if (type == MIRType::Int32) {
        masm.add32(Imm32(max), temp);
      } else {
        masm.addPtr(Imm32(max), temp);
      }
    }
  }

  if (length->isRegister()) {
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), temp);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), temp);
  }
}
14018 void CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir) {
14019 int32_t min = lir->mir()->minimum();
14020 bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
14021 lir->snapshot());
14024 void CodeGenerator::visitSpectreMaskIndex(LSpectreMaskIndex* lir) {
14025 MOZ_ASSERT(JitOptions.spectreIndexMasking);
14027 const LAllocation* length = lir->length();
14028 Register index = ToRegister(lir->index());
14029 Register output = ToRegister(lir->output());
14031 if (lir->mir()->type() == MIRType::Int32) {
14032 if (length->isRegister()) {
14033 masm.spectreMaskIndex32(index, ToRegister(length), output);
14034 } else {
14035 masm.spectreMaskIndex32(index, ToAddress(length), output);
14037 } else {
14038 MOZ_ASSERT(lir->mir()->type() == MIRType::IntPtr);
14039 if (length->isRegister()) {
14040 masm.spectreMaskIndexPtr(index, ToRegister(length), output);
14041 } else {
14042 masm.spectreMaskIndexPtr(index, ToAddress(length), output);
// Out-of-line path shared by LStoreElementHoleV/T: taken when the index is
// outside the initialized length, so the store may need to grow the elements.
class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* ins_;  // The originating store instruction (V or T flavor).

 public:
  explicit OutOfLineStoreElementHole(LInstruction* ins) : ins_(ins) {
    MOZ_ASSERT(ins->isStoreElementHoleV() || ins->isStoreElementHoleT());
  }

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineStoreElementHole(this);
  }

  // MIR node, reachable through whichever LIR flavor wraps it.
  MStoreElementHole* mir() const {
    return ins_->isStoreElementHoleV() ? ins_->toStoreElementHoleV()->mir()
                                       : ins_->toStoreElementHoleT()->mir();
  }
  LInstruction* ins() const { return ins_; }
};
14066 void CodeGenerator::emitStoreHoleCheck(Register elements,
14067 const LAllocation* index,
14068 LSnapshot* snapshot) {
14069 Label bail;
14070 if (index->isConstant()) {
14071 Address dest(elements, ToInt32(index) * sizeof(js::Value));
14072 masm.branchTestMagic(Assembler::Equal, dest, &bail);
14073 } else {
14074 BaseObjectElementIndex dest(elements, ToRegister(index));
14075 masm.branchTestMagic(Assembler::Equal, dest, &bail);
14077 bailoutFrom(&bail, snapshot);
14080 void CodeGenerator::emitStoreElementTyped(const LAllocation* value,
14081 MIRType valueType, Register elements,
14082 const LAllocation* index) {
14083 MOZ_ASSERT(valueType != MIRType::MagicHole);
14084 ConstantOrRegister v = ToConstantOrRegister(value, valueType);
14085 if (index->isConstant()) {
14086 Address dest(elements, ToInt32(index) * sizeof(js::Value));
14087 masm.storeUnboxedValue(v, valueType, dest);
14088 } else {
14089 BaseObjectElementIndex dest(elements, ToRegister(index));
14090 masm.storeUnboxedValue(v, valueType, dest);
14094 void CodeGenerator::visitStoreElementT(LStoreElementT* store) {
14095 Register elements = ToRegister(store->elements());
14096 const LAllocation* index = store->index();
14098 if (store->mir()->needsBarrier()) {
14099 emitPreBarrier(elements, index);
14102 if (store->mir()->needsHoleCheck()) {
14103 emitStoreHoleCheck(elements, index, store->snapshot());
14106 emitStoreElementTyped(store->value(), store->mir()->value()->type(), elements,
14107 index);
14110 void CodeGenerator::visitStoreElementV(LStoreElementV* lir) {
14111 const ValueOperand value = ToValue(lir, LStoreElementV::Value);
14112 Register elements = ToRegister(lir->elements());
14113 const LAllocation* index = lir->index();
14115 if (lir->mir()->needsBarrier()) {
14116 emitPreBarrier(elements, index);
14119 if (lir->mir()->needsHoleCheck()) {
14120 emitStoreHoleCheck(elements, index, lir->snapshot());
14123 if (lir->index()->isConstant()) {
14124 Address dest(elements, ToInt32(lir->index()) * sizeof(js::Value));
14125 masm.storeValue(value, dest);
14126 } else {
14127 BaseObjectElementIndex dest(elements, ToRegister(lir->index()));
14128 masm.storeValue(value, dest);
14132 void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
14133 Register elements = ToRegister(lir->elements());
14134 Register index = ToRegister(lir->index());
14136 Address elementsFlags(elements, ObjectElements::offsetOfFlags());
14137 masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);
14139 BaseObjectElementIndex element(elements, index);
14140 masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
// Typed (unboxed) element store that may need to grow the initialized
// length: indices outside the initialized length take the
// OutOfLineStoreElementHole path, which rejoins before the actual store.
void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());

  // Spectre-safe bounds check against the initialized length; out-of-bounds
  // indices jump to the OOL path.
  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  masm.bind(ool->rejoin());
  emitStoreElementTyped(lir->value(), lir->mir()->value()->type(), elements,
                        lir->index());

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    ConstantOrRegister val =
        ToConstantOrRegister(lir->value(), lir->mir()->value()->type());
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp, val);
  }
}
// Boxed-Value counterpart of visitStoreElementHoleT: store |value| into a
// dense element, taking the out-of-line path when the index is at or past
// the initialized length.
void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  const ValueOperand value = ToValue(lir, LStoreElementHoleV::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  // Jump to the OOL path when |index >= initializedLength| (Spectre-safe).
  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // OOL path rejoins after the pre-barrier: appended memory is uninitialized.
  masm.bind(ool->rejoin());
  masm.storeValue(value, BaseObjectElementIndex(elements, index));

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp,
                                ConstantOrRegister(value));
// Out-of-line path shared by LStoreElementHoleT/V. Reached when
// |index >= initializedLength|. Handles exactly the append case
// (|index == initializedLength|), growing the elements capacity through a
// pure VM helper when needed, then jumps back to the inline store.
void CodeGenerator::visitOutOfLineStoreElementHole(
    OutOfLineStoreElementHole* ool) {
  Register object, elements, index;
  LInstruction* ins = ool->ins();
  mozilla::Maybe<ConstantOrRegister> value;
  Register temp;

  // Unpack the operands from whichever LIR flavor produced this OOL code.
  if (ins->isStoreElementHoleV()) {
    LStoreElementHoleV* store = ins->toStoreElementHoleV();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    value.emplace(
        TypedOrValueRegister(ToValue(store, LStoreElementHoleV::ValueIndex)));
    temp = ToRegister(store->temp0());
  } else {
    LStoreElementHoleT* store = ins->toStoreElementHoleT();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    if (store->value()->isConstant()) {
      value.emplace(
          ConstantOrRegister(store->value()->toConstant()->toJSValue()));
    } else {
      MIRType valueType = store->mir()->value()->type();
      value.emplace(
          TypedOrValueRegister(valueType, ToAnyRegister(store->value())));
    temp = ToRegister(store->temp0());
  // NOTE(review): |value| is materialized above but not referenced below;
  // the actual store happens on the inline path after rejoin — confirm.

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());

  // We're out-of-bounds. We only handle the index == initlength case.
  // If index > initializedLength, bail out. Note that this relies on the
  // condition flags sticking from the incoming branch.
  // Also note: this branch does not need Spectre mitigations, doing that for
  // the capacity check below is sufficient.
  Label allocElement, addNewElement;
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  // Had to reimplement for MIPS because there are no flags.
  bailoutCmp32(Assembler::NotEqual, initLength, index, ins->snapshot());
#else
  bailoutIf(Assembler::NotEqual, ins->snapshot());
#endif

  // If index < capacity, we can add a dense element inline. If not, we need
  // to allocate more elements first.
  masm.spectreBoundsCheck32(
      index, Address(elements, ObjectElements::offsetOfCapacity()), temp,
      &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  // Save all live volatile registers, except |temp|.
  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  liveRegs.takeUnchecked(temp);
  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject*);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  // The pure helper returns false on failure; bail out to the interpreter.
  bailoutIfFalseBool(temp, ins->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);

  masm.bind(&addNewElement);

  // Increment initLength
  masm.add32(Imm32(1), initLength);

  // If length is now <= index, increment length too.
  Label skipIncrementLength;
  Address length(elements, ObjectElements::offsetOfLength());
  masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
  masm.add32(Imm32(1), length);
  masm.bind(&skipIncrementLength);

  // Jump to the inline path where we will store the value.
  // We rejoin after the prebarrier, because the memory is uninitialized.
  masm.jump(ool->rejoin());
14287 void CodeGenerator::visitArrayPopShift(LArrayPopShift* lir) {
14288 Register obj = ToRegister(lir->object());
14289 Register temp1 = ToRegister(lir->temp0());
14290 Register temp2 = ToRegister(lir->temp1());
14291 ValueOperand out = ToOutValue(lir);
14293 Label bail;
14294 if (lir->mir()->mode() == MArrayPopShift::Pop) {
14295 masm.packedArrayPop(obj, out, temp1, temp2, &bail);
14296 } else {
14297 MOZ_ASSERT(lir->mir()->mode() == MArrayPopShift::Shift);
14298 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
14299 masm.packedArrayShift(obj, out, temp1, temp2, volatileRegs, &bail);
14301 bailoutFrom(&bail, lir->snapshot());
// Out-of-line code for LArrayPush: taken when the array is at capacity and
// the elements storage must be grown before storing the pushed value.
class OutOfLineArrayPush : public OutOfLineCodeBase<CodeGenerator> {
  LArrayPush* ins_;  // The LIR instruction this OOL path belongs to.

 public:
  explicit OutOfLineArrayPush(LArrayPush* ins) : ins_(ins) {}

  // Dispatch back into the code generator's visitor.
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineArrayPush(this);

  LArrayPush* ins() const { return ins_; }
// Array.prototype.push fast path: append |value| and return the new length.
// |length| doubles as the output register. Falls back to OOL allocation
// when the array is at capacity.
void CodeGenerator::visitArrayPush(LArrayPush* lir) {
  Register obj = ToRegister(lir->object());
  Register elementsTemp = ToRegister(lir->temp0());
  Register length = ToRegister(lir->output());
  ValueOperand value = ToValue(lir, LArrayPush::ValueIndex);
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  auto* ool = new (alloc()) OutOfLineArrayPush(lir);
  addOutOfLineCode(ool, lir->mir());

  // Load obj->elements in elementsTemp.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);

  Address initLengthAddr(elementsTemp,
                         ObjectElements::offsetOfInitializedLength());
  Address lengthAddr(elementsTemp, ObjectElements::offsetOfLength());
  Address capacityAddr(elementsTemp, ObjectElements::offsetOfCapacity());

  // Bail out if length != initLength.
  masm.load32(lengthAddr, length);
  bailoutCmp32(Assembler::NotEqual, initLengthAddr, length, lir->snapshot());

  // If length < capacity, we can add a dense element inline. If not, we
  // need to allocate more elements.
  masm.spectreBoundsCheck32(length, capacityAddr, spectreTemp, ool->entry());
  masm.bind(ool->rejoin());

  // Store the value.
  masm.storeValue(value, BaseObjectElementIndex(elementsTemp, length));

  // Update length and initialized length.
  masm.add32(Imm32(1), length);
  masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
  masm.store32(length, Address(elementsTemp,
                               ObjectElements::offsetOfInitializedLength()));

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    regs.addUnchecked(length);
    // indexDiff = -1: |length| was already incremented above, so the stored
    // element lives at index |length - 1|.
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->output()->output(),
                                elementsTemp, ConstantOrRegister(value),
                                /* indexDiff = */ -1);
// Out-of-line path for visitArrayPush: grow the elements storage via the
// pure VM helper, then rejoin the inline store. Bails out if growth fails.
void CodeGenerator::visitOutOfLineArrayPush(OutOfLineArrayPush* ool) {
  LArrayPush* ins = ool->ins();

  Register object = ToRegister(ins->object());
  Register temp = ToRegister(ins->temp0());

  // Preserve live volatiles across the ABI call; keep the length output and
  // the pushed value alive explicitly, but not |temp| (it is clobbered).
  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  liveRegs.takeUnchecked(temp);
  liveRegs.addUnchecked(ToRegister(ins->output()));
  liveRegs.addUnchecked(ToValue(ins, LArrayPush::ValueIndex));

  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject* obj);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  // The pure helper returns false on failure; bail out in that case.
  bailoutIfFalseBool(temp, ins->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  masm.jump(ool->rejoin());
14393 void CodeGenerator::visitArraySlice(LArraySlice* lir) {
14394 Register object = ToRegister(lir->object());
14395 Register begin = ToRegister(lir->begin());
14396 Register end = ToRegister(lir->end());
14397 Register temp0 = ToRegister(lir->temp0());
14398 Register temp1 = ToRegister(lir->temp1());
14400 Label call, fail;
14402 Label bail;
14403 masm.branchArrayIsNotPacked(object, temp0, temp1, &bail);
14404 bailoutFrom(&bail, lir->snapshot());
14406 // Try to allocate an object.
14407 TemplateObject templateObject(lir->mir()->templateObj());
14408 masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
14409 &fail);
14411 masm.jump(&call);
14413 masm.bind(&fail);
14414 masm.movePtr(ImmPtr(nullptr), temp0);
14416 masm.bind(&call);
14418 pushArg(temp0);
14419 pushArg(end);
14420 pushArg(begin);
14421 pushArg(object);
14423 using Fn =
14424 JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
14425 callVM<Fn, ArraySliceDense>(lir);
14428 void CodeGenerator::visitArgumentsSlice(LArgumentsSlice* lir) {
14429 Register object = ToRegister(lir->object());
14430 Register begin = ToRegister(lir->begin());
14431 Register end = ToRegister(lir->end());
14432 Register temp0 = ToRegister(lir->temp0());
14433 Register temp1 = ToRegister(lir->temp1());
14435 Label call, fail;
14437 // Try to allocate an object.
14438 TemplateObject templateObject(lir->mir()->templateObj());
14439 masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
14440 &fail);
14442 masm.jump(&call);
14444 masm.bind(&fail);
14445 masm.movePtr(ImmPtr(nullptr), temp0);
14447 masm.bind(&call);
14449 pushArg(temp0);
14450 pushArg(end);
14451 pushArg(begin);
14452 pushArg(object);
14454 using Fn =
14455 JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
14456 callVM<Fn, ArgumentsSliceDense>(lir);
#ifdef DEBUG
// Debug-only sanity checks for arguments-slice bounds:
//   0 <= begin, 0 <= count, begin <= numActualArgs, count <= numActualArgs,
//   and begin + count <= numActualArgs.
// |begin| and |count| are preserved; |numActualArgs| is clobbered (see the
// subtraction below).
void CodeGenerator::emitAssertArgumentsSliceBounds(const RegisterOrInt32& begin,
                                                   const RegisterOrInt32& count,
                                                   Register numActualArgs) {
  // |begin| must be positive or zero.
  if (begin.is<Register>()) {
    Label beginOk;
    masm.branch32(Assembler::GreaterThanOrEqual, begin.as<Register>(), Imm32(0),
                  &beginOk);
    masm.assumeUnreachable("begin < 0");
    masm.bind(&beginOk);
  } else {
    MOZ_ASSERT(begin.as<int32_t>() >= 0);

  // |count| must be positive or zero.
  if (count.is<Register>()) {
    Label countOk;
    masm.branch32(Assembler::GreaterThanOrEqual, count.as<Register>(), Imm32(0),
                  &countOk);
    masm.assumeUnreachable("count < 0");
    masm.bind(&countOk);
  } else {
    MOZ_ASSERT(count.as<int32_t>() >= 0);

  // |begin| must be less-or-equal to |numActualArgs|.
  Label argsBeginOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginOk);
  masm.assumeUnreachable("begin <= numActualArgs");
  masm.bind(&argsBeginOk);

  // |count| must be less-or-equal to |numActualArgs|.
  Label argsCountOk;
  if (count.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, count.as<Register>(),
                   &argsCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(count.as<int32_t>()), &argsCountOk);
  masm.assumeUnreachable("count <= numActualArgs");
  masm.bind(&argsCountOk);

  // |begin| and |count| must be preserved, but |numActualArgs| can be changed.
  //
  // Pre-condition: |count| <= |numActualArgs|
  // Condition to test: |begin + count| <= |numActualArgs|
  // Transform to: |begin| <= |numActualArgs - count|
  if (count.is<Register>()) {
    masm.subPtr(count.as<Register>(), numActualArgs);
  } else {
    masm.subPtr(Imm32(count.as<int32_t>()), numActualArgs);

  // |begin + count| must be less-or-equal to |numActualArgs|.
  Label argsBeginCountOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginCountOk);
  masm.assumeUnreachable("begin + count <= numActualArgs");
  masm.bind(&argsBeginCountOk);
#endif
// Allocate a new array of |count| elements for a slice operation and set
// its length/initialized length. Takes the OOL VM call when the template
// object's inline capacity is too small or inline allocation fails. After
// ool->rejoin(), |output| holds the array in either case.
template <class ArgumentsSlice>
void CodeGenerator::emitNewArray(ArgumentsSlice* lir,
                                 const RegisterOrInt32& count, Register output,
                                 Register temp) {
  using Fn = ArrayObject* (*)(JSContext*, int32_t);
  // VM fallback: allocate the array (with dense init length ensured) out of
  // line; |count| is passed as a register or an immediate.
  auto* ool = count.match(
      [&](Register count) {
        return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
            lir, ArgList(count), StoreRegisterTo(output));
      [&](int32_t count) {
        return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
            lir, ArgList(Imm32(count)), StoreRegisterTo(output));

  TemplateObject templateObject(lir->mir()->templateObj());
  MOZ_ASSERT(templateObject.isArrayObject());

  auto templateNativeObj = templateObject.asTemplateNativeObject();
  MOZ_ASSERT(templateNativeObj.getArrayLength() == 0);
  MOZ_ASSERT(templateNativeObj.getDenseInitializedLength() == 0);
  MOZ_ASSERT(!templateNativeObj.hasDynamicElements());

  // Check array capacity. Call into the VM if the template object's capacity
  // is too small.
  bool tryAllocate = count.match(
      [&](Register count) {
        masm.branch32(Assembler::Above, count,
                      Imm32(templateNativeObj.getDenseCapacity()),
                      ool->entry());
        return true;
      [&](int32_t count) {
        MOZ_ASSERT(count >= 0);
        if (uint32_t(count) > templateNativeObj.getDenseCapacity()) {
          masm.jump(ool->entry());
          return false;
        return true;

  if (tryAllocate) {
    // Try to allocate an object.
    masm.createGCObject(output, temp, templateObject, lir->mir()->initialHeap(),
                        ool->entry());

    auto setInitializedLengthAndLength = [&](auto count) {
      const int elementsOffset = NativeObject::offsetOfFixedElements();

      // Update initialized length.
      Address initLength(
          output, elementsOffset + ObjectElements::offsetOfInitializedLength());
      masm.store32(count, initLength);

      // Update length.
      Address length(output, elementsOffset + ObjectElements::offsetOfLength());
      masm.store32(count, length);

    // The array object was successfully created. Set the length and initialized
    // length and then proceed to fill the elements.
    count.match([&](Register count) { setInitializedLengthAndLength(count); },
                [&](int32_t count) {
                  if (count > 0) {
                    setInitializedLengthAndLength(Imm32(count));

  masm.bind(ool->rejoin());
// Create an array holding |count| actual arguments of the current frame,
// starting at argument |begin|. Allocates the array via emitNewArray, then
// copies the arguments from the frame in a loop.
void CodeGenerator::visitFrameArgumentsSlice(LFrameArgumentsSlice* lir) {
  Register begin = ToRegister(lir->begin());
  Register count = ToRegister(lir->count());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  masm.loadNumActualArgs(FramePointer, temp);
  emitAssertArgumentsSliceBounds(RegisterOrInt32(begin), RegisterOrInt32(count),
                                 temp);
#endif

  emitNewArray(lir, RegisterOrInt32(count), output, temp);

  // Nothing to copy for an empty slice.
  Label done;
  masm.branch32(Assembler::Equal, count, Imm32(0), &done);

  // Grab a free Value-sized register pair/register for the copy loop.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  allRegs.take(begin);
  allRegs.take(count);
  allRegs.take(temp);
  allRegs.take(output);

  ValueOperand value = allRegs.takeAnyValue();

  // |output| and |begin| are clobbered below (reused as |elements| and
  // |argIndex|), so preserve them across the loop.
  LiveRegisterSet liveRegs;
  liveRegs.add(output);
  liveRegs.add(begin);
  liveRegs.add(value);

  masm.PushRegsInMask(liveRegs);

  // Initialize all elements.

  Register elements = output;
  masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

  Register argIndex = begin;

  Register index = temp;
  masm.move32(Imm32(0), index);

  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
  BaseValueIndex argPtr(FramePointer, argIndex, argvOffset);

  Label loop;
  masm.bind(&loop);

  masm.loadValue(argPtr, value);

  // We don't need a pre-barrier, because the element at |index| is guaranteed
  // to be a non-GC thing (either uninitialized memory or the magic hole
  // value).
  masm.storeValue(value, BaseObjectElementIndex(elements, index));

  masm.add32(Imm32(1), index);
  masm.add32(Imm32(1), argIndex);

  masm.branch32(Assembler::LessThan, index, count, &loop);

  masm.PopRegsInMask(liveRegs);

  // Emit a post-write barrier if |output| is tenured.
  //
  // We expect that |output| is nursery allocated, so it isn't worth the
  // trouble to check if no frame argument is a nursery thing, which would
  // allow to omit the post-write barrier.
  masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);

  LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
  volatileRegs.takeUnchecked(temp);
  if (output.volatile_()) {
    volatileRegs.addUnchecked(output);

  masm.PushRegsInMask(volatileRegs);
  emitPostWriteBarrier(output);
  masm.PopRegsInMask(volatileRegs);

  masm.bind(&done);
14688 CodeGenerator::RegisterOrInt32 CodeGenerator::ToRegisterOrInt32(
14689 const LAllocation* allocation) {
14690 if (allocation->isConstant()) {
14691 return RegisterOrInt32(allocation->toConstant()->toInt32());
14693 return RegisterOrInt32(ToRegister(allocation));
// Slice over inlined-call arguments, where each argument is a LIR operand
// rather than a frame slot. |begin| and |count| may each be a register or a
// compile-time constant, giving four copy strategies below.
void CodeGenerator::visitInlineArgumentsSlice(LInlineArgumentsSlice* lir) {
  RegisterOrInt32 begin = ToRegisterOrInt32(lir->begin());
  RegisterOrInt32 count = ToRegisterOrInt32(lir->count());
  Register temp = ToRegister(lir->temp());
  Register output = ToRegister(lir->output());

  uint32_t numActuals = lir->mir()->numActuals();

#ifdef DEBUG
  masm.move32(Imm32(numActuals), temp);

  emitAssertArgumentsSliceBounds(begin, count, temp);
#endif

  emitNewArray(lir, count, output, temp);

  // We're done if there are no actual arguments.
  if (numActuals == 0) {
    return;

  // Check if any arguments have to be copied.
  Label done;
  if (count.is<Register>()) {
    masm.branch32(Assembler::Equal, count.as<Register>(), Imm32(0), &done);
  } else if (count.as<int32_t>() == 0) {
    return;

  // Fetch the i-th inlined argument as a constant or register operand.
  auto getArg = [&](uint32_t i) {
    return toConstantOrRegister(lir, LInlineArgumentsSlice::ArgIndex(i),
                                lir->mir()->getArg(i)->type());

  auto storeArg = [&](uint32_t i, auto dest) {
    // We don't need a pre-barrier because the element at |index| is guaranteed
    // to be a non-GC thing (either uninitialized memory or the magic hole
    // value).
    masm.storeConstantOrRegister(getArg(i), dest);

  // Initialize all elements.
  if (numActuals == 1) {
    // There's exactly one argument. We've checked that |count| is non-zero,
    // which implies that |begin| must be zero.
    MOZ_ASSERT_IF(begin.is<int32_t>(), begin.as<int32_t>() == 0);

    Register elements = temp;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    storeArg(0, Address(elements, 0));
  } else if (begin.is<Register>()) {
    // There is more than one argument and |begin| isn't a compile-time
    // constant. Iterate through 0..numActuals to search for |begin| and then
    // start copying |count| arguments from that index.

    LiveGeneralRegisterSet liveRegs;
    liveRegs.add(output);
    liveRegs.add(begin.as<Register>());

    masm.PushRegsInMask(liveRegs);

    Register elements = output;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    Register argIndex = begin.as<Register>();

    Register index = temp;
    masm.move32(Imm32(0), index);

    // Unrolled search-and-copy: for each possible argument position, copy
    // it iff |argIndex| currently equals that position.
    Label doneLoop;
    for (uint32_t i = 0; i < numActuals; ++i) {
      Label next;
      masm.branch32(Assembler::NotEqual, argIndex, Imm32(i), &next);

      storeArg(i, BaseObjectElementIndex(elements, index));

      masm.add32(Imm32(1), index);
      masm.add32(Imm32(1), argIndex);

      if (count.is<Register>()) {
        masm.branch32(Assembler::GreaterThanOrEqual, index,
                      count.as<Register>(), &doneLoop);
      } else {
        masm.branch32(Assembler::GreaterThanOrEqual, index,
                      Imm32(count.as<int32_t>()), &doneLoop);

      masm.bind(&next);

    masm.bind(&doneLoop);

    masm.PopRegsInMask(liveRegs);
  } else {
    // There is more than one argument and |begin| is a compile-time constant.

    Register elements = temp;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    int32_t argIndex = begin.as<int32_t>();

    int32_t index = 0;

    Label doneLoop;
    for (uint32_t i = argIndex; i < numActuals; ++i) {
      storeArg(i, Address(elements, index * sizeof(Value)));

      index += 1;

      if (count.is<Register>()) {
        masm.branch32(Assembler::LessThanOrEqual, count.as<Register>(),
                      Imm32(index), &doneLoop);
      } else {
        if (index >= count.as<int32_t>()) {
          break;

    masm.bind(&doneLoop);

  // Determine if we have to emit post-write barrier.
  //
  // If either |begin| or |count| is a constant, use their value directly.
  // Otherwise assume we copy all inline arguments from 0..numActuals.
  bool postWriteBarrier = false;
  uint32_t actualBegin = begin.match([](Register) { return 0; },
                                     [](int32_t value) { return value; });
  uint32_t actualCount =
      count.match([=](Register) { return numActuals; },
                  [](int32_t value) -> uint32_t { return value; });
  for (uint32_t i = 0; i < actualCount; ++i) {
    ConstantOrRegister arg = getArg(actualBegin + i);
    if (arg.constant()) {
      Value v = arg.value();
      if (v.isGCThing() && IsInsideNursery(v.toGCThing())) {
        postWriteBarrier = true;
    } else {
      MIRType type = arg.reg().type();
      if (type == MIRType::Value || NeedsPostBarrier(type)) {
        postWriteBarrier = true;

  // Emit a post-write barrier if |output| is tenured and we couldn't
  // determine at compile-time that no barrier is needed.
  if (postWriteBarrier) {
    masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(temp);
    if (output.volatile_()) {
      volatileRegs.addUnchecked(output);

    masm.PushRegsInMask(volatileRegs);
    emitPostWriteBarrier(output);
    masm.PopRegsInMask(volatileRegs);

  masm.bind(&done);
// Normalize a slice boundary term against |length|:
//   value <  0  ->  max(value + length, 0)
//   value >= 0  ->  min(value, length)
void CodeGenerator::visitNormalizeSliceTerm(LNormalizeSliceTerm* lir) {
  Register value = ToRegister(lir->value());
  Register length = ToRegister(lir->length());
  Register output = ToRegister(lir->output());

  masm.move32(value, output);

  Label positive;
  masm.branch32(Assembler::GreaterThanOrEqual, value, Imm32(0), &positive);

  // Negative term: add the length, then clamp to zero from below.
  Label done;
  masm.add32(length, output);
  masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(0), &done);
  masm.move32(Imm32(0), output);
  masm.jump(&done);

  // Non-negative term: clamp to |length| from above.
  masm.bind(&positive);
  masm.cmp32Move32(Assembler::LessThan, length, value, length, output);

  masm.bind(&done);
// Array.prototype.join. Inline fast paths for the empty array (returns the
// empty atom) and a single-string-element array (returns that string);
// everything else goes through the ArrayJoin VM call.
void CodeGenerator::visitArrayJoin(LArrayJoin* lir) {
  Label skipCall;

  Register output = ToRegister(lir->output());
  Register sep = ToRegister(lir->separator());
  Register array = ToRegister(lir->array());
  Register temp = ToRegister(lir->temp0());

  // Fast path for simple length <= 1 cases.
  masm.loadPtr(Address(array, NativeObject::offsetOfElements()), temp);
  Address length(temp, ObjectElements::offsetOfLength());
  Address initLength(temp, ObjectElements::offsetOfInitializedLength());

  // Check for length == 0
  Label notEmpty;
  masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmpty);
  const JSAtomState& names = gen->runtime->names();
  masm.movePtr(ImmGCPtr(names.empty_), output);
  masm.jump(&skipCall);

  masm.bind(&notEmpty);
  Label notSingleString;
  // Check for length == 1, initializedLength >= 1, arr[0].isString()
  masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleString);
  masm.branch32(Assembler::LessThan, initLength, Imm32(1), &notSingleString);

  Address elem0(temp, 0);
  masm.branchTestString(Assembler::NotEqual, elem0, &notSingleString);

  // At this point, 'output' can be used as a scratch register, since we're
  // guaranteed to succeed.
  masm.unboxString(elem0, output);
  masm.jump(&skipCall);
  masm.bind(&notSingleString);

  pushArg(sep);
  pushArg(array);

  using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
  callVM<Fn, jit::ArrayJoin>(lir);
  masm.bind(&skipCall);
14928 void CodeGenerator::visitObjectKeys(LObjectKeys* lir) {
14929 Register object = ToRegister(lir->object());
14931 pushArg(object);
14933 using Fn = JSObject* (*)(JSContext*, HandleObject);
14934 callVM<Fn, jit::ObjectKeys>(lir);
14937 void CodeGenerator::visitObjectKeysLength(LObjectKeysLength* lir) {
14938 Register object = ToRegister(lir->object());
14940 pushArg(object);
14942 using Fn = bool (*)(JSContext*, HandleObject, int32_t*);
14943 callVM<Fn, jit::ObjectKeysLength>(lir);
14946 void CodeGenerator::visitGetIteratorCache(LGetIteratorCache* lir) {
14947 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14948 TypedOrValueRegister val =
14949 toConstantOrRegister(lir, LGetIteratorCache::ValueIndex,
14950 lir->mir()->value()->type())
14951 .reg();
14952 Register output = ToRegister(lir->output());
14953 Register temp0 = ToRegister(lir->temp0());
14954 Register temp1 = ToRegister(lir->temp1());
14956 IonGetIteratorIC ic(liveRegs, val, output, temp0, temp1);
14957 addIC(lir, allocateIC(ic));
14960 void CodeGenerator::visitOptimizeSpreadCallCache(
14961 LOptimizeSpreadCallCache* lir) {
14962 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14963 ValueOperand val = ToValue(lir, LOptimizeSpreadCallCache::ValueIndex);
14964 ValueOperand output = ToOutValue(lir);
14965 Register temp = ToRegister(lir->temp0());
14967 IonOptimizeSpreadCallIC ic(liveRegs, val, output, temp);
14968 addIC(lir, allocateIC(ic));
14971 void CodeGenerator::visitCloseIterCache(LCloseIterCache* lir) {
14972 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14973 Register iter = ToRegister(lir->iter());
14974 Register temp = ToRegister(lir->temp0());
14975 CompletionKind kind = CompletionKind(lir->mir()->completionKind());
14977 IonCloseIterIC ic(liveRegs, iter, temp, kind);
14978 addIC(lir, allocateIC(ic));
14981 void CodeGenerator::visitOptimizeGetIteratorCache(
14982 LOptimizeGetIteratorCache* lir) {
14983 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14984 ValueOperand val = ToValue(lir, LOptimizeGetIteratorCache::ValueIndex);
14985 Register output = ToRegister(lir->output());
14986 Register temp = ToRegister(lir->temp0());
14988 IonOptimizeGetIteratorIC ic(liveRegs, val, output, temp);
14989 addIC(lir, allocateIC(ic));
14992 void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
14993 const Register obj = ToRegister(lir->iterator());
14994 const ValueOperand output = ToOutValue(lir);
14995 const Register temp = ToRegister(lir->temp0());
14997 masm.iteratorMore(obj, output, temp);
15000 void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
15001 ValueOperand input = ToValue(lir, LIsNoIterAndBranch::Input);
15002 Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
15003 Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
15005 masm.branchTestMagic(Assembler::Equal, input, ifTrue);
15007 if (!isNextBlock(lir->ifFalse()->lir())) {
15008 masm.jump(ifFalse);
15012 void CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) {
15013 const Register obj = ToRegister(lir->object());
15014 const Register temp0 = ToRegister(lir->temp0());
15015 const Register temp1 = ToRegister(lir->temp1());
15016 const Register temp2 = ToRegister(lir->temp2());
15018 masm.iteratorClose(obj, temp0, temp1, temp2);
15021 void CodeGenerator::visitArgumentsLength(LArgumentsLength* lir) {
15022 // read number of actual arguments from the JS frame.
15023 Register argc = ToRegister(lir->output());
15024 masm.loadNumActualArgs(FramePointer, argc);
// Load an actual or formal argument Value from the frame. In debug builds,
// assert the index is within |max(numFormals, numActuals)|.
void CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir) {
  ValueOperand result = ToOutValue(lir);
  const LAllocation* index = lir->index();
  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();

  // This instruction is used to access actual arguments and formal arguments.
  // The number of Values on the stack is |max(numFormals, numActuals)|, so we
  // assert |index < numFormals || index < numActuals| in debug builds.
  DebugOnly<size_t> numFormals = gen->outerInfo().script()->function()->nargs();

  if (index->isConstant()) {
    // Constant index: address the argument slot directly.
    int32_t i = index->toConstant()->toInt32();
#ifdef DEBUG
    if (uint32_t(i) >= numFormals) {
      Label ok;
      Register argc = result.scratchReg();
      masm.loadNumActualArgs(FramePointer, argc);
      masm.branch32(Assembler::Above, argc, Imm32(i), &ok);
      masm.assumeUnreachable("Invalid argument index");
      masm.bind(&ok);
#endif
    Address argPtr(FramePointer, sizeof(Value) * i + argvOffset);
    masm.loadValue(argPtr, result);
  } else {
    // Dynamic index: scale the register by the Value size.
    Register i = ToRegister(index);
#ifdef DEBUG
    Label ok;
    Register argc = result.scratchReg();
    masm.branch32(Assembler::Below, i, Imm32(numFormals), &ok);
    masm.loadNumActualArgs(FramePointer, argc);
    masm.branch32(Assembler::Above, argc, i, &ok);
    masm.assumeUnreachable("Invalid argument index");
    masm.bind(&ok);
#endif
    BaseValueIndex argPtr(FramePointer, i, argvOffset);
    masm.loadValue(argPtr, result);
// Load frame argument |index|, yielding |undefined| when |index >= length|.
// Bails out for negative indices.
void CodeGenerator::visitGetFrameArgumentHole(LGetFrameArgumentHole* lir) {
  ValueOperand result = ToOutValue(lir);
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();

  // In-bounds: load the argument Value from the frame (Spectre-safe check).
  Label outOfBounds, done;
  masm.spectreBoundsCheck32(index, length, spectreTemp, &outOfBounds);

  BaseValueIndex argPtr(FramePointer, index, argvOffset);
  masm.loadValue(argPtr, result);
  masm.jump(&done);

  // Out-of-bounds: negative indices bail out, otherwise return undefined.
  masm.bind(&outOfBounds);
  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
  masm.moveValue(UndefinedValue(), result);

  masm.bind(&done);
// Materialize a rest-parameter array. Fast path: inline-allocate a nursery
// array with a small fixed capacity (|arrayCapacity|) and copy up to that many
// trailing actuals into it. Slow path (allocation failure, no shape, or more
// than |arrayCapacity| rest arguments): call InitRestParameter in C++.
15088 void CodeGenerator::visitRest(LRest* lir) {
15089 Register numActuals = ToRegister(lir->numActuals());
15090 Register temp0 = ToRegister(lir->temp0());
15091 Register temp1 = ToRegister(lir->temp1());
15092 Register temp2 = ToRegister(lir->temp2());
15093 Register temp3 = ToRegister(lir->temp3());
15094 unsigned numFormals = lir->mir()->numFormals();
// Maximum number of rest elements handled by the inline path.
15096 constexpr uint32_t arrayCapacity = 2;
// If MIR provided a shape we can try to allocate the array inline; temp2
// holds the result object, or nullptr on allocation failure.
15098 if (Shape* shape = lir->mir()->shape()) {
15099 uint32_t arrayLength = 0;
15100 gc::AllocKind allocKind = GuessArrayGCKind(arrayCapacity);
15101 MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
15102 allocKind = ForegroundToBackgroundAllocKind(allocKind);
15103 MOZ_ASSERT(GetGCKindSlots(allocKind) ==
15104 arrayCapacity + ObjectElements::VALUES_PER_HEADER);
15106 Label joinAlloc, failAlloc;
15107 masm.movePtr(ImmGCPtr(shape), temp0);
15108 masm.createArrayWithFixedElements(temp2, temp0, temp1, InvalidReg,
15109 arrayLength, arrayCapacity, 0, 0,
15110 allocKind, gc::Heap::Default, &failAlloc);
15111 masm.jump(&joinAlloc);
15113 masm.bind(&failAlloc);
// Allocation failed: record nullptr so the VM-call path below runs.
15114 masm.movePtr(ImmPtr(nullptr), temp2);
15116 masm.bind(&joinAlloc);
15117 } else {
15118 masm.movePtr(ImmPtr(nullptr), temp2);
15121 // Set temp1 to the address of the first actual argument.
15122 size_t actualsOffset = JitFrameLayout::offsetOfActualArgs();
15123 masm.computeEffectiveAddress(Address(FramePointer, actualsOffset), temp1);
15125 // Compute array length: max(numActuals - numFormals, 0).
15126 Register lengthReg;
15127 if (numFormals) {
15128 lengthReg = temp0;
15129 Label emptyLength, joinLength;
15130 masm.branch32(Assembler::LessThanOrEqual, numActuals, Imm32(numFormals),
15131 &emptyLength);
15133 masm.move32(numActuals, lengthReg);
15134 masm.sub32(Imm32(numFormals), lengthReg);
15136 // Skip formal arguments.
15137 masm.addPtr(Imm32(sizeof(Value) * numFormals), temp1);
15139 masm.jump(&joinLength);
15141 masm.bind(&emptyLength);
15143 masm.move32(Imm32(0), lengthReg);
15145 // Leave temp1 pointed to the start of actuals() when the rest-array
15146 // length is zero. We don't use |actuals() + numFormals| because
15147 // |numFormals| can be any non-negative int32 value when this MRest was
15148 // created from scalar replacement optimizations. And it seems
15149 // questionable to compute a Value* pointer which points to who knows
15150 // where.
15152 masm.bind(&joinLength);
15153 } else {
15154 // Use numActuals directly when there are no formals.
15155 lengthReg = numActuals;
15158 // Try to initialize the array elements.
15159 Label vmCall, done;
15160 if (lir->mir()->shape()) {
15161 // Call into C++ if we failed to allocate an array or there are more than
15162 // |arrayCapacity| elements.
15163 masm.branchTestPtr(Assembler::Zero, temp2, temp2, &vmCall);
15164 masm.branch32(Assembler::Above, lengthReg, Imm32(arrayCapacity), &vmCall);
15166 // The array must be nursery allocated so no post barrier is needed.
15167 #ifdef DEBUG
15168 Label ok;
15169 masm.branchPtrInNurseryChunk(Assembler::Equal, temp2, temp3, &ok);
15170 masm.assumeUnreachable("Unexpected tenured object for LRest");
15171 masm.bind(&ok);
15172 #endif
15174 Label initialized;
15175 masm.branch32(Assembler::Equal, lengthReg, Imm32(0), &initialized);
15177 // Store length and initializedLength.
15178 Register elements = temp3;
15179 masm.loadPtr(Address(temp2, NativeObject::offsetOfElements()), elements);
15180 Address lengthAddr(elements, ObjectElements::offsetOfLength());
15181 Address initLengthAddr(elements,
15182 ObjectElements::offsetOfInitializedLength());
15183 masm.store32(lengthReg, lengthAddr);
15184 masm.store32(lengthReg, initLengthAddr);
15186 // Store either one or two elements. This may clobber lengthReg (temp0).
15187 static_assert(arrayCapacity == 2, "code handles 1 or 2 elements");
15188 Label storeFirst;
// Length is 1: skip the store of the second element.
15189 masm.branch32(Assembler::Equal, lengthReg, Imm32(1), &storeFirst);
15190 masm.storeValue(Address(temp1, sizeof(Value)),
15191 Address(elements, sizeof(Value)), temp0);
15192 masm.bind(&storeFirst);
15193 masm.storeValue(Address(temp1, 0), Address(elements, 0), temp0);
15195 // Done.
15196 masm.bind(&initialized);
15197 masm.movePtr(temp2, ReturnReg);
15198 masm.jump(&done);
// Slow path: InitRestParameter(cx, length, rest-args pointer, array-or-null).
15201 masm.bind(&vmCall);
15203 pushArg(temp2);
15204 pushArg(temp1);
15205 pushArg(lengthReg);
15207 using Fn =
15208 ArrayObject* (*)(JSContext*, uint32_t, Value*, Handle<ArrayObject*>);
15209 callVM<Fn, InitRestParameter>(lir);
15211 masm.bind(&done);
15214 // Create a stackmap from the given safepoint, with the structure:
15216 // <reg dump, if any>
15217 // | ++ <body (general spill)>
15218 // | | ++ <space for Frame>
15219 // | | ++ <inbound args>
15220 // | | |
15221 // Lowest Addr Highest Addr
15222 // |
15223 // framePushedAtStackMapBase
15225 // The caller owns the resulting stackmap. This assumes a grow-down stack.
15227 // For non-debug builds, if the stackmap would contain no pointers, no
15228 // stackmap is created, and nullptr is returned. For a debug build, a
15229 // stackmap is always created and returned.
15231 // Depending on the type of safepoint, the stackmap may need to account for
15232 // spilled registers. WasmSafepointKind::LirCall corresponds to LIR nodes where
15233 // isCall() == true, for which the register allocator will spill/restore all
15234 // live registers at the LIR level - in this case, the LSafepoint sees only live
15235 // values on the stack, never in registers. WasmSafepointKind::CodegenCall, on
15236 // the other hand, is for LIR nodes which may manually spill/restore live
15237 // registers in codegen, in which case the stackmap must account for this. Traps
15238 // also require tracking of live registers, but spilling is handled by the trap
15239 // mechanism.
// Build a wasm::StackMap describing where anyrefs and array-data pointers live
// for the given safepoint. See the layout comment immediately above this
// function. Returns false only on OOM; *result may legitimately stay nullptr
// in non-debug builds when there is nothing to trace.
15240 static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
15241 const RegisterOffsets& trapExitLayout,
15242 size_t trapExitLayoutNumWords,
15243 size_t nInboundStackArgBytes,
15244 wasm::StackMap** result) {
15245 // Ensure this is defined on all return paths.
15246 *result = nullptr;
15248 // The size of the wasm::Frame itself.
15249 const size_t nFrameBytes = sizeof(wasm::Frame);
15251 // This is the number of bytes spilled for live registers, outside of a trap.
15252 // For traps, trapExitLayout and trapExitLayoutNumWords will be used.
15253 const size_t nRegisterDumpBytes =
15254 MacroAssembler::PushRegsInMaskSizeInBytes(safepoint.liveRegs());
15256 // As mentioned above, for WasmSafepointKind::LirCall, register spills and
15257 // restores are handled at the LIR level and there should therefore be no live
15258 // registers to handle here.
15259 MOZ_ASSERT_IF(safepoint.wasmSafepointKind() == WasmSafepointKind::LirCall,
15260 nRegisterDumpBytes == 0);
15261 MOZ_ASSERT(nRegisterDumpBytes % sizeof(void*) == 0);
15263 // This is the number of bytes in the general spill area, below the Frame.
15264 const size_t nBodyBytes = safepoint.framePushedAtStackMapBase();
15266 // The stack map owns any alignment padding around inbound stack args.
15267 const size_t nInboundStackArgBytesAligned =
15268 wasm::AlignStackArgAreaSize(nInboundStackArgBytes);
15270 // This is the number of bytes in the general spill area, the Frame, and the
15271 // incoming args, but not including any register dump area.
15272 const size_t nNonRegisterBytes =
15273 nBodyBytes + nFrameBytes + nInboundStackArgBytesAligned;
15274 MOZ_ASSERT(nNonRegisterBytes % sizeof(void*) == 0);
15276 // This is the number of bytes in the register dump area, if any, below the
15277 // general spill area.
15278 const size_t nRegisterBytes =
15279 (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap)
15280 ? (trapExitLayoutNumWords * sizeof(void*))
15281 : nRegisterDumpBytes;
15283 // This is the total number of bytes covered by the map.
15284 const size_t nTotalBytes = nNonRegisterBytes + nRegisterBytes;
// Release builds skip the map entirely when it would contain no pointers;
// debug builds always create one so callers can assert its presence.
15286 #ifndef DEBUG
15287 bool needStackMap = !(safepoint.wasmAnyRefRegs().empty() &&
15288 safepoint.wasmAnyRefSlots().empty() &&
15289 safepoint.slotsOrElementsSlots().empty());
15291 // There are no references, and this is a non-debug build, so don't bother
15292 // building the stackmap.
15293 if (!needStackMap) {
15294 return true;
15296 #endif
15298 wasm::StackMap* stackMap =
15299 wasm::StackMap::create(nTotalBytes / sizeof(void*));
15300 if (!stackMap) {
15301 return false;
15303 if (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap) {
15304 stackMap->setExitStubWords(trapExitLayoutNumWords);
15307 // REG DUMP AREA, if any.
// regDumpWords ends up as the word-size of the register dump area; all
// body/arg slot indices below are offset by it.
15308 size_t regDumpWords = 0;
15309 const LiveGeneralRegisterSet wasmAnyRefRegs = safepoint.wasmAnyRefRegs();
15310 const LiveGeneralRegisterSet slotsOrElementsRegs =
15311 safepoint.slotsOrElementsRegs();
15312 const LiveGeneralRegisterSet refRegs(GeneralRegisterSet::Union(
15313 wasmAnyRefRegs.set(), slotsOrElementsRegs.set()));
15314 GeneralRegisterForwardIterator refRegsIter(refRegs);
15315 switch (safepoint.wasmSafepointKind()) {
15316 case WasmSafepointKind::LirCall:
15317 case WasmSafepointKind::CodegenCall: {
15318 size_t spilledNumWords = nRegisterDumpBytes / sizeof(void*);
15319 regDumpWords += spilledNumWords;
15321 for (; refRegsIter.more(); ++refRegsIter) {
15322 Register reg = *refRegsIter;
15323 size_t offsetFromSpillBase =
15324 safepoint.liveRegs().gprs().offsetOfPushedRegister(reg) /
15325 sizeof(void*);
15326 MOZ_ASSERT(0 < offsetFromSpillBase &&
15327 offsetFromSpillBase <= spilledNumWords);
15328 size_t index = spilledNumWords - offsetFromSpillBase;
15330 if (wasmAnyRefRegs.has(reg)) {
15331 stackMap->set(index, wasm::StackMap::AnyRef);
15332 } else {
15333 MOZ_ASSERT(slotsOrElementsRegs.has(reg));
15334 stackMap->set(index, wasm::StackMap::ArrayDataPointer);
15337 // Float and vector registers do not have to be handled; they cannot
15338 // contain wasm anyrefs, and they are spilled after general-purpose
15339 // registers. Gprs are therefore closest to the spill base and thus their
15340 // offset calculation does not need to account for other spills.
15341 } break;
15342 case WasmSafepointKind::Trap: {
15343 regDumpWords += trapExitLayoutNumWords;
15345 for (; refRegsIter.more(); ++refRegsIter) {
15346 Register reg = *refRegsIter;
15347 size_t offsetFromTop = trapExitLayout.getOffset(reg);
15349 // If this doesn't hold, the associated register wasn't saved by
15350 // the trap exit stub. Better to crash now than much later, in
15351 // some obscure place, and possibly with security consequences.
15352 MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);
15354 // offsetFromTop is an offset in words down from the highest
15355 // address in the exit stub save area. Switch it around to be an
15356 // offset up from the bottom of the (integer register) save area.
15357 size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;
15359 if (wasmAnyRefRegs.has(reg)) {
15360 stackMap->set(offsetFromBottom, wasm::StackMap::AnyRef);
15361 } else {
15362 MOZ_ASSERT(slotsOrElementsRegs.has(reg));
15363 stackMap->set(offsetFromBottom, wasm::StackMap::ArrayDataPointer);
15366 } break;
15367 default:
15368 MOZ_CRASH("unreachable");
15371 // BODY (GENERAL SPILL) AREA and FRAME and INCOMING ARGS
15372 // Deal with roots on the stack.
15373 const LSafepoint::SlotList& wasmAnyRefSlots = safepoint.wasmAnyRefSlots();
15374 for (SafepointSlotEntry wasmAnyRefSlot : wasmAnyRefSlots) {
15375 // The following needs to correspond with JitFrameLayout::slotRef
15376 // wasmAnyRefSlot.stack == 0 means the slot is in the args area
15377 if (wasmAnyRefSlot.stack) {
15378 // It's a slot in the body allocation, so .slot is interpreted
15379 // as an index downwards from the Frame*
15380 MOZ_ASSERT(wasmAnyRefSlot.slot <= nBodyBytes);
15381 uint32_t offsetInBytes = nBodyBytes - wasmAnyRefSlot.slot;
15382 MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
15383 stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
15384 wasm::StackMap::AnyRef);
15385 } else {
15386 // It's an argument slot
15387 MOZ_ASSERT(wasmAnyRefSlot.slot < nInboundStackArgBytes);
15388 uint32_t offsetInBytes = nBodyBytes + nFrameBytes + wasmAnyRefSlot.slot;
15389 MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
15390 stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
15391 wasm::StackMap::AnyRef);
15395 // Track array data pointers on the stack
15396 const LSafepoint::SlotList& slots = safepoint.slotsOrElementsSlots();
15397 for (SafepointSlotEntry slot : slots) {
15398 MOZ_ASSERT(slot.stack);
15400 // It's a slot in the body allocation, so .slot is interpreted
15401 // as an index downwards from the Frame*
15402 MOZ_ASSERT(slot.slot <= nBodyBytes);
15403 uint32_t offsetInBytes = nBodyBytes - slot.slot;
15404 MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
15405 stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
15406 wasm::StackMap::Kind::ArrayDataPointer);
15409 // Record in the map, how far down from the highest address the Frame* is.
15410 // Take the opportunity to check that we haven't marked any part of the
15411 // Frame itself as a pointer.
15412 stackMap->setFrameOffsetFromTop((nInboundStackArgBytesAligned + nFrameBytes) /
15413 sizeof(void*));
15414 #ifdef DEBUG
15415 for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
15416 MOZ_ASSERT(stackMap->get(stackMap->header.numMappedWords -
15417 stackMap->header.frameOffsetFromTop + i) ==
15418 wasm::StackMap::Kind::POD);
15420 #endif
15422 *result = stackMap;
15423 return true;
// Emit a complete wasm function: prologue (with stack-overflow check unless
// provably unnecessary), body, epilogue, and out-of-line code. Also converts
// every recorded safepoint into a wasm stackmap keyed by code offset.
// Returns false on OOM or validation failure (over-large frame).
15426 bool CodeGenerator::generateWasm(
15427 wasm::CallIndirectId callIndirectId, wasm::BytecodeOffset trapOffset,
15428 const wasm::ArgTypeVector& argTypes, const RegisterOffsets& trapExitLayout,
15429 size_t trapExitLayoutNumWords, wasm::FuncOffsets* offsets,
15430 wasm::StackMaps* stackMaps, wasm::Decoder* decoder) {
15431 AutoCreatedBy acb(masm, "CodeGenerator::generateWasm");
15433 JitSpew(JitSpew_Codegen, "# Emitting wasm code");
15435 size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(argTypes);
15436 inboundStackArgBytes_ = nInboundStackArgBytes;
15438 wasm::GenerateFunctionPrologue(masm, callIndirectId, mozilla::Nothing(),
15439 offsets);
15441 MOZ_ASSERT(masm.framePushed() == 0);
15443 // Very large frames are implausible, probably an attack.
15444 if (frameSize() > wasm::MaxFrameSize) {
15445 return decoder->fail(decoder->beginOffset(), "stack frame is too large");
15448 if (omitOverRecursedCheck()) {
15449 masm.reserveStack(frameSize());
15450 } else {
// Reserve the frame with an explicit stack check that can trap; the trap
// needs its own stackmap for the function-entry state.
15451 std::pair<CodeOffset, uint32_t> pair =
15452 masm.wasmReserveStackChecked(frameSize(), trapOffset);
15453 CodeOffset trapInsnOffset = pair.first;
15454 size_t nBytesReservedBeforeTrap = pair.second;
15456 wasm::StackMap* functionEntryStackMap = nullptr;
15457 if (!CreateStackMapForFunctionEntryTrap(
15458 argTypes, trapExitLayout, trapExitLayoutNumWords,
15459 nBytesReservedBeforeTrap, nInboundStackArgBytes,
15460 &functionEntryStackMap)) {
15461 return false;
15464 // In debug builds, we'll always have a stack map, even if there are no
15465 // refs to track.
15466 MOZ_ASSERT(functionEntryStackMap);
15468 if (functionEntryStackMap &&
15469 !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
15470 functionEntryStackMap)) {
// On failure the map isn't owned by stackMaps, so free it here.
15471 functionEntryStackMap->destroy();
15472 return false;
15476 MOZ_ASSERT(masm.framePushed() == frameSize());
15478 if (!generateBody()) {
15479 return false;
15482 masm.bind(&returnLabel_);
15483 wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);
15485 if (!generateOutOfLineCode()) {
15486 return false;
15489 masm.flush();
15490 if (masm.oom()) {
15491 return false;
15494 offsets->end = masm.currentOffset();
// Wasm compilation must not have produced any of the Ion-only side tables.
15496 MOZ_ASSERT(!masm.failureLabel()->used());
15497 MOZ_ASSERT(snapshots_.listSize() == 0);
15498 MOZ_ASSERT(snapshots_.RVATableSize() == 0);
15499 MOZ_ASSERT(recovers_.size() == 0);
15500 MOZ_ASSERT(graph.numConstants() == 0);
15501 MOZ_ASSERT(osiIndices_.empty());
15502 MOZ_ASSERT(icList_.empty());
15503 MOZ_ASSERT(safepoints_.size() == 0);
15504 MOZ_ASSERT(!scriptCounts_);
15506 // Convert the safepoints to stackmaps and add them to our running
15507 // collection thereof.
15508 for (CodegenSafepointIndex& index : safepointIndices_) {
15509 wasm::StackMap* stackMap = nullptr;
15510 if (!CreateStackMapFromLSafepoint(*index.safepoint(), trapExitLayout,
15511 trapExitLayoutNumWords,
15512 nInboundStackArgBytes, &stackMap)) {
15513 return false;
15516 // In debug builds, we'll always have a stack map.
15517 MOZ_ASSERT(stackMap);
15518 if (!stackMap) {
15519 continue;
15522 if (!stackMaps->add((uint8_t*)(uintptr_t)index.displacement(), stackMap)) {
15523 stackMap->destroy();
15524 return false;
15528 return true;
// Emit the full Ion-compiled script: prologue, body, epilogue, invalidate
// epilogue, and out-of-line code, interleaved with native=>bytecode map
// bookkeeping for the profiler. Returns false on OOM or any generation
// failure.
15531 bool CodeGenerator::generate() {
15532 AutoCreatedBy acb(masm, "CodeGenerator::generate");
15534 JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
15535 gen->outerInfo().script()->filename(),
15536 gen->outerInfo().script()->lineno(),
15537 gen->outerInfo().script()->column().oneOriginValue());
15539 // Initialize native code table with an entry to the start of
15540 // top-level script.
15541 InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
15542 jsbytecode* startPC = tree->script()->code();
15543 BytecodeSite* startSite = new (gen->alloc()) BytecodeSite(tree, startPC);
15544 if (!addNativeToBytecodeEntry(startSite)) {
15545 return false;
15548 if (!safepoints_.init(gen->alloc())) {
15549 return false;
15552 perfSpewer_.recordOffset(masm, "Prologue");
15553 if (!generatePrologue()) {
15554 return false;
15557 // Reset native => bytecode map table with top-level script and startPc.
15558 if (!addNativeToBytecodeEntry(startSite)) {
15559 return false;
15562 if (!generateBody()) {
15563 return false;
15566 // Reset native => bytecode map table with top-level script and startPc.
15567 if (!addNativeToBytecodeEntry(startSite)) {
15568 return false;
15571 perfSpewer_.recordOffset(masm, "Epilogue");
15572 if (!generateEpilogue()) {
15573 return false;
15576 // Reset native => bytecode map table with top-level script and startPc.
15577 if (!addNativeToBytecodeEntry(startSite)) {
15578 return false;
15581 perfSpewer_.recordOffset(masm, "InvalidateEpilogue");
15582 generateInvalidateEpilogue();
15584 // native => bytecode entries for OOL code will be added
15585 // by CodeGeneratorShared::generateOutOfLineCode
15586 perfSpewer_.recordOffset(masm, "OOLCode");
15587 if (!generateOutOfLineCode()) {
15588 return false;
15591 // Add terminal entry.
15592 if (!addNativeToBytecodeEntry(startSite)) {
15593 return false;
15596 // Dump Native to bytecode entries to spew.
15597 dumpNativeToBytecodeEntries();
15599 // We encode safepoints after the OSI-point offsets have been determined.
15600 if (!encodeSafepoints()) {
15601 return false;
15604 return !masm.oom();
// Register every script inlined into |script| with the JitZone so that
// invalidating an inlined script also invalidates this compilation.
// Returns false only on OOM; *isValid is set to false (with a true return)
// when the compilation must be discarded because an inlined script became a
// debuggee while we were compiling.
15607 static bool AddInlinedCompilations(JSContext* cx, HandleScript script,
15608 IonCompilationId compilationId,
15609 const WarpSnapshot* snapshot,
15610 bool* isValid) {
15611 MOZ_ASSERT(!*isValid);
15612 RecompileInfo recompileInfo(script, compilationId);
15614 JitZone* jitZone = cx->zone()->jitZone();
15616 for (const auto* scriptSnapshot : snapshot->scripts()) {
15617 JSScript* inlinedScript = scriptSnapshot->script();
15618 if (inlinedScript == script) {
15619 continue;
15622 // TODO(post-Warp): This matches FinishCompilation and is necessary to
15623 // ensure in-progress compilations are canceled when an inlined function
15624 // becomes a debuggee. See the breakpoint-14.js jit-test.
15625 // When TI is gone, try to clean this up by moving AddInlinedCompilations to
15626 // WarpOracle so that we can handle this as part of addPendingRecompile
15627 // instead of requiring this separate check.
15628 if (inlinedScript->isDebuggee()) {
15629 *isValid = false;
15630 return true;
15633 if (!jitZone->addInlinedCompilation(recompileInfo, inlinedScript)) {
15634 return false;
15638 *isValid = true;
15639 return true;
// For each fuse this compilation depends on, verify the fuse is still intact
// and register |script| as a dependency so a later fuse-pop invalidates it.
// Sets *isValid to false (and returns early) if any fuse has popped or a
// dependency registration fails.
15642 void CodeGenerator::validateAndRegisterFuseDependencies(JSContext* cx,
15643 HandleScript script,
15644 bool* isValid) {
15645 // No need to validate as we will toss this compilation anyhow.
15646 if (!*isValid) {
15647 return;
15650 for (auto dependency : fuseDependencies) {
15651 switch (dependency) {
15652 case FuseDependencyKind::HasSeenObjectEmulateUndefinedFuse: {
15653 auto& hasSeenObjectEmulateUndefinedFuse =
15654 cx->runtime()->hasSeenObjectEmulateUndefinedFuse.ref();
15656 if (!hasSeenObjectEmulateUndefinedFuse.intact()) {
15657 JitSpew(JitSpew_Codegen,
15658 "tossing compilation; hasSeenObjectEmulateUndefinedFuse fuse "
15659 "dependency no longer valid\n");
15660 *isValid = false;
15661 return;
15664 if (!hasSeenObjectEmulateUndefinedFuse.addFuseDependency(cx, script)) {
15665 JitSpew(JitSpew_Codegen,
15666 "tossing compilation; failed to register "
15667 "hasSeenObjectEmulateUndefinedFuse script dependency\n");
15668 *isValid = false;
15669 return;
15671 break;
15674 case FuseDependencyKind::OptimizeGetIteratorFuse: {
15675 auto& optimizeGetIteratorFuse =
15676 cx->realm()->realmFuses.optimizeGetIteratorFuse;
15677 if (!optimizeGetIteratorFuse.intact()) {
15678 JitSpew(JitSpew_Codegen,
15679 "tossing compilation; optimizeGetIteratorFuse fuse "
15680 "dependency no longer valid\n");
15681 *isValid = false;
15682 return;
15685 if (!optimizeGetIteratorFuse.addFuseDependency(cx, script)) {
15686 JitSpew(JitSpew_Codegen,
15687 "tossing compilation; failed to register "
15688 "optimizeGetIteratorFuse script dependency\n");
15689 *isValid = false;
15690 return;
15692 break;
15695 default:
15696 MOZ_CRASH("Unknown Dependency Kind");
// Finalize an Ion compilation on the main thread: validate the compilation
// is still sound (inlined scripts, fuses), allocate the IonScript, link the
// generated JitCode, patch in IC and nursery-object addresses, copy all side
// tables, and attach the IonScript to the script's JitScript.
// Returns true both on success and when the compilation is tossed as
// no-longer-valid; returns false only on error/OOM.
15701 bool CodeGenerator::link(JSContext* cx, const WarpSnapshot* snapshot) {
15702 AutoCreatedBy acb(masm, "CodeGenerator::link");
15704 // We cancel off-thread Ion compilations in a few places during GC, but if
15705 // this compilation was performed off-thread it will already have been
15706 // removed from the relevant lists by this point. Don't allow GC here.
15707 JS::AutoAssertNoGC nogc(cx);
15709 RootedScript script(cx, gen->outerInfo().script());
15710 MOZ_ASSERT(!script->hasIonScript());
15712 // Perform any read barriers which were skipped while compiling the
15713 // script, which may have happened off-thread.
15714 JitZone* jitZone = cx->zone()->jitZone();
15715 jitZone->performStubReadBarriers(zoneStubsToReadBarrier_);
15717 if (scriptCounts_ && !script->hasScriptCounts() &&
15718 !script->initScriptCounts(cx)) {
15719 return false;
15722 IonCompilationId compilationId =
15723 cx->runtime()->jitRuntime()->nextCompilationId();
15724 jitZone->currentCompilationIdRef().emplace(compilationId);
15725 auto resetCurrentId = mozilla::MakeScopeExit(
15726 [jitZone] { jitZone->currentCompilationIdRef().reset(); });
15728 // Record constraints. If an error occurred, returns false and potentially
15729 // prevent future compilations. Otherwise, if an invalidation occurred, then
15730 // skip the current compilation.
15731 bool isValid = false;
15733 // If an inlined script is invalidated (for example, by attaching
15734 // a debugger), we must also invalidate the parent IonScript.
15735 if (!AddInlinedCompilations(cx, script, compilationId, snapshot, &isValid)) {
15736 return false;
15739 // Validate fuse dependencies here; if a fuse has popped since we registered a
15740 // dependency then we need to toss this compilation as it assumes things which
15741 // are not valid.
15743 // Eagerly register a fuse dependency here too; this way if we OOM we can
15744 // instead simply remove the compilation and move on with our lives.
15745 validateAndRegisterFuseDependencies(cx, script, &isValid);
15747 // This compilation is no longer valid; don't proceed, but return true as this
15748 // isn't an error case either.
15749 if (!isValid) {
15750 return true;
15753 uint32_t argumentSlots = (gen->outerInfo().nargs() + 1) * sizeof(Value);
15755 size_t numNurseryObjects = snapshot->nurseryObjects().length();
15757 IonScript* ionScript = IonScript::New(
15758 cx, compilationId, graph.localSlotsSize(), argumentSlots, frameDepth_,
15759 snapshots_.listSize(), snapshots_.RVATableSize(), recovers_.size(),
15760 graph.numConstants(), numNurseryObjects, safepointIndices_.length(),
15761 osiIndices_.length(), icList_.length(), runtimeData_.length(),
15762 safepoints_.size());
15763 if (!ionScript) {
15764 return false;
15766 #ifdef DEBUG
15767 ionScript->setICHash(snapshot->icHash());
15768 #endif
15770 auto freeIonScript = mozilla::MakeScopeExit([&ionScript] {
15771 // Use js_free instead of IonScript::Destroy: the cache list is still
15772 // uninitialized.
15773 js_free(ionScript);
15776 Linker linker(masm);
15777 JitCode* code = linker.newCode(cx, CodeKind::Ion);
15778 if (!code) {
15779 return false;
15782 // Encode native to bytecode map if profiling is enabled.
15783 if (isProfilerInstrumentationEnabled()) {
15784 // Generate native-to-bytecode main table.
15785 IonEntry::ScriptList scriptList;
15786 if (!generateCompactNativeToBytecodeMap(cx, code, scriptList)) {
15787 return false;
15790 uint8_t* ionTableAddr =
15791 ((uint8_t*)nativeToBytecodeMap_.get()) + nativeToBytecodeTableOffset_;
15792 JitcodeIonTable* ionTable = (JitcodeIonTable*)ionTableAddr;
15794 // Construct the IonEntry that will go into the global table.
15795 auto entry = MakeJitcodeGlobalEntry<IonEntry>(
15796 cx, code, code->raw(), code->rawEnd(), std::move(scriptList), ionTable);
15797 if (!entry) {
15798 return false;
15800 (void)nativeToBytecodeMap_.release(); // Table is now owned by |entry|.
15802 // Add entry to the global table.
15803 JitcodeGlobalTable* globalTable =
15804 cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
15805 if (!globalTable->addEntry(std::move(entry))) {
15806 return false;
15809 // Mark the jitcode as having a bytecode map.
15810 code->setHasBytecodeMap();
15811 } else {
15812 // Add a dummy jitcodeGlobalTable entry.
15813 auto entry = MakeJitcodeGlobalEntry<DummyEntry>(cx, code, code->raw(),
15814 code->rawEnd());
15815 if (!entry) {
15816 return false;
15819 // Add entry to the global table.
15820 JitcodeGlobalTable* globalTable =
15821 cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
15822 if (!globalTable->addEntry(std::move(entry))) {
15823 return false;
15826 // Mark the jitcode as having a bytecode map.
15827 code->setHasBytecodeMap();
15830 ionScript->setMethod(code);
15832 // If the Gecko Profiler is enabled, mark IonScript as having been
15833 // instrumented accordingly.
15834 if (isProfilerInstrumentationEnabled()) {
15835 ionScript->setHasProfilingInstrumentation();
// Patch the placeholder (-1) values emitted during codegen with the real
// IonScript pointer, now that it exists.
15838 Assembler::PatchDataWithValueCheck(
15839 CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript),
15840 ImmPtr((void*)-1));
15842 for (CodeOffset offset : ionScriptLabels_) {
15843 Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, offset),
15844 ImmPtr(ionScript), ImmPtr((void*)-1));
15847 for (NurseryObjectLabel label : ionNurseryObjectLabels_) {
15848 void* entry = ionScript->addressOfNurseryObject(label.nurseryIndex);
15849 Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
15850 ImmPtr(entry), ImmPtr((void*)-1));
15853 // for generating inline caches during the execution.
15854 if (runtimeData_.length()) {
15855 ionScript->copyRuntimeData(&runtimeData_[0]);
15857 if (icList_.length()) {
15858 ionScript->copyICEntries(&icList_[0]);
15861 for (size_t i = 0; i < icInfo_.length(); i++) {
15862 IonIC& ic = ionScript->getICFromIndex(i);
15863 Assembler::PatchDataWithValueCheck(
15864 CodeLocationLabel(code, icInfo_[i].icOffsetForJump),
15865 ImmPtr(ic.codeRawPtr()), ImmPtr((void*)-1));
15866 Assembler::PatchDataWithValueCheck(
15867 CodeLocationLabel(code, icInfo_[i].icOffsetForPush), ImmPtr(&ic),
15868 ImmPtr((void*)-1));
15871 JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)", (void*)ionScript,
15872 (void*)code->raw());
15874 ionScript->setInvalidationEpilogueDataOffset(
15875 invalidateEpilogueData_.offset());
15876 if (jsbytecode* osrPc = gen->outerInfo().osrPc()) {
15877 ionScript->setOsrPc(osrPc);
15878 ionScript->setOsrEntryOffset(getOsrEntryOffset());
15880 ionScript->setInvalidationEpilogueOffset(invalidate_.offset());
15882 perfSpewer_.saveProfile(cx, script, code);
15884 #ifdef MOZ_VTUNE
15885 vtune::MarkScript(code, script, "ion");
15886 #endif
15888 // Set a Ion counter hint for this script.
15889 if (cx->runtime()->jitRuntime()->hasJitHintsMap()) {
15890 JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
15891 jitHints->recordIonCompilation(script);
15894 // for marking during GC.
15895 if (safepointIndices_.length()) {
15896 ionScript->copySafepointIndices(&safepointIndices_[0]);
15898 if (safepoints_.size()) {
15899 ionScript->copySafepoints(&safepoints_);
15902 // for recovering from an Ion Frame.
15903 if (osiIndices_.length()) {
15904 ionScript->copyOsiIndices(&osiIndices_[0]);
15906 if (snapshots_.listSize()) {
15907 ionScript->copySnapshots(&snapshots_);
15909 MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
15910 if (recovers_.size()) {
15911 ionScript->copyRecovers(&recovers_);
15913 if (graph.numConstants()) {
15914 const Value* vp = graph.constantPool();
15915 ionScript->copyConstants(vp);
// If any constant is a nursery-allocated GC thing, register the script in
// the store buffer once; one whole-cell entry covers all constants.
15916 for (size_t i = 0; i < graph.numConstants(); i++) {
15917 const Value& v = vp[i];
15918 if (v.isGCThing()) {
15919 if (gc::StoreBuffer* sb = v.toGCThing()->storeBuffer()) {
15920 sb->putWholeCell(script);
15921 break;
15927 // Attach any generated script counts to the script.
15928 if (IonScriptCounts* counts = extractScriptCounts()) {
15929 script->addIonCounts(counts);
15931 // WARNING: Code after this point must be infallible!
15933 // Copy the list of nursery objects. Note that the store buffer can add
15934 // HeapPtr edges that must be cleared in IonScript::Destroy. See the
15935 // infallibility warning above.
15936 const auto& nurseryObjects = snapshot->nurseryObjects();
15937 for (size_t i = 0; i < nurseryObjects.length(); i++) {
15938 ionScript->nurseryObjects()[i].init(nurseryObjects[i]);
15941 // Transfer ownership of the IonScript to the JitScript. At this point enough
15942 // of the IonScript must be initialized for IonScript::Destroy to work.
15943 freeIonScript.release();
15944 script->jitScript()->setIonScript(script, ionScript);
15946 return true;
15949 // An out-of-line path to convert a boxed int32 to either a float or double.
// Holds the LIR node so visitOutOfLineUnboxFloatingPoint can recover the
// operand, output register, and snapshot for the fallible case.
15950 class OutOfLineUnboxFloatingPoint : public OutOfLineCodeBase<CodeGenerator> {
15951 LUnboxFloatingPoint* unboxFloatingPoint_;
15953 public:
15954 explicit OutOfLineUnboxFloatingPoint(LUnboxFloatingPoint* unboxFloatingPoint)
15955 : unboxFloatingPoint_(unboxFloatingPoint) {}
15957 void accept(CodeGenerator* codegen) override {
15958 codegen->visitOutOfLineUnboxFloatingPoint(this);
15961 LUnboxFloatingPoint* unboxFloatingPoint() const {
15962 return unboxFloatingPoint_;
// Unbox a Value into a float/double register. Inline path handles boxed
// doubles; any other tag takes the out-of-line path, which converts int32 or
// bails out if the unbox is fallible.
15966 void CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir) {
15967 const ValueOperand box = ToValue(lir, LUnboxFloatingPoint::Input);
15968 const LDefinition* result = lir->output();
15970 // Out-of-line path to convert int32 to double or bailout
15971 // if this instruction is fallible.
15972 OutOfLineUnboxFloatingPoint* ool =
15973 new (alloc()) OutOfLineUnboxFloatingPoint(lir);
15974 addOutOfLineCode(ool, lir->mir());
15976 FloatRegister resultReg = ToFloatRegister(result);
15977 masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
15978 masm.unboxDouble(box, resultReg);
// A Float32 result is produced by narrowing the unboxed double.
15979 if (lir->type() == MIRType::Float32) {
15980 masm.convertDoubleToFloat32(resultReg, resultReg);
15982 masm.bind(ool->rejoin());
// Out-of-line continuation for visitUnboxFloatingPoint: the input was not a
// boxed double. If fallible, bail out unless it is an int32; otherwise (or on
// int32) convert the int32 payload to the requested floating-point type.
15985 void CodeGenerator::visitOutOfLineUnboxFloatingPoint(
15986 OutOfLineUnboxFloatingPoint* ool) {
15987 LUnboxFloatingPoint* ins = ool->unboxFloatingPoint();
15988 const ValueOperand value = ToValue(ins, LUnboxFloatingPoint::Input);
15990 if (ins->mir()->fallible()) {
15991 Label bail;
15992 masm.branchTestInt32(Assembler::NotEqual, value, &bail);
15993 bailoutFrom(&bail, ins->snapshot());
15995 masm.int32ValueToFloatingPoint(value, ToFloatRegister(ins->output()),
15996 ins->type());
15997 masm.jump(ool->rejoin());
// Emit a VM call to BindVarOperation to resolve the |var| binding target
// object on the environment chain.
16000 void CodeGenerator::visitCallBindVar(LCallBindVar* lir) {
16001 pushArg(ToRegister(lir->environmentChain()));
16003 using Fn = JSObject* (*)(JSContext*, JSObject*);
16004 callVM<Fn, BindVarOperation>(lir);
// obj[id] = value for megamorphic sites: probe the megamorphic cached
// set-slot path inline and fall back to a SetElementMegamorphic VM call
// on a miss. A cache hit still needs a post-write barrier check.
void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
  Register obj = ToRegister(lir->getOperand(0));
  ValueOperand idVal = ToValue(lir, LMegamorphicSetElement::IndexIndex);
  ValueOperand value = ToValue(lir, LMegamorphicSetElement::ValueIndex);

  Register temp0 = ToRegister(lir->temp0());
  // See comment in LIROps.yaml (x86 is short on registers)
#ifndef JS_CODEGEN_X86
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  Label cacheHit, done;
#ifdef JS_CODEGEN_X86
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#else
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, temp1, temp2, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#endif

  // Cache miss: call the VM. Arguments are pushed in reverse order of the
  // Fn signature below.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(ToValue(lir, LMegamorphicSetElement::ValueIndex));
  pushArg(ToValue(lir, LMegamorphicSetElement::IndexIndex));
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, js::jit::SetElementMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // Post-write barrier: only required when a nursery cell was stored into
  // a tenured object.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  saveVolatile(temp0);
  emitPostWriteBarrier(obj);
  restoreVolatile(temp0);

  masm.bind(&done);
}
// Load the handler object from a scripted proxy's reserved slots into
// |output|, bailing out if the slot does not contain an object.
void CodeGenerator::visitLoadScriptedProxyHandler(
    LLoadScriptedProxyHandler* ins) {
  Register obj = ToRegister(ins->getOperand(0));
  Register output = ToRegister(ins->output());

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);

  Label bail;
  Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
                                  ScriptedProxyHandler::HANDLER_EXTRA));
  masm.fallibleUnboxObject(handlerAddr, output, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
#ifdef JS_PUNBOX64
// Validate the result of a scripted-proxy get: if the (unboxed) target
// object is flagged as needing proxy result validation, call
// CheckProxyGetByValueResult out of line, which may overwrite |value|.
void CodeGenerator::visitCheckScriptedProxyGetResult(
    LCheckScriptedProxyGetResult* ins) {
  ValueOperand target = ToValue(ins, LCheckScriptedProxyGetResult::TargetIndex);
  ValueOperand value = ToValue(ins, LCheckScriptedProxyGetResult::ValueIndex);
  ValueOperand id = ToValue(ins, LCheckScriptedProxyGetResult::IdIndex);
  Register scratch = ToRegister(ins->temp0());
  Register scratch2 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckProxyGetByValueResult>(
      ins, ArgList(scratch, id, value), StoreValueTo(value));

  masm.unboxObject(target, scratch);
  masm.branchTestObjectNeedsProxyResultValidation(Assembler::NonZero, scratch,
                                                  scratch2, ool->entry());
  masm.bind(ool->rejoin());
}
#endif
16090 void CodeGenerator::visitIdToStringOrSymbol(LIdToStringOrSymbol* ins) {
16091 ValueOperand id = ToValue(ins, LIdToStringOrSymbol::IdIndex);
16092 ValueOperand output = ToOutValue(ins);
16093 Register scratch = ToRegister(ins->temp0());
16095 masm.moveValue(id, output);
16097 Label done, callVM;
16098 Label bail;
16100 ScratchTagScope tag(masm, output);
16101 masm.splitTagForTest(output, tag);
16102 masm.branchTestString(Assembler::Equal, tag, &done);
16103 masm.branchTestSymbol(Assembler::Equal, tag, &done);
16104 masm.branchTestInt32(Assembler::NotEqual, tag, &bail);
16107 masm.unboxInt32(output, scratch);
16109 using Fn = JSLinearString* (*)(JSContext*, int);
16110 OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
16111 ins, ArgList(scratch), StoreRegisterTo(output.scratchReg()));
16113 masm.lookupStaticIntString(scratch, output.scratchReg(),
16114 gen->runtime->staticStrings(), ool->entry());
16116 masm.bind(ool->rejoin());
16117 masm.tagValue(JSVAL_TYPE_STRING, output.scratchReg(), output);
16118 masm.bind(&done);
16120 bailoutFrom(&bail, ins->snapshot());
// Load a fixed slot of a native object as a boxed Value.
void CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();
  ValueOperand result = ToOutValue(ins);

  masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
}
// Load a fixed slot of a native object and unbox it (infallibly) to the
// typed register corresponding to the instruction's MIR type.
void CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();
  AnyRegister result = ToAnyRegister(ins->getDef(0));
  MIRType type = ins->mir()->type();

  masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)),
                        type, result);
}
// Load a Value from |src| and unbox it into the typed register |dest|.
// MIRType::Double is handled by ensureDouble, which jumps to |fail| when
// the value cannot be produced as a double. For the other types, a
// fallible load uses the type-checking unbox helpers (jumping to |fail|
// on a tag mismatch) while an infallible load unboxes unconditionally.
template <typename T>
static void EmitLoadAndUnbox(MacroAssembler& masm, const T& src, MIRType type,
                             bool fallible, AnyRegister dest, Label* fail) {
  if (type == MIRType::Double) {
    MOZ_ASSERT(dest.isFloat());
    masm.ensureDouble(src, dest.fpu(), fail);
    return;
  }
  if (fallible) {
    switch (type) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(src, dest.gpr(), fail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(src, dest.gpr(), fail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(src, dest.gpr(), fail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(src, dest.gpr(), fail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(src, dest.gpr(), fail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(src, dest.gpr(), fail);
        break;
      default:
        MOZ_CRASH("Unexpected MIRType");
    }
    return;
  }
  masm.loadUnboxedValue(src, type, dest);
}
// Load a fixed slot and unbox it, bailing out on a type mismatch when the
// MIR instruction is fallible.
void CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins) {
  const MLoadFixedSlotAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register input = ToRegister(ins->object());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address address(input, NativeObject::getFixedSlotOffset(slot));

  Label bail;
  EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Load a dynamic (out-of-line) slot and unbox it, bailing out on a type
// mismatch when the MIR instruction is fallible. |slots()| is the
// object's dynamic slots pointer.
void CodeGenerator::visitLoadDynamicSlotAndUnbox(
    LLoadDynamicSlotAndUnbox* ins) {
  const MLoadDynamicSlotAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register input = ToRegister(ins->slots());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address address(input, slot * sizeof(JS::Value));

  Label bail;
  EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Load a dense element (by constant or register index) and unbox it,
// bailing out on a type mismatch when the MIR instruction is fallible.
void CodeGenerator::visitLoadElementAndUnbox(LLoadElementAndUnbox* ins) {
  const MLoadElementAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register elements = ToRegister(ins->elements());
  AnyRegister result = ToAnyRegister(ins->output());

  Label bail;
  if (ins->index()->isConstant()) {
    // Constant index: the offset is computed at compile time.
    NativeObject::elementsSizeMustNotOverflow();
    int32_t offset = ToInt32(ins->index()) * sizeof(Value);
    Address address(elements, offset);
    EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  } else {
    BaseObjectElementIndex address(elements, ToRegister(ins->index()));
    EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  }

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
16232 class OutOfLineAtomizeSlot : public OutOfLineCodeBase<CodeGenerator> {
16233 LInstruction* lir_;
16234 Register stringReg_;
16235 Address slotAddr_;
16236 TypedOrValueRegister dest_;
16238 public:
16239 OutOfLineAtomizeSlot(LInstruction* lir, Register stringReg, Address slotAddr,
16240 TypedOrValueRegister dest)
16241 : lir_(lir), stringReg_(stringReg), slotAddr_(slotAddr), dest_(dest) {}
16243 void accept(CodeGenerator* codegen) final {
16244 codegen->visitOutOfLineAtomizeSlot(this);
16246 LInstruction* lir() const { return lir_; }
16247 Register stringReg() const { return stringReg_; }
16248 Address slotAddr() const { return slotAddr_; }
16249 TypedOrValueRegister dest() const { return dest_; }
// OOL body for emitMaybeAtomizeSlot: call js::AtomizeString in the VM and
// store the resulting atom back to the slot and destination.
void CodeGenerator::visitOutOfLineAtomizeSlot(OutOfLineAtomizeSlot* ool) {
  LInstruction* lir = ool->lir();
  Register stringReg = ool->stringReg();
  Address slotAddr = ool->slotAddr();
  TypedOrValueRegister dest = ool->dest();

  // This code is called with a non-atomic string in |stringReg|.
  // When it returns, |stringReg| contains an unboxed pointer to an
  // atomized version of that string, and |slotAddr| contains a
  // StringValue pointing to that atom. If |dest| is a ValueOperand,
  // it contains the same StringValue; otherwise we assert that |dest|
  // is |stringReg|.

  saveLive(lir);
  pushArg(stringReg);

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  callVM<Fn, js::AtomizeString>(lir);
  StoreRegisterTo(stringReg).generate(this);
  restoreLiveIgnore(lir, StoreRegisterTo(stringReg).clobbered());

  if (dest.hasValue()) {
    // Re-box the atom pointer as a StringValue in the destination.
    masm.moveValue(
        TypedOrValueRegister(MIRType::String, AnyRegister(stringReg)),
        dest.valueReg());
  } else {
    MOZ_ASSERT(dest.typedReg().gpr() == stringReg);
  }

  // Pre-barrier the old slot contents before overwriting with the atom.
  emitPreBarrier(slotAddr);
  masm.storeTypedOrValue(dest, slotAddr);

  // We don't need a post-barrier because atoms aren't nursery-allocated.
#ifdef DEBUG
  // We need a temp register for the nursery check. Spill something.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  allRegs.take(stringReg);
  Register temp = allRegs.takeAny();
  masm.push(temp);

  Label tenured;
  masm.branchPtrInNurseryChunk(Assembler::NotEqual, stringReg, temp, &tenured);
  masm.assumeUnreachable("AtomizeString returned a nursery pointer");
  masm.bind(&tenured);

  masm.pop(temp);
#endif

  masm.jump(ool->rejoin());
}
// Emit an inline atom check: if the string in |stringReg| lacks ATOM_BIT,
// jump to an OOL path that atomizes it and updates |slotAddr| and |dest|
// (see OutOfLineAtomizeSlot / visitOutOfLineAtomizeSlot).
void CodeGenerator::emitMaybeAtomizeSlot(LInstruction* ins, Register stringReg,
                                         Address slotAddr,
                                         TypedOrValueRegister dest) {
  OutOfLineAtomizeSlot* ool =
      new (alloc()) OutOfLineAtomizeSlot(ins, stringReg, slotAddr, dest);
  addOutOfLineCode(ool, ins->mirRaw()->toInstruction());
  masm.branchTest32(Assembler::Zero,
                    Address(stringReg, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), ool->entry());
  masm.bind(ool->rejoin());
}
// Load a fixed slot as a Value; if it holds a string, ensure the slot
// (and the result) hold an atom, atomizing out of line when needed.
void CodeGenerator::visitLoadFixedSlotAndAtomize(
    LLoadFixedSlotAndAtomize* ins) {
  Register obj = ToRegister(ins->getOperand(0));
  Register temp = ToRegister(ins->temp0());
  size_t slot = ins->mir()->slot();
  ValueOperand result = ToOutValue(ins);

  Address slotAddr(obj, NativeObject::getFixedSlotOffset(slot));
  masm.loadValue(slotAddr, result);

  Label notString;
  masm.branchTestString(Assembler::NotEqual, result, &notString);
  masm.unboxString(result, temp);
  emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
  masm.bind(&notString);
}
// Load a dynamic slot as a Value; if it holds a string, ensure the slot
// (and the result) hold an atom, atomizing out of line when needed.
void CodeGenerator::visitLoadDynamicSlotAndAtomize(
    LLoadDynamicSlotAndAtomize* ins) {
  ValueOperand result = ToOutValue(ins);
  Register temp = ToRegister(ins->temp0());
  Register base = ToRegister(ins->input());
  int32_t offset = ins->mir()->slot() * sizeof(js::Value);

  Address slotAddr(base, offset);
  masm.loadValue(slotAddr, result);

  Label notString;
  masm.branchTestString(Assembler::NotEqual, result, &notString);
  masm.unboxString(result, temp);
  emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
  masm.bind(&notString);
}
// Load a fixed slot, unbox it as a string (bailing out if fallible and
// not a string), then ensure both the slot and the result are atoms.
void CodeGenerator::visitLoadFixedSlotUnboxAndAtomize(
    LLoadFixedSlotUnboxAndAtomize* ins) {
  const MLoadFixedSlotAndUnbox* mir = ins->mir();
  MOZ_ASSERT(mir->type() == MIRType::String);
  Register input = ToRegister(ins->object());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address slotAddr(input, NativeObject::getFixedSlotOffset(slot));

  Label bail;
  EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
                   &bail);
  emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
                       TypedOrValueRegister(MIRType::String, result));

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Load a dynamic slot, unbox it as a string (bailing out if fallible and
// not a string), then ensure both the slot and the result are atoms.
void CodeGenerator::visitLoadDynamicSlotUnboxAndAtomize(
    LLoadDynamicSlotUnboxAndAtomize* ins) {
  const MLoadDynamicSlotAndUnbox* mir = ins->mir();
  MOZ_ASSERT(mir->type() == MIRType::String);
  Register input = ToRegister(ins->slots());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address slotAddr(input, slot * sizeof(JS::Value));

  Label bail;
  EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
                   &bail);
  emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
                       TypedOrValueRegister(MIRType::String, result));

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Change the object's shape (with pre-barrier on the old shape) and store
// the value into the newly-added fixed or dynamic slot. |temp0| is only
// used (and only allocated) for the dynamic-slot case.
void CodeGenerator::visitAddAndStoreSlot(LAddAndStoreSlot* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  const ValueOperand value = ToValue(ins, LAddAndStoreSlot::ValueIndex);
  const Register maybeTemp = ToTempRegisterOrInvalid(ins->temp0());

  Shape* shape = ins->mir()->shape();
  masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
    EmitPreBarrier(masm, addr, MIRType::Shape);
  });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.

  uint32_t offset = ins->mir()->slotOffset();
  if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
    Address slot(obj, offset);
    masm.storeValue(value, slot);
  } else {
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), maybeTemp);
    Address slot(maybeTemp, offset);
    masm.storeValue(value, slot);
  }
}
// Grow the object's dynamic slots via an ABI call to growSlotsPure
// (bailing out on failure), update the shape (with pre-barrier), and
// store the value in the new dynamic slot.
void CodeGenerator::visitAllocateAndStoreSlot(LAllocateAndStoreSlot* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  const ValueOperand value = ToValue(ins, LAllocateAndStoreSlot::ValueIndex);
  const Register temp0 = ToRegister(ins->temp0());
  const Register temp1 = ToRegister(ins->temp1());

  // |obj| and |value| must survive the ABI call; preserve them on the stack.
  masm.Push(obj);
  masm.Push(value);

  using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(obj);
  masm.move32(Imm32(ins->mir()->numNewSlots()), temp1);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, NativeObject::growSlotsPure>();
  masm.storeCallPointerResult(temp0);

  masm.Pop(value);
  masm.Pop(obj);

  // growSlotsPure returns false on allocation failure: bail out.
  bailoutIfFalseBool(temp0, ins->snapshot());

  masm.storeObjShape(ins->mir()->shape(), obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp0);
  Address slot(temp0, ins->mir()->slotOffset());
  masm.storeValue(value, slot);
}
// Add a property slot through the VM so the class's addProperty hook runs.
void CodeGenerator::visitAddSlotAndCallAddPropHook(
    LAddSlotAndCallAddPropHook* ins) {
  const Register obj = ToRegister(ins->object());
  const ValueOperand value =
      ToValue(ins, LAddSlotAndCallAddPropHook::ValueIndex);

  // Pushed in reverse order of the Fn signature below.
  pushArg(ImmGCPtr(ins->mir()->shape()));
  pushArg(value);
  pushArg(obj);

  using Fn =
      bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
  callVM<Fn, AddSlotAndCallAddPropHook>(ins);
}
// Store a boxed Value into a fixed slot, with a pre-barrier when the MIR
// instruction says the old value needs one.
void CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();

  const ValueOperand value = ToValue(ins, LStoreFixedSlotV::ValueIndex);

  Address address(obj, NativeObject::getFixedSlotOffset(slot));
  if (ins->mir()->needsBarrier()) {
    emitPreBarrier(address);
  }

  masm.storeValue(value, address);
}
// Store a typed (unboxed or constant) value into a fixed slot, with a
// pre-barrier when the MIR instruction says the old value needs one.
void CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();

  const LAllocation* value = ins->value();
  MIRType valueType = ins->mir()->value()->type();

  Address address(obj, NativeObject::getFixedSlotOffset(slot));
  if (ins->mir()->needsBarrier()) {
    emitPreBarrier(address);
  }

  // A constant allocation is stored as an immediate Value; otherwise box
  // the typed register on the fly.
  ConstantOrRegister nvalue =
      value->isConstant()
          ? ConstantOrRegister(value->toConstant()->toJSValue())
          : TypedOrValueRegister(valueType, ToAnyRegister(value));
  masm.storeConstantOrRegister(nvalue, address);
}
// Attach an IonGetNameIC for a name lookup on the environment chain.
void CodeGenerator::visitGetNameCache(LGetNameCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register envChain = ToRegister(ins->envObj());
  ValueOperand output = ToOutValue(ins);
  Register temp = ToRegister(ins->temp0());

  IonGetNameIC ic(liveRegs, envChain, output, temp);
  addIC(ins, allocateIC(ic));
}
16509 void CodeGenerator::addGetPropertyCache(LInstruction* ins,
16510 LiveRegisterSet liveRegs,
16511 TypedOrValueRegister value,
16512 const ConstantOrRegister& id,
16513 ValueOperand output) {
16514 CacheKind kind = CacheKind::GetElem;
16515 if (id.constant() && id.value().isString()) {
16516 JSString* idString = id.value().toString();
16517 if (idString->isAtom() && !idString->asAtom().isIndex()) {
16518 kind = CacheKind::GetProp;
16521 IonGetPropertyIC cache(kind, liveRegs, value, id, output);
16522 addIC(ins, allocateIC(cache));
16525 void CodeGenerator::addSetPropertyCache(LInstruction* ins,
16526 LiveRegisterSet liveRegs,
16527 Register objReg, Register temp,
16528 const ConstantOrRegister& id,
16529 const ConstantOrRegister& value,
16530 bool strict) {
16531 CacheKind kind = CacheKind::SetElem;
16532 if (id.constant() && id.value().isString()) {
16533 JSString* idString = id.value().toString();
16534 if (idString->isAtom() && !idString->asAtom().isIndex()) {
16535 kind = CacheKind::SetProp;
16538 IonSetPropertyIC cache(kind, liveRegs, objReg, temp, id, value, strict);
16539 addIC(ins, allocateIC(cache));
16542 ConstantOrRegister CodeGenerator::toConstantOrRegister(LInstruction* lir,
16543 size_t n, MIRType type) {
16544 if (type == MIRType::Value) {
16545 return TypedOrValueRegister(ToValue(lir, n));
16548 const LAllocation* value = lir->getOperand(n);
16549 if (value->isConstant()) {
16550 return ConstantOrRegister(value->toConstant()->toJSValue());
16553 return TypedOrValueRegister(type, ToAnyRegister(value));
// Gather operands and attach a GetProp/GetElem IC for this instruction.
void CodeGenerator::visitGetPropertyCache(LGetPropertyCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LGetPropertyCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCache::IdIndex,
                                               ins->mir()->idval()->type());
  ValueOperand output = ToOutValue(ins);
  addGetPropertyCache(ins, liveRegs, value, id, output);
}
// Attach an IonGetPropSuperIC for super.prop / super[elem] access. As in
// addGetPropertyCache, a constant non-index atom id selects the
// property (rather than element) cache kind.
void CodeGenerator::visitGetPropSuperCache(LGetPropSuperCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register obj = ToRegister(ins->obj());
  TypedOrValueRegister receiver =
      toConstantOrRegister(ins, LGetPropSuperCache::ReceiverIndex,
                           ins->mir()->receiver()->type())
          .reg();
  ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCache::IdIndex,
                                               ins->mir()->idval()->type());
  ValueOperand output = ToOutValue(ins);

  CacheKind kind = CacheKind::GetElemSuper;
  if (id.constant() && id.value().isString()) {
    JSString* idString = id.value().toString();
    if (idString->isAtom() && !idString->asAtom().isIndex()) {
      kind = CacheKind::GetPropSuper;
    }
  }

  IonGetPropSuperIC cache(kind, liveRegs, obj, receiver, id, output);
  addIC(ins, allocateIC(cache));
}
// Attach an IonBindNameIC for resolving a name's binding object on the
// environment chain.
void CodeGenerator::visitBindNameCache(LBindNameCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register envChain = ToRegister(ins->environmentChain());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  IonBindNameIC ic(liveRegs, envChain, output, temp);
  addIC(ins, allocateIC(ic));
}
// Attach an IonHasOwnIC for Object.hasOwnProperty-style checks.
void CodeGenerator::visitHasOwnCache(LHasOwnCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LHasOwnCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  TypedOrValueRegister id = toConstantOrRegister(ins, LHasOwnCache::IdIndex,
                                                 ins->mir()->idval()->type())
                                .reg();
  Register output = ToRegister(ins->output());

  IonHasOwnIC cache(liveRegs, value, id, output);
  addIC(ins, allocateIC(cache));
}
// Attach an IonCheckPrivateFieldIC for private-field presence checks.
void CodeGenerator::visitCheckPrivateFieldCache(LCheckPrivateFieldCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LCheckPrivateFieldCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  TypedOrValueRegister id =
      toConstantOrRegister(ins, LCheckPrivateFieldCache::IdIndex,
                           ins->mir()->idval()->type())
          .reg();
  Register output = ToRegister(ins->output());

  IonCheckPrivateFieldIC cache(liveRegs, value, id, output);
  addIC(ins, allocateIC(cache));
}
// Create a new private-name symbol via a VM call.
void CodeGenerator::visitNewPrivateName(LNewPrivateName* ins) {
  pushArg(ImmGCPtr(ins->mir()->name()));

  using Fn = JS::Symbol* (*)(JSContext*, Handle<JSAtom*>);
  callVM<Fn, NewPrivateName>(ins);
}
// delete obj.prop via a VM call; strictness selects the DelPropOperation
// template instantiation.
void CodeGenerator::visitCallDeleteProperty(LCallDeleteProperty* lir) {
  pushArg(ImmGCPtr(lir->mir()->name()));
  pushArg(ToValue(lir, LCallDeleteProperty::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, Handle<PropertyName*>, bool*);
  if (lir->mir()->strict()) {
    callVM<Fn, DelPropOperation<true>>(lir);
  } else {
    callVM<Fn, DelPropOperation<false>>(lir);
  }
}
// delete obj[index] via a VM call; strictness selects the
// DelElemOperation template instantiation.
void CodeGenerator::visitCallDeleteElement(LCallDeleteElement* lir) {
  pushArg(ToValue(lir, LCallDeleteElement::IndexIndex));
  pushArg(ToValue(lir, LCallDeleteElement::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
  if (lir->mir()->strict()) {
    callVM<Fn, DelElemOperation<true>>(lir);
  } else {
    callVM<Fn, DelElemOperation<false>>(lir);
  }
}
// Get a for-in iterator for |obj|: try the shape-keyed iterator cache
// inline, falling back to a GetIterator(WithIndices) VM call. On the
// inline path, mark the cached NativeIterator active, point it at |obj|,
// register it with the realm's enumerators list, and emit the post-write
// barrier needed for storing |obj| into the tenured iterator.
void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
  Register obj = ToRegister(lir->object());
  Register iterObj = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  Register temp3 = ToRegister(lir->temp2());

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
  OutOfLineCode* ool = (lir->mir()->wantsIndices())
                           ? oolCallVM<Fn, GetIteratorWithIndices>(
                                 lir, ArgList(obj), StoreRegisterTo(iterObj))
                           : oolCallVM<Fn, GetIterator>(
                                 lir, ArgList(obj), StoreRegisterTo(iterObj));

  masm.maybeLoadIteratorFromShape(obj, iterObj, temp, temp2, temp3,
                                  ool->entry());

  Register nativeIter = temp;
  masm.loadPrivate(
      Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
      nativeIter);

  if (lir->mir()->wantsIndices()) {
    // At least one consumer of the output of this iterator has been optimized
    // to use iterator indices. If the cached iterator doesn't include indices,
    // but it was marked to indicate that we can create them if needed, then we
    // do a VM call to replace the cached iterator with a fresh iterator
    // including indices.
    masm.branchNativeIteratorIndices(Assembler::Equal, nativeIter, temp2,
                                     NativeIteratorIndices::AvailableOnRequest,
                                     ool->entry());
  }

  Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
  masm.storePtr(
      obj, Address(nativeIter, NativeIterator::offsetOfObjectBeingIterated()));
  masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);

  Register enumeratorsAddr = temp2;
  masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
  masm.registerIterator(enumeratorsAddr, nativeIter, temp3);

  // Generate post-write barrier for storing to |iterObj->objectBeingIterated_|.
  // We already know that |iterObj| is tenured, so we only have to check |obj|.
  Label skipBarrier;
  masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);
  {
    LiveRegisterSet save = liveVolatileRegs(lir);
    save.takeUnchecked(temp);
    save.takeUnchecked(temp2);
    save.takeUnchecked(temp3);
    if (iterObj.volatile_()) {
      save.addUnchecked(iterObj);
    }

    masm.PushRegsInMask(save);
    emitPostWriteBarrier(iterObj);
    masm.PopRegsInMask(save);
  }
  masm.bind(&skipBarrier);

  masm.bind(ool->rejoin());
}
// Get a property iterator for an arbitrary Value via a VM call.
void CodeGenerator::visitValueToIterator(LValueToIterator* lir) {
  pushArg(ToValue(lir, LValueToIterator::ValueIndex));

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
  callVM<Fn, ValueToIterator>(lir);
}
// Branch to |ifTrue| when the iterator has valid indices AND the iterated
// object's shape still matches the iterator's recorded first shape;
// otherwise branch to |ifFalse|.
void CodeGenerator::visitIteratorHasIndicesAndBranch(
    LIteratorHasIndicesAndBranch* lir) {
  Register iterator = ToRegister(lir->iterator());
  Register object = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp());
  Register temp2 = ToRegister(lir->temp2());
  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());

  // Check that the iterator has indices available.
  Address nativeIterAddr(iterator,
                         PropertyIteratorObject::offsetOfIteratorSlot());
  masm.loadPrivate(nativeIterAddr, temp);
  masm.branchNativeIteratorIndices(Assembler::NotEqual, temp, temp2,
                                   NativeIteratorIndices::Valid, ifFalse);

  // Guard that the first shape stored in the iterator matches the current
  // shape of the iterated object.
  Address firstShapeAddr(temp, NativeIterator::offsetOfFirstShape());
  masm.loadPtr(firstShapeAddr, temp);
  masm.branchTestObjShape(Assembler::NotEqual, object, temp, temp2, object,
                          ifFalse);

  // Fall through when the true-successor is the next block.
  if (!isNextBlock(lir->ifTrue()->lir())) {
    masm.jump(ifTrue);
  }
}
// Load the property the iterator currently points at, using its recorded
// PropertyIndex: a dynamic slot, a fixed slot, or a dense element.
// DEBUG builds assert the kind and element bounds.
void CodeGenerator::visitLoadSlotByIteratorIndex(
    LLoadSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  ValueOperand result = ToOutValue(lir);

  // temp = current index, temp2 = PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.loadValue(BaseValueIndex(temp2, temp), result);
  masm.jump(&done);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.loadValue(BaseValueIndex(object, temp, sizeof(NativeObject)), result);
  masm.jump(&done);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  masm.loadValue(BaseObjectElementIndex(temp2, temp), result);
  masm.bind(&done);
}
// Store |value| into the property the iterator currently points at
// (dynamic slot, fixed slot, or dense element). All three paths compute
// the target address into |temp|, then share one pre-barriered store and
// a conditional post-write barrier.
void CodeGenerator::visitStoreSlotByIteratorIndex(
    LStoreSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  ValueOperand value = ToValue(lir, LStoreSlotByIteratorIndex::ValueIndex);
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // temp = current index, temp2 = PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done, doStore;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.computeEffectiveAddress(BaseValueIndex(temp2, temp), temp);
  masm.jump(&doStore);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.computeEffectiveAddress(
      BaseValueIndex(object, temp, sizeof(NativeObject)), temp);
  masm.jump(&doStore);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  BaseObjectElementIndex elementAddress(temp2, temp);
  masm.computeEffectiveAddress(elementAddress, temp);

  masm.bind(&doStore);
  Address storeAddress(temp, 0);
  emitPreBarrier(storeAddress);
  masm.storeValue(value, storeAddress);

  // Post-write barrier: only when a nursery cell is stored into a tenured
  // object.
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp2, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp2, &done);

  saveVolatile(temp2);
  emitPostWriteBarrier(object);
  restoreVolatile(temp2);

  masm.bind(&done);
}
// Gather operands and attach a SetProp/SetElem IC for this instruction.
void CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register objReg = ToRegister(ins->object());
  Register temp = ToRegister(ins->temp0());

  ConstantOrRegister id = toConstantOrRegister(ins, LSetPropertyCache::IdIndex,
                                               ins->mir()->idval()->type());
  ConstantOrRegister value = toConstantOrRegister(
      ins, LSetPropertyCache::ValueIndex, ins->mir()->value()->type());

  addSetPropertyCache(ins, liveRegs, objReg, temp, id, value,
                      ins->mir()->strict());
}
// throw value; via a VM call.
void CodeGenerator::visitThrow(LThrow* lir) {
  pushArg(ToValue(lir, LThrow::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue);
  callVM<Fn, js::ThrowOperation>(lir);
}
// Throw a value with an explicit stack via a VM call.
void CodeGenerator::visitThrowWithStack(LThrowWithStack* lir) {
  pushArg(ToValue(lir, LThrowWithStack::StackIndex));
  pushArg(ToValue(lir, LThrowWithStack::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue);
  callVM<Fn, js::ThrowWithStackOperation>(lir);
}
// Out-of-line path for LTypeOfV. Taken when the operand is an object,
// because an object may be callable (typeof == "function") or may emulate
// undefined; the slow logic lives in visitOutOfLineTypeOfV.
16898 class OutOfLineTypeOfV : public OutOfLineCodeBase<CodeGenerator> {
16899 LTypeOfV* ins_;
16901 public:
16902 explicit OutOfLineTypeOfV(LTypeOfV* ins) : ins_(ins) {}
16904 void accept(CodeGenerator* codegen) override {
16905 codegen->visitOutOfLineTypeOfV(this);
// The LIR instruction this OOL stub belongs to.
16907 LTypeOfV* ins() const { return ins_; }
// Materializes the JSType constant that `typeof` produces for a value of
// the given JSValueType. Only valid for types whose typeof result is
// statically known (objects are resolved elsewhere, since a callable or
// undefined-emulating object needs a runtime check).
16910 void CodeGenerator::emitTypeOfJSType(JSValueType type, Register output) {
16911 switch (type) {
16912 case JSVAL_TYPE_OBJECT:
16913 masm.move32(Imm32(JSTYPE_OBJECT), output);
16914 break;
// Int32 and Double both report "number".
16915 case JSVAL_TYPE_DOUBLE:
16916 case JSVAL_TYPE_INT32:
16917 masm.move32(Imm32(JSTYPE_NUMBER), output);
16918 break;
16919 case JSVAL_TYPE_BOOLEAN:
16920 masm.move32(Imm32(JSTYPE_BOOLEAN), output);
16921 break;
16922 case JSVAL_TYPE_UNDEFINED:
16923 masm.move32(Imm32(JSTYPE_UNDEFINED), output);
16924 break;
// Per ECMAScript, typeof null is "object".
16925 case JSVAL_TYPE_NULL:
16926 masm.move32(Imm32(JSTYPE_OBJECT), output);
16927 break;
16928 case JSVAL_TYPE_STRING:
16929 masm.move32(Imm32(JSTYPE_STRING), output);
16930 break;
16931 case JSVAL_TYPE_SYMBOL:
16932 masm.move32(Imm32(JSTYPE_SYMBOL), output);
16933 break;
16934 case JSVAL_TYPE_BIGINT:
16935 masm.move32(Imm32(JSTYPE_BIGINT), output);
16936 break;
16937 default:
16938 MOZ_CRASH("Unsupported JSValueType");
// Emits one tag test in the typeof dispatch chain. On a match, loads the
// corresponding JSType into |output| and jumps to |done|; on a mismatch,
// falls through (via &notMatch) so the caller can try the next type.
// Objects branch to |oolObject| instead, since their result needs a
// runtime check (callable / emulates-undefined).
16942 void CodeGenerator::emitTypeOfCheck(JSValueType type, Register tag,
16943 Register output, Label* done,
16944 Label* oolObject) {
16945 Label notMatch;
16946 switch (type) {
16947 case JSVAL_TYPE_OBJECT:
16948 // The input may be a callable object (result is "function") or
16949 // may emulate undefined (result is "undefined"). Use an OOL path.
16950 masm.branchTestObject(Assembler::Equal, tag, oolObject);
16951 return;
// Int32 and Double are unified: both match branchTestNumber.
16952 case JSVAL_TYPE_DOUBLE:
16953 case JSVAL_TYPE_INT32:
16954 masm.branchTestNumber(Assembler::NotEqual, tag, &notMatch);
16955 break;
16956 default:
16957 masm.branchTestType(Assembler::NotEqual, tag, type, &notMatch);
16958 break;
// Matched: load the constant result and exit the chain.
16961 emitTypeOfJSType(type, output);
16962 masm.jump(done);
16963 masm.bind(&notMatch);
// typeof on a boxed Value. Emits a chain of tag checks, testing the
// types observed by the baseline profile first (most frequent first),
// then any remaining types in a fixed default order. Objects go through
// the OutOfLineTypeOfV slow path.
16966 void CodeGenerator::visitTypeOfV(LTypeOfV* lir) {
16967 const ValueOperand value = ToValue(lir, LTypeOfV::InputIndex);
16968 Register output = ToRegister(lir->output());
// The tag can be extracted into the output register: it is only needed
// until the matching check writes the result.
16969 Register tag = masm.extractTag(value, output);
16971 Label done;
16973 auto* ool = new (alloc()) OutOfLineTypeOfV(lir);
16974 addOutOfLineCode(ool, lir->mir());
// NULL is listed separately from OBJECT even though both produce
// JSTYPE_OBJECT: they have distinct value tags.
16976 const std::initializer_list<JSValueType> defaultOrder = {
16977 JSVAL_TYPE_OBJECT, JSVAL_TYPE_DOUBLE, JSVAL_TYPE_UNDEFINED,
16978 JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN, JSVAL_TYPE_STRING,
16979 JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
16981 mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);
16983 // Generate checks for previously observed types first.
16984 // The TypeDataList is sorted by descending frequency.
16985 for (auto& observed : lir->mir()->observedTypes()) {
16986 JSValueType type = observed.type();
16988 // Unify number types.
16989 if (type == JSVAL_TYPE_INT32) {
16990 type = JSVAL_TYPE_DOUBLE;
16993 remaining -= type;
16995 emitTypeOfCheck(type, tag, output, &done, ool->entry());
16998 // Generate checks for remaining types.
16999 for (auto type : defaultOrder) {
17000 if (!remaining.contains(type)) {
17001 continue;
17003 remaining -= type;
17005 if (remaining.isEmpty() && type != JSVAL_TYPE_OBJECT) {
17006 // We can skip the check for the last remaining type, unless the type is
17007 // JSVAL_TYPE_OBJECT, which may have to go through the OOL path.
17008 #ifdef DEBUG
17009 emitTypeOfCheck(type, tag, output, &done, ool->entry());
17010 masm.assumeUnreachable("Unexpected Value type in visitTypeOfV");
17011 #else
17012 emitTypeOfJSType(type, output);
17013 #endif
17014 } else {
17015 emitTypeOfCheck(type, tag, output, &done, ool->entry());
17018 MOZ_ASSERT(remaining.isEmpty());
17020 masm.bind(&done);
17021 masm.bind(ool->rejoin());
// Computes typeof for an object in |obj|, leaving a JSType in |output|
// and jumping to |done|. masm.typeOfObject classifies the object into
// plain object / callable / undefined-emulating, or falls back to
// |slowCheck| for cases it cannot decide inline (e.g. proxies).
17024 void CodeGenerator::emitTypeOfObject(Register obj, Register output,
17025 Label* done) {
17026 Label slowCheck, isObject, isCallable, isUndefined;
17027 masm.typeOfObject(obj, output, &slowCheck, &isObject, &isCallable,
17028 &isUndefined);
// Callable objects report "function".
17030 masm.bind(&isCallable);
17031 masm.move32(Imm32(JSTYPE_FUNCTION), output);
17032 masm.jump(done);
// Objects that emulate undefined (e.g. document.all) report "undefined".
17034 masm.bind(&isUndefined);
17035 masm.move32(Imm32(JSTYPE_UNDEFINED), output);
17036 masm.jump(done);
17038 masm.bind(&isObject);
17039 masm.move32(Imm32(JSTYPE_OBJECT), output);
17040 masm.jump(done);
// Slow path: call js::TypeOfObject via the C++ ABI. Volatile registers
// must be preserved around the call (output is excluded from the save).
17042 masm.bind(&slowCheck);
17044 saveVolatile(output);
17045 using Fn = JSType (*)(JSObject*);
17046 masm.setupAlignedABICall();
17047 masm.passABIArg(obj);
17048 masm.callWithABI<Fn, js::TypeOfObject>();
17049 masm.storeCallInt32Result(output);
17050 restoreVolatile(output);
// OOL continuation of visitTypeOfV for object inputs: unboxes the object
// and runs the full object-typeof logic, then rejoins the fast path.
17053 void CodeGenerator::visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool) {
17054 LTypeOfV* ins = ool->ins();
17056 ValueOperand input = ToValue(ins, LTypeOfV::InputIndex);
17057 Register temp = ToTempUnboxRegister(ins->temp0());
17058 Register output = ToRegister(ins->output());
17060 Register obj = masm.extractObject(input, temp);
// emitTypeOfObject jumps to rejoin() on every inline outcome; the
// trailing jump covers the slow-call fall-through.
17061 emitTypeOfObject(obj, output, ool->rejoin());
17062 masm.jump(ool->rejoin());
// typeof for an input statically known to be an object: no tag dispatch
// needed, go straight to the object classification.
17065 void CodeGenerator::visitTypeOfO(LTypeOfO* lir) {
17066 Register obj = ToRegister(lir->object());
17067 Register output = ToRegister(lir->output());
17069 Label done;
17070 emitTypeOfObject(obj, output, &done);
17071 masm.bind(&done);
// Converts a JSType enum value (in |input|) into the interned typeof name
// atom, by indexing into the runtime's names table. Relies on the names
// being laid out contiguously starting at |undefined| in the same order
// as the JSType enum (the static_assert anchors the base).
17074 void CodeGenerator::visitTypeOfName(LTypeOfName* lir) {
17075 Register input = ToRegister(lir->input());
17076 Register output = ToRegister(lir->output());
17078 #ifdef DEBUG
17079 Label ok;
17080 masm.branch32(Assembler::Below, input, Imm32(JSTYPE_LIMIT), &ok);
17081 masm.assumeUnreachable("bad JSType");
17082 masm.bind(&ok);
17083 #endif
17085 static_assert(JSTYPE_UNDEFINED == 0);
17087 masm.movePtr(ImmPtr(&gen->runtime->names().undefined), output);
17088 masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
// OOL slow path for LTypeOfIsNonPrimitiveV (typeof v ==/!= "object" |
// "function" | "undefined" on a boxed Value); handled in
// visitOutOfLineTypeOfIsNonPrimitiveV.
17091 class OutOfLineTypeOfIsNonPrimitiveV : public OutOfLineCodeBase<CodeGenerator> {
17092 LTypeOfIsNonPrimitiveV* ins_;
17094 public:
17095 explicit OutOfLineTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* ins)
17096 : ins_(ins) {}
17098 void accept(CodeGenerator* codegen) override {
17099 codegen->visitOutOfLineTypeOfIsNonPrimitiveV(this);
17101 auto* ins() const { return ins_; }
// OOL slow path for LTypeOfIsNonPrimitiveO (same comparison as the V
// variant, but the input is statically an object); handled in
// visitOutOfLineTypeOfIsNonPrimitiveO.
17104 class OutOfLineTypeOfIsNonPrimitiveO : public OutOfLineCodeBase<CodeGenerator> {
17105 LTypeOfIsNonPrimitiveO* ins_;
17107 public:
17108 explicit OutOfLineTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* ins)
17109 : ins_(ins) {}
17111 void accept(CodeGenerator* codegen) override {
17112 codegen->visitOutOfLineTypeOfIsNonPrimitiveO(this);
17114 auto* ins() const { return ins_; }
// Slow path shared by both typeof-is OOL stubs: calls js::TypeOfObject
// via the C++ ABI, then compares the resulting JSType against the
// expected constant using the comparison op from the MIR node, leaving a
// boolean in |output|.
17117 void CodeGenerator::emitTypeOfIsObjectOOL(MTypeOfIs* mir, Register obj,
17118 Register output) {
17119 saveVolatile(output);
17120 using Fn = JSType (*)(JSObject*);
17121 masm.setupAlignedABICall();
17122 masm.passABIArg(obj);
17123 masm.callWithABI<Fn, js::TypeOfObject>();
17124 masm.storeCallInt32Result(output);
17125 restoreVolatile(output);
// JSType values are small non-negative integers, so an unsigned compare
// is used.
17127 auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
17128 masm.cmp32Set(cond, output, Imm32(mir->jstype()), output);
// OOL continuation for the boxed-Value typeof-is comparison: unbox the
// object, run the slow C++ classification + compare, and rejoin.
17131 void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV(
17132 OutOfLineTypeOfIsNonPrimitiveV* ool) {
17133 auto* ins = ool->ins();
17134 ValueOperand input = ToValue(ins, LTypeOfIsNonPrimitiveV::InputIndex);
17135 Register output = ToRegister(ins->output());
17136 Register temp = ToTempUnboxRegister(ins->temp0());
17138 Register obj = masm.extractObject(input, temp);
17140 emitTypeOfIsObjectOOL(ins->mir(), obj, output);
17142 masm.jump(ool->rejoin());
// OOL continuation for the object-typed typeof-is comparison: the input
// is already an object register, so call the slow path directly.
17145 void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO(
17146 OutOfLineTypeOfIsNonPrimitiveO* ool) {
17147 auto* ins = ool->ins();
17148 Register input = ToRegister(ins->input());
17149 Register output = ToRegister(ins->output());
17151 emitTypeOfIsObjectOOL(ins->mir(), input, output);
17153 masm.jump(ool->rejoin());
// Emits `typeof obj ==/!= <non-primitive type>` for an object input.
// Routes the three possible classifications (object / function /
// undefined-emulating) to |success| or |fail| depending on which JSType
// the MIR node compares against, then converts those labels into a
// boolean result honoring the comparison operator. Unclassifiable
// objects branch to |slowCheck| (the OOL C++ call).
17156 void CodeGenerator::emitTypeOfIsObject(MTypeOfIs* mir, Register obj,
17157 Register output, Label* success,
17158 Label* fail, Label* slowCheck) {
// Default every outcome to |fail|; the matching one is redirected below.
17159 Label* isObject = fail;
17160 Label* isFunction = fail;
17161 Label* isUndefined = fail;
17163 switch (mir->jstype()) {
17164 case JSTYPE_UNDEFINED:
17165 isUndefined = success;
17166 break;
17168 case JSTYPE_OBJECT:
17169 isObject = success;
17170 break;
17172 case JSTYPE_FUNCTION:
17173 isFunction = success;
17174 break;
17176 case JSTYPE_STRING:
17177 case JSTYPE_NUMBER:
17178 case JSTYPE_BOOLEAN:
17179 case JSTYPE_SYMBOL:
17180 case JSTYPE_BIGINT:
17181 #ifdef ENABLE_RECORD_TUPLE
17182 case JSTYPE_RECORD:
17183 case JSTYPE_TUPLE:
17184 #endif
17185 case JSTYPE_LIMIT:
17186 MOZ_CRASH("Primitive type");
17189 masm.typeOfObject(obj, output, slowCheck, isObject, isFunction, isUndefined);
17191 auto op = mir->jsop();
// Materialize the boolean: equality ops succeed on a match, inequality
// ops succeed on a mismatch.
17193 Label done;
17194 masm.bind(fail);
17195 masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
17196 masm.jump(&done);
17197 masm.bind(success);
17198 masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
17199 masm.bind(&done);
// `typeof v ==/!= "undefined"|"object"|"function"` on a boxed Value.
// Handles the primitive tags inline (undefined matches "undefined", null
// matches "object", everything non-object fails), then falls through to
// the object classification; proxies etc. go to the OOL stub.
17202 void CodeGenerator::visitTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* lir) {
17203 ValueOperand input = ToValue(lir, LTypeOfIsNonPrimitiveV::InputIndex);
17204 Register output = ToRegister(lir->output());
17205 Register temp = ToTempUnboxRegister(lir->temp0());
17207 auto* mir = lir->mir();
17209 auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveV(lir);
17210 addOutOfLineCode(ool, mir);
17212 Label success, fail;
17214 switch (mir->jstype()) {
17215 case JSTYPE_UNDEFINED: {
17216 ScratchTagScope tag(masm, input);
17217 masm.splitTagForTest(input, tag);
// The undefined tag matches directly; a non-object non-undefined value
// can never be "undefined" (only objects can emulate undefined).
17219 masm.branchTestUndefined(Assembler::Equal, tag, &success);
17220 masm.branchTestObject(Assembler::NotEqual, tag, &fail);
17221 break;
17224 case JSTYPE_OBJECT: {
17225 ScratchTagScope tag(masm, input);
17226 masm.splitTagForTest(input, tag);
// typeof null is "object", so the null tag is an immediate success.
17228 masm.branchTestNull(Assembler::Equal, tag, &success);
17229 masm.branchTestObject(Assembler::NotEqual, tag, &fail);
17230 break;
17233 case JSTYPE_FUNCTION: {
// Only objects can be "function": any other tag fails outright.
17234 masm.branchTestObject(Assembler::NotEqual, input, &fail);
17235 break;
17238 case JSTYPE_STRING:
17239 case JSTYPE_NUMBER:
17240 case JSTYPE_BOOLEAN:
17241 case JSTYPE_SYMBOL:
17242 case JSTYPE_BIGINT:
17243 #ifdef ENABLE_RECORD_TUPLE
17244 case JSTYPE_RECORD:
17245 case JSTYPE_TUPLE:
17246 #endif
17247 case JSTYPE_LIMIT:
17248 MOZ_CRASH("Primitive type");
// Remaining case: the value holds an object; classify it inline.
17251 Register obj = masm.extractObject(input, temp);
17253 emitTypeOfIsObject(mir, obj, output, &success, &fail, ool->entry());
17255 masm.bind(ool->rejoin());
// Same comparison as visitTypeOfIsNonPrimitiveV, but the input is
// statically an object, so the tag dispatch is unnecessary.
17258 void CodeGenerator::visitTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* lir) {
17259 Register input = ToRegister(lir->input());
17260 Register output = ToRegister(lir->output());
17262 auto* mir = lir->mir();
17264 auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveO(lir);
17265 addOutOfLineCode(ool, mir);
17267 Label success, fail;
17268 emitTypeOfIsObject(mir, input, output, &success, &fail, ool->entry());
17270 masm.bind(ool->rejoin());
// `typeof v ==/!= <primitive type name>` on a boxed Value: a single tag
// test per primitive type suffices, materialized directly as a boolean
// via the test*Set helpers. Non-primitive targets are handled by the
// NonPrimitive variants and must not reach here.
17273 void CodeGenerator::visitTypeOfIsPrimitive(LTypeOfIsPrimitive* lir) {
17274 ValueOperand input = ToValue(lir, LTypeOfIsPrimitive::InputIndex);
17275 Register output = ToRegister(lir->output());
17277 auto* mir = lir->mir();
17278 auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
17280 switch (mir->jstype()) {
17281 case JSTYPE_STRING:
17282 masm.testStringSet(cond, input, output);
17283 break;
// "number" matches both Int32 and Double tags via testNumberSet.
17284 case JSTYPE_NUMBER:
17285 masm.testNumberSet(cond, input, output);
17286 break;
17287 case JSTYPE_BOOLEAN:
17288 masm.testBooleanSet(cond, input, output);
17289 break;
17290 case JSTYPE_SYMBOL:
17291 masm.testSymbolSet(cond, input, output);
17292 break;
17293 case JSTYPE_BIGINT:
17294 masm.testBigIntSet(cond, input, output);
17295 break;
17297 case JSTYPE_UNDEFINED:
17298 case JSTYPE_OBJECT:
17299 case JSTYPE_FUNCTION:
17300 #ifdef ENABLE_RECORD_TUPLE
17301 case JSTYPE_RECORD:
17302 case JSTYPE_TUPLE:
17303 #endif
17304 case JSTYPE_LIMIT:
17305 MOZ_CRASH("Non-primitive type");
// Wraps a sync iterator into an async-from-sync iterator by calling the
// CreateAsyncFromSyncIterator VM function (args pushed in reverse order).
17309 void CodeGenerator::visitToAsyncIter(LToAsyncIter* lir) {
17310 pushArg(ToValue(lir, LToAsyncIter::NextMethodIndex));
17311 pushArg(ToRegister(lir->iterator()));
17313 using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
17314 callVM<Fn, js::CreateAsyncFromSyncIterator>(lir);
// ToPropertyKey via an inline cache: converts an arbitrary Value into a
// property key Value using IonToPropertyKeyIC.
17317 void CodeGenerator::visitToPropertyKeyCache(LToPropertyKeyCache* lir) {
17318 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
17319 ValueOperand input = ToValue(lir, LToPropertyKeyCache::InputIndex);
17320 ValueOperand output = ToOutValue(lir);
17322 IonToPropertyKeyIC ic(liveRegs, input, output);
17323 addIC(lir, allocateIC(ic));
// Loads a boxed Value from a dense-elements vector at a constant or
// register index. Bails out if the slot holds the magic "hole" value,
// since this LIR node does not handle holes.
17326 void CodeGenerator::visitLoadElementV(LLoadElementV* load) {
17327 Register elements = ToRegister(load->elements());
17328 const ValueOperand out = ToOutValue(load);
17330 if (load->index()->isConstant()) {
// The overflow guard makes the constant offset computation safe.
17331 NativeObject::elementsSizeMustNotOverflow();
17332 int32_t offset = ToInt32(load->index()) * sizeof(Value);
17333 masm.loadValue(Address(elements, offset), out);
17334 } else {
17335 masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index())),
17336 out);
// A magic value marks a hole: deoptimize rather than expose it.
17339 Label testMagic;
17340 masm.branchTestMagic(Assembler::Equal, out, &testMagic);
17341 bailoutFrom(&testMagic, load->snapshot());
// Hole-tolerant dense-element load: out-of-bounds indices and holes both
// yield |undefined|. When needsNegativeIntCheck() is set, a negative
// index bails out instead (it could shadow a named property).
17344 void CodeGenerator::visitLoadElementHole(LLoadElementHole* lir) {
17345 Register elements = ToRegister(lir->elements());
17346 Register index = ToRegister(lir->index());
17347 Register initLength = ToRegister(lir->initLength());
17348 const ValueOperand out = ToOutValue(lir);
17350 const MLoadElementHole* mir = lir->mir();
17352 // If the index is out of bounds, load |undefined|. Otherwise, load the
17353 // value.
17354 Label outOfBounds, done;
// Spectre-hardened bounds check against the initialized length.
17355 masm.spectreBoundsCheck32(index, initLength, out.scratchReg(), &outOfBounds);
17357 masm.loadValue(BaseObjectElementIndex(elements, index), out);
17359 // If the value wasn't a hole, we're done. Otherwise, we'll load undefined.
17360 masm.branchTestMagic(Assembler::NotEqual, out, &done);
17362 if (mir->needsNegativeIntCheck()) {
17363 Label loadUndefined;
17364 masm.jump(&loadUndefined);
17366 masm.bind(&outOfBounds);
// Negative indices must deoptimize; only non-negative OOB reads
// produce undefined.
17368 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
17370 masm.bind(&loadUndefined);
17371 } else {
17372 masm.bind(&outOfBounds);
17374 masm.moveValue(UndefinedValue(), out);
17376 masm.bind(&done);
// Loads a raw scalar from typed-array-style storage. loadFromTypedArray
// may emit a failure path (e.g. a Uint32 value that does not fit int32 —
// see the fail label); if one was emitted, wire it to a bailout.
17379 void CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) {
17380 Register elements = ToRegister(lir->elements());
17381 Register temp = ToTempRegisterOrInvalid(lir->temp0());
17382 AnyRegister out = ToAnyRegister(lir->output());
17384 const MLoadUnboxedScalar* mir = lir->mir();
17386 Scalar::Type storageType = mir->storageType();
17388 Label fail;
17389 if (lir->index()->isConstant()) {
17390 Address source =
17391 ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
17392 masm.loadFromTypedArray(storageType, source, out, temp, &fail);
17393 } else {
17394 BaseIndex source(elements, ToRegister(lir->index()),
17395 ScaleFromScalarType(storageType), mir->offsetAdjustment());
17396 masm.loadFromTypedArray(storageType, source, out, temp, &fail);
// Only bind a bailout if the macro assembler actually used the label.
17399 if (fail.used()) {
17400 bailoutFrom(&fail, lir->snapshot());
// Loads a 64-bit element (BigInt64/BigUint64 storage) and boxes it into
// a fresh BigInt object via emitCreateBigInt.
17404 void CodeGenerator::visitLoadUnboxedBigInt(LLoadUnboxedBigInt* lir) {
17405 Register elements = ToRegister(lir->elements());
17406 Register temp = ToRegister(lir->temp());
17407 Register64 temp64 = ToRegister64(lir->temp64());
17408 Register out = ToRegister(lir->output());
17410 const MLoadUnboxedScalar* mir = lir->mir();
17412 Scalar::Type storageType = mir->storageType();
17414 if (lir->index()->isConstant()) {
17415 Address source =
17416 ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
17417 masm.load64(source, temp64);
17418 } else {
17419 BaseIndex source(elements, ToRegister(lir->index()),
17420 ScaleFromScalarType(storageType), mir->offsetAdjustment());
17421 masm.load64(source, temp64);
// Allocate/initialize the BigInt result from the raw 64-bit payload.
17424 emitCreateBigInt(lir, storageType, temp64, out, temp);
// DataView element load. DataView accesses are unaligned and carry an
// explicit littleEndian flag, so the general path is: load the raw bytes
// into a GPR (or GPR pair), byte-swap if the requested endianness differs
// from the host's, then convert/box into the typed output. A direct
// typed load is used when no swap is needed and the hardware tolerates
// unaligned FP accesses.
17427 void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
17428 Register elements = ToRegister(lir->elements());
17429 const LAllocation* littleEndian = lir->littleEndian();
17430 Register temp = ToTempRegisterOrInvalid(lir->temp());
17431 Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
17432 AnyRegister out = ToAnyRegister(lir->output());
17434 const MLoadDataViewElement* mir = lir->mir();
17435 Scalar::Type storageType = mir->storageType();
// DataView offsets are byte offsets: scale is always TimesOne.
17437 BaseIndex source(elements, ToRegister(lir->index()), TimesOne);
// A compile-time-constant flag matching host endianness means no swap.
17439 bool noSwap = littleEndian->isConstant() &&
17440 ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();
17442 // Directly load if no byte swap is needed and the platform supports unaligned
17443 // accesses for the access. (Such support is assumed for integer types.)
17444 if (noSwap && (!Scalar::isFloatingType(storageType) ||
17445 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
17446 if (!Scalar::isBigIntType(storageType)) {
17447 Label fail;
17448 masm.loadFromTypedArray(storageType, source, out, temp, &fail);
17450 if (fail.used()) {
17451 bailoutFrom(&fail, lir->snapshot());
17453 } else {
17454 masm.load64(source, temp64);
17456 emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
17458 return;
17461 // Load the value into a gpr register.
17462 switch (storageType) {
17463 case Scalar::Int16:
17464 masm.load16UnalignedSignExtend(source, out.gpr());
17465 break;
17466 case Scalar::Uint16:
17467 masm.load16UnalignedZeroExtend(source, out.gpr());
17468 break;
17469 case Scalar::Int32:
17470 masm.load32Unaligned(source, out.gpr());
17471 break;
// Uint32 may produce a double result; stage in |temp| in that case.
17472 case Scalar::Uint32:
17473 masm.load32Unaligned(source, out.isFloat() ? temp : out.gpr());
17474 break;
17475 case Scalar::Float32:
17476 masm.load32Unaligned(source, temp);
17477 break;
17478 case Scalar::Float64:
17479 case Scalar::BigInt64:
17480 case Scalar::BigUint64:
17481 masm.load64Unaligned(source, temp64);
17482 break;
// 1-byte types never need this path (no alignment/swap concerns).
17483 case Scalar::Int8:
17484 case Scalar::Uint8:
17485 case Scalar::Uint8Clamped:
17486 default:
17487 MOZ_CRASH("Invalid typed array type");
17490 if (!noSwap) {
17491 // Swap the bytes in the loaded value.
17492 Label skip;
// With a runtime flag, skip the swap when it matches host endianness.
17493 if (!littleEndian->isConstant()) {
17494 masm.branch32(
17495 MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
17496 ToRegister(littleEndian), Imm32(0), &skip);
17499 switch (storageType) {
17500 case Scalar::Int16:
17501 masm.byteSwap16SignExtend(out.gpr());
17502 break;
17503 case Scalar::Uint16:
17504 masm.byteSwap16ZeroExtend(out.gpr());
17505 break;
17506 case Scalar::Int32:
17507 masm.byteSwap32(out.gpr());
17508 break;
17509 case Scalar::Uint32:
17510 masm.byteSwap32(out.isFloat() ? temp : out.gpr());
17511 break;
17512 case Scalar::Float32:
17513 masm.byteSwap32(temp);
17514 break;
17515 case Scalar::Float64:
17516 case Scalar::BigInt64:
17517 case Scalar::BigUint64:
17518 masm.byteSwap64(temp64);
17519 break;
17520 case Scalar::Int8:
17521 case Scalar::Uint8:
17522 case Scalar::Uint8Clamped:
17523 default:
17524 MOZ_CRASH("Invalid typed array type");
17527 if (skip.used()) {
17528 masm.bind(&skip);
17532 // Move the value into the output register.
17533 switch (storageType) {
17534 case Scalar::Int16:
17535 case Scalar::Uint16:
17536 case Scalar::Int32:
17537 break;
17538 case Scalar::Uint32:
17539 if (out.isFloat()) {
17540 masm.convertUInt32ToDouble(temp, out.fpu());
17541 } else {
17542 // Bail out if the value doesn't fit into a signed int32 value. This
17543 // is what allows MLoadDataViewElement to have a type() of
17544 // MIRType::Int32 for UInt32 array loads.
17545 bailoutTest32(Assembler::Signed, out.gpr(), out.gpr(), lir->snapshot());
17547 break;
// Floats are canonicalized so no signaling/non-canonical NaN escapes.
17548 case Scalar::Float32:
17549 masm.moveGPRToFloat32(temp, out.fpu());
17550 masm.canonicalizeFloat(out.fpu());
17551 break;
17552 case Scalar::Float64:
17553 masm.moveGPR64ToDouble(temp64, out.fpu());
17554 masm.canonicalizeDouble(out.fpu());
17555 break;
17556 case Scalar::BigInt64:
17557 case Scalar::BigUint64:
17558 emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
17559 break;
17560 case Scalar::Int8:
17561 case Scalar::Uint8:
17562 case Scalar::Uint8Clamped:
17563 default:
17564 MOZ_CRASH("Invalid typed array type");
// Typed-array load with out-of-bounds tolerance: indices past the length
// yield |undefined| instead of bailing. Uint32 values that do not fit
// int32 either become doubles (ForceDouble) or trigger the fail path.
17568 void CodeGenerator::visitLoadTypedArrayElementHole(
17569 LLoadTypedArrayElementHole* lir) {
17570 Register elements = ToRegister(lir->elements());
17571 Register index = ToRegister(lir->index());
17572 Register length = ToRegister(lir->length());
17573 const ValueOperand out = ToOutValue(lir);
17575 Register scratch = out.scratchReg();
17577 // Load undefined if index >= length.
17578 Label outOfBounds, done;
17579 masm.spectreBoundsCheckPtr(index, length, scratch, &outOfBounds);
17581 Scalar::Type arrayType = lir->mir()->arrayType();
17582 Label fail;
17583 BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
17584 MacroAssembler::Uint32Mode uint32Mode =
17585 lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
17586 : MacroAssembler::Uint32Mode::FailOnDouble;
17587 masm.loadFromTypedArray(arrayType, source, out, uint32Mode, out.scratchReg(),
17588 &fail);
17589 masm.jump(&done);
17591 masm.bind(&outOfBounds);
17592 masm.moveValue(UndefinedValue(), out);
// Bind a bailout only if loadFromTypedArray emitted a failure branch.
17594 if (fail.used()) {
17595 bailoutFrom(&fail, lir->snapshot());
17598 masm.bind(&done);
// BigInt64/BigUint64 variant of the hole-tolerant typed-array load:
// out-of-bounds yields |undefined|, otherwise the 64-bit payload is boxed
// into a BigInt and tagged into the output Value.
17601 void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
17602 LLoadTypedArrayElementHoleBigInt* lir) {
17603 Register elements = ToRegister(lir->elements());
17604 Register index = ToRegister(lir->index());
17605 Register length = ToRegister(lir->length());
17606 const ValueOperand out = ToOutValue(lir);
17608 Register temp = ToRegister(lir->temp());
17610 // On x86 there are not enough registers. In that case reuse the output
17611 // registers as temporaries.
17612 #ifdef JS_CODEGEN_X86
17613 MOZ_ASSERT(lir->temp64().isBogusTemp());
17614 Register64 temp64 = out.toRegister64();
17615 #else
17616 Register64 temp64 = ToRegister64(lir->temp64());
17617 #endif
17619 // Load undefined if index >= length.
17620 Label outOfBounds, done;
17621 masm.spectreBoundsCheckPtr(index, length, temp, &outOfBounds);
17623 Scalar::Type arrayType = lir->mir()->arrayType();
17624 BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
17625 masm.load64(source, temp64);
// Register pressure on x86: the BigInt result goes into |temp| with no
// spare temp; elsewhere the output scratch register holds it.
17627 #ifdef JS_CODEGEN_X86
17628 Register bigInt = temp;
17629 Register maybeTemp = InvalidReg;
17630 #else
17631 Register bigInt = out.scratchReg();
17632 Register maybeTemp = temp;
17633 #endif
17634 emitCreateBigInt(lir, arrayType, temp64, bigInt, maybeTemp);
17636 masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
17637 masm.jump(&done);
17639 masm.bind(&outOfBounds);
17640 masm.moveValue(UndefinedValue(), out);
17642 masm.bind(&done);
// Jump-table helper for switch lowering. Collects one Label per case
// (bound at each case's code) and one CodeLabel per table slot; the table
// itself is emitted either inline in the instruction stream
// (SwitchTableType::Inline, ARM) or in the out-of-line section
// (SwitchTableType::OutOfLine, other targets).
17645 template <SwitchTableType tableType>
17646 class OutOfLineSwitch : public OutOfLineCodeBase<CodeGenerator> {
17647 using LabelsVector = Vector<Label, 0, JitAllocPolicy>;
17648 using CodeLabelsVector = Vector<CodeLabel, 0, JitAllocPolicy>;
17649 LabelsVector labels_;
17650 CodeLabelsVector codeLabels_;
// Marks the start of the (out-of-line) table itself.
17651 CodeLabel start_;
17652 bool isOutOfLine_;
17654 void accept(CodeGenerator* codegen) override {
17655 codegen->visitOutOfLineSwitch(this);
17658 public:
17659 explicit OutOfLineSwitch(TempAllocator& alloc)
17660 : labels_(alloc), codeLabels_(alloc), isOutOfLine_(false) {}
17662 CodeLabel* start() { return &start_; }
17664 CodeLabelsVector& codeLabels() { return codeLabels_; }
17665 LabelsVector& labels() { return labels_; }
// Emits the indirect jump: index scaled by pointer size into the table,
// then a computed branch to the fetched address.
17667 void jumpToCodeEntries(MacroAssembler& masm, Register index, Register temp) {
17668 Register base;
17669 if (tableType == SwitchTableType::Inline) {
17670 #if defined(JS_CODEGEN_ARM)
17671 base = ::js::jit::pc;
17672 #else
17673 MOZ_CRASH("NYI: SwitchTableType::Inline");
17674 #endif
17675 } else {
17676 #if defined(JS_CODEGEN_ARM)
17677 MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
17678 #else
17679 masm.mov(start(), temp);
17680 base = temp;
17681 #endif
17683 BaseIndex jumpTarget(base, index, ScalePointer);
17684 masm.branchToComputedAddress(jumpTarget);
17687 // Register an entry in the switch table.
// Only write the pointer slot in the phase that actually emits the
// table (inline tables during main codegen, OOL tables during OOL).
17688 void addTableEntry(MacroAssembler& masm) {
17689 if ((!isOutOfLine_ && tableType == SwitchTableType::Inline) ||
17690 (isOutOfLine_ && tableType == SwitchTableType::OutOfLine)) {
17691 CodeLabel cl;
17692 masm.writeCodePointer(&cl);
17693 masm.propagateOOM(codeLabels_.append(std::move(cl)));
17696 // Register the code, to which the table will jump to.
17697 void addCodeEntry(MacroAssembler& masm) {
17698 Label entry;
17699 masm.bind(&entry);
17700 masm.propagateOOM(labels_.append(std::move(entry)));
17703 void setOutOfLine() { isOutOfLine_ = true; }
// Emits the out-of-line portion of a switch jump table: reserves the
// pointer-aligned table slots, then binds every slot's CodeLabel to the
// offset of its case body so the addresses get patched after codegen.
17706 template <SwitchTableType tableType>
17707 void CodeGenerator::visitOutOfLineSwitch(
17708 OutOfLineSwitch<tableType>* jumpTable) {
17709 jumpTable->setOutOfLine();
17710 auto& labels = jumpTable->labels();
17712 if (tableType == SwitchTableType::OutOfLine) {
17713 #if defined(JS_CODEGEN_ARM)
17714 MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
17715 #elif defined(JS_CODEGEN_NONE)
17716 MOZ_CRASH();
17717 #else
// On ARM64 the table must stay contiguous: forbid constant-pool dumps
// and nops within its span.
17719 # if defined(JS_CODEGEN_ARM64)
17720 AutoForbidPoolsAndNops afp(
17721 &masm,
17722 (labels.length() + 1) * (sizeof(void*) / vixl::kInstructionSize));
17723 # endif
17725 masm.haltingAlign(sizeof(void*));
17727 // Bind the address of the jump table and reserve the space for code
17728 // pointers to jump in the newly generated code.
17729 masm.bind(jumpTable->start());
17730 masm.addCodeLabel(*jumpTable->start());
17731 for (size_t i = 0, e = labels.length(); i < e; i++) {
17732 jumpTable->addTableEntry(masm);
17734 #endif
17737 // Register all reserved pointers of the jump table to target labels. The
17738 // entries of the jump table need to be absolute addresses and thus must be
17739 // patched after codegen is finished.
17740 auto& codeLabels = jumpTable->codeLabels();
17741 for (size_t i = 0, e = codeLabels.length(); i < e; i++) {
17742 auto& cl = codeLabels[i];
17743 cl.target()->bind(labels[i].offset());
17744 masm.addCodeLabel(cl);
// Explicit instantiations for both table placements, so the template
// definition can live in this translation unit.
17748 template void CodeGenerator::visitOutOfLineSwitch(
17749 OutOfLineSwitch<SwitchTableType::Inline>* jumpTable);
17750 template void CodeGenerator::visitOutOfLineSwitch(
17751 OutOfLineSwitch<SwitchTableType::OutOfLine>* jumpTable);
// Stores a value allocation into typed-array storage at |dest| (Address
// or BaseIndex): floats go through the float path, integers through the
// int path with a constant-vs-register distinction.
17753 template <typename T>
17754 static inline void StoreToTypedArray(MacroAssembler& masm,
17755 Scalar::Type writeType,
17756 const LAllocation* value, const T& dest) {
17757 if (writeType == Scalar::Float32 || writeType == Scalar::Float64) {
17758 masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
17759 } else {
17760 if (value->isConstant()) {
17761 masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
17762 } else {
17763 masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
// In-bounds scalar store into typed-array-style storage, with the index
// either folded into a constant address or scaled at runtime.
17768 void CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) {
17769 Register elements = ToRegister(lir->elements());
17770 const LAllocation* value = lir->value();
17772 const MStoreUnboxedScalar* mir = lir->mir();
17774 Scalar::Type writeType = mir->writeType();
17776 if (lir->index()->isConstant()) {
17777 Address dest = ToAddress(elements, lir->index(), writeType);
17778 StoreToTypedArray(masm, writeType, value, dest);
17779 } else {
17780 BaseIndex dest(elements, ToRegister(lir->index()),
17781 ScaleFromScalarType(writeType));
17782 StoreToTypedArray(masm, writeType, value, dest);
// Stores a BigInt into 64-bit typed-array storage: unbox the BigInt's
// 64-bit payload into |temp|, then write it at the computed slot.
17786 void CodeGenerator::visitStoreUnboxedBigInt(LStoreUnboxedBigInt* lir) {
17787 Register elements = ToRegister(lir->elements());
17788 Register value = ToRegister(lir->value());
17789 Register64 temp = ToRegister64(lir->temp());
17791 Scalar::Type writeType = lir->mir()->writeType();
17793 masm.loadBigInt64(value, temp);
17795 if (lir->index()->isConstant()) {
17796 Address dest = ToAddress(elements, lir->index(), writeType);
17797 masm.storeToTypedBigIntArray(writeType, temp, dest);
17798 } else {
17799 BaseIndex dest(elements, ToRegister(lir->index()),
17800 ScaleFromScalarType(writeType));
17801 masm.storeToTypedBigIntArray(writeType, temp, dest);
// DataView element store — mirror of visitLoadDataViewElement. The value
// is first moved into a GPR (or GPR pair), byte-swapped if the requested
// endianness differs from the host's, then written with an unaligned
// store. A direct typed store is used when no swap is needed and the
// hardware tolerates unaligned FP accesses.
17805 void CodeGenerator::visitStoreDataViewElement(LStoreDataViewElement* lir) {
17806 Register elements = ToRegister(lir->elements());
17807 const LAllocation* value = lir->value();
17808 const LAllocation* littleEndian = lir->littleEndian();
17809 Register temp = ToTempRegisterOrInvalid(lir->temp());
17810 Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
17812 const MStoreDataViewElement* mir = lir->mir();
17813 Scalar::Type writeType = mir->writeType();
// DataView offsets are byte offsets: scale is always TimesOne.
17815 BaseIndex dest(elements, ToRegister(lir->index()), TimesOne);
17817 bool noSwap = littleEndian->isConstant() &&
17818 ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();
17820 // Directly store if no byte swap is needed and the platform supports
17821 // unaligned accesses for the access. (Such support is assumed for integer
17822 // types.)
17823 if (noSwap && (!Scalar::isFloatingType(writeType) ||
17824 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
17825 if (!Scalar::isBigIntType(writeType)) {
17826 StoreToTypedArray(masm, writeType, value, dest);
17827 } else {
17828 masm.loadBigInt64(ToRegister(value), temp64);
17829 masm.storeToTypedBigIntArray(writeType, temp64, dest);
17831 return;
17834 // Load the value into a gpr register.
17835 switch (writeType) {
17836 case Scalar::Int16:
17837 case Scalar::Uint16:
17838 case Scalar::Int32:
17839 case Scalar::Uint32:
17840 if (value->isConstant()) {
17841 masm.move32(Imm32(ToInt32(value)), temp);
17842 } else {
17843 masm.move32(ToRegister(value), temp);
17845 break;
// Floats are canonicalized (when determinism requires it) before their
// bit pattern is moved into the GPR for swapping/storing.
17846 case Scalar::Float32: {
17847 FloatRegister fvalue = ToFloatRegister(value);
17848 masm.canonicalizeFloatIfDeterministic(fvalue);
17849 masm.moveFloat32ToGPR(fvalue, temp);
17850 break;
17852 case Scalar::Float64: {
17853 FloatRegister fvalue = ToFloatRegister(value);
17854 masm.canonicalizeDoubleIfDeterministic(fvalue);
17855 masm.moveDoubleToGPR64(fvalue, temp64);
17856 break;
17858 case Scalar::BigInt64:
17859 case Scalar::BigUint64:
17860 masm.loadBigInt64(ToRegister(value), temp64);
17861 break;
// 1-byte types never need this path (no alignment/swap concerns).
17862 case Scalar::Int8:
17863 case Scalar::Uint8:
17864 case Scalar::Uint8Clamped:
17865 default:
17866 MOZ_CRASH("Invalid typed array type");
17869 if (!noSwap) {
17870 // Swap the bytes in the loaded value.
17871 Label skip;
// With a runtime flag, skip the swap when it matches host endianness.
17872 if (!littleEndian->isConstant()) {
17873 masm.branch32(
17874 MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
17875 ToRegister(littleEndian), Imm32(0), &skip);
17878 switch (writeType) {
17879 case Scalar::Int16:
17880 masm.byteSwap16SignExtend(temp);
17881 break;
17882 case Scalar::Uint16:
17883 masm.byteSwap16ZeroExtend(temp);
17884 break;
17885 case Scalar::Int32:
17886 case Scalar::Uint32:
17887 case Scalar::Float32:
17888 masm.byteSwap32(temp);
17889 break;
17890 case Scalar::Float64:
17891 case Scalar::BigInt64:
17892 case Scalar::BigUint64:
17893 masm.byteSwap64(temp64);
17894 break;
17895 case Scalar::Int8:
17896 case Scalar::Uint8:
17897 case Scalar::Uint8Clamped:
17898 default:
17899 MOZ_CRASH("Invalid typed array type");
17902 if (skip.used()) {
17903 masm.bind(&skip);
17907 // Store the value into the destination.
17908 switch (writeType) {
17909 case Scalar::Int16:
17910 case Scalar::Uint16:
17911 masm.store16Unaligned(temp, dest);
17912 break;
17913 case Scalar::Int32:
17914 case Scalar::Uint32:
17915 case Scalar::Float32:
17916 masm.store32Unaligned(temp, dest);
17917 break;
17918 case Scalar::Float64:
17919 case Scalar::BigInt64:
17920 case Scalar::BigUint64:
17921 masm.store64Unaligned(temp64, dest);
17922 break;
17923 case Scalar::Int8:
17924 case Scalar::Uint8:
17925 case Scalar::Uint8Clamped:
17926 default:
17927 MOZ_CRASH("Invalid typed array type");
// Typed-array store that silently drops out-of-bounds writes (per the
// spec for detached/OOB typed-array sets): bounds-check first, skip the
// store if the index is past the length.
17931 void CodeGenerator::visitStoreTypedArrayElementHole(
17932 LStoreTypedArrayElementHole* lir) {
17933 Register elements = ToRegister(lir->elements());
17934 const LAllocation* value = lir->value();
17936 Scalar::Type arrayType = lir->mir()->arrayType();
17938 Register index = ToRegister(lir->index());
17939 const LAllocation* length = lir->length();
17940 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
17942 Label skip;
// The length may be a register or spilled to memory.
17943 if (length->isRegister()) {
17944 masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
17945 } else {
17946 masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
17949 BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
17950 StoreToTypedArray(masm, arrayType, value, dest);
17952 masm.bind(&skip);
// BigInt variant of StoreTypedArrayElementHole: unboxes the BigInt's 64-bit
// digits into a temp register pair before the (bounds-checked) store.
void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
    LStoreTypedArrayElementHoleBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp = ToRegister64(lir->temp());

  Scalar::Type arrayType = lir->mir()->arrayType();

  Register index = ToRegister(lir->index());
  const LAllocation* length = lir->length();
  // The 64-bit temp's scratch half doubles as the Spectre index-masking temp.
  Register spectreTemp = temp.scratchReg();

  Label skip;
  if (length->isRegister()) {
    masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
  } else {
    masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
  }

  // Load the BigInt's value as a 64-bit integer into |temp|.
  masm.loadBigInt64(value, temp);

  BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
  masm.storeToTypedBigIntArray(arrayType, temp, dest);

  masm.bind(&skip);
}
// Emit a memory fence of the kind requested by the LIR node.
void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
  masm.memoryBarrier(ins->type());
}
17986 void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
17987 Register value = ToRegister(lir->value());
17988 Register output = ToRegister(lir->output());
17990 masm.atomicIsLockFreeJS(value, output);
17993 void CodeGenerator::visitClampIToUint8(LClampIToUint8* lir) {
17994 Register output = ToRegister(lir->output());
17995 MOZ_ASSERT(output == ToRegister(lir->input()));
17996 masm.clampIntToUint8(output);
17999 void CodeGenerator::visitClampDToUint8(LClampDToUint8* lir) {
18000 FloatRegister input = ToFloatRegister(lir->input());
18001 Register output = ToRegister(lir->output());
18002 masm.clampDoubleToUint8(input, output);
// Clamp an arbitrary Value to uint8. Strings take an out-of-line VM call to
// StringToNumber first; values that cannot be coerced bail out.
void CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) {
  ValueOperand operand = ToValue(lir, LClampVToUint8::InputIndex);
  FloatRegister tempFloat = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  using Fn = bool (*)(JSContext*, JSString*, double*);
  OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
      lir, ArgList(output), StoreFloatRegisterTo(tempFloat));
  Label* stringEntry = oolString->entry();
  Label* stringRejoin = oolString->rejoin();

  // clampValueToUint8 handles int32/double/bool/null/undefined inline,
  // dispatches strings to |stringEntry|, and jumps to |fails| otherwise.
  Label fails;
  masm.clampValueToUint8(operand, stringEntry, stringRejoin, output, tempFloat,
                         output, &fails);

  bailoutFrom(&fails, lir->snapshot());
}
18023 void CodeGenerator::visitInCache(LInCache* ins) {
18024 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
18026 ConstantOrRegister key =
18027 toConstantOrRegister(ins, LInCache::LhsIndex, ins->mir()->key()->type());
18028 Register object = ToRegister(ins->rhs());
18029 Register output = ToRegister(ins->output());
18030 Register temp = ToRegister(ins->temp0());
18032 IonInIC cache(liveRegs, key, object, output, temp);
18033 addIC(ins, allocateIC(cache));
// Implement `index in array` for dense-element arrays: true iff the index is
// below the initialized length and the element is not a hole. Negative
// indices bail out (they may hit properties on the prototype chain).
void CodeGenerator::visitInArray(LInArray* lir) {
  const MInArray* mir = lir->mir();
  Register elements = ToRegister(lir->elements());
  Register initLength = ToRegister(lir->initLength());
  Register output = ToRegister(lir->output());

  Label falseBranch, done, trueBranch;

  if (lir->index()->isConstant()) {
    int32_t index = ToInt32(lir->index());

    if (index < 0) {
      // Constant negative index: always take the slow path via bailout.
      MOZ_ASSERT(mir->needsNegativeIntCheck());
      bailout(lir->snapshot());
      return;
    }

    // index >= initLength (unsigned compare) => not present.
    masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index),
                  &falseBranch);

    NativeObject::elementsSizeMustNotOverflow();
    Address address = Address(elements, index * sizeof(Value));
    masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
  } else {
    Register index = ToRegister(lir->index());

    // With a dynamic index, an out-of-range value may be negative; route the
    // failed length check through a sign check when required.
    Label negativeIntCheck;
    Label* failedInitLength = &falseBranch;
    if (mir->needsNegativeIntCheck()) {
      failedInitLength = &negativeIntCheck;
    }

    masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);

    BaseObjectElementIndex address(elements, index);
    masm.branchTestMagic(Assembler::Equal, address, &falseBranch);

    if (mir->needsNegativeIntCheck()) {
      masm.jump(&trueBranch);
      masm.bind(&negativeIntCheck);

      // Negative indices bail out; non-negative out-of-range ones are false.
      bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

      masm.jump(&falseBranch);
    }
  }

  masm.bind(&trueBranch);
  masm.move32(Imm32(1), output);
  masm.jump(&done);

  masm.bind(&falseBranch);
  masm.move32(Imm32(0), output);
  masm.bind(&done);
}
18092 void CodeGenerator::visitGuardElementNotHole(LGuardElementNotHole* lir) {
18093 Register elements = ToRegister(lir->elements());
18094 const LAllocation* index = lir->index();
18096 Label testMagic;
18097 if (index->isConstant()) {
18098 Address address(elements, ToInt32(index) * sizeof(js::Value));
18099 masm.branchTestMagic(Assembler::Equal, address, &testMagic);
18100 } else {
18101 BaseObjectElementIndex address(elements, ToRegister(index));
18102 masm.branchTestMagic(Assembler::Equal, address, &testMagic);
18104 bailoutFrom(&testMagic, lir->snapshot());
18107 void CodeGenerator::visitInstanceOfO(LInstanceOfO* ins) {
18108 Register protoReg = ToRegister(ins->rhs());
18109 emitInstanceOf(ins, protoReg);
18112 void CodeGenerator::visitInstanceOfV(LInstanceOfV* ins) {
18113 Register protoReg = ToRegister(ins->rhs());
18114 emitInstanceOf(ins, protoReg);
// Shared code generation for LInstanceOfO/LInstanceOfV when the function's
// .prototype is known to be the object in |protoReg|: walk the lhs's
// prototype chain inline, falling back to a VM call for lazy protos.
void CodeGenerator::emitInstanceOf(LInstruction* ins, Register protoReg) {
  // This path implements fun_hasInstance when the function's prototype is
  // known to be the object in protoReg

  Label done;
  Register output = ToRegister(ins->getDef(0));

  // If the lhs is a primitive, the result is false.
  Register objReg;
  if (ins->isInstanceOfV()) {
    Label isObject;
    ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
    masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
    masm.mov(ImmWord(0), output);
    masm.jump(&done);
    masm.bind(&isObject);
    // Note: unboxing may reuse |output| as the object register.
    objReg = masm.extractObject(lhsValue, output);
  } else {
    objReg = ToRegister(ins->toInstanceOfO()->lhs());
  }

  // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
  // This follows the main loop of js::IsPrototypeOf, though additionally breaks
  // out of the loop on Proxy::LazyProto.

  // Load the lhs's prototype.
  masm.loadObjProto(objReg, output);

  Label testLazy;
  {
    Label loopPrototypeChain;
    masm.bind(&loopPrototypeChain);

    // Test for the target prototype object.
    Label notPrototypeObject;
    masm.branchPtr(Assembler::NotEqual, output, protoReg, &notPrototypeObject);
    masm.mov(ImmWord(1), output);
    masm.jump(&done);
    masm.bind(&notPrototypeObject);

    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

    // Test for nullptr or Proxy::LazyProto
    masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);

    // Load the current object's prototype.
    masm.loadObjProto(output, output);

    masm.jump(&loopPrototypeChain);
  }

  // Make a VM call if an object with a lazy proto was found on the prototype
  // chain. This currently occurs only for cross compartment wrappers, which
  // we do not expect to be compared with non-wrapper functions from this
  // compartment. Otherwise, we stopped on a nullptr prototype and the output
  // register is already correct.

  using Fn = bool (*)(JSContext*, HandleObject, JSObject*, bool*);
  auto* ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(protoReg, objReg),
                                           StoreRegisterTo(output));

  // Regenerate the original lhs object for the VM call.
  Label regenerate, *lazyEntry;
  if (objReg != output) {
    lazyEntry = ool->entry();
  } else {
    // The loop clobbered |output| (== objReg), so re-extract the lhs object
    // before entering the OOL path.
    masm.bind(&regenerate);
    lazyEntry = &regenerate;
    if (ins->isInstanceOfV()) {
      ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
      objReg = masm.extractObject(lhsValue, output);
    } else {
      objReg = ToRegister(ins->toInstanceOfO()->lhs());
    }
    MOZ_ASSERT(objReg == output);
    masm.jump(ool->entry());
  }

  masm.bind(&testLazy);
  // Value 1 is Proxy::LazyProto; nullptr (0) falls through with output == 0.
  masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
18202 void CodeGenerator::visitInstanceOfCache(LInstanceOfCache* ins) {
18203 // The Lowering ensures that RHS is an object, and that LHS is a value.
18204 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
18205 TypedOrValueRegister lhs =
18206 TypedOrValueRegister(ToValue(ins, LInstanceOfCache::LHS));
18207 Register rhs = ToRegister(ins->rhs());
18208 Register output = ToRegister(ins->output());
18210 IonInstanceOfIC ic(liveRegs, lhs, rhs, output);
18211 addIC(ins, allocateIC(ic));
// Call a DOM getter through its JSJitGetterOp. Builds a fake exit frame, may
// switch realms, and optionally short-circuits through a reserved slot when
// the value may be cached there.
void CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins) {
  const Register JSContextReg = ToRegister(ins->getJSContextReg());
  const Register ObjectReg = ToRegister(ins->getObjectReg());
  const Register PrivateReg = ToRegister(ins->getPrivReg());
  const Register ValueReg = ToRegister(ins->getValueReg());

  Label haveValue;
  if (ins->mir()->valueMayBeInSlot()) {
    size_t slot = ins->mir()->domMemberSlotIndex();
    // It's a bit annoying to redo these slot calculations, which duplicate
    // LSlots and a few other things like that, but I'm not sure there's a
    // way to reuse those here.
    //
    // If this ever gets fixed to work with proxies (by not assuming that
    // reserved slot indices, which is what domMemberSlotIndex() returns,
    // match fixed slot indices), we can reenable MGetDOMProperty for
    // proxies in IonBuilder.
    if (slot < NativeObject::MAX_FIXED_SLOTS) {
      masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
                     JSReturnOperand);
    } else {
      // It's a dynamic slot.
      slot -= NativeObject::MAX_FIXED_SLOTS;
      // Use PrivateReg as a scratch register for the slots pointer.
      masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
                   PrivateReg);
      masm.loadValue(Address(PrivateReg, slot * sizeof(js::Value)),
                     JSReturnOperand);
    }
    // A non-undefined cached slot value lets us skip the getter call.
    masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
  }

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Make space for the outparam. Pre-initialize it to UndefinedValue so we
  // can trace it at GC time.
  masm.Push(UndefinedValue());
  // We pass the pointer to our out param as an instance of
  // JSJitGetterCallArgs, since on the binary level it's the same thing.
  static_assert(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
  masm.moveStackPtrTo(ValueReg);

  masm.Push(ObjectReg);

  LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());

  // Rooting will happen at GC time.
  masm.moveStackPtrTo(ObjectReg);

  Realm* getterRealm = ins->mir()->getterRealm();
  if (gen->realm->realmPtr() != getterRealm) {
    // We use JSContextReg as scratch register here.
    masm.switchToRealm(getterRealm, JSContextReg);
  }

  uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
  masm.loadJSContext(JSContextReg);
  masm.enterFakeExitFrame(JSContextReg, JSContextReg,
                          ExitFrameType::IonDOMGetter);

  markSafepointAt(safepointOffset, ins);

  masm.setupAlignedABICall();
  masm.loadJSContext(JSContextReg);
  masm.passABIArg(JSContextReg);
  masm.passABIArg(ObjectReg);
  masm.passABIArg(PrivateReg);
  masm.passABIArg(ValueReg);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitGetterOp>(ins->mir()->fun()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (ins->mir()->isInfallible()) {
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    // Fallible getters signal failure through a false return value.
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  // Switch back to the current realm if needed. Note: if the getter threw an
  // exception, the exception handler will do this.
  if (gen->realm->realmPtr() != getterRealm) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "Clobbering ReturnReg should not affect the return value");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && ins->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  masm.adjustStack(IonDOMExitFrameLayout::Size());

  masm.bind(&haveValue);

  MOZ_ASSERT(masm.framePushed() == initialStack);
}
18322 void CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins) {
18323 // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
18324 // use an LLoadFixedSlotV or some subclass of it for this case: that would
18325 // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
18326 // we'd have to duplicate a bunch of stuff we now get for free from
18327 // MGetDOMProperty.
18329 // If this ever gets fixed to work with proxies (by not assuming that
18330 // reserved slot indices, which is what domMemberSlotIndex() returns,
18331 // match fixed slot indices), we can reenable MGetDOMMember for
18332 // proxies in IonBuilder.
18333 Register object = ToRegister(ins->object());
18334 size_t slot = ins->mir()->domMemberSlotIndex();
18335 ValueOperand result = ToOutValue(ins);
18337 masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
18338 result);
18341 void CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins) {
18342 // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
18343 // use an LLoadFixedSlotT or some subclass of it for this case: that would
18344 // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
18345 // we'd have to duplicate a bunch of stuff we now get for free from
18346 // MGetDOMProperty.
18348 // If this ever gets fixed to work with proxies (by not assuming that
18349 // reserved slot indices, which is what domMemberSlotIndex() returns,
18350 // match fixed slot indices), we can reenable MGetDOMMember for
18351 // proxies in IonBuilder.
18352 Register object = ToRegister(ins->object());
18353 size_t slot = ins->mir()->domMemberSlotIndex();
18354 AnyRegister result = ToAnyRegister(ins->getDef(0));
18355 MIRType type = ins->mir()->type();
18357 masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
18358 type, result);
// Call a DOM setter through its JSJitSetterOp. Mirrors visitGetDOMProperty:
// builds a fake exit frame, may switch realms, and treats a false return as
// an exception.
void CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins) {
  const Register JSContextReg = ToRegister(ins->getJSContextReg());
  const Register ObjectReg = ToRegister(ins->getObjectReg());
  const Register PrivateReg = ToRegister(ins->getPrivReg());
  const Register ValueReg = ToRegister(ins->getValueReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Push the argument. Rooting will happen at GC time.
  ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
  masm.Push(argVal);
  // We pass the pointer to our out param as an instance of
  // JSJitGetterCallArgs, since on the binary level it's the same thing.
  static_assert(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
  masm.moveStackPtrTo(ValueReg);

  masm.Push(ObjectReg);

  LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());

  // Rooting will happen at GC time.
  masm.moveStackPtrTo(ObjectReg);

  Realm* setterRealm = ins->mir()->setterRealm();
  if (gen->realm->realmPtr() != setterRealm) {
    // We use JSContextReg as scratch register here.
    masm.switchToRealm(setterRealm, JSContextReg);
  }

  uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
  masm.loadJSContext(JSContextReg);
  masm.enterFakeExitFrame(JSContextReg, JSContextReg,
                          ExitFrameType::IonDOMSetter);

  markSafepointAt(safepointOffset, ins);

  masm.setupAlignedABICall();
  masm.loadJSContext(JSContextReg);
  masm.passABIArg(JSContextReg);
  masm.passABIArg(ObjectReg);
  masm.passABIArg(PrivateReg);
  masm.passABIArg(ValueReg);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitSetterOp>(ins->mir()->fun()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Setters signal failure through a false return value.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Switch back to the current realm if needed. Note: if the setter threw an
  // exception, the exception handler will do this.
  if (gen->realm->realmPtr() != setterRealm) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  masm.adjustStack(IonDOMExitFrameLayout::Size());

  MOZ_ASSERT(masm.framePushed() == initialStack);
}
18423 void CodeGenerator::visitLoadDOMExpandoValue(LLoadDOMExpandoValue* ins) {
18424 Register proxy = ToRegister(ins->proxy());
18425 ValueOperand out = ToOutValue(ins);
18427 masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
18428 out.scratchReg());
18429 masm.loadValue(Address(out.scratchReg(),
18430 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
18431 out);
18434 void CodeGenerator::visitLoadDOMExpandoValueGuardGeneration(
18435 LLoadDOMExpandoValueGuardGeneration* ins) {
18436 Register proxy = ToRegister(ins->proxy());
18437 ValueOperand out = ToOutValue(ins);
18439 Label bail;
18440 masm.loadDOMExpandoValueGuardGeneration(proxy, out,
18441 ins->mir()->expandoAndGeneration(),
18442 ins->mir()->generation(), &bail);
18443 bailoutFrom(&bail, ins->snapshot());
18446 void CodeGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
18447 LLoadDOMExpandoValueIgnoreGeneration* ins) {
18448 Register proxy = ToRegister(ins->proxy());
18449 ValueOperand out = ToOutValue(ins);
18451 masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
18452 out.scratchReg());
18454 // Load the ExpandoAndGeneration* from the PrivateValue.
18455 masm.loadPrivate(
18456 Address(out.scratchReg(),
18457 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
18458 out.scratchReg());
18460 // Load expandoAndGeneration->expando into the output Value register.
18461 masm.loadValue(
18462 Address(out.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), out);
18465 void CodeGenerator::visitGuardDOMExpandoMissingOrGuardShape(
18466 LGuardDOMExpandoMissingOrGuardShape* ins) {
18467 Register temp = ToRegister(ins->temp0());
18468 ValueOperand input =
18469 ToValue(ins, LGuardDOMExpandoMissingOrGuardShape::InputIndex);
18471 Label done;
18472 masm.branchTestUndefined(Assembler::Equal, input, &done);
18474 masm.debugAssertIsObject(input);
18475 masm.unboxObject(input, temp);
18476 // The expando object is not used in this case, so we don't need Spectre
18477 // mitigations.
18478 Label bail;
18479 masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, temp,
18480 ins->mir()->shape(), &bail);
18481 bailoutFrom(&bail, ins->snapshot());
18483 masm.bind(&done);
18486 class OutOfLineIsCallable : public OutOfLineCodeBase<CodeGenerator> {
18487 Register object_;
18488 Register output_;
18490 public:
18491 OutOfLineIsCallable(Register object, Register output)
18492 : object_(object), output_(output) {}
18494 void accept(CodeGenerator* codegen) override {
18495 codegen->visitOutOfLineIsCallable(this);
18497 Register object() const { return object_; }
18498 Register output() const { return output_; }
18501 void CodeGenerator::visitIsCallableO(LIsCallableO* ins) {
18502 Register object = ToRegister(ins->object());
18503 Register output = ToRegister(ins->output());
18505 OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(object, output);
18506 addOutOfLineCode(ool, ins->mir());
18508 masm.isCallable(object, output, ool->entry());
18510 masm.bind(ool->rejoin());
// IsCallable for a boxed Value input: non-objects are never callable;
// objects take the same inline-check-plus-OOL path as visitIsCallableO.
void CodeGenerator::visitIsCallableV(LIsCallableV* ins) {
  ValueOperand val = ToValue(ins, LIsCallableV::ObjectIndex);
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  Label notObject;
  masm.fallibleUnboxObject(val, temp, &notObject);

  OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(temp, output);
  addOutOfLineCode(ool, ins->mir());

  masm.isCallable(temp, output, ool->entry());
  masm.jump(ool->rejoin());

  masm.bind(&notObject);
  masm.move32(Imm32(0), output);

  masm.bind(ool->rejoin());
}
18533 void CodeGenerator::visitOutOfLineIsCallable(OutOfLineIsCallable* ool) {
18534 Register object = ool->object();
18535 Register output = ool->output();
18537 saveVolatile(output);
18538 using Fn = bool (*)(JSObject* obj);
18539 masm.setupAlignedABICall();
18540 masm.passABIArg(object);
18541 masm.callWithABI<Fn, ObjectIsCallable>();
18542 masm.storeCallBoolResult(output);
18543 restoreVolatile(output);
18544 masm.jump(ool->rejoin());
18547 class OutOfLineIsConstructor : public OutOfLineCodeBase<CodeGenerator> {
18548 LIsConstructor* ins_;
18550 public:
18551 explicit OutOfLineIsConstructor(LIsConstructor* ins) : ins_(ins) {}
18553 void accept(CodeGenerator* codegen) override {
18554 codegen->visitOutOfLineIsConstructor(this);
18556 LIsConstructor* ins() const { return ins_; }
18559 void CodeGenerator::visitIsConstructor(LIsConstructor* ins) {
18560 Register object = ToRegister(ins->object());
18561 Register output = ToRegister(ins->output());
18563 OutOfLineIsConstructor* ool = new (alloc()) OutOfLineIsConstructor(ins);
18564 addOutOfLineCode(ool, ins->mir());
18566 masm.isConstructor(object, output, ool->entry());
18568 masm.bind(ool->rejoin());
18571 void CodeGenerator::visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool) {
18572 LIsConstructor* ins = ool->ins();
18573 Register object = ToRegister(ins->object());
18574 Register output = ToRegister(ins->output());
18576 saveVolatile(output);
18577 using Fn = bool (*)(JSObject* obj);
18578 masm.setupAlignedABICall();
18579 masm.passABIArg(object);
18580 masm.callWithABI<Fn, ObjectIsConstructor>();
18581 masm.storeCallBoolResult(output);
18582 restoreVolatile(output);
18583 masm.jump(ool->rejoin());
18586 void CodeGenerator::visitIsCrossRealmArrayConstructor(
18587 LIsCrossRealmArrayConstructor* ins) {
18588 Register object = ToRegister(ins->object());
18589 Register output = ToRegister(ins->output());
18591 masm.setIsCrossRealmArrayConstructor(object, output);
// Shared IsArray codegen: true for plain ArrayObjects, OOL VM call for
// proxies (which may wrap arrays), false otherwise. |notArray|, when given,
// is an extra entry point into the false path (used by the Value variant for
// non-object inputs). Clobbers |output| with the class pointer first.
static void EmitObjectIsArray(MacroAssembler& masm, OutOfLineCode* ool,
                              Register obj, Register output,
                              Label* notArray = nullptr) {
  masm.loadObjClassUnsafe(obj, output);

  Label isArray;
  masm.branchPtr(Assembler::Equal, output, ImmPtr(&ArrayObject::class_),
                 &isArray);

  // Branch to OOL path if it's a proxy.
  masm.branchTestClassIsProxy(true, output, ool->entry());

  if (notArray) {
    masm.bind(notArray);
  }
  masm.move32(Imm32(0), output);
  masm.jump(ool->rejoin());

  masm.bind(&isArray);
  masm.move32(Imm32(1), output);

  masm.bind(ool->rejoin());
}
18618 void CodeGenerator::visitIsArrayO(LIsArrayO* lir) {
18619 Register object = ToRegister(lir->object());
18620 Register output = ToRegister(lir->output());
18622 using Fn = bool (*)(JSContext*, HandleObject, bool*);
18623 OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
18624 lir, ArgList(object), StoreRegisterTo(output));
18625 EmitObjectIsArray(masm, ool, object, output);
18628 void CodeGenerator::visitIsArrayV(LIsArrayV* lir) {
18629 ValueOperand val = ToValue(lir, LIsArrayV::ValueIndex);
18630 Register output = ToRegister(lir->output());
18631 Register temp = ToRegister(lir->temp0());
18633 Label notArray;
18634 masm.fallibleUnboxObject(val, temp, &notArray);
18636 using Fn = bool (*)(JSContext*, HandleObject, bool*);
18637 OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
18638 lir, ArgList(temp), StoreRegisterTo(output));
18639 EmitObjectIsArray(masm, ool, temp, output, &notArray);
// IsTypedArray: inline class check; when the object may be a cross-
// compartment wrapper, proxies take an OOL VM call to unwrap and re-check.
void CodeGenerator::visitIsTypedArray(LIsTypedArray* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;
  if (lir->mir()->isPossiblyWrapped()) {
    using Fn = bool (*)(JSContext*, JSObject*, bool*);
    ool = oolCallVM<Fn, jit::IsPossiblyWrappedTypedArray>(
        lir, ArgList(object), StoreRegisterTo(output));
  }

  Label notTypedArray;
  Label done;

  // |output| temporarily holds the class pointer.
  masm.loadObjClassUnsafe(object, output);
  masm.branchIfClassIsNotTypedArray(output, &notTypedArray);

  masm.move32(Imm32(1), output);
  masm.jump(&done);
  masm.bind(&notTypedArray);
  if (ool) {
    masm.branchTestClassIsProxy(true, output, ool->entry());
  }
  masm.move32(Imm32(0), output);
  masm.bind(&done);
  if (ool) {
    masm.bind(ool->rejoin());
  }
}
18672 void CodeGenerator::visitIsObject(LIsObject* ins) {
18673 Register output = ToRegister(ins->output());
18674 ValueOperand value = ToValue(ins, LIsObject::ObjectIndex);
18675 masm.testObjectSet(Assembler::Equal, value, output);
// Branch on whether the Value holds an object.
void CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins) {
  ValueOperand value = ToValue(ins, LIsObjectAndBranch::Input);
  testObjectEmitBranch(Assembler::Equal, value, ins->ifTrue(), ins->ifFalse());
}
18683 void CodeGenerator::visitIsNullOrUndefined(LIsNullOrUndefined* ins) {
18684 Register output = ToRegister(ins->output());
18685 ValueOperand value = ToValue(ins, LIsNullOrUndefined::InputIndex);
18687 Label isNotNull, done;
18688 masm.branchTestNull(Assembler::NotEqual, value, &isNotNull);
18690 masm.move32(Imm32(1), output);
18691 masm.jump(&done);
18693 masm.bind(&isNotNull);
18694 masm.testUndefinedSet(Assembler::Equal, value, output);
18696 masm.bind(&done);
// Branch on whether the Value is null or undefined. Splits the tag once so
// both tests share the same scratch register.
void CodeGenerator::visitIsNullOrUndefinedAndBranch(
    LIsNullOrUndefinedAndBranch* ins) {
  Label* ifTrue = getJumpLabelForBranch(ins->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(ins->ifFalse());
  ValueOperand value = ToValue(ins, LIsNullOrUndefinedAndBranch::Input);

  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  masm.branchTestNull(Assembler::Equal, tag, ifTrue);
  masm.branchTestUndefined(Assembler::Equal, tag, ifTrue);

  // Fall through when the false target is the next block in emission order.
  if (!isNextBlock(ins->ifFalse()->lir())) {
    masm.jump(ifFalse);
  }
}
18716 void CodeGenerator::loadOutermostJSScript(Register reg) {
18717 // The "outermost" JSScript means the script that we are compiling
18718 // basically; this is not always the script associated with the
18719 // current basic block, which might be an inlined script.
18721 MIRGraph& graph = current->mir()->graph();
18722 MBasicBlock* entryBlock = graph.entryBlock();
18723 masm.movePtr(ImmGCPtr(entryBlock->info().script()), reg);
18726 void CodeGenerator::loadJSScriptForBlock(MBasicBlock* block, Register reg) {
18727 // The current JSScript means the script for the current
18728 // basic block. This may be an inlined script.
18730 JSScript* script = block->info().script();
18731 masm.movePtr(ImmGCPtr(script), reg);
18734 void CodeGenerator::visitHasClass(LHasClass* ins) {
18735 Register lhs = ToRegister(ins->lhs());
18736 Register output = ToRegister(ins->output());
18738 masm.loadObjClassUnsafe(lhs, output);
18739 masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()),
18740 output);
18743 void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
18744 Register lhs = ToRegister(ins->lhs());
18745 Register temp = ToRegister(ins->temp0());
18747 // branchTestObjClass may zero the object register on speculative paths
18748 // (we should have a defineReuseInput allocation in this case).
18749 Register spectreRegToZero = lhs;
18751 Label notEqual;
18753 masm.branchTestObjClass(Assembler::NotEqual, lhs, ins->mir()->getClass(),
18754 temp, spectreRegToZero, &notEqual);
18756 // Can't return null-return here, so bail.
18757 bailoutFrom(&notEqual, ins->snapshot());
18760 void CodeGenerator::visitGuardToEitherClass(LGuardToEitherClass* ins) {
18761 Register lhs = ToRegister(ins->lhs());
18762 Register temp = ToRegister(ins->temp0());
18764 // branchTestObjClass may zero the object register on speculative paths
18765 // (we should have a defineReuseInput allocation in this case).
18766 Register spectreRegToZero = lhs;
18768 Label notEqual;
18770 masm.branchTestObjClass(Assembler::NotEqual, lhs,
18771 {ins->mir()->getClass1(), ins->mir()->getClass2()},
18772 temp, spectreRegToZero, &notEqual);
18774 // Can't return null-return here, so bail.
18775 bailoutFrom(&notEqual, ins->snapshot());
18778 void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
18779 Register lhs = ToRegister(ins->lhs());
18780 Register temp = ToRegister(ins->temp0());
18782 // branchTestObjClass may zero the object register on speculative paths
18783 // (we should have a defineReuseInput allocation in this case).
18784 Register spectreRegToZero = lhs;
18786 Label notEqual;
18788 masm.branchTestObjIsFunction(Assembler::NotEqual, lhs, temp, spectreRegToZero,
18789 &notEqual);
18791 // Can't return null-return here, so bail.
18792 bailoutFrom(&notEqual, ins->snapshot());
18795 void CodeGenerator::visitObjectClassToString(LObjectClassToString* lir) {
18796 Register obj = ToRegister(lir->lhs());
18797 Register temp = ToRegister(lir->temp0());
18799 using Fn = JSString* (*)(JSContext*, JSObject*);
18800 masm.setupAlignedABICall();
18801 masm.loadJSContext(temp);
18802 masm.passABIArg(temp);
18803 masm.passABIArg(obj);
18804 masm.callWithABI<Fn, js::ObjectClassToString>();
18806 bailoutCmpPtr(Assembler::Equal, ReturnReg, ImmWord(0), lir->snapshot());
// Wasm parameters are already in their ABI-assigned locations; nothing to do.
void CodeGenerator::visitWasmParameter(LWasmParameter* lir) {}
// 64-bit wasm parameters are likewise already in place; nothing to do.
void CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir) {}
18813 void CodeGenerator::visitWasmReturn(LWasmReturn* lir) {
18814 // Don't emit a jump to the return label if this is the last block.
18815 if (current->mir() != *gen->graph().poBegin()) {
18816 masm.jump(&returnLabel_);
18820 void CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir) {
18821 // Don't emit a jump to the return label if this is the last block.
18822 if (current->mir() != *gen->graph().poBegin()) {
18823 masm.jump(&returnLabel_);
18827 void CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir) {
18828 // Don't emit a jump to the return label if this is the last block.
18829 if (current->mir() != *gen->graph().poBegin()) {
18830 masm.jump(&returnLabel_);
// Debug-mode assertion: verify at runtime that an integer register value
// lies within the statically computed Range; hit assumeUnreachable if not.
void CodeGenerator::emitAssertRangeI(MIRType type, const Range* r,
                                     Register input) {
  // Check the lower bound (skipped when the bound is trivially INT32_MIN).
  if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }

  // Check the upper bound (skipped when the bound is trivially INT32_MAX).
  if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
  // r->exponent(), there's nothing to check, because if we ended up in the
  // integer range checking code, the value is already in an integer register
  // in the integer range.
}
// Debug-mode helper: emits runtime checks that the double in |input| satisfies
// the range-analysis facts in |r| (int32 bounds, negative-zero-ness, exponent
// bound, NaN/infinity-ness). |temp| is a scratch float register; it is
// clobbered by the constant loads and the division below.
void CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input,
                                     FloatRegister temp) {
  // Check the lower bound.
  if (r->hasInt32LowerBound()) {
    Label success;
    masm.loadConstantDouble(r->lower(), temp);
    if (r->canBeNaN()) {
      // NaN compares unordered to everything; treat it as in-range here.
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &success);
    masm.assumeUnreachable(
        "Double input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }
  // Check the upper bound.
  if (r->hasInt32UpperBound()) {
    Label success;
    masm.loadConstantDouble(r->upper(), temp);
    if (r->canBeNaN()) {
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
    masm.assumeUnreachable(
        "Double input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // This code does not yet check r->canHaveFractionalPart(). This would require
  // new assembler interfaces to make rounding instructions available.

  if (!r->canBeNegativeZero()) {
    Label success;

    // First, test for being equal to 0.0, which also includes -0.0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp,
                      &success);

    // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
    // -Infinity instead of Infinity.
    masm.loadConstantDouble(1.0, temp);
    masm.divDouble(input, temp);
    masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);

    masm.assumeUnreachable("Input shouldn't be negative zero.");

    masm.bind(&success);
  }

  if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
      r->exponent() < FloatingPoint<double>::kExponentBias) {
    // Check the bounds implied by the maximum exponent.
    Label exponentLoOk;
    masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp,
                      &exponentLoOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentLoOk);

    Label exponentHiOk;
    masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &exponentHiOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentHiOk);
  } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
    // If we think the value can't be NaN, check that it isn't.
    Label notnan;
    masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
    masm.assumeUnreachable("Input shouldn't be NaN.");
    masm.bind(&notnan);

    // If we think the value also can't be an infinity, check that it isn't.
    if (!r->canBeInfiniteOrNaN()) {
      Label notposinf;
      masm.loadConstantDouble(PositiveInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
      masm.assumeUnreachable("Input shouldn't be +Inf.");
      masm.bind(&notposinf);

      Label notneginf;
      masm.loadConstantDouble(NegativeInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
      masm.assumeUnreachable("Input shouldn't be -Inf.");
      masm.bind(&notneginf);
    }
  }
}
// Debug check: asserts at run time that |obj| has the class the MIR node
// claims. FunctionClass is special-cased because a function may be either of
// the two internal function classes.
void CodeGenerator::visitAssertClass(LAssertClass* ins) {
  Register obj = ToRegister(ins->input());
  Register temp = ToRegister(ins->getTemp(0));

  Label success;
  if (ins->mir()->getClass() == &FunctionClass) {
    // Allow both possible function classes here.
    masm.branchTestObjIsFunctionNoSpectreMitigations(Assembler::Equal, obj,
                                                     temp, &success);
  } else {
    masm.branchTestObjClassNoSpectreMitigations(
        Assembler::Equal, obj, ins->mir()->getClass(), temp, &success);
  }
  masm.assumeUnreachable("Wrong KnownClass during run-time");
  masm.bind(&success);
}
// Debug check: asserts at run time that |obj| still has the shape the MIR
// node recorded at compile time.
void CodeGenerator::visitAssertShape(LAssertShape* ins) {
  Register obj = ToRegister(ins->input());

  Label success;
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::Equal, obj,
                                              ins->mir()->shape(), &success);
  masm.assumeUnreachable("Wrong Shape during run-time");
  masm.bind(&success);
}
// LIR entry point for integer range assertions; defers to emitAssertRangeI.
void CodeGenerator::visitAssertRangeI(LAssertRangeI* ins) {
  Register input = ToRegister(ins->input());
  const Range* r = ins->range();

  emitAssertRangeI(ins->mir()->input()->type(), r, input);
}
// LIR entry point for double range assertions; defers to emitAssertRangeD.
void CodeGenerator::visitAssertRangeD(LAssertRangeD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister temp = ToFloatRegister(ins->temp());
  const Range* r = ins->range();

  emitAssertRangeD(r, input, temp);
}
// Float32 range assertion: widens the input to double in |temp| and reuses
// the double range-check emitter with |temp2| as scratch.
void CodeGenerator::visitAssertRangeF(LAssertRangeF* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister temp = ToFloatRegister(ins->temp());
  FloatRegister temp2 = ToFloatRegister(ins->temp2());

  const Range* r = ins->range();

  masm.convertFloat32ToDouble(input, temp);
  emitAssertRangeD(r, temp, temp2);
}
// Range assertion for a boxed Value: dispatches on the value's tag, unboxes
// int32 or double payloads, and runs the matching range check. Any other tag
// is a range-analysis violation. The ScratchTagScopeRelease blocks return the
// scratch tag register before unboxing, since unboxing may need it.
void CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) {
  const Range* r = ins->range();
  const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
  Label done;

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    {
      Label isNotInt32;
      masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
      {
        ScratchTagScopeRelease _(&tag);
        Register unboxInt32 = ToTempUnboxRegister(ins->temp());
        Register input = masm.extractInt32(value, unboxInt32);
        emitAssertRangeI(MIRType::Int32, r, input);
        masm.jump(&done);
      }
      masm.bind(&isNotInt32);
    }

    {
      Label isNotDouble;
      masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
      {
        ScratchTagScopeRelease _(&tag);
        FloatRegister input = ToFloatRegister(ins->floatTemp1());
        FloatRegister temp = ToFloatRegister(ins->floatTemp2());
        masm.unboxDouble(value, input);
        emitAssertRangeD(r, input, temp);
        masm.jump(&done);
      }
      masm.bind(&isNotDouble);
    }
  }

  masm.assumeUnreachable("Incorrect range for Value.");
  masm.bind(&done);
}
// Polls the runtime's interrupt bits; if any bit is set, takes an out-of-line
// VM call to InterruptCheck, then rejoins the fast path.
void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) {
  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, InterruptCheck>(lir, ArgList(), StoreNothing());

  const void* interruptAddr = gen->runtime->addressOfInterruptBits();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0),
                ool->entry());
  masm.bind(ool->rejoin());
}
// Out-of-line path for a wasm trap that execution can resume from. Emits the
// trap, records a safepoint at the trap site so a stack map can be built,
// and jumps back to the in-line code.
void CodeGenerator::visitOutOfLineResumableWasmTrap(
    OutOfLineResumableWasmTrap* ool) {
  LInstruction* lir = ool->lir();
  masm.wasmTrap(ool->trap(), ool->bytecodeOffset());

  markSafepointAt(masm.currentOffset(), lir);

  // Note that masm.framePushed() doesn't include the register dump area.
  // That will be taken into account when the StackMap is created from the
  // LSafepoint.
  lir->safepoint()->setFramePushedAtStackMapBase(ool->framePushed());
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::Trap);

  masm.jump(ool->rejoin());
}
// Out-of-line path for a wasm trap that aborts execution: just emit the trap;
// no safepoint and no rejoin are needed since control never returns.
void CodeGenerator::visitOutOfLineAbortingWasmTrap(
    OutOfLineAbortingWasmTrap* ool) {
  masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
}
// Wasm interrupt poll: checks the per-instance interrupt flag and, when set,
// takes a resumable out-of-line CheckInterrupt trap, then rejoins.
void CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  OutOfLineResumableWasmTrap* ool = new (alloc()) OutOfLineResumableWasmTrap(
      lir, masm.framePushed(), lir->mir()->bytecodeOffset(),
      wasm::Trap::CheckInterrupt);
  addOutOfLineCode(ool, lir->mir());
  masm.branch32(
      Assembler::NotEqual,
      Address(ToRegister(lir->instance()), wasm::Instance::offsetOfInterrupt()),
      Imm32(0), ool->entry());
  masm.bind(ool->rejoin());
}
// Unconditionally emits the wasm trap recorded on the MIR node.
void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  const MWasmTrap* mir = lir->mir();

  masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
}
// Traps if |ref| is a null anyref; otherwise falls through.
void CodeGenerator::visitWasmTrapIfNull(LWasmTrapIfNull* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  const MWasmTrapIfNull* mir = lir->mir();
  Label nonNull;
  Register ref = ToRegister(lir->ref());

  // First arg false => branch taken when the ref is NOT null.
  masm.branchWasmAnyRefIsNull(false, ref, &nonNull);
  masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
  masm.bind(&nonNull);
}
// Computes a 0/1 result for "is |ref| a subtype of an abstract heap type".
// Abstract destinations need no supertype vector, so superSTV/scratch2 are
// Invalid; scratch1 is only allocated when the check requires it.
void CodeGenerator::visitWasmRefIsSubtypeOfAbstract(
    LWasmRefIsSubtypeOfAbstract* ins) {
  MOZ_ASSERT(gen->compilingWasm());

  const MWasmRefIsSubtypeOfAbstract* mir = ins->mir();
  MOZ_ASSERT(!mir->destType().isTypeRef());

  Register ref = ToRegister(ins->ref());
  Register superSTV = Register::Invalid();
  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  Register scratch2 = Register::Invalid();
  Register result = ToRegister(ins->output());
  Label onSuccess;
  Label onFail;
  Label join;
  masm.branchWasmRefIsSubtype(ref, mir->sourceType(), mir->destType(),
                              &onSuccess, /*onSuccess=*/true, superSTV,
                              scratch1, scratch2);
  masm.bind(&onFail);
  masm.xor32(result, result);  // result = 0
  masm.jump(&join);
  masm.bind(&onSuccess);
  masm.move32(Imm32(1), result);
  masm.bind(&join);
}
// Computes a 0/1 result for "is |ref| a subtype of a concrete type". Concrete
// checks require the destination type's supertype vector in |superSTV|.
void CodeGenerator::visitWasmRefIsSubtypeOfConcrete(
    LWasmRefIsSubtypeOfConcrete* ins) {
  MOZ_ASSERT(gen->compilingWasm());

  const MWasmRefIsSubtypeOfConcrete* mir = ins->mir();
  MOZ_ASSERT(mir->destType().isTypeRef());

  Register ref = ToRegister(ins->ref());
  Register superSTV = ToRegister(ins->superSTV());
  Register scratch1 = ToRegister(ins->temp0());
  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  Register result = ToRegister(ins->output());
  Label onSuccess;
  Label join;
  masm.branchWasmRefIsSubtype(ref, mir->sourceType(), mir->destType(),
                              &onSuccess, /*onSuccess=*/true, superSTV,
                              scratch1, scratch2);
  masm.move32(Imm32(0), result);  // fall-through: not a subtype
  masm.jump(&join);
  masm.bind(&onSuccess);
  masm.move32(Imm32(1), result);
  masm.bind(&join);
}
// Fused subtype-test-and-branch for abstract destination types: branches
// directly to the true/false successor blocks instead of materializing a
// boolean.
void CodeGenerator::visitWasmRefIsSubtypeOfAbstractAndBranch(
    LWasmRefIsSubtypeOfAbstractAndBranch* ins) {
  MOZ_ASSERT(gen->compilingWasm());
  Register ref = ToRegister(ins->ref());
  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  masm.branchWasmRefIsSubtype(
      ref, ins->sourceType(), ins->destType(), onSuccess, /*onSuccess=*/true,
      Register::Invalid(), scratch1, Register::Invalid());
  masm.jump(onFail);
}
// Fused subtype-test-and-branch for concrete destination types; needs the
// supertype vector in |superSTV| like the non-branching variant.
void CodeGenerator::visitWasmRefIsSubtypeOfConcreteAndBranch(
    LWasmRefIsSubtypeOfConcreteAndBranch* ins) {
  MOZ_ASSERT(gen->compilingWasm());
  Register ref = ToRegister(ins->ref());
  Register superSTV = ToRegister(ins->superSTV());
  Register scratch1 = ToRegister(ins->temp0());
  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  masm.branchWasmRefIsSubtype(ref, ins->sourceType(), ins->destType(),
                              onSuccess, /*onSuccess=*/true, superSTV, scratch1,
                              scratch2);
  masm.jump(onFail);
}
// Emits an instance call to the struct-allocation builtin |fun|, passing the
// instance and |typeDefData| and leaving the new object in |output|. Saves
// and restores live registers around the call and records a CodegenCall
// safepoint so a stack map can be built at the call site.
void CodeGenerator::callWasmStructAllocFun(LInstruction* lir,
                                           wasm::SymbolicAddress fun,
                                           Register typeDefData,
                                           Register output) {
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();
  saveLive(lir);

  masm.setupWasmABICall();
  masm.passABIArg(InstanceReg);
  masm.passABIArg(typeDefData);
  // Offset of the saved InstanceReg relative to the stack-map base, so the
  // callee mechanism can locate the instance.
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  CodeOffset offset =
      masm.callWithABI(wasm::BytecodeOffset(0), fun,
                       mozilla::Some(instanceOffset), ABIType::General);
  masm.storeCallPointerResult(output);

  markSafepointAt(offset.offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);

  restoreLive(lir);
  masm.Pop(InstanceReg);
#if JS_CODEGEN_ARM64
  // ARM64 keeps a pseudo stack pointer; resynchronize the real SP after the
  // pops above.
  masm.syncStackPtr();
#endif
}
// Out-of-line path to allocate wasm GC structs
class OutOfLineWasmNewStruct : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;          // instruction whose live registers are saved
  wasm::SymbolicAddress fun_;  // allocation builtin to call
  Register typeDefData_;       // type-definition data argument for the call
  Register output_;            // receives the allocated struct

 public:
  OutOfLineWasmNewStruct(LInstruction* lir, wasm::SymbolicAddress fun,
                         Register typeDefData, Register output)
      : lir_(lir), fun_(fun), typeDefData_(typeDefData), output_(output) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineWasmNewStruct(this);
  }

  LInstruction* lir() const { return lir_; }
  wasm::SymbolicAddress fun() const { return fun_; }
  Register typeDefData() const { return typeDefData_; }
  Register output() const { return output_; }
};
// Slow path taken when inline struct allocation fails: perform the instance
// call, then rejoin the fast path.
void CodeGenerator::visitOutOfLineWasmNewStruct(OutOfLineWasmNewStruct* ool) {
  callWasmStructAllocFun(ool->lir(), ool->fun(), ool->typeDefData(),
                         ool->output());
  masm.jump(ool->rejoin());
}
// Allocates a wasm GC struct. Structs with out-of-line (OOL) storage always
// go through an instance call; inline-storage structs first try an inline
// nursery allocation and fall back to the instance call via the OOL path.
// The _true/_false builtin variants select whether fields are zeroed.
void CodeGenerator::visitWasmNewStructObject(LWasmNewStructObject* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  MWasmNewStructObject* mir = lir->mir();

  Register typeDefData = ToRegister(lir->typeDefData());
  Register output = ToRegister(lir->output());

  if (mir->isOutline()) {
    wasm::SymbolicAddress fun = mir->zeroFields()
                                    ? wasm::SymbolicAddress::StructNewOOL_true
                                    : wasm::SymbolicAddress::StructNewOOL_false;
    callWasmStructAllocFun(lir, fun, typeDefData, output);
  } else {
    wasm::SymbolicAddress fun = mir->zeroFields()
                                    ? wasm::SymbolicAddress::StructNewIL_true
                                    : wasm::SymbolicAddress::StructNewIL_false;

    Register instance = ToRegister(lir->instance());
    MOZ_ASSERT(instance == InstanceReg);

    auto ool =
        new (alloc()) OutOfLineWasmNewStruct(lir, fun, typeDefData, output);
    addOutOfLineCode(ool, lir->mir());

    Register temp1 = ToRegister(lir->temp0());
    Register temp2 = ToRegister(lir->temp1());
    masm.wasmNewStructObject(instance, output, typeDefData, temp1, temp2,
                             ool->entry(), mir->allocKind(), mir->zeroFields());

    masm.bind(ool->rejoin());
  }
}
// Emits an instance call to the array-allocation builtin |fun| with
// (instance, numElements, typeDefData), leaving the result in |output|.
// Mirrors callWasmStructAllocFun for save/restore and safepoint handling,
// and additionally traps (ThrowReported) if the builtin returned null,
// i.e. the allocation failed and an error was already reported.
void CodeGenerator::callWasmArrayAllocFun(LInstruction* lir,
                                          wasm::SymbolicAddress fun,
                                          Register numElements,
                                          Register typeDefData, Register output,
                                          wasm::BytecodeOffset bytecodeOffset) {
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();
  saveLive(lir);

  masm.setupWasmABICall();
  masm.passABIArg(InstanceReg);
  masm.passABIArg(numElements);
  masm.passABIArg(typeDefData);
  // Offset of the saved InstanceReg relative to the stack-map base.
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  CodeOffset offset = masm.callWithABI(
      bytecodeOffset, fun, mozilla::Some(instanceOffset), ABIType::General);
  masm.storeCallPointerResult(output);

  markSafepointAt(offset.offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);

  restoreLive(lir);
  masm.Pop(InstanceReg);
#if JS_CODEGEN_ARM64
  // Resynchronize the real SP with ARM64's pseudo stack pointer.
  masm.syncStackPtr();
#endif

  // Null output => allocation failed; propagate the already-reported error.
  Label ok;
  masm.branchPtr(Assembler::NonZero, output, ImmWord(0), &ok);
  masm.wasmTrap(wasm::Trap::ThrowReported, bytecodeOffset);
  masm.bind(&ok);
}
// Out-of-line path to allocate wasm GC arrays
class OutOfLineWasmNewArray : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;          // instruction whose live registers are saved
  wasm::SymbolicAddress fun_;  // allocation builtin to call
  Register numElementsReg_;    // register carrying the element count
  // When the element count is a compile-time constant, it is stored here and
  // materialized into numElementsReg_ on the slow path.
  mozilla::Maybe<uint32_t> numElements_;
  Register typeDefData_;  // type-definition data argument for the call
  Register output_;       // receives the allocated array
  wasm::BytecodeOffset bytecodeOffset_;  // for trap/safepoint attribution

 public:
  OutOfLineWasmNewArray(LInstruction* lir, wasm::SymbolicAddress fun,
                        Register numElementsReg,
                        mozilla::Maybe<uint32_t> numElements,
                        Register typeDefData, Register output,
                        wasm::BytecodeOffset bytecodeOffset)
      : lir_(lir),
        fun_(fun),
        numElementsReg_(numElementsReg),
        numElements_(numElements),
        typeDefData_(typeDefData),
        output_(output),
        bytecodeOffset_(bytecodeOffset) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineWasmNewArray(this);
  }

  LInstruction* lir() const { return lir_; }
  wasm::SymbolicAddress fun() const { return fun_; }
  Register numElementsReg() const { return numElementsReg_; }
  mozilla::Maybe<uint32_t> numElements() const { return numElements_; }
  Register typeDefData() const { return typeDefData_; }
  Register output() const { return output_; }
  wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
};
// Slow path for array allocation: materialize a constant element count if we
// have one, perform the instance call, then rejoin the fast path.
void CodeGenerator::visitOutOfLineWasmNewArray(OutOfLineWasmNewArray* ool) {
  if (ool->numElements().isSome()) {
    masm.move32(Imm32(ool->numElements().value()), ool->numElementsReg());
  }
  callWasmArrayAllocFun(ool->lir(), ool->fun(), ool->numElementsReg(),
                        ool->typeDefData(), ool->output(),
                        ool->bytecodeOffset());
  masm.jump(ool->rejoin());
}
// Allocates a wasm GC array. Three cases:
//  1. Constant element count too large for inline storage: call the instance
//     builtin directly.
//  2. Constant element count with inline storage: try a fixed-size nursery
//     allocation, falling back to the instance call via OOL code.
//  3. Dynamic element count: try a dynamically-sized inline nursery
//     allocation, with the same OOL fallback.
void CodeGenerator::visitWasmNewArrayObject(LWasmNewArrayObject* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  MWasmNewArrayObject* mir = lir->mir();

  Register typeDefData = ToRegister(lir->typeDefData());
  Register output = ToRegister(lir->output());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  wasm::SymbolicAddress fun = mir->zeroFields()
                                  ? wasm::SymbolicAddress::ArrayNew_true
                                  : wasm::SymbolicAddress::ArrayNew_false;

  if (lir->numElements()->isConstant()) {
    // numElements is constant, so we can do optimized code generation.
    uint32_t numElements = lir->numElements()->toConstant()->toInt32();
    CheckedUint32 storageBytes =
        WasmArrayObject::calcStorageBytesChecked(mir->elemSize(), numElements);
    if (!storageBytes.isValid() ||
        storageBytes.value() > WasmArrayObject_MaxInlineBytes) {
      // Too much array data to store inline. Immediately perform an instance
      // call to handle the out-of-line storage.
      masm.move32(Imm32(numElements), temp1);
      callWasmArrayAllocFun(lir, fun, temp1, typeDefData, output,
                            mir->bytecodeOffset());
    } else {
      // storageBytes is small enough to be stored inline in WasmArrayObject.
      // Attempt a nursery allocation and fall back to an instance call if it
      // fails.
      Register instance = ToRegister(lir->instance());
      MOZ_ASSERT(instance == InstanceReg);

      auto ool = new (alloc())
          OutOfLineWasmNewArray(lir, fun, temp1, mozilla::Some(numElements),
                                typeDefData, output, mir->bytecodeOffset());
      addOutOfLineCode(ool, lir->mir());

      masm.wasmNewArrayObjectFixed(instance, output, typeDefData, temp1, temp2,
                                   ool->entry(), numElements,
                                   storageBytes.value(), mir->zeroFields());

      masm.bind(ool->rejoin());
    }
  } else {
    // numElements is dynamic. Attempt a dynamic inline-storage nursery
    // allocation and fall back to an instance call if it fails.
    Register instance = ToRegister(lir->instance());
    MOZ_ASSERT(instance == InstanceReg);
    Register numElements = ToRegister(lir->numElements());

    auto ool = new (alloc())
        OutOfLineWasmNewArray(lir, fun, numElements, mozilla::Nothing(),
                              typeDefData, output, mir->bytecodeOffset());
    addOutOfLineCode(ool, lir->mir());

    masm.wasmNewArrayObject(instance, output, numElements, typeDefData, temp1,
                            ool->entry(), mir->elemSize(), mir->zeroFields());

    masm.bind(ool->rejoin());
  }
}
// Copies the dedicated wasm heap-base register into the output; only valid on
// platforms that reserve such a register.
void CodeGenerator::visitWasmHeapReg(LWasmHeapReg* ins) {
#ifdef WASM_HAS_HEAPREG
  masm.movePtr(HeapReg, ToRegister(ins->output()));
#else
  MOZ_CRASH();
#endif
}
// 32-bit wasm memory bounds check: traps with OutOfBounds when
// ptr >= boundsCheckLimit. The trap is emitted inline under Spectre index
// masking (the check also masks the index) and out-of-line otherwise.
void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
  // When there are no spectre mitigations in place, branching out-of-line to
  // the trap is a big performance win, but with mitigations it's trickier. See
  // bug 1680243.
  if (JitOptions.spectreIndexMasking) {
    Label ok;
    masm.wasmBoundsCheck32(Assembler::Below, ptr, boundsCheckLimit, &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
    masm.bind(&ok);
  } else {
    OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
        mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
                           ool->entry());
  }
}
// 64-bit variant of visitWasmBoundsCheck; same inline/out-of-line trap
// strategy depending on Spectre index masking.
void CodeGenerator::visitWasmBoundsCheck64(LWasmBoundsCheck64* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
  Register64 boundsCheckLimit = ToRegister64(ins->boundsCheckLimit());
  // See above.
  if (JitOptions.spectreIndexMasking) {
    Label ok;
    masm.wasmBoundsCheck64(Assembler::Below, ptr, boundsCheckLimit, &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
    masm.bind(&ok);
  } else {
    OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
        mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck64(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
                           ool->entry());
  }
}
// Bounds-checks the range [index, index+length) against |limit| using the
// macro-assembler helper; |tmp| is scratch.
void CodeGenerator::visitWasmBoundsCheckRange32(LWasmBoundsCheckRange32* ins) {
  const MWasmBoundsCheckRange32* mir = ins->mir();
  Register index = ToRegister(ins->index());
  Register length = ToRegister(ins->length());
  Register limit = ToRegister(ins->limit());
  Register tmp = ToRegister(ins->temp0());

  masm.wasmBoundsCheckRange32(index, length, limit, tmp, mir->bytecodeOffset());
}
// Traps with UnalignedAccess if |ptr| is not aligned to the access's byte
// size (byteSize is a power of two, so byteSize-1 is the alignment mask).
void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
      mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
  addOutOfLineCode(ool, mir);
  masm.branchTest32(Assembler::NonZero, ptr, Imm32(mir->byteSize() - 1),
                    ool->entry());
}
// 64-bit variant of the alignment check. Only the low word matters for the
// alignment mask, so on 32-bit platforms the low register is tested.
void CodeGenerator::visitWasmAlignmentCheck64(LWasmAlignmentCheck64* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
#ifdef JS_64BIT
  Register r = ptr.reg;
#else
  Register r = ptr.low;
#endif
  OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
      mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
  addOutOfLineCode(ool, mir);
  masm.branchTestPtr(Assembler::NonZero, r, Imm32(mir->byteSize() - 1),
                     ool->entry());
}
// Loads a field at a fixed offset from the wasm Instance into the output,
// choosing load width by the MIR result type.
void CodeGenerator::visitWasmLoadInstance(LWasmLoadInstance* ins) {
  switch (ins->mir()->type()) {
    case MIRType::WasmAnyRef:
    case MIRType::Pointer:
      masm.loadPtr(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                   ToRegister(ins->output()));
      break;
    case MIRType::Int32:
      masm.load32(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                  ToRegister(ins->output()));
      break;
    default:
      MOZ_CRASH("MIRType not supported in WasmLoadInstance");
  }
}
// 64-bit variant of visitWasmLoadInstance (Int64 fields only).
void CodeGenerator::visitWasmLoadInstance64(LWasmLoadInstance64* ins) {
  MOZ_ASSERT(ins->mir()->type() == MIRType::Int64);
  masm.load64(Address(ToRegister(ins->instance()), ins->mir()->offset()),
              ToOutRegister64(ins));
}
// Emits a load/add/store that bumps |script|'s warm-up counter at the fixed
// address |warmUpCount|, using |tmp| as scratch. Not atomic; the counter is
// a heuristic, so lost increments are acceptable.
void CodeGenerator::incrementWarmUpCounter(AbsoluteAddress warmUpCount,
                                           JSScript* script, Register tmp) {
  // The code depends on the JitScript* not being discarded without also
  // invalidating Ion code. Assert this.
#ifdef DEBUG
  Label ok;
  masm.movePtr(ImmGCPtr(script), tmp);
  masm.loadJitScript(tmp, tmp);
  masm.branchPtr(Assembler::Equal, tmp, ImmPtr(script->jitScript()), &ok);
  masm.assumeUnreachable("Didn't find JitScript?");
  masm.bind(&ok);
#endif

  masm.load32(warmUpCount, tmp);
  masm.add32(Imm32(1), tmp);
  masm.store32(tmp, warmUpCount);
}
// Computes the absolute address of the script's warm-up counter inside its
// JitScript and bumps it via incrementWarmUpCounter.
void CodeGenerator::visitIncrementWarmUpCounter(LIncrementWarmUpCounter* ins) {
  Register tmp = ToRegister(ins->temp0());

  AbsoluteAddress warmUpCount =
      AbsoluteAddress(ins->mir()->script()->jitScript())
          .offset(JitScript::offsetOfWarmUpCount());
  incrementWarmUpCounter(warmUpCount, ins->mir()->script(), tmp);
}
// Bails out if the input is the JS_UNINITIALIZED_LEXICAL magic value, i.e. a
// let/const binding read before initialization (TDZ check).
void CodeGenerator::visitLexicalCheck(LLexicalCheck* ins) {
  ValueOperand inputValue = ToValue(ins, LLexicalCheck::InputIndex);
  Label bail;
  masm.branchTestMagicValue(Assembler::Equal, inputValue,
                            JS_UNINITIALIZED_LEXICAL, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Unconditionally calls into the VM to throw the lexical error identified by
// the MIR node's error number.
void CodeGenerator::visitThrowRuntimeLexicalError(
    LThrowRuntimeLexicalError* ins) {
  pushArg(Imm32(ins->mir()->errorNumber()));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, jit::ThrowRuntimeLexicalError>(ins);
}
// Unconditionally calls into the VM to throw the message identified by the
// MIR node's ThrowMsgKind.
void CodeGenerator::visitThrowMsg(LThrowMsg* ins) {
  pushArg(Imm32(static_cast<int32_t>(ins->mir()->throwMsgKind())));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, js::ThrowMsgOperation>(ins);
}
// Calls into the VM to instantiate global declarations for the script at the
// recorded bytecode pc.
void CodeGenerator::visitGlobalDeclInstantiation(
    LGlobalDeclInstantiation* ins) {
  pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
  pushArg(ImmGCPtr(ins->mir()->block()->info().script()));

  using Fn = bool (*)(JSContext*, HandleScript, const jsbytecode*);
  callVM<Fn, GlobalDeclInstantiationFromIon>(ins);
}
// Implements the |debugger| statement: asks the VM whether the global has a
// live onDebuggerStatement hook; if so, bails out (presumably so the hook
// runs outside Ion code — the bailout target handles it).
void CodeGenerator::visitDebugger(LDebugger* ins) {
  Register cx = ToRegister(ins->temp0());

  masm.loadJSContext(cx);
  using Fn = bool (*)(JSContext* cx);
  masm.setupAlignedABICall();
  masm.passABIArg(cx);
  masm.callWithABI<Fn, GlobalHasLiveOnDebuggerStatement>();

  Label bail;
  masm.branchIfTrueBool(ReturnReg, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Loads |new.target| for the current frame: when constructing, it is the
// argument slot just past max(numActualArgs, numFormalArgs); otherwise it is
// undefined.
void CodeGenerator::visitNewTarget(LNewTarget* ins) {
  ValueOperand output = ToOutValue(ins);

  // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
  Label notConstructing, done;
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.branchTestPtr(Assembler::Zero, calleeToken,
                     Imm32(CalleeToken_FunctionConstructing), &notConstructing);

  Register argvLen = output.scratchReg();
  masm.loadNumActualArgs(FramePointer, argvLen);

  Label useNFormals;

  size_t numFormalArgs = ins->mirRaw()->block()->info().nargs();
  masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs), &useNFormals);

  size_t argsOffset = JitFrameLayout::offsetOfActualArgs();
  {
    // More actuals than formals: new.target follows the actuals.
    BaseValueIndex newTarget(FramePointer, argvLen, argsOffset);
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  masm.bind(&useNFormals);

  {
    // Fewer actuals than formals: new.target follows the formals.
    Address newTarget(FramePointer,
                      argsOffset + (numFormalArgs * sizeof(Value)));
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  // else output = undefined
  masm.bind(&notConstructing);
  masm.moveValue(UndefinedValue(), output);
  masm.bind(&done);
}
// Derived-class constructor return check: an object return value is used
// as-is; a non-object, non-undefined return — or an undefined return with an
// uninitialized (magic) |this| — throws via the OOL VM call; otherwise the
// result is |this|.
void CodeGenerator::visitCheckReturn(LCheckReturn* ins) {
  ValueOperand returnValue = ToValue(ins, LCheckReturn::ReturnValueIndex);
  ValueOperand thisValue = ToValue(ins, LCheckReturn::ThisValueIndex);
  ValueOperand output = ToOutValue(ins);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>(
      ins, ArgList(returnValue), StoreNothing());

  Label noChecks;
  masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
  masm.branchTestUndefined(Assembler::NotEqual, returnValue, ool->entry());
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.moveValue(thisValue, output);
  masm.jump(ool->rejoin());
  masm.bind(&noChecks);
  masm.moveValue(returnValue, output);
  masm.bind(ool->rejoin());
}
// Unboxes the value into |output| if it is an object; otherwise takes the OOL
// path that throws ThrowCheckIsObject with the MIR node's kind.
void CodeGenerator::visitCheckIsObj(LCheckIsObj* ins) {
  ValueOperand value = ToValue(ins, LCheckIsObj::ValueIndex);
  Register output = ToRegister(ins->output());

  using Fn = bool (*)(JSContext*, CheckIsObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowCheckIsObject>(
      ins, ArgList(Imm32(ins->mir()->checkKind())), StoreNothing());

  masm.fallibleUnboxObject(value, output, ool->entry());
  masm.bind(ool->rejoin());
}
// Throws (via the OOL VM call) if the value is null or undefined; any other
// value is object-coercible and falls through unchanged.
void CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins) {
  ValueOperand checkValue = ToValue(ins, LCheckObjCoercible::ValueIndex);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowObjectCoercible>(
      ins, ArgList(checkValue), StoreNothing());
  masm.branchTestNull(Assembler::Equal, checkValue, ool->entry());
  masm.branchTestUndefined(Assembler::Equal, checkValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Validates a class |extends| clause: null is allowed (skips straight to the
// rejoin); anything that is not a constructor object takes the OOL VM call,
// which throws the appropriate error.
void CodeGenerator::visitCheckClassHeritage(LCheckClassHeritage* ins) {
  ValueOperand heritage = ToValue(ins, LCheckClassHeritage::HeritageIndex);
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckClassHeritageOperation>(
      ins, ArgList(heritage), StoreNothing());

  masm.branchTestNull(Assembler::Equal, heritage, ool->rejoin());
  masm.fallibleUnboxObject(heritage, temp0, ool->entry());

  masm.isConstructor(temp0, temp1, ool->entry());
  masm.branchTest32(Assembler::Zero, temp1, temp1, ool->entry());

  masm.bind(ool->rejoin());
}
// Throws ThrowUninitializedThis (via the OOL VM call) if |this| is still the
// uninitialized magic value.
void CodeGenerator::visitCheckThis(LCheckThis* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThis::ValueIndex);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowUninitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Inverse of visitCheckThis: throws ThrowInitializedThis if |this| is NOT the
// uninitialized magic value, i.e. it was already initialized once.
void CodeGenerator::visitCheckThisReinit(LCheckThisReinit* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThisReinit::ThisValueIndex);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowInitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::NotEqual, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
void CodeGenerator::visitGenerator(LGenerator* lir) {
  // Create the generator object for the current script by calling into the
  // VM. Arguments are pushed in reverse order of the Fn signature below.
  Register callee = ToRegister(lir->callee());
  Register environmentChain = ToRegister(lir->environmentChain());
  Register argsObject = ToRegister(lir->argsObject());

  pushArg(argsObject);
  pushArg(environmentChain);
  pushArg(ImmGCPtr(current->mir()->info().script()));
  pushArg(callee);

  using Fn = JSObject* (*)(JSContext* cx, HandleFunction, HandleScript,
                           HandleObject, HandleObject);
  callVM<Fn, CreateGenerator>(lir);
}
void CodeGenerator::visitAsyncResolve(LAsyncResolve* lir) {
  // Resolve the async function's generator with |value| via the VM.
  Register generator = ToRegister(lir->generator());
  ValueOperand value = ToValue(lir, LAsyncResolve::ValueIndex);

  // Arguments are pushed in reverse order of the Fn signature.
  pushArg(value);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
                           HandleValue);
  callVM<Fn, js::AsyncFunctionResolve>(lir);
}
void CodeGenerator::visitAsyncReject(LAsyncReject* lir) {
  // Reject the async function's generator with |reason| (plus the saved
  // |stack| value) via the VM.
  Register generator = ToRegister(lir->generator());
  ValueOperand reason = ToValue(lir, LAsyncReject::ReasonIndex);
  ValueOperand stack = ToValue(lir, LAsyncReject::StackIndex);

  // Arguments are pushed in reverse order of the Fn signature.
  pushArg(stack);
  pushArg(reason);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
                           HandleValue, HandleValue);
  callVM<Fn, js::AsyncFunctionReject>(lir);
}
void CodeGenerator::visitAsyncAwait(LAsyncAwait* lir) {
  // Perform the await operation on |value| for the given generator by
  // calling into the VM.
  ValueOperand value = ToValue(lir, LAsyncAwait::ValueIndex);
  Register generator = ToRegister(lir->generator());

  // Arguments are pushed in reverse order of the Fn signature.
  pushArg(value);
  pushArg(generator);

  using Fn =
      JSObject* (*)(JSContext* cx, Handle<AsyncFunctionGeneratorObject*> genObj,
                    HandleValue value);
  callVM<Fn, js::AsyncFunctionAwait>(lir);
}
void CodeGenerator::visitCanSkipAwait(LCanSkipAwait* lir) {
  // Ask the VM whether awaiting |value| can be skipped; the answer comes
  // back through the bool out-param of the VM function.
  ValueOperand value = ToValue(lir, LCanSkipAwait::ValueIndex);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
  callVM<Fn, js::CanSkipAwait>(lir);
}
void CodeGenerator::visitMaybeExtractAwaitValue(LMaybeExtractAwaitValue* lir) {
  // If |canSkip| is true, extract the awaited value via the VM; otherwise
  // pass |value| through to the output unchanged.
  ValueOperand value = ToValue(lir, LMaybeExtractAwaitValue::ValueIndex);
  ValueOperand output = ToOutValue(lir);
  Register canSkip = ToRegister(lir->canSkip());

  Label cantExtract, finished;
  masm.branchIfFalseBool(canSkip, &cantExtract);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
  callVM<Fn, js::ExtractAwaitValue>(lir);
  masm.jump(&finished);
  masm.bind(&cantExtract);

  // Fast path: no extraction needed, just forward the value.
  masm.moveValue(value, output);

  masm.bind(&finished);
}
void CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins) {
  // Debug-only VM call verifying |checkValue| against self-hosting
  // constraints.
  ValueOperand checkValue = ToValue(ins, LDebugCheckSelfHosted::ValueIndex);
  pushArg(checkValue);
  using Fn = bool (*)(JSContext*, HandleValue);
  callVM<Fn, js::Debug_CheckSelfHosted>(ins);
}
void CodeGenerator::visitRandom(LRandom* ins) {
  using mozilla::non_crypto::XorShift128PlusRNG;

  FloatRegister output = ToFloatRegister(ins->output());
  Register rngReg = ToRegister(ins->temp0());

  Register64 temp1 = ToRegister64(ins->temp1());
  Register64 temp2 = ToRegister64(ins->temp2());

  // Inline the realm's XorShift128+ RNG to produce a double in the output.
  const XorShift128PlusRNG* rng = gen->realm->addressOfRandomNumberGenerator();
  masm.movePtr(ImmPtr(rng), rngReg);

  masm.randomDouble(rngReg, output, temp1, temp2);
  if (js::SupportDifferentialTesting()) {
    // For differential testing the result must be deterministic: the RNG
    // state is still advanced above, but the output is forced to 0.0.
    masm.loadConstantDouble(0.0, output);
  }
}
19842 void CodeGenerator::visitSignExtendInt32(LSignExtendInt32* ins) {
19843 Register input = ToRegister(ins->input());
19844 Register output = ToRegister(ins->output());
19846 switch (ins->mode()) {
19847 case MSignExtendInt32::Byte:
19848 masm.move8SignExtend(input, output);
19849 break;
19850 case MSignExtendInt32::Half:
19851 masm.move16SignExtend(input, output);
19852 break;
19856 void CodeGenerator::visitRotate(LRotate* ins) {
19857 MRotate* mir = ins->mir();
19858 Register input = ToRegister(ins->input());
19859 Register dest = ToRegister(ins->output());
19861 const LAllocation* count = ins->count();
19862 if (count->isConstant()) {
19863 int32_t c = ToInt32(count) & 0x1F;
19864 if (mir->isLeftRotate()) {
19865 masm.rotateLeft(Imm32(c), input, dest);
19866 } else {
19867 masm.rotateRight(Imm32(c), input, dest);
19869 } else {
19870 Register creg = ToRegister(count);
19871 if (mir->isLeftRotate()) {
19872 masm.rotateLeft(creg, input, dest);
19873 } else {
19874 masm.rotateRight(creg, input, dest);
// Out-of-line path for LNaNToZero: replaces the output with 0.0 and jumps
// back to the main path.
class OutOfLineNaNToZero : public OutOfLineCodeBase<CodeGenerator> {
  LNaNToZero* lir_;  // The LIR instruction this OOL path belongs to.

 public:
  explicit OutOfLineNaNToZero(LNaNToZero* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineNaNToZero(this);
  }
  LNaNToZero* lir() const { return lir_; }
};
19891 void CodeGenerator::visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool) {
19892 FloatRegister output = ToFloatRegister(ool->lir()->output());
19893 masm.loadConstantDouble(0.0, output);
19894 masm.jump(ool->rejoin());
void CodeGenerator::visitNaNToZero(LNaNToZero* lir) {
  // Convert NaN (and, unless proven unnecessary, negative zero) to +0.0 via
  // the out-of-line path; other doubles pass through unchanged.
  FloatRegister input = ToFloatRegister(lir->input());

  OutOfLineNaNToZero* ool = new (alloc()) OutOfLineNaNToZero(lir);
  addOutOfLineCode(ool, lir->mir());

  if (lir->mir()->operandIsNeverNegativeZero()) {
    // Only NaN needs fixing: NaN compares unordered with itself.
    masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
  } else {
    // Catch both -0.0 (equal to 0.0) and NaN (unordered) in one compare.
    FloatRegister scratch = ToFloatRegister(lir->temp0());
    masm.loadConstantDouble(0.0, scratch);
    masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch,
                      ool->entry());
  }
  masm.bind(ool->rejoin());
}
19914 void CodeGenerator::visitIsPackedArray(LIsPackedArray* lir) {
19915 Register obj = ToRegister(lir->object());
19916 Register output = ToRegister(lir->output());
19917 Register temp = ToRegister(lir->temp0());
19919 masm.setIsPackedArray(obj, output, temp);
19922 void CodeGenerator::visitGuardArrayIsPacked(LGuardArrayIsPacked* lir) {
19923 Register array = ToRegister(lir->array());
19924 Register temp0 = ToRegister(lir->temp0());
19925 Register temp1 = ToRegister(lir->temp1());
19927 Label bail;
19928 masm.branchArrayIsNotPacked(array, temp0, temp1, &bail);
19929 bailoutFrom(&bail, lir->snapshot());
void CodeGenerator::visitGetPrototypeOf(LGetPrototypeOf* lir) {
  // Load the prototype of |target|: a proto pointer above 1 is a real
  // object, 1 encodes a lazy proto (handled in the VM), 0 encodes null.
  Register target = ToRegister(lir->target());
  ValueOperand out = ToOutValue(lir);
  Register scratch = out.scratchReg();

  using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, jit::GetPrototypeOf>(lir, ArgList(target),
                                                          StoreValueTo(out));

  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  masm.loadObjProto(target, scratch);

  Label hasProto;
  masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);

  // Call into the VM for lazy prototypes.
  masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), ool->entry());

  // Remaining case: a null proto (pointer value 0).
  masm.moveValue(NullValue(), out);
  masm.jump(ool->rejoin());

  masm.bind(&hasProto);
  masm.tagValue(JSVAL_TYPE_OBJECT, scratch, out);

  masm.bind(ool->rejoin());
}
void CodeGenerator::visitObjectWithProto(LObjectWithProto* lir) {
  // Create a plain object with the given prototype value via the VM.
  pushArg(ToValue(lir, LObjectWithProto::PrototypeIndex));

  using Fn = PlainObject* (*)(JSContext*, HandleValue);
  callVM<Fn, js::ObjectWithProtoOperation>(lir);
}
void CodeGenerator::visitObjectStaticProto(LObjectStaticProto* lir) {
  // Load the object's proto pointer directly; callers guarantee it is
  // neither null (0) nor lazy (1).
  Register obj = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  masm.loadObjProto(obj, output);

#ifdef DEBUG
  // We shouldn't encounter a null or lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label done;
  masm.branchPtr(Assembler::Above, output, ImmWord(1), &done);
  masm.assumeUnreachable("Unexpected null or lazy proto in MObjectStaticProto");
  masm.bind(&done);
#endif
}
void CodeGenerator::visitBuiltinObject(LBuiltinObject* lir) {
  // Fetch a built-in object identified by its BuiltinObjectKind via the VM.
  pushArg(Imm32(static_cast<int32_t>(lir->mir()->builtinObjectKind())));

  using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
  callVM<Fn, js::BuiltinObjectOperation>(lir);
}
void CodeGenerator::visitSuperFunction(LSuperFunction* lir) {
  // JSOp::SuperFun: the super constructor is the callee's prototype, or
  // null if the callee has a null proto.
  Register callee = ToRegister(lir->callee());
  ValueOperand out = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

#ifdef DEBUG
  Label classCheckDone;
  masm.branchTestObjIsFunction(Assembler::Equal, callee, temp, callee,
                               &classCheckDone);
  masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
  masm.bind(&classCheckDone);
#endif

  // Load prototype of callee
  masm.loadObjProto(callee, temp);

#ifdef DEBUG
  // We won't encounter a lazy proto, because |callee| is guaranteed to be a
  // JSFunction and only proxy objects can have a lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label proxyCheckDone;
  masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
  masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
  masm.bind(&proxyCheckDone);
#endif

  Label nullProto, done;
  masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);

  // Box prototype and return
  masm.tagValue(JSVAL_TYPE_OBJECT, temp, out);
  masm.jump(&done);

  masm.bind(&nullProto);
  masm.moveValue(NullValue(), out);

  masm.bind(&done);
}
void CodeGenerator::visitInitHomeObject(LInitHomeObject* lir) {
  // Store |homeObject| into the function's extended home-object slot.
  Register func = ToRegister(lir->function());
  ValueOperand homeObject = ToValue(lir, LInitHomeObject::HomeObjectIndex);

  masm.assertFunctionIsExtended(func);

  Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());

  // GC pre-barrier on the slot's previous contents before overwriting.
  emitPreBarrier(addr);
  masm.storeValue(homeObject, addr);
}
20043 void CodeGenerator::visitIsTypedArrayConstructor(
20044 LIsTypedArrayConstructor* lir) {
20045 Register object = ToRegister(lir->object());
20046 Register output = ToRegister(lir->output());
20048 masm.setIsDefinitelyTypedArrayConstructor(object, output);
void CodeGenerator::visitLoadValueTag(LLoadValueTag* lir) {
  // Extract the type tag of a Value into |output|. extractTag may return a
  // register other than |output|, so copy the tag over if it did.
  ValueOperand value = ToValue(lir, LLoadValueTag::ValueIndex);
  Register output = ToRegister(lir->output());

  Register tag = masm.extractTag(value, output);
  if (tag != output) {
    masm.mov(tag, output);
  }
}
void CodeGenerator::visitGuardTagNotEqual(LGuardTagNotEqual* lir) {
  // Bail out when two Value tags compare equal, and also when both tags are
  // number tags (int32 vs double), since equal numbers may carry different
  // tags.
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());

  bailoutCmp32(Assembler::Equal, lhs, rhs, lir->snapshot());

  // If both lhs and rhs are numbers, can't use tag comparison to do inequality
  // comparison
  Label done;
  masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
  masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
  bailout(lir->snapshot());

  masm.bind(&done);
}
void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
  // Load a proxy's target object out of its private reserved slot.
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);

  // Bail for revoked proxies.
  Label bail;
  Address targetAddr(output,
                     js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
  if (lir->mir()->fallible()) {
    // Fallible: the private slot may not hold an object (revoked proxy).
    masm.fallibleUnboxObject(targetAddr, output, &bail);
    bailoutFrom(&bail, lir->snapshot());
  } else {
    masm.unboxObject(targetAddr, output);
  }
}
void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
  // Bail out unless |object| still has the expected GetterSetter for the
  // given property id, checked via an infallible-pure ABI call.
  Register object = ToRegister(lir->object());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());

  masm.movePropertyKey(lir->mir()->propId(), temp1);
  masm.movePtr(ImmGCPtr(lir->mir()->getterSetter()), temp2);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
                      GetterSetter* getterSetter);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(object);
  masm.passABIArg(temp1);
  masm.passABIArg(temp2);
  masm.callWithABI<Fn, ObjectHasGetterSetterPure>();

  bailoutIfFalseBool(ReturnReg, lir->snapshot());
}
20117 void CodeGenerator::visitGuardIsExtensible(LGuardIsExtensible* lir) {
20118 Register object = ToRegister(lir->object());
20119 Register temp = ToRegister(lir->temp0());
20121 Label bail;
20122 masm.branchIfObjectNotExtensible(object, temp, &bail);
20123 bailoutFrom(&bail, lir->snapshot());
20126 void CodeGenerator::visitGuardInt32IsNonNegative(
20127 LGuardInt32IsNonNegative* lir) {
20128 Register index = ToRegister(lir->index());
20130 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
void CodeGenerator::visitGuardInt32Range(LGuardInt32Range* lir) {
  // Bail out unless minimum() <= input <= maximum().
  Register input = ToRegister(lir->input());

  bailoutCmp32(Assembler::LessThan, input, Imm32(lir->mir()->minimum()),
               lir->snapshot());
  bailoutCmp32(Assembler::GreaterThan, input, Imm32(lir->mir()->maximum()),
               lir->snapshot());
}
void CodeGenerator::visitGuardIndexIsNotDenseElement(
    LGuardIndexIsNotDenseElement* lir) {
  // Bail out if |index| refers to an existing (non-hole) dense element of
  // |object|.
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  // Load obj->elements.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  // Ensure index >= initLength or the element is a hole.
  Label notDense;
  Address capacity(temp, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, capacity, spectreTemp, &notDense);

  // In-bounds: only a magic hole value keeps the guard alive.
  BaseValueIndex element(temp, index);
  masm.branchTestMagic(Assembler::Equal, element, &notDense);

  bailout(lir->snapshot());

  masm.bind(&notDense);
}
void CodeGenerator::visitGuardIndexIsValidUpdateOrAdd(
    LGuardIndexIsValidUpdateOrAdd* lir) {
  // Bail out unless |index| may be updated or appended: either the array
  // length is writable, or the index is within the current length.
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  // Load obj->elements.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  Label success;

  // If length is writable, branch to &success.  All indices are writable.
  Address flags(temp, ObjectElements::offsetOfFlags());
  masm.branchTest32(Assembler::Zero, flags,
                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
                    &success);

  // Otherwise, ensure index is in bounds.
  Label bail;
  Address length(temp, ObjectElements::offsetOfLength());
  masm.spectreBoundsCheck32(index, length, spectreTemp, &bail);
  masm.bind(&success);

  bailoutFrom(&bail, lir->snapshot());
}
void CodeGenerator::visitCallAddOrUpdateSparseElement(
    LCallAddOrUpdateSparseElement* lir) {
  // Set a sparse element via the VM; |strict| selects strict-mode error
  // semantics. Arguments are pushed in reverse order of the Fn signature.
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  ValueOperand value = ToValue(lir, LCallAddOrUpdateSparseElement::ValueIndex);

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(value);
  pushArg(index);
  pushArg(object);

  using Fn =
      bool (*)(JSContext*, Handle<NativeObject*>, int32_t, HandleValue, bool);
  callVM<Fn, js::AddOrUpdateSparseElementHelper>(lir);
}
void CodeGenerator::visitCallGetSparseElement(LCallGetSparseElement* lir) {
  // Read a sparse element via the VM; the result comes back through the
  // MutableHandleValue out-param.
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());

  pushArg(index);
  pushArg(object);

  using Fn =
      bool (*)(JSContext*, Handle<NativeObject*>, int32_t, MutableHandleValue);
  callVM<Fn, js::GetSparseElementHelper>(lir);
}
void CodeGenerator::visitCallNativeGetElement(LCallNativeGetElement* lir) {
  // Generic element get via the VM; the receiver is the object itself,
  // boxed into a Value for the HandleValue parameter.
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());

  pushArg(index);
  pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
  pushArg(object);

  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
                      MutableHandleValue);
  callVM<Fn, js::NativeGetElement>(lir);
}
void CodeGenerator::visitCallNativeGetElementSuper(
    LCallNativeGetElementSuper* lir) {
  // Like visitCallNativeGetElement, but with an explicit |receiver| value
  // (the super-property receiver) instead of the object itself.
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  ValueOperand receiver =
      ToValue(lir, LCallNativeGetElementSuper::ReceiverIndex);

  pushArg(index);
  pushArg(receiver);
  pushArg(object);

  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
                      MutableHandleValue);
  callVM<Fn, js::NativeGetElement>(lir);
}
void CodeGenerator::visitCallObjectHasSparseElement(
    LCallObjectHasSparseElement* lir) {
  // Call HasNativeElementPure through the ABI, passing a stack slot for the
  // boolean result Value; bail out if the pure call fails.
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register output = ToRegister(lir->output());

  // Reserve a Value-sized stack slot as the out-param.
  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(temp1);

  using Fn = bool (*)(JSContext*, NativeObject*, int32_t, Value*);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(object);
  masm.passABIArg(index);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, HasNativeElementPure>();
  masm.storeCallPointerResult(temp0);

  Label bail, ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(temp0, &ok);
  // Failure path: pop the slot before bailing.
  masm.adjustStack(sizeof(Value));
  masm.jump(&bail);

  masm.bind(&ok);
  // Restore the frame-pushed bookkeeping clobbered by the failure path.
  masm.setFramePushed(framePushed);
  masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

  bailoutFrom(&bail, lir->snapshot());
}
void CodeGenerator::visitBigIntAsIntN(LBigIntAsIntN* ins) {
  // Generic BigInt.asIntN with a dynamic |bits| count: call into the VM.
  Register bits = ToRegister(ins->bits());
  Register input = ToRegister(ins->input());

  pushArg(bits);
  pushArg(input);

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  callVM<Fn, jit::BigIntAsIntN>(ins);
}
void CodeGenerator::visitBigIntAsIntN64(LBigIntAsIntN64* ins) {
  // BigInt.asIntN(64, input): reuse |input| when it already fits in an
  // int64 with the right sign; otherwise allocate a new BigInt from the
  // truncated 64-bit value.
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  // Optimistically return the input itself.
  masm.movePtr(input, output);

  // Load the BigInt value as an int64.
  masm.loadBigInt64(input, temp64);

  // Create a new BigInt when the input exceeds the int64 range.
  masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
                Imm32(64 / BigInt::DigitBits), &create);

  // And create a new BigInt when the value and the BigInt have different signs.
  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &create);
  masm.jump(&done);

  masm.bind(&nonNegative);
  masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &done);

  masm.bind(&create);
  emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);

  masm.bind(&done);
}
void CodeGenerator::visitBigIntAsIntN32(LBigIntAsIntN32* ins) {
  // BigInt.asIntN(32, input): reuse |input| when it already fits in an
  // int32; otherwise build a new BigInt from the sign-extended low 32 bits.
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  // Optimistically return the input itself.
  masm.movePtr(input, output);

  // Load the absolute value of the first digit.
  masm.loadFirstBigIntDigitOrZero(input, temp);

  // If the absolute value exceeds the int32 range, create a new BigInt.
  masm.branchPtr(Assembler::Above, temp, Imm32(INT32_MAX), &create);

  // Also create a new BigInt if we have more than one digit.
  masm.branch32(Assembler::BelowOrEqual,
                Address(input, BigInt::offsetOfLength()), Imm32(1), &done);

  masm.bind(&create);

  // |temp| stores the absolute value, negate it when the sign flag is set.
  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  masm.negPtr(temp);
  masm.bind(&nonNegative);

  masm.move32To64SignExtend(temp, temp64);
  emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);

  masm.bind(&done);
}
void CodeGenerator::visitBigIntAsUintN(LBigIntAsUintN* ins) {
  // Generic BigInt.asUintN with a dynamic |bits| count: call into the VM.
  Register bits = ToRegister(ins->bits());
  Register input = ToRegister(ins->input());

  pushArg(bits);
  pushArg(input);

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  callVM<Fn, jit::BigIntAsUintN>(ins);
}
void CodeGenerator::visitBigIntAsUintN64(LBigIntAsUintN64* ins) {
  // BigInt.asUintN(64, input): reuse |input| when it is already a
  // non-negative value within the uint64 range; otherwise allocate a new
  // BigInt from the truncated 64-bit value.
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  // Optimistically return the input itself.
  masm.movePtr(input, output);

  // Load the BigInt value as an uint64.
  masm.loadBigInt64(input, temp64);

  // Create a new BigInt when the input exceeds the uint64 range.
  masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
                Imm32(64 / BigInt::DigitBits), &create);

  // And create a new BigInt when the input has the sign flag set.
  masm.branchIfBigIntIsNonNegative(input, &done);

  masm.bind(&create);
  emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);

  masm.bind(&done);
}
void CodeGenerator::visitBigIntAsUintN32(LBigIntAsUintN32* ins) {
  // BigInt.asUintN(32, input): reuse |input| when it is already a
  // non-negative single-digit value within the uint32 range; otherwise
  // build a new BigInt from the zero-extended low 32 bits.
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  // Optimistically return the input itself.
  masm.movePtr(input, output);

  // Load the absolute value of the first digit.
  masm.loadFirstBigIntDigitOrZero(input, temp);

  // If the absolute value exceeds the uint32 range, create a new BigInt.
#if JS_PUNBOX64
  // (On 32-bit platforms a single digit can never exceed UINT32_MAX.)
  masm.branchPtr(Assembler::Above, temp, ImmWord(UINT32_MAX), &create);
#endif

  // Also create a new BigInt if we have more than one digit.
  masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
                Imm32(1), &create);

  // And create a new BigInt when the input has the sign flag set.
  masm.branchIfBigIntIsNonNegative(input, &done);

  masm.bind(&create);

  // |temp| stores the absolute value, negate it when the sign flag is set.
  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  masm.negPtr(temp);
  masm.bind(&nonNegative);

  masm.move32To64ZeroExtend(temp, temp64);
  emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);

  masm.bind(&done);
}
20437 void CodeGenerator::visitGuardNonGCThing(LGuardNonGCThing* ins) {
20438 ValueOperand input = ToValue(ins, LGuardNonGCThing::InputIndex);
20440 Label bail;
20441 masm.branchTestGCThing(Assembler::Equal, input, &bail);
20442 bailoutFrom(&bail, ins->snapshot());
void CodeGenerator::visitToHashableNonGCThing(LToHashableNonGCThing* ins) {
  // Normalize a non-GC-thing Value for hashing (e.g. for Map/Set keys).
  ValueOperand input = ToValue(ins, LToHashableNonGCThing::InputIndex);
  FloatRegister tempFloat = ToFloatRegister(ins->temp0());
  ValueOperand output = ToOutValue(ins);

  masm.toHashableNonGCThing(input, output, tempFloat);
}
void CodeGenerator::visitToHashableString(LToHashableString* ins) {
  // Atomize a string for hashing: atoms pass through, other strings first
  // try the atom cache and fall back to a VM call.
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  auto* ool = oolCallVM<Fn, js::AtomizeString>(ins, ArgList(input),
                                               StoreRegisterTo(output));

  Label isAtom;
  masm.branchTest32(Assembler::NonZero,
                    Address(input, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), &isAtom);

  // Not an atom: probe the per-zone atom cache before calling the VM.
  masm.lookupStringInAtomCacheLastLookups(input, output, output, ool->entry());
  masm.jump(ool->rejoin());
  masm.bind(&isAtom);
  masm.movePtr(input, output);
  masm.bind(ool->rejoin());
}
void CodeGenerator::visitToHashableValue(LToHashableValue* ins) {
  // Normalize an arbitrary Value for hashing; string inputs may need
  // atomizing, which is done by the out-of-line VM call.
  ValueOperand input = ToValue(ins, LToHashableValue::InputIndex);
  FloatRegister tempFloat = ToFloatRegister(ins->temp0());
  ValueOperand output = ToOutValue(ins);

  // The string to atomize is communicated via the output's scratch register.
  Register str = output.scratchReg();

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  auto* ool =
      oolCallVM<Fn, js::AtomizeString>(ins, ArgList(str), StoreRegisterTo(str));

  masm.toHashableValue(input, output, tempFloat, ool->entry(), ool->rejoin());
}
void CodeGenerator::visitHashNonGCThing(LHashNonGCThing* ins) {
  // Compute the hash of a non-GC-thing Value into |output|.
  ValueOperand input = ToValue(ins, LHashNonGCThing::InputIndex);
  Register temp = ToRegister(ins->temp0());
  Register output = ToRegister(ins->output());

  masm.prepareHashNonGCThing(input, output, temp);
}
void CodeGenerator::visitHashString(LHashString* ins) {
  // Compute the hash of a string into |output|.
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp0());
  Register output = ToRegister(ins->output());

  masm.prepareHashString(input, output, temp);
}
20503 void CodeGenerator::visitHashSymbol(LHashSymbol* ins) {
20504 Register input = ToRegister(ins->input());
20505 Register output = ToRegister(ins->output());
20507 masm.prepareHashSymbol(input, output);
void CodeGenerator::visitHashBigInt(LHashBigInt* ins) {
  // Compute the hash of a BigInt into |output|.
  Register input = ToRegister(ins->input());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  masm.prepareHashBigInt(input, output, temp0, temp1, temp2);
}
void CodeGenerator::visitHashObject(LHashObject* ins) {
  // Compute the hash of an object Value, relative to |setObj|, into
  // |output|.
  Register setObj = ToRegister(ins->setObject());
  ValueOperand input = ToValue(ins, LHashObject::InputIndex);
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  masm.prepareHashObject(setObj, input, output, temp0, temp1, temp2, temp3);
}
void CodeGenerator::visitHashValue(LHashValue* ins) {
  // Compute the hash of an arbitrary Value, relative to |setObj|, into
  // |output|.
  Register setObj = ToRegister(ins->setObject());
  ValueOperand input = ToValue(ins, LHashValue::InputIndex);
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  masm.prepareHashValue(setObj, input, output, temp0, temp1, temp2, temp3);
}
void CodeGenerator::visitSetObjectHasNonBigInt(LSetObjectHasNonBigInt* ins) {
  // Inline Set.prototype.has for non-BigInt keys, using the precomputed
  // |hash|.
  Register setObj = ToRegister(ins->setObject());
  ValueOperand input = ToValue(ins, LSetObjectHasNonBigInt::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register output = ToRegister(ins->output());

  masm.setObjectHasNonBigInt(setObj, input, hash, output, temp0, temp1);
}
void CodeGenerator::visitSetObjectHasBigInt(LSetObjectHasBigInt* ins) {
  // Inline Set.prototype.has for BigInt keys, using the precomputed |hash|.
  Register setObj = ToRegister(ins->setObject());
  ValueOperand input = ToValue(ins, LSetObjectHasBigInt::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  masm.setObjectHasBigInt(setObj, input, hash, output, temp0, temp1, temp2,
                          temp3);
}
void CodeGenerator::visitSetObjectHasValue(LSetObjectHasValue* ins) {
  // Inline Set.prototype.has for arbitrary Value keys, using the
  // precomputed |hash|.
  Register setObj = ToRegister(ins->setObject());
  ValueOperand input = ToValue(ins, LSetObjectHasValue::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  masm.setObjectHasValue(setObj, input, hash, output, temp0, temp1, temp2,
                         temp3);
}
void CodeGenerator::visitSetObjectHasValueVMCall(
    LSetObjectHasValueVMCall* ins) {
  // Fallback for Set.prototype.has: call into the VM.
  pushArg(ToValue(ins, LSetObjectHasValueVMCall::InputIndex));
  pushArg(ToRegister(ins->setObject()));

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  callVM<Fn, jit::SetObjectHas>(ins);
}
20592 void CodeGenerator::visitSetObjectSize(LSetObjectSize* ins) {
20593 Register setObj = ToRegister(ins->setObject());
20594 Register output = ToRegister(ins->output());
20596 masm.loadSetObjectSize(setObj, output);
void CodeGenerator::visitMapObjectHasNonBigInt(LMapObjectHasNonBigInt* ins) {
  // Inline Map.prototype.has for non-BigInt keys, using the precomputed
  // |hash|.
  Register mapObj = ToRegister(ins->mapObject());
  ValueOperand input = ToValue(ins, LMapObjectHasNonBigInt::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register output = ToRegister(ins->output());

  masm.mapObjectHasNonBigInt(mapObj, input, hash, output, temp0, temp1);
}
void CodeGenerator::visitMapObjectHasBigInt(LMapObjectHasBigInt* ins) {
  // Inline Map.prototype.has for BigInt keys, using the precomputed |hash|.
  Register mapObj = ToRegister(ins->mapObject());
  ValueOperand input = ToValue(ins, LMapObjectHasBigInt::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  masm.mapObjectHasBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
                          temp3);
}
void CodeGenerator::visitMapObjectHasValue(LMapObjectHasValue* ins) {
  // Inline Map.prototype.has for arbitrary Value keys, using the
  // precomputed |hash|.
  Register mapObj = ToRegister(ins->mapObject());
  ValueOperand input = ToValue(ins, LMapObjectHasValue::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  masm.mapObjectHasValue(mapObj, input, hash, output, temp0, temp1, temp2,
                         temp3);
}
void CodeGenerator::visitMapObjectHasValueVMCall(
    LMapObjectHasValueVMCall* ins) {
  // Fallback for Map.prototype.has: call into the VM.
  pushArg(ToValue(ins, LMapObjectHasValueVMCall::InputIndex));
  pushArg(ToRegister(ins->mapObject()));

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  callVM<Fn, jit::MapObjectHas>(ins);
}
void CodeGenerator::visitMapObjectGetNonBigInt(LMapObjectGetNonBigInt* ins) {
  // Inline Map.prototype.get for non-BigInt keys, using the precomputed
  // |hash|. The output's scratch register doubles as an extra temp.
  Register mapObj = ToRegister(ins->mapObject());
  ValueOperand input = ToValue(ins, LMapObjectGetNonBigInt::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  ValueOperand output = ToOutValue(ins);

  masm.mapObjectGetNonBigInt(mapObj, input, hash, output, temp0, temp1,
                             output.scratchReg());
}
20659 void CodeGenerator::visitMapObjectGetBigInt(LMapObjectGetBigInt* ins) {
20660 Register mapObj = ToRegister(ins->mapObject());
20661 ValueOperand input = ToValue(ins, LMapObjectGetBigInt::InputIndex);
20662 Register hash = ToRegister(ins->hash());
20663 Register temp0 = ToRegister(ins->temp0());
20664 Register temp1 = ToRegister(ins->temp1());
20665 Register temp2 = ToRegister(ins->temp2());
20666 Register temp3 = ToRegister(ins->temp3());
20667 ValueOperand output = ToOutValue(ins);
20669 masm.mapObjectGetBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
20670 temp3, output.scratchReg());
20673 void CodeGenerator::visitMapObjectGetValue(LMapObjectGetValue* ins) {
20674 Register mapObj = ToRegister(ins->mapObject());
20675 ValueOperand input = ToValue(ins, LMapObjectGetValue::InputIndex);
20676 Register hash = ToRegister(ins->hash());
20677 Register temp0 = ToRegister(ins->temp0());
20678 Register temp1 = ToRegister(ins->temp1());
20679 Register temp2 = ToRegister(ins->temp2());
20680 Register temp3 = ToRegister(ins->temp3());
20681 ValueOperand output = ToOutValue(ins);
20683 masm.mapObjectGetValue(mapObj, input, hash, output, temp0, temp1, temp2,
20684 temp3, output.scratchReg());
20687 void CodeGenerator::visitMapObjectGetValueVMCall(
20688 LMapObjectGetValueVMCall* ins) {
20689 pushArg(ToValue(ins, LMapObjectGetValueVMCall::InputIndex));
20690 pushArg(ToRegister(ins->mapObject()));
20692 using Fn =
20693 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
20694 callVM<Fn, jit::MapObjectGet>(ins);
20697 void CodeGenerator::visitMapObjectSize(LMapObjectSize* ins) {
20698 Register mapObj = ToRegister(ins->mapObject());
20699 Register output = ToRegister(ins->output());
20701 masm.loadMapObjectSize(mapObj, output);
// Emit a direct call from Ion-compiled JS into a wasm function, bypassing
// the generic JS→wasm entry stub. Register arguments have already been
// placed by the register allocator at the wasm-ABI-mandated locations
// (asserted below); for stack arguments this function only records where
// each value currently lives, and GenerateDirectCallFromJit copies them
// into the outgoing frame. NumDefs is the number of result registers the
// LIR node defines.
template <size_t NumDefs>
void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
  wasm::JitCallStackArgVector stackArgs;
  masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
  if (masm.oom()) {
    return;
  }

  MIonToWasmCall* mir = lir->mir();
  const wasm::FuncExport& funcExport = mir->funcExport();
  const wasm::FuncType& sig =
      mir->instance()->metadata().getFuncExportType(funcExport);

  // Walk the signature in ABI order, checking register placements and
  // collecting the current location of every stack argument.
  WasmABIArgGenerator abi;
  for (size_t i = 0; i < lir->numOperands(); i++) {
    MIRType argMir;
    switch (sig.args()[i].kind()) {
      case wasm::ValType::I32:
      case wasm::ValType::I64:
      case wasm::ValType::F32:
      case wasm::ValType::F64:
        argMir = sig.args()[i].toMIRType();
        break;
      case wasm::ValType::V128:
        MOZ_CRASH("unexpected argument type when calling from ion to wasm");
      case wasm::ValType::Ref:
        // temporarilyUnsupportedReftypeForEntry() restricts args to externref
        MOZ_RELEASE_ASSERT(sig.args()[i].refType().isExtern());
        // Argument is boxed on the JS side to an anyref, so passed as a
        // pointer here.
        argMir = sig.args()[i].toMIRType();
        break;
    }

    ABIArg arg = abi.next(argMir);
    switch (arg.kind()) {
      case ABIArg::GPR:
      case ABIArg::FPU: {
        // Register argument: already in the right place. Push an empty
        // placeholder so stackArgs stays parallel to the operand list.
        MOZ_ASSERT(ToAnyRegister(lir->getOperand(i)) == arg.reg());
        stackArgs.infallibleEmplaceBack(wasm::JitCallStackArg());
        break;
      }
      case ABIArg::Stack: {
        // Record wherever the allocator left the value: an immediate, a
        // register, or a stack slot.
        const LAllocation* larg = lir->getOperand(i);
        if (larg->isConstant()) {
          stackArgs.infallibleEmplaceBack(ToInt32(larg));
        } else if (larg->isGeneralReg()) {
          stackArgs.infallibleEmplaceBack(ToRegister(larg));
        } else if (larg->isFloatReg()) {
          stackArgs.infallibleEmplaceBack(ToFloatRegister(larg));
        } else {
          // Always use the stack pointer here because GenerateDirectCallFromJit
          // depends on this.
          Address addr = ToAddress<BaseRegForAddress::SP>(larg);
          stackArgs.infallibleEmplaceBack(addr);
        }
        break;
      }
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR: {
        MOZ_CRASH(
            "no way to pass i64, and wasm uses hardfp for function calls");
      }
#endif
      case ABIArg::Uninitialized: {
        MOZ_CRASH("Uninitialized ABIArg kind");
      }
    }
  }

  // Sanity-check that the single supported result (if any) lands in the
  // fixed wasm return register for its type. A void wasm function is
  // reflected as a Value on the JS side (presumably undefined).
  const wasm::ValTypeVector& results = sig.results();
  if (results.length() == 0) {
    MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
  } else {
    MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
    switch (results[0].kind()) {
      case wasm::ValType::I32:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Int32);
        MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
        break;
      case wasm::ValType::I64:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
        MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
        break;
      case wasm::ValType::F32:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Float32);
        MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnFloat32Reg);
        break;
      case wasm::ValType::F64:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
        MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
        break;
      case wasm::ValType::V128:
        MOZ_CRASH("unexpected return type when calling from ion to wasm");
      case wasm::ValType::Ref:
        // The wasm stubs layer unboxes anything that needs to be unboxed
        // and leaves it in a Value. A FuncRef/EqRef we could in principle
        // leave it as a raw object pointer but for now it complicates the
        // API to do so.
        MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
        break;
    }
  }

  WasmInstanceObject* instObj = lir->mir()->instanceObject();

  Register scratch = ToRegister(lir->temp());

  uint32_t callOffset;
  ensureOsiSpace();
  GenerateDirectCallFromJit(masm, funcExport, instObj->instance(), stackArgs,
                            scratch, &callOffset);

  // Add the instance object to the constant pool, so it is transferred to
  // the owning IonScript and so that it gets traced as long as the IonScript
  // lives.
  uint32_t unused;
  masm.propagateOOM(graph.addConstantToPool(ObjectValue(*instObj), &unused));

  markSafepointAt(callOffset, lir);
}
void CodeGenerator::visitIonToWasmCall(LIonToWasmCall* lir) {
  // Thin wrapper: all work happens in the shared emitter, templated on the
  // number of result registers this LIR node defines.
  emitIonToWasmCallBase(lir);
}
void CodeGenerator::visitIonToWasmCallV(LIonToWasmCallV* lir) {
  // Thin wrapper for the Value-result variant; shares the templated emitter.
  emitIonToWasmCallBase(lir);
}
void CodeGenerator::visitIonToWasmCallI64(LIonToWasmCallI64* lir) {
  // Thin wrapper for the int64-result variant; shares the templated emitter.
  emitIonToWasmCallBase(lir);
}
20837 void CodeGenerator::visitWasmNullConstant(LWasmNullConstant* lir) {
20838 masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
void CodeGenerator::visitWasmFence(LWasmFence* lir) {
  // Emit a full memory barrier for wasm's atomic.fence instruction;
  // this node only appears in wasm compilations.
  MOZ_ASSERT(gen->compilingWasm());
  masm.memoryBarrier(MembarFull);
}
20846 void CodeGenerator::visitWasmAnyRefFromJSValue(LWasmAnyRefFromJSValue* lir) {
20847 ValueOperand input = ToValue(lir, LWasmAnyRefFromJSValue::InputIndex);
20848 Register output = ToRegister(lir->output());
20849 FloatRegister tempFloat = ToFloatRegister(lir->temp0());
20851 using Fn = JSObject* (*)(JSContext* cx, HandleValue value);
20852 OutOfLineCode* oolBoxValue = oolCallVM<Fn, wasm::AnyRef::boxValue>(
20853 lir, ArgList(input), StoreRegisterTo(output));
20854 masm.convertValueToWasmAnyRef(input, output, tempFloat, oolBoxValue->entry());
20855 masm.bind(oolBoxValue->rejoin());
20858 void CodeGenerator::visitWasmAnyRefFromJSObject(LWasmAnyRefFromJSObject* lir) {
20859 Register input = ToRegister(lir->input());
20860 Register output = ToRegister(lir->output());
20861 masm.convertObjectToWasmAnyRef(input, output);
20864 void CodeGenerator::visitWasmAnyRefFromJSString(LWasmAnyRefFromJSString* lir) {
20865 Register input = ToRegister(lir->input());
20866 Register output = ToRegister(lir->output());
20867 masm.convertStringToWasmAnyRef(input, output);
20870 void CodeGenerator::visitWasmNewI31Ref(LWasmNewI31Ref* lir) {
20871 if (lir->value()->isConstant()) {
20872 // i31ref are often created with constants. If that's the case we will
20873 // do the operation statically here. This is similar to what is done
20874 // in masm.truncate32ToWasmI31Ref.
20875 Register output = ToRegister(lir->output());
20876 uint32_t value =
20877 static_cast<uint32_t>(lir->value()->toConstant()->toInt32());
20878 uintptr_t ptr = wasm::AnyRef::fromUint32Truncate(value).rawValue();
20879 masm.movePtr(ImmWord(ptr), output);
20880 } else {
20881 Register value = ToRegister(lir->value());
20882 Register output = ToRegister(lir->output());
20883 masm.truncate32ToWasmI31Ref(value, output);
20887 void CodeGenerator::visitWasmI31RefGet(LWasmI31RefGet* lir) {
20888 Register value = ToRegister(lir->value());
20889 Register output = ToRegister(lir->output());
20890 if (lir->mir()->wideningOp() == wasm::FieldWideningOp::Signed) {
20891 masm.convertWasmI31RefTo32Signed(value, output);
20892 } else {
20893 masm.convertWasmI31RefTo32Unsigned(value, output);
20897 #ifdef FUZZING_JS_FUZZILLI
// Fold a double's raw bit pattern into a 32-bit hash: the result in
// |output| is (low 32 bits + high 32 bits) of the IEEE-754 representation.
// |scratch| is clobbered. On 32-bit targets the two GPR halves of a
// Register64 are used directly; on 64-bit the bits are shifted apart first.
void CodeGenerator::emitFuzzilliHashDouble(FloatRegister floatDouble,
                                           Register scratch, Register output) {
# ifdef JS_PUNBOX64
  Register64 reg64_1(scratch);
  Register64 reg64_2(output);
  // scratch = full 64-bit pattern; output = high 32 bits after the shift.
  masm.moveDoubleToGPR64(floatDouble, reg64_1);
  masm.move64(reg64_1, reg64_2);
  masm.rshift64(Imm32(32), reg64_2);
  // 32-bit add: output = hi32 + lo32 (only scratch's low half contributes).
  masm.add32(scratch, output);
# else
  // Register64 pair: scratch holds the high word, output the low word.
  Register64 reg64(scratch, output);
  masm.moveDoubleToGPR64(floatDouble, reg64);
  masm.add32(scratch, output);
# endif
}
20914 void CodeGenerator::emitFuzzilliHashObject(LInstruction* lir, Register obj,
20915 Register output) {
20916 using Fn = void (*)(JSContext* cx, JSObject* obj, uint32_t* out);
20917 OutOfLineCode* ool = oolCallVM<Fn, FuzzilliHashObjectInl>(
20918 lir, ArgList(obj), StoreRegisterTo(output));
20920 masm.jump(ool->entry());
20921 masm.bind(ool->rejoin());
20924 void CodeGenerator::emitFuzzilliHashBigInt(Register bigInt, Register output) {
20925 LiveRegisterSet volatileRegs(GeneralRegisterSet::All(),
20926 FloatRegisterSet::All());
20927 volatileRegs.takeUnchecked(output);
20928 masm.PushRegsInMask(volatileRegs);
20930 using Fn = uint32_t (*)(BigInt* bigInt);
20931 masm.setupUnalignedABICall(output);
20932 masm.passABIArg(bigInt);
20933 masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
20934 masm.storeCallInt32Result(output);
20936 masm.PopRegsInMask(volatileRegs);
// Hash a boxed Value of unknown type for Fuzzilli coverage feedback.
// Dispatch on the value's tag: BigInt and Object take dedicated hash paths;
// Int32/Null/Undefined/Boolean are first mapped to a double (null -> 1.0,
// undefined -> 2.0, boolean -> 3 + b) and hashed via emitFuzzilliHashDouble;
// any remaining tag hashes to 0.
void CodeGenerator::visitFuzzilliHashV(LFuzzilliHashV* ins) {
  MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Value);

  ValueOperand value = ToValue(ins, 0);

  Label isDouble, isObject, isBigInt, done;

  FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
  Register scratch = ToRegister(ins->getTemp(0));
  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

# ifdef JS_PUNBOX64
  // On punbox64 the tag shares getTemp(0) with |scratch|; that is safe
  // because each branch below only overwrites |scratch| immediately before
  // jumping away from the tag-dispatch chain.
  Register tagReg = ToRegister(ins->getTemp(0));
  masm.splitTag(value, tagReg);
# else
  Register tagReg = value.typeReg();
# endif

  Label noBigInt;
  masm.branchTestBigInt(Assembler::NotEqual, tagReg, &noBigInt);
  masm.unboxBigInt(value, scratch);
  masm.jump(&isBigInt);
  masm.bind(&noBigInt);

  Label noObject;
  masm.branchTestObject(Assembler::NotEqual, tagReg, &noObject);
  masm.unboxObject(value, scratch);
  masm.jump(&isObject);
  masm.bind(&noObject);

  Label noInt32;
  masm.branchTestInt32(Assembler::NotEqual, tagReg, &noInt32);
  masm.unboxInt32(value, scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noInt32);

  Label noNull;
  masm.branchTestNull(Assembler::NotEqual, tagReg, &noNull);
  masm.move32(Imm32(1), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noNull);

  Label noUndefined;
  masm.branchTestUndefined(Assembler::NotEqual, tagReg, &noUndefined);
  masm.move32(Imm32(2), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noUndefined);

  Label noBoolean;
  masm.branchTestBoolean(Assembler::NotEqual, tagReg, &noBoolean);
  masm.unboxBoolean(value, scratch);
  masm.add32(Imm32(3), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noBoolean);

  Label noDouble;
  masm.branchTestDouble(Assembler::NotEqual, tagReg, &noDouble);
  masm.unboxDouble(value, scratchFloat);
  masm.canonicalizeDoubleIfDeterministic(scratchFloat);

  masm.jump(&isDouble);
  masm.bind(&noDouble);
  // Fallthrough for any tag not handled above: hash to 0.
  masm.move32(Imm32(0), output);
  masm.jump(&done);

  masm.bind(&isBigInt);
  emitFuzzilliHashBigInt(scratch, output);
  masm.jump(&done);

  masm.bind(&isObject);
  emitFuzzilliHashObject(ins, scratch, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  emitFuzzilliHashDouble(scratchFloat, scratch, output);

  masm.bind(&done);
}
21023 void CodeGenerator::visitFuzzilliHashT(LFuzzilliHashT* ins) {
21024 const LAllocation* value = ins->value();
21025 MIRType mirType = ins->mir()->getOperand(0)->type();
21027 FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
21028 Register scratch = ToRegister(ins->getTemp(0));
21029 Register output = ToRegister(ins->output());
21030 MOZ_ASSERT(scratch != output);
21032 if (mirType == MIRType::Object) {
21033 MOZ_ASSERT(value->isGeneralReg());
21034 masm.mov(value->toGeneralReg()->reg(), scratch);
21035 emitFuzzilliHashObject(ins, scratch, output);
21036 } else if (mirType == MIRType::BigInt) {
21037 MOZ_ASSERT(value->isGeneralReg());
21038 masm.mov(value->toGeneralReg()->reg(), scratch);
21039 emitFuzzilliHashBigInt(scratch, output);
21040 } else if (mirType == MIRType::Double) {
21041 MOZ_ASSERT(value->isFloatReg());
21042 masm.moveDouble(value->toFloatReg()->reg(), scratchFloat);
21043 masm.canonicalizeDoubleIfDeterministic(scratchFloat);
21044 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21045 } else if (mirType == MIRType::Float32) {
21046 MOZ_ASSERT(value->isFloatReg());
21047 masm.convertFloat32ToDouble(value->toFloatReg()->reg(), scratchFloat);
21048 masm.canonicalizeDoubleIfDeterministic(scratchFloat);
21049 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21050 } else if (mirType == MIRType::Int32) {
21051 MOZ_ASSERT(value->isGeneralReg());
21052 masm.mov(value->toGeneralReg()->reg(), scratch);
21053 masm.convertInt32ToDouble(scratch, scratchFloat);
21054 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21055 } else if (mirType == MIRType::Null) {
21056 MOZ_ASSERT(value->isBogus());
21057 masm.move32(Imm32(1), scratch);
21058 masm.convertInt32ToDouble(scratch, scratchFloat);
21059 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21060 } else if (mirType == MIRType::Undefined) {
21061 MOZ_ASSERT(value->isBogus());
21062 masm.move32(Imm32(2), scratch);
21063 masm.convertInt32ToDouble(scratch, scratchFloat);
21064 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21065 } else if (mirType == MIRType::Boolean) {
21066 MOZ_ASSERT(value->isGeneralReg());
21067 masm.mov(value->toGeneralReg()->reg(), scratch);
21068 masm.add32(Imm32(3), scratch);
21069 masm.convertInt32ToDouble(scratch, scratchFloat);
21070 emitFuzzilliHashDouble(scratchFloat, scratch, output);
21071 } else {
21072 MOZ_CRASH("unexpected type");
21076 void CodeGenerator::visitFuzzilliHashStore(LFuzzilliHashStore* ins) {
21077 const LAllocation* value = ins->value();
21078 MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Int32);
21079 MOZ_ASSERT(value->isGeneralReg());
21081 Register scratchJSContext = ToRegister(ins->getTemp(0));
21082 Register scratch = ToRegister(ins->getTemp(1));
21084 masm.loadJSContext(scratchJSContext);
21086 // stats
21087 Address addrExecHashInputs(scratchJSContext,
21088 offsetof(JSContext, executionHashInputs));
21089 masm.load32(addrExecHashInputs, scratch);
21090 masm.add32(Imm32(1), scratch);
21091 masm.store32(scratch, addrExecHashInputs);
21093 Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
21094 masm.load32(addrExecHash, scratch);
21095 masm.add32(value->toGeneralReg()->reg(), scratch);
21096 masm.rotateLeft(Imm32(1), scratch, scratch);
21097 masm.store32(scratch, addrExecHash);
21099 #endif
21101 static_assert(!std::is_polymorphic_v<CodeGenerator>,
21102 "CodeGenerator should not have any virtual methods");
21104 } // namespace jit
21105 } // namespace js