Backed out 22 changesets (bug 1839396) for causing build bustages on js/Printer.h...
[gecko.git] / js / src / jit / CodeGenerator.cpp
blobfd56c866cd33d0352ede93d47b82301b263f7f42
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/CodeGenerator.h"
9 #include "mozilla/Assertions.h"
10 #include "mozilla/Casting.h"
11 #include "mozilla/DebugOnly.h"
12 #include "mozilla/EndianUtils.h"
13 #include "mozilla/EnumeratedArray.h"
14 #include "mozilla/EnumeratedRange.h"
15 #include "mozilla/EnumSet.h"
16 #include "mozilla/IntegerTypeTraits.h"
17 #include "mozilla/Latin1.h"
18 #include "mozilla/MathAlgorithms.h"
19 #include "mozilla/ScopeExit.h"
21 #include <limits>
22 #include <type_traits>
23 #include <utility>
25 #include "jslibmath.h"
26 #include "jsmath.h"
27 #include "jsnum.h"
29 #include "builtin/MapObject.h"
30 #include "builtin/RegExp.h"
31 #include "builtin/String.h"
32 #include "irregexp/RegExpTypes.h"
33 #include "jit/ABIArgGenerator.h"
34 #include "jit/CompileInfo.h"
35 #include "jit/InlineScriptTree.h"
36 #include "jit/Invalidation.h"
37 #include "jit/IonGenericCallStub.h"
38 #include "jit/IonIC.h"
39 #include "jit/IonScript.h"
40 #include "jit/JitcodeMap.h"
41 #include "jit/JitFrames.h"
42 #include "jit/JitRuntime.h"
43 #include "jit/JitSpewer.h"
44 #include "jit/JitZone.h"
45 #include "jit/Linker.h"
46 #include "jit/MIRGenerator.h"
47 #include "jit/MoveEmitter.h"
48 #include "jit/RangeAnalysis.h"
49 #include "jit/RegExpStubConstants.h"
50 #include "jit/SafepointIndex.h"
51 #include "jit/SharedICHelpers.h"
52 #include "jit/SharedICRegisters.h"
53 #include "jit/VMFunctions.h"
54 #include "jit/WarpSnapshot.h"
55 #include "js/ColumnNumber.h" // JS::LimitedColumnNumberZeroOrigin
56 #include "js/experimental/JitInfo.h" // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
57 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
58 #include "js/RegExpFlags.h" // JS::RegExpFlag
59 #include "js/ScalarType.h" // js::Scalar::Type
60 #include "proxy/DOMProxy.h"
61 #include "proxy/ScriptedProxyHandler.h"
62 #include "util/CheckedArithmetic.h"
63 #include "util/Unicode.h"
64 #include "vm/ArrayBufferViewObject.h"
65 #include "vm/AsyncFunction.h"
66 #include "vm/AsyncIteration.h"
67 #include "vm/BuiltinObjectKind.h"
68 #include "vm/FunctionFlags.h" // js::FunctionFlags
69 #include "vm/Interpreter.h"
70 #include "vm/JSAtomUtils.h" // AtomizeString
71 #include "vm/MatchPairs.h"
72 #include "vm/RegExpObject.h"
73 #include "vm/RegExpStatics.h"
74 #include "vm/StaticStrings.h"
75 #include "vm/StringObject.h"
76 #include "vm/StringType.h"
77 #include "vm/TypedArrayObject.h"
78 #include "wasm/WasmCodegenConstants.h"
79 #include "wasm/WasmValType.h"
80 #ifdef MOZ_VTUNE
81 # include "vtune/VTuneWrapper.h"
82 #endif
83 #include "wasm/WasmBinary.h"
84 #include "wasm/WasmGC.h"
85 #include "wasm/WasmGcObject.h"
86 #include "wasm/WasmStubs.h"
88 #include "builtin/Boolean-inl.h"
89 #include "jit/MacroAssembler-inl.h"
90 #include "jit/shared/CodeGenerator-shared-inl.h"
91 #include "jit/TemplateObject-inl.h"
92 #include "jit/VMFunctionList-inl.h"
93 #include "vm/JSScript-inl.h"
94 #include "wasm/WasmInstance-inl.h"
96 using namespace js;
97 using namespace js::jit;
99 using JS::GenericNaN;
100 using mozilla::AssertedCast;
101 using mozilla::DebugOnly;
102 using mozilla::FloatingPoint;
103 using mozilla::Maybe;
104 using mozilla::NegativeInfinity;
105 using mozilla::PositiveInfinity;
107 using JS::ExpandoAndGeneration;
109 namespace js {
110 namespace jit {
112 #ifdef CHECK_OSIPOINT_REGISTERS
// Apply |op| to every live register paired with its saved slot inside the
// JitActivation's RegisterDump (at offsetOfRegs() + per-register offset).
// |op| is StoreOp (snapshot registers) or VerifyOp (compare against the
// snapshot); see below. Only compiled under CHECK_OSIPOINT_REGISTERS.
113 template <class Op>
114 static void HandleRegisterDump(Op op, MacroAssembler& masm,
115 LiveRegisterSet liveRegs, Register activation,
116 Register scratch) {
117 const size_t baseOffset = JitActivation::offsetOfRegs();
119 // Handle live GPRs.
120 for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
121 Register reg = *iter;
122 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
124 if (reg == activation) {
125 // To use the original value of the activation register (that's
126 // now on top of the stack), we need the scratch register.
127 masm.push(scratch);
128 masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
129 op(scratch, dump);
130 masm.pop(scratch);
131 } else {
132 op(reg, dump);
136 // Handle live FPRs.
137 for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
138 FloatRegister reg = *iter;
139 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
140 op(reg, dump);
// StoreOp: writes each register's current value into its RegisterDump slot.
// Used via HandleRegisterDump to snapshot live registers before a VM call.
144 class StoreOp {
145 MacroAssembler& masm;
147 public:
148 explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
150 void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
151 void operator()(FloatRegister reg, Address dump) {
152 if (reg.isDouble()) {
153 masm.storeDouble(reg, dump);
154 } else if (reg.isSingle()) {
155 masm.storeFloat32(reg, dump);
156 } else if (reg.isSimd128()) {
// SIMD registers are not expected to be live across VM calls here.
157 MOZ_CRASH("Unexpected case for SIMD");
158 } else {
159 MOZ_CRASH("Unexpected register type.");
// VerifyOp: compares each register's current value against its RegisterDump
// slot and branches to |failure_| on any mismatch. Counterpart of StoreOp.
164 class VerifyOp {
165 MacroAssembler& masm;
166 Label* failure_;
168 public:
169 VerifyOp(MacroAssembler& masm, Label* failure)
170 : masm(masm), failure_(failure) {}
172 void operator()(Register reg, Address dump) {
173 masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
175 void operator()(FloatRegister reg, Address dump) {
176 if (reg.isDouble()) {
177 ScratchDoubleScope scratch(masm);
178 masm.loadDouble(dump, scratch);
179 masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
180 } else if (reg.isSingle()) {
181 ScratchFloat32Scope scratch(masm);
182 masm.loadFloat32(dump, scratch);
// branchFloat also takes a DoubleCondition, hence DoubleNotEqual here.
183 masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
184 } else if (reg.isSimd128()) {
185 MOZ_CRASH("Unexpected case for SIMD");
186 } else {
187 MOZ_CRASH("Unexpected register type.");
// Emit code that, at an OsiPoint, verifies the live registers recorded by
// StoreAllLiveRegs were not modified between the VM call and this point.
192 void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
193 // Ensure the live registers stored by callVM did not change between
194 // the call and this OsiPoint. Try-catch relies on this invariant.
196 // Load pointer to the JitActivation in a scratch register.
197 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
198 Register scratch = allRegs.takeAny();
199 masm.push(scratch);
200 masm.loadJitActivation(scratch);
202 // If we should not check registers (because the instruction did not call
203 // into the VM, or a GC happened), we're done.
204 Label failure, done;
205 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
206 masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
208 // Having more than one VM function call made in one visit function at
209 // runtime is a security-critical error, because if we conservatively assume
210 // that one of the function calls can re-enter Ion, then the invalidation
211 // process will potentially add a call at a random location, by patching the
212 // code before the return address.
213 masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
215 // Set checkRegs to 0, so that we don't try to verify registers after we
216 // return from this script to the caller.
217 masm.store32(Imm32(0), checkRegs);
219 // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
220 // temps after calling into the VM. This is fine because no other
221 // instructions (including this OsiPoint) will depend on them. Also
222 // backtracking can use the same register for an input and an output.
223 // These are marked as clobbered and shouldn't get checked.
224 LiveRegisterSet liveRegs;
225 liveRegs.set() = RegisterSet::Intersect(
226 safepoint->liveRegs().set(),
227 RegisterSet::Not(safepoint->clobberedRegs().set()));
229 VerifyOp op(masm, &failure);
230 HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
232 masm.jump(&done);
234 // Do not profile the callWithABI that occurs below. This is to avoid a
235 // rare corner case that occurs when profiling interacts with itself:
237 // When slow profiling assertions are turned on, FunctionBoundary ops
238 // (which update the profiler pseudo-stack) may emit a callVM, which
239 // forces them to have an osi point associated with them. The
240 // FunctionBoundary for inline function entry is added to the caller's
241 // graph with a PC from the caller's code, but during codegen it modifies
242 // Gecko Profiler instrumentation to add the callee as the current top-most
243 // script. When codegen gets to the OSIPoint, and the callWithABI below is
244 // emitted, the codegen thinks that the current frame is the callee, but
245 // the PC it's using from the OSIPoint refers to the caller. This causes
246 // the profiler instrumentation of the callWithABI below to ASSERT, since
247 // the script and pc are mismatched. To avoid this, we simply omit
248 // instrumentation for these callWithABIs.
250 // Any live register captured by a safepoint (other than temp registers)
251 // must remain unchanged between the call and the OsiPoint instruction.
252 masm.bind(&failure);
253 masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
255 masm.bind(&done);
256 masm.pop(scratch);
// Returns true if |safepoint|'s registers should be verified at the next
// OsiPoint: the checkOsiPointRegisters flag must be on and at least one
// general or float register must be live across the call.
259 bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
260 if (!checkOsiPointRegisters) {
261 return false;
264 if (safepoint->liveRegs().emptyGeneral() &&
265 safepoint->liveRegs().emptyFloat()) {
266 return false; // No registers to check.
269 return true;
// Emit code that clears the JitActivation's checkRegs counter so register
// verification is skipped; a subsequent VM call sets it back to 1.
272 void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
273 if (!shouldVerifyOsiPointRegs(safepoint)) {
274 return;
277 // Set checkRegs to 0. If we perform a VM call, the instruction
278 // will set it to 1.
279 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
280 Register scratch = allRegs.takeAny();
281 masm.push(scratch);
282 masm.loadJitActivation(scratch);
283 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
284 masm.store32(Imm32(0), checkRegs);
285 masm.pop(scratch);
// Emit code that snapshots all live registers into the JitActivation's
// RegisterDump and bumps the checkRegs counter; verifyOsiPointRegs later
// checks the snapshot and asserts the counter is exactly 1.
288 static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
289 // Store a copy of all live registers before performing the call.
290 // When we reach the OsiPoint, we can use this to check nothing
291 // modified them in the meantime.
293 // Load pointer to the JitActivation in a scratch register.
294 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
295 Register scratch = allRegs.takeAny();
296 masm.push(scratch);
297 masm.loadJitActivation(scratch);
299 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
// add32 (not store32) so multiple VM calls per visit function are detected.
300 masm.add32(Imm32(1), checkRegs);
302 StoreOp op(masm);
303 HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
305 masm.pop(scratch);
307 #endif // CHECK_OSIPOINT_REGISTERS
309 // Before doing any call to Cpp, you should ensure that volatile
310 // registers are evicted by the register allocator.
// Emit a call to the VM wrapper for |id|. Arguments must already have been
// pushed (pushArg); the wrapper pops them and the exit frame. |ins| supplies
// the safepoint recorded at the call offset.
311 void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins) {
312 TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
313 const VMFunctionData& fun = GetVMFunction(id);
315 // Stack is:
316 // ... frame ...
317 // [args]
318 #ifdef DEBUG
// All explicit arguments must have been pushed before getting here.
319 MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
320 pushedArgs_ = 0;
321 #endif
323 #ifdef CHECK_OSIPOINT_REGISTERS
324 if (shouldVerifyOsiPointRegs(ins->safepoint())) {
325 StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
327 #endif
329 #ifdef DEBUG
330 if (ins->mirRaw()) {
331 MOZ_ASSERT(ins->mirRaw()->isInstruction());
332 MInstruction* mir = ins->mirRaw()->toInstruction();
333 MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
335 // If this MIR instruction has an overridden AliasSet, set the JitRuntime's
336 // disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
337 // RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
338 // interrupt callbacks can call JS (chrome JS or shell testing functions).
339 bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
340 if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
341 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
// ReturnReg is usable as a scratch here: it is clobbered by the call anyway.
342 masm.move32(Imm32(1), ReturnReg);
343 masm.store32(ReturnReg, AbsoluteAddress(addr));
346 #endif
348 // Push an exit frame descriptor.
349 masm.PushFrameDescriptor(FrameType::IonJS);
351 // Call the wrapper function. The wrapper is in charge to unwind the stack
352 // when returning from the call. Failures are handled with exceptions based
353 // on the return value of the C functions. To guard the outcome of the
354 // returned value, use another LIR instruction.
355 ensureOsiSpace();
356 uint32_t callOffset = masm.callJit(code);
357 markSafepointAt(callOffset, ins);
359 #ifdef DEBUG
360 // Reset the disallowArbitraryCode flag after the call.
362 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
// Preserve the call's return value while using ReturnReg as a scratch.
363 masm.push(ReturnReg);
364 masm.move32(Imm32(0), ReturnReg);
365 masm.store32(ReturnReg, AbsoluteAddress(addr));
366 masm.pop(ReturnReg);
368 #endif
370 // Pop rest of the exit frame and the arguments left on the stack.
371 int framePop =
372 sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
373 masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
375 // Stack is:
376 // ... frame ...
// Thin wrapper: maps the <Fn, fn> pair to its VMFunctionId at compile time
// and delegates to callVMInternal.
379 template <typename Fn, Fn fn>
380 void CodeGenerator::callVM(LInstruction* ins) {
381 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
382 callVMInternal(id, ins);
385 // ArgSeq store arguments for OutOfLineCallVM.
387 // OutOfLineCallVM are created with "oolCallVM" function. The third argument of
388 // this function is an instance of a class which provides a "generate" in charge
389 // of pushing the argument, with "pushArg", for a VMFunction.
391 // Such list of arguments can be created by using the "ArgList" function which
392 // creates one instance of "ArgSeq", where the type of the arguments are
393 // inferred from the type of the arguments.
395 // The list of arguments must be written in the same order as if you were
396 // calling the function in C++.
398 // Example:
399 // ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
// ArgSeq stores a VM call's arguments by value (reference types are decayed
// via std::remove_reference_t) and pushes them with pushArg for the call.
// See the usage comment block above; instances are built with ArgList().
401 template <typename... ArgTypes>
402 class ArgSeq {
403 std::tuple<std::remove_reference_t<ArgTypes>...> args_;
405 template <std::size_t... ISeq>
406 inline void generate(CodeGenerator* codegen,
407 std::index_sequence<ISeq...>) const {
408 // Arguments are pushed in reverse order, from last argument to first
409 // argument.
410 (codegen->pushArg(std::get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
413 public:
414 explicit ArgSeq(ArgTypes&&... args)
415 : args_(std::forward<ArgTypes>(args)...) {}
417 inline void generate(CodeGenerator* codegen) const {
418 generate(codegen, std::index_sequence_for<ArgTypes...>{});
421 #ifdef DEBUG
// Checked against VMFunctionData::explicitArgs in oolCallVM.
422 static constexpr size_t numArgs = sizeof...(ArgTypes);
423 #endif
// Helper deducing ArgTypes so callers can write ArgList(a, b) instead of
// spelling out ArgSeq<...> explicitly.
426 template <typename... ArgTypes>
427 inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
428 return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
431 // Store wrappers, to generate the right move of data after the VM call.
// Store wrapper for VM calls whose result is discarded: generates no code
// and reports no clobbered registers.
433 struct StoreNothing {
434 inline void generate(CodeGenerator* codegen) const {}
435 inline LiveRegisterSet clobbered() const {
436 return LiveRegisterSet(); // No register gets clobbered
// Store wrapper moving a pointer-sized/bool/int32 VM call result into a
// general-purpose register; reports that register as clobbered.
440 class StoreRegisterTo {
441 private:
442 Register out_;
444 public:
445 explicit StoreRegisterTo(Register out) : out_(out) {}
447 inline void generate(CodeGenerator* codegen) const {
448 // It's okay to use storePointerResultTo here - the VMFunction wrapper
449 // ensures the upper bytes are zero for bool/int32 return values.
450 codegen->storePointerResultTo(out_);
452 inline LiveRegisterSet clobbered() const {
453 LiveRegisterSet set;
454 set.add(out_);
455 return set;
// Store wrapper moving a floating-point VM call result into a float
// register; reports that register as clobbered.
459 class StoreFloatRegisterTo {
460 private:
461 FloatRegister out_;
463 public:
464 explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
466 inline void generate(CodeGenerator* codegen) const {
467 codegen->storeFloatResultTo(out_);
469 inline LiveRegisterSet clobbered() const {
470 LiveRegisterSet set;
471 set.add(out_);
472 return set;
// Store wrapper moving a Value VM call result into |out| (e.g. a
// ValueOperand or typed output); reports |out| as clobbered.
476 template <typename Output>
477 class StoreValueTo_ {
478 private:
479 Output out_;
481 public:
482 explicit StoreValueTo_(const Output& out) : out_(out) {}
484 inline void generate(CodeGenerator* codegen) const {
485 codegen->storeResultValueTo(out_);
487 inline LiveRegisterSet clobbered() const {
488 LiveRegisterSet set;
489 set.add(out_);
490 return set;
// Helper deducing Output so callers can write StoreValueTo(out) instead of
// StoreValueTo_<...>(out).
494 template <typename Output>
495 StoreValueTo_<Output> StoreValueTo(const Output& out) {
496 return StoreValueTo_<Output>(out);
499 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
500 class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
501 private:
502 LInstruction* lir_;
503 ArgSeq args_;
504 StoreOutputTo out_;
506 public:
507 OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
508 const StoreOutputTo& out)
509 : lir_(lir), args_(args), out_(out) {}
511 void accept(CodeGenerator* codegen) override {
512 codegen->visitOutOfLineCallVM(this);
515 LInstruction* lir() const { return lir_; }
516 const ArgSeq& args() const { return args_; }
517 const StoreOutputTo& out() const { return out_; }
// Allocate and register an OutOfLineCallVM for |lir|. In DEBUG builds,
// checks that the argument count matches the VMFunction's explicitArgs and
// that a result-storing wrapper is supplied iff the function returns data.
520 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
521 OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
522 const StoreOutputTo& out) {
523 MOZ_ASSERT(lir->mirRaw());
524 MOZ_ASSERT(lir->mirRaw()->isInstruction());
526 #ifdef DEBUG
527 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
528 const VMFunctionData& fun = GetVMFunction(id);
529 MOZ_ASSERT(fun.explicitArgs == args.numArgs);
530 MOZ_ASSERT(fun.returnsData() !=
531 (std::is_same_v<StoreOutputTo, StoreNothing>));
532 #endif
534 OutOfLineCode* ool = new (alloc())
535 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
536 addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
537 return ool;
// Emit the out-of-line VM call: save live registers, push args, call the VM
// function, store the result, restore registers (except those the store
// wrapper clobbered), and jump back to the rejoin point.
540 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
541 void CodeGenerator::visitOutOfLineCallVM(
542 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
543 LInstruction* lir = ool->lir();
545 saveLive(lir);
546 ool->args().generate(this);
547 callVM<Fn, fn>(lir);
548 ool->out().generate(this);
549 restoreLiveIgnore(lir, ool->out().clobbered());
550 masm.jump(ool->rejoin());
// Out-of-line fallback path for an IonIC: records which cache (cacheIndex_)
// and which per-IC codegen info slot (cacheInfoIndex_) it belongs to.
// Emitted by CodeGenerator::visitOutOfLineICFallback.
553 class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
554 private:
555 LInstruction* lir_;
556 size_t cacheIndex_;
557 size_t cacheInfoIndex_;
559 public:
560 OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
561 size_t cacheInfoIndex)
562 : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
// Deliberately empty override: entry binding happens elsewhere.
564 void bind(MacroAssembler* masm) override {
565 // The binding of the initial jump is done in
566 // CodeGenerator::visitOutOfLineICFallback.
569 size_t cacheIndex() const { return cacheIndex_; }
570 size_t cacheInfoIndex() const { return cacheInfoIndex_; }
571 LInstruction* lir() const { return lir_; }
573 void accept(CodeGenerator* codegen) override {
574 codegen->visitOutOfLineICFallback(this);
// Wire up an IonIC for |lir|: emit the patchable entry jump, create the
// out-of-line fallback path, and record the rejoin offset in the cache.
// A cacheIndex of SIZE_MAX signals an earlier allocation failure.
578 void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
579 if (cacheIndex == SIZE_MAX) {
580 masm.setOOM();
581 return;
584 DataPtr<IonIC> cache(this, cacheIndex);
585 MInstruction* mir = lir->mirRaw()->toInstruction();
586 cache->setScriptedLocation(mir->block()->info().script(),
587 mir->resumePoint()->pc());
// Entry jump: load a to-be-patched code pointer into a scratch register
// and jump through it.
589 Register temp = cache->scratchRegisterForEntryJump();
590 icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
591 masm.jump(Address(temp, 0));
593 MOZ_ASSERT(!icInfo_.empty());
595 OutOfLineICFallback* ool =
596 new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
597 addOutOfLineCode(ool, mir);
599 masm.bind(ool->rejoin());
600 cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
// Emit the fallback path for every supported IC kind. Each case follows the
// same shape: save live registers, push the IC's inputs (in reverse of the
// update function's C++ argument order), push a patchable slot for the
// IonScript* plus the outer script, call the IC's update VM function, store
// the output, restore registers, and jump to the rejoin point.
603 void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
604 LInstruction* lir = ool->lir();
605 size_t cacheIndex = ool->cacheIndex();
606 size_t cacheInfoIndex = ool->cacheInfoIndex();
608 DataPtr<IonIC> ic(this, cacheIndex);
610 // Register the location of the OOL path in the IC.
611 ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
613 switch (ic->kind()) {
614 case CacheKind::GetProp:
615 case CacheKind::GetElem: {
616 IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
618 saveLive(lir);
620 pushArg(getPropIC->id());
621 pushArg(getPropIC->value());
// Patchable push of the IonScript*, fixed up after compilation.
622 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
623 pushArg(ImmGCPtr(gen->outerInfo().script()));
625 using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
626 HandleValue, HandleValue, MutableHandleValue);
627 callVM<Fn, IonGetPropertyIC::update>(lir);
629 StoreValueTo(getPropIC->output()).generate(this);
630 restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
632 masm.jump(ool->rejoin());
633 return;
635 case CacheKind::GetPropSuper:
636 case CacheKind::GetElemSuper: {
637 IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
639 saveLive(lir);
641 pushArg(getPropSuperIC->id());
642 pushArg(getPropSuperIC->receiver());
643 pushArg(getPropSuperIC->object());
644 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
645 pushArg(ImmGCPtr(gen->outerInfo().script()));
647 using Fn =
648 bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
649 HandleValue, HandleValue, MutableHandleValue);
650 callVM<Fn, IonGetPropSuperIC::update>(lir);
652 StoreValueTo(getPropSuperIC->output()).generate(this);
653 restoreLiveIgnore(lir,
654 StoreValueTo(getPropSuperIC->output()).clobbered());
656 masm.jump(ool->rejoin());
657 return;
659 case CacheKind::SetProp:
660 case CacheKind::SetElem: {
661 IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
663 saveLive(lir);
665 pushArg(setPropIC->rhs());
666 pushArg(setPropIC->id());
667 pushArg(setPropIC->object());
668 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
669 pushArg(ImmGCPtr(gen->outerInfo().script()));
671 using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
672 HandleObject, HandleValue, HandleValue);
673 callVM<Fn, IonSetPropertyIC::update>(lir);
// No output to store for Set ICs; restore everything.
675 restoreLive(lir);
677 masm.jump(ool->rejoin());
678 return;
680 case CacheKind::GetName: {
681 IonGetNameIC* getNameIC = ic->asGetNameIC();
683 saveLive(lir);
685 pushArg(getNameIC->environment());
686 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
687 pushArg(ImmGCPtr(gen->outerInfo().script()));
689 using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
690 MutableHandleValue);
691 callVM<Fn, IonGetNameIC::update>(lir);
693 StoreValueTo(getNameIC->output()).generate(this);
694 restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
696 masm.jump(ool->rejoin());
697 return;
699 case CacheKind::BindName: {
700 IonBindNameIC* bindNameIC = ic->asBindNameIC();
702 saveLive(lir);
704 pushArg(bindNameIC->environment());
705 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
706 pushArg(ImmGCPtr(gen->outerInfo().script()));
708 using Fn =
709 JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
710 callVM<Fn, IonBindNameIC::update>(lir);
712 StoreRegisterTo(bindNameIC->output()).generate(this);
713 restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
715 masm.jump(ool->rejoin());
716 return;
718 case CacheKind::GetIterator: {
719 IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
721 saveLive(lir);
723 pushArg(getIteratorIC->value());
724 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
725 pushArg(ImmGCPtr(gen->outerInfo().script()));
727 using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
728 HandleValue);
729 callVM<Fn, IonGetIteratorIC::update>(lir);
731 StoreRegisterTo(getIteratorIC->output()).generate(this);
732 restoreLiveIgnore(lir,
733 StoreRegisterTo(getIteratorIC->output()).clobbered());
735 masm.jump(ool->rejoin());
736 return;
738 case CacheKind::OptimizeSpreadCall: {
739 auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();
741 saveLive(lir);
743 pushArg(optimizeSpreadCallIC->value());
744 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
745 pushArg(ImmGCPtr(gen->outerInfo().script()));
747 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
748 HandleValue, MutableHandleValue);
749 callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);
751 StoreValueTo(optimizeSpreadCallIC->output()).generate(this);
752 restoreLiveIgnore(
753 lir, StoreValueTo(optimizeSpreadCallIC->output()).clobbered());
755 masm.jump(ool->rejoin());
756 return;
758 case CacheKind::In: {
759 IonInIC* inIC = ic->asInIC();
761 saveLive(lir);
763 pushArg(inIC->object());
764 pushArg(inIC->key());
765 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
766 pushArg(ImmGCPtr(gen->outerInfo().script()));
768 using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
769 HandleObject, bool*);
770 callVM<Fn, IonInIC::update>(lir);
772 StoreRegisterTo(inIC->output()).generate(this);
773 restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
775 masm.jump(ool->rejoin());
776 return;
778 case CacheKind::HasOwn: {
779 IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
781 saveLive(lir);
783 pushArg(hasOwnIC->id());
784 pushArg(hasOwnIC->value());
785 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
786 pushArg(ImmGCPtr(gen->outerInfo().script()));
788 using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
789 HandleValue, int32_t*);
790 callVM<Fn, IonHasOwnIC::update>(lir);
792 StoreRegisterTo(hasOwnIC->output()).generate(this);
793 restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
795 masm.jump(ool->rejoin());
796 return;
798 case CacheKind::CheckPrivateField: {
799 IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
801 saveLive(lir);
803 pushArg(checkPrivateFieldIC->id());
804 pushArg(checkPrivateFieldIC->value());
806 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
807 pushArg(ImmGCPtr(gen->outerInfo().script()));
809 using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
810 HandleValue, HandleValue, bool*);
811 callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
813 StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
814 restoreLiveIgnore(
815 lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
817 masm.jump(ool->rejoin());
818 return;
820 case CacheKind::InstanceOf: {
821 IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
823 saveLive(lir);
825 pushArg(hasInstanceOfIC->rhs());
826 pushArg(hasInstanceOfIC->lhs());
827 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
828 pushArg(ImmGCPtr(gen->outerInfo().script()));
830 using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
831 HandleValue lhs, HandleObject rhs, bool* res);
832 callVM<Fn, IonInstanceOfIC::update>(lir);
834 StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
835 restoreLiveIgnore(lir,
836 StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
838 masm.jump(ool->rejoin());
839 return;
841 case CacheKind::UnaryArith: {
842 IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
844 saveLive(lir);
846 pushArg(unaryArithIC->input());
847 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
848 pushArg(ImmGCPtr(gen->outerInfo().script()));
850 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
851 IonUnaryArithIC* stub, HandleValue val,
852 MutableHandleValue res);
853 callVM<Fn, IonUnaryArithIC::update>(lir);
855 StoreValueTo(unaryArithIC->output()).generate(this);
856 restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
858 masm.jump(ool->rejoin());
859 return;
861 case CacheKind::ToPropertyKey: {
862 IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
864 saveLive(lir);
866 pushArg(toPropertyKeyIC->input());
867 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
868 pushArg(ImmGCPtr(gen->outerInfo().script()));
870 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
871 IonToPropertyKeyIC* ic, HandleValue val,
872 MutableHandleValue res);
873 callVM<Fn, IonToPropertyKeyIC::update>(lir);
875 StoreValueTo(toPropertyKeyIC->output()).generate(this);
876 restoreLiveIgnore(lir,
877 StoreValueTo(toPropertyKeyIC->output()).clobbered());
879 masm.jump(ool->rejoin());
880 return;
882 case CacheKind::BinaryArith: {
883 IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
885 saveLive(lir);
887 pushArg(binaryArithIC->rhs());
888 pushArg(binaryArithIC->lhs());
889 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
890 pushArg(ImmGCPtr(gen->outerInfo().script()));
892 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
893 IonBinaryArithIC* stub, HandleValue lhs,
894 HandleValue rhs, MutableHandleValue res);
895 callVM<Fn, IonBinaryArithIC::update>(lir);
897 StoreValueTo(binaryArithIC->output()).generate(this);
898 restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
900 masm.jump(ool->rejoin());
901 return;
903 case CacheKind::Compare: {
904 IonCompareIC* compareIC = ic->asCompareIC();
906 saveLive(lir);
908 pushArg(compareIC->rhs());
909 pushArg(compareIC->lhs());
910 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
911 pushArg(ImmGCPtr(gen->outerInfo().script()));
913 using Fn =
914 bool (*)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub,
915 HandleValue lhs, HandleValue rhs, bool* res);
916 callVM<Fn, IonCompareIC::update>(lir);
918 StoreRegisterTo(compareIC->output()).generate(this);
919 restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
921 masm.jump(ool->rejoin());
922 return;
924 case CacheKind::CloseIter: {
925 IonCloseIterIC* closeIterIC = ic->asCloseIterIC();
927 saveLive(lir);
929 pushArg(closeIterIC->iter());
930 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
931 pushArg(ImmGCPtr(gen->outerInfo().script()));
933 using Fn =
934 bool (*)(JSContext*, HandleScript, IonCloseIterIC*, HandleObject);
935 callVM<Fn, IonCloseIterIC::update>(lir);
937 restoreLive(lir);
939 masm.jump(ool->rejoin());
940 return;
// These kinds are handled by other tiers (e.g. Baseline), not Ion ICs.
942 case CacheKind::Call:
943 case CacheKind::TypeOf:
944 case CacheKind::ToBool:
945 case CacheKind::GetIntrinsic:
946 case CacheKind::NewArray:
947 case CacheKind::NewObject:
948 MOZ_CRASH("Unsupported IC");
950 MOZ_CRASH();
// Returns the template object for this MNewStringObject, downcast to
// StringObject.
953 StringObject* MNewStringObject::templateObj() const {
954 return &templateObj_->as<StringObject>();
// Construct the Ion code generator; label vectors use the MIR generator's
// allocator, script counts are allocated lazily (see the destructor).
957 CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
958 MacroAssembler* masm)
959 : CodeGeneratorSpecific(gen, graph, masm),
960 ionScriptLabels_(gen->alloc()),
961 ionNurseryObjectLabels_(gen->alloc()),
962 scriptCounts_(nullptr),
963 zoneStubsToReadBarrier_(0) {}
// scriptCounts_ is owned by the code generator (js_delete handles null).
965 CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
// Convert a boxed Value to int32. TRUNCATE mode uses out-of-line paths for
// doubles and strings (bitwise-op semantics); NORMAL mode bails out on any
// value that is not exactly representable as int32.
967 void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
968 ValueOperand operand = ToValue(lir, LValueToInt32::Input);
969 Register output = ToRegister(lir->output());
970 FloatRegister temp = ToFloatRegister(lir->tempFloat());
972 Label fails;
973 if (lir->mode() == LValueToInt32::TRUNCATE) {
974 OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());
976 // We can only handle strings in truncation contexts, like bitwise
977 // operations.
978 Register stringReg = ToRegister(lir->temp());
979 using Fn = bool (*)(JSContext*, JSString*, double*);
// String path: convert via StringToNumber in the VM, result lands in |temp|.
980 auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
981 StoreFloatRegisterTo(temp));
982 Label* stringEntry = oolString->entry();
983 Label* stringRejoin = oolString->rejoin();
985 masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
986 oolDouble->entry(), stringReg, temp, output,
987 &fails);
988 masm.bind(oolDouble->rejoin());
989 } else {
990 MOZ_ASSERT(lir->mode() == LValueToInt32::NORMAL);
991 masm.convertValueToInt32(operand, temp, output, &fails,
992 lir->mirNormal()->needsNegativeZeroCheck(),
993 lir->mirNormal()->conversion());
// Any failure path above deoptimizes via this snapshot.
996 bailoutFrom(&fails, lir->snapshot());
// Convert a boxed Value to a double. Handles double and int32 inline; when
// the MIR conversion mode allows non-string primitives, also handles
// boolean, undefined (-> NaN), and null (-> +0.0). Any other tag bails out.
void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
  ValueOperand operand = ToValue(lir, LValueToDouble::InputIndex);
  FloatRegister output = ToFloatRegister(lir->output());

  // Set if we can handle other primitives beside strings, as long as they're
  // guaranteed to never throw. This rules out symbols and BigInts, but allows
  // booleans, undefined, and null.
  bool hasNonStringPrimitives =
      lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;

  Label isDouble, isInt32, isBool, isNull, isUndefined, done;

  {
    // Tag dispatch; the ScratchTagScope confines scratch-register use.
    ScratchTagScope tag(masm, operand);
    masm.splitTagForTest(operand, tag);

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (hasNonStringPrimitives) {
      masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
      masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &isNull);
    }
  }

  // No branch matched: unsupported input type, bail out of Ion.
  bailout(lir->snapshot());

  if (hasNonStringPrimitives) {
    masm.bind(&isNull);
    // Null converts to +0.0.
    masm.loadConstantDouble(0.0, output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    masm.bind(&isUndefined);
    // Undefined converts to NaN.
    masm.loadConstantDouble(GenericNaN(), output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    masm.bind(&isBool);
    masm.boolValueToDouble(operand, output);
    masm.jump(&done);
  }

  masm.bind(&isInt32);
  masm.int32ValueToDouble(operand, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  masm.unboxDouble(operand, output);
  masm.bind(&done);
}
// Convert a boxed Value to a float32. Mirrors visitValueToDouble but emits
// float32 conversions; the double case may need a scratch double register
// on targets where the float32 output register can't hold a double.
void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
  ValueOperand operand = ToValue(lir, LValueToFloat32::InputIndex);
  FloatRegister output = ToFloatRegister(lir->output());

  // Set if we can handle other primitives beside strings, as long as they're
  // guaranteed to never throw. This rules out symbols and BigInts, but allows
  // booleans, undefined, and null.
  bool hasNonStringPrimitives =
      lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;

  Label isDouble, isInt32, isBool, isNull, isUndefined, done;

  {
    // Tag dispatch; the ScratchTagScope confines scratch-register use.
    ScratchTagScope tag(masm, operand);
    masm.splitTagForTest(operand, tag);

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (hasNonStringPrimitives) {
      masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
      masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &isNull);
    }
  }

  // No branch matched: unsupported input type, bail out of Ion.
  bailout(lir->snapshot());

  if (hasNonStringPrimitives) {
    masm.bind(&isNull);
    // Null converts to +0.0f.
    masm.loadConstantFloat32(0.0f, output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    masm.bind(&isUndefined);
    // Undefined converts to NaN.
    masm.loadConstantFloat32(float(GenericNaN()), output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    masm.bind(&isBool);
    masm.boolValueToFloat32(operand, output);
    masm.jump(&done);
  }

  masm.bind(&isInt32);
  masm.int32ValueToFloat32(operand, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  // ARM and MIPS may not have a double register available if we've
  // allocated output as a float32.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
  ScratchDoubleScope fpscratch(masm);
  masm.unboxDouble(operand, fpscratch);
  masm.convertDoubleToFloat32(fpscratch, output);
#else
  masm.unboxDouble(operand, output);
  masm.convertDoubleToFloat32(output, output);
#endif
  masm.bind(&done);
}
// Convert a boxed Value to a BigInt. BigInts unbox inline; booleans and
// strings go through the ToBigInt VM call out-of-line; everything else
// bails out (objects may have side effects, other types throw TypeError).
void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
  ValueOperand operand = ToValue(lir, LValueToBigInt::InputIndex);
  Register output = ToRegister(lir->output());

  using Fn = BigInt* (*)(JSContext*, HandleValue);
  auto* ool =
      oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));

  // extractTag may clobber |output|; that's fine because the tag is
  // consumed before |output| is written below.
  Register tag = masm.extractTag(operand, output);

  Label notBigInt, done;
  masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
  masm.unboxBigInt(operand, output);
  masm.jump(&done);
  masm.bind(&notBigInt);

  masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
  masm.branchTestString(Assembler::Equal, tag, ool->entry());

  // ToBigInt(object) can have side-effects; all other types throw a TypeError.
  bailout(lir->snapshot());

  masm.bind(ool->rejoin());
  masm.bind(&done);
}
1144 void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
1145 masm.convertInt32ToDouble(ToRegister(lir->input()),
1146 ToFloatRegister(lir->output()));
1149 void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
1150 masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
1151 ToFloatRegister(lir->output()));
1154 void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
1155 masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
1156 ToFloatRegister(lir->output()));
1159 void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
1160 masm.convertInt32ToFloat32(ToRegister(lir->input()),
1161 ToFloatRegister(lir->output()));
1164 void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
1165 Label fail;
1166 FloatRegister input = ToFloatRegister(lir->input());
1167 Register output = ToRegister(lir->output());
1168 masm.convertDoubleToInt32(input, output, &fail,
1169 lir->mir()->needsNegativeZeroCheck());
1170 bailoutFrom(&fail, lir->snapshot());
1173 void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
1174 Label fail;
1175 FloatRegister input = ToFloatRegister(lir->input());
1176 Register output = ToRegister(lir->output());
1177 masm.convertFloat32ToInt32(input, output, &fail,
1178 lir->mir()->needsNegativeZeroCheck());
1179 bailoutFrom(&fail, lir->snapshot());
1182 void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
1183 #ifdef JS_64BIT
1184 // This LIR instruction is only used if the input can be negative.
1185 MOZ_ASSERT(lir->mir()->canBeNegative());
1187 Register output = ToRegister(lir->output());
1188 const LAllocation* input = lir->input();
1189 if (input->isRegister()) {
1190 masm.move32SignExtendToPtr(ToRegister(input), output);
1191 } else {
1192 masm.load32SignExtendToPtr(ToAddress(input), output);
1194 #else
1195 MOZ_CRASH("Not used on 32-bit platforms");
1196 #endif
1199 void CodeGenerator::visitNonNegativeIntPtrToInt32(
1200 LNonNegativeIntPtrToInt32* lir) {
1201 #ifdef JS_64BIT
1202 Register output = ToRegister(lir->output());
1203 MOZ_ASSERT(ToRegister(lir->input()) == output);
1205 Label bail;
1206 masm.guardNonNegativeIntPtrToInt32(output, &bail);
1207 bailoutFrom(&bail, lir->snapshot());
1208 #else
1209 MOZ_CRASH("Not used on 32-bit platforms");
1210 #endif
1213 void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
1214 Register input = ToRegister(lir->input());
1215 FloatRegister output = ToFloatRegister(lir->output());
1216 masm.convertIntPtrToDouble(input, output);
// Adjust a DataView length for a multi-byte read/write: subtracts
// (byteSize - 1) in place so a subsequent bounds check covers the last byte
// of the access. Bails out if the adjusted length would go negative.
void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
  Register output = ToRegister(lir->output());
  // In-place update: the register allocator reuses the input register.
  MOZ_ASSERT(ToRegister(lir->input()) == output);

  uint32_t byteSize = lir->mir()->byteSize();

#ifdef DEBUG
  // The incoming length must already be non-negative.
  Label ok;
  masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
  masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
  masm.bind(&ok);
#endif

  Label bail;
  masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Out-of-line slow path for object truthiness: calls js::EmulatesUndefined
// through the ABI and branches on its boolean result. Volatile registers
// are saved/restored around the call; |scratch| holds the call result.
void CodeGenerator::emitOOLTestObject(Register objreg,
                                      Label* ifEmulatesUndefined,
                                      Label* ifDoesntEmulateUndefined,
                                      Register scratch) {
  saveVolatile(scratch);
  using Fn = bool (*)(JSObject* obj);
  masm.setupAlignedABICall();
  masm.passABIArg(objreg);
  masm.callWithABI<Fn, js::EmulatesUndefined>();
  masm.storeCallPointerResult(scratch);
  restoreVolatile(scratch);

  masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
  masm.jump(ifDoesntEmulateUndefined);
}
// Base out-of-line code generator for all tests of the truthiness of an
// object, where the object might not be truthy. (Recall that per spec all
// objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
// flag to permit objects to look like |undefined| in certain contexts,
// including in object truthiness testing.) We check truthiness inline except
// when we're testing it on a proxy, in which case out-of-line code will call
// EmulatesUndefined for a conclusive answer.
class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
  Register objreg_;   // Object under test.
  Register scratch_;  // Scratch for the OOL ABI call result.

  Label* ifEmulatesUndefined_;
  Label* ifDoesntEmulateUndefined_;

#ifdef DEBUG
  // Targets must be set exactly once via setInputAndTargets.
  bool initialized() { return ifEmulatesUndefined_ != nullptr; }
#endif

 public:
  OutOfLineTestObject()
      : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}

  void accept(CodeGenerator* codegen) final {
    MOZ_ASSERT(initialized());
    codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
                               ifDoesntEmulateUndefined_, scratch_);
  }

  // Specify the register where the object to be tested is found, labels to
  // jump to if the object is truthy or falsy, and a scratch register for
  // use in the out-of-line path.
  void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
                          Label* ifDoesntEmulateUndefined, Register scratch) {
    MOZ_ASSERT(!initialized());
    MOZ_ASSERT(ifEmulatesUndefined);
    objreg_ = objreg;
    scratch_ = scratch;
    ifEmulatesUndefined_ = ifEmulatesUndefined;
    ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
  }
};
// A subclass of OutOfLineTestObject containing two extra labels, for use when
// the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
// code. The user should bind these labels in inline code, and specify them as
// targets via setInputAndTargets, as appropriate.
class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
  Label label1_;
  Label label2_;

 public:
  OutOfLineTestObjectWithLabels() = default;

  Label* label1() { return &label1_; }
  Label* label2() { return &label2_; }
};
// Shared kernel for the emulates-undefined tests below: wires up the OOL
// targets and emits the inline class-flag fast path. Falls through when the
// object doesn't emulate undefined (callers decide whether to branch/bind).
void CodeGenerator::testObjectEmulatesUndefinedKernel(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
                          scratch);

  // Perform a fast-path check of the object's class flags if the object's
  // not a proxy. Let out-of-line code handle the slow cases that require
  // saving registers, making a function call, and restoring registers.
  masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
                                       ifEmulatesUndefined);
}
// Variant for callers that fall through on "doesn't emulate undefined":
// binds ifDoesntEmulateUndefined immediately after the kernel, so that
// label must not be bound yet.
void CodeGenerator::branchTestObjectEmulatesUndefined(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
             "ifDoesntEmulateUndefined will be bound to the fallthrough path");

  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.bind(ifDoesntEmulateUndefined);
}
// Variant for callers whose ifDoesntEmulateUndefined label lives elsewhere:
// explicitly jumps there instead of falling through.
void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
                                                Label* ifEmulatesUndefined,
                                                Label* ifDoesntEmulateUndefined,
                                                Register scratch,
                                                OutOfLineTestObject* ool) {
  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.jump(ifDoesntEmulateUndefined);
}
// Emit the truthiness test for one JSValueType of a boxed Value.
// Undefined/null always branch to ifFalsy and symbols to ifTruthy; objects
// dispatch through the emulates-undefined machinery; the remaining numeric/
// string/bigint types branch to ifFalsy when falsy and otherwise either
// fall through (when this is the last possible type, skipTypeTest=true) or
// jump to ifTruthy. |tag| must hold the split tag of |value|.
void CodeGenerator::testValueTruthyForType(
    JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
    Register tempToUnbox, Register temp, FloatRegister floatTemp,
    Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool,
    bool skipTypeTest) {
#ifdef DEBUG
  // When the type test is skipped, verify the tag really matches |type|.
  if (skipTypeTest) {
    Label expected;
    masm.branchTestType(Assembler::Equal, tag, type, &expected);
    masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
    masm.bind(&expected);
  }
#endif

  // Handle irregular types first.
  switch (type) {
    case JSVAL_TYPE_UNDEFINED:
    case JSVAL_TYPE_NULL:
      // Undefined and null are falsy.
      if (!skipTypeTest) {
        masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
      } else {
        masm.jump(ifFalsy);
      }
      return;
    case JSVAL_TYPE_SYMBOL:
      // Symbols are truthy.
      if (!skipTypeTest) {
        masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
      } else {
        masm.jump(ifTruthy);
      }
      return;
    case JSVAL_TYPE_OBJECT: {
      Label notObject;
      if (!skipTypeTest) {
        masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
      }
      {
        // Release the tag scratch while testing the unboxed object.
        ScratchTagScopeRelease _(&tag);
        Register objreg = masm.extractObject(value, tempToUnbox);
        testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, temp, ool);
      }
      masm.bind(&notObject);
      return;
    }
    default:
      break;
  }

  // Check the type of the value (unless this is the last possible type).
  Label differentType;
  if (!skipTypeTest) {
    masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
  }

  // Branch if the value is falsy.
  ScratchTagScopeRelease _(&tag);
  switch (type) {
    case JSVAL_TYPE_BOOLEAN: {
      masm.branchTestBooleanTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_INT32: {
      masm.branchTestInt32Truthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_STRING: {
      masm.branchTestStringTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_BIGINT: {
      masm.branchTestBigIntTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_DOUBLE: {
      masm.unboxDouble(value, floatTemp);
      masm.branchTestDoubleTruthy(false, floatTemp, ifFalsy);
      break;
    }
    default:
      MOZ_CRASH("Unexpected value type");
  }

  // If we reach this point, the value is truthy. We fall through for
  // truthy on the last test; otherwise, branch.
  if (!skipTypeTest) {
    masm.jump(ifTruthy);
  }

  masm.bind(&differentType);
}
// Emit a full truthiness test for a boxed Value: per-type tests are emitted
// first for the types seen at runtime (most frequent first), then for all
// remaining types in a fixed default order. The last emitted test falls
// through on truthy instead of jumping to ifTruthy.
void CodeGenerator::testValueTruthy(const ValueOperand& value,
                                    Register tempToUnbox, Register temp,
                                    FloatRegister floatTemp,
                                    const TypeDataList& observedTypes,
                                    Label* ifTruthy, Label* ifFalsy,
                                    OutOfLineTestObject* ool) {
  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  const std::initializer_list<JSValueType> defaultOrder = {
      JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL,   JSVAL_TYPE_BOOLEAN,
      JSVAL_TYPE_INT32,     JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
      JSVAL_TYPE_DOUBLE,    JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};

  mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);

  // Generate tests for previously observed types first.
  // The TypeDataList is sorted by descending frequency.
  for (auto& observed : observedTypes) {
    JSValueType type = observed.type();
    remaining -= type;

    testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
                           ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
  }

  // Generate tests for remaining types.
  for (auto type : defaultOrder) {
    if (!remaining.contains(type)) {
      continue;
    }
    remaining -= type;

    // We don't need a type test for the last possible type.
    bool skipTypeTest = remaining.isEmpty();
    testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
                           ifTruthy, ifFalsy, ool, skipTypeTest);
  }
  MOZ_ASSERT(remaining.isEmpty());

  // We fall through if the final test is truthy.
}
// Branch on a BigInt's truthiness (a BigInt is falsy iff zero). Picks the
// branch polarity so the successor laid out next in the block order is
// reached by fallthrough instead of an explicit jump.
void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
  Label* ifTrueLabel = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalseLabel = getJumpLabelForBranch(lir->ifFalse());
  Register input = ToRegister(lir->input());

  if (isNextBlock(lir->ifFalse()->lir())) {
    // False block falls through; only branch on non-zero.
    masm.branchIfBigIntIsNonZero(input, ifTrueLabel);
  } else if (isNextBlock(lir->ifTrue()->lir())) {
    // True block falls through; only branch on zero.
    masm.branchIfBigIntIsZero(input, ifFalseLabel);
  } else {
    // Neither successor falls through; branch then jump.
    masm.branchIfBigIntIsZero(input, ifFalseLabel);
    jumpToBlock(lir->ifTrue());
  }
}
1495 void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
1496 Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
1497 Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
1498 Register input = ToRegister(lir->input());
1500 auto* ool = new (alloc()) OutOfLineTestObject();
1501 addOutOfLineCode(ool, lir->mir());
1503 testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
1504 ool);
// Branch on a boxed Value's truthiness using the full per-type dispatch in
// testValueTruthy; the trailing jump covers the truthy fallthrough of the
// last emitted type test.
void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
  auto* ool = new (alloc()) OutOfLineTestObject();
  addOutOfLineCode(ool, lir->mir());

  Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
  Label* falsy = getJumpLabelForBranch(lir->ifFalsy());

  ValueOperand input = ToValue(lir, LTestVAndBranch::Input);
  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
  Register temp = ToRegister(lir->temp2());
  FloatRegister floatTemp = ToFloatRegister(lir->tempFloat());
  const TypeDataList& observedTypes = lir->mir()->observedTypes();

  testValueTruthy(input, tempToUnbox, temp, floatTemp, observedTypes, truthy,
                  falsy, ool);
  // testValueTruthy falls through when the last test is truthy.
  masm.jump(truthy);
}
1525 void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
1526 Register input = ToRegister(lir->input());
1527 Register output = ToRegister(lir->output());
1528 const JSAtomState& names = gen->runtime->names();
1529 Label true_, done;
1531 masm.branchTest32(Assembler::NonZero, input, input, &true_);
1532 masm.movePtr(ImmGCPtr(names.false_), output);
1533 masm.jump(&done);
1535 masm.bind(&true_);
1536 masm.movePtr(ImmGCPtr(names.true_), output);
1538 masm.bind(&done);
// Load the static string for a small non-negative int32 from the runtime's
// static-strings table; jumps to |ool| when the input is outside
// [0, INT_STATIC_LIMIT).
void CodeGenerator::emitIntToString(Register input, Register output,
                                    Label* ool) {
  masm.boundsCheck32PowerOfTwo(input, StaticStrings::INT_STATIC_LIMIT, ool);

  // Fast path for small integers.
  masm.movePtr(ImmPtr(&gen->runtime->staticStrings().intStaticTable), output);
  masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
}
1550 void CodeGenerator::visitIntToString(LIntToString* lir) {
1551 Register input = ToRegister(lir->input());
1552 Register output = ToRegister(lir->output());
1554 using Fn = JSLinearString* (*)(JSContext*, int);
1555 OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
1556 lir, ArgList(input), StoreRegisterTo(output));
1558 emitIntToString(input, output, ool->entry());
1560 masm.bind(ool->rejoin());
// Convert a double to a string. Fast path: if the double is exactly a small
// int32, reuse the static-strings table; otherwise call NumberToString in
// the VM out-of-line.
void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, double);
  OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  // Try double to integer conversion and run integer to string code.
  masm.convertDoubleToInt32(input, temp, ool->entry(), false);
  emitIntToString(temp, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Convert a boxed Value to a string by tag dispatch. Strings unbox inline;
// int32s use the static-strings fast path; undefined/null/boolean load
// static atoms; doubles, BigInts, and (when side effects are supported)
// objects and symbols go through the ToStringSlow VM call; otherwise
// objects/symbols bail out of Ion.
void CodeGenerator::visitValueToString(LValueToString* lir) {
  ValueOperand input = ToValue(lir, LValueToString::InputIndex);
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  Label done;
  // extractTag may clobber |output|; the tag is consumed before |output|
  // is written on every path below.
  Register tag = masm.extractTag(input, output);
  const JSAtomState& names = gen->runtime->names();

  // String
  {
    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    masm.unboxString(input, output);
    masm.jump(&done);
    masm.bind(&notString);
  }

  // Integer
  {
    Label notInteger;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
    Register unboxed = ToTempUnboxRegister(lir->temp0());
    unboxed = masm.extractInt32(input, unboxed);
    emitIntToString(unboxed, output, ool->entry());
    masm.jump(&done);
    masm.bind(&notInteger);
  }

  // Double
  {
    // Note: no fastpath. Need two extra registers and can only convert doubles
    // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
    masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
  }

  // Undefined
  {
    Label notUndefined;
    masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
    masm.movePtr(ImmGCPtr(names.undefined), output);
    masm.jump(&done);
    masm.bind(&notUndefined);
  }

  // Null
  {
    Label notNull;
    masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
    masm.movePtr(ImmGCPtr(names.null), output);
    masm.jump(&done);
    masm.bind(&notNull);
  }

  // Boolean
  {
    Label notBoolean, true_;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    masm.branchTestBooleanTruthy(true, input, &true_);
    masm.movePtr(ImmGCPtr(names.false_), output);
    masm.jump(&done);
    masm.bind(&true_);
    masm.movePtr(ImmGCPtr(names.true_), output);
    masm.jump(&done);
    masm.bind(&notBoolean);
  }

  // Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
  if (lir->mir()->mightHaveSideEffects()) {
    // Object
    if (lir->mir()->supportSideEffects()) {
      masm.branchTestObject(Assembler::Equal, tag, ool->entry());
    } else {
      // Bail.
      MOZ_ASSERT(lir->mir()->needsSnapshot());
      Label bail;
      masm.branchTestObject(Assembler::Equal, tag, &bail);
      bailoutFrom(&bail, lir->snapshot());
    }

    // Symbol
    if (lir->mir()->supportSideEffects()) {
      masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
    } else {
      // Bail.
      MOZ_ASSERT(lir->mir()->needsSnapshot());
      Label bail;
      masm.branchTestSymbol(Assembler::Equal, tag, &bail);
      bailoutFrom(&bail, lir->snapshot());
    }
  }

  // BigInt
  {
    // No fastpath currently implemented.
    masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
  }

  masm.assumeUnreachable("Unexpected type for LValueToString.");

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Signature of the store-buffer add/remove helpers called below.
using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);

// Emit an ABI call to |fun| with arguments (buffer, &holder[offset]),
// preserving the registers in |liveVolatiles| across the call.
static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
                                    size_t offset, Register buffer,
                                    LiveGeneralRegisterSet& liveVolatiles,
                                    StoreBufferMutationFn fun) {
  Label callVM;
  Label exit;

  // Call into the VM to barrier the write. The only registers that need to
  // be preserved are those in liveVolatiles, so once they are saved on the
  // stack all volatile registers are available for use.
  masm.bind(&callVM);
  masm.PushRegsInMask(liveVolatiles);

  // Carve a scratch register out of the volatile set, avoiding the two
  // argument registers which must stay live.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(buffer);
  regs.takeUnchecked(holder);
  Register addrReg = regs.takeAny();

  masm.computeEffectiveAddress(Address(holder, offset), addrReg);

  // If no volatile register is left for the ABI-call scratch, temporarily
  // spill |holder| (its value was already consumed by addrReg above).
  bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
  if (needExtraReg) {
    masm.push(holder);
    masm.setupUnalignedABICall(holder);
  } else {
    masm.setupUnalignedABICall(regs.takeAny());
  }
  masm.passABIArg(buffer);
  masm.passABIArg(addrReg);
  masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun), MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckOther);

  if (needExtraReg) {
    masm.pop(holder);
  }
  masm.PopRegsInMask(liveVolatiles);
  masm.bind(&exit);
}
// Emit a post-write barrier for a string field write at holder[offset]:
// |prev| is the old value, |next| the newly stored one. Adds the cell
// address to the new value's store buffer when it is nursery-allocated, or
// removes it from the old value's store buffer when only the old value was.
// Warning: this function modifies prev and next.
static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
                                  size_t offset, Register prev, Register next,
                                  LiveGeneralRegisterSet& liveVolatiles) {
  Label exit;
  Label checkRemove, putCell;

  // if (next && (buffer = next->storeBuffer()))
  // but we never pass in nullptr for next.
  Register storebuffer = next;
  masm.loadStoreBuffer(next, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);

  // if (prev && prev->storeBuffer())
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
  masm.loadStoreBuffer(prev, prev);
  // Old value was already buffered; nothing to add.
  masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);

  // buffer->putCell(cellp)
  masm.bind(&putCell);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::addCellAddressToStoreBuffer);
  masm.jump(&exit);

  // if (prev && (buffer = prev->storeBuffer()))
  masm.bind(&checkRemove);
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
  masm.loadStoreBuffer(prev, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::removeCellAddressFromStoreBuffer);

  masm.bind(&exit);
}
// Materialize a RegExp literal by cloning its source object. When the
// source already has a RegExpShared we can allocate the clone inline from
// a template object; otherwise (and on allocation failure) we call
// CloneRegExpObject in the VM.
void CodeGenerator::visitRegExp(LRegExp* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());
  JSObject* source = lir->mir()->source();

  using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
  OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
      lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
  if (lir->mir()->hasShared()) {
    TemplateObject templateObject(source);
    masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                        ool->entry());
  } else {
    masm.jump(ool->entry());
  }
  masm.bind(ool->rejoin());
}
1780 static constexpr int32_t RegExpPairsVectorStartOffset(
1781 int32_t inputOutputDataStartOffset) {
1782 return inputOutputDataStartOffset + int32_t(InputOutputDataSize) +
1783 int32_t(sizeof(MatchPairs));
1786 static Address RegExpPairCountAddress(MacroAssembler& masm,
1787 int32_t inputOutputDataStartOffset) {
1788 return Address(FramePointer, inputOutputDataStartOffset +
1789 int32_t(InputOutputDataSize) +
1790 MatchPairs::offsetOfPairCount());
// When the unicode flag is set, if lastIndex points to a trail
// surrogate, we should step back to the corresponding lead surrogate.
// See ExecuteRegExp in builtin/RegExp.cpp for more detail.
static void StepBackToLeadSurrogate(MacroAssembler& masm, Register regexpShared,
                                    Register input, Register lastIndex,
                                    Register temp1, Register temp2) {
  Label done;

  // If the unicode flag is not set, there is nothing to do.
  masm.branchTest32(Assembler::Zero,
                    Address(regexpShared, RegExpShared::offsetOfFlags()),
                    Imm32(int32_t(JS::RegExpFlag::Unicode)), &done);

  // If the input is latin1, there can't be any surrogates.
  masm.branchLatin1String(input, &done);

  // Check if |lastIndex > 0 && lastIndex < input->length()|.
  // lastIndex should already have no sign here.
  masm.branchTest32(Assembler::Zero, lastIndex, lastIndex, &done);
  masm.loadStringLength(input, temp1);
  masm.branch32(Assembler::AboveOrEqual, lastIndex, temp1, &done);

  // For TrailSurrogateMin ≤ x ≤ TrailSurrogateMax and
  // LeadSurrogateMin ≤ x ≤ LeadSurrogateMax, the following
  // equations hold.
  //
  //    SurrogateMin ≤ x ≤ SurrogateMax
  // <> SurrogateMin ≤ x ≤ SurrogateMin + 2^10 - 1
  // <> ((x - SurrogateMin) >>> 10) = 0    where >>> is an unsigned-shift
  // See Hacker's Delight, section 4-1 for details.
  //
  //    ((x - SurrogateMin) >>> 10) = 0
  // <> floor((x - SurrogateMin) / 1024) = 0
  // <> floor((x / 1024) - (SurrogateMin / 1024)) = 0
  // <> floor(x / 1024) = SurrogateMin / 1024
  // <> floor(x / 1024) * 1024 = SurrogateMin
  // <> (x >>> 10) << 10 = SurrogateMin
  // <> x & ~(2^10 - 1) = SurrogateMin
  constexpr char16_t SurrogateMask = 0xFC00;

  Register charsReg = temp1;
  masm.loadStringChars(input, charsReg, CharEncoding::TwoByte);

  // Check if input[lastIndex] is trail surrogate.
  masm.loadChar(charsReg, lastIndex, temp2, CharEncoding::TwoByte);
  masm.and32(Imm32(SurrogateMask), temp2);
  masm.branch32(Assembler::NotEqual, temp2, Imm32(unicode::TrailSurrogateMin),
                &done);

  // Check if input[lastIndex-1] is lead surrogate.
  masm.loadChar(charsReg, lastIndex, temp2, CharEncoding::TwoByte,
                -int32_t(sizeof(char16_t)));
  masm.and32(Imm32(SurrogateMask), temp2);
  masm.branch32(Assembler::NotEqual, temp2, Imm32(unicode::LeadSurrogateMin),
                &done);

  // Move lastIndex back to lead surrogate.
  masm.sub32(Imm32(1), lastIndex);

  masm.bind(&done);
}
// Update the lazy RegExpStatics after a successful match: store the pending/
// matches input strings (with GC pre-barriers, and post-barriers when nursery
// strings are possible), the lazy index, the pending-lazy-evaluation flag,
// and the source/flags copied out of the regexp's RegExpShared.
static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
                                Register input, Register lastIndex,
                                Register staticsReg, Register temp1,
                                Register temp2, gc::Heap initialStringHeap,
                                LiveGeneralRegisterSet& volatileRegs) {
  Address pendingInputAddress(staticsReg,
                              RegExpStatics::offsetOfPendingInput());
  Address matchesInputAddress(staticsReg,
                              RegExpStatics::offsetOfMatchesInput());
  Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
  Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());

  // Pre-barrier the string fields we are about to overwrite.
  masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);

  if (initialStringHeap == gc::Heap::Default) {
    // Writing into RegExpStatics tenured memory; must post-barrier.
    if (staticsReg.volatile_()) {
      volatileRegs.add(staticsReg);
    }

    masm.loadPtr(pendingInputAddress, temp1);
    masm.storePtr(input, pendingInputAddress);
    masm.movePtr(input, temp2);
    EmitPostWriteBarrierS(masm, staticsReg,
                          RegExpStatics::offsetOfPendingInput(),
                          temp1 /* prev */, temp2 /* next */, volatileRegs);

    masm.loadPtr(matchesInputAddress, temp1);
    masm.storePtr(input, matchesInputAddress);
    masm.movePtr(input, temp2);
    EmitPostWriteBarrierS(masm, staticsReg,
                          RegExpStatics::offsetOfMatchesInput(),
                          temp1 /* prev */, temp2 /* next */, volatileRegs);
  } else {
    // Strings are tenured: no post-barrier needed.
    masm.debugAssertGCThingIsTenured(input, temp1);
    masm.storePtr(input, pendingInputAddress);
    masm.storePtr(input, matchesInputAddress);
  }

  masm.storePtr(lastIndex,
                Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
  masm.store32(
      Imm32(1),
      Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));

  // Copy the source and flags out of the regexp's RegExpShared slot.
  masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
                                          RegExpObject::SHARED_SLOT)),
                      temp1, JSVAL_TYPE_PRIVATE_GCTHING);
  masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
  masm.storePtr(temp2, lazySourceAddress);
  static_assert(sizeof(JS::RegExpFlags) == 1, "load size must match flag size");
  masm.load8ZeroExtend(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
  masm.store8(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
}
// Prepare an InputOutputData and optional MatchPairs which space has been
// allocated for on the stack, and try to execute a RegExp on a string input.
// If the RegExp was successfully executed and matched the input, fallthrough.
// Otherwise, jump to notFound or failure.
//
// inputOutputDataStartOffset is the offset relative to the frame pointer
// register. This offset is negative for the RegExpExecTest stub.
static bool PrepareAndExecuteRegExp(MacroAssembler& masm, Register regexp,
                                    Register input, Register lastIndex,
                                    Register temp1, Register temp2,
                                    Register temp3,
                                    int32_t inputOutputDataStartOffset,
                                    gc::Heap initialStringHeap, Label* notFound,
                                    Label* failure) {
  JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");

  using irregexp::InputOutputData;

  /*
   * [SMDOC] Stack layout for PrepareAndExecuteRegExp
   *
   * Before this function is called, the caller is responsible for
   * allocating enough stack space for the following data:
   *
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *          inputStartAddress +---------->  inputStart|
   *            inputEndAddress +---------->    inputEnd|
   *          startIndexAddress +---------->  startIndex|
   *             matchesAddress +---------->     matches|-----+
   *                                    +---------------+     |
   * matchPairs(Address|Offset) +-----> +---------------+  <--+
   *                                    |  MatchPairs   |
   *           pairCountAddress +---------->      count |
   *        pairsPointerAddress +---------->      pairs |-----+
   *                                    +---------------+     |
   * pairsArray(Address|Offset) +-----> +---------------+  <--+
   *                                    |    MatchPair  |
   *     firstMatchStartAddress +---------->      start |  <--+
   *                                    |         limit |     |
   *                                    +---------------+     |
   *                                           .             Reserved space for
   *                                           .             RegExpObject::MaxPairCount
   *                                           .             MatchPair objects
   *                                           .              |
   *                                    +---------------+     |
   *                                    |    MatchPair  |     |
   *                                    |         start |     |
   *                                    |         limit |  <--+
   *                                    +---------------+
   */

  // Frame-pointer-relative offsets of the three stack regions shown above.
  int32_t ioOffset = inputOutputDataStartOffset;
  int32_t matchPairsOffset = ioOffset + int32_t(sizeof(InputOutputData));
  int32_t pairsArrayOffset = matchPairsOffset + int32_t(sizeof(MatchPairs));

  Address inputStartAddress(FramePointer,
                            ioOffset + InputOutputData::offsetOfInputStart());
  Address inputEndAddress(FramePointer,
                          ioOffset + InputOutputData::offsetOfInputEnd());
  Address startIndexAddress(FramePointer,
                            ioOffset + InputOutputData::offsetOfStartIndex());
  Address matchesAddress(FramePointer,
                         ioOffset + InputOutputData::offsetOfMatches());

  Address matchPairsAddress(FramePointer, matchPairsOffset);
  Address pairCountAddress(FramePointer,
                           matchPairsOffset + MatchPairs::offsetOfPairCount());
  Address pairsPointerAddress(FramePointer,
                              matchPairsOffset + MatchPairs::offsetOfPairs());

  Address pairsArrayAddress(FramePointer, pairsArrayOffset);
  Address firstMatchStartAddress(FramePointer,
                                 pairsArrayOffset + MatchPair::offsetOfStart());

  // First, fill in a skeletal MatchPairs instance on the stack. This will be
  // passed to the OOL stub in the caller if we aren't able to execute the
  // RegExp inline, and that stub needs to be able to determine whether the
  // execution finished successfully.

  // Initialize MatchPairs::pairCount to 1. The correct value can only
  // be determined after loading the RegExpShared. If the RegExpShared
  // has Kind::Atom, this is the correct pairCount.
  masm.store32(Imm32(1), pairCountAddress);

  // Initialize MatchPairs::pairs pointer
  masm.computeEffectiveAddress(pairsArrayAddress, temp1);
  masm.storePtr(temp1, pairsPointerAddress);

  // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch
  masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);

  // Determine the set of volatile inputs to save when calling into C++ or
  // regexp code.
  LiveGeneralRegisterSet volatileRegs;
  if (lastIndex.volatile_()) {
    volatileRegs.add(lastIndex);
  }
  if (input.volatile_()) {
    volatileRegs.add(input);
  }
  if (regexp.volatile_()) {
    volatileRegs.add(regexp);
  }

  // Ensure the input string is not a rope: regexp execution needs a flat
  // (linear) character buffer. Ropes are linearized via a pure C++ helper.
  Label isLinear;
  masm.branchIfNotRope(input, &isLinear);
  {
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSString*);
    masm.setupUnalignedABICall(temp1);
    masm.passABIArg(input);
    masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();

    MOZ_ASSERT(!volatileRegs.has(temp1));
    masm.storeCallPointerResult(temp1);
    masm.PopRegsInMask(volatileRegs);

    // A null result means linearization failed (e.g. OOM); bail to OOL.
    masm.branchTestPtr(Assembler::Zero, temp1, temp1, failure);
  }
  masm.bind(&isLinear);

  // Load the RegExpShared. An undefined slot means the shared has not been
  // created yet; fall back to the OOL path in that case.
  Register regexpReg = temp1;
  Address sharedSlot = Address(
      regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
  masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
  masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);

  // Handle Atom matches: a non-null pattern atom means the regexp is a plain
  // string search, executed via a C++ helper instead of JIT regexp code.
  Label notAtom, checkSuccess;
  masm.branchPtr(Assembler::Equal,
                 Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
                 ImmWord(0), &notAtom);
  {
    masm.computeEffectiveAddress(matchPairsAddress, temp3);

    masm.PushRegsInMask(volatileRegs);
    using Fn = RegExpRunStatus (*)(RegExpShared* re, JSLinearString* input,
                                   size_t start, MatchPairs* matchPairs);
    masm.setupUnalignedABICall(temp2);
    masm.passABIArg(regexpReg);
    masm.passABIArg(input);
    masm.passABIArg(lastIndex);
    masm.passABIArg(temp3);
    masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();

    MOZ_ASSERT(!volatileRegs.has(temp1));
    masm.storeCallInt32Result(temp1);
    masm.PopRegsInMask(volatileRegs);

    masm.jump(&checkSuccess);
  }
  masm.bind(&notAtom);

  // Don't handle regexps with too many capture pairs.
  masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
  masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
                failure);

  // Fill in the pair count in the MatchPairs on the stack.
  masm.store32(temp2, pairCountAddress);

  // Update lastIndex if necessary.
  StepBackToLeadSurrogate(masm, regexpReg, input, lastIndex, temp2, temp3);

  // Load code pointer and length of input (in bytes).
  // Store the input start in the InputOutputData.
  Register codePointer = temp1;  // Note: temp1 was previously regexpReg.
  Register byteLength = temp3;
  {
    Label isLatin1, done;
    masm.loadStringLength(input, byteLength);

    masm.branchLatin1String(input, &isLatin1);

    // Two-byte input
    masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
    masm.storePtr(temp2, inputStartAddress);
    masm.loadPtr(
        Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
        codePointer);
    // Two-byte: byte length is 2 * char count.
    masm.lshiftPtr(Imm32(1), byteLength);
    masm.jump(&done);

    // Latin1 input
    masm.bind(&isLatin1);
    masm.loadStringChars(input, temp2, CharEncoding::Latin1);
    masm.storePtr(temp2, inputStartAddress);
    masm.loadPtr(
        Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
        codePointer);

    masm.bind(&done);

    // Store end pointer
    masm.addPtr(byteLength, temp2);
    masm.storePtr(temp2, inputEndAddress);
  }

  // Guard that the RegExpShared has been compiled for this type of input.
  // If it has not been compiled, we fall back to the OOL case, which will
  // do a VM call into the interpreter.
  // TODO: add an interpreter trampoline?
  masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
  masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);

  // Finish filling in the InputOutputData instance on the stack
  masm.computeEffectiveAddress(matchPairsAddress, temp2);
  masm.storePtr(temp2, matchesAddress);
  masm.storePtr(lastIndex, startIndexAddress);

  // Execute the RegExp: the compiled regexp code takes a pointer to the
  // InputOutputData as its single argument.
  masm.computeEffectiveAddress(
      Address(FramePointer, inputOutputDataStartOffset), temp2);
  masm.PushRegsInMask(volatileRegs);
  masm.setupUnalignedABICall(temp3);
  masm.passABIArg(temp2);
  masm.callWithABI(codePointer);
  masm.storeCallInt32Result(temp1);
  masm.PopRegsInMask(volatileRegs);

  // Both the atom path and the JIT path join here with the run status in
  // temp1.
  masm.bind(&checkSuccess);
  masm.branch32(Assembler::Equal, temp1,
                Imm32(RegExpRunStatus_Success_NotFound), notFound);
  masm.branch32(Assembler::Equal, temp1, Imm32(RegExpRunStatus_Error), failure);

  // Lazily update the RegExpStatics.
  size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                  RegExpRealm::offsetOfRegExpStatics();
  masm.loadGlobalObjectData(temp1);
  masm.loadPtr(Address(temp1, offset), temp1);
  UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
                      initialStringHeap, volatileRegs);

  return true;
}
// Forward declaration; emits a character-copying loop from |from| to |to|
// (defined later in this file).
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding encoding);
// Helper that emits the code to materialize a single match substring for the
// RegExpMatcher/RegExpExecMatch stubs. Depending on the match length the
// result is the empty atom, a static string, a (fat) inline string, the base
// string itself, or a dependent string sharing the base's characters.
class CreateDependentString {
  CharEncoding encoding_;
  // Output register; receives the created string.
  Register string_;
  Register temp1_;
  Register temp2_;
  // Jumped to when the fallback allocation also fails.
  Label* failure_;

  // Which allocation path needed the OOL fallback.
  enum class FallbackKind : uint8_t {
    InlineString,
    FatInlineString,
    NotInlineString,
    Count
  };

  // Per-kind OOL entry (fallbacks_) and rejoin (joins_) labels.
  mozilla::EnumeratedArray<FallbackKind, FallbackKind::Count, Label> fallbacks_,
      joins_;

 public:
  CreateDependentString(CharEncoding encoding, Register string, Register temp1,
                        Register temp2, Label* failure)
      : encoding_(encoding),
        string_(string),
        temp1_(temp1),
        temp2_(temp2),
        failure_(failure) {}

  Register string() const { return string_; }
  CharEncoding encoding() const { return encoding_; }

  // Generate code that creates DependentString.
  // Caller should call generateFallback after masm.ret(), to generate
  // fallback path.
  void generate(MacroAssembler& masm, const JSAtomState& names,
                CompileRuntime* runtime, Register base,
                BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
                gc::Heap initialStringHeap);

  // Generate fallback path for creating DependentString.
  void generateFallback(MacroAssembler& masm);
};
// Emit the inline (non-fallback) path that creates the match substring in
// |string_| from |base|[start, limit), where start/limit are loaded from the
// given frame-pointer-based addresses.
void CreateDependentString::generate(MacroAssembler& masm,
                                     const JSAtomState& names,
                                     CompileRuntime* runtime, Register base,
                                     BaseIndex startIndexAddress,
                                     BaseIndex limitIndexAddress,
                                     gc::Heap initialStringHeap) {
  JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // Allocate a new GC string of the given kind and initialize its flags word.
  // On allocation failure, jumps to the matching fallbacks_[kind] label; the
  // OOL fallback rejoins at joins_[kind].
  auto newGCString = [&](FallbackKind kind) {
    uint32_t flags = kind == FallbackKind::InlineString
                         ? JSString::INIT_THIN_INLINE_FLAGS
                     : kind == FallbackKind::FatInlineString
                         ? JSString::INIT_FAT_INLINE_FLAGS
                         : JSString::INIT_DEPENDENT_FLAGS;
    if (encoding_ == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }

    if (kind != FallbackKind::FatInlineString) {
      masm.newGCString(string_, temp2_, initialStringHeap, &fallbacks_[kind]);
    } else {
      masm.newGCFatInlineString(string_, temp2_, initialStringHeap,
                                &fallbacks_[kind]);
    }
    masm.bind(&joins_[kind]);
    masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
  };

  // Compute the string length: temp1_ = limit - start (temp2_ keeps start).
  masm.load32(startIndexAddress, temp2_);
  masm.load32(limitIndexAddress, temp1_);
  masm.sub32(temp2_, temp1_);

  Label done, nonEmpty;

  // Zero length matches use the empty string.
  masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
  masm.movePtr(ImmGCPtr(names.empty_), string_);
  masm.jump(&done);

  masm.bind(&nonEmpty);

  // Complete matches use the base string.
  Label nonBaseStringMatch;
  masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
  masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
                temp1_, &nonBaseStringMatch);
  masm.movePtr(base, string_);
  masm.jump(&done);

  masm.bind(&nonBaseStringMatch);

  Label notInline;

  int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
                                ? JSFatInlineString::MAX_LENGTH_LATIN1
                                : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
  masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
  {
    // Make a thin or fat inline string.
    Label stringAllocated, fatInline;

    int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
                                      ? JSThinInlineString::MAX_LENGTH_LATIN1
                                      : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
    masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
                  &fatInline);
    if (encoding_ == CharEncoding::Latin1) {
      // One character Latin-1 strings can be loaded directly from the
      // static strings table.
      Label thinInline;
      masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
      {
        static_assert(
            StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
            "Latin-1 strings can be loaded from static strings");

        masm.loadStringChars(base, temp1_, encoding_);
        masm.loadChar(temp1_, temp2_, temp1_, encoding_);

        masm.movePtr(ImmPtr(&runtime->staticStrings().unitStaticTable),
                     string_);
        masm.loadPtr(BaseIndex(string_, temp1_, ScalePointer), string_);

        masm.jump(&done);
      }
      masm.bind(&thinInline);
    }
    {
      newGCString(FallbackKind::InlineString);
      masm.jump(&stringAllocated);
    }

    masm.bind(&fatInline);
    { newGCString(FallbackKind::FatInlineString); }
    masm.bind(&stringAllocated);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    // Temporarily spill string_ and base so they can double as scratch
    // registers for the character copy below.
    masm.push(string_);
    masm.push(base);

    MOZ_ASSERT(startIndexAddress.base == FramePointer,
               "startIndexAddress is still valid after stack pushes");

    // Load chars pointer for the new string.
    masm.loadInlineStringCharsForStore(string_, string_);

    // Load the source characters pointer.
    masm.loadStringChars(base, temp2_, encoding_);
    masm.load32(startIndexAddress, base);
    masm.addToCharPtr(temp2_, base, encoding_);

    CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);

    masm.pop(base);
    masm.pop(string_);

    masm.jump(&done);
  }

  masm.bind(&notInline);

  {
    // Make a dependent string.
    // Warning: string may be tenured (if the fallback case is hit), so
    // stores into it must be post barriered.
    newGCString(FallbackKind::NotInlineString);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    masm.loadNonInlineStringChars(base, temp1_, encoding_);
    masm.load32(startIndexAddress, temp2_);
    masm.addToCharPtr(temp1_, temp2_, encoding_);
    masm.storeNonInlineStringChars(temp1_, string_);
    masm.storeDependentStringBase(base, string_);
    masm.movePtr(base, temp1_);

    // Follow any base pointer if the input is itself a dependent string.
    // Watch for undepended strings, which have a base pointer but don't
    // actually share their characters with it.
    Label noBase;
    masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
    masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
    masm.branchTest32(Assembler::Zero, temp2_, Imm32(JSString::DEPENDENT_BIT),
                      &noBase);
    masm.loadDependentStringBase(base, temp1_);
    masm.storeDependentStringBase(temp1_, string_);
    masm.bind(&noBase);

    // Post-barrier the base store, whether it was the direct or indirect
    // base (both will end up in temp1 here).
    masm.branchPtrInNurseryChunk(Assembler::Equal, string_, temp2_, &done);
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp1_, temp2_, &done);

    LiveRegisterSet regsToSave(RegisterSet::Volatile());
    regsToSave.takeUnchecked(temp1_);
    regsToSave.takeUnchecked(temp2_);

    masm.PushRegsInMask(regsToSave);

    masm.mov(ImmPtr(runtime), temp1_);

    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
    masm.setupUnalignedABICall(temp2_);
    masm.passABIArg(temp1_);
    masm.passABIArg(string_);
    masm.callWithABI<Fn, PostWriteBarrier>();

    masm.PopRegsInMask(regsToSave);
  }

  masm.bind(&done);
}
// Emit the OOL allocation fallbacks for each FallbackKind: call into C++ to
// allocate the string, then rejoin the inline path at joins_[kind]. Must be
// emitted after the main stub body (see generate()).
void CreateDependentString::generateFallback(MacroAssembler& masm) {
  JitSpew(JitSpew_Codegen,
          "# Emitting CreateDependentString fallback (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // string_ is the call result, temp2_ is clobbered by the rejoined inline
  // path, so neither needs to be preserved across the call.
  LiveRegisterSet regsToSave(RegisterSet::Volatile());
  regsToSave.takeUnchecked(string_);
  regsToSave.takeUnchecked(temp2_);

  for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
    masm.bind(&fallbacks_[kind]);

    masm.PushRegsInMask(regsToSave);

    using Fn = void* (*)(JSContext* cx);
    masm.setupUnalignedABICall(string_);
    masm.loadJSContext(string_);
    masm.passABIArg(string_);
    if (kind == FallbackKind::FatInlineString) {
      masm.callWithABI<Fn, AllocateFatInlineString>();
    } else {
      masm.callWithABI<Fn, AllocateDependentString>();
    }
    masm.storeCallPointerResult(string_);

    masm.PopRegsInMask(regsToSave);

    // Null result means the C++ allocation failed too.
    masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);

    masm.jump(&joins_[kind]);
  }
}
// Generate the RegExpMatcher and RegExpExecMatch stubs. These are very similar,
// but RegExpExecMatch also has to load and update .lastIndex for global/sticky
// regular expressions.
//
// On success the stub returns the match-result array object in JSReturnOperand;
// on no-match it returns null; the UndefinedValue() return signals the caller
// to take the out-of-line VM path.
static JitCode* GenerateRegExpMatchStubShared(JSContext* cx,
                                              gc::Heap initialStringHeap,
                                              bool isExecMatch) {
  if (isExecMatch) {
    JitSpew(JitSpew_Codegen, "# Emitting RegExpExecMatch stub");
  } else {
    JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
  }

  // |initialStringHeap| could be stale after a GC.
  JS::AutoCheckCannotGC nogc(cx);

  // Fixed calling convention shared with the LIR call sites below.
  Register regexp = RegExpMatcherRegExpReg;
  Register input = RegExpMatcherStringReg;
  Register lastIndex = RegExpMatcherLastIndexReg;
  ValueOperand result = JSReturnOperand;

  // We are free to clobber all registers, as LRegExpMatcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();
  Register maybeTemp4 = InvalidReg;
  if (!regs.empty()) {
    // There are not enough registers on x86.
    maybeTemp4 = regs.takeAny();
  }
  Register maybeTemp5 = InvalidReg;
  if (!regs.empty()) {
    // There are not enough registers on x86.
    maybeTemp5 = regs.takeAny();
  }

  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "GenerateRegExpMatchStubShared");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // ExecMatch only: read .lastIndex (clamped per global/sticky rules); a
  // too-large lastIndex jumps straight to the not-found path.
  Label notFoundZeroLastIndex;
  if (isExecMatch) {
    masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
  }

  // The InputOutputData is placed above the frame pointer and return address on
  // the stack.
  int32_t inputOutputDataStartOffset = 2 * sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // If a regexp has named captures, fall back to the OOL stub, which
  // will end up calling CreateRegExpMatchResults.
  Register shared = temp2;
  masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
                                          RegExpObject::SHARED_SLOT)),
                      shared, JSVAL_TYPE_PRIVATE_GCTHING);
  masm.branchPtr(Assembler::NotEqual,
                 Address(shared, RegExpShared::offsetOfGroupsTemplate()),
                 ImmWord(0), &oolEntry);

  // Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
  masm.branchTest32(Assembler::NonZero,
                    Address(shared, RegExpShared::offsetOfFlags()),
                    Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);

  Address pairCountAddress =
      RegExpPairCountAddress(masm, inputOutputDataStartOffset);

  // Construct the result.
  Register object = temp1;
  {
    // In most cases, the array will have just 1-2 elements, so we optimize for
    // that by emitting separate code paths for capacity 2/6/14 (= 4/8/16 slots
    // because two slots are used for the elements header).

    // Load the array length in temp2 and the shape in temp3.
    Label allocated;
    masm.load32(pairCountAddress, temp2);
    size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                    RegExpRealm::offsetOfNormalMatchResultShape();
    masm.loadGlobalObjectData(temp3);
    masm.loadPtr(Address(temp3, offset), temp3);

    auto emitAllocObject = [&](size_t elementCapacity) {
      gc::AllocKind kind = GuessArrayGCKind(elementCapacity);
      MOZ_ASSERT(CanChangeToBackgroundAllocKind(kind, &ArrayObject::class_));
      kind = ForegroundToBackgroundAllocKind(kind);

#ifdef DEBUG
      // Assert all of the available slots are used for |elementCapacity|
      // elements.
      size_t usedSlots = ObjectElements::VALUES_PER_HEADER + elementCapacity;
      MOZ_ASSERT(usedSlots == GetGCKindSlots(kind));
#endif

      constexpr size_t numUsedDynamicSlots =
          RegExpRealm::MatchResultObjectSlotSpan;
      constexpr size_t numDynamicSlots =
          RegExpRealm::MatchResultObjectNumDynamicSlots;
      constexpr size_t arrayLength = 1;
      masm.createArrayWithFixedElements(object, temp3, temp2, temp3,
                                        arrayLength, elementCapacity,
                                        numUsedDynamicSlots, numDynamicSlots,
                                        kind, gc::Heap::Default, &oolEntry);
    };

    Label moreThan2;
    masm.branch32(Assembler::Above, temp2, Imm32(2), &moreThan2);
    emitAllocObject(2);
    masm.jump(&allocated);

    Label moreThan6;
    masm.bind(&moreThan2);
    masm.branch32(Assembler::Above, temp2, Imm32(6), &moreThan6);
    emitAllocObject(6);
    masm.jump(&allocated);

    masm.bind(&moreThan6);
    static_assert(RegExpObject::MaxPairCount == 14);
    emitAllocObject(RegExpObject::MaxPairCount);

    masm.bind(&allocated);
  }

  // clang-format off
  /*
   * [SMDOC] Stack layout for the RegExpMatcher stub
   *
   *                                    +---------------+
   *               FramePointer +-----> |Caller-FramePtr|
   *                                    +---------------+
   *                                    |Return-Address |
   *                                    +---------------+
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *                                    +---------------+
   *                                    +---------------+
   *                                    |  MatchPairs   |
   *           pairsCountAddress +----------->  count   |
   *                                    |       pairs   |
   *                                    |               |
   *                                    +---------------+
   *     pairsVectorStartOffset +-----> +---------------+
   *                                    |   MatchPair   |
   *             matchPairStart +------------>  start   |  <-------+
   *             matchPairLimit +------------>  limit   |          | Reserved space for
   *                                    +---------------+          | `RegExpObject::MaxPairCount`
   *                                           .                   | MatchPair objects.
   *                                           .                   |
   *                                           .                   | `count` objects will be
   *                                    +---------------+          | initialized and can be
   *                                    |   MatchPair   |          | accessed below.
   *                                    |       start   |  <-------+
   *                                    |       limit   |
   *                                    +---------------+
   */
  // clang-format on

  static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
                "MatchPair consists of two int32 values representing the start"
                "and the end offset of the match");

  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);

  // Incremented by one below for each match pair.
  Register matchIndex = temp2;
  masm.move32(Imm32(0), matchIndex);

  // The element in which to store the result of the current match.
  size_t elementsOffset = NativeObject::offsetOfFixedElements();
  BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);

  // The current match pair's "start" and "limit" member.
  BaseIndex matchPairStart(FramePointer, matchIndex, TimesEight,
                           pairsVectorStartOffset + MatchPair::offsetOfStart());
  BaseIndex matchPairLimit(FramePointer, matchIndex, TimesEight,
                           pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Where to go when dependent-string creation fails: the plain OOL entry,
  // unless |regexp|/|lastIndex| were repurposed as temps and must be restored
  // first.
  Label* depStrFailure = &oolEntry;
  Label restoreRegExpAndLastIndex;

  Register temp4;
  if (maybeTemp4 == InvalidReg) {
    depStrFailure = &restoreRegExpAndLastIndex;

    // We don't have enough registers for a fourth temporary. Reuse |regexp|
    // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
    masm.push(regexp);
    temp4 = regexp;
  } else {
    temp4 = maybeTemp4;
  }

  Register temp5;
  if (maybeTemp5 == InvalidReg) {
    depStrFailure = &restoreRegExpAndLastIndex;

    // We don't have enough registers for a fifth temporary. Reuse |lastIndex|
    // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
    masm.push(lastIndex);
    temp5 = lastIndex;
  } else {
    temp5 = maybeTemp5;
  }

  auto maybeRestoreRegExpAndLastIndex = [&]() {
    // Pops must mirror the pushes above (lastIndex was pushed last).
    if (maybeTemp5 == InvalidReg) {
      masm.pop(lastIndex);
    }
    if (maybeTemp4 == InvalidReg) {
      masm.pop(regexp);
    }
  };

  // Loop to construct the match strings. There are two different loops,
  // depending on whether the input is a Two-Byte or a Latin-1 string.
  CreateDependentString depStrs[]{
      {CharEncoding::TwoByte, temp3, temp4, temp5, depStrFailure},
      {CharEncoding::Latin1, temp3, temp4, temp5, depStrFailure},
  };

  {
    Label isLatin1, done;
    masm.branchLatin1String(input, &isLatin1);

    for (auto& depStr : depStrs) {
      if (depStr.encoding() == CharEncoding::Latin1) {
        masm.bind(&isLatin1);
      }

      Label matchLoop;
      masm.bind(&matchLoop);

      static_assert(MatchPair::NoMatch == -1,
                    "MatchPair::start is negative if no match was found");

      Label isUndefined, storeDone;
      masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
                    &isUndefined);
      {
        depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
                        input, matchPairStart, matchPairLimit,
                        initialStringHeap);

        // Storing into nursery-allocated results object's elements; no post
        // barrier.
        masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
        masm.jump(&storeDone);
      }
      masm.bind(&isUndefined);
      { masm.storeValue(UndefinedValue(), objectMatchElement); }
      masm.bind(&storeDone);

      masm.add32(Imm32(1), matchIndex);
      masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
                    &done);
      masm.jump(&matchLoop);
    }

#ifdef DEBUG
    masm.assumeUnreachable("The match string loop doesn't fall through.");
#endif

    masm.bind(&done);
  }

  maybeRestoreRegExpAndLastIndex();

  // Fill in the rest of the output object.
  masm.store32(
      matchIndex,
      Address(object,
              elementsOffset + ObjectElements::offsetOfInitializedLength()));
  masm.store32(
      matchIndex,
      Address(object, elementsOffset + ObjectElements::offsetOfLength()));

  Address firstMatchPairStartAddress(
      FramePointer, pairsVectorStartOffset + MatchPair::offsetOfStart());
  Address firstMatchPairLimitAddress(
      FramePointer, pairsVectorStartOffset + MatchPair::offsetOfLimit());

  static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
                "First slot holds the 'index' property");
  static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
                "Second slot holds the 'input' property");

  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);

  masm.load32(firstMatchPairStartAddress, temp3);
  masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));

  // No post barrier needed (address is within nursery object.)
  masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));

  // For the ExecMatch stub, if the regular expression is global or sticky, we
  // have to update its .lastIndex slot.
  if (isExecMatch) {
    MOZ_ASSERT(object != lastIndex);
    Label notGlobalOrSticky;
    masm.branchTest32(Assembler::Zero, flagsSlot,
                      Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                      &notGlobalOrSticky);
    masm.load32(firstMatchPairLimitAddress, lastIndex);
    masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
    masm.bind(&notGlobalOrSticky);
  }

  // All done!
  masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&notFound);
  if (isExecMatch) {
    // Per spec, a failed global/sticky exec resets .lastIndex to 0.
    Label notGlobalOrSticky;
    masm.branchTest32(Assembler::Zero, flagsSlot,
                      Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                      &notGlobalOrSticky);
    masm.bind(&notFoundZeroLastIndex);
    masm.storeValue(Int32Value(0), lastIndexSlot);
    masm.bind(&notGlobalOrSticky);
  }
  masm.moveValue(NullValue(), result);
  masm.pop(FramePointer);
  masm.ret();

  // Fallback paths for CreateDependentString.
  for (auto& depStr : depStrs) {
    depStr.generateFallback(masm);
  }

  // Fall-through to the ool entry after restoring the registers.
  masm.bind(&restoreRegExpAndLastIndex);
  maybeRestoreRegExpAndLastIndex();

  // Use an undefined value to signal to the caller that the OOL stub needs to
  // be called.
  masm.bind(&oolEntry);
  masm.moveValue(UndefinedValue(), result);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  const char* name = isExecMatch ? "RegExpExecMatchStub" : "RegExpMatcherStub";
  CollectPerfSpewerJitCodeProfile(code, name);
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, name);
#endif

  return code;
}
2786 JitCode* JitZone::generateRegExpMatcherStub(JSContext* cx) {
2787 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2788 /* isExecMatch = */ false);
2791 JitCode* JitZone::generateRegExpExecMatchStub(JSContext* cx) {
2792 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2793 /* isExecMatch = */ true);
2796 class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator> {
2797 LRegExpMatcher* lir_;
2799 public:
2800 explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) {}
2802 void accept(CodeGenerator* codegen) override {
2803 codegen->visitOutOfLineRegExpMatcher(this);
2806 LRegExpMatcher* lir() const { return lir_; }
// OOL fallback for visitRegExpMatcher: perform the match via a VM call to
// RegExpMatcherRaw, passing the stack-allocated MatchPairs prepared by the
// inline stub.
void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) {
  LRegExpMatcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // The MatchPairs sits just above the InputOutputData in the stack space
  // reserved by visitRegExpMatcher.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  // Arguments are pushed in reverse order of the Fn signature below.
  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call, and live registers
  // are already saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, HandleObject regexp, HandleString input,
               int32_t lastIndex, MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpMatcherRaw>(lir);

  masm.jump(ool->rejoin());
}
// Emit LRegExpMatcher: reserve the stub's stack area, call the shared
// RegExpMatcher stub, and dispatch to the OOL VM path when the stub returns
// the undefined sentinel.
void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
  // The stub hard-codes this register assignment; LIR must match it.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // The input registers must not alias the return-value registers.
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
#endif

  // Stack space for the InputOutputData/MatchPairs used by the stub.
  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpMatcher* ool = new (alloc()) OutOfLineRegExpMatcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpMatcherStub =
      jitZone->regExpMatcherStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpMatcherStub);
  // Undefined return value == "call the OOL VM path".
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
// Out-of-line path for LRegExpExecMatch; dispatches back to
// CodeGenerator::visitOutOfLineRegExpExecMatch.
class OutOfLineRegExpExecMatch : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpExecMatch* lir_;

 public:
  explicit OutOfLineRegExpExecMatch(LRegExpExecMatch* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpExecMatch(this);
  }

  LRegExpExecMatch* lir() const { return lir_; }
};
// Out-of-line fallback for LRegExpExecMatch: call
// RegExpBuiltinExecMatchFromJit in the VM, passing the MatchPairs scratch
// area reserved on the stack by visitRegExpExecMatch.
void CodeGenerator::visitOutOfLineRegExpExecMatch(
    OutOfLineRegExpExecMatch* ool) {
  LRegExpExecMatch* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Any scratch register not aliasing the operands will do.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // Address of the MatchPairs, placed above the InputOutputData.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  pushArg(temp);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
               MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpBuiltinExecMatchFromJit>(lir);
  masm.jump(ool->rejoin());
}
// Emit the in-line path for LRegExpExecMatch: reserve the regexp scratch
// stack area, call the per-zone RegExpExecMatch stub, and take the
// out-of-line VM path when the stub returns the Undefined failure sentinel.
void CodeGenerator::visitRegExpExecMatch(LRegExpExecMatch* lir) {
  // Operands are pinned to the stub's fixed input registers.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // Inputs must not alias the boxed return registers.
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
#endif

  masm.reserveStack(RegExpReservedStack);

  auto* ool = new (alloc()) OutOfLineRegExpExecMatch(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpExecMatchStub =
      jitZone->regExpExecMatchStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpExecMatchStub);
  // Undefined signals "retry in C++".
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());

  masm.bind(ool->rejoin());
  masm.freeStack(RegExpReservedStack);
}
// Generate the per-zone RegExpSearcher stub. On success it returns the match
// start index in ReturnReg and stashes the match limit in
// cx->regExpSearcherLastLimit; on no-match it returns
// RegExpSearcherResultNotFound, and on failure RegExpSearcherResultFailed so
// the caller can retry in C++. Returns nullptr on OOM.
JitCode* JitZone::generateRegExpSearcherStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");

  Register regexp = RegExpSearcherRegExpReg;
  Register input = RegExpSearcherStringReg;
  Register lastIndex = RegExpSearcherLastIndexReg;
  Register result = ReturnReg;

  // We are free to clobber all registers, as LRegExpSearcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpSearcherStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

#ifdef DEBUG
  // Store sentinel value to cx->regExpSearcherLastLimit.
  // See comment in RegExpSearcherImpl.
  masm.loadJSContext(temp1);
  masm.store32(Imm32(RegExpSearcherLastLimitSentinel),
               Address(temp1, JSContext::offsetOfRegExpSearcherLastLimit()));
#endif

  // The InputOutputData is placed above the frame pointer and return address on
  // the stack.
  int32_t inputOutputDataStartOffset = 2 * sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // clang-format off
  /*
   * [SMDOC] Stack layout for the RegExpSearcher stub
   *
   *                                    +---------------+
   *               FramePointer +-----> |Caller-FramePtr|
   *                                    +---------------+
   *                                    |Return-Address |
   *                                    +---------------+
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *                                    +---------------+
   *                                    +---------------+
   *                                    |  MatchPairs   |
   *                                    |       count   |
   *                                    |       pairs   |
   *                                    |               |
   *                                    +---------------+
   *     pairsVectorStartOffset +-----> +---------------+
   *                                    |   MatchPair   |
   *             matchPairStart +------------>  start   |  <-------+
   *             matchPairLimit +------------>  limit   |          | Reserved space for
   *                                    +---------------+          | `RegExpObject::MaxPairCount`
   *                                           .                   | MatchPair objects.
   *                                           .                   |
   *                                           .                   | Only a single object will
   *                                    +---------------+          | be initialized and can be
   *                                    |   MatchPair   |          | accessed below.
   *                                    |       start   |  <-------+
   *                                    |       limit   |
   *                                    +---------------+
   */
  // clang-format on

  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairStart(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfStart());
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Store match limit to cx->regExpSearcherLastLimit and return the index.
  // |input| is dead here, so it can be reused as a scratch register.
  masm.load32(matchPairLimit, result);
  masm.loadJSContext(input);
  masm.store32(result,
               Address(input, JSContext::offsetOfRegExpSearcherLastLimit()));
  masm.load32(matchPairStart, result);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&notFound);
  masm.move32(Imm32(RegExpSearcherResultNotFound), result);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpSearcherResultFailed), result);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpSearcherStub");
#endif

  return code;
}
// Out-of-line path for LRegExpSearcher; dispatches back to
// CodeGenerator::visitOutOfLineRegExpSearcher.
class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpSearcher* lir_;

 public:
  explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpSearcher(this);
  }

  LRegExpSearcher* lir() const { return lir_; }
};
// Out-of-line fallback for LRegExpSearcher: the stub failed, so call
// RegExpSearcherRaw in the VM with the same operands plus the stack-allocated
// MatchPairs scratch area.
void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) {
  LRegExpSearcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Pick a scratch register distinct from the operands.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // MatchPairs live just above the InputOutputData on the stack.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call, and live registers
  // have already been saved by the register allocator.
  using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
                      int32_t lastIndex, MatchPairs* pairs, int32_t* result);
  callVM<Fn, RegExpSearcherRaw>(lir);

  masm.jump(ool->rejoin());
}
// Emit the in-line path for LRegExpSearcher: reserve the regexp scratch
// stack area, call the per-zone RegExpSearcher stub, and take the
// out-of-line VM path when the stub returns RegExpSearcherResultFailed.
void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
  // Operands are pinned to the stub's fixed input registers.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpSearcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpSearcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpSearcherLastIndexReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // Inputs must not alias the int32 result register.
  static_assert(RegExpSearcherRegExpReg != ReturnReg);
  static_assert(RegExpSearcherStringReg != ReturnReg);
  static_assert(RegExpSearcherLastIndexReg != ReturnReg);

  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpSearcher* ool = new (alloc()) OutOfLineRegExpSearcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpSearcherStub =
      jitZone->regExpSearcherStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpSearcherStub);
  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
                ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
// Load (and clear) cx->regExpSearcherLastLimit — the match limit that the
// RegExpSearcher stub stored — into the output register.
void CodeGenerator::visitRegExpSearcherLastLimit(
    LRegExpSearcherLastLimit* lir) {
  Register result = ToRegister(lir->output());
  Register scratch = ToRegister(lir->temp0());

  masm.loadAndClearRegExpSearcherLastLimit(result, scratch);
}
// Generate the per-zone RegExpExecTest stub: returns 1/0 in ReturnReg for
// match/no-match, or RegExpExecTestResultFailed if the search must be retried
// in C++. For global/sticky regexps it also updates the .lastIndex slot.
// Returns nullptr on OOM.
JitCode* JitZone::generateRegExpExecTestStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpExecTest stub");

  Register regexp = RegExpExecTestRegExpReg;
  Register input = RegExpExecTestStringReg;
  Register result = ReturnReg;

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpExecTestStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // We are free to clobber all registers, as LRegExpExecTest is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);

  // Ensure lastIndex != result.
  regs.take(result);
  Register lastIndex = regs.takeAny();
  regs.add(result);
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());

  masm.reserveStack(RegExpReservedStack);

  // Load lastIndex and skip RegExp execution if needed.
  Label notFoundZeroLastIndex;
  masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);

  // In visitRegExpMatcher and visitRegExpSearcher, we reserve stack space
  // before calling the stub. For RegExpExecTest we call the stub before
  // reserving stack space, so the offset of the InputOutputData relative to the
  // frame pointer is negative.
  constexpr int32_t inputOutputDataStartOffset = -int32_t(RegExpReservedStack);

  // On ARM64, load/store instructions can encode an immediate offset in the
  // range [-256, 4095]. If we ever fail this assertion, it would be more
  // efficient to store the data above the frame pointer similar to
  // RegExpMatcher and RegExpSearcher.
  static_assert(inputOutputDataStartOffset >= -256);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // Set `result` to true/false to indicate found/not-found, or to
  // RegExpExecTestResultFailed if we have to retry in C++. If the regular
  // expression is global or sticky, we also have to update its .lastIndex slot.

  Label done;
  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Found: result = 1; global/sticky regexps also store the match limit into
  // .lastIndex.
  masm.move32(Imm32(1), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.load32(matchPairLimit, lastIndex);
  masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
  masm.jump(&done);

  // Not found: result = 0; global/sticky regexps reset .lastIndex to 0.
  masm.bind(&notFound);
  masm.move32(Imm32(0), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  // lastIndex was already out of range: no match, reset .lastIndex.
  masm.bind(&notFoundZeroLastIndex);
  masm.move32(Imm32(0), result);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpExecTestResultFailed), result);

  masm.bind(&done);
  masm.freeStack(RegExpReservedStack);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpExecTestStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpExecTestStub");
#endif

  return code;
}
// Out-of-line path for LRegExpExecTest; dispatches back to
// CodeGenerator::visitOutOfLineRegExpExecTest.
class OutOfLineRegExpExecTest : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpExecTest* lir_;

 public:
  explicit OutOfLineRegExpExecTest(LRegExpExecTest* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpExecTest(this);
  }

  LRegExpExecTest* lir() const { return lir_; }
};
// Out-of-line fallback for LRegExpExecTest: the stub failed, so call
// RegExpBuiltinExecTestFromJit in the VM.
void CodeGenerator::visitOutOfLineRegExpExecTest(OutOfLineRegExpExecTest* ool) {
  LRegExpExecTest* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn = bool (*)(JSContext* cx, Handle<RegExpObject*> regexp,
                      HandleString input, bool* result);
  callVM<Fn, RegExpBuiltinExecTestFromJit>(lir);

  masm.jump(ool->rejoin());
}
// Emit the in-line path for LRegExpExecTest: call the per-zone RegExpExecTest
// stub (which reserves its own stack space) and fall back to the out-of-line
// VM call if the stub returns RegExpExecTestResultFailed.
void CodeGenerator::visitRegExpExecTest(LRegExpExecTest* lir) {
  // Operands are pinned to the stub's fixed input registers.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpExecTestRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpExecTestStringReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // Inputs must not alias the boolean result register.
  static_assert(RegExpExecTestRegExpReg != ReturnReg);
  static_assert(RegExpExecTestStringReg != ReturnReg);

  auto* ool = new (alloc()) OutOfLineRegExpExecTest(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpExecTestStub =
      jitZone->regExpExecTestStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpExecTestStub);

  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpExecTestResultFailed),
                ool->entry());

  masm.bind(ool->rejoin());
}
3307 void CodeGenerator::visitRegExpHasCaptureGroups(LRegExpHasCaptureGroups* ins) {
3308 Register regexp = ToRegister(ins->regexp());
3309 Register input = ToRegister(ins->input());
3310 Register output = ToRegister(ins->output());
3312 using Fn =
3313 bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
3314 auto* ool = oolCallVM<Fn, js::RegExpHasCaptureGroups>(
3315 ins, ArgList(regexp, input), StoreRegisterTo(output));
3317 // Load RegExpShared in |output|.
3318 Label vmCall;
3319 masm.loadParsedRegExpShared(regexp, output, ool->entry());
3321 // Return true iff pairCount > 1.
3322 Label returnTrue;
3323 masm.branch32(Assembler::Above,
3324 Address(output, RegExpShared::offsetOfPairCount()), Imm32(1),
3325 &returnTrue);
3326 masm.move32(Imm32(0), output);
3327 masm.jump(ool->rejoin());
3329 masm.bind(&returnTrue);
3330 masm.move32(Imm32(1), output);
3332 masm.bind(ool->rejoin());
// Out-of-line path for LRegExpPrototypeOptimizable; dispatches back to
// CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable.
class OutOfLineRegExpPrototypeOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpPrototypeOptimizable* ins_;

 public:
  explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
  }

  LRegExpPrototypeOptimizable* ins() const { return ins_; }
};
// Emit the fast check for "is RegExp.prototype in an optimizable (unmodified)
// state?". The inline path sets output=1; the slow branch-and-check path runs
// out of line and writes the boolean via an ABI call.
void CodeGenerator::visitRegExpPrototypeOptimizable(
    LRegExpPrototypeOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  OutOfLineRegExpPrototypeOptimizable* ool =
      new (alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  const GlobalObject* global = gen->realm->maybeGlobal();
  MOZ_ASSERT(global);
  masm.branchIfNotRegExpPrototypeOptimizable(object, temp, global,
                                             ool->entry());
  // Fast path: the prototype is known-optimizable.
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}
// Out-of-line path: call RegExpPrototypeOptimizableRaw through the C ABI and
// store its boolean result in |output|. Volatile registers are saved manually
// because this is a raw ABI call, not a VM call.
void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(
    OutOfLineRegExpPrototypeOptimizable* ool) {
  LRegExpPrototypeOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  saveVolatile(output);

  using Fn = bool (*)(JSContext* cx, JSObject* proto);
  masm.setupAlignedABICall();
  // |output| doubles as scratch for the JSContext argument.
  masm.loadJSContext(output);
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}
// Out-of-line path for LRegExpInstanceOptimizable; dispatches back to
// CodeGenerator::visitOutOfLineRegExpInstanceOptimizable.
class OutOfLineRegExpInstanceOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpInstanceOptimizable* ins_;

 public:
  explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpInstanceOptimizable(this);
  }

  LRegExpInstanceOptimizable* ins() const { return ins_; }
};
// Emit the fast check for "is this RegExp instance in an optimizable state?".
// The inline path sets output=1; the slower check runs out of line.
void CodeGenerator::visitRegExpInstanceOptimizable(
    LRegExpInstanceOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  OutOfLineRegExpInstanceOptimizable* ool =
      new (alloc()) OutOfLineRegExpInstanceOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  const GlobalObject* global = gen->realm->maybeGlobal();
  MOZ_ASSERT(global);
  masm.branchIfNotRegExpInstanceOptimizable(object, temp, global, ool->entry());
  // Fast path: the instance is known-optimizable.
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}
// Out-of-line path: call RegExpInstanceOptimizableRaw through the C ABI and
// store its boolean result in |output|. Volatile registers are saved manually
// because this is a raw ABI call, not a VM call.
void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(
    OutOfLineRegExpInstanceOptimizable* ool) {
  LRegExpInstanceOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register proto = ToRegister(ins->proto());
  Register output = ToRegister(ins->output());

  saveVolatile(output);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
  masm.setupAlignedABICall();
  // |output| doubles as scratch for the JSContext argument.
  masm.loadJSContext(output);
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.passABIArg(proto);
  masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}
// Emit a linear scan over |str|'s characters (of the given encoding) that
// leaves the index of the first '$' in |output|, or -1 if none is found.
// |len| must be > 0 (asserted in DEBUG builds); |temp0|/|temp1| are scratch.
static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
                                 Register len, Register temp0, Register temp1,
                                 Register output, CharEncoding encoding) {
#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
  masm.assumeUnreachable("Length should be greater than 0.");
  masm.bind(&ok);
#endif

  Register chars = temp0;
  masm.loadStringChars(str, chars, encoding);

  // |output| is the scan index; start at 0.
  masm.move32(Imm32(0), output);

  Label start, done;
  masm.bind(&start);

  Register currentChar = temp1;
  masm.loadChar(chars, output, currentChar, encoding);
  // Found a '$': fall out with |output| holding its index.
  masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);

  masm.add32(Imm32(1), output);
  masm.branch32(Assembler::NotEqual, output, len, &start);

  // Scanned the whole string without a match.
  masm.move32(Imm32(-1), output);

  masm.bind(&done);
}
// Emit code returning the index of the first '$' in a (non-rope) string, or
// -1 if absent. Ropes take the VM fallback; linear strings dispatch on their
// character encoding to the shared FindFirstDollarIndex scan.
void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
  Register str = ToRegister(ins->str());
  Register output = ToRegister(ins->output());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register len = ToRegister(ins->temp2());

  using Fn = bool (*)(JSContext*, JSString*, int32_t*);
  OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
      ins, ArgList(str), StoreRegisterTo(output));

  // Ropes don't have contiguous chars; let the VM flatten and scan.
  masm.branchIfRope(str, ool->entry());
  masm.loadStringLength(str, len);

  Label isLatin1, done;
  masm.branchLatin1String(str, &isLatin1);
  FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                       CharEncoding::TwoByte);
  masm.jump(&done);

  masm.bind(&isLatin1);
  FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                       CharEncoding::Latin1);

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Lower LStringReplace to a VM call. Each of the three string operands may be
// a compile-time constant (pushed as a GC pointer) or a register. Dispatches
// to the flat-replacement or general StringReplace VM function.
void CodeGenerator::visitStringReplace(LStringReplace* lir) {
  // Arguments are pushed in reverse order: replacement, pattern, string.
  if (lir->replacement()->isConstant()) {
    pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->replacement()));
  }

  if (lir->pattern()->isConstant()) {
    pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->pattern()));
  }

  if (lir->string()->isConstant()) {
    pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->string()));
  }

  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
  if (lir->mir()->isFlatReplacement()) {
    callVM<Fn, StringFlatReplaceString>(lir);
  } else {
    callVM<Fn, StringReplace>(lir);
  }
}
// Lower LBinaryValueCache to an IC. The JSOp is recovered from the bytecode
// pc of the instruction's resume point; only binary arithmetic/bitwise ops
// are expected here.
void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::LhsIndex));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::RhsIndex));
  ValueOperand output = ToOutValue(lir);

  // The opcode at the resume point's pc identifies which operation to cache.
  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOp::Add:
    case JSOp::Sub:
    case JSOp::Mul:
    case JSOp::Div:
    case JSOp::Mod:
    case JSOp::Pow:
    case JSOp::BitAnd:
    case JSOp::BitOr:
    case JSOp::BitXor:
    case JSOp::Lsh:
    case JSOp::Rsh:
    case JSOp::Ursh: {
      IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }
    default:
      MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
  }
}
// Lower LBinaryBoolCache to a compare IC. The JSOp is recovered from the
// bytecode pc of the resume point; only comparison ops are expected here.
void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::LhsIndex));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::RhsIndex));
  Register output = ToRegister(lir->output());

  // The opcode at the resume point's pc identifies which comparison to cache.
  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOp::Lt:
    case JSOp::Le:
    case JSOp::Gt:
    case JSOp::Ge:
    case JSOp::Eq:
    case JSOp::Ne:
    case JSOp::StrictEq:
    case JSOp::StrictNe: {
      IonCompareIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }
    default:
      MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
  }
}
// Lower LUnaryCache to a unary-arithmetic IC on the boxed input value.
void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister input =
      TypedOrValueRegister(ToValue(lir, LUnaryCache::InputIndex));
  ValueOperand output = ToOutValue(lir);

  IonUnaryArithIC ic(liveRegs, input, output);
  addIC(lir, allocateIC(ic));
}
// Lower LModuleMetadata (import.meta) to a VM call that gets or creates the
// module's meta object.
void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
  pushArg(ImmPtr(lir->mir()->module()));

  using Fn = JSObject* (*)(JSContext*, HandleObject);
  callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
}
// Lower LDynamicImport (import(specifier, options)) to a VM call that starts
// the dynamic module import for the current script.
void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
  // Arguments pushed in reverse order: options, specifier, script.
  pushArg(ToValue(lir, LDynamicImport::OptionsIndex));
  pushArg(ToValue(lir, LDynamicImport::SpecifierIndex));
  pushArg(ImmGCPtr(current->mir()->info().script()));

  using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
  callVM<Fn, js::StartDynamicModuleImport>(lir);
}
// Lower LLambda: clone the template function inline in the nursery and set
// its environment chain, falling back to js::Lambda in the VM if nursery
// allocation fails.
void CodeGenerator::visitLambda(LLambda* lir) {
  Register envChain = ToRegister(lir->environmentChain());
  Register output = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  JSFunction* fun = lir->mir()->templateFunction();

  using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
  OutOfLineCode* ool = oolCallVM<Fn, js::Lambda>(
      lir, ArgList(ImmGCPtr(fun), envChain), StoreRegisterTo(output));

  TemplateObject templateObject(fun);
  masm.createGCObject(output, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.storeValue(JSVAL_TYPE_OBJECT, envChain,
                  Address(output, JSFunction::offsetOfEnvironment()));
  // No post barrier needed because output is guaranteed to be allocated in
  // the nursery.

  masm.bind(ool->rejoin());
}
// Lower LFunctionWithProto to a VM call that clones the function with an
// explicit prototype object.
void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
  Register envChain = ToRegister(lir->envChain());
  Register prototype = ToRegister(lir->prototype());

  // Arguments pushed in reverse order: prototype, envChain, function.
  pushArg(prototype);
  pushArg(envChain);
  pushArg(ImmGCPtr(lir->mir()->function()));

  using Fn =
      JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
  callVM<Fn, js::FunWithProtoOperation>(lir);
}
// Lower LSetFunName to a VM call that assigns a computed name (with optional
// "get "/"set " prefix) to a function.
void CodeGenerator::visitSetFunName(LSetFunName* lir) {
  // Arguments pushed in reverse order: prefix kind, name value, function.
  pushArg(Imm32(lir->mir()->prefixKind()));
  pushArg(ToValue(lir, LSetFunName::NameIndex));
  pushArg(ToRegister(lir->fun()));

  using Fn =
      bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
  callVM<Fn, js::SetFunctionName>(lir);
}
// Record an OSI (on-stack invalidation) point: mark its code offset and
// attach it to the associated safepoint so invalidation can patch the
// preceding call.
void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
  // Note: markOsiPoint ensures enough space exists between the last
  // LOsiPoint and this one to patch adjacent call instructions.

  MOZ_ASSERT(masm.framePushed() == frameSize());

  uint32_t osiCallPointOffset = markOsiPoint(lir);

  LSafepoint* safepoint = lir->associatedSafepoint();
  MOZ_ASSERT(!safepoint->osiCallPointOffset());
  safepoint->setOsiCallPointOffset(osiCallPointOffset);

#ifdef DEBUG
  // There should be no movegroups or other instructions between
  // an instruction and its OsiPoint. This is necessary because
  // we use the OsiPoint's snapshot from within VM calls.
  for (LInstructionReverseIterator iter(current->rbegin(lir));
       iter != current->rend(); iter++) {
    if (*iter == lir) {
      continue;
    }
    MOZ_ASSERT(!iter->isMoveGroup());
    MOZ_ASSERT(iter->safepoint() == safepoint);
    break;
  }
#endif

#ifdef CHECK_OSIPOINT_REGISTERS
  if (shouldVerifyOsiPointRegs(safepoint)) {
    verifyOsiPointRegs(safepoint);
  }
#endif
}
// Phis are resolved by the register allocator; none should survive to
// code generation.
void CodeGenerator::visitPhi(LPhi* lir) {
  MOZ_CRASH("Unexpected LPhi in CodeGenerator");
}
// Unconditional branch to the target block (elided if it falls through).
void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }
// Lower LTableSwitch with a typed (int32 or double) index. Non-int32 inputs
// are doubles that must convert exactly to an int32, otherwise the default
// case is taken.
void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
  const LAllocation* temp;

  if (mir->getOperand(0)->type() != MIRType::Int32) {
    temp = ins->tempInt()->output();

    // The input is a double, so try and convert it to an integer.
    // If it does not fit in an integer, take the default case.
    masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp),
                              defaultcase, false);
  } else {
    temp = ins->index();
  }

  emitTableSwitchDispatch(mir, ToRegister(temp),
                          ToRegisterOrInvalid(ins->tempPointer()));
}
// Lower LTableSwitchV with a boxed Value index: non-numbers go to the default
// case, int32s are unboxed directly, and doubles must convert exactly to an
// int32 (else default).
void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  Register index = ToRegister(ins->tempInt());
  ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
  Register tag = masm.extractTag(value, index);
  masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);

  Label unboxInt, isInt;
  masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
  {
    // Double path: convert exactly to int32 or fall to the default case.
    FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
    masm.unboxDouble(value, floatIndex);
    masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
    masm.jump(&isInt);
  }

  masm.bind(&unboxInt);
  masm.unboxInt32(value, index);

  masm.bind(&isInt);

  emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
}
// Parameters need no code: they are already in their stack slots.
void CodeGenerator::visitParameter(LParameter* lir) {}
// Load the callee JSFunction out of the frame's callee token.
void CodeGenerator::visitCallee(LCallee* lir) {
  Register callee = ToRegister(lir->output());
  Address ptr(FramePointer, JitFrameLayout::offsetOfCalleeToken());

  masm.loadFunctionFromCalleeToken(ptr, callee);
}
// Compute whether the current frame is a construct call by extracting the
// constructing bit from the callee token.
void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
  Register output = ToRegister(lir->output());
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.loadPtr(calleeToken, output);

  // We must be inside a function.
  MOZ_ASSERT(current->mir()->info().script()->function());

  // The low bit indicates whether this call is constructing, just clear the
  // other bits.
  static_assert(CalleeToken_Function == 0x0,
                "CalleeTokenTag value should match");
  static_assert(CalleeToken_FunctionConstructing == 0x1,
                "CalleeTokenTag value should match");
  masm.andPtr(Imm32(0x1), output);
}
// Lower LReturn: assert the return value sits in the JS return register(s),
// then jump to the shared epilogue unless we can fall through to it.
void CodeGenerator::visitReturn(LReturn* lir) {
#if defined(JS_NUNBOX32)
  DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
  DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
  MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
  MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  DebugOnly<LAllocation*> result = lir->getOperand(0);
  MOZ_ASSERT(ToRegister(result) == JSReturnReg);
#endif
  // Don't emit a jump to the return label if this is the last block, as
  // it'll fall through to the epilogue.
  //
  // This is -not- true however for a Generator-return, which may appear in the
  // middle of the last block, so we should always emit the jump there.
  if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
    masm.jump(&returnLabel_);
  }
}
// Emit the on-stack-replacement entry point: record its code offset, reset
// the frame accounting, optionally hook up profiler instrumentation, and
// allocate the Ion frame.
void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
  Register temp = ToRegister(lir->temp());

  // Remember the OSR entry offset into the code buffer.
  masm.flushBuffer();
  setOsrEntryOffset(masm.size());

  // Allocate the full frame for this function
  // Note we have a new entry here. So we reset MacroAssembler::framePushed()
  // to 0, before reserving the stack.
  MOZ_ASSERT(masm.framePushed() == frameSize());
  masm.setFramePushed(0);

  // The Baseline code ensured both the frame pointer and stack pointer point to
  // the JitFrameLayout on the stack.

  // If profiling, save the current frame pointer to a per-thread global field.
  if (isProfilerInstrumentationEnabled()) {
    masm.profilerEnterFrame(FramePointer, temp);
  }

  masm.reserveStack(frameSize());
  MOZ_ASSERT(masm.framePushed() == frameSize());

  // Ensure that the Ion frames is properly aligned.
  masm.assertStackAlignment(JitStackAlignment, 0);
}
// Load the environment chain slot out of the BaselineFrame we OSR'd from.
// Baseline frame slots live at negative offsets from the frame register,
// hence the "reverse offset" helpers.
3824 void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
3825 const LAllocation* frame = lir->getOperand(0);
3826 const LDefinition* object = lir->getDef(0);
3828 const ptrdiff_t frameOffset =
3829 BaselineFrame::reverseOffsetOfEnvironmentChain();
3831 masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
// Load the arguments object slot out of the BaselineFrame we OSR'd from.
3834 void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
3835 const LAllocation* frame = lir->getOperand(0);
3836 const LDefinition* object = lir->getDef(0);
3838 const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();
3840 masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
// Load a boxed Value from the BaselineFrame at the offset the MIR node
// recorded (e.g. a local or formal slot).
3843 void CodeGenerator::visitOsrValue(LOsrValue* value) {
3844 const LAllocation* frame = value->getOperand(0);
3845 const ValueOperand out = ToOutValue(value);
3847 const ptrdiff_t frameOffset = value->mir()->frameOffset();
3849 masm.loadValue(Address(ToRegister(frame), frameOffset), out);
// Fetch the return value stored in the BaselineFrame, or |undefined| if the
// frame's HAS_RVAL flag is not set.
3852 void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
3853 const LAllocation* frame = lir->getOperand(0);
3854 const ValueOperand out = ToOutValue(lir);
3856 Address flags =
3857 Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
3858 Address retval =
3859 Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());
// Default to undefined; only overwrite if the flag says a value was set.
3861 masm.moveValue(UndefinedValue(), out);
3863 Label done;
3864 masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
3865 &done);
3866 masm.loadValue(retval, out);
3867 masm.bind(&done);
// Store a typed (statically-known-type) call argument into its outgoing
// argument stack slot, boxing it as a Value in the process.
3870 void CodeGenerator::visitStackArgT(LStackArgT* lir) {
3871 const LAllocation* arg = lir->arg();
3872 MIRType argType = lir->type();
3873 uint32_t argslot = lir->argslot();
// Unsigned trick: checks 1 <= argslot <= argumentSlotCount().
3874 MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3876 Address dest = AddressOfPassedArg(argslot);
3878 if (arg->isFloatReg()) {
// Doubles are boxed directly from the FP register.
3879 masm.boxDouble(ToFloatRegister(arg), dest);
3880 } else if (arg->isRegister()) {
3881 masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
3882 } else {
// Constant argument: store the fully-boxed JS value.
3883 masm.storeValue(arg->toConstant()->toJSValue(), dest);
// Store an already-boxed Value call argument into its outgoing stack slot.
3887 void CodeGenerator::visitStackArgV(LStackArgV* lir) {
3888 ValueOperand val = ToValue(lir, 0);
3889 uint32_t argslot = lir->argslot();
// Unsigned trick: checks 1 <= argslot <= argumentSlotCount().
3890 MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3892 masm.storeValue(val, AddressOfPassedArg(argslot));
// Emit a group of parallel register/stack moves inserted by the register
// allocator. The MoveResolver orders the moves (handling cycles), and the
// MoveEmitter lowers them to machine code.
3895 void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
3896 if (!group->numMoves()) {
3897 return;
3900 MoveResolver& resolver = masm.moveResolver();
3902 for (size_t i = 0; i < group->numMoves(); i++) {
3903 const LMove& move = group->getMove(i);
3905 LAllocation from = move.from();
3906 LAllocation to = move.to();
3907 LDefinition::Type type = move.type();
3909 // No bogus moves.
3910 MOZ_ASSERT(from != to);
3911 MOZ_ASSERT(!from.isConstant());
// Map the LIR storage type onto the move-emitter's coarser categories:
// everything pointer-sized is a GENERAL move; FP/SIMD keep their width.
3912 MoveOp::Type moveType;
3913 switch (type) {
3914 case LDefinition::OBJECT:
3915 case LDefinition::SLOTS:
3916 case LDefinition::WASM_ANYREF:
3917 #ifdef JS_NUNBOX32
3918 case LDefinition::TYPE:
3919 case LDefinition::PAYLOAD:
3920 #else
3921 case LDefinition::BOX:
3922 #endif
3923 case LDefinition::GENERAL:
3924 case LDefinition::STACKRESULTS:
3925 moveType = MoveOp::GENERAL;
3926 break;
3927 case LDefinition::INT32:
3928 moveType = MoveOp::INT32;
3929 break;
3930 case LDefinition::FLOAT32:
3931 moveType = MoveOp::FLOAT32;
3932 break;
3933 case LDefinition::DOUBLE:
3934 moveType = MoveOp::DOUBLE;
3935 break;
3936 case LDefinition::SIMD128:
3937 moveType = MoveOp::SIMD128;
3938 break;
3939 default:
3940 MOZ_CRASH("Unexpected move type");
// addMove can OOM; propagate instead of checking each call site.
3943 masm.propagateOOM(
3944 resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
3947 masm.propagateOOM(resolver.resolve());
3948 if (masm.oom()) {
3949 return;
3952 MoveEmitter emitter(masm);
3954 #ifdef JS_CODEGEN_X86
// x86 is register-starved: use a spare scratch register if the allocator
// provided one, otherwise sort memory-to-memory moves to help the emitter.
3955 if (group->maybeScratchRegister().isGeneralReg()) {
3956 emitter.setScratchRegister(
3957 group->maybeScratchRegister().toGeneralReg()->reg());
3958 } else {
3959 resolver.sortMemoryToMemoryMoves();
3961 #endif
3963 emitter.emit(resolver);
3964 emitter.finish();
// Materialize a 32-bit integer constant into the output register.
3967 void CodeGenerator::visitInteger(LInteger* lir) {
3968 masm.move32(Imm32(lir->i32()), ToRegister(lir->output()));
// Materialize a 64-bit integer constant into the output register (pair).
3971 void CodeGenerator::visitInteger64(LInteger64* lir) {
3972 masm.move64(Imm64(lir->i64()), ToOutRegister64(lir));
// Materialize a GC-thing pointer constant; ImmGCPtr lets the GC trace and
// patch this embedded pointer if the object moves.
3975 void CodeGenerator::visitPointer(LPointer* lir) {
3976 masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
// Load a nursery-allocated object. The object's address can move, so we
// patch in the address of the IonScript's nursery-object list entry at link
// time and load the JSObject* through it.
3979 void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
3980 Register output = ToRegister(lir->output());
3981 uint32_t nurseryIndex = lir->mir()->nurseryIndex();
3983 // Load a pointer to the entry in IonScript's nursery objects list.
3984 CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
3985 masm.propagateOOM(ionNurseryObjectLabels_.emplaceBack(label, nurseryIndex));
3987 // Load the JSObject*.
3988 masm.loadPtr(Address(output, 0), output);
// Emits no code; the LIR node only exists to extend the object's live range.
3991 void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
3992 // No-op.
// Debug-only: increment the JSContext's GC-unsafe-region counter and assert
// it stayed positive (i.e. enter/leave calls are balanced).
3995 void CodeGenerator::visitDebugEnterGCUnsafeRegion(
3996 LDebugEnterGCUnsafeRegion* lir) {
3997 Register temp = ToRegister(lir->temp0());
3999 masm.loadJSContext(temp);
4001 Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
4002 masm.add32(Imm32(1), inUnsafeRegion);
4004 Label ok;
4005 masm.branch32(Assembler::GreaterThan, inUnsafeRegion, Imm32(0), &ok);
4006 masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
4007 masm.bind(&ok);
// Debug-only: decrement the JSContext's GC-unsafe-region counter and assert
// it never goes negative (i.e. no leave without a matching enter).
4010 void CodeGenerator::visitDebugLeaveGCUnsafeRegion(
4011 LDebugLeaveGCUnsafeRegion* lir) {
4012 Register temp = ToRegister(lir->temp0());
4014 masm.loadJSContext(temp);
4016 Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
4017 masm.add32(Imm32(-1), inUnsafeRegion);
4019 Label ok;
4020 masm.branch32(Assembler::GreaterThanOrEqual, inUnsafeRegion, Imm32(0), &ok);
4021 masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
4022 masm.bind(&ok);
// Load a native object's dynamic-slots pointer into the output register.
4025 void CodeGenerator::visitSlots(LSlots* lir) {
4026 Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
4027 masm.loadPtr(slots, ToRegister(lir->output()));
// Load a boxed Value from a dynamic slot; |input| is the slots pointer and
// the slot index is converted to a byte offset.
4030 void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
4031 ValueOperand dest = ToOutValue(lir);
4032 Register base = ToRegister(lir->input());
4033 int32_t offset = lir->mir()->slot() * sizeof(js::Value);
4035 masm.loadValue(Address(base, offset), dest);
4038 static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
4039 MIRType valueType) {
4040 if (value->isConstant()) {
4041 return ConstantOrRegister(value->toConstant()->toJSValue());
4043 return TypedOrValueRegister(valueType, ToAnyRegister(value));
// Store a typed value into a dynamic slot, emitting a GC pre-write barrier
// first when the MIR node requires one.
4046 void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
4047 Register base = ToRegister(lir->slots());
4048 int32_t offset = lir->mir()->slot() * sizeof(js::Value);
4049 Address dest(base, offset);
4051 if (lir->mir()->needsBarrier()) {
4052 emitPreBarrier(dest);
4055 MIRType valueType = lir->mir()->value()->type();
4056 ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
4057 masm.storeUnboxedValue(value, valueType, dest);
// Store an already-boxed Value into a dynamic slot, with an optional GC
// pre-write barrier on the old slot contents.
4060 void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
4061 Register base = ToRegister(lir->slots());
4062 int32_t offset = lir->mir()->slot() * sizeof(Value);
4064 const ValueOperand value = ToValue(lir, LStoreDynamicSlotV::ValueIndex);
4066 if (lir->mir()->needsBarrier()) {
4067 emitPreBarrier(Address(base, offset));
4070 masm.storeValue(value, Address(base, offset));
// Load a native object's elements pointer into the output register.
4073 void CodeGenerator::visitElements(LElements* lir) {
4074 Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
4075 masm.loadPtr(elements, ToRegister(lir->output()));
// Load and unbox a JSFunction's environment (enclosing scope object).
4078 void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
4079 Address environment(ToRegister(lir->function()),
4080 JSFunction::offsetOfEnvironment());
4081 masm.unboxObject(environment, ToRegister(lir->output()));
// Load a method's [[HomeObject]] from its extended-function slot; in debug
// builds, assert the slot really holds an object before unboxing.
4084 void CodeGenerator::visitHomeObject(LHomeObject* lir) {
4085 Register func = ToRegister(lir->function());
4086 Address homeObject(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
4088 masm.assertFunctionIsExtended(func);
4089 #ifdef DEBUG
4090 Label isObject;
4091 masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
4092 masm.assumeUnreachable("[[HomeObject]] must be Object");
4093 masm.bind(&isObject);
4094 #endif
4096 masm.unboxObject(homeObject, ToRegister(lir->output()));
// Compute the super base (prototype of the [[HomeObject]]) for JSOp::SuperBase.
// Produces a boxed object, or |null| when the proto chain terminates.
4099 void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
4100 Register homeObject = ToRegister(lir->homeObject());
4101 ValueOperand output = ToOutValue(lir);
4102 Register temp = output.scratchReg();
4104 masm.loadObjProto(homeObject, temp);
4106 #ifdef DEBUG
4107 // We won't encounter a lazy proto, because the prototype is guaranteed to
4108 // either be a JSFunction or a PlainObject, and only proxy objects can have a
4109 // lazy proto.
4110 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
4112 Label proxyCheckDone;
4113 masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
4114 masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
4115 masm.bind(&proxyCheckDone);
4116 #endif
// A zero proto pointer means the prototype is null.
4118 Label nullProto, done;
4119 masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
4121 // Box prototype and return
4122 masm.tagValue(JSVAL_TYPE_OBJECT, temp, output);
4123 masm.jump(&done);
4125 masm.bind(&nullProto);
4126 masm.moveValue(NullValue(), output);
4128 masm.bind(&done);
4131 template <class T>
4132 static T* ToConstantObject(MDefinition* def) {
4133 MOZ_ASSERT(def->isConstant());
4134 return &def->toConstant()->toObject().as<T>();
// Allocate a BlockLexicalEnvironmentObject from its template object, with an
// out-of-line VM fallback when inline GC allocation fails.
4137 void CodeGenerator::visitNewLexicalEnvironmentObject(
4138 LNewLexicalEnvironmentObject* lir) {
4139 Register output = ToRegister(lir->output());
4140 Register temp = ToRegister(lir->temp0());
4142 auto* templateObj = ToConstantObject<BlockLexicalEnvironmentObject>(
4143 lir->mir()->templateObj());
4144 auto* scope = &templateObj->scope();
4145 gc::Heap initialHeap = gc::Heap::Default;
4147 using Fn =
4148 BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>);
4149 auto* ool =
4150 oolCallVM<Fn, BlockLexicalEnvironmentObject::createWithoutEnclosing>(
4151 lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4153 TemplateObject templateObject(templateObj);
4154 masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4156 masm.bind(ool->rejoin());
// Allocate a ClassBodyLexicalEnvironmentObject from its template object, with
// an out-of-line VM fallback when inline GC allocation fails.
4159 void CodeGenerator::visitNewClassBodyEnvironmentObject(
4160 LNewClassBodyEnvironmentObject* lir) {
4161 Register output = ToRegister(lir->output());
4162 Register temp = ToRegister(lir->temp0());
4164 auto* templateObj = ToConstantObject<ClassBodyLexicalEnvironmentObject>(
4165 lir->mir()->templateObj());
4166 auto* scope = &templateObj->scope();
4167 gc::Heap initialHeap = gc::Heap::Default;
4169 using Fn = ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
4170 Handle<ClassBodyScope*>);
4171 auto* ool =
4172 oolCallVM<Fn, ClassBodyLexicalEnvironmentObject::createWithoutEnclosing>(
4173 lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4175 TemplateObject templateObject(templateObj);
4176 masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4178 masm.bind(ool->rejoin());
// Allocate a VarEnvironmentObject from its template object, with an
// out-of-line VM fallback when inline GC allocation fails.
4181 void CodeGenerator::visitNewVarEnvironmentObject(
4182 LNewVarEnvironmentObject* lir) {
4183 Register output = ToRegister(lir->output());
4184 Register temp = ToRegister(lir->temp0());
4186 auto* templateObj =
4187 ToConstantObject<VarEnvironmentObject>(lir->mir()->templateObj());
4188 auto* scope = &templateObj->scope().as<VarScope>();
4189 gc::Heap initialHeap = gc::Heap::Default;
4191 using Fn = VarEnvironmentObject* (*)(JSContext*, Handle<VarScope*>);
4192 auto* ool = oolCallVM<Fn, VarEnvironmentObject::createWithoutEnclosing>(
4193 lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4195 TemplateObject templateObject(templateObj);
4196 masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4198 masm.bind(ool->rejoin());
// Bail out of Ion code if the object's shape differs from the expected one.
4201 void CodeGenerator::visitGuardShape(LGuardShape* guard) {
4202 Register obj = ToRegister(guard->input());
4203 Register temp = ToTempRegisterOrInvalid(guard->temp0());
4204 Label bail;
4205 masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
4206 obj, &bail);
4207 bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object's shape appears in the given shape list (stored
// as the elements of a ListObject-like native object).
4210 void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {
4211 Register obj = ToRegister(guard->object());
4212 Register shapeList = ToRegister(guard->shapeList());
4213 Register temp = ToRegister(guard->temp0());
4214 Register temp2 = ToRegister(guard->temp1());
4215 Register temp3 = ToRegister(guard->temp2());
// Extra register only needed for Spectre mitigations.
4216 Register spectre = ToTempRegisterOrInvalid(guard->temp3());
4218 Label bail;
4219 masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
4220 masm.branchTestObjShapeList(Assembler::NotEqual, obj, temp, temp2, temp3,
4221 spectre, &bail);
4222 bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object's prototype is exactly the expected object.
4225 void CodeGenerator::visitGuardProto(LGuardProto* guard) {
4226 Register obj = ToRegister(guard->object());
4227 Register expected = ToRegister(guard->expected());
4228 Register temp = ToRegister(guard->temp0());
4230 masm.loadObjProto(obj, temp);
4232 Label bail;
4233 masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
4234 bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object's prototype is null (proto word == 0).
4237 void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
4238 Register obj = ToRegister(guard->input());
4239 Register temp = ToRegister(guard->temp0());
4241 masm.loadObjProto(obj, temp);
4243 Label bail;
4244 masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
4245 bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object is a native (non-proxy, non-typed) object.
4248 void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
4249 Register obj = ToRegister(guard->input());
4250 Register temp = ToRegister(guard->temp0());
4252 Label bail;
4253 masm.branchIfNonNativeObj(obj, temp, &bail);
4254 bailoutFrom(&bail, guard->snapshot());
// Bail out if the global's generation counter no longer matches the value
// observed at compile time (i.e. the global was invalidated).
4257 void CodeGenerator::visitGuardGlobalGeneration(LGuardGlobalGeneration* guard) {
4258 Register temp = ToRegister(guard->temp0());
4259 Label bail;
4261 masm.load32(AbsoluteAddress(guard->mir()->generationAddr()), temp);
4262 masm.branch32(Assembler::NotEqual, temp, Imm32(guard->mir()->expected()),
4263 &bail);
4264 bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object is a proxy.
4267 void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
4268 Register obj = ToRegister(guard->input());
4269 Register temp = ToRegister(guard->temp0());
4271 Label bail;
// First arg |false|: branch to |bail| when the object is NOT a proxy.
4272 masm.branchTestObjectIsProxy(false, obj, temp, &bail);
4273 bailoutFrom(&bail, guard->snapshot());
// Bail out if the object is a proxy.
4276 void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
4277 Register obj = ToRegister(guard->input());
4278 Register temp = ToRegister(guard->temp0());
4280 Label bail;
// First arg |true|: branch to |bail| when the object IS a proxy.
4281 masm.branchTestObjectIsProxy(true, obj, temp, &bail);
4282 bailoutFrom(&bail, guard->snapshot());
// Bail out if the proxy's handler belongs to the DOM proxy handler family.
4285 void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
4286 Register proxy = ToRegister(guard->proxy());
4287 Register temp = ToRegister(guard->temp0());
4289 Label bail;
4290 masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
4291 GetDOMProxyHandlerFamily(), &bail);
4292 bailoutFrom(&bail, guard->snapshot());
// Call the ProxyGetProperty VM function for a property get with a
// compile-time-known id. Arguments are pushed last-to-first.
4295 void CodeGenerator::visitProxyGet(LProxyGet* lir) {
4296 Register proxy = ToRegister(lir->proxy());
4297 Register temp = ToRegister(lir->temp0());
4299 pushArg(lir->mir()->id(), temp);
4300 pushArg(proxy);
4302 using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
4303 callVM<Fn, ProxyGetProperty>(lir);
// Call the ProxyGetPropertyByValue VM function for a property get with a
// runtime Value id.
4306 void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
4307 Register proxy = ToRegister(lir->proxy());
4308 ValueOperand idVal = ToValue(lir, LProxyGetByValue::IdIndex);
4310 pushArg(idVal);
4311 pushArg(proxy);
4313 using Fn =
4314 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
4315 callVM<Fn, ProxyGetPropertyByValue>(lir);
// Call the ProxyHas / ProxyHasOwn VM function, depending on whether the MIR
// node asks for an own-property check.
4318 void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
4319 Register proxy = ToRegister(lir->proxy());
4320 ValueOperand idVal = ToValue(lir, LProxyHasProp::IdIndex);
4322 pushArg(idVal);
4323 pushArg(proxy);
4325 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
4326 if (lir->mir()->hasOwn()) {
4327 callVM<Fn, ProxyHasOwn>(lir);
4328 } else {
4329 callVM<Fn, ProxyHas>(lir);
// Call the ProxySetProperty VM function for a property set with a
// compile-time-known id; strictness is passed as an immediate.
4333 void CodeGenerator::visitProxySet(LProxySet* lir) {
4334 Register proxy = ToRegister(lir->proxy());
4335 ValueOperand rhs = ToValue(lir, LProxySet::RhsIndex);
4336 Register temp = ToRegister(lir->temp0());
4338 pushArg(Imm32(lir->mir()->strict()));
4339 pushArg(rhs);
4340 pushArg(lir->mir()->id(), temp);
4341 pushArg(proxy);
4343 using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
4344 callVM<Fn, ProxySetProperty>(lir);
// Call the ProxySetPropertyByValue VM function for a property set with a
// runtime Value id.
4347 void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
4348 Register proxy = ToRegister(lir->proxy());
4349 ValueOperand idVal = ToValue(lir, LProxySetByValue::IdIndex);
4350 ValueOperand rhs = ToValue(lir, LProxySetByValue::RhsIndex);
4352 pushArg(Imm32(lir->mir()->strict()));
4353 pushArg(rhs);
4354 pushArg(idVal);
4355 pushArg(proxy);
4357 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
4358 callVM<Fn, ProxySetPropertyByValue>(lir);
// Call the SetArrayLength VM function to assign an array's .length.
4361 void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
4362 Register obj = ToRegister(lir->obj());
4363 ValueOperand rhs = ToValue(lir, LCallSetArrayLength::RhsIndex);
4365 pushArg(Imm32(lir->mir()->strict()));
4366 pushArg(rhs);
4367 pushArg(obj);
4369 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
4370 callVM<Fn, jit::SetArrayLength>(lir);
// Megamorphic property load with a known property key. Fast path: probe the
// per-runtime megamorphic cache. Slow path: call the pure (non-GC'ing)
// GetNativeDataPropertyPure helper via ABI call, with the result Value
// written into a stack slot. Bails out on failure.
4373 void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
4374 Register obj = ToRegister(lir->object());
4375 Register temp0 = ToRegister(lir->temp0());
4376 Register temp1 = ToRegister(lir->temp1());
4377 Register temp2 = ToRegister(lir->temp2());
4378 Register temp3 = ToRegister(lir->temp3());
4379 ValueOperand output = ToOutValue(lir);
4381 Label bail, cacheHit;
4382 if (JitOptions.enableWatchtowerMegamorphic) {
// On a cache hit, |output| is filled and control jumps to |cacheHit|.
// On a miss, temp2 holds the cache entry pointer for the helper to fill.
4383 masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1,
4384 temp2, output, &cacheHit);
4385 } else {
// No cache: pass a null cache-entry pointer to the helper.
4386 masm.xorPtr(temp2, temp2);
4389 masm.branchIfNonNativeObj(obj, temp0, &bail);
// Reserve a stack Value slot for the result (vp) and point temp3 at it.
4391 masm.Push(UndefinedValue());
4392 masm.moveStackPtrTo(temp3);
4394 using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
4395 MegamorphicCache::Entry* cacheEntry, Value* vp);
4396 masm.setupAlignedABICall();
4397 masm.loadJSContext(temp0);
4398 masm.passABIArg(temp0);
4399 masm.passABIArg(obj);
4400 masm.movePropertyKey(lir->mir()->name(), temp1);
4401 masm.passABIArg(temp1);
4402 masm.passABIArg(temp2);
4403 masm.passABIArg(temp3);
4405 masm.callWithABI<Fn, GetNativeDataPropertyPure>();
// Pop the result before testing the bool so the stack is balanced on bail.
4407 MOZ_ASSERT(!output.aliases(ReturnReg));
4408 masm.Pop(output);
4410 masm.branchIfFalseBool(ReturnReg, &bail);
4412 masm.bind(&cacheHit);
4413 bailoutFrom(&bail, lir->snapshot());
// Megamorphic property load keyed by a runtime Value. Like
// visitMegamorphicLoadSlot, but the id and result share a two-Value stack
// area: id in vp[0], result written to vp[1] by the pure helper.
4416 void CodeGenerator::visitMegamorphicLoadSlotByValue(
4417 LMegamorphicLoadSlotByValue* lir) {
4418 Register obj = ToRegister(lir->object());
4419 ValueOperand idVal = ToValue(lir, LMegamorphicLoadSlotByValue::IdIndex);
4420 Register temp0 = ToRegister(lir->temp0());
4421 Register temp1 = ToRegister(lir->temp1());
4422 Register temp2 = ToRegister(lir->temp2());
4423 ValueOperand output = ToOutValue(lir);
4425 Label bail, cacheHit;
4426 if (JitOptions.enableWatchtowerMegamorphic) {
4427 masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
4428 output, &cacheHit);
4429 } else {
// No cache: pass a null cache-entry pointer to the helper.
4430 masm.xorPtr(temp2, temp2);
4433 masm.branchIfNonNativeObj(obj, temp0, &bail);
4435 // idVal will be in vp[0], result will be stored in vp[1].
4436 masm.reserveStack(sizeof(Value));
4437 masm.Push(idVal);
4438 masm.moveStackPtrTo(temp0);
4440 using Fn = bool (*)(JSContext* cx, JSObject* obj,
4441 MegamorphicCache::Entry* cacheEntry, Value* vp);
4442 masm.setupAlignedABICall();
4443 masm.loadJSContext(temp1);
4444 masm.passABIArg(temp1);
4445 masm.passABIArg(obj);
4446 masm.passABIArg(temp2);
4447 masm.passABIArg(temp0);
4448 masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();
// Restore idVal (vp[0]) first; the call's bool result moves into temp0.
4450 MOZ_ASSERT(!idVal.aliases(temp0));
4451 masm.storeCallPointerResult(temp0);
4452 masm.Pop(idVal);
// On failure we discard the result slot before bailing; setFramePushed
// afterwards re-synchronizes the assembler's flow-insensitive stack height.
4454 uint32_t framePushed = masm.framePushed();
4455 Label ok;
4456 masm.branchIfTrueBool(temp0, &ok);
4457 masm.freeStack(sizeof(Value)); // Discard result Value.
4458 masm.jump(&bail);
4460 masm.bind(&ok);
4461 masm.setFramePushed(framePushed);
4462 masm.Pop(output);
4464 masm.bind(&cacheHit);
4465 bailoutFrom(&bail, lir->snapshot());
// Megamorphic property store with a known key. Fast path: the cached-set-slot
// helper (x86 uses a register-poor variant with fewer temps). Slow path: the
// SetPropertyMegamorphic VM call. After a cache hit, emit a post-write
// barrier if a nursery Value was stored into a tenured object.
4468 void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
4469 Register obj = ToRegister(lir->object());
4470 ValueOperand value = ToValue(lir, LMegamorphicStoreSlot::RhsIndex);
4472 Register temp0 = ToRegister(lir->temp0());
4473 #ifndef JS_CODEGEN_X86
4474 Register temp1 = ToRegister(lir->temp1());
4475 Register temp2 = ToRegister(lir->temp2());
4476 #endif
4478 Label cacheHit, done;
4479 if (JitOptions.enableWatchtowerMegamorphic) {
4480 #ifdef JS_CODEGEN_X86
4481 masm.emitMegamorphicCachedSetSlot(
4482 lir->mir()->name(), obj, temp0, value, &cacheHit,
4483 [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
4484 EmitPreBarrier(masm, addr, mirType);
4486 #else
4487 masm.emitMegamorphicCachedSetSlot(
4488 lir->mir()->name(), obj, temp0, temp1, temp2, value, &cacheHit,
4489 [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
4490 EmitPreBarrier(masm, addr, mirType);
4492 #endif
// Cache miss (or cache disabled): fall back to the VM.
4495 pushArg(Imm32(lir->mir()->strict()));
4496 pushArg(value);
4497 pushArg(lir->mir()->name(), temp0);
4498 pushArg(obj);
4500 using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
4501 callVM<Fn, SetPropertyMegamorphic<true>>(lir);
4503 masm.jump(&done);
4504 masm.bind(&cacheHit);
// Post-barrier only needed when a tenured object now references a nursery
// cell; skip if the object itself is in the nursery or the value is not.
4506 masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
4507 masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);
4509 saveVolatile(temp0);
4510 emitPostWriteBarrier(obj);
4511 restoreVolatile(temp0);
4513 masm.bind(&done);
// Megamorphic |in| / hasOwnProperty check keyed by a runtime Value. Fast
// path: cache lookup-for-existence. Slow path: the pure
// HasNativeDataPropertyPure helper, templated on own-vs-chain lookup, with
// the boolean answer written into a stack Value slot.
4516 void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
4517 Register obj = ToRegister(lir->object());
4518 ValueOperand idVal = ToValue(lir, LMegamorphicHasProp::IdIndex);
4519 Register temp0 = ToRegister(lir->temp0());
4520 Register temp1 = ToRegister(lir->temp1());
4521 Register temp2 = ToRegister(lir->temp2());
4522 Register output = ToRegister(lir->output());
4524 Label bail, cacheHit;
4525 if (JitOptions.enableWatchtowerMegamorphic) {
4526 masm.emitMegamorphicCacheLookupExists(idVal, obj, temp0, temp1, temp2,
4527 output, &cacheHit,
4528 lir->mir()->hasOwn());
4529 } else {
// No cache: pass a null cache-entry pointer to the helper.
4530 masm.xorPtr(temp2, temp2);
4533 masm.branchIfNonNativeObj(obj, temp0, &bail);
4535 // idVal will be in vp[0], result will be stored in vp[1].
4536 masm.reserveStack(sizeof(Value));
4537 masm.Push(idVal);
4538 masm.moveStackPtrTo(temp0);
4540 using Fn = bool (*)(JSContext* cx, JSObject* obj,
4541 MegamorphicCache::Entry* cacheEntry, Value* vp);
4542 masm.setupAlignedABICall();
4543 masm.loadJSContext(temp1);
4544 masm.passABIArg(temp1);
4545 masm.passABIArg(obj);
4546 masm.passABIArg(temp2);
4547 masm.passABIArg(temp0);
4548 if (lir->mir()->hasOwn()) {
4549 masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
4550 } else {
4551 masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
// Restore idVal (vp[0]) first; the call's bool result moves into temp0.
4554 MOZ_ASSERT(!idVal.aliases(temp0));
4555 masm.storeCallPointerResult(temp0);
4556 masm.Pop(idVal);
// On failure, discard the result slot before bailing; setFramePushed
// re-synchronizes the assembler's flow-insensitive stack height after.
4558 uint32_t framePushed = masm.framePushed();
4559 Label ok;
4560 masm.branchIfTrueBool(temp0, &ok);
4561 masm.freeStack(sizeof(Value)); // Discard result Value.
4562 masm.jump(&bail);
4564 masm.bind(&ok);
4565 masm.setFramePushed(framePushed);
// The helper stored a boolean Value at vp[1] (now top of stack).
4566 masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
4567 masm.freeStack(sizeof(Value));
4568 masm.bind(&cacheHit);
4570 bailoutFrom(&bail, lir->snapshot());
// Bail out if the object is an ArrayBuffer or a SharedArrayBuffer.
4573 void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
4574 LGuardIsNotArrayBufferMaybeShared* guard) {
4575 Register obj = ToRegister(guard->input());
4576 Register temp = ToRegister(guard->temp0());
4578 Label bail;
4579 masm.loadObjClassUnsafe(obj, temp);
4580 masm.branchPtr(Assembler::Equal, temp, ImmPtr(&ArrayBufferObject::class_),
4581 &bail);
4582 masm.branchPtr(Assembler::Equal, temp,
4583 ImmPtr(&SharedArrayBufferObject::class_), &bail);
4584 bailoutFrom(&bail, guard->snapshot());
// Bail out unless the object's class is one of the TypedArray classes.
4587 void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
4588 Register obj = ToRegister(guard->input());
4589 Register temp = ToRegister(guard->temp0());
4591 Label bail;
4592 masm.loadObjClassUnsafe(obj, temp);
4593 masm.branchIfClassIsNotTypedArray(temp, &bail);
4594 bailoutFrom(&bail, guard->snapshot());
// Bail out unless the proxy's handler pointer matches the expected handler.
4597 void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
4598 Register obj = ToRegister(guard->input());
4600 Label bail;
4602 Address handlerAddr(obj, ProxyObject::offsetOfHandler());
4603 masm.branchPtr(Assembler::NotEqual, handlerAddr,
4604 ImmPtr(guard->mir()->handler()), &bail);
4606 bailoutFrom(&bail, guard->snapshot());
// Bail out on pointer (in)equality with the expected object, direction
// controlled by the MIR node's bailOnEquality flag.
4609 void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
4610 Register input = ToRegister(guard->input());
4611 Register expected = ToRegister(guard->expected());
4613 Assembler::Condition cond =
4614 guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
4615 bailoutCmpPtr(cond, input, expected, guard->snapshot());
// Bail out unless the input is exactly the expected function (pointer test).
4618 void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
4619 Register input = ToRegister(guard->input());
4620 Register expected = ToRegister(guard->expected());
4622 bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
// Bail out unless the string equals the expected atom. guardSpecificAtom may
// call out of line, so pass the live volatile set (minus the scratch).
4625 void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
4626 Register str = ToRegister(guard->str());
4627 Register scratch = ToRegister(guard->temp0());
4629 LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
4630 volatileRegs.takeUnchecked(scratch);
4632 Label bail;
4633 masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
4634 &bail);
4635 bailoutFrom(&bail, guard->snapshot());
// Bail out unless the symbol is exactly the expected one (pointer test;
// symbols are interned GC things, so identity comparison suffices here).
4638 void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
4639 Register symbol = ToRegister(guard->symbol());
4641 bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
4642 guard->snapshot());
// Bail out unless the int32 equals the expected compile-time constant.
4645 void CodeGenerator::visitGuardSpecificInt32(LGuardSpecificInt32* guard) {
4646 Register num = ToRegister(guard->num());
4648 bailoutCmp32(Assembler::NotEqual, num, Imm32(guard->mir()->expected()),
4649 guard->snapshot());
// Convert a string to an array index. Fast path: the index cached in the
// string's flags. Slow path: ABI call to GetIndexFromString; a negative
// return means "not an index" and triggers a bailout.
4652 void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
4653 Register str = ToRegister(lir->string());
4654 Register output = ToRegister(lir->output());
4656 Label vmCall, done;
4657 masm.loadStringIndexValue(str, output, &vmCall);
4658 masm.jump(&done);
4661 masm.bind(&vmCall);
4663 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4664 volatileRegs.takeUnchecked(output);
4665 masm.PushRegsInMask(volatileRegs);
4667 using Fn = int32_t (*)(JSString* str);
4668 masm.setupAlignedABICall();
4669 masm.passABIArg(str);
4670 masm.callWithABI<Fn, GetIndexFromString>();
4671 masm.storeCallInt32Result(output);
4673 masm.PopRegsInMask(volatileRegs);
4675 // GetIndexFromString returns a negative value on failure.
4676 bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
4679 masm.bind(&done);
// Convert a string to an int32 via the masm helper, bailing out when the
// string does not represent an int32.
4682 void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
4683 Register str = ToRegister(lir->string());
4684 Register output = ToRegister(lir->output());
4685 Register temp = ToRegister(lir->temp0());
4687 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4689 Label bail;
4690 masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
4691 bailoutFrom(&bail, lir->snapshot());
// Convert a string to a double. Fast path: the string's cached index value,
// converted to double. Slow path: ABI call to StringToNumberPure writing the
// result into a reserved stack slot; a false return means OOM and bails out.
4694 void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
4695 Register str = ToRegister(lir->string());
4696 FloatRegister output = ToFloatRegister(lir->output());
4697 Register temp0 = ToRegister(lir->temp0());
4698 Register temp1 = ToRegister(lir->temp1());
4700 Label vmCall, done;
4701 // Use indexed value as fast path if possible.
4702 masm.loadStringIndexValue(str, temp0, &vmCall);
4703 masm.convertInt32ToDouble(temp0, output);
4704 masm.jump(&done);
4706 masm.bind(&vmCall);
4708 // Reserve stack for holding the result value of the call.
4709 masm.reserveStack(sizeof(double));
4710 masm.moveStackPtrTo(temp0);
4712 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4713 volatileRegs.takeUnchecked(temp0);
4714 volatileRegs.takeUnchecked(temp1);
4715 masm.PushRegsInMask(volatileRegs);
4717 using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
4718 masm.setupAlignedABICall();
4719 masm.loadJSContext(temp1);
4720 masm.passABIArg(temp1);
4721 masm.passABIArg(str);
4722 masm.passABIArg(temp0);
4723 masm.callWithABI<Fn, StringToNumberPure>();
4724 masm.storeCallPointerResult(temp0);
4726 masm.PopRegsInMask(volatileRegs);
4728 Label ok;
4729 masm.branchIfTrueBool(temp0, &ok);
4731 // OOM path, recovered by StringToNumberPure.
4733 // Use addToStackPtr instead of freeStack as freeStack tracks stack height
4734 // flow-insensitively, and using it here would confuse the stack height
4735 // tracking.
4736 masm.addToStackPtr(Imm32(sizeof(double)));
4737 bailout(lir->snapshot());
4739 masm.bind(&ok);
4740 masm.Pop(output);
4742 masm.bind(&done);
// Bail out if the object has any initialized dense elements.
4745 void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
4746 Register obj = ToRegister(guard->input());
4747 Register temp = ToRegister(guard->temp0());
4749 // Load obj->elements.
4750 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);
4752 // Make sure there are no dense elements.
4753 Address initLength(temp, ObjectElements::offsetOfInitializedLength());
4754 bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
// Widen a boolean (0/1 in a GPR) to a 64-bit integer via zero-extension.
4757 void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
4758 Register input = ToRegister(lir->input());
4759 Register64 output = ToOutRegister64(lir);
4761 masm.move32To64ZeroExtend(input, output);
// Shared helper: convert a string to int64 by calling the DoStringToInt64 VM
// function, which writes its result through an out-pointer into a reserved
// stack slot. Saves/restores live registers around the VM call.
4764 void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
4765 Register64 output) {
4766 Register temp = output.scratchReg();
4768 saveLive(lir);
4770 masm.reserveStack(sizeof(uint64_t));
4771 masm.moveStackPtrTo(temp);
4772 pushArg(temp);
4773 pushArg(input);
4775 using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
4776 callVM<Fn, DoStringToInt64>(lir);
4778 masm.load64(Address(masm.getStackPointer(), 0), output);
4779 masm.freeStack(sizeof(uint64_t));
// Don't clobber the freshly-loaded output when restoring live registers.
4781 restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
// String -> int64 conversion; all the work is in emitStringToInt64.
4784 void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
4785 Register input = ToRegister(lir->input());
4786 Register64 output = ToOutRegister64(lir);
4788 emitStringToInt64(lir, input, output);
// Convert a boxed Value to int64, accepting BigInt, Boolean and String
// inputs (in that test order) and bailing out for anything else. The
// emitTestAndUnbox lambda threads the fail/next-test labels so the final
// check branches straight to |fail|.
4791 void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
4792 ValueOperand input = ToValue(lir, LValueToInt64::InputIndex);
4793 Register temp = ToRegister(lir->temp0());
4794 Register64 output = ToOutRegister64(lir);
// Number of type cases handled below; used to pick fail vs. next-case label.
4796 int checks = 3;
4798 Label fail, done;
4799 // Jump to fail if this is the last check and we fail it,
4800 // otherwise to the next test.
4801 auto emitTestAndUnbox = [&](auto testAndUnbox) {
4802 MOZ_ASSERT(checks > 0);
4804 checks--;
4805 Label notType;
4806 Label* target = checks ? &notType : &fail;
4808 testAndUnbox(target);
4810 if (checks) {
4811 masm.jump(&done);
4812 masm.bind(&notType);
4816 Register tag = masm.extractTag(input, temp);
4818 // BigInt.
4819 emitTestAndUnbox([&](Label* target) {
4820 masm.branchTestBigInt(Assembler::NotEqual, tag, target);
4821 masm.unboxBigInt(input, temp);
4822 masm.loadBigInt64(temp, output);
4825 // Boolean
4826 emitTestAndUnbox([&](Label* target) {
4827 masm.branchTestBoolean(Assembler::NotEqual, tag, target);
4828 masm.unboxBoolean(input, temp);
4829 masm.move32To64ZeroExtend(temp, output);
4832 // String
4833 emitTestAndUnbox([&](Label* target) {
4834 masm.branchTestString(Assembler::NotEqual, tag, target);
4835 masm.unboxString(input, temp);
4836 emitStringToInt64(lir, temp, output);
4839 MOZ_ASSERT(checks == 0);
4841 bailoutFrom(&fail, lir->snapshot());
4842 masm.bind(&done);
// Truncate a BigInt to its low 64 bits by loading its digit payload.
void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
  Register operand = ToRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  masm.loadBigInt64(operand, output);
}
// Create the out-of-line VM-call path used when inline BigInt allocation
// fails. Selects the signed/unsigned VM function based on |type|. On 32-bit
// platforms the 64-bit input is passed as two 32-bit halves.
OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
                                                    Scalar::Type type,
                                                    Register64 input,
                                                    Register output) {
#if JS_BITS_PER_WORD == 32
  using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
  auto args = ArgList(input.low, input.high);
#else
  using Fn = BigInt* (*)(JSContext*, uint64_t);
  auto args = ArgList(input);
#endif

  if (type == Scalar::BigInt64) {
    return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
                                                     StoreRegisterTo(output));
  }

  MOZ_ASSERT(type == Scalar::BigUint64);
  return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
                                                    StoreRegisterTo(output));
}
// Allocate a BigInt for |input| with an inline nursery allocation and a VM
// fallback (via createBigIntOutOfLine). If no temp register was provided,
// one is borrowed by push/pop around the allocation; the push/pop pairing on
// both the success and failure paths is deliberate and must stay balanced.
void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
                                     Register64 input, Register output,
                                     Register maybeTemp) {
  OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);

  if (maybeTemp != InvalidReg) {
    masm.newGCBigInt(output, maybeTemp, initialBigIntHeap(), ool->entry());
  } else {
    // No temp available: borrow any register not holding input/output and
    // preserve its value across the allocation attempt.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(input);
    regs.take(output);

    Register temp = regs.takeAny();

    masm.push(temp);

    Label fail, ok;
    masm.newGCBigInt(output, temp, initialBigIntHeap(), &fail);
    masm.pop(temp);
    masm.jump(&ok);
    masm.bind(&fail);
    masm.pop(temp);
    masm.jump(ool->entry());
    masm.bind(&ok);
  }
  // Initialize the freshly allocated BigInt with the 64-bit payload.
  masm.initializeBigInt64(type, output, input);
  masm.bind(ool->rejoin());
}
// Box an int64 value into a (signed) BigInt object.
void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
  Register64 input = ToRegister64(lir->input());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp);
}
// Guard that the input Value is exactly the expected constant; bail otherwise.
void CodeGenerator::visitGuardValue(LGuardValue* lir) {
  ValueOperand input = ToValue(lir, LGuardValue::InputIndex);
  Value expected = lir->mir()->expected();
  Label bail;
  masm.branchTestValue(Assembler::NotEqual, input, expected, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard that the input Value is null or undefined; bail on any other tag.
void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
  ValueOperand input = ToValue(lir, LGuardNullOrUndefined::InputIndex);

  ScratchTagScope tag(masm, input);
  masm.splitTagForTest(input, tag);

  Label done;
  masm.branchTestNull(Assembler::Equal, tag, &done);

  Label bail;
  masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
  bailoutFrom(&bail, lir->snapshot());

  masm.bind(&done);
}
// Guard that the input Value is not an object; bail if it is.
void CodeGenerator::visitGuardIsNotObject(LGuardIsNotObject* lir) {
  ValueOperand input = ToValue(lir, LGuardIsNotObject::InputIndex);

  Label bail;
  masm.branchTestObject(Assembler::Equal, input, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard a JSFunction's flag word: all |expectedFlags| must be set and none of
// |unexpectedFlags| may be set; bail otherwise.
void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
  Register function = ToRegister(lir->function());

  Label bail;
  if (uint16_t flags = lir->mir()->expectedFlags()) {
    masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
  }
  if (uint16_t flags = lir->mir()->unexpectedFlags()) {
    masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
  }
  bailoutFrom(&bail, lir->snapshot());
}
// Guard that the function is a constructor and not a built-in; bail otherwise.
void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
    LGuardFunctionIsNonBuiltinCtor* lir) {
  Register function = ToRegister(lir->function());
  Register temp = ToRegister(lir->temp0());

  Label bail;
  masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard on a function's kind. Depending on bailOnEquality(), bail either when
// the kind matches the expected one or when it doesn't.
void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
  Register function = ToRegister(lir->function());
  Register temp = ToRegister(lir->temp0());

  Assembler::Condition cond =
      lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;

  Label bail;
  masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard that the function's script slot holds the expected BaseScript;
// bail otherwise.
void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
  Register function = ToRegister(lir->function());

  Address scriptAddr(function, JSFunction::offsetOfJitInfoOrScript());
  bailoutCmpPtr(Assembler::NotEqual, scriptAddr,
                ImmGCPtr(lir->mir()->expected()), lir->snapshot());
}
// Out-of-line path to update the store buffer.
// Captures the instruction (for live-register save/restore) and the written-to
// object allocation; the actual barrier is emitted in
// visitOutOfLineCallPostWriteBarrier.
class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;            // Instruction whose write needs the barrier.
  const LAllocation* object_;    // Object that was written into.

 public:
  OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
      : lir_(lir), object_(object) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteBarrier(this);
  }

  LInstruction* lir() const { return lir_; }
  const LAllocation* object() const { return object_; }
};
// For a known tenured cell, check (and update) its arena's buffered-cells
// bitmap directly instead of calling into the VM:
//  - if the cell's bit is already set, jump to |exit| (already barriered);
//  - if the arena still points at the sentinel (empty) cell set, jump to
//    |callVM| so the VM can allocate a real set;
//  - otherwise set the bit inline and jump to |exit|.
static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
                                            const gc::TenuredCell* cell,
                                            AllocatableGeneralRegisterSet& regs,
                                            Label* exit, Label* callVM) {
  Register temp = regs.takeAny();

  gc::Arena* arena = cell->arena();

  Register cells = temp;
  masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);

  // Compute the word offset and bit mask for this cell within the set.
  size_t index = gc::ArenaCellSet::getCellIndex(cell);
  size_t word;
  uint32_t mask;
  gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
  size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);

  masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
                    exit);

  // Check whether this is the sentinel set and if so call the VM to allocate
  // one for this arena.
  masm.branchPtr(Assembler::Equal,
                 Address(cells, gc::ArenaCellSet::offsetOfArena()),
                 ImmPtr(nullptr), callVM);

  // Add the cell to the set.
  masm.or32(Imm32(mask), Address(cells, offset));
  masm.jump(exit);

  regs.add(temp);
}
// Emit a whole-cell post-write barrier for |objreg|. Fast paths avoid the VM
// call when possible:
//  - known constant object: check the arena's cell-set bitmap directly;
//  - unknown object: check the runtime's one-element last-buffered-cell cache.
// Globals skip both fast paths (the caller has its own global check) and use
// the dedicated PostGlobalWriteBarrier VM function.
static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
                                 Register objreg, JSObject* maybeConstant,
                                 bool isGlobal,
                                 AllocatableGeneralRegisterSet& regs) {
  MOZ_ASSERT_IF(isGlobal, maybeConstant);

  Label callVM;
  Label exit;

  Register temp = regs.takeAny();

  // We already have a fast path to check whether a global is in the store
  // buffer.
  if (!isGlobal) {
    if (maybeConstant) {
      // Check store buffer bitmap directly for known object.
      EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
                                      &exit, &callVM);
    } else {
      // Check one element cache to avoid VM call.
      masm.branchPtr(Assembler::Equal,
                     AbsoluteAddress(runtime->addressOfLastBufferedWholeCell()),
                     objreg, &exit);
    }
  }

  // Call into the VM to barrier the write.
  masm.bind(&callVM);

  Register runtimereg = temp;
  masm.mov(ImmPtr(runtime), runtimereg);

  masm.setupAlignedABICall();
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  if (isGlobal) {
    using Fn = void (*)(JSRuntime* rt, GlobalObject* obj);
    masm.callWithABI<Fn, PostGlobalWriteBarrier>();
  } else {
    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* obj);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }

  masm.bind(&exit);
}
// Emit a post-write barrier for an LAllocation-described object. Constant
// objects are materialized into a volatile register and checked for being the
// script's global so the cheaper global fast path can be used.
void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());

  Register objreg;
  JSObject* object = nullptr;
  bool isGlobal = false;
  if (obj->isConstant()) {
    object = &obj->toConstant()->toObject();
    isGlobal = isGlobalObject(object);
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(object), objreg);
  } else {
    objreg = ToRegister(obj);
    regs.takeUnchecked(objreg);
  }

  EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
}
// Returns true if `def` might be allocated in the nursery.
// Boxes are unwrapped first; untyped Values are conservatively assumed to
// possibly hold a nursery-allocated cell.
static bool ValueNeedsPostBarrier(MDefinition* def) {
  if (def->isBox()) {
    def = def->toBox()->input();
  }
  if (def->type() == MIRType::Value) {
    return true;
  }
  return NeedsPostBarrier(def->type());
}
// Out-of-line path that calls PostWriteElementBarrier for a single element
// write. Stores everything needed to reconstruct the (object, index) pair,
// including an |indexDiff| adjustment applied to the index at barrier time.
class OutOfLineElementPostWriteBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  LiveRegisterSet liveVolatileRegs_;  // Volatile regs live across the OOL call.
  const LAllocation* index_;          // Element index (constant or register).
  int32_t indexDiff_;                 // Offset added to the index.
  Register obj_;                      // Object whose element was written.
  Register scratch_;                  // Scratch register for the ABI call.

 public:
  OutOfLineElementPostWriteBarrier(const LiveRegisterSet& liveVolatileRegs,
                                   Register obj, const LAllocation* index,
                                   Register scratch, int32_t indexDiff)
      : liveVolatileRegs_(liveVolatileRegs),
        index_(index),
        indexDiff_(indexDiff),
        obj_(obj),
        scratch_(scratch) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineElementPostWriteBarrier(this);
  }

  const LiveRegisterSet& liveVolatileRegs() const { return liveVolatileRegs_; }
  const LAllocation* index() const { return index_; }
  int32_t indexDiff() const { return indexDiff_; }

  Register object() const { return obj_; }
  Register scratch() const { return scratch_; }
};
// Emit the inline portion of an element post-write barrier: skip the barrier
// when the stored value provably isn't a nursery cell or when the object
// itself is in the nursery; otherwise branch to the out-of-line VM call.
void CodeGenerator::emitElementPostWriteBarrier(
    MInstruction* mir, const LiveRegisterSet& liveVolatileRegs, Register obj,
    const LAllocation* index, Register scratch, const ConstantOrRegister& val,
    int32_t indexDiff) {
  if (val.constant()) {
    // Constant GC things stored here must be tenured, so no barrier is needed.
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    return;
  }

  auto* ool = new (alloc()) OutOfLineElementPostWriteBarrier(
      liveVolatileRegs, obj, index, scratch, indexDiff);
  addOutOfLineCode(ool, mir);

  // Nursery objects never need a post barrier for writes into them.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, ool->rejoin());

  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::Equal, reg.valueReg(), scratch,
                                  ool->entry());
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, reg.typedReg().gpr(),
                                 scratch, ool->entry());
  }

  masm.bind(ool->rejoin());
}
// Out-of-line slow path: call PostWriteElementBarrier(runtime, obj, index)
// with volatile registers saved around the ABI call.
void CodeGenerator::visitOutOfLineElementPostWriteBarrier(
    OutOfLineElementPostWriteBarrier* ool) {
  Register obj = ool->object();
  Register scratch = ool->scratch();
  const LAllocation* index = ool->index();
  int32_t indexDiff = ool->indexDiff();

  masm.PushRegsInMask(ool->liveVolatileRegs());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(obj);
  regs.takeUnchecked(scratch);

  // Materialize the (possibly adjusted) index in a register.
  Register indexReg;
  if (index->isConstant()) {
    indexReg = regs.takeAny();
    masm.move32(Imm32(ToInt32(index) + indexDiff), indexReg);
  } else {
    indexReg = ToRegister(index);
    regs.takeUnchecked(indexReg);
    if (indexDiff != 0) {
      masm.add32(Imm32(indexDiff), indexReg);
    }
  }

  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(gen->runtime), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.passABIArg(indexReg);
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  // We don't need a sub32 here because indexReg must be in liveVolatileRegs
  // if indexDiff is not zero, so it will be restored below.
  MOZ_ASSERT_IF(indexDiff != 0, ool->liveVolatileRegs().has(indexReg));

  masm.PopRegsInMask(ool->liveVolatileRegs());

  masm.jump(ool->rejoin());
}
// Emit a post-write barrier for an object already held in a register.
void CodeGenerator::emitPostWriteBarrier(Register objreg) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(objreg);
  EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
}
// Out-of-line slow path: run the whole-cell post-write barrier with volatile
// live registers saved/restored around it.
void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
    OutOfLineCallPostWriteBarrier* ool) {
  saveLiveVolatile(ool->lir());
  const LAllocation* obj = ool->object();
  emitPostWriteBarrier(obj);
  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
// If |maybeGlobal| is the compiling script's global object, emit a check of
// realm->globalWriteBarriered so an already-barriered global skips the OOL
// barrier entirely.
void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
                                                OutOfLineCode* ool) {
  // Check whether an object is a global that we have already barriered before
  // calling into the VM.
  //
  // We only check for the script's global, not other globals within the same
  // compartment, because we bake in a pointer to realm->globalWriteBarriered
  // and doing that would be invalid for other realms because they could be
  // collected before the Ion code is discarded.

  if (!maybeGlobal->isConstant()) {
    return;
  }

  JSObject* obj = &maybeGlobal->toConstant()->toObject();
  if (gen->realm->maybeGlobal() != obj) {
    return;
  }

  const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
                ool->rejoin());
}
// Shared lowering for typed (Object/String/BigInt) post-write barriers:
// skip the barrier when the written-to object is in the nursery or when the
// stored value is not a nursery cell; otherwise take |ool|'s entry.
template <class LPostBarrierType, MIRType nurseryType>
void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
                                                OutOfLineCode* ool) {
  static_assert(NeedsPostBarrier(nurseryType));

  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  Register value = ToRegister(lir->value());
  // Debug-check that the MIR value type matches the instantiation.
  if constexpr (nurseryType == MIRType::Object) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
  } else if constexpr (nurseryType == MIRType::String) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
  } else {
    static_assert(nurseryType == MIRType::BigInt);
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
// Shared lowering for boxed-Value post-write barriers; same structure as
// visitPostWriteBarrierCommon but tests the Value payload for a nursery cell.
template <class LPostBarrierType>
void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
                                                 OutOfLineCode* ool) {
  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  ValueOperand value = ToValue(lir, LPostBarrierType::ValueIndex);
  masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
5313 void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
5314 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5315 visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
5318 void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
5319 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5320 visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
5323 void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
5324 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5325 visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
5328 void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
5329 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5330 visitPostWriteBarrierCommonV(lir, ool);
// Out-of-line path to update the store buffer.
// Element variant of OutOfLineCallPostWriteBarrier: also records the element
// index so the VM can buffer a (object, index) slot edge.
class OutOfLineCallPostWriteElementBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;           // Instruction whose write needs the barrier.
  const LAllocation* object_;   // Object that was written into.
  const LAllocation* index_;    // Element index that was written.

 public:
  OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
                                       const LAllocation* object,
                                       const LAllocation* index)
      : lir_(lir), object_(object), index_(index) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteElementBarrier(this);
  }

  LInstruction* lir() const { return lir_; }

  const LAllocation* object() const { return object_; }

  const LAllocation* index() const { return index_; }
};
// Out-of-line slow path: call PostWriteElementBarrier(runtime, obj, index)
// with the volatile live registers of the originating instruction preserved.
void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
    OutOfLineCallPostWriteElementBarrier* ool) {
  saveLiveVolatile(ool->lir());

  const LAllocation* obj = ool->object();
  const LAllocation* index = ool->index();

  Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
  Register indexreg = ToRegister(index);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(indexreg);

  // Materialize a constant object into a free volatile register.
  if (obj->isConstant()) {
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
  } else {
    regs.takeUnchecked(objreg);
  }

  Register runtimereg = regs.takeAny();
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.setupAlignedABICall();
  masm.mov(ImmPtr(gen->runtime), runtimereg);
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  masm.passABIArg(indexreg);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
5391 void CodeGenerator::visitPostWriteElementBarrierO(
5392 LPostWriteElementBarrierO* lir) {
5393 auto ool = new (alloc())
5394 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5395 visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
5396 ool);
5399 void CodeGenerator::visitPostWriteElementBarrierS(
5400 LPostWriteElementBarrierS* lir) {
5401 auto ool = new (alloc())
5402 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5403 visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
5404 ool);
5407 void CodeGenerator::visitPostWriteElementBarrierBI(
5408 LPostWriteElementBarrierBI* lir) {
5409 auto ool = new (alloc())
5410 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5411 visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
5412 ool);
5415 void CodeGenerator::visitPostWriteElementBarrierV(
5416 LPostWriteElementBarrierV* lir) {
5417 auto ool = new (alloc())
5418 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5419 visitPostWriteBarrierCommonV(lir, ool);
// Debug assertion: verify that a post-write barrier was correctly elided,
// i.e. the object is in the nursery or the value is not a nursery cell.
void CodeGenerator::visitAssertCanElidePostWriteBarrier(
    LAssertCanElidePostWriteBarrier* lir) {
  Register object = ToRegister(lir->object());
  ValueOperand value =
      ToValue(lir, LAssertCanElidePostWriteBarrier::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  Label ok;
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, &ok);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp, &ok);

  masm.assumeUnreachable("Unexpected missing post write barrier");

  masm.bind(&ok);
}
// Emit a call to a JSNative. Builds the vp array on the stack (result slot,
// callee, |this|, args), constructs a fake native exit frame, performs the
// ABI call, checks for failure, and loads the result from vp[0].
// Instantiated for LCallNative (known single target) and LCallClassHook
// (callee only known at runtime, held in a register).
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
  MCallBase* mir = call->mir();

  uint32_t unusedStack = UnusedStackBytesForCall(mir->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argContextReg = ToRegister(call->getArgContextReg());
  const Register argUintNReg = ToRegister(call->getArgUintNReg());
  const Register argVpReg = ToRegister(call->getArgVpReg());

  // Misc. temporary registers.
  const Register tempReg = ToRegister(call->getTempReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Allocate space for the outparam, moving the StackPointer to what will be
  // &vp[1].
  masm.adjustStack(unusedStack);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. The StackPointer is moved
  // to &vp[0].
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    Register calleeReg = ToRegister(call->getCallee());
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));

    if (call->mir()->maybeCrossRealm()) {
      masm.switchToObjectRealm(calleeReg, tempReg);
    }
  } else {
    WrappedFunction* target = call->getSingleTarget();
    masm.Push(ObjectValue(*target->rawNativeJSFunction()));

    if (call->mir()->maybeCrossRealm()) {
      masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
      masm.switchToObjectRealm(tempReg, tempReg);
    }
  }

  // Preload arguments into registers.
  masm.loadJSContext(argContextReg);
  masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
  masm.moveStackPtrTo(argVpReg);

  masm.Push(argUintNReg);

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
  masm.enterFakeExitFrameForNative(argContextReg, tempReg,
                                   call->mir()->isConstructing());

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.passABIArg(argContextReg);
  masm.passABIArg(argUintNReg);
  masm.passABIArg(argVpReg);

  ensureOsiSpace();
  // If we're using a simulator build, `native` will already point to the
  // simulator's call-redirection code for LCallClassHook. Load the address in
  // a register first so that we don't try to redirect it a second time.
  bool emittedCall = false;
#ifdef JS_SIMULATOR
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    masm.movePtr(ImmPtr(native), tempReg);
    masm.callWithABI(tempReg);
    emittedCall = true;
  }
#endif
  if (!emittedCall) {
    masm.callWithABI(DynamicFunction<JSNative>(native), MoveOp::GENERAL,
                     CheckUnsafeCallWithABI::DontCheckHasExitFrame);
  }

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

  if (call->mir()->maybeCrossRealm()) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  masm.loadValue(
      Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
      JSReturnOperand);

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
      mir->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Call a known native function. When the call's result is unused and the
// native provides an ignores-return-value variant via JitInfo, call that
// variant instead.
void CodeGenerator::visitCallNative(LCallNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithoutJitEntry());

  JSNative native = target->native();
  if (call->ignoresReturnValue() && target->hasJitInfo()) {
    const JSJitInfo* jitInfo = target->jitInfo();
    if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
      native = jitInfo->ignoresReturnValueMethod;
    }
  }
  emitCallNative(call, native);
}
5565 void CodeGenerator::visitCallClassHook(LCallClassHook* call) {
5566 emitCallNative(call, call->mir()->target());
// Load the DOM private value for a native or proxy DOM object into |priv|.
static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
                           DOMObjectKind kind) {
  // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
  // will be in the first slot but may be fixed or non-fixed.
  MOZ_ASSERT(obj != priv);

  switch (kind) {
    case DOMObjectKind::Native:
      // If it's a native object, the value must be in a fixed slot.
      // See CanAttachDOMCall in CacheIR.cpp.
      masm.debugAssertObjHasFixedSlots(obj, priv);
      masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
      break;
    case DOMObjectKind::Proxy: {
#ifdef DEBUG
      // Sanity check: it must be a DOM proxy.
      Label isDOMProxy;
      masm.branchTestProxyHandlerFamily(
          Assembler::Equal, obj, priv, GetDOMProxyHandlerFamily(), &isDOMProxy);
      masm.assumeUnreachable("Expected a DOM proxy");
      masm.bind(&isDOMProxy);
#endif
      // Proxies keep the private in reserved slot 0.
      masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
      masm.loadPrivate(
          Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
      break;
    }
  }
}
// Call a DOM method through its JSJitInfo entry point. Lays out vp/argv on
// the stack, loads the DOM private, builds an IonDOMMethod exit frame, and
// performs the ABI call. Infallible methods skip the return-value check.
void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithoutJitEntry());
  MOZ_ASSERT(target->hasJitInfo());
  MOZ_ASSERT(call->mir()->isCallDOMNative());

  int unusedStack = UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argJSContext = ToRegister(call->getArgJSContext());
  const Register argObj = ToRegister(call->getArgObj());
  const Register argPrivate = ToRegister(call->getArgPrivate());
  const Register argArgs = ToRegister(call->getArgArgs());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // DOM methods have the signature:
  //  bool (*)(JSContext*, HandleObject, void* private, const
  //  JSJitMethodCallArgs& args)
  // Where args is initialized from an argc and a vp, vp[0] is space for an
  // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
  // function arguments. Note that args stores the argv, not the vp, and
  // argv == vp + 2.

  // Nestle the stack up against the pushed arguments, leaving StackPointer at
  // &vp[1]
  masm.adjustStack(unusedStack);
  // argObj is filled with the extracted object, then returned.
  Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
  MOZ_ASSERT(obj == argObj);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. After this the StackPointer
  // points to &vp[0].
  masm.Push(ObjectValue(*target->rawNativeJSFunction()));

  // Now compute the argv value. Since StackPointer is pointing to &vp[0] and
  // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
  // StackPointer.
  static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
  static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
                IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);

  LoadDOMPrivate(masm, obj, argPrivate,
                 static_cast<MCallDOMNative*>(call->mir())->objectKind());

  // Push argc from the call instruction into what will become the IonExitFrame
  masm.Push(Imm32(call->numActualArgs()));

  // Push our argv onto the stack
  masm.Push(argArgs);
  // And store our JSJitMethodCallArgs* in argArgs.
  masm.moveStackPtrTo(argArgs);

  // Push |this| object for passing HandleObject. We push after argc to
  // maintain the same sp-relative location of the object pointer with other
  // DOMExitFrames.
  masm.Push(argObj);
  masm.moveStackPtrTo(argObj);

  if (call->mir()->maybeCrossRealm()) {
    // We use argJSContext as scratch register here.
    masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
    masm.switchToObjectRealm(argJSContext, argJSContext);
  }

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
  masm.loadJSContext(argJSContext);
  masm.enterFakeExitFrame(argJSContext, argJSContext,
                          ExitFrameType::IonDOMMethod);

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.loadJSContext(argJSContext);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argObj);
  masm.passABIArg(argPrivate);
  masm.passABIArg(argArgs);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
                   MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (target->jitInfo()->isInfallible) {
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    // Test for failure.
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    // Load the outparam vp[0] into output register(s).
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  // Switch back to the current realm if needed. Note: if the DOM method threw
  // an exception, the exception handler will do this.
  if (call->mir()->maybeCrossRealm()) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "Clobbering ReturnReg should not affect the return value");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Look up a self-hosting intrinsic value by name via a VM call.
void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
  pushArg(ImmGCPtr(lir->mir()->name()));

  using Fn = bool (*)(JSContext* cx, Handle<PropertyName*>, MutableHandleValue);
  callVM<Fn, GetIntrinsicValue>(lir);
}
// Slow-path call: invoke a callee through the generic InvokeFunction VM
// function, passing the already-pushed argument vector by stack address.
void CodeGenerator::emitCallInvokeFunction(
    LInstruction* call, Register calleereg, bool constructing,
    bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
  // Nestle %esp up to the argument vector.
  // Each path must account for framePushed_ separately, for callVM to be valid.
  masm.freeStack(unusedStack);

  pushArg(masm.getStackPointer());  // argv.
  pushArg(Imm32(argc));             // argc.
  pushArg(Imm32(ignoresReturnValue));
  pushArg(Imm32(constructing));  // constructing.
  pushArg(calleereg);            // JSFunction*.

  using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
                      MutableHandleValue);
  callVM<Fn, jit::InvokeFunction>(call);

  // Un-nestle %esp from the argument vector. No prefix was pushed.
  masm.reserveStack(unusedStack);
}
5755 void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
5756 // The callee is passed straight through to the trampoline.
5757 MOZ_ASSERT(ToRegister(call->getCallee()) == IonGenericCallCalleeReg);
5759 Register argcReg = ToRegister(call->getArgc());
5760 uint32_t unusedStack =
5761 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5763 // Known-target case is handled by LCallKnown.
5764 MOZ_ASSERT(!call->hasSingleTarget());
5766 masm.checkStackAlignment();
5768 masm.move32(Imm32(call->numActualArgs()), argcReg);
5770 // Nestle the StackPointer up to the argument vector.
5771 masm.freeStack(unusedStack);
5772 ensureOsiSpace();
5774 auto kind = call->mir()->isConstructing() ? IonGenericCallKind::Construct
5775 : IonGenericCallKind::Call;
5777 TrampolinePtr genericCallStub =
5778 gen->jitRuntime()->getIonGenericCallStub(kind);
5779 uint32_t callOffset = masm.callJit(genericCallStub);
5780 markSafepointAt(callOffset, call);
5782 if (call->mir()->maybeCrossRealm()) {
5783 static_assert(!JSReturnOperand.aliases(ReturnReg),
5784 "ReturnReg available as scratch after scripted calls");
5785 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5788 // Restore stack pointer.
5789 masm.setFramePushed(frameSize());
5790 emitRestoreStackPointerFromFP();
5792 // If the return value of the constructing function is Primitive,
5793 // replace the return value with the Object from CreateThis.
5794 if (call->mir()->isConstructing()) {
5795 Label notPrimitive;
5796 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5797 &notPrimitive);
5798 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
5799 JSReturnOperand);
5800 #ifdef DEBUG
5801 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5802 &notPrimitive);
5803 masm.assumeUnreachable("CreateThis creates an object");
5804 #endif
5805 masm.bind(&notPrimitive);
5809 void JitRuntime::generateIonGenericCallArgumentsShift(
5810 MacroAssembler& masm, Register argc, Register curr, Register end,
5811 Register scratch, Label* done) {
5812 static_assert(sizeof(Value) == 8);
5813 // There are |argc| Values on the stack. Shift them all down by 8 bytes,
5814 // overwriting the first value.
5816 // Initialize `curr` to the destination of the first copy, and `end` to the
5817 // final value of curr.
5818 masm.moveStackPtrTo(curr);
5819 masm.computeEffectiveAddress(BaseValueIndex(curr, argc), end);
5821 Label loop;
5822 masm.bind(&loop);
5823 masm.branchPtr(Assembler::Equal, curr, end, done);
5824 masm.loadPtr(Address(curr, 8), scratch);
5825 masm.storePtr(scratch, Address(curr, 0));
5826 masm.addPtr(Imm32(sizeof(uintptr_t)), curr);
5827 masm.jump(&loop);
5830 void JitRuntime::generateIonGenericCallStub(MacroAssembler& masm,
5831 IonGenericCallKind kind) {
5832 AutoCreatedBy acb(masm, "JitRuntime::generateIonGenericCallStub");
5833 ionGenericCallStubOffset_[kind] = startTrampolineCode(masm);
5835 // This code is tightly coupled with visitCallGeneric.
5837 // Upon entry:
5838 // IonGenericCallCalleeReg contains a pointer to the callee object.
5839 // IonGenericCallArgcReg contains the number of actual args.
5840 // The arguments have been pushed onto the stack:
5841 // [newTarget] (iff isConstructing)
5842 // [argN]
5843 // ...
5844 // [arg1]
5845 // [arg0]
5846 // [this]
5847 // <return address> (if not JS_USE_LINK_REGISTER)
5849 // This trampoline is responsible for entering the callee's realm,
5850 // massaging the stack into the right shape, and then performing a
5851 // tail call. We will return directly to the Ion code from the
5852 // callee.
5854 // To do a tail call, we keep the return address in a register, even
5855 // on platforms that don't normally use a link register, and push it
5856 // just before jumping to the callee, after we are done setting up
5857 // the stack.
5859 // The caller is responsible for switching back to the caller's
5860 // realm and cleaning up the stack.
5862 Register calleeReg = IonGenericCallCalleeReg;
5863 Register argcReg = IonGenericCallArgcReg;
5864 Register scratch = IonGenericCallScratch;
5865 Register scratch2 = IonGenericCallScratch2;
5867 #ifndef JS_USE_LINK_REGISTER
5868 Register returnAddrReg = IonGenericCallReturnAddrReg;
5869 masm.pop(returnAddrReg);
5870 #endif
5872 #ifdef JS_CODEGEN_ARM
5873 // The default second scratch register on arm is lr, which we need
5874 // preserved for tail calls.
5875 AutoNonDefaultSecondScratchRegister andssr(masm, IonGenericSecondScratchReg);
5876 #endif
5878 bool isConstructing = kind == IonGenericCallKind::Construct;
5880 Label entry, notFunction, noJitEntry, vmCall;
5881 masm.bind(&entry);
5883 // Guard that the callee is actually a function.
5884 masm.branchTestObjIsFunction(Assembler::NotEqual, calleeReg, scratch,
5885 calleeReg, &notFunction);
5887 // Guard that the callee supports the [[Call]] or [[Construct]] operation.
5888 // If these tests fail, we will call into the VM to throw an exception.
5889 if (isConstructing) {
5890 masm.branchTestFunctionFlags(calleeReg, FunctionFlags::CONSTRUCTOR,
5891 Assembler::Zero, &vmCall);
5892 } else {
5893 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
5894 calleeReg, scratch, &vmCall);
5897 if (isConstructing) {
5898 // Use the slow path if CreateThis was unable to create the |this| object.
5899 Address thisAddr(masm.getStackPointer(), 0);
5900 masm.branchTestNull(Assembler::Equal, thisAddr, &vmCall);
5903 masm.switchToObjectRealm(calleeReg, scratch);
5905 // Load jitCodeRaw for callee if it exists.
5906 masm.branchIfFunctionHasNoJitEntry(calleeReg, isConstructing, &noJitEntry);
5908 // ****************************
5909 // * Functions with jit entry *
5910 // ****************************
5911 masm.loadJitCodeRaw(calleeReg, scratch2);
5913 // Construct the JitFrameLayout.
5914 masm.PushCalleeToken(calleeReg, isConstructing);
5915 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcReg, scratch);
5916 #ifndef JS_USE_LINK_REGISTER
5917 masm.push(returnAddrReg);
5918 #endif
5920 // Check whether we need a rectifier frame.
5921 Label noRectifier;
5922 masm.loadFunctionArgCount(calleeReg, scratch);
5923 masm.branch32(Assembler::BelowOrEqual, scratch, argcReg, &noRectifier);
5925 // Tail-call the arguments rectifier.
5926 // Because all trampolines are created at the same time,
5927 // we can't create a TrampolinePtr for the arguments rectifier,
5928 // because it hasn't been linked yet. We can, however, directly
5929 // encode its offset.
5930 Label rectifier;
5931 bindLabelToOffset(&rectifier, argumentsRectifierOffset_);
5933 masm.jump(&rectifier);
5936 // Tail call the jit entry.
5937 masm.bind(&noRectifier);
5938 masm.jump(scratch2);
5940 // ********************
5941 // * Native functions *
5942 // ********************
5943 masm.bind(&noJitEntry);
5944 if (!isConstructing) {
5945 generateIonGenericCallFunCall(masm, &entry, &vmCall);
5947 generateIonGenericCallNativeFunction(masm, isConstructing);
5949 // *******************
5950 // * Bound functions *
5951 // *******************
5952 // TODO: support class hooks?
5953 masm.bind(&notFunction);
5954 if (!isConstructing) {
5955 // TODO: support generic bound constructors?
5956 generateIonGenericCallBoundFunction(masm, &entry, &vmCall);
5959 // ********************
5960 // * Fallback VM call *
5961 // ********************
5962 masm.bind(&vmCall);
5964 masm.push(masm.getStackPointer()); // argv
5965 masm.push(argcReg); // argc
5966 masm.push(Imm32(false)); // ignores return value
5967 masm.push(Imm32(isConstructing)); // constructing
5968 masm.push(calleeReg); // callee
5970 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5971 MutableHandleValue);
5972 VMFunctionId id = VMFunctionToId<Fn, jit::InvokeFunction>::id;
5973 uint32_t invokeFunctionOffset = functionWrapperOffsets_[size_t(id)];
5974 Label invokeFunctionVMEntry;
5975 bindLabelToOffset(&invokeFunctionVMEntry, invokeFunctionOffset);
5977 masm.pushFrameDescriptor(FrameType::IonJS);
5978 #ifndef JS_USE_LINK_REGISTER
5979 masm.push(returnAddrReg);
5980 #endif
5981 masm.jump(&invokeFunctionVMEntry);
5984 void JitRuntime::generateIonGenericCallNativeFunction(MacroAssembler& masm,
5985 bool isConstructing) {
5986 Register calleeReg = IonGenericCallCalleeReg;
5987 Register argcReg = IonGenericCallArgcReg;
5988 Register scratch = IonGenericCallScratch;
5989 Register scratch2 = IonGenericCallScratch2;
5990 Register contextReg = IonGenericCallScratch3;
5991 #ifndef JS_USE_LINK_REGISTER
5992 Register returnAddrReg = IonGenericCallReturnAddrReg;
5993 #endif
5995 // Push a value containing the callee, which will become argv[0].
5996 masm.pushValue(JSVAL_TYPE_OBJECT, calleeReg);
5998 // Load the callee address into calleeReg.
5999 #ifdef JS_SIMULATOR
6000 masm.movePtr(ImmPtr(RedirectedCallAnyNative()), calleeReg);
6001 #else
6002 masm.loadPrivate(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6003 calleeReg);
6004 #endif
6006 // Load argv into scratch2.
6007 masm.moveStackPtrTo(scratch2);
6009 // Push argc.
6010 masm.push(argcReg);
6012 masm.loadJSContext(contextReg);
6014 // Construct native exit frame. Note that unlike other cases in this
6015 // trampoline, this code does not use a tail call.
6016 masm.pushFrameDescriptor(FrameType::IonJS);
6017 #ifdef JS_USE_LINK_REGISTER
6018 masm.pushReturnAddress();
6019 #else
6020 masm.push(returnAddrReg);
6021 #endif
6023 masm.push(FramePointer);
6024 masm.moveStackPtrTo(FramePointer);
6025 masm.enterFakeExitFrameForNative(contextReg, scratch, isConstructing);
6027 masm.setupUnalignedABICall(scratch);
6028 masm.passABIArg(contextReg); // cx
6029 masm.passABIArg(argcReg); // argc
6030 masm.passABIArg(scratch2); // argv
6032 masm.callWithABI(calleeReg);
6034 // Test for failure.
6035 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
6037 masm.loadValue(
6038 Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
6039 JSReturnOperand);
6041 // Leave the exit frame.
6042 masm.moveToStackPtr(FramePointer);
6043 masm.pop(FramePointer);
6045 // Return.
6046 masm.ret();
6049 void JitRuntime::generateIonGenericCallFunCall(MacroAssembler& masm,
6050 Label* entry, Label* vmCall) {
6051 Register calleeReg = IonGenericCallCalleeReg;
6052 Register argcReg = IonGenericCallArgcReg;
6053 Register scratch = IonGenericCallScratch;
6054 Register scratch2 = IonGenericCallScratch2;
6055 Register scratch3 = IonGenericCallScratch3;
6057 Label notFunCall;
6058 masm.branchPtr(Assembler::NotEqual,
6059 Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6060 ImmPtr(js::fun_call), &notFunCall);
6062 // In general, we can implement fun_call by replacing calleeReg with
6063 // |this|, sliding all the other arguments down, and decrementing argc.
6065 // *BEFORE* *AFTER*
6066 // [argN] argc = N+1 <padding>
6067 // ... [argN] argc = N
6068 // [arg1] ...
6069 // [arg0] [arg1] <- now arg0
6070 // [this] <- top of stack (aligned) [arg0] <- now this
6072 // The only exception is when argc is already 0, in which case instead
6073 // of shifting arguments down we replace [this] with UndefinedValue():
6075 // *BEFORE* *AFTER*
6076 // [this] argc = 0 [undef] argc = 0
6078 // After making this transformation, we can jump back to the beginning
6079 // of this trampoline to handle the inner call.
6081 // Guard that |this| is an object. If it is, replace calleeReg.
6082 masm.fallibleUnboxObject(Address(masm.getStackPointer(), 0), scratch, vmCall);
6083 masm.movePtr(scratch, calleeReg);
6085 Label hasArgs;
6086 masm.branch32(Assembler::NotEqual, argcReg, Imm32(0), &hasArgs);
6088 // No arguments. Replace |this| with |undefined| and start from the top.
6089 masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), 0));
6090 masm.jump(entry);
6092 masm.bind(&hasArgs);
6094 Label doneSliding;
6095 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6096 scratch3, &doneSliding);
6097 masm.bind(&doneSliding);
6098 masm.sub32(Imm32(1), argcReg);
6100 masm.jump(entry);
6102 masm.bind(&notFunCall);
6105 void JitRuntime::generateIonGenericCallBoundFunction(MacroAssembler& masm,
6106 Label* entry,
6107 Label* vmCall) {
6108 Register calleeReg = IonGenericCallCalleeReg;
6109 Register argcReg = IonGenericCallArgcReg;
6110 Register scratch = IonGenericCallScratch;
6111 Register scratch2 = IonGenericCallScratch2;
6112 Register scratch3 = IonGenericCallScratch3;
6114 masm.branchTestObjClass(Assembler::NotEqual, calleeReg,
6115 &BoundFunctionObject::class_, scratch, calleeReg,
6116 vmCall);
6118 Address targetSlot(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
6119 Address flagsSlot(calleeReg, BoundFunctionObject::offsetOfFlagsSlot());
6120 Address thisSlot(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
6121 Address firstInlineArgSlot(
6122 calleeReg, BoundFunctionObject::offsetOfFirstInlineBoundArg());
6124 // Check that we won't be pushing too many arguments.
6125 masm.load32(flagsSlot, scratch);
6126 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6127 masm.add32(argcReg, scratch);
6128 masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX), vmCall);
6130 // The stack is currently correctly aligned for a jit call. We will
6131 // be updating the `this` value and potentially adding additional
6132 // arguments. On platforms with 16-byte alignment, if the number of
6133 // bound arguments is odd, we have to move the arguments that are
6134 // currently on the stack. For example, with one bound argument:
6136 // *BEFORE* *AFTER*
6137 // [argN] <padding>
6138 // ... [argN] |
6139 // [arg1] ... | These arguments have been
6140 // [arg0] [arg1] | shifted down 8 bytes.
6141 // [this] <- top of stack (aligned) [arg0] v
6142 // [bound0] <- one bound argument (odd)
6143 // [boundThis] <- top of stack (aligned)
6145 Label poppedThis;
6146 if (JitStackValueAlignment > 1) {
6147 Label alreadyAligned;
6148 masm.branchTest32(Assembler::Zero, flagsSlot,
6149 Imm32(1 << BoundFunctionObject::NumBoundArgsShift),
6150 &alreadyAligned);
6152 // We have an odd number of bound arguments. Shift the existing arguments
6153 // down by 8 bytes.
6154 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6155 scratch3, &poppedThis);
6156 masm.bind(&alreadyAligned);
6159 // Pop the current `this`. It will be replaced with the bound `this`.
6160 masm.freeStack(sizeof(Value));
6161 masm.bind(&poppedThis);
6163 // Load the number of bound arguments in scratch
6164 masm.load32(flagsSlot, scratch);
6165 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6167 Label donePushingBoundArguments;
6168 masm.branch32(Assembler::Equal, scratch, Imm32(0),
6169 &donePushingBoundArguments);
6171 // Update argc to include bound arguments.
6172 masm.add32(scratch, argcReg);
6174 // Load &boundArgs[0] in scratch2.
6175 Label outOfLineBoundArguments, haveBoundArguments;
6176 masm.branch32(Assembler::Above, scratch,
6177 Imm32(BoundFunctionObject::MaxInlineBoundArgs),
6178 &outOfLineBoundArguments);
6179 masm.computeEffectiveAddress(firstInlineArgSlot, scratch2);
6180 masm.jump(&haveBoundArguments);
6182 masm.bind(&outOfLineBoundArguments);
6183 masm.unboxObject(firstInlineArgSlot, scratch2);
6184 masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2);
6186 masm.bind(&haveBoundArguments);
6188 // Load &boundArgs[numBoundArgs] in scratch.
6189 BaseObjectElementIndex lastBoundArg(scratch2, scratch);
6190 masm.computeEffectiveAddress(lastBoundArg, scratch);
6192 // Push the bound arguments, starting with the last one.
6193 // Copying pre-decrements scratch until scratch2 is reached.
6194 Label boundArgumentsLoop;
6195 masm.bind(&boundArgumentsLoop);
6196 masm.subPtr(Imm32(sizeof(Value)), scratch);
6197 masm.pushValue(Address(scratch, 0));
6198 masm.branchPtr(Assembler::Above, scratch, scratch2, &boundArgumentsLoop);
6199 masm.bind(&donePushingBoundArguments);
6201 // Push the bound `this`.
6202 masm.pushValue(thisSlot);
6204 // Load the target in calleeReg.
6205 masm.unboxObject(targetSlot, calleeReg);
6207 // At this point, all preconditions for entering the trampoline are met:
6208 // - calleeReg contains a pointer to the callee object
6209 // - argcReg contains the number of actual args (now including bound args)
6210 // - the arguments are on the stack with the correct alignment.
6211 // Instead of generating more code, we can jump back to the entry point
6212 // of the trampoline to call the bound target.
6213 masm.jump(entry);
6216 void CodeGenerator::visitCallKnown(LCallKnown* call) {
6217 Register calleereg = ToRegister(call->getFunction());
6218 Register objreg = ToRegister(call->getTempObject());
6219 uint32_t unusedStack =
6220 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
6221 WrappedFunction* target = call->getSingleTarget();
6223 // Native single targets (except wasm) are handled by LCallNative.
6224 MOZ_ASSERT(target->hasJitEntry());
6226 // Missing arguments must have been explicitly appended by WarpBuilder.
6227 DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
6228 MOZ_ASSERT(target->nargs() <=
6229 call->mir()->numStackArgs() - numNonArgsOnStack);
6231 MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());
6233 masm.checkStackAlignment();
6235 if (target->isClassConstructor() && !call->isConstructing()) {
6236 emitCallInvokeFunction(call, calleereg, call->isConstructing(),
6237 call->ignoresReturnValue(), call->numActualArgs(),
6238 unusedStack);
6239 return;
6242 MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());
6244 MOZ_ASSERT(!call->mir()->needsThisCheck());
6246 if (call->mir()->maybeCrossRealm()) {
6247 masm.switchToObjectRealm(calleereg, objreg);
6250 masm.loadJitCodeRaw(calleereg, objreg);
6252 // Nestle the StackPointer up to the argument vector.
6253 masm.freeStack(unusedStack);
6255 // Construct the JitFrameLayout.
6256 masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
6257 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, call->numActualArgs());
6259 // Finally call the function in objreg.
6260 ensureOsiSpace();
6261 uint32_t callOffset = masm.callJit(objreg);
6262 markSafepointAt(callOffset, call);
6264 if (call->mir()->maybeCrossRealm()) {
6265 static_assert(!JSReturnOperand.aliases(ReturnReg),
6266 "ReturnReg available as scratch after scripted calls");
6267 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6270 // Restore stack pointer: pop JitFrameLayout fields still left on the stack
6271 // and undo the earlier |freeStack(unusedStack)|.
6272 int prefixGarbage =
6273 sizeof(JitFrameLayout) - JitFrameLayout::bytesPoppedAfterCall();
6274 masm.adjustStack(prefixGarbage - unusedStack);
6276 // If the return value of the constructing function is Primitive,
6277 // replace the return value with the Object from CreateThis.
6278 if (call->mir()->isConstructing()) {
6279 Label notPrimitive;
6280 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6281 &notPrimitive);
6282 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
6283 JSReturnOperand);
6284 #ifdef DEBUG
6285 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6286 &notPrimitive);
6287 masm.assumeUnreachable("CreateThis creates an object");
6288 #endif
6289 masm.bind(&notPrimitive);
6293 template <typename T>
6294 void CodeGenerator::emitCallInvokeFunction(T* apply) {
6295 Register objreg = ToRegister(apply->getTempObject());
6297 // Push the space used by the arguments.
6298 masm.moveStackPtrTo(objreg);
6300 pushArg(objreg); // argv.
6301 pushArg(ToRegister(apply->getArgc())); // argc.
6302 pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
6303 pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
6304 pushArg(ToRegister(apply->getFunction())); // JSFunction*.
6306 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6307 MutableHandleValue);
6308 callVM<Fn, jit::InvokeFunction>(apply);
6311 // Do not bailout after the execution of this function since the stack no longer
6312 // correspond to what is expected by the snapshots.
6313 void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
6314 Register scratch) {
6315 // Use scratch register to calculate stack space (including padding).
6316 masm.movePtr(argcreg, scratch);
6318 // Align the JitFrameLayout on the JitStackAlignment.
6319 if (JitStackValueAlignment > 1) {
6320 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6321 "Stack padding assumes that the frameSize is correct");
6322 MOZ_ASSERT(JitStackValueAlignment == 2);
6323 Label noPaddingNeeded;
6324 // if the number of arguments is odd, then we do not need any padding.
6325 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6326 masm.addPtr(Imm32(1), scratch);
6327 masm.bind(&noPaddingNeeded);
6330 // Reserve space for copying the arguments.
6331 NativeObject::elementsSizeMustNotOverflow();
6332 masm.lshiftPtr(Imm32(ValueShift), scratch);
6333 masm.subFromStackPtr(scratch);
6335 #ifdef DEBUG
6336 // Put a magic value in the space reserved for padding. Note, this code
6337 // cannot be merged with the previous test, as not all architectures can
6338 // write below their stack pointers.
6339 if (JitStackValueAlignment > 1) {
6340 MOZ_ASSERT(JitStackValueAlignment == 2);
6341 Label noPaddingNeeded;
6342 // if the number of arguments is odd, then we do not need any padding.
6343 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6344 BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
6345 masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
6346 masm.bind(&noPaddingNeeded);
6348 #endif
6351 // Do not bailout after the execution of this function since the stack no longer
6352 // correspond to what is expected by the snapshots.
6353 void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
6354 Register argcreg, Register newTargetAndScratch) {
6355 // Align the JitFrameLayout on the JitStackAlignment. Contrary to
6356 // |emitAllocateSpaceForApply()|, we're always pushing a magic value, because
6357 // we can't write to |newTargetAndScratch| before |new.target| has
6358 // been pushed onto the stack.
6359 if (JitStackValueAlignment > 1) {
6360 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6361 "Stack padding assumes that the frameSize is correct");
6362 MOZ_ASSERT(JitStackValueAlignment == 2);
6364 Label noPaddingNeeded;
6365 // If the number of arguments is even, then we do not need any padding.
6366 masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
6367 masm.pushValue(MagicValue(JS_ARG_POISON));
6368 masm.bind(&noPaddingNeeded);
6371 // Push |new.target| after the padding value, but before any arguments.
6372 masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndScratch);
6374 // Use newTargetAndScratch to calculate stack space (including padding).
6375 masm.movePtr(argcreg, newTargetAndScratch);
6377 // Reserve space for copying the arguments.
6378 NativeObject::elementsSizeMustNotOverflow();
6379 masm.lshiftPtr(Imm32(ValueShift), newTargetAndScratch);
6380 masm.subFromStackPtr(newTargetAndScratch);
6383 // Destroys argvIndex and copyreg.
6384 void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
6385 Register argvIndex, Register copyreg,
6386 size_t argvSrcOffset,
6387 size_t argvDstOffset) {
6388 Label loop;
6389 masm.bind(&loop);
6391 // As argvIndex is off by 1, and we use the decBranchPtr instruction
6392 // to loop back, we have to substract the size of the word which are
6393 // copied.
6394 BaseValueIndex srcPtr(argvSrcBase, argvIndex,
6395 int32_t(argvSrcOffset) - sizeof(void*));
6396 BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
6397 int32_t(argvDstOffset) - sizeof(void*));
6398 masm.loadPtr(srcPtr, copyreg);
6399 masm.storePtr(copyreg, dstPtr);
6401 // Handle 32 bits architectures.
6402 if (sizeof(Value) == 2 * sizeof(void*)) {
6403 BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
6404 int32_t(argvSrcOffset) - 2 * sizeof(void*));
6405 BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
6406 int32_t(argvDstOffset) - 2 * sizeof(void*));
6407 masm.loadPtr(srcPtrLow, copyreg);
6408 masm.storePtr(copyreg, dstPtrLow);
6411 masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
6414 void CodeGenerator::emitRestoreStackPointerFromFP() {
6415 // This is used to restore the stack pointer after a call with a dynamic
6416 // number of arguments.
6418 MOZ_ASSERT(masm.framePushed() == frameSize());
6420 int32_t offset = -int32_t(frameSize());
6421 masm.computeEffectiveAddress(Address(FramePointer, offset),
6422 masm.getStackPointer());
6425 void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
6426 Register copyreg, uint32_t extraFormals) {
6427 Label end;
6429 // Skip the copy of arguments if there are none.
6430 masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);
6432 // clang-format off
6434 // We are making a copy of the arguments which are above the JitFrameLayout
6435 // of the current Ion frame.
6437 // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
6439 // clang-format on
6441 // Compute the source and destination offsets into the stack.
6442 Register argvSrcBase = FramePointer;
6443 size_t argvSrcOffset =
6444 JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
6445 size_t argvDstOffset = 0;
6447 Register argvIndex = scratch;
6448 masm.move32(argcreg, argvIndex);
6450 // Copy arguments.
6451 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6452 argvDstOffset);
6454 // Join with all arguments copied and the extra stack usage computed.
6455 masm.bind(&end);
6458 void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
6459 Register scratch) {
6460 // Holds the function nargs. Initially the number of args to the caller.
6461 Register argcreg = ToRegister(apply->getArgc());
6462 Register copyreg = ToRegister(apply->getTempObject());
6463 uint32_t extraFormals = apply->numExtraFormals();
6465 emitAllocateSpaceForApply(argcreg, scratch);
6467 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6469 // Push |this|.
6470 masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
6473 void CodeGenerator::emitPushArguments(LApplyArgsObj* apply, Register scratch) {
6474 // argc and argsObj are mapped to the same calltemp register.
6475 MOZ_ASSERT(apply->getArgsObj() == apply->getArgc());
6477 Register tmpArgc = ToRegister(apply->getTempObject());
6478 Register argsObj = ToRegister(apply->getArgsObj());
6480 // Load argc into tmpArgc.
6481 Address lengthAddr(argsObj, ArgumentsObject::getInitialLengthSlotOffset());
6482 masm.unboxInt32(lengthAddr, tmpArgc);
6483 masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), tmpArgc);
6485 // Allocate space on the stack for arguments. This modifies scratch.
6486 emitAllocateSpaceForApply(tmpArgc, scratch);
6488 // Load arguments data
6489 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
6490 argsObj);
6491 size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
6493 // This is the end of the lifetime of argsObj.
6494 // After this call, the argsObj register holds the argument count instead.
6495 emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);
6497 masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
6500 void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
6501 Register srcBaseAndArgc,
6502 Register scratch,
6503 size_t argvSrcOffset) {
6504 // Preconditions:
6505 // 1. |tmpArgc| * sizeof(Value) bytes have been allocated at the top of
6506 // the stack to hold arguments.
6507 // 2. |srcBaseAndArgc| + |srcOffset| points to an array of |tmpArgc| values.
6509 // Postconditions:
6510 // 1. The arguments at |srcBaseAndArgc| + |srcOffset| have been copied into
6511 // the allocated space.
6512 // 2. |srcBaseAndArgc| now contains the original value of |tmpArgc|.
6514 // |scratch| is used as a temp register within this function and clobbered.
6516 Label noCopy, epilogue;
6518 // Skip the copy of arguments if there are none.
6519 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
6521 // Copy the values. This code is skipped entirely if there are
6522 // no values.
6523 size_t argvDstOffset = 0;
6525 Register argvSrcBase = srcBaseAndArgc;
6526 Register copyreg = scratch;
6528 masm.push(tmpArgc);
6529 Register argvIndex = tmpArgc;
6530 argvDstOffset += sizeof(void*);
6532 // Copy
6533 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6534 argvDstOffset);
6536 // Restore.
6537 masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
6538 masm.jump(&epilogue);
6540 // Clear argc if we skipped the copy step.
6541 masm.bind(&noCopy);
6542 masm.movePtr(ImmWord(0), srcBaseAndArgc);
6544 // Join with all arguments copied and the extra stack usage computed.
6545 // Note, "srcBase" has become "argc".
6546 masm.bind(&epilogue);
6549 void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply,
6550 Register scratch) {
6551 Register tmpArgc = ToRegister(apply->getTempObject());
6552 Register elementsAndArgc = ToRegister(apply->getElements());
6554 // Invariants guarded in the caller:
6555 // - the array is not too long
6556 // - the array length equals its initialized length
6558 // The array length is our argc for the purposes of allocating space.
6559 Address length(ToRegister(apply->getElements()),
6560 ObjectElements::offsetOfLength());
6561 masm.load32(length, tmpArgc);
6563 // Allocate space for the values.
6564 emitAllocateSpaceForApply(tmpArgc, scratch);
6566 // After this call "elements" has become "argc".
6567 size_t elementsOffset = 0;
6568 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6570 // Push |this|.
6571 masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
6574 void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct,
6575 Register scratch) {
6576 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6578 // Holds the function nargs. Initially the number of args to the caller.
6579 Register argcreg = ToRegister(construct->getArgc());
6580 Register copyreg = ToRegister(construct->getTempObject());
6581 uint32_t extraFormals = construct->numExtraFormals();
6583 // Allocate space for the values.
6584 // After this call "newTarget" has become "scratch".
6585 emitAllocateSpaceForConstructAndPushNewTarget(argcreg, scratch);
6587 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6589 // Push |this|.
6590 masm.pushValue(ToValue(construct, LConstructArgsGeneric::ThisIndex));
6593 void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct,
6594 Register scratch) {
6595 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6597 Register tmpArgc = ToRegister(construct->getTempObject());
6598 Register elementsAndArgc = ToRegister(construct->getElements());
6600 // Invariants guarded in the caller:
6601 // - the array is not too long
6602 // - the array length equals its initialized length
6604 // The array length is our argc for the purposes of allocating space.
6605 Address length(ToRegister(construct->getElements()),
6606 ObjectElements::offsetOfLength());
6607 masm.load32(length, tmpArgc);
6609 // Allocate space for the values.
6610 emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, scratch);
6612 // After this call "elements" has become "argc" and "newTarget" has become
6613 // "scratch".
6614 size_t elementsOffset = 0;
6615 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6617 // Push |this|.
6618 masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
// Shared code generation for all fun.apply/spread-call/spread-construct
// LIR nodes (T is one of the LApply*/LConstruct* instruction types).
//
// Emission order matters throughout: arguments are pushed first, then the
// callee is guarded, then either a direct JIT call (with an optional
// arguments-rectifier detour) or a slow VM invoke is performed.
template <typename T>
void CodeGenerator::emitApplyGeneric(T* apply) {
  // Holds the function object.
  Register calleereg = ToRegister(apply->getFunction());

  // Temporary register for modifying the function object.
  Register objreg = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());

  // Holds the function nargs, computed in the invoker or (for ApplyArray,
  // ConstructArray, or ApplyArgsObj) in the argument pusher.
  Register argcreg = ToRegister(apply->getArgc());

  // Copy the arguments of the current function.
  //
  // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also
  // compute argc. The argc register and the elements/argsObj register
  // are the same; argc must not be referenced before the call to
  // emitPushArguments() and elements/argsObj must not be referenced
  // after it returns.
  //
  // In the case of ConstructArray or ConstructArgs, also overwrite newTarget
  // with scratch; newTarget must not be referenced after this point.
  //
  // objreg is dead across this call.
  emitPushArguments(apply, scratch);

  masm.checkStackAlignment();

  bool constructing = apply->mir()->isConstructing();

  // If the function is native, only emit the call to InvokeFunction.
  if (apply->hasSingleTarget() &&
      apply->getSingleTarget()->isNativeWithoutJitEntry()) {
    emitCallInvokeFunction(apply);

#ifdef DEBUG
    // Native constructors are guaranteed to return an Object value, so we
    // never have to replace a primitive result with the previously allocated
    // Object from CreateThis.
    if (constructing) {
      Label notPrimitive;
      masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                               &notPrimitive);
      masm.assumeUnreachable("native constructors don't return primitives");
      masm.bind(&notPrimitive);
    }
#endif

    emitRestoreStackPointerFromFP();
    return;
  }

  Label end, invoke;

  // Unless already known, guard that calleereg is actually a function object.
  if (!apply->hasSingleTarget()) {
    masm.branchTestObjIsFunction(Assembler::NotEqual, calleereg, objreg,
                                 calleereg, &invoke);
  }

  // Guard that calleereg is an interpreted function with a JSScript.
  masm.branchIfFunctionHasNoJitEntry(calleereg, constructing, &invoke);

  // Guard that callee allows the [[Call]] or [[Construct]] operation required.
  if (constructing) {
    masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
                                 Assembler::Zero, &invoke);
  } else {
    // Class constructors are not callable; route them to the VM path, which
    // reports the proper error.
    masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
                            calleereg, objreg, &invoke);
  }

  // Use the slow path if CreateThis was unable to create the |this| object.
  if (constructing) {
    // |this| was pushed last by emitPushArguments, so it sits at SP+0.
    Address thisAddr(masm.getStackPointer(), 0);
    masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
  }

  // Call with an Ion frame or a rectifier frame.
  {
    if (apply->mir()->maybeCrossRealm()) {
      masm.switchToObjectRealm(calleereg, objreg);
    }

    // Knowing that calleereg is a non-native function, load jitcode.
    masm.loadJitCodeRaw(calleereg, objreg);

    masm.PushCalleeToken(calleereg, constructing);
    masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcreg, scratch);

    Label underflow, rejoin;

    // Check whether the provided arguments satisfy target argc.
    if (!apply->hasSingleTarget()) {
      Register nformals = scratch;
      masm.loadFunctionArgCount(calleereg, nformals);
      masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
    } else {
      masm.branch32(Assembler::Below, argcreg,
                    Imm32(apply->getSingleTarget()->nargs()), &underflow);
    }

    // Skip the construction of the rectifier frame because we have no
    // underflow.
    masm.jump(&rejoin);

    // Argument fixup needed. Get ready to call the argumentsRectifier.
    {
      masm.bind(&underflow);

      // Hardcode the address of the argumentsRectifier code.
      TrampolinePtr argumentsRectifier =
          gen->jitRuntime()->getArgumentsRectifier();
      masm.movePtr(argumentsRectifier, objreg);
    }

    masm.bind(&rejoin);

    // Finally call the function in objreg, as assigned by one of the paths
    // above.
    ensureOsiSpace();
    uint32_t callOffset = masm.callJit(objreg);
    markSafepointAt(callOffset, apply);

    if (apply->mir()->maybeCrossRealm()) {
      static_assert(!JSReturnOperand.aliases(ReturnReg),
                    "ReturnReg available as scratch after scripted calls");
      masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
    }

    // Discard JitFrameLayout fields still left on the stack.
    masm.freeStack(sizeof(JitFrameLayout) -
                   JitFrameLayout::bytesPoppedAfterCall());
    masm.jump(&end);
  }

  // Handle uncompiled or native functions.
  {
    masm.bind(&invoke);
    emitCallInvokeFunction(apply);
  }

  masm.bind(&end);

  // If the return value of the constructing function is Primitive,
  // replace the return value with the Object from CreateThis.
  if (constructing) {
    Label notPrimitive;
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.loadValue(Address(masm.getStackPointer(), 0), JSReturnOperand);

#ifdef DEBUG
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.assumeUnreachable("CreateThis creates an object");
#endif

    masm.bind(&notPrimitive);
  }

  // Pop arguments and continue.
  emitRestoreStackPointerFromFP();
}
// Codegen for fun.apply(thisArg, arguments-list) with a dynamic argc.
// Bails out (deoptimizes) if argc exceeds the JIT argument limit.
void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
  LSnapshot* snapshot = apply->snapshot();
  Register argcreg = ToRegister(apply->getArgc());

  // Ensure that we have a reasonable number of arguments.
  bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);

  emitApplyGeneric(apply);
}
// Codegen for fun.apply(thisArg, argumentsObject). Loads the arguments
// object's length (bailing out if it can't be read fast-path or is too
// large), then delegates to the generic apply path.
void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
  Register argsObj = ToRegister(apply->getArgsObj());
  Register temp = ToRegister(apply->getTempObject());

  Label bail;
  masm.loadArgumentsObjectLength(argsObj, temp, &bail);
  masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
  bailoutFrom(&bail, apply->snapshot());

  emitApplyGeneric(apply);
}
// Codegen for fun.apply(thisArg, array) / spread calls backed by a dense
// array. Bails out if the array is too long or has an uninitialized tail.
void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
  LSnapshot* snapshot = apply->snapshot();
  Register tmp = ToRegister(apply->getTempObject());

  Address length(ToRegister(apply->getElements()),
                 ObjectElements::offsetOfLength());
  masm.load32(length, tmp);

  // Ensure that we have a reasonable number of arguments.
  bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);

  // Ensure that the array does not contain an uninitialized tail.
  // (length must equal initializedLength, i.e. their difference is zero.)
  Address initializedLength(ToRegister(apply->getElements()),
                            ObjectElements::offsetOfInitializedLength());
  masm.sub32(initializedLength, tmp);
  bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);

  emitApplyGeneric(apply);
}
// Codegen for `new fn(...args)` with a dynamic argc. Bails out if argc
// exceeds the JIT argument limit, then uses the shared apply path.
void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
  LSnapshot* snapshot = lir->snapshot();
  Register argcreg = ToRegister(lir->getArgc());

  // Ensure that we have a reasonable number of arguments.
  bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);

  emitApplyGeneric(lir);
}
// Codegen for `new fn(...array)` backed by a dense array. Bails out if the
// array is too long or has an uninitialized tail, then uses the shared
// apply path.
void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
  LSnapshot* snapshot = lir->snapshot();
  Register tmp = ToRegister(lir->getTempObject());

  Address length(ToRegister(lir->getElements()),
                 ObjectElements::offsetOfLength());
  masm.load32(length, tmp);

  // Ensure that we have a reasonable number of arguments.
  bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);

  // Ensure that the array does not contain an uninitialized tail.
  // (length must equal initializedLength, i.e. their difference is zero.)
  Address initializedLength(ToRegister(lir->getElements()),
                            ObjectElements::offsetOfInitializedLength());
  masm.sub32(initializedLength, tmp);
  bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);

  emitApplyGeneric(lir);
}
6861 void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
6863 void CodeGenerator::visitUnreachable(LUnreachable* lir) {
6864 masm.assumeUnreachable("end-of-block assumed unreachable");
6867 void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) {
6868 encode(lir->snapshot());
6871 void CodeGenerator::visitUnreachableResultV(LUnreachableResultV* lir) {
6872 masm.assumeUnreachable("must be unreachable");
6875 void CodeGenerator::visitUnreachableResultT(LUnreachableResultT* lir) {
6876 masm.assumeUnreachable("must be unreachable");
// Out-of-line path to report over-recursed error and fail.
// Holds the LIR instruction whose live registers must be saved/restored
// around the CheckOverRecursed VM call (see visitCheckOverRecursedFailure).
class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;

 public:
  explicit CheckOverRecursedFailure(LInstruction* lir) : lir_(lir) {}

  // Double-dispatch back into the code generator for the OOL emission.
  void accept(CodeGenerator* codegen) override {
    codegen->visitCheckOverRecursedFailure(this);
  }

  LInstruction* lir() const { return lir_; }
};
// Emits the stack-overflow guard at function entry: compares the stack
// pointer against the runtime's JIT stack limit and branches to an
// out-of-line failure path that throws.
void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) {
  // If we don't push anything on the stack, skip the check.
  if (omitOverRecursedCheck()) {
    return;
  }

  // Ensure that this frame will not cross the stack limit.
  // This is a weak check, justified by Ion using the C stack: we must always
  // be some distance away from the actual limit, since if the limit is
  // crossed, an error must be thrown, which requires more frames.
  //
  // It must always be possible to trespass past the stack limit.
  // Ion may legally place frames very close to the limit. Calling additional
  // C functions may then violate the limit without any checking.
  //
  // Since Ion frames exist on the C stack, the stack limit may be
  // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().

  CheckOverRecursedFailure* ool = new (alloc()) CheckOverRecursedFailure(lir);
  addOutOfLineCode(ool, lir->mir());

  // Conditional forward (unlikely) branch to failure.
  const void* limitAddr = gen->runtime->addressOfJitStackLimit();
  masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
                         ool->entry());
  masm.bind(ool->rejoin());
}
// Out-of-line continuation for visitCheckOverRecursed: calls into the VM
// to throw the over-recursion error, preserving live registers.
void CodeGenerator::visitCheckOverRecursedFailure(
    CheckOverRecursedFailure* ool) {
  // The OOL path is hit if the recursion depth has been exceeded.
  // Throw an InternalError for over-recursion.

  // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
  // to save all live registers to avoid crashes if CheckOverRecursed triggers
  // a GC.
  saveLive(ool->lir());

  using Fn = bool (*)(JSContext*);
  callVM<Fn, CheckOverRecursed>(ool->lir());

  restoreLive(ool->lir());
  masm.jump(ool->rejoin());
}
// Creates (and stores in scriptCounts_) an IonScriptCounts describing every
// block in the graph, for PGO-style profiling. Returns nullptr when script
// profiling is disabled, when compiling wasm, or on OOM.
IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() {
  // If scripts are being profiled, create a new IonScriptCounts for the
  // profiling data, which will be attached to the associated JSScript or
  // wasm module after code generation finishes.
  if (!gen->hasProfilingScripts()) {
    return nullptr;
  }

  // This test inhibits IonScriptCount creation for wasm code which is
  // currently incompatible with wasm codegen for two reasons: (1) wasm code
  // must be serializable and script count codegen bakes in absolute
  // addresses, (2) wasm code does not have a JSScript with which to associate
  // code coverage data.
  JSScript* script = gen->outerInfo().script();
  if (!script) {
    return nullptr;
  }

  auto counts = MakeUnique<IonScriptCounts>();
  if (!counts || !counts->init(graph.numBlocks())) {
    return nullptr;
  }

  for (size_t i = 0; i < graph.numBlocks(); i++) {
    MBasicBlock* block = graph.getBlock(i)->mir();

    uint32_t offset = 0;
    char* description = nullptr;
    if (MResumePoint* resume = block->entryResumePoint()) {
      // Find a PC offset in the outermost script to use. If this
      // block is from an inlined script, find a location in the
      // outer script to associate information about the inlining
      // with.
      while (resume->caller()) {
        resume = resume->caller();
      }
      offset = script->pcToOffset(resume->pc());

      if (block->entryResumePoint()->caller()) {
        // Get the filename and line number of the inner script.
        JSScript* innerScript = block->info().script();
        // Ownership of |description| is passed to counts->block(i).init()
        // below; calloc keeps it NUL-terminated even if snprintf fails.
        description = js_pod_calloc<char>(200);
        if (description) {
          snprintf(description, 200, "%s:%u", innerScript->filename(),
                   innerScript->lineno());
        }
      }
    }

    if (!counts->block(i).init(block->id(), offset, description,
                               block->numSuccessors())) {
      return nullptr;
    }

    for (size_t j = 0; j < block->numSuccessors(); j++) {
      counts->block(i).setSuccessor(
          j, skipTrivialBlocks(block->getSuccessor(j))->id());
    }
  }

  // The CodeGenerator keeps a borrowed pointer; the counts object is
  // transferred to the script/module after codegen completes.
  scriptCounts_ = counts.release();
  return scriptCounts_;
}
// Structure for managing the state tracked for a block by script counters.
// RAII helper: while alive, it attaches a Sprinter to the MacroAssembler so
// generated assembly text is captured; the destructor detaches the printer
// and stores the collected text on the block's counts.
struct ScriptCountBlockState {
  IonBlockCounts& block;
  MacroAssembler& masm;

  Sprinter printer;

 public:
  ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
      : block(*block), masm(*masm), printer(GetJitContext()->cx, false) {}

  // Must be called (and succeed) before any code is emitted for the block.
  bool init() {
    if (!printer.init()) {
      return false;
    }

    // Bump the hit count for the block at the start. This code is not
    // included in either the text for the block or the instruction byte
    // counts.
    masm.inc64(AbsoluteAddress(block.addressOfHitCount()));

    // Collect human readable assembly for the code generated in the block.
    masm.setPrinter(&printer);

    return true;
  }

  // Called once per LIR instruction before its code is generated.
  void visitInstruction(LInstruction* ins) {
#ifdef JS_JITSPEW
    // Prefix stream of assembly instructions with their LIR instruction
    // name and any associated high level info.
    if (const char* extra = ins->getExtraName()) {
      printer.printf("[%s:%s]\n", ins->opName(), extra);
    } else {
      printer.printf("[%s]\n", ins->opName());
    }
#endif
  }

  ~ScriptCountBlockState() {
    masm.setPrinter(nullptr);

    // Only attach the text if the printer didn't hit OOM (partial text
    // would be misleading).
    if (!printer.hadOutOfMemory()) {
      block.setCode(printer.string());
    }
  }
};
// Loads the (patched-in) IonScript pointer into |temp| and branches to
// |invalidated| if this script has been invalidated. The movWithPatch
// placeholder is recorded in ionScriptLabels_ and patched at link time.
void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) {
  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
  masm.propagateOOM(ionScriptLabels_.append(label));

  // If IonScript::invalidationCount_ != 0, the script has been invalidated.
  masm.branch32(Assembler::NotEqual,
                Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0),
                invalidated);
}
7060 #ifdef DEBUG
// Debug-only: emits code that asserts |input| holds a valid GC pointer of
// the MIR-declared kind (Object/String/Symbol/BigInt) by calling the
// corresponding AssertValid*Ptr C++ helper. Skipped if the script has been
// invalidated, since stale results are expected then.
void CodeGenerator::emitAssertGCThingResult(Register input,
                                            const MDefinition* mir) {
  MIRType type = mir->type();
  MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
             type == MIRType::Symbol || type == MIRType::BigInt);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);

  Register temp = regs.takeAny();
  masm.push(temp);

  // Don't check if the script has been invalidated. In that case invalid
  // types are expected (until we reach the OsiPoint and bailout).
  Label done;
  branchIfInvalidated(temp, &done);

#  ifndef JS_SIMULATOR
  // Check that we have a valid GC pointer.
  // Disable for wasm because we don't have a context on wasm compilation
  // threads and this needs a context.
  // Also disable for simulator builds because the C++ call is a lot slower
  // there than on actual hardware.
  if (JitOptions.fullDebugChecks && !IsCompilingWasm()) {
    saveVolatile();
    masm.setupUnalignedABICall(temp);
    masm.loadJSContext(temp);
    masm.passABIArg(temp);
    masm.passABIArg(input);

    switch (type) {
      case MIRType::Object: {
        using Fn = void (*)(JSContext* cx, JSObject* obj);
        masm.callWithABI<Fn, AssertValidObjectPtr>();
        break;
      }
      case MIRType::String: {
        using Fn = void (*)(JSContext* cx, JSString* str);
        masm.callWithABI<Fn, AssertValidStringPtr>();
        break;
      }
      case MIRType::Symbol: {
        using Fn = void (*)(JSContext* cx, JS::Symbol* sym);
        masm.callWithABI<Fn, AssertValidSymbolPtr>();
        break;
      }
      case MIRType::BigInt: {
        using Fn = void (*)(JSContext* cx, JS::BigInt* bi);
        masm.callWithABI<Fn, AssertValidBigIntPtr>();
        break;
      }
      default:
        MOZ_CRASH();
    }

    restoreVolatile();
  }
#  endif

  masm.bind(&done);
  masm.pop(temp);
}
// Debug-only: emits code that asserts the boxed Value in |input| is valid
// (its payload is a live GC thing when it holds one) by calling
// AssertValidValue. Skipped if the script has been invalidated.
void CodeGenerator::emitAssertResultV(const ValueOperand input,
                                      const MDefinition* mir) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  masm.push(temp1);
  masm.push(temp2);

  // Don't check if the script has been invalidated. In that case invalid
  // types are expected (until we reach the OsiPoint and bailout).
  Label done;
  branchIfInvalidated(temp1, &done);

  // Check that we have a valid GC pointer.
  if (JitOptions.fullDebugChecks) {
    saveVolatile();

    // Pass the Value by address: push it and hand the helper a Value*.
    masm.pushValue(input);
    masm.moveStackPtrTo(temp1);

    using Fn = void (*)(JSContext* cx, Value* v);
    masm.setupUnalignedABICall(temp2);
    masm.loadJSContext(temp2);
    masm.passABIArg(temp2);
    masm.passABIArg(temp1);
    masm.callWithABI<Fn, AssertValidValue>();
    masm.popValue(input);
    restoreVolatile();
  }

  masm.bind(&done);
  masm.pop(temp2);
  masm.pop(temp1);
}
7161 void CodeGenerator::emitGCThingResultChecks(LInstruction* lir,
7162 MDefinition* mir) {
7163 if (lir->numDefs() == 0) {
7164 return;
7167 MOZ_ASSERT(lir->numDefs() == 1);
7168 if (lir->getDef(0)->isBogusTemp()) {
7169 return;
7172 Register output = ToRegister(lir->getDef(0));
7173 emitAssertGCThingResult(output, mir);
7176 void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) {
7177 if (lir->numDefs() == 0) {
7178 return;
7181 MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
7182 if (!lir->getDef(0)->output()->isRegister()) {
7183 return;
7186 ValueOperand output = ToOutValue(lir);
7188 emitAssertResultV(output, mir);
7191 void CodeGenerator::emitDebugResultChecks(LInstruction* ins) {
7192 // In debug builds, check that LIR instructions return valid values.
7194 MDefinition* mir = ins->mirRaw();
7195 if (!mir) {
7196 return;
7199 switch (mir->type()) {
7200 case MIRType::Object:
7201 case MIRType::String:
7202 case MIRType::Symbol:
7203 case MIRType::BigInt:
7204 emitGCThingResultChecks(ins, mir);
7205 break;
7206 case MIRType::Value:
7207 emitValueResultChecks(ins, mir);
7208 break;
7209 default:
7210 break;
// Debug-only: when the |ion.bail-after| testing option is enabled, emits a
// countdown on a runtime counter that forces a bailout at this instruction
// once the counter reaches zero. Used to fuzz bailout paths.
void CodeGenerator::emitDebugForceBailing(LInstruction* lir) {
  if (MOZ_LIKELY(!gen->options.ionBailAfterEnabled())) {
    return;
  }
  // Can only bail at instructions that have a snapshot to resume from.
  if (!lir->snapshot()) {
    return;
  }
  if (lir->isOsiPoint()) {
    return;
  }

  masm.comment("emitDebugForceBailing");
  const void* bailAfterCounterAddr =
      gen->runtime->addressOfIonBailAfterCounter();

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());

  Label done, notBail;
  // Counter == 0 means forced bailing is currently disabled.
  masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterCounterAddr),
                Imm32(0), &done);
  {
    Register temp = regs.takeAny();

    masm.push(temp);
    masm.load32(AbsoluteAddress(bailAfterCounterAddr), temp);
    masm.sub32(Imm32(1), temp);
    masm.store32(temp, AbsoluteAddress(bailAfterCounterAddr));

    // The counter just hit zero: restore the register and bail out.
    masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
    {
      masm.pop(temp);
      bailout(lir->snapshot());
    }
    masm.bind(&notBail);
    masm.pop(temp);
  }
  masm.bind(&done);
}
7252 #endif
// Main code-generation loop: iterates over every (non-trivial) basic block
// and every LIR instruction within it, dispatching to the per-opcode
// visit* method. Returns false on OOM or when recording metadata fails.
bool CodeGenerator::generateBody() {
  JitSpewCont(JitSpew_Codegen, "\n");
  AutoCreatedBy acb(masm, "CodeGenerator::generateBody");

  JitSpew(JitSpew_Codegen, "==== BEGIN CodeGenerator::generateBody ====");
  IonScriptCounts* counts = maybeCreateScriptCounts();

  const bool compilingWasm = gen->compilingWasm();

  for (size_t i = 0; i < graph.numBlocks(); i++) {
    current = graph.getBlock(i);

    // Don't emit any code for trivial blocks, containing just a goto. Such
    // blocks are created to split critical edges, and if we didn't end up
    // putting any instructions in them, we can skip them.
    if (current->isTrivial()) {
      continue;
    }

#ifdef JS_JITSPEW
    const char* filename = nullptr;
    size_t lineNumber = 0;
    JS::LimitedColumnNumberZeroOrigin columnNumber;
    if (current->mir()->info().script()) {
      filename = current->mir()->info().script()->filename();
      if (current->mir()->pc()) {
        lineNumber = PCToLineNumber(current->mir()->info().script(),
                                    current->mir()->pc(), &columnNumber);
      }
    }
    JitSpew(JitSpew_Codegen, "--------------------------------");
    JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", i,
            filename ? filename : "?", lineNumber,
            columnNumber.zeroOriginValue(),
            current->mir()->isLoopHeader() ? " (loop header)" : "");
#endif

    // Align loop headers in wasm for better branch-target performance.
    if (current->mir()->isLoopHeader() && compilingWasm) {
      masm.nopAlign(CodeAlignment);
    }

    masm.bind(current->label());

    mozilla::Maybe<ScriptCountBlockState> blockCounts;
    if (counts) {
      blockCounts.emplace(&counts->block(i), &masm);
      if (!blockCounts->init()) {
        return false;
      }
    }

    for (LInstructionIterator iter = current->begin(); iter != current->end();
         iter++) {
      if (!alloc().ensureBallast()) {
        return false;
      }

      perfSpewer_.recordInstruction(masm, *iter);
#ifdef JS_JITSPEW
      JitSpewStart(JitSpew_Codegen, "                                # LIR=%s",
                   iter->opName());
      if (const char* extra = iter->getExtraName()) {
        JitSpewCont(JitSpew_Codegen, ":%s", extra);
      }
      JitSpewFin(JitSpew_Codegen);
#endif

      if (counts) {
        blockCounts->visitInstruction(*iter);
      }

#ifdef CHECK_OSIPOINT_REGISTERS
      if (iter->safepoint() && !compilingWasm) {
        resetOsiPointRegs(iter->safepoint());
      }
#endif

      if (!compilingWasm) {
        if (MDefinition* mir = iter->mirRaw()) {
          if (!addNativeToBytecodeEntry(mir->trackedSite())) {
            return false;
          }
        }
      }

      setElement(*iter);  // needed to encode correct snapshot location.

#ifdef DEBUG
      emitDebugForceBailing(*iter);
#endif

      // Dispatch on the LIR opcode; the macro expands to one case per op.
      switch (iter->op()) {
#ifndef JS_CODEGEN_NONE
#  define LIROP(op)              \
    case LNode::Opcode::op:      \
      visit##op(iter->to##op()); \
      break;
        LIR_OPCODE_LIST(LIROP)
#  undef LIROP
#endif
        case LNode::Opcode::Invalid:
        default:
          MOZ_CRASH("Invalid LIR op");
      }

#ifdef DEBUG
      if (!counts) {
        emitDebugResultChecks(*iter);
      }
#endif

      if (masm.oom()) {
        return false;
      }
    }
  }

  JitSpew(JitSpew_Codegen, "==== END CodeGenerator::generateBody ====\n");
  return true;
}
// Out-of-line object allocation for LNewArray.
// Taken when inline GC allocation fails; falls back to the VM call path
// (see visitOutOfLineNewArray).
class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator> {
  LNewArray* lir_;

 public:
  explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) {}

  // Double-dispatch back into the code generator for the OOL emission.
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineNewArray(this);
  }

  LNewArray* lir() const { return lir_; }
};
// VM-call fallback for array allocation. Chooses between
// NewArrayWithShape (when a template object is available) and the generic
// NewArrayOperation, and stores the returned pointer in the output register.
void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) {
  Register objReg = ToRegister(lir->output());

  // This path is also reached from out-of-line code, so registers must be
  // saved/restored manually instead of relying on the LIR call machinery.
  MOZ_ASSERT(!lir->isCall());
  saveLive(lir);

  JSObject* templateObject = lir->mir()->templateObject();

  if (templateObject) {
    pushArg(ImmGCPtr(templateObject->shape()));
    pushArg(Imm32(lir->mir()->length()));

    using Fn = ArrayObject* (*)(JSContext*, uint32_t, Handle<Shape*>);
    callVM<Fn, NewArrayWithShape>(lir);
  } else {
    pushArg(Imm32(GenericObject));
    pushArg(Imm32(lir->mir()->length()));

    using Fn = ArrayObject* (*)(JSContext*, uint32_t, NewObjectKind);
    callVM<Fn, NewArrayOperation>(lir);
  }

  masm.storeCallPointerResult(objReg);

  MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
  restoreLive(lir);
}
// Codegen for Math.atan2(y, x): forwards to the C++ ecmaAtan2 helper via an
// ABI call. Arguments are passed in (y, x) order, matching the helper's
// signature.
void CodeGenerator::visitAtan2D(LAtan2D* lir) {
  FloatRegister y = ToFloatRegister(lir->y());
  FloatRegister x = ToFloatRegister(lir->x());

  using Fn = double (*)(double x, double y);
  masm.setupAlignedABICall();
  masm.passABIArg(y, MoveOp::DOUBLE);
  masm.passABIArg(x, MoveOp::DOUBLE);
  masm.callWithABI<Fn, ecmaAtan2>(MoveOp::DOUBLE);

  MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
}
// Codegen for Math.hypot with 2-4 arguments: forwards to the matching C++
// helper (ecmaHypot/hypot3/hypot4) via an ABI call.
void CodeGenerator::visitHypot(LHypot* lir) {
  uint32_t numArgs = lir->numArgs();
  masm.setupAlignedABICall();

  for (uint32_t i = 0; i < numArgs; ++i) {
    masm.passABIArg(ToFloatRegister(lir->getOperand(i)), MoveOp::DOUBLE);
  }

  switch (numArgs) {
    case 2: {
      using Fn = double (*)(double x, double y);
      masm.callWithABI<Fn, ecmaHypot>(MoveOp::DOUBLE);
      break;
    }
    case 3: {
      using Fn = double (*)(double x, double y, double z);
      masm.callWithABI<Fn, hypot3>(MoveOp::DOUBLE);
      break;
    }
    case 4: {
      using Fn = double (*)(double x, double y, double z, double w);
      masm.callWithABI<Fn, hypot4>(MoveOp::DOUBLE);
      break;
    }
    default:
      MOZ_CRASH("Unexpected number of arguments to hypot function.");
  }
  MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
}
// Codegen for array literal/constructor allocation with a known length.
// Tries an inline GC nursery/tenured allocation from the template object,
// with an out-of-line VM-call fallback.
void CodeGenerator::visitNewArray(LNewArray* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());
  DebugOnly<uint32_t> length = lir->mir()->length();

  MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);

  // MIR decided inline allocation isn't possible; go straight to the VM.
  if (lir->mir()->isVMCall()) {
    visitNewArrayCallVM(lir);
    return;
  }

  OutOfLineNewArray* ool = new (alloc()) OutOfLineNewArray(lir);
  addOutOfLineCode(ool, lir->mir());

  TemplateObject templateObject(lir->mir()->templateObject());
#ifdef DEBUG
  size_t numInlineElements = gc::GetGCKindSlots(templateObject.getAllocKind()) -
                             ObjectElements::VALUES_PER_HEADER;
  MOZ_ASSERT(length <= numInlineElements,
             "Inline allocation only supports inline elements");
#endif
  masm.createGCObject(objReg, tempReg, templateObject,
                      lir->mir()->initialHeap(), ool->entry());

  masm.bind(ool->rejoin());
}
7487 void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) {
7488 visitNewArrayCallVM(ool->lir());
7489 masm.jump(ool->rejoin());
7492 void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) {
7493 Register lengthReg = ToRegister(lir->length());
7494 Register objReg = ToRegister(lir->output());
7495 Register tempReg = ToRegister(lir->temp0());
7497 JSObject* templateObject = lir->mir()->templateObject();
7498 gc::Heap initialHeap = lir->mir()->initialHeap();
7500 using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
7501 OutOfLineCode* ool = oolCallVM<Fn, ArrayConstructorOneArg>(
7502 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
7503 StoreRegisterTo(objReg));
7505 bool canInline = true;
7506 size_t inlineLength = 0;
7507 if (templateObject->as<ArrayObject>().hasFixedElements()) {
7508 size_t numSlots =
7509 gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
7510 inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
7511 } else {
7512 canInline = false;
7515 if (canInline) {
7516 // Try to do the allocation inline if the template object is big enough
7517 // for the length in lengthReg. If the length is bigger we could still
7518 // use the template object and not allocate the elements, but it's more
7519 // efficient to do a single big allocation than (repeatedly) reallocating
7520 // the array later on when filling it.
7521 masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength),
7522 ool->entry());
7524 TemplateObject templateObj(templateObject);
7525 masm.createGCObject(objReg, tempReg, templateObj, initialHeap,
7526 ool->entry());
7528 size_t lengthOffset = NativeObject::offsetOfFixedElements() +
7529 ObjectElements::offsetOfLength();
7530 masm.store32(lengthReg, Address(objReg, lengthOffset));
7531 } else {
7532 masm.jump(ool->entry());
7535 masm.bind(ool->rejoin());
// Codegen for allocating one of the built-in iterator objects
// (array/string/regexp-string iterator) from its template object, with a
// per-kind VM-call fallback.
void CodeGenerator::visitNewIterator(LNewIterator* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  OutOfLineCode* ool;
  switch (lir->mir()->type()) {
    case MNewIterator::ArrayIterator: {
      using Fn = ArrayIteratorObject* (*)(JSContext*);
      ool = oolCallVM<Fn, NewArrayIterator>(lir, ArgList(),
                                            StoreRegisterTo(objReg));
      break;
    }
    case MNewIterator::StringIterator: {
      using Fn = StringIteratorObject* (*)(JSContext*);
      ool = oolCallVM<Fn, NewStringIterator>(lir, ArgList(),
                                             StoreRegisterTo(objReg));
      break;
    }
    case MNewIterator::RegExpStringIterator: {
      using Fn = RegExpStringIteratorObject* (*)(JSContext*);
      ool = oolCallVM<Fn, NewRegExpStringIterator>(lir, ArgList(),
                                                   StoreRegisterTo(objReg));
      break;
    }
    default:
      MOZ_CRASH("unexpected iterator type");
  }

  TemplateObject templateObject(lir->mir()->templateObject());
  masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Codegen for typed-array allocation with a length known at compile time:
// inline GC allocation from the template object plus slot/elements
// initialization, with a VM-call fallback.
void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());
  Register lengthReg = ToRegister(lir->temp1());
  LiveRegisterSet liveRegs = liveVolatileRegs(lir);

  JSObject* templateObject = lir->mir()->templateObject();
  gc::Heap initialHeap = lir->mir()->initialHeap();

  TypedArrayObject* ttemplate = &templateObject->as<TypedArrayObject>();

  size_t n = ttemplate->length();
  MOZ_ASSERT(n <= INT32_MAX,
             "Template objects are only created for int32 lengths");

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
  OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
      lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
      StoreRegisterTo(objReg));

  TemplateObject templateObj(templateObject);
  masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());

  // Initialize the typed array's slots and (fixed-length) element storage.
  masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
                           ttemplate, MacroAssembler::TypedArrayLength::Fixed);

  masm.bind(ool->rejoin());
}
// Codegen for typed-array allocation with a dynamic length: like
// visitNewTypedArray, but the element count comes from a register and the
// element storage is sized at runtime.
void CodeGenerator::visitNewTypedArrayDynamicLength(
    LNewTypedArrayDynamicLength* lir) {
  Register lengthReg = ToRegister(lir->length());
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());
  LiveRegisterSet liveRegs = liveVolatileRegs(lir);

  JSObject* templateObject = lir->mir()->templateObject();
  gc::Heap initialHeap = lir->mir()->initialHeap();

  TypedArrayObject* ttemplate = &templateObject->as<TypedArrayObject>();

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
  OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
      lir, ArgList(ImmGCPtr(templateObject), lengthReg),
      StoreRegisterTo(objReg));

  // Volatile |lengthReg| is saved across the ABI call in |initTypedArraySlots|.
  MOZ_ASSERT_IF(lengthReg.volatile_(), liveRegs.has(lengthReg));

  TemplateObject templateObj(templateObject);
  masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());

  masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
                           ttemplate,
                           MacroAssembler::TypedArrayLength::Dynamic);

  masm.bind(ool->rejoin());
}
7632 void CodeGenerator::visitNewTypedArrayFromArray(LNewTypedArrayFromArray* lir) {
7633 pushArg(ToRegister(lir->array()));
7634 pushArg(ImmGCPtr(lir->mir()->templateObject()));
7636 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
7637 callVM<Fn, js::NewTypedArrayWithTemplateAndArray>(lir);
// Codegen for constructing a typed array view over an ArrayBuffer
// (with optional byteOffset/length Values): delegates entirely to the VM.
// Arguments are pushed in reverse order of the C++ signature.
void CodeGenerator::visitNewTypedArrayFromArrayBuffer(
    LNewTypedArrayFromArrayBuffer* lir) {
  pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::LengthIndex));
  pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::ByteOffsetIndex));
  pushArg(ToRegister(lir->arrayBuffer()));
  pushArg(ImmGCPtr(lir->mir()->templateObject()));

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
                                   HandleValue, HandleValue);
  callVM<Fn, js::NewTypedArrayWithTemplateAndBuffer>(lir);
}
// Codegen for Function.prototype.bind. Attempts an inline allocation of the
// BoundFunctionObject and hands it (or nullptr on failure) to the VM, which
// fills it in (or allocates it in C++).
void CodeGenerator::visitBindFunction(LBindFunction* lir) {
  Register target = ToRegister(lir->target());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // Try to allocate a new BoundFunctionObject we can pass to the VM function.
  // If this fails, we set temp1 to nullptr so we do the allocation in C++.
  TemplateObject templateObject(lir->mir()->templateObject());
  Label allocOk, allocFailed;
  masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
                      &allocFailed);
  masm.jump(&allocOk);

  masm.bind(&allocFailed);
  masm.movePtr(ImmWord(0), temp1);

  masm.bind(&allocOk);

  // Set temp2 to the address of the first argument on the stack.
  // Note that the Value slots used for arguments are currently aligned for a
  // JIT call, even though that's not strictly necessary for calling into C++.
  uint32_t argc = lir->mir()->numStackArgs();
  if (JitStackValueAlignment > 1) {
    // Round the arg count up so the address math matches the padded layout.
    argc = AlignBytes(argc, JitStackValueAlignment);
  }
  uint32_t unusedStack = UnusedStackBytesForCall(argc);
  masm.computeEffectiveAddress(Address(masm.getStackPointer(), unusedStack),
                               temp2);

  // Push args in reverse order of functionBindImpl's parameter list. Note the
  // VM call receives the original (unaligned) numStackArgs.
  pushArg(temp1);
  pushArg(Imm32(lir->mir()->numStackArgs()));
  pushArg(temp2);
  pushArg(target);

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
                                      uint32_t, Handle<BoundFunctionObject*>);
  callVM<Fn, js::BoundFunctionObject::functionBindImpl>(lir);
}
// Codegen for allocating a fresh BoundFunctionObject from a template, with a
// VM fallback when inline GC allocation fails.
void CodeGenerator::visitNewBoundFunction(LNewBoundFunction* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  JSObject* templateObj = lir->mir()->templateObj();

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<BoundFunctionObject*>);
  OutOfLineCode* ool = oolCallVM<Fn, BoundFunctionObject::createWithTemplate>(
      lir, ArgList(ImmGCPtr(templateObj)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Out-of-line object allocation for JSOp::NewObject. Holds the LIR node so
// the OOL path can re-dispatch to the VM-call code path.
class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator> {
  LNewObject* lir_;

 public:
  explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineNewObject(this);
  }

  LNewObject* lir() const { return lir_; }
};
// Slow path for JSOp::NewObject: allocate the object entirely in the VM.
// Used both directly (when the MIR says VM call) and from the OOL path.
void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) {
  Register objReg = ToRegister(lir->output());

  MOZ_ASSERT(!lir->isCall());
  saveLive(lir);

  JSObject* templateObject = lir->mir()->templateObject();

  // If we're making a new object with a class prototype (that is, an object
  // that derives its class from its prototype instead of being
  // PlainObject::class_'d) from self-hosted code, we need a different init
  // function.
  switch (lir->mir()->mode()) {
    case MNewObject::ObjectLiteral: {
      MOZ_ASSERT(!templateObject);
      pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
      pushArg(ImmGCPtr(lir->mir()->block()->info().script()));

      using Fn = JSObject* (*)(JSContext*, HandleScript, const jsbytecode* pc);
      callVM<Fn, NewObjectOperation>(lir);
      break;
    }
    case MNewObject::ObjectCreate: {
      pushArg(ImmGCPtr(templateObject));

      using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
      callVM<Fn, ObjectCreateWithTemplate>(lir);
      break;
    }
  }

  masm.storeCallPointerResult(objReg);

  // The output must not be clobbered when restoring live registers.
  MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
  restoreLive(lir);
}
// Decide whether a new plain object's fixed slots must be pre-filled with
// |undefined|. Returns false (no init needed) only when the MIR instructions
// immediately following the allocation provably store to every fixed slot
// before anything can GC or observe the object. As a side effect, disables
// the pre-barrier on those stores (the slots are freshly allocated).
static bool ShouldInitFixedSlots(LNewPlainObject* lir, const Shape* shape,
                                 uint32_t nfixed) {
  // Look for StoreFixedSlot instructions following an object allocation
  // that write to this object before a GC is triggered or this object is
  // passed to a VM call. If all fixed slots will be initialized, the
  // allocation code doesn't need to set the slots to |undefined|.

  if (nfixed == 0) {
    return false;
  }

  // Keep track of the fixed slots that are initialized. initializedSlots is
  // a bit mask with a bit for each slot.
  MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
  static_assert(NativeObject::MAX_FIXED_SLOTS <= 32,
                "Slot bits must fit in 32 bits");
  uint32_t initializedSlots = 0;
  uint32_t numInitialized = 0;

  MInstruction* allocMir = lir->mir();
  MBasicBlock* block = allocMir->block();

  // Skip the allocation instruction.
  MInstructionIterator iter = block->begin(allocMir);
  MOZ_ASSERT(*iter == allocMir);
  iter++;

  // Handle the leading shape guard, if present. Subsequent stores may refer
  // to the guard instead of the allocation, so track it as |allocMir|.
  for (; iter != block->end(); iter++) {
    if (iter->isConstant()) {
      // This instruction won't trigger a GC or read object slots.
      continue;
    }
    if (iter->isGuardShape()) {
      auto* guard = iter->toGuardShape();
      if (guard->object() != allocMir || guard->shape() != shape) {
        // Guard on a different object/shape: be conservative.
        return true;
      }
      allocMir = guard;
      iter++;
    }
    break;
  }

  for (; iter != block->end(); iter++) {
    if (iter->isConstant() || iter->isPostWriteBarrier()) {
      // These instructions won't trigger a GC or read object slots.
      continue;
    }

    if (iter->isStoreFixedSlot()) {
      MStoreFixedSlot* store = iter->toStoreFixedSlot();
      if (store->object() != allocMir) {
        // Store to some other object: can't reason further.
        return true;
      }

      // We may not initialize this object slot on allocation, so the
      // pre-barrier could read uninitialized memory. Simply disable
      // the barrier for this store: the object was just initialized
      // so the barrier is not necessary.
      store->setNeedsBarrier(false);

      uint32_t slot = store->slot();
      MOZ_ASSERT(slot < nfixed);
      if ((initializedSlots & (1 << slot)) == 0) {
        numInitialized++;
        initializedSlots |= (1 << slot);
      }

      if (numInitialized == nfixed) {
        // All fixed slots will be initialized.
        MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
        return false;
      }
      continue;
    }

    // Unhandled instruction, assume it bails or reads object slots.
    return true;
  }

  MOZ_CRASH("Shouldn't get here");
}
// Codegen for JSOp::NewObject: inline template-based allocation with an
// out-of-line VM fallback, or a direct VM call when the MIR demands it.
void CodeGenerator::visitNewObject(LNewObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());

  if (lir->mir()->isVMCall()) {
    visitNewObjectVMCall(lir);
    return;
  }

  OutOfLineNewObject* ool = new (alloc()) OutOfLineNewObject(lir);
  addOutOfLineCode(ool, lir->mir());

  TemplateObject templateObject(lir->mir()->templateObject());

  masm.createGCObject(objReg, tempReg, templateObject,
                      lir->mir()->initialHeap(), ool->entry());

  masm.bind(ool->rejoin());
}
// OOL path for visitNewObject: do the VM-call allocation, then rejoin the
// inline code after the fast path.
void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) {
  visitNewObjectVMCall(ool->lir());
  masm.jump(ool->rejoin());
}
7868 void CodeGenerator::visitNewPlainObject(LNewPlainObject* lir) {
7869 Register objReg = ToRegister(lir->output());
7870 Register temp0Reg = ToRegister(lir->temp0());
7871 Register temp1Reg = ToRegister(lir->temp1());
7872 Register shapeReg = ToRegister(lir->temp2());
7874 auto* mir = lir->mir();
7875 const Shape* shape = mir->shape();
7876 gc::Heap initialHeap = mir->initialHeap();
7877 gc::AllocKind allocKind = mir->allocKind();
7879 using Fn =
7880 JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind, gc::Heap);
7881 OutOfLineCode* ool = oolCallVM<Fn, NewPlainObjectOptimizedFallback>(
7882 lir,
7883 ArgList(ImmGCPtr(shape), Imm32(int32_t(allocKind)),
7884 Imm32(int32_t(initialHeap))),
7885 StoreRegisterTo(objReg));
7887 bool initContents = ShouldInitFixedSlots(lir, shape, mir->numFixedSlots());
7889 masm.movePtr(ImmGCPtr(shape), shapeReg);
7890 masm.createPlainGCObject(
7891 objReg, shapeReg, temp0Reg, temp1Reg, mir->numFixedSlots(),
7892 mir->numDynamicSlots(), allocKind, initialHeap, ool->entry(),
7893 AllocSiteInput(gc::CatchAllAllocSite::Optimized), initContents);
7895 #ifdef DEBUG
7896 // ShouldInitFixedSlots expects that the leading GuardShape will never fail,
7897 // so ensure the newly created object has the correct shape. Should the guard
7898 // ever fail, we may end up with uninitialized fixed slots, which can confuse
7899 // the GC.
7900 Label ok;
7901 masm.branchTestObjShape(Assembler::Equal, objReg, shape, temp0Reg, objReg,
7902 &ok);
7903 masm.assumeUnreachable("Newly created object has the correct shape");
7904 masm.bind(&ok);
7905 #endif
7907 masm.bind(ool->rejoin());
// Codegen for allocating an ArrayObject with a fixed, compile-time-known
// length: picks an alloc kind from the length, allocates inline with fixed
// elements, and falls back to the VM on failure.
void CodeGenerator::visitNewArrayObject(LNewArrayObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register temp0Reg = ToRegister(lir->temp0());
  Register shapeReg = ToRegister(lir->temp1());

  auto* mir = lir->mir();
  uint32_t arrayLength = mir->length();

  // Choose a GC alloc kind big enough for the elements header plus length.
  gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
  MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
  allocKind = ForegroundToBackgroundAllocKind(allocKind);

  uint32_t slotCount = GetGCKindSlots(allocKind);
  MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
  // Capacity is whatever slot space remains after the elements header.
  uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;

  const Shape* shape = mir->shape();

  NewObjectKind objectKind =
      mir->initialHeap() == gc::Heap::Tenured ? TenuredObject : GenericObject;

  using Fn =
      ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, NewObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, NewArrayObjectOptimizedFallback>(
      lir,
      ArgList(Imm32(arrayLength), Imm32(int32_t(allocKind)), Imm32(objectKind)),
      StoreRegisterTo(objReg));

  masm.movePtr(ImmPtr(shape), shapeReg);
  masm.createArrayWithFixedElements(
      objReg, shapeReg, temp0Reg, InvalidReg, arrayLength, arrayCapacity, 0, 0,
      allocKind, mir->initialHeap(), ool->entry(),
      AllocSiteInput(gc::CatchAllAllocSite::Optimized));
  masm.bind(ool->rejoin());
}
// Codegen for allocating the NamedLambdaObject environment for a named lambda,
// from a template with a VM fallback.
void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());
  const CompileInfo& info = lir->mir()->block()->info();

  using Fn = js::NamedLambdaObject* (*)(JSContext*, HandleFunction);
  OutOfLineCode* ool = oolCallVM<Fn, NamedLambdaObject::createWithoutEnclosing>(
      lir, ArgList(info.funMaybeLazy()), StoreRegisterTo(objReg));

  TemplateObject templateObject(lir->mir()->templateObj());

  masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Codegen for allocating a CallObject (function environment) from a template,
// with a VM fallback keyed on the template's shared shape.
void CodeGenerator::visitNewCallObject(LNewCallObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  CallObject* templateObj = lir->mir()->templateObject();

  using Fn = CallObject* (*)(JSContext*, Handle<SharedShape*>);
  OutOfLineCode* ool = oolCallVM<Fn, CallObject::createWithShape>(
      lir, ArgList(ImmGCPtr(templateObj->sharedShape())),
      StoreRegisterTo(objReg));

  // Inline call object creation, using the OOL path only for tricky cases.
  TemplateObject templateObject(templateObj);
  masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Codegen for |new String(s)|: allocate the StringObject inline from a
// template, then fill in its primitive-value and length slots.
void CodeGenerator::visitNewStringObject(LNewStringObject* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  StringObject* templateObj = lir->mir()->templateObj();

  using Fn = JSObject* (*)(JSContext*, HandleString);
  OutOfLineCode* ool = oolCallVM<Fn, NewStringObject>(lir, ArgList(input),
                                                      StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                      ool->entry());

  // The length slot stores the string length as an Int32 value.
  masm.loadStringLength(input, temp);

  masm.storeValue(JSVAL_TYPE_STRING, input,
                  Address(output, StringObject::offsetOfPrimitiveValue()));
  masm.storeValue(JSVAL_TYPE_INT32, temp,
                  Address(output, StringObject::offsetOfLength()));

  masm.bind(ool->rejoin());
}
8007 void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) {
8008 Register obj = ToRegister(lir->object());
8009 Register value = ToRegister(lir->value());
8011 pushArg(value);
8012 pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex));
8013 pushArg(obj);
8014 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
8016 using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
8017 HandleObject);
8018 callVM<Fn, InitElemGetterSetterOperation>(lir);
8021 void CodeGenerator::visitMutateProto(LMutateProto* lir) {
8022 Register objReg = ToRegister(lir->object());
8024 pushArg(ToValue(lir, LMutateProto::ValueIndex));
8025 pushArg(objReg);
8027 using Fn =
8028 bool (*)(JSContext* cx, Handle<PlainObject*> obj, HandleValue value);
8029 callVM<Fn, MutatePrototype>(lir);
8032 void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) {
8033 Register obj = ToRegister(lir->object());
8034 Register value = ToRegister(lir->value());
8036 pushArg(value);
8037 pushArg(ImmGCPtr(lir->mir()->name()));
8038 pushArg(obj);
8039 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
8041 using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject,
8042 Handle<PropertyName*>, HandleObject);
8043 callVM<Fn, InitPropGetterSetterOperation>(lir);
8046 void CodeGenerator::visitCreateThis(LCreateThis* lir) {
8047 const LAllocation* callee = lir->callee();
8048 const LAllocation* newTarget = lir->newTarget();
8050 if (newTarget->isConstant()) {
8051 pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
8052 } else {
8053 pushArg(ToRegister(newTarget));
8056 if (callee->isConstant()) {
8057 pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
8058 } else {
8059 pushArg(ToRegister(callee));
8062 using Fn = bool (*)(JSContext* cx, HandleObject callee,
8063 HandleObject newTarget, MutableHandleValue rval);
8064 callVM<Fn, jit::CreateThisFromIon>(lir);
// Codegen for materializing the |arguments| object. Fast path: allocate from
// a template (slots uninitialized) and finish it with a pure ABI call; on any
// failure, fall back to a full VM call.
void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) {
  // This should be getting constructed in the first block only, and not any OSR
  // entry blocks.
  MOZ_ASSERT(lir->mir()->block()->id() == 0);

  Register callObj = ToRegister(lir->callObject());
  Register temp0 = ToRegister(lir->temp0());
  Label done;

  if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
    Register objTemp = ToRegister(lir->temp1());
    Register cxTemp = ToRegister(lir->temp2());

    // Save callObj; finishForIonPure clobbers volatile registers.
    masm.Push(callObj);

    // Try to allocate an arguments object. This will leave the reserved
    // slots uninitialized, so it's important we don't GC until we
    // initialize these slots in ArgumentsObject::finishForIonPure.
    Label failure;
    TemplateObject templateObject(templateObj);
    masm.createGCObject(objTemp, temp0, templateObject, gc::Heap::Default,
                        &failure,
                        /* initContents = */ false);

    // temp0 = address of the JitFrameLayout (frame pointer for the ABI call).
    masm.moveStackPtrTo(temp0);
    masm.addPtr(Imm32(masm.framePushed()), temp0);

    using Fn = ArgumentsObject* (*)(JSContext* cx, jit::JitFrameLayout* frame,
                                    JSObject* scopeChain, ArgumentsObject* obj);
    masm.setupAlignedABICall();
    masm.loadJSContext(cxTemp);
    masm.passABIArg(cxTemp);
    masm.passABIArg(temp0);
    masm.passABIArg(callObj);
    masm.passABIArg(objTemp);

    masm.callWithABI<Fn, ArgumentsObject::finishForIonPure>();
    // A null return means the pure finisher failed; take the VM path.
    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);

    // Discard saved callObj on the stack.
    masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
    masm.jump(&done);

    masm.bind(&failure);
    masm.Pop(callObj);
  }

  // Slow path: compute the frame address and call into the VM.
  masm.moveStackPtrTo(temp0);
  masm.addPtr(Imm32(frameSize()), temp0);

  pushArg(callObj);
  pushArg(temp0);

  using Fn = ArgumentsObject* (*)(JSContext*, JitFrameLayout*, HandleObject);
  callVM<Fn, ArgumentsObject::createForIon>(lir);

  masm.bind(&done);
}
// Codegen for materializing |arguments| for an inlined frame: the actuals are
// spilled to a contiguous stack array first, then an inline allocation plus
// pure ABI finisher is attempted, with a VM fallback.
void CodeGenerator::visitCreateInlinedArgumentsObject(
    LCreateInlinedArgumentsObject* lir) {
  Register callObj = ToRegister(lir->getCallObject());
  Register callee = ToRegister(lir->getCallee());
  Register argsAddress = ToRegister(lir->temp1());
  Register argsObj = ToRegister(lir->temp2());

  // TODO: Do we have to worry about alignment here?

  // Create a contiguous array of values for ArgumentsObject::create
  // by pushing the arguments onto the stack in reverse order.
  uint32_t argc = lir->mir()->numActuals();
  for (uint32_t i = 0; i < argc; i++) {
    uint32_t argNum = argc - i - 1;
    uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(argNum);
    ConstantOrRegister arg =
        toConstantOrRegister(lir, index, lir->mir()->getArg(argNum)->type());
    masm.Push(arg);
  }
  masm.moveStackPtrTo(argsAddress);

  Label done;
  if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
    LiveRegisterSet liveRegs;
    liveRegs.add(callObj);
    liveRegs.add(callee);

    masm.PushRegsInMask(liveRegs);

    // We are free to clobber all registers, as LCreateInlinedArgumentsObject is
    // a call instruction.
    AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    allRegs.take(callObj);
    allRegs.take(callee);
    allRegs.take(argsObj);
    allRegs.take(argsAddress);

    Register temp3 = allRegs.takeAny();
    Register temp4 = allRegs.takeAny();

    // Try to allocate an arguments object. This will leave the reserved slots
    // uninitialized, so it's important we don't GC until we initialize these
    // slots in ArgumentsObject::finishForIonPure.
    Label failure;
    TemplateObject templateObject(templateObj);
    masm.createGCObject(argsObj, temp3, templateObject, gc::Heap::Default,
                        &failure,
                        /* initContents = */ false);

    // temp3 is free again after the allocation; reuse it for argc.
    Register numActuals = temp3;
    masm.move32(Imm32(argc), numActuals);

    using Fn = ArgumentsObject* (*)(JSContext*, JSObject*, JSFunction*, Value*,
                                    uint32_t, ArgumentsObject*);
    masm.setupAlignedABICall();
    masm.loadJSContext(temp4);
    masm.passABIArg(temp4);
    masm.passABIArg(callObj);
    masm.passABIArg(callee);
    masm.passABIArg(argsAddress);
    masm.passABIArg(numActuals);
    masm.passABIArg(argsObj);

    masm.callWithABI<Fn, ArgumentsObject::finishInlineForIonPure>();
    // Null return from the pure finisher means fall back to the VM.
    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);

    // Discard saved callObj, callee, and values array on the stack.
    masm.addToStackPtr(
        Imm32(masm.PushRegsInMaskSizeInBytes(liveRegs) + argc * sizeof(Value)));
    masm.jump(&done);

    masm.bind(&failure);
    masm.PopRegsInMask(liveRegs);

    // Reload argsAddress because it may have been overridden.
    masm.moveStackPtrTo(argsAddress);
  }

  // Slow path: full VM call with the stack array of actuals.
  pushArg(Imm32(argc));
  pushArg(callObj);
  pushArg(callee);
  pushArg(argsAddress);

  using Fn = ArgumentsObject* (*)(JSContext*, Value*, HandleFunction,
                                  HandleObject, uint32_t);
  callVM<Fn, ArgumentsObject::createForInlinedIon>(lir);

  // Discard the array of values.
  masm.freeStack(argc * sizeof(Value));

  masm.bind(&done);
}
// Shared emitter for LGetInlinedArgument(/Hole): select one of the inlined
// frame's actual arguments by a runtime |index| that has already been
// bounds-checked. Emits an unrolled compare chain over the known actuals.
template <class GetInlinedArgument>
void CodeGenerator::emitGetInlinedArgument(GetInlinedArgument* lir,
                                           Register index,
                                           ValueOperand output) {
  uint32_t numActuals = lir->mir()->numActuals();
  MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);

  // The index has already been bounds-checked, so the code we
  // generate here should be unreachable. We can end up in this
  // situation in self-hosted code using GetArgument(), or in a
  // monomorphically inlined function if we've inlined some CacheIR
  // that was created for a different caller.
  if (numActuals == 0) {
    masm.assumeUnreachable("LGetInlinedArgument: invalid index");
    return;
  }

  // Check the first n-1 possible indices.
  Label done;
  for (uint32_t i = 0; i < numActuals - 1; i++) {
    Label skip;
    ConstantOrRegister arg = toConstantOrRegister(
        lir, GetInlinedArgument::ArgIndex(i), lir->mir()->getArg(i)->type());
    masm.branch32(Assembler::NotEqual, index, Imm32(i), &skip);
    masm.moveValue(arg, output);

    masm.jump(&done);
    masm.bind(&skip);
  }

#ifdef DEBUG
  // In debug builds, verify the fall-through really is the last index.
  Label skip;
  masm.branch32(Assembler::Equal, index, Imm32(numActuals - 1), &skip);
  masm.assumeUnreachable("LGetInlinedArgument: invalid index");
  masm.bind(&skip);
#endif

  // The index has already been bounds-checked, so load the last argument.
  uint32_t lastIdx = numActuals - 1;
  ConstantOrRegister arg =
      toConstantOrRegister(lir, GetInlinedArgument::ArgIndex(lastIdx),
                           lir->mir()->getArg(lastIdx)->type());
  masm.moveValue(arg, output);
  masm.bind(&done);
}
8265 void CodeGenerator::visitGetInlinedArgument(LGetInlinedArgument* lir) {
8266 Register index = ToRegister(lir->getIndex());
8267 ValueOperand output = ToOutValue(lir);
8269 emitGetInlinedArgument(lir, index, output);
// Codegen for reading an inlined-frame argument where the index may be out of
// bounds: in-bounds loads the argument, non-negative out-of-bounds yields
// |undefined|, negative indices bail out.
void CodeGenerator::visitGetInlinedArgumentHole(LGetInlinedArgumentHole* lir) {
  Register index = ToRegister(lir->getIndex());
  ValueOperand output = ToOutValue(lir);

  uint32_t numActuals = lir->mir()->numActuals();

  if (numActuals == 0) {
    // No actuals at all: any non-negative index is a hole.
    bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
    masm.moveValue(UndefinedValue(), output);
    return;
  }

  Label outOfBounds, done;
  masm.branch32(Assembler::AboveOrEqual, index, Imm32(numActuals),
                &outOfBounds);

  emitGetInlinedArgument(lir, index, output);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
  masm.moveValue(UndefinedValue(), output);

  masm.bind(&done);
}
// Codegen for loading a fixed-index argument out of an arguments object's
// backing ArgumentsData.
void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->temp0());
  Register argsObj = ToRegister(lir->argsObject());
  ValueOperand out = ToOutValue(lir);

  masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                   temp);
  Address argAddr(temp, ArgumentsData::offsetOfArgs() +
                            lir->mir()->argno() * sizeof(Value));
  masm.loadValue(argAddr, out);
#ifdef DEBUG
  // A magic value would indicate a forwarded/deleted slot we shouldn't see.
  Label success;
  masm.branchTestMagic(Assembler::NotEqual, out, &success);
  masm.assumeUnreachable(
      "Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
  masm.bind(&success);
#endif
}
// Codegen for storing into a fixed-index slot of an arguments object's
// backing ArgumentsData, with the required GC pre-barrier.
void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->getTemp(0));
  Register argsObj = ToRegister(lir->argsObject());
  ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);

  masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                   temp);
  Address argAddr(temp, ArgumentsData::offsetOfArgs() +
                            lir->mir()->argno() * sizeof(Value));
  // Pre-barrier on the old value before it is overwritten.
  emitPreBarrier(argAddr);
#ifdef DEBUG
  Label success;
  masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
  masm.assumeUnreachable(
      "Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
  masm.bind(&success);
#endif
  masm.storeValue(value, argAddr);
}
8337 void CodeGenerator::visitLoadArgumentsObjectArg(LLoadArgumentsObjectArg* lir) {
8338 Register temp = ToRegister(lir->temp0());
8339 Register argsObj = ToRegister(lir->argsObject());
8340 Register index = ToRegister(lir->index());
8341 ValueOperand out = ToOutValue(lir);
8343 Label bail;
8344 masm.loadArgumentsObjectElement(argsObj, index, out, temp, &bail);
8345 bailoutFrom(&bail, lir->snapshot());
// Codegen for loading an arguments-object element by dynamic index where
// out-of-bounds reads are allowed (hole semantics); bails out on failure.
void CodeGenerator::visitLoadArgumentsObjectArgHole(
    LLoadArgumentsObjectArgHole* lir) {
  Register temp = ToRegister(lir->temp0());
  Register argsObj = ToRegister(lir->argsObject());
  Register index = ToRegister(lir->index());
  ValueOperand out = ToOutValue(lir);

  Label bail;
  masm.loadArgumentsObjectElementHole(argsObj, index, out, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Codegen for |index in argumentsObject| existence checks; bails out if the
// check cannot be answered inline.
void CodeGenerator::visitInArgumentsObjectArg(LInArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->temp0());
  Register argsObj = ToRegister(lir->argsObject());
  Register index = ToRegister(lir->index());
  Register out = ToRegister(lir->output());

  Label bail;
  masm.loadArgumentsObjectElementExists(argsObj, index, out, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
8371 void CodeGenerator::visitArgumentsObjectLength(LArgumentsObjectLength* lir) {
8372 Register argsObj = ToRegister(lir->argsObject());
8373 Register out = ToRegister(lir->output());
8375 Label bail;
8376 masm.loadArgumentsObjectLength(argsObj, out, &bail);
8377 bailoutFrom(&bail, lir->snapshot());
8380 void CodeGenerator::visitArrayFromArgumentsObject(
8381 LArrayFromArgumentsObject* lir) {
8382 pushArg(ToRegister(lir->argsObject()));
8384 using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
8385 callVM<Fn, js::ArrayFromArgumentsObject>(lir);
// Codegen for guarding that none of the given arguments-object flags are set;
// bails out if any of them are.
void CodeGenerator::visitGuardArgumentsObjectFlags(
    LGuardArgumentsObjectFlags* lir) {
  Register argsObj = ToRegister(lir->argsObject());
  Register temp = ToRegister(lir->temp0());

  Label bail;
  masm.branchTestArgumentsObjectFlags(argsObj, temp, lir->mir()->flags(),
                                      Assembler::NonZero, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Codegen for extracting the bound-argument count from a BoundFunctionObject:
// the count is packed into the high bits of the flags slot.
void CodeGenerator::visitBoundFunctionNumArgs(LBoundFunctionNumArgs* lir) {
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
                  output);
  // Shift away the flag bits to leave just the bound-args count.
  masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
}
// Codegen for guarding that a BoundFunctionObject is a constructor; bails out
// if the IsConstructor flag is clear.
void CodeGenerator::visitGuardBoundFunctionIsConstructor(
    LGuardBoundFunctionIsConstructor* lir) {
  Register obj = ToRegister(lir->object());

  Label bail;
  Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(BoundFunctionObject::IsConstructorFlag), &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Codegen for selecting the constructor's return object: if the returned
// Value is an object, use it; otherwise fall back to the freshly created
// |this| object.
void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) {
  ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  Label valueIsObject, end;

  masm.branchTestObject(Assembler::Equal, value, &valueIsObject);

  // Value is not an object. Return that other object.
  masm.movePtr(obj, output);
  masm.jump(&end);

  // Value is an object. Return unbox(Value).
  masm.bind(&valueIsObject);
  Register payload = masm.extractObject(value, output);
  if (payload != output) {
    // extractObject may leave the payload in a different register.
    masm.movePtr(payload, output);
  }

  masm.bind(&end);
}
// Out-of-line path for LBoxNonStrictThis: handles the cases where |this| is
// not already an object (null/undefined -> globalThis, primitives -> boxed).
class OutOfLineBoxNonStrictThis : public OutOfLineCodeBase<CodeGenerator> {
  LBoxNonStrictThis* ins_;

 public:
  explicit OutOfLineBoxNonStrictThis(LBoxNonStrictThis* ins) : ins_(ins) {}
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineBoxNonStrictThis(this);
  }
  LBoxNonStrictThis* ins() const { return ins_; }
};
// Codegen for computing non-strict |this|: fast path unboxes an object
// directly; all other Value types go to the out-of-line path.
void CodeGenerator::visitBoxNonStrictThis(LBoxNonStrictThis* lir) {
  ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
  Register output = ToRegister(lir->output());

  auto* ool = new (alloc()) OutOfLineBoxNonStrictThis(lir);
  addOutOfLineCode(ool, lir->mir());

  masm.fallibleUnboxObject(value, output, ool->entry());
  masm.bind(ool->rejoin());
}
// OOL path for visitBoxNonStrictThis: null/undefined map to the global this
// object; any other primitive is boxed via a VM call.
void CodeGenerator::visitOutOfLineBoxNonStrictThis(
    OutOfLineBoxNonStrictThis* ool) {
  LBoxNonStrictThis* lir = ool->ins();

  ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
  Register output = ToRegister(lir->output());

  Label notNullOrUndefined;
  {
    // Scope the tag register so it is released before the VM call below.
    Label isNullOrUndefined;
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);
    masm.branchTestUndefined(Assembler::Equal, tag, &isNullOrUndefined);
    masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
    masm.bind(&isNullOrUndefined);
    masm.movePtr(ImmGCPtr(lir->mir()->globalThis()), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&notNullOrUndefined);

  saveLive(lir);

  pushArg(value);
  using Fn = JSObject* (*)(JSContext*, HandleValue);
  callVM<Fn, BoxNonStrictThis>(lir);

  StoreRegisterTo(output).generate(this);
  restoreLiveIgnore(lir, StoreRegisterTo(output).clobbered());

  masm.jump(ool->rejoin());
}
8497 void CodeGenerator::visitImplicitThis(LImplicitThis* lir) {
8498 pushArg(ImmGCPtr(lir->mir()->name()));
8499 pushArg(ToRegister(lir->env()));
8501 using Fn = bool (*)(JSContext*, HandleObject, Handle<PropertyName*>,
8502 MutableHandleValue);
8503 callVM<Fn, ImplicitThisOperation>(lir);
// Codegen for reading an array's length from its elements header. The length
// is stored as uint32; bail out if it exceeds INT32_MAX.
void CodeGenerator::visitArrayLength(LArrayLength* lir) {
  Register elements = ToRegister(lir->elements());
  Register output = ToRegister(lir->output());

  Address length(elements, ObjectElements::offsetOfLength());
  masm.load32(length, output);

  // Bail out if the length doesn't fit in int32.
  bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
}
// Store |index + 1| into an array length slot. When the index is in a
// register, the register is temporarily incremented and then restored so the
// caller's value is preserved.
static void SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index,
                               const Address& length) {
  if (index->isConstant()) {
    masm.store32(Imm32(ToInt32(index) + 1), length);
  } else {
    Register newLength = ToRegister(index);
    masm.add32(Imm32(1), newLength);
    masm.store32(newLength, length);
    // Restore the original index value.
    masm.sub32(Imm32(1), newLength);
  }
}
8529 void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) {
8530 Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
8531 SetLengthFromIndex(masm, lir->index(), length);
// Codegen for reading |fn.length| directly from the JSFunction; bails out
// when the value cannot be read without running the resolve hook.
void CodeGenerator::visitFunctionLength(LFunctionLength* lir) {
  Register function = ToRegister(lir->function());
  Register output = ToRegister(lir->output());

  Label bail;

  // Get the JSFunction flags.
  masm.load32(Address(function, JSFunction::offsetOfFlagsAndArgCount()),
              output);

  // Functions with a SelfHostedLazyScript must be compiled with the slow-path
  // before the function length is known. If the length was previously resolved,
  // the length property may be shadowed.
  masm.branchTest32(
      Assembler::NonZero, output,
      Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
      &bail);

  masm.loadFunctionLength(function, output, output, &bail);

  bailoutFrom(&bail, lir->snapshot());
}
// Codegen for reading |fn.name| directly from the JSFunction, substituting
// the empty atom when the function has no name; bails out otherwise.
void CodeGenerator::visitFunctionName(LFunctionName* lir) {
  Register function = ToRegister(lir->function());
  Register output = ToRegister(lir->output());

  Label bail;

  const JSAtomState& names = gen->runtime->names();
  masm.loadFunctionName(function, output, ImmGCPtr(names.empty_), &bail);

  bailoutFrom(&bail, lir->snapshot());
}
// Compute the address of the current entry of an OrderedHashTable::Range:
// front = ht.data + i * sizeof(Data). Specialized per table because the
// entry sizes differ.
template <class OrderedHashTable>
static void RangeFront(MacroAssembler&, Register, Register, Register);

template <>
void RangeFront<ValueMap>(MacroAssembler& masm, Register range, Register i,
                          Register front) {
  masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
  masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);

  MOZ_ASSERT(ValueMap::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
  // i * 24 computed as (i * 3) << 3. Note: clobbers |i|.
  masm.mulBy3(i, i);
  masm.lshiftPtr(Imm32(3), i);
  masm.addPtr(i, front);
}
// ValueSet specialization of RangeFront: set entries are 16 bytes, so the
// offset is i << 4. Note: clobbers |i|.
template <>
void RangeFront<ValueSet>(MacroAssembler& masm, Register range, Register i,
                          Register front) {
  masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
  masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);

  MOZ_ASSERT(ValueSet::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
  masm.lshiftPtr(Imm32(4), i);
  masm.addPtr(i, front);
}
// Advances |range| past its current element: bumps the visited count, then
// scans forward from index i until either the end of the data array
// (|dataLength|) or the next live entry is reached. |front| is advanced in
// lockstep so it keeps pointing at data[i]. Clobbers |temp|.
8599 template <class OrderedHashTable>
8600 static void RangePopFront(MacroAssembler& masm, Register range, Register front,
8601 Register dataLength, Register temp) {
8602 Register i = temp;
8604 masm.add32(Imm32(1),
8605 Address(range, OrderedHashTable::Range::offsetOfCount()));
8607 masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);
8609 Label done, seek;
8610 masm.bind(&seek);
8611 masm.add32(Imm32(1), i);
8612 masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);
8614 // We can add sizeof(Data) to |front| to select the next element, because
8615 // |front| and |range.ht.data[i]| point to the same location.
8616 MOZ_ASSERT(OrderedHashTable::offsetOfImplDataElement() == 0,
8617 "offsetof(Data, element) is 0");
8618 masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);
// Removed entries carry the JS_HASH_KEY_EMPTY magic value; keep seeking
// past them.
8620 masm.branchTestMagic(Assembler::Equal,
8621 Address(front, OrderedHashTable::offsetOfEntryKey()),
8622 JS_HASH_KEY_EMPTY, &seek);
8624 masm.bind(&done);
// Publish the new index back into the range.
8625 masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
// Unlinks |range| from the table's doubly-linked list of active ranges
// (*range.prevp = range.next; next->prevp = range.prevp, when next != null)
// and then frees the range's memory. The free is skipped when |iter| is in
// the nursery — NOTE(review): presumably the nursery/GC then owns the
// allocation; confirm against the iterator object's finalization path.
8628 template <class OrderedHashTable>
8629 static inline void RangeDestruct(MacroAssembler& masm, Register iter,
8630 Register range, Register temp0,
8631 Register temp1) {
8632 Register next = temp0;
8633 Register prevp = temp1;
8635 masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
8636 masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
8637 masm.storePtr(next, Address(prevp, 0));
8639 Label hasNoNext;
8640 masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);
8642 masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));
8644 masm.bind(&hasNoNext);
8646 Label nurseryAllocated;
8647 masm.branchPtrInNurseryChunk(Assembler::Equal, iter, temp0,
8648 &nurseryAllocated);
8650 masm.callFreeStub(range);
8652 masm.bind(&nurseryAllocated);
// Copies the current map entry's key and value into the first two fixed
// elements of |result|. The overwritten slots get pre-barriers; a post-write
// barrier is emitted when either stored value is a nursery cell.
8655 template <>
8656 void CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result,
8657 Register temp,
8658 Register front) {
8659 size_t elementsOffset = NativeObject::offsetOfFixedElements();
8661 Address keyAddress(front, ValueMap::Entry::offsetOfKey());
8662 Address valueAddress(front, ValueMap::Entry::offsetOfValue());
8663 Address keyElemAddress(result, elementsOffset);
8664 Address valueElemAddress(result, elementsOffset + sizeof(Value));
8665 masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
8666 masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
8667 masm.storeValue(keyAddress, keyElemAddress, temp);
8668 masm.storeValue(valueAddress, valueElemAddress, temp);
// Take the barrier path if the key is a nursery cell, skip it only when the
// value is not one either.
8670 Label emitBarrier, skipBarrier;
8671 masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp,
8672 &emitBarrier);
8673 masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp,
8674 &skipBarrier);
8676 masm.bind(&emitBarrier);
// emitPostWriteBarrier calls into the VM, so volatile registers must be
// preserved around it.
8677 saveVolatile(temp);
8678 emitPostWriteBarrier(result);
8679 restoreVolatile(temp);
8681 masm.bind(&skipBarrier);
// Copies the current set entry's key into the first fixed element of
// |result|, with a pre-barrier on the overwritten slot and a post-write
// barrier when the stored value is a nursery cell.
8684 template <>
8685 void CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result,
8686 Register temp,
8687 Register front) {
8688 size_t elementsOffset = NativeObject::offsetOfFixedElements();
8690 Address keyAddress(front, ValueSet::offsetOfEntryKey());
8691 Address keyElemAddress(result, elementsOffset);
8692 masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
8693 masm.storeValue(keyAddress, keyElemAddress, temp);
8695 Label skipBarrier;
8696 masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp,
8697 &skipBarrier);
// emitPostWriteBarrier calls into the VM, so volatile registers must be
// preserved around it.
8699 saveVolatile(temp);
8700 emitPostWriteBarrier(result);
8701 restoreVolatile(temp);
8703 masm.bind(&skipBarrier);
// Shared implementation of GetNextEntryForIterator for Map and Set
// iterators. Loads the iterator's private Range; if iteration is finished
// (range null, or index past the data length) sets output=1 — destroying the
// range the first time this happens — otherwise copies the current entry
// into |result|, advances the range, and sets output=0.
8706 template <class IteratorObject, class OrderedHashTable>
8707 void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) {
8708 Register iter = ToRegister(lir->iter());
8709 Register result = ToRegister(lir->result());
8710 Register temp = ToRegister(lir->temp0());
8711 Register dataLength = ToRegister(lir->temp1());
8712 Register range = ToRegister(lir->temp2());
8713 Register output = ToRegister(lir->output());
8715 #ifdef DEBUG
8716 // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
8717 // only called with the correct iterator class. Assert here all self-
8718 // hosted callers of GetNextEntryForIterator perform this class check.
8719 // No Spectre mitigations are needed because this is DEBUG-only code.
8720 Label success;
8721 masm.branchTestObjClassNoSpectreMitigations(
8722 Assembler::Equal, iter, &IteratorObject::class_, temp, &success);
8723 masm.assumeUnreachable("Iterator object should have the correct class.");
8724 masm.bind(&success);
8725 #endif
// A null range means the iterator was already torn down on a previous call.
8727 masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(
8728 IteratorObject::RangeSlot)),
8729 range);
8731 Label iterAlreadyDone, iterDone, done;
8732 masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);
// temp <- current index; dataLength <- table's data length. Iteration is
// done once the index reaches the data length.
8734 masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
8735 masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()),
8736 dataLength);
8737 masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()),
8738 dataLength);
8739 masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);
// |iter| is spilled so its register can be reused as |front| below.
8741 masm.Push(iter);
8743 Register front = iter;
8744 RangeFront<OrderedHashTable>(masm, range, temp, front);
8746 emitLoadIteratorValues<OrderedHashTable>(result, temp, front);
8748 RangePopFront<OrderedHashTable>(masm, range, front, dataLength, temp);
8750 masm.Pop(iter);
// output=0: an entry was produced.
8751 masm.move32(Imm32(0), output);
8753 masm.jump(&done);
8755 masm.bind(&iterDone);
// First time we hit the end: tear down the range and clear the slot so
// subsequent calls take the |iterAlreadyDone| path.
8757 RangeDestruct<OrderedHashTable>(masm, iter, range, temp, dataLength);
8759 masm.storeValue(PrivateValue(nullptr),
8760 Address(iter, NativeObject::getFixedSlotOffset(
8761 IteratorObject::RangeSlot)));
8763 masm.bind(&iterAlreadyDone);
// output=1: iteration complete.
8765 masm.move32(Imm32(1), output);
8767 masm.bind(&done);
8770 void CodeGenerator::visitGetNextEntryForIterator(
8771 LGetNextEntryForIterator* lir) {
8772 if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
8773 emitGetNextEntryForIterator<MapIteratorObject, ValueMap>(lir);
8774 } else {
8775 MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
8776 emitGetNextEntryForIterator<SetIteratorObject, ValueSet>(lir);
8780 // The point of these is to inform Ion of where these values already are; they
8781 // don't normally generate (much) code.
// (The LIR allocation itself conveys the result location, so the visitor
// bodies are intentionally empty.)
8782 void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
8783 void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
8784 void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
// Initializes a wasm stack-result area by zeroing every anyref-typed result
// slot. NOTE(review): presumably this is so GC/stack maps never observe an
// uninitialized reference before the callee writes results — confirm.
// |temp0| is zeroed lazily, once, on the first ref slot encountered.
8786 void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
8787 LAllocation* output = lir->getDef(0)->output();
8788 MOZ_ASSERT(output->isStackArea());
8789 bool tempInit = false;
8790 for (auto iter = output->toStackArea()->results(); iter; iter.next()) {
8791 // Zero out ref stack results.
8792 if (iter.isWasmAnyRef()) {
8793 Register temp = ToRegister(lir->temp0());
8794 if (!tempInit) {
8795 masm.xorPtr(temp, temp);
8796 tempInit = true;
8798 masm.storePtr(temp, ToAddress(iter.alloc()));
// On 64-bit targets, an Int32 register result is widened (see
// MacroAssembler::widenInt32) so the upper half of the 64-bit register is in
// a well-defined state; other result types need no code here.
8803 void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
8804 #ifdef JS_64BIT
8805 if (MWasmRegisterResult* mir = lir->mir()) {
8806 if (mir->type() == MIRType::Int32) {
8807 masm.widenInt32(ToRegister(lir->output()));
8810 #endif
// Emits a wasm call (or tail/return call) for every callee kind: direct
// function, import, asm.js table, wasm table, builtin, builtin instance
// method, and funcref. Also records safepoints/stack maps, try-note ranges
// for catchable calls, and restores the instance/pinned registers and realm
// after callees that do not preserve them. Return calls exit early since
// nothing after the transfer of control runs in this frame.
8813 void CodeGenerator::visitWasmCall(LWasmCall* lir) {
8814 const MWasmCallBase* callBase = lir->callBase();
8815 bool isReturnCall = lir->isReturnCall();
8817 // If this call is in Wasm try code block, initialise a wasm::TryNote for this
8818 // call.
8819 bool inTry = callBase->inTry();
8820 if (inTry) {
8821 size_t tryNoteIndex = callBase->tryNoteIndex();
8822 wasm::TryNoteVector& tryNotes = masm.tryNotes();
8823 wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
8824 tryNote.setTryBodyBegin(masm.currentOffset());
8827 MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment ==
8829 static_assert(
8830 WasmStackAlignment >= ABIStackAlignment &&
8831 WasmStackAlignment % ABIStackAlignment == 0,
8832 "The wasm stack alignment should subsume the ABI-required alignment");
8834 #ifdef DEBUG
// Runtime check that the stack pointer really is wasm-aligned at the call.
8835 Label ok;
8836 masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
8837 masm.breakpoint();
8838 masm.bind(&ok);
8839 #endif
8841 // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
8842 // instance and pinned regs. The only case where where we don't have to
8843 // reload the instance and pinned regs is when the callee preserves them.
8844 bool reloadRegs = true;
8845 bool switchRealm = true;
8847 const wasm::CallSiteDesc& desc = callBase->desc();
8848 const wasm::CalleeDesc& callee = callBase->callee();
8849 CodeOffset retOffset;
8850 CodeOffset secondRetOffset;
// Emit the call itself, per callee kind. Each tail-call branch returns
// directly from this method.
8851 switch (callee.which()) {
8852 case wasm::CalleeDesc::Func:
8853 #ifdef ENABLE_WASM_TAIL_CALLS
8854 if (isReturnCall) {
8855 ReturnCallAdjustmentInfo retCallInfo(
8856 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
8857 masm.wasmReturnCall(desc, callee.funcIndex(), retCallInfo);
8858 // The rest of the method is unnecessary for a return call.
8859 return;
8861 #endif
8862 MOZ_ASSERT(!isReturnCall);
8863 retOffset = masm.call(desc, callee.funcIndex());
8864 reloadRegs = false;
8865 switchRealm = false;
8866 break;
8867 case wasm::CalleeDesc::Import:
8868 #ifdef ENABLE_WASM_TAIL_CALLS
8869 if (isReturnCall) {
8870 ReturnCallAdjustmentInfo retCallInfo(
8871 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
8872 masm.wasmReturnCallImport(desc, callee, retCallInfo);
8873 // The rest of the method is unnecessary for a return call.
8874 return;
8876 #endif
8877 MOZ_ASSERT(!isReturnCall);
8878 retOffset = masm.wasmCallImport(desc, callee);
8879 break;
8880 case wasm::CalleeDesc::AsmJSTable:
8881 retOffset = masm.asmCallIndirect(desc, callee);
8882 break;
8883 case wasm::CalleeDesc::WasmTable: {
// Table calls may need out-of-line traps for a failed bounds check and,
// on targets without WASM_HAS_HEAPREG, for an indirect call to null.
8884 Label* boundsCheckFailed = nullptr;
8885 if (lir->needsBoundsCheck()) {
8886 OutOfLineAbortingWasmTrap* ool =
8887 new (alloc()) OutOfLineAbortingWasmTrap(
8888 wasm::BytecodeOffset(desc.lineOrBytecode()),
8889 wasm::Trap::OutOfBounds);
8890 if (lir->isCatchable()) {
8891 addOutOfLineCode(ool, lir->mirCatchable());
8892 } else if (isReturnCall) {
8893 #ifdef ENABLE_WASM_TAIL_CALLS
8894 addOutOfLineCode(ool, lir->mirReturnCall());
8895 #else
8896 MOZ_CRASH("Return calls are disabled.");
8897 #endif
8898 } else {
8899 addOutOfLineCode(ool, lir->mirUncatchable());
8901 boundsCheckFailed = ool->entry();
8903 Label* nullCheckFailed = nullptr;
8904 #ifndef WASM_HAS_HEAPREG
8906 OutOfLineAbortingWasmTrap* ool =
8907 new (alloc()) OutOfLineAbortingWasmTrap(
8908 wasm::BytecodeOffset(desc.lineOrBytecode()),
8909 wasm::Trap::IndirectCallToNull);
8910 if (lir->isCatchable()) {
8911 addOutOfLineCode(ool, lir->mirCatchable());
8912 } else if (isReturnCall) {
8913 # ifdef ENABLE_WASM_TAIL_CALLS
8914 addOutOfLineCode(ool, lir->mirReturnCall());
8915 # else
8916 MOZ_CRASH("Return calls are disabled.");
8917 # endif
8918 } else {
8919 addOutOfLineCode(ool, lir->mirUncatchable());
8921 nullCheckFailed = ool->entry();
8923 #endif
8924 #ifdef ENABLE_WASM_TAIL_CALLS
8925 if (isReturnCall) {
8926 ReturnCallAdjustmentInfo retCallInfo(
8927 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
8928 masm.wasmReturnCallIndirect(desc, callee, boundsCheckFailed,
8929 nullCheckFailed, mozilla::Nothing(),
8930 retCallInfo);
8931 // The rest of the method is unnecessary for a return call.
8932 return;
8934 #endif
8935 MOZ_ASSERT(!isReturnCall);
8936 masm.wasmCallIndirect(desc, callee, boundsCheckFailed, nullCheckFailed,
8937 lir->tableSize(), &retOffset, &secondRetOffset);
8938 // Register reloading and realm switching are handled dynamically inside
8939 // wasmCallIndirect. There are two return offsets, one for each call
8940 // instruction (fast path and slow path).
8941 reloadRegs = false;
8942 switchRealm = false;
8943 break;
8945 case wasm::CalleeDesc::Builtin:
8946 retOffset = masm.call(desc, callee.builtin());
8947 reloadRegs = false;
8948 switchRealm = false;
8949 break;
8950 case wasm::CalleeDesc::BuiltinInstanceMethod:
8951 retOffset = masm.wasmCallBuiltinInstanceMethod(
8952 desc, callBase->instanceArg(), callee.builtin(),
8953 callBase->builtinMethodFailureMode());
8954 switchRealm = false;
8955 break;
8956 case wasm::CalleeDesc::FuncRef:
8957 #ifdef ENABLE_WASM_TAIL_CALLS
8958 if (isReturnCall) {
8959 ReturnCallAdjustmentInfo retCallInfo(
8960 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
8961 masm.wasmReturnCallRef(desc, callee, retCallInfo);
8962 // The rest of the method is unnecessary for a return call.
8963 return;
8965 #endif
8966 MOZ_ASSERT(!isReturnCall);
8967 // Register reloading and realm switching are handled dynamically inside
8968 // wasmCallRef. There are two return offsets, one for each call
8969 // instruction (fast path and slow path).
8970 masm.wasmCallRef(desc, callee, &retOffset, &secondRetOffset);
8971 reloadRegs = false;
8972 switchRealm = false;
8973 break;
8976 // Note the assembler offset for the associated LSafePoint.
8977 MOZ_ASSERT(!isReturnCall);
8978 markSafepointAt(retOffset.offset(), lir);
8980 // Now that all the outbound in-memory args are on the stack, note the
8981 // required lower boundary point of the associated StackMap.
8982 uint32_t framePushedAtStackMapBase =
8983 masm.framePushed() - callBase->stackArgAreaSizeUnaligned();
8984 lir->safepoint()->setFramePushedAtStackMapBase(framePushedAtStackMapBase);
8985 MOZ_ASSERT(!lir->safepoint()->isWasmTrap());
8987 // Note the assembler offset and framePushed for use by the adjunct
8988 // LSafePoint, see visitor for LWasmCallIndirectAdjunctSafepoint below.
8989 if (callee.which() == wasm::CalleeDesc::WasmTable) {
8990 lir->adjunctSafepoint()->recordSafepointInfo(secondRetOffset,
8991 framePushedAtStackMapBase);
// Restore the instance register, pinned registers, and (if needed) the
// realm after callees that do not preserve them.
8994 if (reloadRegs) {
8995 masm.loadPtr(
8996 Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
8997 InstanceReg);
8998 masm.loadWasmPinnedRegsFromInstance();
8999 if (switchRealm) {
9000 masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
9002 } else {
9003 MOZ_ASSERT(!switchRealm);
9006 #ifdef ENABLE_WASM_TAIL_CALLS
9007 switch (callee.which()) {
9008 case wasm::CalleeDesc::Func:
9009 case wasm::CalleeDesc::Import:
9010 case wasm::CalleeDesc::WasmTable:
9011 case wasm::CalleeDesc::FuncRef:
9012 // Stack allocation could change during Wasm (return) calls,
9013 // recover pre-call state.
9014 masm.freeStackTo(masm.framePushed());
9015 break;
9016 default:
9017 break;
9019 #endif // ENABLE_WASM_TAIL_CALLS
9021 if (inTry) {
9022 // Set the end of the try note range
9023 size_t tryNoteIndex = callBase->tryNoteIndex();
9024 wasm::TryNoteVector& tryNotes = masm.tryNotes();
9025 wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
9027 // Don't set the end of the try note if we've OOM'ed, as the above
9028 // instructions may not have been emitted, which will trigger an assert
9029 // about zero-length try-notes. This is okay as this compilation will be
9030 // thrown away.
9031 if (!masm.oom()) {
9032 tryNote.setTryBodyEnd(masm.currentOffset());
9035 // This instruction or the adjunct safepoint must be the last instruction
9036 // in the block. No other instructions may be inserted.
9037 LBlock* block = lir->block();
9038 MOZ_RELEASE_ASSERT(*block->rbegin() == lir ||
9039 (block->rbegin()->isWasmCallIndirectAdjunctSafepoint() &&
9040 *(++block->rbegin()) == lir));
9042 // Jump to the fallthrough block
9043 jumpToBlock(lir->mirCatchable()->getSuccessor(
9044 MWasmCallCatchable::FallthroughBranchIndex));
// Marks the landing-pad entry point for a catchable wasm call's try note.
// Validates (via release asserts) that no code can run between the call and
// this pad, then records the pad's offset and frame depth in the try note.
9048 void CodeGenerator::visitWasmCallLandingPrePad(LWasmCallLandingPrePad* lir) {
9049 LBlock* block = lir->block();
9050 MWasmCallLandingPrePad* mir = lir->mir();
9051 MBasicBlock* mirBlock = mir->block();
9052 MBasicBlock* callMirBlock = mir->callBlock();
9054 // This block must be the pre-pad successor of the call block. No blocks may
9055 // be inserted between us, such as for critical edge splitting.
9056 MOZ_RELEASE_ASSERT(mirBlock == callMirBlock->getSuccessor(
9057 MWasmCallCatchable::PrePadBranchIndex));
9059 // This instruction or a move group must be the first instruction in the
9060 // block. No other instructions may be inserted.
9061 MOZ_RELEASE_ASSERT(*block->begin() == lir || (block->begin()->isMoveGroup() &&
9062 *(++block->begin()) == lir));
9064 wasm::TryNoteVector& tryNotes = masm.tryNotes();
9065 wasm::TryNote& tryNote = tryNotes[mir->tryNoteIndex()];
9066 // Set the entry point for the call try note to be the beginning of this
9067 // block. The above assertions (and assertions in visitWasmCall) guarantee
9068 // that we are not skipping over instructions that should be executed.
9069 tryNote.setLandingPad(block->label()->offset(), masm.framePushed());
9072 void CodeGenerator::visitWasmCallIndirectAdjunctSafepoint(
9073 LWasmCallIndirectAdjunctSafepoint* lir) {
9074 markSafepointAt(lir->safepointLocation().offset(), lir);
9075 lir->safepoint()->setFramePushedAtStackMapBase(
9076 lir->framePushedAtStackMapBase());
// If |ins| carries a trap site (i.e. its memory access may fault on a null
// container), registers the current assembler offset so a fault at the next
// emitted access is reported as a NullPointerDereference wasm trap.
9079 template <typename InstructionWithMaybeTrapSite>
9080 void EmitSignalNullCheckTrapSite(MacroAssembler& masm,
9081 InstructionWithMaybeTrapSite* ins) {
9082 if (!ins->maybeTrap()) {
9083 return;
9085 wasm::BytecodeOffset trapOffset(ins->maybeTrap()->offset);
9086 masm.append(wasm::Trap::NullPointerDereference,
9087 wasm::TrapSite(masm.currentOffset(), trapOffset));
// Loads a value of |type| from a fixed offset inside a container object,
// applying the requested widening for sub-int32 integer loads. The trap site
// is recorded immediately before the load so a fault on a null container is
// reported as a wasm null-dereference trap.
9090 void CodeGenerator::visitWasmLoadSlot(LWasmLoadSlot* ins) {
9091 MIRType type = ins->type();
9092 MWideningOp wideningOp = ins->wideningOp();
9093 Register container = ToRegister(ins->containerRef());
9094 Address addr(container, ins->offset());
9095 AnyRegister dst = ToAnyRegister(ins->output());
9097 EmitSignalNullCheckTrapSite(masm, ins);
9098 switch (type) {
9099 case MIRType::Int32:
// Int32 is the only type with sub-word widening variants.
9100 switch (wideningOp) {
9101 case MWideningOp::None:
9102 masm.load32(addr, dst.gpr());
9103 break;
9104 case MWideningOp::FromU16:
9105 masm.load16ZeroExtend(addr, dst.gpr());
9106 break;
9107 case MWideningOp::FromS16:
9108 masm.load16SignExtend(addr, dst.gpr());
9109 break;
9110 case MWideningOp::FromU8:
9111 masm.load8ZeroExtend(addr, dst.gpr());
9112 break;
9113 case MWideningOp::FromS8:
9114 masm.load8SignExtend(addr, dst.gpr());
9115 break;
9116 default:
9117 MOZ_CRASH("unexpected widening op in ::visitWasmLoadSlot");
9119 break;
9120 case MIRType::Float32:
9121 MOZ_ASSERT(wideningOp == MWideningOp::None);
9122 masm.loadFloat32(addr, dst.fpu());
9123 break;
9124 case MIRType::Double:
9125 MOZ_ASSERT(wideningOp == MWideningOp::None);
9126 masm.loadDouble(addr, dst.fpu());
9127 break;
9128 case MIRType::Pointer:
9129 case MIRType::WasmAnyRef:
9130 MOZ_ASSERT(wideningOp == MWideningOp::None);
9131 masm.loadPtr(addr, dst.gpr());
9132 break;
9133 #ifdef ENABLE_WASM_SIMD
9134 case MIRType::Simd128:
9135 MOZ_ASSERT(wideningOp == MWideningOp::None);
9136 masm.loadUnalignedSimd128(addr, dst.fpu());
9137 break;
9138 #endif
9139 default:
9140 MOZ_CRASH("unexpected type in ::visitWasmLoadSlot");
// Stores a primitive value at a fixed offset inside a container object,
// applying the requested narrowing for sub-int32 integer stores. Reference
// stores must use LWasmStoreRef instead (barriers). The trap site is
// recorded immediately before the store (see EmitSignalNullCheckTrapSite).
9144 void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
9145 MIRType type = ins->type();
9146 MNarrowingOp narrowingOp = ins->narrowingOp();
9147 Register container = ToRegister(ins->containerRef());
9148 Address addr(container, ins->offset());
9149 AnyRegister src = ToAnyRegister(ins->value());
// Narrowing is only meaningful for Int32 values.
9150 if (type != MIRType::Int32) {
9151 MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
9154 EmitSignalNullCheckTrapSite(masm, ins);
9155 switch (type) {
9156 case MIRType::Int32:
9157 switch (narrowingOp) {
9158 case MNarrowingOp::None:
9159 masm.store32(src.gpr(), addr);
9160 break;
9161 case MNarrowingOp::To16:
9162 masm.store16(src.gpr(), addr);
9163 break;
9164 case MNarrowingOp::To8:
9165 masm.store8(src.gpr(), addr);
9166 break;
9167 default:
9168 MOZ_CRASH();
9170 break;
9171 case MIRType::Float32:
9172 masm.storeFloat32(src.fpu(), addr);
9173 break;
9174 case MIRType::Double:
9175 masm.storeDouble(src.fpu(), addr);
9176 break;
9177 case MIRType::Pointer:
9178 // This could be correct, but it would be a new usage, so check carefully.
9179 MOZ_CRASH("Unexpected type in visitWasmStoreSlot.");
9180 case MIRType::WasmAnyRef:
9181 MOZ_CRASH("Bad type in visitWasmStoreSlot. Use LWasmStoreRef.");
9182 #ifdef ENABLE_WASM_SIMD
9183 case MIRType::Simd128:
9184 masm.storeUnalignedSimd128(src.fpu(), addr);
9185 break;
9186 #endif
9187 default:
9188 MOZ_CRASH("unexpected type in StorePrimitiveValue");
9192 void CodeGenerator::visitWasmLoadTableElement(LWasmLoadTableElement* ins) {
9193 Register elements = ToRegister(ins->elements());
9194 Register index = ToRegister(ins->index());
9195 Register output = ToRegister(ins->output());
9196 masm.loadPtr(BaseIndex(elements, index, ScalePointer), output);
// output = base + constant offset: copy the base pointer into the output
// register, then add the (int32) displacement in place.
9199 void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
9200 masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
9201 masm.addPtr(Imm32(int32_t(ins->offset())), ToRegister(ins->output()));
9204 void CodeGenerator::visitWasmDerivedIndexPointer(
9205 LWasmDerivedIndexPointer* ins) {
9206 Register base = ToRegister(ins->base());
9207 Register index = ToRegister(ins->index());
9208 Register output = ToRegister(ins->output());
9209 masm.computeEffectiveAddress(BaseIndex(base, index, ins->scale()), output);
// Stores a wasm reference into memory at valueBase+offset, emitting the
// guarded pre-write barrier unless the LIR node opted out of it. The
// post-write barrier is a separate instruction (see LWasmPostWriteBarrier).
9212 void CodeGenerator::visitWasmStoreRef(LWasmStoreRef* ins) {
9213 Register instance = ToRegister(ins->instance());
9214 Register valueBase = ToRegister(ins->valueBase());
9215 size_t offset = ins->offset();
9216 Register value = ToRegister(ins->value());
9217 Register temp = ToRegister(ins->temp0());
9219 if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
9220 Label skipPreBarrier;
9221 wasm::EmitWasmPreBarrierGuard(
9222 masm, instance, temp, valueBase, offset, &skipPreBarrier,
9223 ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
9224 wasm::EmitWasmPreBarrierCall(masm, instance, temp, valueBase, offset);
9225 masm.bind(&skipPreBarrier);
9228 EmitSignalNullCheckTrapSite(masm, ins);
9229 masm.storePtr(value, Address(valueBase, offset));
9230 // The postbarrier is handled separately.
9233 // Out-of-line path to update the store buffer for wasm references.
// Captures the LIR node (for live-register save/restore), the base register
// and constant offset of the stored slot, and a scratch register.
9234 class OutOfLineWasmCallPostWriteBarrier
9235 : public OutOfLineCodeBase<CodeGenerator> {
9236 LInstruction* lir_;
9237 Register valueBase_;
9238 Register temp_;
9239 uint32_t valueOffset_;
9241 public:
9242 OutOfLineWasmCallPostWriteBarrier(LInstruction* lir, Register valueBase,
9243 Register temp, uint32_t valueOffset)
9244 : lir_(lir),
9245 valueBase_(valueBase),
9246 temp_(temp),
9247 valueOffset_(valueOffset) {}
9249 void accept(CodeGenerator* codegen) override {
9250 codegen->visitOutOfLineWasmCallPostWriteBarrier(this);
9253 LInstruction* lir() const { return lir_; }
9254 Register valueBase() const { return valueBase_; }
9255 Register temp() const { return temp_; }
9256 uint32_t valueOffset() const { return valueOffset_; }
// Out-of-line slow path: calls the PostBarrier symbolic address through the
// wasm ABI with the address of the just-stored reference slot, preserving
// live volatile registers and the instance register around the call.
9259 void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrier(
9260 OutOfLineWasmCallPostWriteBarrier* ool) {
9261 saveLiveVolatile(ool->lir());
9262 masm.Push(InstanceReg);
9263 int32_t framePushedAfterInstance = masm.framePushed();
9265 // Fold the value offset into the value base
9266 Register valueAddr = ool->valueBase();
9267 Register temp = ool->temp();
9268 masm.computeEffectiveAddress(Address(valueAddr, ool->valueOffset()), temp);
9270 // Call Instance::postBarrier
9271 masm.setupWasmABICall();
9272 masm.passABIArg(InstanceReg);
9273 masm.passABIArg(temp);
9274 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
9275 masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
9276 mozilla::Some(instanceOffset), MoveOp::GENERAL);
9278 masm.Pop(InstanceReg);
9279 restoreLiveVolatile(ool->lir());
9281 masm.jump(ool->rejoin());
// Inline part of the wasm post-write barrier: the guard emitted by
// EmitWasmPostBarrierGuard jumps straight to the rejoin point when no store
// buffer update is needed; otherwise we fall through to the OOL call above.
9284 void CodeGenerator::visitWasmPostWriteBarrier(LWasmPostWriteBarrier* lir) {
9285 Register object = ToRegister(lir->object());
9286 Register value = ToRegister(lir->value());
9287 Register valueBase = ToRegister(lir->valueBase());
9288 Register temp = ToRegister(lir->temp0());
9289 MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
9290 auto ool = new (alloc()) OutOfLineWasmCallPostWriteBarrier(
9291 lir, valueBase, temp, lir->valueOffset());
9292 addOutOfLineCode(ool, lir->mir());
9294 wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
9295 ool->rejoin());
9296 masm.jump(ool->entry());
9297 masm.bind(ool->rejoin());
9300 void CodeGenerator::visitWasmLoadSlotI64(LWasmLoadSlotI64* ins) {
9301 Register container = ToRegister(ins->containerRef());
9302 Address addr(container, ins->offset());
9303 Register64 output = ToOutRegister64(ins);
9304 EmitSignalNullCheckTrapSite(masm, ins);
9305 masm.load64(addr, output);
9308 void CodeGenerator::visitWasmStoreSlotI64(LWasmStoreSlotI64* ins) {
9309 Register container = ToRegister(ins->containerRef());
9310 Address addr(container, ins->offset());
9311 Register64 value = ToRegister64(ins->value());
9312 EmitSignalNullCheckTrapSite(masm, ins);
9313 masm.store64(value, addr);
9316 void CodeGenerator::visitArrayBufferByteLength(LArrayBufferByteLength* lir) {
9317 Register obj = ToRegister(lir->object());
9318 Register out = ToRegister(lir->output());
9319 masm.loadArrayBufferByteLengthIntPtr(obj, out);
9322 void CodeGenerator::visitArrayBufferViewLength(LArrayBufferViewLength* lir) {
9323 Register obj = ToRegister(lir->object());
9324 Register out = ToRegister(lir->output());
9325 masm.loadArrayBufferViewLengthIntPtr(obj, out);
9328 void CodeGenerator::visitArrayBufferViewByteOffset(
9329 LArrayBufferViewByteOffset* lir) {
9330 Register obj = ToRegister(lir->object());
9331 Register out = ToRegister(lir->output());
9332 masm.loadArrayBufferViewByteOffsetIntPtr(obj, out);
9335 void CodeGenerator::visitArrayBufferViewElements(
9336 LArrayBufferViewElements* lir) {
9337 Register obj = ToRegister(lir->object());
9338 Register out = ToRegister(lir->output());
9339 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), out);
9342 void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
9343 Register obj = ToRegister(lir->object());
9344 Register out = ToRegister(lir->output());
9346 masm.typedArrayElementSize(obj, out);
// Bails out when the view's underlying ArrayBuffer has been detached.
9349 void CodeGenerator::visitGuardHasAttachedArrayBuffer(
9350 LGuardHasAttachedArrayBuffer* lir) {
9351 Register obj = ToRegister(lir->object());
9352 Register temp = ToRegister(lir->temp0());
9354 Label bail;
9355 masm.branchIfHasDetachedArrayBuffer(obj, temp, &bail);
9356 bailoutFrom(&bail, lir->snapshot());
// Out-of-line path for LGuardNumberToIntPtrIndex when out-of-bounds indices
// are allowed (supportOOB): see the visitor below for what it substitutes.
9359 class OutOfLineGuardNumberToIntPtrIndex
9360 : public OutOfLineCodeBase<CodeGenerator> {
9361 LGuardNumberToIntPtrIndex* lir_;
9363 public:
9364 explicit OutOfLineGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex* lir)
9365 : lir_(lir) {}
9367 void accept(CodeGenerator* codegen) override {
9368 codegen->visitOutOfLineGuardNumberToIntPtrIndex(this);
9370 LGuardNumberToIntPtrIndex* lir() const { return lir_; }
// Converts a double index to an IntPtr. When out-of-bounds indices are not
// supported, a failed conversion bails out; otherwise the failure takes an
// out-of-line path that substitutes a guaranteed-out-of-bounds index.
9373 void CodeGenerator::visitGuardNumberToIntPtrIndex(
9374 LGuardNumberToIntPtrIndex* lir) {
9375 FloatRegister input = ToFloatRegister(lir->input());
9376 Register output = ToRegister(lir->output());
9378 if (!lir->mir()->supportOOB()) {
9379 Label bail;
9380 masm.convertDoubleToPtr(input, output, &bail, false);
9381 bailoutFrom(&bail, lir->snapshot());
9382 return;
9385 auto* ool = new (alloc()) OutOfLineGuardNumberToIntPtrIndex(lir);
9386 addOutOfLineCode(ool, lir->mir());
9388 masm.convertDoubleToPtr(input, output, ool->entry(), false);
9389 masm.bind(ool->rejoin());
9392 void CodeGenerator::visitOutOfLineGuardNumberToIntPtrIndex(
9393 OutOfLineGuardNumberToIntPtrIndex* ool) {
9394 // Substitute the invalid index with an arbitrary out-of-bounds index.
9395 masm.movePtr(ImmWord(-1), ToRegister(ool->lir()->output()));
9396 masm.jump(ool->rejoin());
9399 void CodeGenerator::visitStringLength(LStringLength* lir) {
9400 Register input = ToRegister(lir->string());
9401 Register output = ToRegister(lir->output());
9403 masm.loadStringLength(input, output);
// Int32 min/max with |first| tied to |output|. With a constant second
// operand, branch over the move when |first| already wins the comparison;
// otherwise use a conditional move.
9406 void CodeGenerator::visitMinMaxI(LMinMaxI* ins) {
9407 Register first = ToRegister(ins->first());
9408 Register output = ToRegister(ins->output());
9410 MOZ_ASSERT(first == output);
9412 Assembler::Condition cond =
9413 ins->mir()->isMax() ? Assembler::GreaterThan : Assembler::LessThan;
9415 if (ins->second()->isConstant()) {
// If first <cond> second, |output| (== first) is already the answer.
9416 Label done;
9417 masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done);
9418 masm.move32(Imm32(ToInt32(ins->second())), output);
9419 masm.bind(&done);
9420 } else {
9421 Register second = ToRegister(ins->second());
9422 masm.cmp32Move32(cond, second, first, second, output);
// Math.min/max over a packed int32 array, delegated to
// MacroAssembler::minMaxArrayInt32; bails out when the fast path's
// assumptions fail (see that helper).
9426 void CodeGenerator::visitMinMaxArrayI(LMinMaxArrayI* ins) {
9427 Register array = ToRegister(ins->array());
9428 Register output = ToRegister(ins->output());
9429 Register temp1 = ToRegister(ins->temp1());
9430 Register temp2 = ToRegister(ins->temp2());
9431 Register temp3 = ToRegister(ins->temp3());
9432 bool isMax = ins->isMax();
9434 Label bail;
9435 masm.minMaxArrayInt32(array, output, temp1, temp2, temp3, isMax, &bail);
9436 bailoutFrom(&bail, ins->snapshot());
// Math.min/max over a number array, delegated to
// MacroAssembler::minMaxArrayNumber; bails out when the fast path's
// assumptions fail (see that helper).
9439 void CodeGenerator::visitMinMaxArrayD(LMinMaxArrayD* ins) {
9440 Register array = ToRegister(ins->array());
9441 FloatRegister output = ToFloatRegister(ins->output());
9442 Register temp1 = ToRegister(ins->temp1());
9443 Register temp2 = ToRegister(ins->temp2());
9444 FloatRegister floatTemp = ToFloatRegister(ins->floatTemp());
9445 bool isMax = ins->isMax();
9447 Label bail;
9448 masm.minMaxArrayNumber(array, output, floatTemp, temp1, temp2, isMax, &bail);
9449 bailoutFrom(&bail, ins->snapshot());
9452 // For Abs*, lowering will have tied input to output on platforms where that is
9453 // sensible, and otherwise left them untied.
// |Math.abs| for int32. The fallible path must bail out when negating
// INT32_MIN (negation overflows); the infallible path can use abs32
// directly.
9455 void CodeGenerator::visitAbsI(LAbsI* ins) {
9456 Register input = ToRegister(ins->input());
9457 Register output = ToRegister(ins->output());
9459 if (ins->mir()->fallible()) {
9460 Label positive;
9461 if (input != output) {
9462 masm.move32(input, output);
9464 masm.branchTest32(Assembler::NotSigned, output, output, &positive);
// Negative: negate in place, bailing out if the negation overflowed.
9465 Label bail;
9466 masm.branchNeg32(Assembler::Overflow, output, &bail);
9467 bailoutFrom(&bail, ins->snapshot());
9468 masm.bind(&positive);
9469 } else {
9470 masm.abs32(input, output);
9474 void CodeGenerator::visitAbsD(LAbsD* ins) {
9475 masm.absDouble(ToFloatRegister(ins->input()), ToFloatRegister(ins->output()));
9478 void CodeGenerator::visitAbsF(LAbsF* ins) {
9479 masm.absFloat32(ToFloatRegister(ins->input()),
9480 ToFloatRegister(ins->output()));
// Int32 ** int32 via MacroAssembler::pow32, which jumps to |bailout| on
// cases the int32 fast path cannot represent (see that helper).
9483 void CodeGenerator::visitPowII(LPowII* ins) {
9484 Register value = ToRegister(ins->value());
9485 Register power = ToRegister(ins->power());
9486 Register output = ToRegister(ins->output());
9487 Register temp0 = ToRegister(ins->temp0());
9488 Register temp1 = ToRegister(ins->temp1());
9490 Label bailout;
9491 masm.pow32(value, power, output, temp0, temp1, &bailout);
9492 bailoutFrom(&bailout, ins->snapshot());
// double ** int32: calls js::powi through the ABI; the double result is
// returned in ReturnDoubleReg.
9495 void CodeGenerator::visitPowI(LPowI* ins) {
9496 FloatRegister value = ToFloatRegister(ins->value());
9497 Register power = ToRegister(ins->power());
9499 using Fn = double (*)(double x, int32_t y);
9500 masm.setupAlignedABICall();
9501 masm.passABIArg(value, MoveOp::DOUBLE);
9502 masm.passABIArg(power);
9504 masm.callWithABI<Fn, js::powi>(MoveOp::DOUBLE);
9505 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
// Emit double ** double exponentiation by calling out to ecmaPow through the
// ABI. The double result is returned in ReturnDoubleReg.
9508 void CodeGenerator::visitPowD(LPowD* ins) {
9509 FloatRegister value = ToFloatRegister(ins->value());
9510 FloatRegister power = ToFloatRegister(ins->power());
9512 using Fn = double (*)(double x, double y);
9513 masm.setupAlignedABICall();
9514 masm.passABIArg(value, MoveOp::DOUBLE);
9515 masm.passABIArg(power, MoveOp::DOUBLE);
9516 masm.callWithABI<Fn, ecmaPow>(MoveOp::DOUBLE);
9518 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
// Emit base ** int32-power where |base| is a constant power of two, so the
// whole operation reduces to left shifts of 1. Bails out when the exact
// result can't fit in an int32 (power too large or negative).
9521 void CodeGenerator::visitPowOfTwoI(LPowOfTwoI* ins) {
9522 Register power = ToRegister(ins->power());
9523 Register output = ToRegister(ins->output());
9525 uint32_t base = ins->base();
9526 MOZ_ASSERT(mozilla::IsPowerOfTwo(base));
9528 uint32_t n = mozilla::FloorLog2(base);
9529 MOZ_ASSERT(n != 0);
9531 // Hacker's Delight, 2nd edition, theorem D2.
9532 auto ceilingDiv = [](uint32_t x, uint32_t y) { return (x + y - 1) / y; };
9534 // Take bailout if |power| is greater-or-equals |log_y(2^31)| or is negative.
9535 // |2^(n*y) < 2^31| must hold, hence |n*y < 31| resp. |y < 31/n|.
// (The unsigned AboveOrEqual comparison also catches negative powers, which
// appear as large unsigned values.)
9537 // Note: it's important for this condition to match the code in CacheIR.cpp
9538 // (CanAttachInt32Pow) to prevent failure loops.
9539 bailoutCmp32(Assembler::AboveOrEqual, power, Imm32(ceilingDiv(31, n)),
9540 ins->snapshot());
9542 // Compute (2^n)^y as 2^(n*y) using repeated shifts. We could directly scale
9543 // |power| and perform a single shift, but due to the lack of necessary
9544 // MacroAssembler functionality, like multiplying a register with an
9545 // immediate, we restrict the number of generated shift instructions when
9546 // lowering this operation.
9547 masm.move32(Imm32(1), output);
// Shifting |1| left by |power| bits, |n| times, yields 2^(n*power).
9548 do {
9549 masm.lshift32(power, output);
9550 n--;
9551 } while (n > 0);
// Emit |Math.sqrt| for a double operand.
9554 void CodeGenerator::visitSqrtD(LSqrtD* ins) {
9555 FloatRegister input = ToFloatRegister(ins->input());
9556 FloatRegister output = ToFloatRegister(ins->output());
9557 masm.sqrtDouble(input, output);
// Emit |Math.sqrt| for a float32 operand.
9560 void CodeGenerator::visitSqrtF(LSqrtF* ins) {
9561 FloatRegister input = ToFloatRegister(ins->input());
9562 FloatRegister output = ToFloatRegister(ins->output());
9563 masm.sqrtFloat32(input, output);
// Emit |Math.sign| for an int32 operand (result is -1, 0, or 1).
9566 void CodeGenerator::visitSignI(LSignI* ins) {
9567 Register input = ToRegister(ins->input());
9568 Register output = ToRegister(ins->output());
9569 masm.signInt32(input, output);
// Emit |Math.sign| for a double operand with a double result.
9572 void CodeGenerator::visitSignD(LSignD* ins) {
9573 FloatRegister input = ToFloatRegister(ins->input());
9574 FloatRegister output = ToFloatRegister(ins->output());
9575 masm.signDouble(input, output);
// Emit |Math.sign| for a double operand with an int32 result. The helper
// jumps to |bail| when the result can't be represented as an int32 (e.g.
// NaN or negative-zero inputs — confirm exact cases in signDoubleToInt32),
// which triggers a bailout.
9578 void CodeGenerator::visitSignDI(LSignDI* ins) {
9579 FloatRegister input = ToFloatRegister(ins->input());
9580 FloatRegister temp = ToFloatRegister(ins->temp0());
9581 Register output = ToRegister(ins->output());
9583 Label bail;
9584 masm.signDoubleToInt32(input, output, temp, &bail);
9585 bailoutFrom(&bail, ins->snapshot());
// Emit a unary double math function (sin, cos, log, ...) by calling the
// corresponding C function pointer through the ABI. The result lands in
// ReturnDoubleReg.
9588 void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) {
9589 FloatRegister input = ToFloatRegister(ins->input());
9590 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9592 UnaryMathFunction fun = ins->mir()->function();
9593 UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);
9595 masm.setupAlignedABICall();
9597 masm.passABIArg(input, MoveOp::DOUBLE);
// The function pointer is only known at runtime, hence DynamicFunction.
9598 masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
9599 MoveOp::DOUBLE);
// Emit a unary float32 math function by dispatching on the MIR function kind
// and calling the matching C float function through the ABI. Only floor,
// round, trunc, and ceil are supported here. The result lands in
// ReturnFloat32Reg.
9602 void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) {
9603 FloatRegister input = ToFloatRegister(ins->input());
9604 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);
9606 masm.setupAlignedABICall();
9607 masm.passABIArg(input, MoveOp::FLOAT32);
9609 using Fn = float (*)(float x);
9610 Fn funptr = nullptr;
9611 CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check;
9612 switch (ins->mir()->function()) {
9613 case UnaryMathFunction::Floor:
9614 funptr = floorf;
// floorf/ceilf come from the C library, which isn't annotated for the
// unsafe-call checker, so the check must be relaxed for them.
9615 check = CheckUnsafeCallWithABI::DontCheckOther;
9616 break;
9617 case UnaryMathFunction::Round:
9618 funptr = math_roundf_impl;
9619 break;
9620 case UnaryMathFunction::Trunc:
9621 funptr = math_truncf_impl;
9622 break;
9623 case UnaryMathFunction::Ceil:
9624 funptr = ceilf;
9625 check = CheckUnsafeCallWithABI::DontCheckOther;
9626 break;
9627 default:
9628 MOZ_CRASH("Unknown or unsupported float32 math function");
9631 masm.callWithABI(DynamicFunction<Fn>(funptr), MoveOp::FLOAT32, check);
// Emit double % double for JS code by calling NumberMod through the ABI.
// Wasm uses visitWasmBuiltinModD below instead, hence the assertion.
9634 void CodeGenerator::visitModD(LModD* ins) {
9635 MOZ_ASSERT(!gen->compilingWasm());
9637 FloatRegister lhs = ToFloatRegister(ins->lhs());
9638 FloatRegister rhs = ToFloatRegister(ins->rhs());
9640 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9642 using Fn = double (*)(double a, double b);
9643 masm.setupAlignedABICall();
9644 masm.passABIArg(lhs, MoveOp::DOUBLE);
9645 masm.passABIArg(rhs, MoveOp::DOUBLE);
9646 masm.callWithABI<Fn, NumberMod>(MoveOp::DOUBLE);
// Emit double % constant where the constant divisor is a power of two,
// entirely inline (no ABI call). See the in-body comments for the formula
// and the subnormal-input fast path.
9649 void CodeGenerator::visitModPowTwoD(LModPowTwoD* ins) {
9650 FloatRegister lhs = ToFloatRegister(ins->lhs());
9651 uint32_t divisor = ins->divisor();
9652 MOZ_ASSERT(mozilla::IsPowerOfTwo(divisor));
9654 FloatRegister output = ToFloatRegister(ins->output());
9656 // Compute |n % d| using |copysign(n - (d * trunc(n / d)), n)|.
9658 // This doesn't work if |d| isn't a power of two, because we may lose too much
9659 // precision. For example |Number.MAX_VALUE % 3 == 2|, but
9660 // |3 * trunc(Number.MAX_VALUE / 3) == Infinity|.
9662 Label done;
9664 ScratchDoubleScope scratch(masm);
9666 // Subnormals can lead to performance degradation, which can make calling
9667 // |fmod| faster than this inline implementation. Work around this issue by
9668 // directly returning the input for any value in the interval ]-1, +1[.
9669 Label notSubnormal;
9670 masm.loadConstantDouble(1.0, scratch);
9671 masm.loadConstantDouble(-1.0, output);
9672 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, lhs, scratch,
9673 &notSubnormal);
9674 masm.branchDouble(Assembler::DoubleLessThanOrEqual, lhs, output,
9675 &notSubnormal);
// |lhs| is in ]-1, +1[ (or NaN), so |lhs % d == lhs| for any integer d.
9677 masm.moveDouble(lhs, output);
9678 masm.jump(&done);
9680 masm.bind(&notSubnormal);
9682 if (divisor == 1) {
9683 // The pattern |n % 1 == 0| is used to detect integer numbers. We can skip
9684 // the multiplication by one in this case.
9685 masm.moveDouble(lhs, output);
9686 masm.nearbyIntDouble(RoundingMode::TowardsZero, output, scratch);
9687 masm.subDouble(scratch, output);
9688 } else {
// scratch = trunc(lhs / d) * d, computed via multiplication by 1/d, which
// is exact because d is a power of two.
9689 masm.loadConstantDouble(1.0 / double(divisor), scratch);
9690 masm.loadConstantDouble(double(divisor), output);
9692 masm.mulDouble(lhs, scratch);
9693 masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
9694 masm.mulDouble(output, scratch);
9696 masm.moveDouble(lhs, output);
9697 masm.subDouble(scratch, output);
// Copy the sign of |lhs| so that e.g. |-0 % d| stays -0.
9701 masm.copySignDouble(output, lhs, output);
9702 masm.bind(&done);
// Emit double % double for wasm by calling the ModD builtin through the wasm
// ABI. InstanceReg is saved around the call and its stack slot offset is
// passed so the callee can locate the instance.
9705 void CodeGenerator::visitWasmBuiltinModD(LWasmBuiltinModD* ins) {
9706 masm.Push(InstanceReg);
9707 int32_t framePushedAfterInstance = masm.framePushed();
9709 FloatRegister lhs = ToFloatRegister(ins->lhs());
9710 FloatRegister rhs = ToFloatRegister(ins->rhs());
9712 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9714 masm.setupWasmABICall();
9715 masm.passABIArg(lhs, MoveOp::DOUBLE);
9716 masm.passABIArg(rhs, MoveOp::DOUBLE);
9718 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
9719 masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD,
9720 mozilla::Some(instanceOffset), MoveOp::DOUBLE);
9722 masm.Pop(InstanceReg);
9725 masm.Pop(InstanceReg);
9725 void CodeGenerator::visitBigIntAdd(LBigIntAdd* ins) {
9726 Register lhs = ToRegister(ins->lhs());
9727 Register rhs = ToRegister(ins->rhs());
9728 Register temp1 = ToRegister(ins->temp1());
9729 Register temp2 = ToRegister(ins->temp2());
9730 Register output = ToRegister(ins->output());
9732 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9733 auto* ool = oolCallVM<Fn, BigInt::add>(ins, ArgList(lhs, rhs),
9734 StoreRegisterTo(output));
9736 // 0n + x == x
9737 Label lhsNonZero;
9738 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
9739 masm.movePtr(rhs, output);
9740 masm.jump(ool->rejoin());
9741 masm.bind(&lhsNonZero);
9743 // x + 0n == x
9744 Label rhsNonZero;
9745 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
9746 masm.movePtr(lhs, output);
9747 masm.jump(ool->rejoin());
9748 masm.bind(&rhsNonZero);
9750 // Call into the VM when either operand can't be loaded into a pointer-sized
9751 // register.
9752 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
9753 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
9755 masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
9757 // Create and return the result.
9758 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
9759 masm.initializeBigInt(output, temp1);
9761 masm.bind(ool->rejoin());
// Inline BigInt subtraction on single-digit values, with an out-of-line VM
// fallback (BigInt::sub) for multi-digit operands or overflow.
9764 void CodeGenerator::visitBigIntSub(LBigIntSub* ins) {
9765 Register lhs = ToRegister(ins->lhs());
9766 Register rhs = ToRegister(ins->rhs());
9767 Register temp1 = ToRegister(ins->temp1());
9768 Register temp2 = ToRegister(ins->temp2());
9769 Register output = ToRegister(ins->output());
9771 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9772 auto* ool = oolCallVM<Fn, BigInt::sub>(ins, ArgList(lhs, rhs),
9773 StoreRegisterTo(output));
9775 // x - 0n == x
// Note: unlike addition there's no |0n - x| fast path, since that would
// require negating |x| rather than reusing it.
9776 Label rhsNonZero;
9777 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
9778 masm.movePtr(lhs, output);
9779 masm.jump(ool->rejoin());
9780 masm.bind(&rhsNonZero);
9782 // Call into the VM when either operand can't be loaded into a pointer-sized
9783 // register.
9784 masm.loadBigInt(lhs, temp1, ool->entry());
9785 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
9787 masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
9789 // Create and return the result.
9790 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
9791 masm.initializeBigInt(output, temp1);
9793 masm.bind(ool->rejoin());
// Inline BigInt multiplication on single-digit values, with an out-of-line
// VM fallback (BigInt::mul) for multi-digit operands or overflow.
9796 void CodeGenerator::visitBigIntMul(LBigIntMul* ins) {
9797 Register lhs = ToRegister(ins->lhs());
9798 Register rhs = ToRegister(ins->rhs());
9799 Register temp1 = ToRegister(ins->temp1());
9800 Register temp2 = ToRegister(ins->temp2());
9801 Register output = ToRegister(ins->output());
9803 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9804 auto* ool = oolCallVM<Fn, BigInt::mul>(ins, ArgList(lhs, rhs),
9805 StoreRegisterTo(output));
9807 // 0n * x == 0n
9808 Label lhsNonZero;
9809 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
9810 masm.movePtr(lhs, output);
9811 masm.jump(ool->rejoin());
9812 masm.bind(&lhsNonZero);
9814 // x * 0n == 0n
9815 Label rhsNonZero;
9816 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
9817 masm.movePtr(rhs, output);
9818 masm.jump(ool->rejoin());
9819 masm.bind(&rhsNonZero);
9821 // Call into the VM when either operand can't be loaded into a pointer-sized
9822 // register.
9823 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
9824 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
// Pointer-sized multiply; overflow means the product needs more digits.
9826 masm.branchMulPtr(Assembler::Overflow, temp2, temp1, ool->entry());
9828 // Create and return the result.
9829 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
9830 masm.initializeBigInt(output, temp1);
9832 masm.bind(ool->rejoin());
// Inline BigInt division on single-digit values, with an out-of-line VM
// fallback (BigInt::div). The VM call also reports the division-by-zero
// error and handles the INT_MIN / -1 overflow case.
9835 void CodeGenerator::visitBigIntDiv(LBigIntDiv* ins) {
9836 Register lhs = ToRegister(ins->lhs());
9837 Register rhs = ToRegister(ins->rhs());
9838 Register temp1 = ToRegister(ins->temp1());
9839 Register temp2 = ToRegister(ins->temp2());
9840 Register output = ToRegister(ins->output());
9842 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9843 auto* ool = oolCallVM<Fn, BigInt::div>(ins, ArgList(lhs, rhs),
9844 StoreRegisterTo(output));
9846 // x / 0 throws an error.
9847 if (ins->mir()->canBeDivideByZero()) {
9848 masm.branchIfBigIntIsZero(rhs, ool->entry());
9851 // 0n / x == 0n
9852 Label lhsNonZero;
9853 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
9854 masm.movePtr(lhs, output);
9855 masm.jump(ool->rejoin());
9856 masm.bind(&lhsNonZero);
9858 // Call into the VM when either operand can't be loaded into a pointer-sized
9859 // register.
9860 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
9861 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
9863 // |BigInt::div()| returns |lhs| for |lhs / 1n|, which means there's no
9864 // allocation which might trigger a minor GC to free up nursery space. This
9865 // requires us to apply the same optimization here, otherwise we'd end up with
9866 // always entering the OOL call, because the nursery is never evicted.
9867 Label notOne;
9868 masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(1), &notOne);
9869 masm.movePtr(lhs, output);
9870 masm.jump(ool->rejoin());
9871 masm.bind(&notOne);
// The signed minimum for one BigInt digit (INT32_MIN or INT64_MIN).
9873 static constexpr auto DigitMin = std::numeric_limits<
9874 mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
9876 // Handle an integer overflow from INT{32,64}_MIN / -1.
9877 Label notOverflow;
9878 masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
9879 masm.branchPtr(Assembler::Equal, temp2, ImmWord(-1), ool->entry());
9880 masm.bind(&notOverflow);
9882 emitBigIntDiv(ins, temp1, temp2, output, ool->entry());
9884 masm.bind(ool->rejoin());
// Inline BigInt modulo on single-digit values, with an out-of-line VM
// fallback (BigInt::mod). The VM call also reports the modulo-by-zero error.
9887 void CodeGenerator::visitBigIntMod(LBigIntMod* ins) {
9888 Register lhs = ToRegister(ins->lhs());
9889 Register rhs = ToRegister(ins->rhs());
9890 Register temp1 = ToRegister(ins->temp1());
9891 Register temp2 = ToRegister(ins->temp2());
9892 Register output = ToRegister(ins->output());
9894 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9895 auto* ool = oolCallVM<Fn, BigInt::mod>(ins, ArgList(lhs, rhs),
9896 StoreRegisterTo(output));
9898 // x % 0 throws an error.
9899 if (ins->mir()->canBeDivideByZero()) {
9900 masm.branchIfBigIntIsZero(rhs, ool->entry());
9903 // 0n % x == 0n
9904 Label lhsNonZero;
9905 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
9906 masm.movePtr(lhs, output);
9907 masm.jump(ool->rejoin());
9908 masm.bind(&lhsNonZero);
9910 // Call into the VM when either operand can't be loaded into a pointer-sized
9911 // register.
// Absolute values are loaded first so the |abs(lhs) < abs(rhs)| fast path
// below can use an unsigned comparison.
9912 masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
9913 masm.loadBigIntAbsolute(rhs, temp2, ool->entry());
9915 // Similar to the case for BigInt division, we must apply the same allocation
9916 // optimizations as performed in |BigInt::mod()|.
9917 Label notBelow;
9918 masm.branchPtr(Assembler::AboveOrEqual, temp1, temp2, &notBelow);
9919 masm.movePtr(lhs, output);
9920 masm.jump(ool->rejoin());
9921 masm.bind(&notBelow);
9923 // Convert both digits to signed pointer-sized values.
9924 masm.bigIntDigitToSignedPtr(lhs, temp1, ool->entry());
9925 masm.bigIntDigitToSignedPtr(rhs, temp2, ool->entry());
// The signed minimum for one BigInt digit (INT32_MIN or INT64_MIN).
9927 static constexpr auto DigitMin = std::numeric_limits<
9928 mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
9930 // Handle an integer overflow from INT{32,64}_MIN / -1.
// |DigitMin % -1| is 0, so substitute 0 for the dividend instead of
// performing the (trapping) division.
9931 Label notOverflow;
9932 masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
9933 masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(-1), &notOverflow);
9934 masm.movePtr(ImmWord(0), temp1);
9935 masm.bind(&notOverflow);
9937 emitBigIntMod(ins, temp1, temp2, output, ool->entry());
9939 masm.bind(ool->rejoin());
// Inline BigInt exponentiation for single-digit operands via square-and-
// multiply, with an out-of-line VM fallback (BigInt::pow) for negative
// exponents, large operands, and intermediate overflow.
9942 void CodeGenerator::visitBigIntPow(LBigIntPow* ins) {
9943 Register lhs = ToRegister(ins->lhs());
9944 Register rhs = ToRegister(ins->rhs());
9945 Register temp1 = ToRegister(ins->temp1());
9946 Register temp2 = ToRegister(ins->temp2());
9947 Register output = ToRegister(ins->output());
9949 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9950 auto* ool = oolCallVM<Fn, BigInt::pow>(ins, ArgList(lhs, rhs),
9951 StoreRegisterTo(output));
9953 // x ** -y throws an error.
9954 if (ins->mir()->canBeNegativeExponent()) {
9955 masm.branchIfBigIntIsNegative(rhs, ool->entry());
// Register roles for the square-and-multiply loop below.
9958 Register dest = temp1;
9959 Register base = temp2;
9960 Register exponent = output;
9962 Label done;
9963 masm.movePtr(ImmWord(1), dest); // p = 1
9965 // 1n ** y == 1n
9966 // -1n ** y == 1n when y is even
9967 // -1n ** y == -1n when y is odd
9968 Label lhsNotOne;
9969 masm.branch32(Assembler::Above, Address(lhs, BigInt::offsetOfLength()),
9970 Imm32(1), &lhsNotOne);
9971 masm.loadFirstBigIntDigitOrZero(lhs, base);
9972 masm.branchPtr(Assembler::NotEqual, base, Imm32(1), &lhsNotOne);
9974 masm.loadFirstBigIntDigitOrZero(rhs, exponent);
// For -1n with an even exponent fall through to |done|, which returns the
// precomputed 1n in |dest|; otherwise return |lhs| itself.
9976 Label lhsNonNegative;
9977 masm.branchIfBigIntIsNonNegative(lhs, &lhsNonNegative);
9978 masm.branchTestPtr(Assembler::Zero, exponent, Imm32(1), &done);
9979 masm.bind(&lhsNonNegative);
9980 masm.movePtr(lhs, output);
9981 masm.jump(ool->rejoin());
9983 masm.bind(&lhsNotOne);
9985 // x ** 0n == 1n
9986 masm.branchIfBigIntIsZero(rhs, &done);
9988 // 0n ** y == 0n with y != 0n
9989 Label lhsNonZero;
9990 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
9992 masm.movePtr(lhs, output);
9993 masm.jump(ool->rejoin());
9995 masm.bind(&lhsNonZero);
9997 // Call into the VM when the exponent can't be loaded into a pointer-sized
9998 // register.
9999 masm.loadBigIntAbsolute(rhs, exponent, ool->entry());
10001 // x ** y with x > 1 and y >= DigitBits can't be pointer-sized.
10002 masm.branchPtr(Assembler::AboveOrEqual, exponent, Imm32(BigInt::DigitBits),
10003 ool->entry());
10005 // x ** 1n == x
10006 Label rhsNotOne;
10007 masm.branch32(Assembler::NotEqual, exponent, Imm32(1), &rhsNotOne);
10009 masm.movePtr(lhs, output);
10010 masm.jump(ool->rejoin());
10012 masm.bind(&rhsNotOne);
10014 // Call into the VM when the base operand can't be loaded into a pointer-sized
10015 // register.
10016 masm.loadBigIntNonZero(lhs, base, ool->entry());
10018 // MacroAssembler::pow32() adjusted to work on pointer-sized registers.
10020 // m = base
10021 // n = exponent
10023 Label start, loop;
10024 masm.jump(&start);
10025 masm.bind(&loop);
10027 // m *= m
// Any overflow during squaring or multiplying falls back to the VM call.
10028 masm.branchMulPtr(Assembler::Overflow, base, base, ool->entry());
10030 masm.bind(&start);
10032 // if ((n & 1) != 0) p *= m
10033 Label even;
10034 masm.branchTest32(Assembler::Zero, exponent, Imm32(1), &even);
10035 masm.branchMulPtr(Assembler::Overflow, base, dest, ool->entry());
10036 masm.bind(&even);
10038 // n >>= 1
10039 // if (n == 0) return p
10040 masm.branchRshift32(Assembler::NonZero, Imm32(1), exponent, &loop);
10043 MOZ_ASSERT(temp1 == dest);
10045 // Create and return the result.
10046 masm.bind(&done);
10047 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10048 masm.initializeBigInt(output, temp1);
10050 masm.bind(ool->rejoin());
// Inline BigInt bitwise AND on single-digit values, with an out-of-line VM
// fallback (BigInt::bitAnd) for multi-digit operands.
10053 void CodeGenerator::visitBigIntBitAnd(LBigIntBitAnd* ins) {
10054 Register lhs = ToRegister(ins->lhs());
10055 Register rhs = ToRegister(ins->rhs());
10056 Register temp1 = ToRegister(ins->temp1());
10057 Register temp2 = ToRegister(ins->temp2());
10058 Register output = ToRegister(ins->output());
10060 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10061 auto* ool = oolCallVM<Fn, BigInt::bitAnd>(ins, ArgList(lhs, rhs),
10062 StoreRegisterTo(output));
10064 // 0n & x == 0n
10065 Label lhsNonZero;
10066 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10067 masm.movePtr(lhs, output);
10068 masm.jump(ool->rejoin());
10069 masm.bind(&lhsNonZero);
10071 // x & 0n == 0n
10072 Label rhsNonZero;
10073 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10074 masm.movePtr(rhs, output);
10075 masm.jump(ool->rejoin());
10076 masm.bind(&rhsNonZero);
10078 // Call into the VM when either operand can't be loaded into a pointer-sized
10079 // register.
10080 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10081 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10083 masm.andPtr(temp2, temp1);
10085 // Create and return the result.
10086 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10087 masm.initializeBigInt(output, temp1);
10089 masm.bind(ool->rejoin());
// Inline BigInt bitwise OR on single-digit values, with an out-of-line VM
// fallback (BigInt::bitOr) for multi-digit operands.
10092 void CodeGenerator::visitBigIntBitOr(LBigIntBitOr* ins) {
10093 Register lhs = ToRegister(ins->lhs());
10094 Register rhs = ToRegister(ins->rhs());
10095 Register temp1 = ToRegister(ins->temp1());
10096 Register temp2 = ToRegister(ins->temp2());
10097 Register output = ToRegister(ins->output());
10099 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10100 auto* ool = oolCallVM<Fn, BigInt::bitOr>(ins, ArgList(lhs, rhs),
10101 StoreRegisterTo(output));
10103 // 0n | x == x
10104 Label lhsNonZero;
10105 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10106 masm.movePtr(rhs, output);
10107 masm.jump(ool->rejoin());
10108 masm.bind(&lhsNonZero);
10110 // x | 0n == x
10111 Label rhsNonZero;
10112 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10113 masm.movePtr(lhs, output);
10114 masm.jump(ool->rejoin());
10115 masm.bind(&rhsNonZero);
10117 // Call into the VM when either operand can't be loaded into a pointer-sized
10118 // register.
10119 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10120 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10122 masm.orPtr(temp2, temp1);
10124 // Create and return the result.
10125 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10126 masm.initializeBigInt(output, temp1);
10128 masm.bind(ool->rejoin());
// Inline BigInt bitwise XOR on single-digit values, with an out-of-line VM
// fallback (BigInt::bitXor) for multi-digit operands.
10131 void CodeGenerator::visitBigIntBitXor(LBigIntBitXor* ins) {
10132 Register lhs = ToRegister(ins->lhs());
10133 Register rhs = ToRegister(ins->rhs());
10134 Register temp1 = ToRegister(ins->temp1());
10135 Register temp2 = ToRegister(ins->temp2());
10136 Register output = ToRegister(ins->output());
10138 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10139 auto* ool = oolCallVM<Fn, BigInt::bitXor>(ins, ArgList(lhs, rhs),
10140 StoreRegisterTo(output));
10142 // 0n ^ x == x
10143 Label lhsNonZero;
10144 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10145 masm.movePtr(rhs, output);
10146 masm.jump(ool->rejoin());
10147 masm.bind(&lhsNonZero);
10149 // x ^ 0n == x
10150 Label rhsNonZero;
10151 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10152 masm.movePtr(lhs, output);
10153 masm.jump(ool->rejoin());
10154 masm.bind(&rhsNonZero);
10156 // Call into the VM when either operand can't be loaded into a pointer-sized
10157 // register.
10158 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10159 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10161 masm.xorPtr(temp2, temp1);
10163 // Create and return the result.
10164 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10165 masm.initializeBigInt(output, temp1);
10167 masm.bind(ool->rejoin());
// Inline BigInt left shift for single-digit values, with an out-of-line VM
// fallback (BigInt::lsh). Negative shift counts are handled as right shifts;
// the magnitude is computed on absolute values and the sign bit is applied
// at the end.
10170 void CodeGenerator::visitBigIntLsh(LBigIntLsh* ins) {
10171 Register lhs = ToRegister(ins->lhs());
10172 Register rhs = ToRegister(ins->rhs());
10173 Register temp1 = ToRegister(ins->temp1());
10174 Register temp2 = ToRegister(ins->temp2());
10175 Register temp3 = ToRegister(ins->temp3());
10176 Register output = ToRegister(ins->output());
10178 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10179 auto* ool = oolCallVM<Fn, BigInt::lsh>(ins, ArgList(lhs, rhs),
10180 StoreRegisterTo(output));
10182 // 0n << x == 0n
10183 Label lhsNonZero;
10184 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10185 masm.movePtr(lhs, output);
10186 masm.jump(ool->rejoin());
10187 masm.bind(&lhsNonZero);
10189 // x << 0n == x
10190 Label rhsNonZero;
10191 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10192 masm.movePtr(lhs, output);
10193 masm.jump(ool->rejoin());
10194 masm.bind(&rhsNonZero);
10196 // Inline |BigInt::lsh| for the case when |lhs| contains a single digit.
10198 Label rhsTooLarge;
10199 masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);
10201 // Call into the VM when the left-hand side operand can't be loaded into a
10202 // pointer-sized register.
10203 masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
10205 // Handle shifts exceeding |BigInt::DigitBits| first.
10206 Label shift, create;
10207 masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
10209 masm.bind(&rhsTooLarge);
10211 // x << DigitBits with x != 0n always exceeds pointer-sized storage.
10212 masm.branchIfBigIntIsNonNegative(rhs, ool->entry());
10214 // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
// |temp1| holds the absolute value of the result here: 0 for non-negative
// lhs, 1 (i.e. magnitude of -1n) for negative lhs.
10215 masm.move32(Imm32(0), temp1);
10216 masm.branchIfBigIntIsNonNegative(lhs, &create);
10217 masm.move32(Imm32(1), temp1);
10218 masm.jump(&create);
10220 masm.bind(&shift);
10222 Label nonNegative;
10223 masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
// Negative shift count: keep the original digit in |temp3| so the
// rounding check below can inspect the shifted-out bits.
10225 masm.movePtr(temp1, temp3);
10227 // |x << -y| is computed as |x >> y|.
10228 masm.rshiftPtr(temp2, temp1);
10230 // For negative numbers, round down if any bit was shifted out.
10231 masm.branchIfBigIntIsNonNegative(lhs, &create);
10233 // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
10234 masm.movePtr(ImmWord(-1), output);
10235 masm.lshiftPtr(temp2, output);
10236 masm.notPtr(output);
10238 // Add plus one when |(lhs.digit(0) & mask) != 0|.
10239 masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
10240 masm.addPtr(ImmWord(1), temp1);
10241 masm.jump(&create);
10243 masm.bind(&nonNegative);
// Positive shift count: stash it in |temp3| while |temp2| is reused to
// test whether any high bits would be shifted out.
10245 masm.movePtr(temp2, temp3);
10247 // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
10248 masm.negPtr(temp2);
10249 masm.addPtr(Imm32(BigInt::DigitBits), temp2);
10250 masm.movePtr(temp1, output);
10251 masm.rshiftPtr(temp2, output);
10253 // Call into the VM when any bit will be shifted out.
10254 masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());
10256 masm.movePtr(temp3, temp2);
10257 masm.lshiftPtr(temp2, temp1);
10259 masm.bind(&create);
10261 // Create and return the result.
10262 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10263 masm.initializeBigIntAbsolute(output, temp1);
10265 // Set the sign bit when the left-hand side is negative.
10266 masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
10267 masm.or32(Imm32(BigInt::signBitMask()),
10268 Address(output, BigInt::offsetOfFlags()));
10270 masm.bind(ool->rejoin());
// Inline BigInt right shift for single-digit values, with an out-of-line VM
// fallback (BigInt::rsh). Mirror image of visitBigIntLsh above: negative
// shift counts are handled as left shifts, the magnitude is computed on
// absolute values, and the sign bit is applied at the end.
10273 void CodeGenerator::visitBigIntRsh(LBigIntRsh* ins) {
10274 Register lhs = ToRegister(ins->lhs());
10275 Register rhs = ToRegister(ins->rhs());
10276 Register temp1 = ToRegister(ins->temp1());
10277 Register temp2 = ToRegister(ins->temp2());
10278 Register temp3 = ToRegister(ins->temp3());
10279 Register output = ToRegister(ins->output());
10281 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10282 auto* ool = oolCallVM<Fn, BigInt::rsh>(ins, ArgList(lhs, rhs),
10283 StoreRegisterTo(output));
10285 // 0n >> x == 0n
10286 Label lhsNonZero;
10287 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10288 masm.movePtr(lhs, output);
10289 masm.jump(ool->rejoin());
10290 masm.bind(&lhsNonZero);
10292 // x >> 0n == x
10293 Label rhsNonZero;
10294 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10295 masm.movePtr(lhs, output);
10296 masm.jump(ool->rejoin());
10297 masm.bind(&rhsNonZero);
10299 // Inline |BigInt::rsh| for the case when |lhs| contains a single digit.
10301 Label rhsTooLarge;
10302 masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);
10304 // Call into the VM when the left-hand side operand can't be loaded into a
10305 // pointer-sized register.
10306 masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
10308 // Handle shifts exceeding |BigInt::DigitBits| first.
10309 Label shift, create;
10310 masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
10312 masm.bind(&rhsTooLarge);
10314 // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
10315 masm.branchIfBigIntIsNegative(rhs, ool->entry());
10317 // x >> DigitBits is either 0n or -1n.
// |temp1| holds the absolute value of the result here: 0 for non-negative
// lhs, 1 (i.e. magnitude of -1n) for negative lhs.
10318 masm.move32(Imm32(0), temp1);
10319 masm.branchIfBigIntIsNonNegative(lhs, &create);
10320 masm.move32(Imm32(1), temp1);
10321 masm.jump(&create);
10323 masm.bind(&shift);
10325 Label nonNegative;
10326 masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
// Negative shift count: stash it in |temp3| while |temp2| is reused to
// test whether any high bits would be shifted out.
10328 masm.movePtr(temp2, temp3);
10330 // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
10331 masm.negPtr(temp2);
10332 masm.addPtr(Imm32(BigInt::DigitBits), temp2);
10333 masm.movePtr(temp1, output);
10334 masm.rshiftPtr(temp2, output);
10336 // Call into the VM when any bit will be shifted out.
10337 masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());
10339 // |x >> -y| is computed as |x << y|.
10340 masm.movePtr(temp3, temp2);
10341 masm.lshiftPtr(temp2, temp1);
10342 masm.jump(&create);
10344 masm.bind(&nonNegative);
// Positive shift count: keep the original digit in |temp3| so the
// rounding check below can inspect the shifted-out bits.
10346 masm.movePtr(temp1, temp3);
10348 masm.rshiftPtr(temp2, temp1);
10350 // For negative numbers, round down if any bit was shifted out.
10351 masm.branchIfBigIntIsNonNegative(lhs, &create);
10353 // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
10354 masm.movePtr(ImmWord(-1), output);
10355 masm.lshiftPtr(temp2, output);
10356 masm.notPtr(output);
10358 // Add plus one when |(lhs.digit(0) & mask) != 0|.
10359 masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
10360 masm.addPtr(ImmWord(1), temp1);
10362 masm.bind(&create);
10364 // Create and return the result.
10365 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10366 masm.initializeBigIntAbsolute(output, temp1);
10368 // Set the sign bit when the left-hand side is negative.
10369 masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
10370 masm.or32(Imm32(BigInt::signBitMask()),
10371 Address(output, BigInt::offsetOfFlags()));
10373 masm.bind(ool->rejoin());
// Inline BigInt increment (x + 1n) on single-digit values, with an
// out-of-line VM fallback (BigInt::inc) for multi-digit input or overflow.
10376 void CodeGenerator::visitBigIntIncrement(LBigIntIncrement* ins) {
10377 Register input = ToRegister(ins->input());
10378 Register temp1 = ToRegister(ins->temp1());
10379 Register temp2 = ToRegister(ins->temp2());
10380 Register output = ToRegister(ins->output());
10382 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10383 auto* ool =
10384 oolCallVM<Fn, BigInt::inc>(ins, ArgList(input), StoreRegisterTo(output));
10386 // Call into the VM when the input can't be loaded into a pointer-sized
10387 // register.
10388 masm.loadBigInt(input, temp1, ool->entry());
10389 masm.movePtr(ImmWord(1), temp2);
10391 masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10393 // Create and return the result.
10394 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10395 masm.initializeBigInt(output, temp1);
10397 masm.bind(ool->rejoin());
// Inline BigInt decrement (x - 1n) on single-digit values, with an
// out-of-line VM fallback (BigInt::dec) for multi-digit input or overflow.
10400 void CodeGenerator::visitBigIntDecrement(LBigIntDecrement* ins) {
10401 Register input = ToRegister(ins->input());
10402 Register temp1 = ToRegister(ins->temp1());
10403 Register temp2 = ToRegister(ins->temp2());
10404 Register output = ToRegister(ins->output());
10406 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10407 auto* ool =
10408 oolCallVM<Fn, BigInt::dec>(ins, ArgList(input), StoreRegisterTo(output));
10410 // Call into the VM when the input can't be loaded into a pointer-sized
10411 // register.
10412 masm.loadBigInt(input, temp1, ool->entry());
10413 masm.movePtr(ImmWord(1), temp2);
10415 masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10417 // Create and return the result.
10418 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10419 masm.initializeBigInt(output, temp1);
10421 masm.bind(ool->rejoin());
// Inline BigInt unary negation: copy the input (when its digits fit inline)
// and flip the sign flag. Falls back to the VM (BigInt::neg) for inputs with
// heap-allocated digits.
10424 void CodeGenerator::visitBigIntNegate(LBigIntNegate* ins) {
10425 Register input = ToRegister(ins->input());
10426 Register temp = ToRegister(ins->temp());
10427 Register output = ToRegister(ins->output());
10429 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10430 auto* ool =
10431 oolCallVM<Fn, BigInt::neg>(ins, ArgList(input), StoreRegisterTo(output));
10433 // -0n == 0n
10434 Label lhsNonZero;
10435 masm.branchIfBigIntIsNonZero(input, &lhsNonZero);
10436 masm.movePtr(input, output);
10437 masm.jump(ool->rejoin());
10438 masm.bind(&lhsNonZero);
10440 // Call into the VM when the input uses heap digits.
10441 masm.copyBigIntWithInlineDigits(input, output, temp, initialBigIntHeap(),
10442 ool->entry());
10444 // Flip the sign bit.
// The copy is freshly allocated, so mutating its flags in place is safe.
10445 masm.xor32(Imm32(BigInt::signBitMask()),
10446 Address(output, BigInt::offsetOfFlags()));
10448 masm.bind(ool->rejoin());
// Inline BigInt bitwise NOT on single-digit values, with an out-of-line VM
// fallback (BigInt::bitNot). Works on the absolute value and applies the
// sign at the end, matching the C++ implementation.
10451 void CodeGenerator::visitBigIntBitNot(LBigIntBitNot* ins) {
10452 Register input = ToRegister(ins->input());
10453 Register temp1 = ToRegister(ins->temp1());
10454 Register temp2 = ToRegister(ins->temp2());
10455 Register output = ToRegister(ins->output());
10457 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10458 auto* ool = oolCallVM<Fn, BigInt::bitNot>(ins, ArgList(input),
10459 StoreRegisterTo(output));
10461 masm.loadBigIntAbsolute(input, temp1, ool->entry());
10463 // This follows the C++ implementation because it let's us support the full
10464 // range [-2^64, 2^64 - 1] on 64-bit resp. [-2^32, 2^32 - 1] on 32-bit.
10465 Label nonNegative, done;
10466 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
10468 // ~(-x) == ~(~(x-1)) == x-1
10469 masm.subPtr(Imm32(1), temp1);
10470 masm.jump(&done);
10472 masm.bind(&nonNegative);
10474 // ~x == -x-1 == -(x+1)
// CarrySet (unsigned overflow of the absolute value) means the result needs
// another digit; let the VM handle it.
10475 masm.movePtr(ImmWord(1), temp2);
10476 masm.branchAddPtr(Assembler::CarrySet, temp2, temp1, ool->entry());
10478 masm.bind(&done);
10480 // Create and return the result.
10481 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10482 masm.initializeBigIntAbsolute(output, temp1);
10484 // Set the sign bit when the input is positive.
10485 masm.branchIfBigIntIsNegative(input, ool->rejoin());
10486 masm.or32(Imm32(BigInt::signBitMask()),
10487 Address(output, BigInt::offsetOfFlags()));
10489 masm.bind(ool->rejoin());
// Convert an int32 to a string in a given base. The base is either a
// compile-time constant (Imm32 arm) or a register; both arms fall back to
// the js::Int32ToStringWithBase VM call via the OOL path. Only the
// register arm passes the live volatile register set — presumably the
// helper needs to spill around an internal call in that case; confirm in
// MacroAssembler::loadInt32ToStringWithBase.
void CodeGenerator::visitInt32ToStringWithBase(LInt32ToStringWithBase* lir) {
  Register input = ToRegister(lir->input());
  RegisterOrInt32 base = ToRegisterOrInt32(lir->base());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  using Fn = JSString* (*)(JSContext*, int32_t, int32_t);
  if (base.is<Register>()) {
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, base.as<Register>()), StoreRegisterTo(output));

    LiveRegisterSet liveRegs = liveVolatileRegs(lir);
    masm.loadInt32ToStringWithBase(input, base.as<Register>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   liveRegs, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, Imm32(base.as<int32_t>())),
        StoreRegisterTo(output));

    masm.loadInt32ToStringWithBase(input, base.as<int32_t>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   ool->entry());
    masm.bind(ool->rejoin());
  }
}
// parseInt on a string input. Fast path: if the string caches an index
// value, use it directly as the int32 result (the debug code asserts the
// radix is 0 or 10, the only radixes for which this is valid). Otherwise
// call js::NumberParseInt in the VM.
void CodeGenerator::visitNumberParseInt(LNumberParseInt* lir) {
  Register string = ToRegister(lir->string());
  Register radix = ToRegister(lir->radix());
  ValueOperand output = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
  masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
  masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
  masm.bind(&ok);
#endif

  // Use indexed value as fast path if possible.
  Label vmCall, done;
  masm.loadStringIndexValue(string, temp, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, temp, output);
  masm.jump(&done);

  masm.bind(&vmCall);

  pushArg(radix);
  pushArg(string);

  using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
  callVM<Fn, js::NumberParseInt>(lir);

  masm.bind(&done);
}
// parseInt on a double input, specialized to truncation. Bails out of Ion
// whenever truncating the double doesn't match parseInt's semantics: NaN,
// results not representable as int32, and small values whose string form
// wouldn't simply truncate (negative fractions; magnitudes below
// DOUBLE_DECIMAL_IN_SHORTEST_LOW, which would stringify in exponential
// notation — NOTE(review): that rationale is inferred from the constant's
// name; confirm against js::NumberParseInt).
void CodeGenerator::visitDoubleParseInt(LDoubleParseInt* lir) {
  FloatRegister number = ToFloatRegister(lir->number());
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->temp0());

  Label bail;
  // NaN compares unordered with itself.
  masm.branchDouble(Assembler::DoubleUnordered, number, number, &bail);
  masm.branchTruncateDoubleToInt32(number, output, &bail);

  Label ok;
  masm.branch32(Assembler::NotEqual, output, Imm32(0), &ok);
  {
    // Truncation produced zero; only certain inputs may actually return 0.

    // Accept both +0 and -0 and return 0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleEqual, number, temp, &ok);

    // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
    masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, temp);
    masm.branchDouble(Assembler::DoubleLessThan, number, temp, &bail);
  }
  masm.bind(&ok);

  bailoutFrom(&bail, lir->snapshot());
}
10577 void CodeGenerator::visitFloor(LFloor* lir) {
10578 FloatRegister input = ToFloatRegister(lir->input());
10579 Register output = ToRegister(lir->output());
10581 Label bail;
10582 masm.floorDoubleToInt32(input, output, &bail);
10583 bailoutFrom(&bail, lir->snapshot());
10586 void CodeGenerator::visitFloorF(LFloorF* lir) {
10587 FloatRegister input = ToFloatRegister(lir->input());
10588 Register output = ToRegister(lir->output());
10590 Label bail;
10591 masm.floorFloat32ToInt32(input, output, &bail);
10592 bailoutFrom(&bail, lir->snapshot());
10595 void CodeGenerator::visitCeil(LCeil* lir) {
10596 FloatRegister input = ToFloatRegister(lir->input());
10597 Register output = ToRegister(lir->output());
10599 Label bail;
10600 masm.ceilDoubleToInt32(input, output, &bail);
10601 bailoutFrom(&bail, lir->snapshot());
10604 void CodeGenerator::visitCeilF(LCeilF* lir) {
10605 FloatRegister input = ToFloatRegister(lir->input());
10606 Register output = ToRegister(lir->output());
10608 Label bail;
10609 masm.ceilFloat32ToInt32(input, output, &bail);
10610 bailoutFrom(&bail, lir->snapshot());
10613 void CodeGenerator::visitRound(LRound* lir) {
10614 FloatRegister input = ToFloatRegister(lir->input());
10615 FloatRegister temp = ToFloatRegister(lir->temp0());
10616 Register output = ToRegister(lir->output());
10618 Label bail;
10619 masm.roundDoubleToInt32(input, output, temp, &bail);
10620 bailoutFrom(&bail, lir->snapshot());
10623 void CodeGenerator::visitRoundF(LRoundF* lir) {
10624 FloatRegister input = ToFloatRegister(lir->input());
10625 FloatRegister temp = ToFloatRegister(lir->temp0());
10626 Register output = ToRegister(lir->output());
10628 Label bail;
10629 masm.roundFloat32ToInt32(input, output, temp, &bail);
10630 bailoutFrom(&bail, lir->snapshot());
10633 void CodeGenerator::visitTrunc(LTrunc* lir) {
10634 FloatRegister input = ToFloatRegister(lir->input());
10635 Register output = ToRegister(lir->output());
10637 Label bail;
10638 masm.truncDoubleToInt32(input, output, &bail);
10639 bailoutFrom(&bail, lir->snapshot());
10642 void CodeGenerator::visitTruncF(LTruncF* lir) {
10643 FloatRegister input = ToFloatRegister(lir->input());
10644 Register output = ToRegister(lir->output());
10646 Label bail;
10647 masm.truncFloat32ToInt32(input, output, &bail);
10648 bailoutFrom(&bail, lir->snapshot());
// Compare two strings with any equality or relational op. The inline
// masm.compareStrings fast path handles the simple cases and jumps to the
// OOL VM call otherwise; the VM function (and its argument order) is
// selected per-op up front.
void CodeGenerator::visitCompareS(LCompareS* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Lt) {
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Le) {
    // Push the operands in reverse order for JSOp::Le:
    // - |left <= right| is implemented as |right >= left|.
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(right, left), StoreRegisterTo(output));
  } else if (op == JSOp::Gt) {
    // Push the operands in reverse order for JSOp::Gt:
    // - |left > right| is implemented as |right < left|.
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(right, left), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ge);
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(left, right), StoreRegisterTo(output));
  }

  // Inline fast path; falls back to the VM call through ool->entry().
  masm.compareStrings(op, left, right, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Load sizeof(T) bytes of raw character data as an integer of type T.
// Using memcpy (instead of a cast-and-dereference) keeps the load free of
// alignment and strict-aliasing problems; compilers typically fold it into
// a single move.
template <typename T, typename CharT>
static inline T CopyCharacters(const CharT* chars) {
  T bits{};
  std::memcpy(&bits, chars, sizeof(T));
  return bits;
}
10699 template <typename T>
10700 static inline T CopyCharacters(const JSLinearString* str, size_t index) {
10701 JS::AutoCheckCannotGC nogc;
10703 if (str->hasLatin1Chars()) {
10704 MOZ_ASSERT(index + sizeof(T) / sizeof(JS::Latin1Char) <= str->length());
10705 return CopyCharacters<T>(str->latin1Chars(nogc) + index);
10708 MOZ_ASSERT(sizeof(T) >= sizeof(char16_t));
10709 MOZ_ASSERT(index + sizeof(T) / sizeof(char16_t) <= str->length());
10710 return CopyCharacters<T>(str->twoByteChars(nogc) + index);
// Which end of the input the constant string is compared against:
// Forward compares a prefix, Backward compares a suffix.
enum class CompareDirection { Forward, Backward };

// Emit an inline comparison of |input|'s characters against the constant
// linear string |str|, writing the boolean result of |op| into |output|
// and jumping to |done| (or falling through on the single-instruction
// path). Cases that can't be handled inline — ropes and character-
// encoding mismatches — jump to |oolEntry|.
//
// NOTE: Clobbers the input when CompareDirection is backward.
static void CompareCharacters(MacroAssembler& masm, Register input,
                              const JSLinearString* str, Register output,
                              JSOp op, CompareDirection direction, Label* done,
                              Label* oolEntry) {
  MOZ_ASSERT(input != output);

  size_t length = str->length();
  MOZ_ASSERT(length > 0);

  CharEncoding encoding =
      str->hasLatin1Chars() ? CharEncoding::Latin1 : CharEncoding::TwoByte;
  size_t encodingSize = encoding == CharEncoding::Latin1
                            ? sizeof(JS::Latin1Char)
                            : sizeof(char16_t);
  size_t byteLength = length * encodingSize;

  // Take the OOL path when the string is a rope or has a different character
  // representation.
  masm.branchIfRope(input, oolEntry);
  if (encoding == CharEncoding::Latin1) {
    masm.branchTwoByteString(input, oolEntry);
  } else {
    JS::AutoCheckCannotGC nogc;
    if (mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
      // |str| is two-byte but Latin-1 representable, so a Latin-1 input
      // could still compare equal; defer that case to the OOL path.
      masm.branchLatin1String(input, oolEntry);
    } else {
      // This case was already handled in the caller.
#ifdef DEBUG
      Label ok;
      masm.branchTwoByteString(input, &ok);
      masm.assumeUnreachable("Unexpected Latin-1 string");
      masm.bind(&ok);
#endif
    }
  }

#ifdef DEBUG
  {
    Label ok;
    masm.branch32(Assembler::AboveOrEqual,
                  Address(input, JSString::offsetOfLength()), Imm32(length),
                  &ok);
    masm.assumeUnreachable("Input mustn't be smaller than search string");
    masm.bind(&ok);
  }
#endif

  // Load the input string's characters.
  Register stringChars = output;
  masm.loadStringChars(input, stringChars, encoding);

  if (direction == CompareDirection::Backward) {
    // Advance the chars pointer to the input's trailing |length|
    // characters. This clobbers |input| (see the NOTE above).
    masm.loadStringLength(input, input);
    masm.sub32(Imm32(length), input);

    masm.addToCharPtr(stringChars, input, encoding);
  }

  // Prefer a single compare-and-set instruction if possible.
  if (byteLength == 1 || byteLength == 2 || byteLength == 4 ||
      byteLength == 8) {
    auto cond = JSOpToCondition(op, /* isSigned = */ false);

    Address addr(stringChars, 0);
    switch (byteLength) {
      case 8: {
        auto x = CopyCharacters<uint64_t>(str, 0);
        masm.cmp64Set(cond, addr, Imm64(x), output);
        break;
      }
      case 4: {
        auto x = CopyCharacters<uint32_t>(str, 0);
        masm.cmp32Set(cond, addr, Imm32(x), output);
        break;
      }
      case 2: {
        auto x = CopyCharacters<uint16_t>(str, 0);
        masm.cmp16Set(cond, addr, Imm32(x), output);
        break;
      }
      case 1: {
        auto x = CopyCharacters<uint8_t>(str, 0);
        masm.cmp8Set(cond, addr, Imm32(x), output);
        break;
      }
    }
  } else {
    // Multi-chunk comparison: compare the constant in the widest chunks
    // possible (8, then 4, 2, 1 bytes), branching to the not-equal result
    // on the first mismatch.
    Label setNotEqualResult;

    size_t pos = 0;
    for (size_t stride : {8, 4, 2, 1}) {
      while (byteLength >= stride) {
        Address addr(stringChars, pos * encodingSize);
        switch (stride) {
          case 8: {
            auto x = CopyCharacters<uint64_t>(str, pos);
            masm.branch64(Assembler::NotEqual, addr, Imm64(x),
                          &setNotEqualResult);
            break;
          }
          case 4: {
            auto x = CopyCharacters<uint32_t>(str, pos);
            masm.branch32(Assembler::NotEqual, addr, Imm32(x),
                          &setNotEqualResult);
            break;
          }
          case 2: {
            auto x = CopyCharacters<uint16_t>(str, pos);
            masm.branch16(Assembler::NotEqual, addr, Imm32(x),
                          &setNotEqualResult);
            break;
          }
          case 1: {
            auto x = CopyCharacters<uint8_t>(str, pos);
            masm.branch8(Assembler::NotEqual, addr, Imm32(x),
                          &setNotEqualResult);
            break;
          }
        }

        byteLength -= stride;
        pos += stride / encodingSize;
      }

      // Prefer a single comparison for trailing bytes instead of doing
      // multiple consecutive comparisons.
      //
      // For example when comparing against the string "example", emit two
      // four-byte comparisons against "exam" and "mple" instead of doing
      // three comparisons against "exam", "pl", and finally "e".
      if (pos > 0 && byteLength > stride / 2) {
        MOZ_ASSERT(stride == 8 || stride == 4);

        // Step the read position back so the final full-stride read still
        // ends exactly at the last byte (overlapping the previous chunk).
        size_t prev = pos - (stride - byteLength) / encodingSize;
        Address addr(stringChars, prev * encodingSize);
        switch (stride) {
          case 8: {
            auto x = CopyCharacters<uint64_t>(str, prev);
            masm.branch64(Assembler::NotEqual, addr, Imm64(x),
                          &setNotEqualResult);
            break;
          }
          case 4: {
            auto x = CopyCharacters<uint32_t>(str, prev);
            masm.branch32(Assembler::NotEqual, addr, Imm32(x),
                          &setNotEqualResult);
            break;
          }
        }

        // Break from the loop, because we've finished the complete string.
        break;
      }
    }

    // Falls through if both strings are equal.

    masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
    masm.jump(done);

    masm.bind(&setNotEqualResult);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  }
}
// Equality comparison of a string against a non-empty compile-time
// constant linear string. Tries several inline rejections (pointer
// identity, atom-vs-atom, encoding, length) before comparing characters;
// anything not decidable inline goes to the StringsEqual VM call.
void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsEqualityOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() > 0);

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ne || op == JSOp::StrictNe);
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  }

  Label compareChars;
  {
    Label notPointerEqual;

    // If operands point to the same instance, the strings are trivially equal.
    masm.branchPtr(Assembler::NotEqual, input, ImmGCPtr(str), &notPointerEqual);
    masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
    masm.jump(ool->rejoin());

    masm.bind(&notPointerEqual);

    Label setNotEqualResult;
    if (str->isAtom()) {
      // Atoms cannot be equal to each other if they point to different strings.
      Imm32 atomBit(JSString::ATOM_BIT);
      masm.branchTest32(Assembler::NonZero,
                        Address(input, JSString::offsetOfFlags()), atomBit,
                        &setNotEqualResult);
    }

    if (str->hasTwoByteChars()) {
      // Pure two-byte strings can't be equal to Latin-1 strings.
      JS::AutoCheckCannotGC nogc;
      if (!mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
        masm.branchLatin1String(input, &setNotEqualResult);
      }
    }

    // Strings of different length can never be equal.
    masm.branch32(Assembler::Equal, Address(input, JSString::offsetOfLength()),
                  Imm32(str->length()), &compareChars);

    masm.bind(&setNotEqualResult);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&compareChars);

  // Same length and compatible encoding: compare the characters inline.
  // Ropes and remaining encoding mismatches jump to the VM call.
  CompareCharacters(masm, input, str, output, op, CompareDirection::Forward,
                    ool->rejoin(), ool->entry());

  masm.bind(ool->rejoin());
}
// Compare two BigInts. masm.equalBigInts inspects sign, digit length, and
// digits in turn, jumping to the corresponding notSame* label at the first
// difference; falling through means the operands are equal.
void CodeGenerator::visitCompareBigInt(LCompareBigInt* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label notSame;
  Label compareSign;
  Label compareLength;
  Label compareDigit;

  Label* notSameSign;
  Label* notSameLength;
  Label* notSameDigit;
  if (IsEqualityOp(op)) {
    // For equality ops any difference decides the result.
    notSameSign = &notSame;
    notSameLength = &notSame;
    notSameDigit = &notSame;
  } else {
    // Relational ops must additionally determine which side is larger.
    notSameSign = &compareSign;
    notSameLength = &compareLength;
    notSameDigit = &compareDigit;
  }

  masm.equalBigInts(left, right, temp0, temp1, temp2, output, notSameSign,
                    notSameLength, notSameDigit);

  Label done;
  // Operands are equal: Eq/StrictEq/Le/Ge hold, the other ops don't.
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
                    op == JSOp::Ge),
              output);
  masm.jump(&done);

  if (IsEqualityOp(op)) {
    masm.bind(&notSame);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  } else {
    Label invertWhenNegative;

    // There are two cases when sign(left) != sign(right):
    // 1. sign(left) = positive and sign(right) = negative,
    // 2. or the dual case with reversed signs.
    //
    // For case 1, |left| <cmp> |right| is true for cmp=Gt or cmp=Ge and false
    // for cmp=Lt or cmp=Le. Initialize the result for case 1 and handle case 2
    // with |invertWhenNegative|.
    masm.bind(&compareSign);
    masm.move32(Imm32(op == JSOp::Gt || op == JSOp::Ge), output);
    masm.jump(&invertWhenNegative);

    // For sign(left) = sign(right) and len(digits(left)) != len(digits(right)),
    // we have to consider the two cases:
    // 1. len(digits(left)) < len(digits(right))
    // 2. len(digits(left)) > len(digits(right))
    //
    // For |left| <cmp> |right| with cmp=Lt:
    // Assume both BigInts are positive, then |left < right| is true for case 1
    // and false for case 2. When both are negative, the result is reversed.
    //
    // The other comparison operators can be handled similarly.
    //
    // |temp0| holds the digits length of the right-hand side operand.
    masm.bind(&compareLength);
    masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                  Address(left, BigInt::offsetOfLength()), temp0, output);
    masm.jump(&invertWhenNegative);

    // Similar to the case above, compare the current digit to determine the
    // overall comparison result.
    //
    // |temp1| points to the current digit of the left-hand side operand.
    // |output| holds the current digit of the right-hand side operand.
    masm.bind(&compareDigit);
    masm.cmpPtrSet(JSOpToCondition(op, /* isSigned = */ false),
                   Address(temp1, 0), output, output);

    // When both operands are negative, the magnitude comparison above has
    // the opposite meaning, so flip the boolean result.
    Label nonNegative;
    masm.bind(&invertWhenNegative);
    masm.branchIfBigIntIsNonNegative(left, &nonNegative);
    masm.xor32(Imm32(1), output);
    masm.bind(&nonNegative);
  }

  masm.bind(&done);
}
// Compare a BigInt against an int32. The masm helper branches to |ifTrue|
// or |ifFalse|; this merely materializes those two targets as a boolean
// in |output|.
void CodeGenerator::visitCompareBigIntInt32(LCompareBigIntInt32* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register output = ToRegister(lir->output());

  Label ifTrue, ifFalse;
  masm.compareBigIntAndInt32(op, left, right, temp0, temp1, &ifTrue, &ifFalse);

  Label done;
  masm.bind(&ifFalse);
  masm.move32(Imm32(0), output);
  masm.jump(&done);
  masm.bind(&ifTrue);
  masm.move32(Imm32(1), output);
  masm.bind(&done);
}
// Compare a BigInt against an unboxed double via an ABI call to a C++
// helper. Le and Gt have no direct helper, so they reverse the argument
// order and use the dual comparison instead.
void CodeGenerator::visitCompareBigIntDouble(LCompareBigIntDouble* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  FloatRegister right = ToFloatRegister(lir->right());
  Register output = ToRegister(lir->output());

  masm.setupAlignedABICall();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(right, MoveOp::DOUBLE);
    masm.passABIArg(left);
  } else {
    masm.passABIArg(left);
    masm.passABIArg(right, MoveOp::DOUBLE);
  }

  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      // |left > right| computed as |right < left| (arguments reversed above).
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      // |left <= right| computed as |right >= left| (arguments reversed above).
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(output);
}
// Compare a BigInt against a String. Always a VM call; only the VM
// function and the argument push order vary with |op| (Le/Gt use the dual
// comparison with reversed operands).
void CodeGenerator::visitCompareBigIntString(LCompareBigIntString* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    pushArg(left);
    pushArg(right);
  } else {
    pushArg(right);
    pushArg(left);
  }

  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);

  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callVM<FnBigIntString, BigIntStringEqual<Equal>>(lir);
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callVM<FnBigIntString, BigIntStringEqual<NotEqual>>(lir);
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnBigIntString, BigIntStringCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Gt: {
      // |left > right| computed as |right < left| (operands reversed above).
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnStringBigInt, StringBigIntCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Le: {
      // |left <= right| computed as |right >= left| (operands reversed above).
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    default:
      MOZ_CRASH("Unexpected compare op");
  }
}
// Loose equality (==/!=) of a boxed value against null or undefined:
// null, undefined, and objects that emulate undefined all compare equal.
void CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::ValueIndex);
  Register output = ToRegister(lir->output());

  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* nullOrLikeUndefined = ool->label1();
  Label* notNullOrLikeUndefined = ool->label2();

  {
    // Scope the tag scratch so it's released before extractObject below.
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
    masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);

    // Check whether it's a truthy object or a falsy object that emulates
    // undefined.
    masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
  }

  Register objreg =
      masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
  branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined,
                                    notNullOrLikeUndefined, output, ool);
  // fall through

  Label done;

  // It's not null or undefined, and if it's an object it doesn't
  // emulate undefined, so it's not like undefined.
  masm.move32(Imm32(op == JSOp::Ne), output);
  masm.jump(&done);

  masm.bind(nullOrLikeUndefined);
  masm.move32(Imm32(op == JSOp::Eq), output);

  // Both branches meet here.
  masm.bind(&done);
}
// Fused variant of visitIsNullOrLikeUndefinedV that branches directly to
// the successor blocks instead of materializing a boolean.
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(
    LIsNullOrLikeUndefinedAndBranchV* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  const ValueOperand value =
      ToValue(lir, LIsNullOrLikeUndefinedAndBranchV::Value);

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  auto* ool = new (alloc()) OutOfLineTestObject();
  addOutOfLineCode(ool, lir->cmpMir());

  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  {
    // Scope the tag scratch so it's released before extractObject below.
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
    masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);

    masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
  }

  // Objects that emulate undefined are loosely equal to null/undefined.
  Register objreg =
      masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
  Register scratch = ToRegister(lir->temp());
  testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch, ool);
}
// Loose equality (==/!=) of a known-object value against null/undefined:
// the result depends only on whether the object emulates undefined.
void CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);
  MOZ_ASSERT(lir->mir()->lhs()->type() == MIRType::Object);

  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");

  Register objreg = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* emulatesUndefined = ool->label1();
  Label* doesntEmulateUndefined = ool->label2();

  branchTestObjectEmulatesUndefined(objreg, emulatesUndefined,
                                    doesntEmulateUndefined, output, ool);
  // Falls through on the doesn't-emulate-undefined path (the
  // |doesntEmulateUndefined| label is handled by the branch helper/OOL
  // code — same pattern as visitIsNullOrLikeUndefinedV above).

  Label done;

  masm.move32(Imm32(op == JSOp::Ne), output);
  masm.jump(&done);

  masm.bind(emulatesUndefined);
  masm.move32(Imm32(op == JSOp::Eq), output);
  masm.bind(&done);
}
// Fused variant of visitIsNullOrLikeUndefinedT that branches directly to
// the successor blocks instead of materializing a boolean.
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(
    LIsNullOrLikeUndefinedAndBranchT* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);
  MOZ_ASSERT(lir->cmpMir()->lhs()->type() == MIRType::Object);

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  Register input = ToRegister(lir->getOperand(0));

  auto* ool = new (alloc()) OutOfLineTestObject();
  addOutOfLineCode(ool, lir->cmpMir());

  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  // Objects that emulate undefined are loosely equal to null/undefined.
  Register scratch = ToRegister(lir->temp());
  testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
}
11325 void CodeGenerator::visitIsNull(LIsNull* lir) {
11326 MCompare::CompareType compareType = lir->mir()->compareType();
11327 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11329 JSOp op = lir->mir()->jsop();
11330 MOZ_ASSERT(IsStrictEqualityOp(op));
11332 const ValueOperand value = ToValue(lir, LIsNull::ValueIndex);
11333 Register output = ToRegister(lir->output());
11335 Assembler::Condition cond = JSOpToCondition(compareType, op);
11336 masm.testNullSet(cond, value, output);
11339 void CodeGenerator::visitIsUndefined(LIsUndefined* lir) {
11340 MCompare::CompareType compareType = lir->mir()->compareType();
11341 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
11343 JSOp op = lir->mir()->jsop();
11344 MOZ_ASSERT(IsStrictEqualityOp(op));
11346 const ValueOperand value = ToValue(lir, LIsUndefined::ValueIndex);
11347 Register output = ToRegister(lir->output());
11349 Assembler::Condition cond = JSOpToCondition(compareType, op);
11350 masm.testUndefinedSet(cond, value, output);
11353 void CodeGenerator::visitIsNullAndBranch(LIsNullAndBranch* lir) {
11354 MCompare::CompareType compareType = lir->cmpMir()->compareType();
11355 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11357 JSOp op = lir->cmpMir()->jsop();
11358 MOZ_ASSERT(IsStrictEqualityOp(op));
11360 const ValueOperand value = ToValue(lir, LIsNullAndBranch::Value);
11362 Assembler::Condition cond = JSOpToCondition(compareType, op);
11363 testNullEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
11366 void CodeGenerator::visitIsUndefinedAndBranch(LIsUndefinedAndBranch* lir) {
11367 MCompare::CompareType compareType = lir->cmpMir()->compareType();
11368 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
11370 JSOp op = lir->cmpMir()->jsop();
11371 MOZ_ASSERT(IsStrictEqualityOp(op));
11373 const ValueOperand value = ToValue(lir, LIsUndefinedAndBranch::Value);
11375 Assembler::Condition cond = JSOpToCondition(compareType, op);
11376 testUndefinedEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
11379 void CodeGenerator::visitSameValueDouble(LSameValueDouble* lir) {
11380 FloatRegister left = ToFloatRegister(lir->left());
11381 FloatRegister right = ToFloatRegister(lir->right());
11382 FloatRegister temp = ToFloatRegister(lir->temp0());
11383 Register output = ToRegister(lir->output());
11385 masm.sameValueDouble(left, right, temp, output);
// SameValue on two boxed values. Inline fast path: if the raw 64-bit
// Value bits are identical the values are trivially SameValue; any
// difference in bits defers to the SameValue VM call. (Uses
// toRegister64(), so this relies on a boxed Value fitting one 64-bit
// register.)
void CodeGenerator::visitSameValue(LSameValue* lir) {
  ValueOperand lhs = ToValue(lir, LSameValue::LhsIndex);
  ValueOperand rhs = ToValue(lir, LSameValue::RhsIndex);
  Register output = ToRegister(lir->output());

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
  OutOfLineCode* ool =
      oolCallVM<Fn, SameValue>(lir, ArgList(lhs, rhs), StoreRegisterTo(output));

  // First check to see if the values have identical bits.
  // This is correct for SameValue because SameValue(NaN,NaN) is true,
  // and SameValue(0,-0) is false.
  masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
                ool->entry());
  masm.move32(Imm32(1), output);

  // If this fails, call SameValue.
  masm.bind(ool->rejoin());
}
// Concatenate the strings in |lhs| and |rhs|, leaving the result in |output|.
// Fast path: call the per-zone string-concat stub; it signals failure by
// returning nullptr in |output|, in which case we fall back to a VM call.
void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs,
                               Register output) {
  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
  OutOfLineCode* ool = oolCallVM<Fn, ConcatStrings<CanGC>>(
      lir, ArgList(lhs, rhs, static_cast<Imm32>(int32_t(gc::Heap::Default))),
      StoreRegisterTo(output));

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* stringConcatStub =
      jitZone->stringConcatStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(stringConcatStub);
  // nullptr result => take the VM path.
  masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());

  masm.bind(ool->rejoin());
11425 void CodeGenerator::visitConcat(LConcat* lir) {
11426 Register lhs = ToRegister(lir->lhs());
11427 Register rhs = ToRegister(lir->rhs());
11429 Register output = ToRegister(lir->output());
11431 MOZ_ASSERT(lhs == CallTempReg0);
11432 MOZ_ASSERT(rhs == CallTempReg1);
11433 MOZ_ASSERT(ToRegister(lir->temp0()) == CallTempReg0);
11434 MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg1);
11435 MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg2);
11436 MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg3);
11437 MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg4);
11438 MOZ_ASSERT(output == CallTempReg5);
11440 emitConcat(lir, lhs, rhs, output);
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding fromEncoding,
                            CharEncoding toEncoding) {
  // Copy |len| char16_t code units from |from| to |to|. Assumes len > 0
  // (checked below in debug builds), and when done |to| must point to the
  // next available char.

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
  masm.assumeUnreachable("Length should be greater than 0.");
  masm.bind(&ok);
#endif

  // Narrowing (TwoByte source into a Latin1 destination) is not supported;
  // only same-width copies and Latin1 -> TwoByte inflation.
  MOZ_ASSERT_IF(toEncoding == CharEncoding::Latin1,
                fromEncoding == CharEncoding::Latin1);

  size_t fromWidth =
      fromEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
  size_t toWidth =
      toEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);

  // Emit a char-at-a-time copy loop. Clobbers: |len| counts down to zero,
  // and |from|/|to| are advanced past the copied range.
  Label start;
  masm.bind(&start);
  masm.loadChar(Address(from, 0), byteOpScratch, fromEncoding);
  masm.storeChar(byteOpScratch, Address(to, 0), toEncoding);
  masm.addPtr(Imm32(fromWidth), from);
  masm.addPtr(Imm32(toWidth), to);
  masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
11475 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
11476 Register len, Register byteOpScratch,
11477 CharEncoding encoding) {
11478 CopyStringChars(masm, to, from, len, byteOpScratch, encoding, encoding);
static void CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input,
                                        Register destChars, Register temp1,
                                        Register temp2) {
  // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
  // have to inflate.
  // Clobbers: |input| is repointed at its own chars, |temp1| holds the
  // length, |temp2| is a scratch; |destChars| advances past the copy.

  Label isLatin1, done;
  masm.loadStringLength(input, temp1);
  masm.branchLatin1String(input, &isLatin1);
  // Two-byte input: plain same-width copy.
  masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
  masm.movePtr(temp2, input);
  CopyStringChars(masm, destChars, input, temp1, temp2,
                  CharEncoding::TwoByte);
  masm.jump(&done);
  masm.bind(&isLatin1);
  // Latin-1 input: inflate each unit to char16_t while copying.
  masm.loadStringChars(input, temp2, CharEncoding::Latin1);
  masm.movePtr(temp2, input);
  CopyStringChars(masm, destChars, input, temp1, temp2, CharEncoding::Latin1,
                  CharEncoding::TwoByte);
  masm.bind(&done);
// Allocate a JSThinInlineString when |length| fits, otherwise a
// JSFatInlineString, storing the proper flags and the length into the new
// string. Jumps to |failure| if GC allocation fails. |length| must already
// be known to fit in a fat inline string (debug-asserted below).
static void AllocateThinOrFatInlineString(MacroAssembler& masm, Register output,
                                          Register length, Register temp,
                                          gc::Heap initialStringHeap,
                                          Label* failure,
                                          CharEncoding encoding) {
#ifdef DEBUG
  size_t maxInlineLength;
  if (encoding == CharEncoding::Latin1) {
    maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
  } else {
    maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;

  Label ok;
  masm.branch32(Assembler::BelowOrEqual, length, Imm32(maxInlineLength), &ok);
  masm.assumeUnreachable("string length too large to be allocated as inline");
  masm.bind(&ok);
#endif

  // Pick thin vs. fat based on the encoding-specific thin-string limit.
  size_t maxThinInlineLength;
  if (encoding == CharEncoding::Latin1) {
    maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
  } else {
    maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;

  Label isFat, allocDone;
  masm.branch32(Assembler::Above, length, Imm32(maxThinInlineLength), &isFat);

  // Thin inline string: allocate and store flags.
  uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
  if (encoding == CharEncoding::Latin1) {
    flags |= JSString::LATIN1_CHARS_BIT;
  masm.newGCString(output, temp, initialStringHeap, failure);
  masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
  masm.jump(&allocDone);

  // Fat inline string: allocate and store flags.
  masm.bind(&isFat);
  uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
  if (encoding == CharEncoding::Latin1) {
    flags |= JSString::LATIN1_CHARS_BIT;
  masm.newGCFatInlineString(output, temp, initialStringHeap, failure);
  masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));

  masm.bind(&allocDone);

  // Store length.
  masm.store32(length, Address(output, JSString::offsetOfLength()));
// Emit code concatenating two linear strings whose combined length (already
// in temp2) fits in an inline string of the given encoding. The result is
// allocated into |output|; |lhs|, |rhs|, and the temps are clobbered.
static void ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs,
                               Register output, Register temp1, Register temp2,
                               Register temp3, gc::Heap initialStringHeap,
                               Label* failure, CharEncoding encoding) {
  JitSpew(JitSpew_Codegen, "# Emitting ConcatInlineString (encoding=%s)",
          (encoding == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // State: result length in temp2.

  // Ensure both strings are linear.
  masm.branchIfRope(lhs, failure);
  masm.branchIfRope(rhs, failure);

  // Allocate a JSThinInlineString or JSFatInlineString.
  AllocateThinOrFatInlineString(masm, output, temp2, temp1, initialStringHeap,
                                failure, encoding);

  // Load chars pointer in temp2.
  masm.loadInlineStringCharsForStore(output, temp2);

  // Copy |src|'s chars into the output buffer at temp2, inflating Latin-1
  // sources when the result is two-byte. Clobbers |src|, temp1, temp3 and
  // advances temp2.
  auto copyChars = [&](Register src) {
    if (encoding == CharEncoding::TwoByte) {
      CopyStringCharsMaybeInflate(masm, src, temp2, temp1, temp3);
    } else {
      masm.loadStringLength(src, temp3);
      masm.loadStringChars(src, temp1, CharEncoding::Latin1);
      masm.movePtr(temp1, src);
      CopyStringChars(masm, temp2, src, temp3, temp1, CharEncoding::Latin1);

  // Copy lhs chars. Note that this advances temp2 to point to the next
  // char. This also clobbers the lhs register.
  copyChars(lhs);

  // Copy rhs chars. Clobbers the rhs register.
  copyChars(rhs);
// Substring of |string| starting at |begin| with the given |length|.
// Fast paths: empty result -> atom "", full-length -> the input itself,
// short result -> inline string, otherwise -> dependent string. Ropes and
// allocation failures go to the VM (SubstringKernel).
void CodeGenerator::visitSubstr(LSubstr* lir) {
  Register string = ToRegister(lir->string());
  Register begin = ToRegister(lir->begin());
  Register length = ToRegister(lir->length());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp2());

  // On x86 there are not enough registers. In that case reuse the string
  // register as temporary.
  Register temp1 =
      lir->temp1()->isBogusTemp() ? string : ToRegister(lir->temp1());

  Label isLatin1, notInline, nonZero, nonInput, isInlinedLatin1;

  // For every edge case use the C++ variant.
  // Note: we also use this upon allocation failure in newGCString and
  // newGCFatInlineString. To squeeze out even more performance those failures
  // can be handled by allocate in ool code and returning to jit code to fill
  // in all data.
  using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
                           int32_t len);
  OutOfLineCode* ool = oolCallVM<Fn, SubstringKernel>(
      lir, ArgList(string, begin, length), StoreRegisterTo(output));
  Label* slowPath = ool->entry();
  Label* done = ool->rejoin();

  // Zero length, return emptystring.
  masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
  const JSAtomState& names = gen->runtime->names();
  masm.movePtr(ImmGCPtr(names.empty_), output);
  masm.jump(done);

  // Substring from 0..|str.length|, return str.
  masm.bind(&nonZero);
  masm.branch32(Assembler::NotEqual,
                Address(string, JSString::offsetOfLength()), length, &nonInput);
#ifdef DEBUG
  Label ok;
  masm.branchTest32(Assembler::Zero, begin, begin, &ok);
  masm.assumeUnreachable("length == str.length implies begin == 0");
  masm.bind(&ok);
#endif
  masm.movePtr(string, output);
  masm.jump(done);

  // Use slow path for ropes.
  masm.bind(&nonInput);
  masm.branchIfRope(string, slowPath);

  // Allocate either a JSThinInlineString or JSFatInlineString, or jump to
  // notInline if we need a dependent string.
  static_assert(JSThinInlineString::MAX_LENGTH_LATIN1 <
                JSFatInlineString::MAX_LENGTH_LATIN1);
  static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <
                JSFatInlineString::MAX_LENGTH_TWO_BYTE);

  // Use temp2 to store the JS(Thin|Fat)InlineString flags. This avoids having
  // duplicate newGCString/newGCFatInlineString codegen for Latin1 vs TwoByte
  // strings.
  Label isLatin1, allocFat, allocThin, allocDone;
  masm.branchLatin1String(string, &isLatin1);
  masm.branch32(Assembler::Above, length,
                Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE), &notInline);
  masm.move32(Imm32(0), temp2);
  masm.branch32(Assembler::Above, length,
                Imm32(JSThinInlineString::MAX_LENGTH_TWO_BYTE), &allocFat);
  masm.jump(&allocThin);

  masm.bind(&isLatin1);
  masm.branch32(Assembler::Above, length,
                Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &notInline);
  masm.move32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
  masm.branch32(Assembler::Above, length,
                Imm32(JSThinInlineString::MAX_LENGTH_LATIN1), &allocFat);

  masm.bind(&allocThin);
  masm.newGCString(output, temp0, initialStringHeap(), slowPath);
  masm.or32(Imm32(JSString::INIT_THIN_INLINE_FLAGS), temp2);
  masm.jump(&allocDone);

  masm.bind(&allocFat);
  masm.newGCFatInlineString(output, temp0, initialStringHeap(), slowPath);
  masm.or32(Imm32(JSString::INIT_FAT_INLINE_FLAGS), temp2);

  masm.bind(&allocDone);
  masm.store32(temp2, Address(output, JSString::offsetOfFlags()));
  masm.store32(length, Address(output, JSString::offsetOfLength()));

  // Copy the substring's chars into the freshly-allocated inline string.
  // When temp1 aliases |string| (x86), the string register is preserved
  // around the copy via push/pop.
  auto initializeInlineString = [&](CharEncoding encoding) {
    masm.loadStringChars(string, temp0, encoding);
    masm.addToCharPtr(temp0, begin, encoding);
    if (temp1 == string) {
      masm.push(string);
    masm.loadInlineStringCharsForStore(output, temp1);
    CopyStringChars(masm, temp1, temp0, length, temp2, encoding);
    masm.loadStringLength(output, length);
    if (temp1 == string) {
      masm.pop(string);
    masm.jump(done);

  masm.branchLatin1String(string, &isInlinedLatin1);
  initializeInlineString(CharEncoding::TwoByte);

  masm.bind(&isInlinedLatin1);
  initializeInlineString(CharEncoding::Latin1);

  // Handle other cases with a DependentString.
  masm.bind(&notInline);
  masm.newGCString(output, temp0, gen->initialStringHeap(), slowPath);
  masm.store32(length, Address(output, JSString::offsetOfLength()));
  masm.storeDependentStringBase(string, output);

  // Point the dependent string at the base string's chars (offset by
  // |begin|) and set its flags for the given encoding.
  auto initializeDependentString = [&](CharEncoding encoding) {
    uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
    if (encoding == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;

    masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
    masm.loadNonInlineStringChars(string, temp0, encoding);
    masm.addToCharPtr(temp0, begin, encoding);
    masm.storeNonInlineStringChars(temp0, output);
    masm.jump(done);

  masm.branchLatin1String(string, &isLatin1);
  initializeDependentString(CharEncoding::TwoByte);

  masm.bind(&isLatin1);
  initializeDependentString(CharEncoding::Latin1);

  masm.bind(done);
// Build the per-zone string-concatenation stub used by emitConcat. ABI:
// lhs/rhs in CallTempReg0/1, result (or nullptr on failure) in CallTempReg5;
// CallTempReg2-4 are clobbered.
JitCode* JitZone::generateStringConcatStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting StringConcat stub");

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateStringConcatStub");

  Register lhs = CallTempReg0;
  Register rhs = CallTempReg1;
  Register temp1 = CallTempReg2;
  Register temp2 = CallTempReg3;
  Register temp3 = CallTempReg4;
  Register output = CallTempReg5;

  Label failure;
#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // If lhs is empty, return rhs.
  Label leftEmpty;
  masm.loadStringLength(lhs, temp1);
  masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);

  // If rhs is empty, return lhs.
  Label rightEmpty;
  masm.loadStringLength(rhs, temp2);
  masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);

  // temp2 = combined length (lhs length + rhs length).
  masm.add32(temp1, temp2);

  // Check if we can use a JSInlineString. The result is a Latin1 string if
  // lhs and rhs are both Latin1, so we AND the flags.
  Label isInlineTwoByte, isInlineLatin1;
  masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
  masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);

  Label isLatin1, notInline;
  masm.branchTest32(Assembler::NonZero, temp1,
                    Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
  masm.branch32(Assembler::BelowOrEqual, temp2,
                Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
                &isInlineTwoByte);
  masm.jump(&notInline);

  masm.bind(&isLatin1);
  masm.branch32(Assembler::BelowOrEqual, temp2,
                Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &isInlineLatin1);
  masm.bind(&notInline);

  // Keep AND'ed flags in temp1.

  // Ensure result length <= JSString::MAX_LENGTH.
  masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);

  // Allocate a new rope, guaranteed to be in the nursery if initialStringHeap
  // == gc::Heap::Default. (As a result, no post barriers are needed below.)
  masm.newGCString(output, temp3, initialStringHeap, &failure);

  // Store rope length and flags. temp1 still holds the result of AND'ing the
  // lhs and rhs flags, so we just have to clear the other flags to get our rope
  // flags (Latin1 if both lhs and rhs are Latin1).
  static_assert(JSString::INIT_ROPE_FLAGS == 0,
                "Rope type flags must have no bits set");
  masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
  masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
  masm.store32(temp2, Address(output, JSString::offsetOfLength()));

  // Store left and right nodes.
  masm.storeRopeChildren(lhs, rhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&leftEmpty);
  masm.mov(rhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&rightEmpty);
  masm.mov(lhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&isInlineTwoByte);
  ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
                     initialStringHeap, &failure, CharEncoding::TwoByte);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&isInlineLatin1);
  ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
                     initialStringHeap, &failure, CharEncoding::Latin1);
  masm.pop(FramePointer);
  masm.ret();

  // NOTE(review): these two pops appear unreachable — the preceding path ends
  // in ret and no visible branch targets them. Possibly leftover from an
  // earlier version that pushed temps before branching to a failure label;
  // confirm against the full file before relying on or removing them.
  masm.pop(temp2);
  masm.pop(temp1);

  // Failure: return nullptr so emitConcat's caller falls back to the VM.
  masm.bind(&failure);
  masm.movePtr(ImmPtr(nullptr), output);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);

  CollectPerfSpewerJitCodeProfile(code, "StringConcatStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "StringConcatStub");
#endif

  return code;
// Trampoline that calls js_free on the pointer passed in CallTempReg0,
// preserving all other volatile registers around the call.
void JitRuntime::generateFreeStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateFreeStub");

  const Register regSlots = CallTempReg0;

  freeStubOffset_ = startTrampolineCode(masm);

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  // Save all volatile registers except the argument register itself.
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  regs.takeUnchecked(regSlots);
  LiveRegisterSet save(regs.asLiveSet());
  masm.PushRegsInMask(save);

  const Register regTemp = regs.takeAnyGeneral();
  MOZ_ASSERT(regTemp != regSlots);

  using Fn = void (*)(void* p);
  masm.setupUnalignedABICall(regTemp);
  masm.passABIArg(regSlots);
  masm.callWithABI<Fn, js_free>(MoveOp::GENERAL,
                                CheckUnsafeCallWithABI::DontCheckOther);

  masm.PopRegsInMask(save);

  masm.ret();
// Trampoline that lazily links the Ion code for the top activation, then
// jumps to the returned entry point.
void JitRuntime::generateLazyLinkStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateLazyLinkStub");

  lazyLinkStubOffset_ = startTrampolineCode(masm);

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  Register temp0 = regs.takeAny();
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();

  // Build a fake LazyLink exit frame so the VM call below can inspect the
  // current activation; temp1 = frame pointer argument.
  masm.loadJSContext(temp0);
  masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::LazyLink);
  masm.moveStackPtrTo(temp1);

  using Fn = uint8_t* (*)(JSContext* cx, LazyLinkExitFrameLayout* frame);
  masm.setupUnalignedABICall(temp2);
  masm.passABIArg(temp0);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, LazyLinkTopActivation>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Discard exit frame and restore frame pointer.
  masm.leaveExitFrame(0);
  masm.pop(FramePointer);

#ifdef JS_USE_LINK_REGISTER
  // Restore the return address such that the emitPrologue function of the
  // CodeGenerator can push it back on the stack with pushReturnAddress.
  masm.popReturnAddress();
#endif
  // Tail-jump to the freshly linked code (returned in ReturnReg).
  masm.jump(ReturnReg);
// Trampoline that invokes a callee via the C++ interpreter instead of JIT
// code, propagating failure to the JIT failure label.
void JitRuntime::generateInterpreterStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateInterpreterStub");

  interpreterStubOffset_ = startTrampolineCode(masm);

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  Register temp0 = regs.takeAny();
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();

  // Build a fake InterpreterStub exit frame; temp1 = frame pointer argument.
  masm.loadJSContext(temp0);
  masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::InterpreterStub);
  masm.moveStackPtrTo(temp1);

  using Fn = bool (*)(JSContext* cx, InterpreterStubExitFrameLayout* frame);
  masm.setupUnalignedABICall(temp2);
  masm.passABIArg(temp0);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, InvokeFromInterpreterStub>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // A false return means the interpreter call failed (e.g. threw).
  masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

  // Discard exit frame and restore frame pointer.
  masm.leaveExitFrame(0);
  masm.pop(FramePointer);

  // InvokeFromInterpreterStub stores the return value in argv[0], where the
  // caller stored |this|. Subtract |sizeof(void*)| for the frame pointer we
  // just popped.
  masm.loadValue(Address(masm.getStackPointer(),
                         JitFrameLayout::offsetOfThis() - sizeof(void*)),
                 JSReturnOperand);
  masm.ret();
// If the Value in R0 is a double that converts exactly to an int32, rewrite
// R0 as an int32 Value; otherwise leave R0 unchanged.
void JitRuntime::generateDoubleToInt32ValueStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateDoubleToInt32ValueStub");
  doubleToInt32ValueStubOffset_ = startTrampolineCode(masm);

  Label done;
  masm.branchTestDouble(Assembler::NotEqual, R0, &done);

  masm.unboxDouble(R0, FloatReg0);
  // On inexact conversion this branches to |done|, leaving R0 as a double.
  // negativeZeroCheck=false: -0 is allowed to convert to int32 0 here.
  masm.convertDoubleToInt32(FloatReg0, R1.scratchReg(), &done,
                            /* negativeZeroCheck = */ false);
  masm.tagValue(JSVAL_TYPE_INT32, R1.scratchReg(), R0);

  masm.bind(&done);
  masm.abiret();
11994 void CodeGenerator::visitLinearizeForCharAccess(LLinearizeForCharAccess* lir) {
11995 Register str = ToRegister(lir->str());
11996 Register index = ToRegister(lir->index());
11997 Register output = ToRegister(lir->output());
11999 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12000 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12001 lir, ArgList(str), StoreRegisterTo(output));
12003 masm.branchIfNotCanLoadStringChar(str, index, output, ool->entry());
12005 masm.movePtr(str, output);
12006 masm.bind(ool->rejoin());
12009 void CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) {
12010 Register str = ToRegister(lir->str());
12011 Register index = ToRegister(lir->index());
12012 Register output = ToRegister(lir->output());
12013 Register temp0 = ToRegister(lir->temp0());
12014 Register temp1 = ToRegister(lir->temp1());
12016 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12017 OutOfLineCode* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
12018 StoreRegisterTo(output));
12019 masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
12020 masm.bind(ool->rejoin());
// charCodeAt with a possibly out-of-range index: yields NaN when |index| is
// out of bounds, otherwise an int32 Value holding the code unit.
void CodeGenerator::visitCharCodeAtMaybeOutOfBounds(
    LCharCodeAtMaybeOutOfBounds* lir) {
  Register str = ToRegister(lir->str());
  Register index = ToRegister(lir->index());
  ValueOperand output = ToOutValue(lir);
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  // The VM fallback writes the raw code unit into output's scratch register;
  // it is boxed after the rejoin below.
  using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
  auto* ool = oolCallVM<Fn, jit::CharCodeAt>(
      lir, ArgList(str, index), StoreRegisterTo(output.scratchReg()));

  // Return NaN for out-of-bounds access.
  Label done;
  masm.moveValue(JS::NaNValue(), output);

  // Out of bounds: jump to |done| with the NaN already in |output|.
  masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                            temp0, &done);

  masm.loadStringChar(str, index, output.scratchReg(), temp0, temp1,
                      ool->entry());
  masm.bind(ool->rejoin());

  // In bounds: box the loaded code unit as an int32 Value.
  masm.tagValue(JSVAL_TYPE_INT32, output.scratchReg(), output);

  masm.bind(&done);
// charAt with a possibly out-of-range index: yields the empty string when
// out of bounds, otherwise a one-char string for the code unit at |index|.
void CodeGenerator::visitCharAtMaybeOutOfBounds(LCharAtMaybeOutOfBounds* lir) {
  Register str = ToRegister(lir->str());
  Register index = ToRegister(lir->index());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  // First OOL path: load the code unit via the VM if the fast load fails.
  using Fn1 = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
  auto* oolLoadChar = oolCallVM<Fn1, jit::CharCodeAt>(lir, ArgList(str, index),
                                                      StoreRegisterTo(output));

  // Second OOL path: materialize a string for codes not covered by the
  // static unit-string table. |output| is both the argument and result.
  using Fn2 = JSLinearString* (*)(JSContext*, int32_t);
  auto* oolFromCharCode = oolCallVM<Fn2, jit::StringFromCharCode>(
      lir, ArgList(output), StoreRegisterTo(output));

  // Return the empty string for out-of-bounds access.
  const JSAtomState& names = gen->runtime->names();
  masm.movePtr(ImmGCPtr(names.empty_), output);

  // Out of bounds: skip directly to the final join point, keeping the empty
  // string in |output|.
  masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                            temp0, oolFromCharCode->rejoin());

  masm.loadStringChar(str, index, output, temp0, temp1, oolLoadChar->entry());
  masm.bind(oolLoadChar->rejoin());

  // OOL path if code >= UNIT_STATIC_LIMIT.
  masm.boundsCheck32PowerOfTwo(output, StaticStrings::UNIT_STATIC_LIMIT,
                               oolFromCharCode->entry());

  // Fast path: index into the interned unit-string table.
  masm.movePtr(ImmPtr(&gen->runtime->staticStrings().unitStaticTable), temp0);
  masm.loadPtr(BaseIndex(temp0, output, ScalePointer), output);

  masm.bind(oolFromCharCode->rejoin());
12086 void CodeGenerator::visitFromCharCode(LFromCharCode* lir) {
12087 Register code = ToRegister(lir->code());
12088 Register output = ToRegister(lir->output());
12090 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12091 OutOfLineCode* ool = oolCallVM<Fn, jit::StringFromCharCode>(
12092 lir, ArgList(code), StoreRegisterTo(output));
12094 // OOL path if code >= UNIT_STATIC_LIMIT.
12095 masm.boundsCheck32PowerOfTwo(code, StaticStrings::UNIT_STATIC_LIMIT,
12096 ool->entry());
12098 masm.movePtr(ImmPtr(&gen->runtime->staticStrings().unitStaticTable), output);
12099 masm.loadPtr(BaseIndex(output, code, ScalePointer), output);
12101 masm.bind(ool->rejoin());
// String.fromCodePoint for one code point: static string for Latin-1 range,
// otherwise an inline string of one code unit (BMP) or a surrogate pair
// (supplementary plane). Invalid code points bail out.
void CodeGenerator::visitFromCodePoint(LFromCodePoint* lir) {
  Register codePoint = ToRegister(lir->codePoint());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  LSnapshot* snapshot = lir->snapshot();

  // The OOL path is only taken when we can't allocate the inline string.
  using Fn = JSString* (*)(JSContext*, int32_t);
  OutOfLineCode* ool = oolCallVM<Fn, jit::StringFromCodePoint>(
      lir, ArgList(codePoint), StoreRegisterTo(output));

  Label isTwoByte;
  Label* done = ool->rejoin();

  static_assert(
      StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
      "Latin-1 strings can be loaded from static strings");
  masm.boundsCheck32PowerOfTwo(codePoint, StaticStrings::UNIT_STATIC_LIMIT,
                               &isTwoByte);
  // Latin-1 range: fetch the interned unit string.
  masm.movePtr(ImmPtr(&gen->runtime->staticStrings().unitStaticTable),
               output);
  masm.loadPtr(BaseIndex(output, codePoint, ScalePointer), output);
  masm.jump(done);

  masm.bind(&isTwoByte);

  // Use a bailout if the input is not a valid code point, because
  // MFromCodePoint is movable and it'd be observable when a moved
  // fromCodePoint throws an exception before its actual call site.
  bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax),
               snapshot);

  // Allocate a JSThinInlineString.

  static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE >= 2,
                "JSThinInlineString can hold a supplementary code point");

  uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
  masm.newGCString(output, temp0, gen->initialStringHeap(), ool->entry());
  masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));

  // BMP code point: a single char16_t unit.
  Label isSupplementary;
  masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(unicode::NonBMPMin),
                &isSupplementary);

  // Store length.
  masm.store32(Imm32(1), Address(output, JSString::offsetOfLength()));

  // Load chars pointer in temp0.
  masm.loadInlineStringCharsForStore(output, temp0);

  masm.store16(codePoint, Address(temp0, 0));

  masm.jump(done);

  // Supplementary code point: encode as a lead/trail surrogate pair.
  masm.bind(&isSupplementary);

  // Store length.
  masm.store32(Imm32(2), Address(output, JSString::offsetOfLength()));

  // Load chars pointer in temp0.
  masm.loadInlineStringCharsForStore(output, temp0);

  // Inlined unicode::LeadSurrogate(uint32_t).
  masm.move32(codePoint, temp1);
  masm.rshift32(Imm32(10), temp1);
  masm.add32(Imm32(unicode::LeadSurrogateMin - (unicode::NonBMPMin >> 10)),
             temp1);

  masm.store16(temp1, Address(temp0, 0));

  // Inlined unicode::TrailSurrogate(uint32_t).
  masm.move32(codePoint, temp1);
  masm.and32(Imm32(0x3FF), temp1);
  masm.or32(Imm32(unicode::TrailSurrogateMin), temp1);

  masm.store16(temp1, Address(temp0, sizeof(char16_t)));

  masm.bind(done);
12190 void CodeGenerator::visitStringIndexOf(LStringIndexOf* lir) {
12191 pushArg(ToRegister(lir->searchString()));
12192 pushArg(ToRegister(lir->string()));
12194 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
12195 callVM<Fn, js::StringIndexOf>(lir);
12198 void CodeGenerator::visitStringStartsWith(LStringStartsWith* lir) {
12199 pushArg(ToRegister(lir->searchString()));
12200 pushArg(ToRegister(lir->string()));
12202 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12203 callVM<Fn, js::StringStartsWith>(lir);
// startsWith against a constant linear search string. Inline fast paths for
// the trivial cases; the VM handles strings that need linearizing.
void CodeGenerator::visitStringStartsWithInline(LStringStartsWithInline* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* searchString = lir->searchString();

  size_t length = searchString->length();
  MOZ_ASSERT(length > 0);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  auto* ool = oolCallVM<Fn, js::StringStartsWith>(
      lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));

  // Default the result to false; the fast paths below jump to the rejoin
  // with |output| already set.
  masm.move32(Imm32(0), output);

  // Can't be a prefix when the string is smaller than the search string.
  masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
                Imm32(length), ool->rejoin());

  // Unwind ropes at the start if possible.
  Label compare;
  masm.movePtr(string, temp);
  masm.branchIfNotRope(temp, &compare);

  // Walk down the chain of leftmost children; |temp| tracks the current node.
  Label unwindRope;
  masm.bind(&unwindRope);
  masm.loadRopeLeftChild(temp, output);
  masm.movePtr(output, temp);

  // If the left child is smaller than the search string, jump into the VM to
  // linearize the string.
  masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
                Imm32(length), ool->entry());

  // Otherwise keep unwinding ropes.
  masm.branchIfRope(temp, &unwindRope);

  masm.bind(&compare);

  // If operands point to the same instance, it's trivially a prefix.
  Label notPointerEqual;
  masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
                 &notPointerEqual);
  masm.move32(Imm32(1), output);
  masm.jump(ool->rejoin());
  masm.bind(&notPointerEqual);

  if (searchString->hasTwoByteChars()) {
    // Pure two-byte strings can't be a prefix of Latin-1 strings.
    JS::AutoCheckCannotGC nogc;
    if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
      Label compareChars;
      masm.branchTwoByteString(temp, &compareChars);
      masm.move32(Imm32(0), output);
      masm.jump(ool->rejoin());
      masm.bind(&compareChars);

  // Otherwise start comparing character by character.
  CompareCharacters(masm, temp, searchString, output, JSOp::Eq,
                    CompareDirection::Forward, ool->rejoin(), ool->entry());

  masm.bind(ool->rejoin());
12273 void CodeGenerator::visitStringEndsWith(LStringEndsWith* lir) {
12274 pushArg(ToRegister(lir->searchString()));
12275 pushArg(ToRegister(lir->string()));
12277 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12278 callVM<Fn, js::StringEndsWith>(lir);
// endsWith against a constant linear search string. Mirror image of
// visitStringStartsWithInline: unwinds right rope children and compares
// backward from the end.
void CodeGenerator::visitStringEndsWithInline(LStringEndsWithInline* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* searchString = lir->searchString();

  size_t length = searchString->length();
  MOZ_ASSERT(length > 0);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  auto* ool = oolCallVM<Fn, js::StringEndsWith>(
      lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));

  // Default the result to false; the fast paths below jump to the rejoin
  // with |output| already set.
  masm.move32(Imm32(0), output);

  // Can't be a suffix when the string is smaller than the search string.
  masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
                Imm32(length), ool->rejoin());

  // Unwind ropes at the end if possible.
  Label compare;
  masm.movePtr(string, temp);
  masm.branchIfNotRope(temp, &compare);

  // Walk down the chain of rightmost children; |temp| tracks the current node.
  Label unwindRope;
  masm.bind(&unwindRope);
  masm.loadRopeRightChild(temp, output);
  masm.movePtr(output, temp);

  // If the right child is smaller than the search string, jump into the VM to
  // linearize the string.
  masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
                Imm32(length), ool->entry());

  // Otherwise keep unwinding ropes.
  masm.branchIfRope(temp, &unwindRope);

  masm.bind(&compare);

  // If operands point to the same instance, it's trivially a suffix.
  Label notPointerEqual;
  masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
                 &notPointerEqual);
  masm.move32(Imm32(1), output);
  masm.jump(ool->rejoin());
  masm.bind(&notPointerEqual);

  if (searchString->hasTwoByteChars()) {
    // Pure two-byte strings can't be a suffix of Latin-1 strings.
    JS::AutoCheckCannotGC nogc;
    if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
      Label compareChars;
      masm.branchTwoByteString(temp, &compareChars);
      masm.move32(Imm32(0), output);
      masm.jump(ool->rejoin());
      masm.bind(&compareChars);

  // Otherwise start comparing character by character.
  CompareCharacters(masm, temp, searchString, output, JSOp::Eq,
                    CompareDirection::Backward, ool->rejoin(), ool->entry());

  masm.bind(ool->rejoin());
// Inline fast path for String.prototype.toLowerCase on linear Latin-1
// strings, using the latin1ToLowerCaseTable for the per-character mapping.
// All other inputs (non-linear, two-byte, too long, allocation failure) go
// through the js::StringToLowerCase VM call (|ool|).
void CodeGenerator::visitStringToLowerCase(LStringToLowerCase* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());

  // On x86 there are not enough registers. In that case reuse the string
  // register as a temporary.
  Register temp3 =
      lir->temp3()->isBogusTemp() ? string : ToRegister(lir->temp3());
  Register temp4 = ToRegister(lir->temp4());

  using Fn = JSString* (*)(JSContext*, HandleString);
  OutOfLineCode* ool = oolCallVM<Fn, js::StringToLowerCase>(
      lir, ArgList(string), StoreRegisterTo(output));

  // Take the slow path if the string isn't a linear Latin-1 string.
  Imm32 linearLatin1Bits(JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT);
  Register flags = temp0;
  masm.load32(Address(string, JSString::offsetOfFlags()), flags);
  masm.and32(linearLatin1Bits, flags);
  masm.branch32(Assembler::NotEqual, flags, linearLatin1Bits, ool->entry());

  // |temp0| is free again after the flags check; reuse it for the length.
  Register length = temp0;
  masm.loadStringLength(string, length);

  // Return the input if it's the empty string.
  Label notEmptyString;
  masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmptyString);
  {
    masm.movePtr(string, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&notEmptyString);

  Register inputChars = temp1;
  masm.loadStringChars(string, inputChars, CharEncoding::Latin1);

  Register toLowerCaseTable = temp2;
  masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), toLowerCaseTable);

  // Single element strings can be directly retrieved from static strings cache.
  Label notSingleElementString;
  masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleElementString);
  {
    Register current = temp4;

    // Map the single character through the table, then index into the
    // runtime's unit static strings table to get the result string.
    masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
    masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
                         current);
    masm.movePtr(ImmPtr(&gen->runtime->staticStrings().unitStaticTable),
                 output);
    masm.loadPtr(BaseIndex(output, current, ScalePointer), output);

    masm.jump(ool->rejoin());
  }
  masm.bind(&notSingleElementString);

  // Use the OOL-path when the string is too long. This prevents scanning long
  // strings which have upper case characters only near the end a second time in
  // the VM.
  constexpr int32_t MaxInlineLength = 64;
  masm.branch32(Assembler::Above, length, Imm32(MaxInlineLength), ool->entry());

  // Check if there are any characters which need to be converted.
  //
  // This extra loop gives a small performance improvement for strings which
  // are already lower cased and lets us avoid calling into the runtime for
  // non-inline, all lower case strings. But more importantly it avoids
  // repeated inline allocation failures:
  // |AllocateThinOrFatInlineString| below takes the OOL-path and calls the
  // |js::StringToLowerCase| runtime function when the result string can't be
  // allocated inline. And |js::StringToLowerCase| directly returns the input
  // string when no characters need to be converted. That means it won't
  // trigger GC to clear up the free nursery space, so the next toLowerCase()
  // call will again fail to inline allocate the result string.
  Label hasUpper;
  {
    Register checkInputChars = output;
    masm.movePtr(inputChars, checkInputChars);

    Register current = temp4;

    // Scan the characters; bail to |hasUpper| on the first character the
    // table would change.
    Label start;
    masm.bind(&start);
    masm.loadChar(Address(checkInputChars, 0), current, CharEncoding::Latin1);
    masm.branch8(Assembler::NotEqual,
                 BaseIndex(toLowerCaseTable, current, TimesOne), current,
                 &hasUpper);
    masm.addPtr(Imm32(sizeof(Latin1Char)), checkInputChars);
    masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);

    // Input is already in lower case.
    masm.movePtr(string, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&hasUpper);

  // |length| was clobbered above, reload.
  masm.loadStringLength(string, length);

  // Call into the runtime when we can't create an inline string.
  masm.branch32(Assembler::Above, length,
                Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), ool->entry());

  AllocateThinOrFatInlineString(masm, output, length, temp4,
                                initialStringHeap(), ool->entry(),
                                CharEncoding::Latin1);

  // When |string| doubles as |temp3| (x86), preserve it across the copy loop.
  if (temp3 == string) {
    masm.push(string);
  }

  Register outputChars = temp3;
  masm.loadInlineStringCharsForStore(output, outputChars);

  {
    Register current = temp4;

    // Copy loop: map each character through the table into the new string.
    Label start;
    masm.bind(&start);
    masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
    masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
                         current);
    masm.storeChar(current, Address(outputChars, 0), CharEncoding::Latin1);
    masm.addPtr(Imm32(sizeof(Latin1Char)), inputChars);
    masm.addPtr(Imm32(sizeof(Latin1Char)), outputChars);
    masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
  }

  if (temp3 == string) {
    masm.pop(string);
  }

  masm.bind(ool->rejoin());
}
12488 void CodeGenerator::visitStringToUpperCase(LStringToUpperCase* lir) {
12489 pushArg(ToRegister(lir->string()));
12491 using Fn = JSString* (*)(JSContext*, HandleString);
12492 callVM<Fn, js::StringToUpperCase>(lir);
12495 void CodeGenerator::visitStringSplit(LStringSplit* lir) {
12496 pushArg(Imm32(INT32_MAX));
12497 pushArg(ToRegister(lir->separator()));
12498 pushArg(ToRegister(lir->string()));
12500 using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
12501 callVM<Fn, js::StringSplitString>(lir);
12504 void CodeGenerator::visitInitializedLength(LInitializedLength* lir) {
12505 Address initLength(ToRegister(lir->elements()),
12506 ObjectElements::offsetOfInitializedLength());
12507 masm.load32(initLength, ToRegister(lir->output()));
12510 void CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir) {
12511 Address initLength(ToRegister(lir->elements()),
12512 ObjectElements::offsetOfInitializedLength());
12513 SetLengthFromIndex(masm, lir->index(), initLength);
12516 void CodeGenerator::visitNotBI(LNotBI* lir) {
12517 Register input = ToRegister(lir->input());
12518 Register output = ToRegister(lir->output());
12520 masm.cmp32Set(Assembler::Equal, Address(input, BigInt::offsetOfLength()),
12521 Imm32(0), output);
// !object: almost always false, except for objects that emulate undefined
// (document.all). The emulates-undefined test may need an out-of-line class
// check, hence the OOL labels.
void CodeGenerator::visitNotO(LNotO* lir) {
  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* ifEmulatesUndefined = ool->label1();
  Label* ifDoesntEmulateUndefined = ool->label2();

  Register objreg = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, output, ool);
  // fall through

  Label join;

  // Doesn't emulate undefined => !obj is false.
  masm.move32(Imm32(0), output);
  masm.jump(&join);

  // Emulates undefined => !obj is true.
  masm.bind(ifEmulatesUndefined);
  masm.move32(Imm32(1), output);

  masm.bind(&join);
}
// !value for a boxed Value: compute truthiness (possibly via the OOL
// emulates-undefined object test) and invert it into a boolean output.
void CodeGenerator::visitNotV(LNotV* lir) {
  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* ifTruthy = ool->label1();
  Label* ifFalsy = ool->label2();

  ValueOperand input = ToValue(lir, LNotV::InputIndex);
  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
  FloatRegister floatTemp = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());
  const TypeDataList& observedTypes = lir->mir()->observedTypes();

  testValueTruthy(input, tempToUnbox, output, floatTemp, observedTypes,
                  ifTruthy, ifFalsy, ool);

  Label join;

  // Note that the testValueTruthy call above may choose to fall through
  // to ifTruthy instead of branching there.
  masm.bind(ifTruthy);
  masm.move32(Imm32(0), output);
  masm.jump(&join);

  masm.bind(ifFalsy);
  masm.move32(Imm32(1), output);

  // both branches meet here.
  masm.bind(&join);
}
// Emit a bounds check of |index| against |length|, bailing out of Ion code
// when index >= length. Either operand may be a constant, register, or
// memory operand; comparisons are unsigned, so a negative index also fails.
// The comparison width follows the MIR type (Int32 vs. IntPtr).
void CodeGenerator::visitBoundsCheck(LBoundsCheck* lir) {
  const LAllocation* index = lir->index();
  const LAllocation* length = lir->length();
  LSnapshot* snapshot = lir->snapshot();

  MIRType type = lir->mir()->type();

  // Bail out when |cond| holds for (lhs, rhs), using the width implied by
  // the MIR type.
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  // Same, but with an immediate right-hand side.
  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (index->isConstant()) {
    // Use uint32 so that the comparison is unsigned.
    uint32_t idx = ToInt32(index);
    if (length->isConstant()) {
      // Both operands constant: the check is decided at compile time.
      uint32_t len = ToInt32(lir->length());
      if (idx < len) {
        return;
      }
      bailout(snapshot);
      return;
    }

    // length <= idx (unsigned) means the index is out of bounds.
    if (length->isRegister()) {
      bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), idx);
    } else {
      bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), idx);
    }
    return;
  }

  Register indexReg = ToRegister(index);
  if (length->isConstant()) {
    bailoutCmpConstant(Assembler::AboveOrEqual, indexReg, ToInt32(length));
  } else if (length->isRegister()) {
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), indexReg);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), indexReg);
  }
}
// Bounds check for a whole range of accesses [index + min, index + max]
// against |length|, bailing out when any access in the range would be out of
// bounds. Overflow of index + min / index + max is detected and treated as a
// failure.
void CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir) {
  int32_t min = lir->mir()->minimum();
  int32_t max = lir->mir()->maximum();
  MOZ_ASSERT(max >= min);

  LSnapshot* snapshot = lir->snapshot();
  MIRType type = lir->mir()->type();

  const LAllocation* length = lir->length();
  Register temp = ToRegister(lir->getTemp(0));

  // Bail out when |cond| holds, using the width implied by the MIR type.
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  // Same, but with an immediate right-hand side.
  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (lir->index()->isConstant()) {
    int32_t nmin, nmax;
    int32_t index = ToInt32(lir->index());
    // If index+min and index+max don't overflow and the lower bound is
    // nonnegative, a single unsigned compare against index+max suffices.
    if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
      if (length->isRegister()) {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), nmax);
      } else {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), nmax);
      }
      return;
    }
    masm.mov(ImmWord(index), temp);
  } else {
    masm.mov(ToRegister(lir->index()), temp);
  }

  // If the minimum and maximum differ then do an underflow check first.
  // If the two are the same then doing an unsigned comparison on the
  // length will also catch a negative index.
  if (min != max) {
    if (min != 0) {
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(min), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    }

    bailoutCmpConstant(Assembler::LessThan, temp, 0);

    // |temp| now holds index+min; rewrite |max| relative to it so the final
    // add below produces index+max.
    if (min != 0) {
      int32_t diff;
      if (SafeSub(max, min, &diff)) {
        max = diff;
      } else {
        if (type == MIRType::Int32) {
          masm.sub32(Imm32(min), temp);
        } else {
          masm.subPtr(Imm32(min), temp);
        }
      }
    }
  }

  // Compute the maximum possible index. No overflow check is needed when
  // max > 0. We can only wraparound to a negative number, which will test as
  // larger than all nonnegative numbers in the unsigned comparison, and the
  // length is required to be nonnegative (else testing a negative length
  // would succeed on any nonnegative index).
  if (max != 0) {
    if (max < 0) {
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(max), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    } else {
      if (type == MIRType::Int32) {
        masm.add32(Imm32(max), temp);
      } else {
        masm.addPtr(Imm32(max), temp);
      }
    }
  }

  // Finally check the maximum index against the length.
  if (length->isRegister()) {
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), temp);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), temp);
  }
}
12741 void CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir) {
12742 int32_t min = lir->mir()->minimum();
12743 bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
12744 lir->snapshot());
12747 void CodeGenerator::visitSpectreMaskIndex(LSpectreMaskIndex* lir) {
12748 MOZ_ASSERT(JitOptions.spectreIndexMasking);
12750 const LAllocation* length = lir->length();
12751 Register index = ToRegister(lir->index());
12752 Register output = ToRegister(lir->output());
12754 if (lir->mir()->type() == MIRType::Int32) {
12755 if (length->isRegister()) {
12756 masm.spectreMaskIndex32(index, ToRegister(length), output);
12757 } else {
12758 masm.spectreMaskIndex32(index, ToAddress(length), output);
12760 } else {
12761 MOZ_ASSERT(lir->mir()->type() == MIRType::IntPtr);
12762 if (length->isRegister()) {
12763 masm.spectreMaskIndexPtr(index, ToRegister(length), output);
12764 } else {
12765 masm.spectreMaskIndexPtr(index, ToAddress(length), output);
12770 class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator> {
12771 LInstruction* ins_;
12773 public:
12774 explicit OutOfLineStoreElementHole(LInstruction* ins) : ins_(ins) {
12775 MOZ_ASSERT(ins->isStoreElementHoleV() || ins->isStoreElementHoleT());
12778 void accept(CodeGenerator* codegen) override {
12779 codegen->visitOutOfLineStoreElementHole(this);
12782 MStoreElementHole* mir() const {
12783 return ins_->isStoreElementHoleV() ? ins_->toStoreElementHoleV()->mir()
12784 : ins_->toStoreElementHoleT()->mir();
12786 LInstruction* ins() const { return ins_; }
12789 void CodeGenerator::emitStoreHoleCheck(Register elements,
12790 const LAllocation* index,
12791 LSnapshot* snapshot) {
12792 Label bail;
12793 if (index->isConstant()) {
12794 Address dest(elements, ToInt32(index) * sizeof(js::Value));
12795 masm.branchTestMagic(Assembler::Equal, dest, &bail);
12796 } else {
12797 BaseObjectElementIndex dest(elements, ToRegister(index));
12798 masm.branchTestMagic(Assembler::Equal, dest, &bail);
12800 bailoutFrom(&bail, snapshot);
12803 void CodeGenerator::emitStoreElementTyped(const LAllocation* value,
12804 MIRType valueType, Register elements,
12805 const LAllocation* index) {
12806 MOZ_ASSERT(valueType != MIRType::MagicHole);
12807 ConstantOrRegister v = ToConstantOrRegister(value, valueType);
12808 if (index->isConstant()) {
12809 Address dest(elements, ToInt32(index) * sizeof(js::Value));
12810 masm.storeUnboxedValue(v, valueType, dest);
12811 } else {
12812 BaseObjectElementIndex dest(elements, ToRegister(index));
12813 masm.storeUnboxedValue(v, valueType, dest);
// Store a typed value into a dense element: pre-barrier and hole check (when
// required by the MIR node) must run before the store itself.
void CodeGenerator::visitStoreElementT(LStoreElementT* store) {
  Register elements = ToRegister(store->elements());
  const LAllocation* index = store->index();

  if (store->mir()->needsBarrier()) {
    emitPreBarrier(elements, index);
  }

  if (store->mir()->needsHoleCheck()) {
    emitStoreHoleCheck(elements, index, store->snapshot());
  }

  emitStoreElementTyped(store->value(), store->mir()->value()->type(), elements,
                        index);
}
// Store a boxed Value into a dense element: pre-barrier and hole check (when
// required by the MIR node) must run before the store itself.
void CodeGenerator::visitStoreElementV(LStoreElementV* lir) {
  const ValueOperand value = ToValue(lir, LStoreElementV::Value);
  Register elements = ToRegister(lir->elements());
  const LAllocation* index = lir->index();

  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(elements, index);
  }

  if (lir->mir()->needsHoleCheck()) {
    emitStoreHoleCheck(elements, index, lir->snapshot());
  }

  if (lir->index()->isConstant()) {
    Address dest(elements, ToInt32(lir->index()) * sizeof(js::Value));
    masm.storeValue(value, dest);
  } else {
    BaseObjectElementIndex dest(elements, ToRegister(lir->index()));
    masm.storeValue(value, dest);
  }
}
12855 void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
12856 Register elements = ToRegister(lir->elements());
12857 Register index = ToRegister(lir->index());
12859 Address elementsFlags(elements, ObjectElements::offsetOfFlags());
12860 masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);
12862 BaseObjectElementIndex element(elements, index);
12863 masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
// Typed-value flavor of a dense-element store that may grow the array:
// inline path handles index < initializedLength; the OOL path handles
// appending at index == initializedLength (see
// visitOutOfLineStoreElementHole).
void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // The OOL path rejoins here (after the pre-barrier: grown memory is
  // uninitialized, so it needs no barrier) and the store runs for both paths.
  masm.bind(ool->rejoin());
  emitStoreElementTyped(lir->value(), lir->mir()->value()->type(), elements,
                        lir->index());

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    ConstantOrRegister val =
        ToConstantOrRegister(lir->value(), lir->mir()->value()->type());
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp, val);
  }
}
// Boxed-Value flavor of a dense-element store that may grow the array:
// inline path handles index < initializedLength; the OOL path handles
// appending at index == initializedLength (see
// visitOutOfLineStoreElementHole).
void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  const ValueOperand value = ToValue(lir, LStoreElementHoleV::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // The OOL path rejoins here (after the pre-barrier: grown memory is
  // uninitialized, so it needs no barrier) and the store runs for both paths.
  masm.bind(ool->rejoin());
  masm.storeValue(value, BaseObjectElementIndex(elements, index));

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp,
                                ConstantOrRegister(value));
  }
}
// OOL path for StoreElementHole{V,T}: reached when index >= initializedLength.
// Handles only the append case (index == initializedLength); anything else
// bails out. Grows the elements via NativeObject::addDenseElementPure when
// at capacity, bumps initializedLength (and length if needed), then rejoins
// the inline path, which performs the actual store.
void CodeGenerator::visitOutOfLineStoreElementHole(
    OutOfLineStoreElementHole* ool) {
  Register object, elements, index;
  LInstruction* ins = ool->ins();
  mozilla::Maybe<ConstantOrRegister> value;
  Register temp;

  // Unpack operands from whichever LIR flavor we were created for.
  if (ins->isStoreElementHoleV()) {
    LStoreElementHoleV* store = ins->toStoreElementHoleV();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    value.emplace(
        TypedOrValueRegister(ToValue(store, LStoreElementHoleV::ValueIndex)));
    temp = ToRegister(store->temp0());
  } else {
    LStoreElementHoleT* store = ins->toStoreElementHoleT();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    if (store->value()->isConstant()) {
      value.emplace(
          ConstantOrRegister(store->value()->toConstant()->toJSValue()));
    } else {
      MIRType valueType = store->mir()->value()->type();
      value.emplace(
          TypedOrValueRegister(valueType, ToAnyRegister(store->value())));
    }
    temp = ToRegister(store->temp0());
  }

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());

  // We're out-of-bounds. We only handle the index == initlength case.
  // If index > initializedLength, bail out. Note that this relies on the
  // condition flags sticking from the incoming branch.
  // Also note: this branch does not need Spectre mitigations, doing that for
  // the capacity check below is sufficient.
  Label allocElement, addNewElement;
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  // Had to reimplement for MIPS because there are no flags.
  bailoutCmp32(Assembler::NotEqual, initLength, index, ins->snapshot());
#else
  bailoutIf(Assembler::NotEqual, ins->snapshot());
#endif

  // If index < capacity, we can add a dense element inline. If not, we need
  // to allocate more elements first.
  masm.spectreBoundsCheck32(
      index, Address(elements, ObjectElements::offsetOfCapacity()), temp,
      &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  // Save all live volatile registers, except |temp|.
  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  liveRegs.takeUnchecked(temp);
  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject*);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  // Growing failed (e.g. couldn't allocate): bail out to the interpreter.
  bailoutIfFalseBool(temp, ins->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);

  masm.bind(&addNewElement);

  // Increment initLength
  masm.add32(Imm32(1), initLength);

  // If length is now <= index, increment length too.
  Label skipIncrementLength;
  Address length(elements, ObjectElements::offsetOfLength());
  masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
  masm.add32(Imm32(1), length);
  masm.bind(&skipIncrementLength);

  // Jump to the inline path where we will store the value.
  // We rejoin after the prebarrier, because the memory is uninitialized.
  masm.jump(ool->rejoin());
}
// Array.prototype.pop / shift on a packed array: delegates to the
// MacroAssembler helpers, bailing out of Ion on their failure paths.
void CodeGenerator::visitArrayPopShift(LArrayPopShift* lir) {
  Register obj = ToRegister(lir->object());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  ValueOperand out = ToOutValue(lir);

  Label bail;
  if (lir->mir()->mode() == MArrayPopShift::Pop) {
    masm.packedArrayPop(obj, out, temp1, temp2, &bail);
  } else {
    MOZ_ASSERT(lir->mir()->mode() == MArrayPopShift::Shift);
    // Shift may call out, so it needs the live volatile register set.
    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    masm.packedArrayShift(obj, out, temp1, temp2, volatileRegs, &bail);
  }
  bailoutFrom(&bail, lir->snapshot());
}
13027 class OutOfLineArrayPush : public OutOfLineCodeBase<CodeGenerator> {
13028 LArrayPush* ins_;
13030 public:
13031 explicit OutOfLineArrayPush(LArrayPush* ins) : ins_(ins) {}
13033 void accept(CodeGenerator* codegen) override {
13034 codegen->visitOutOfLineArrayPush(this);
13037 LArrayPush* ins() const { return ins_; }
// Array.prototype.push of a single value: store at the current length and
// bump length/initializedLength. Takes the OOL path to grow the elements
// when at capacity; bails out when length != initializedLength (holes).
void CodeGenerator::visitArrayPush(LArrayPush* lir) {
  Register obj = ToRegister(lir->object());
  Register elementsTemp = ToRegister(lir->temp0());
  Register length = ToRegister(lir->output());
  ValueOperand value = ToValue(lir, LArrayPush::ValueIndex);
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  auto* ool = new (alloc()) OutOfLineArrayPush(lir);
  addOutOfLineCode(ool, lir->mir());

  // Load obj->elements in elementsTemp.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);

  Address initLengthAddr(elementsTemp,
                         ObjectElements::offsetOfInitializedLength());
  Address lengthAddr(elementsTemp, ObjectElements::offsetOfLength());
  Address capacityAddr(elementsTemp, ObjectElements::offsetOfCapacity());

  // Bail out if length != initLength.
  masm.load32(lengthAddr, length);
  bailoutCmp32(Assembler::NotEqual, initLengthAddr, length, lir->snapshot());

  // If length < capacity, we can add a dense element inline. If not, we
  // need to allocate more elements.
  masm.spectreBoundsCheck32(length, capacityAddr, spectreTemp, ool->entry());
  masm.bind(ool->rejoin());

  // Store the value.
  masm.storeValue(value, BaseObjectElementIndex(elementsTemp, length));

  // Update length and initialized length.
  masm.add32(Imm32(1), length);
  masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
  masm.store32(length, Address(elementsTemp,
                               ObjectElements::offsetOfInitializedLength()));

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    regs.addUnchecked(length);
    // |length| now holds the new length, which is the stored index + 1,
    // hence indexDiff = -1.
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->output()->output(),
                                elementsTemp, ConstantOrRegister(value),
                                /* indexDiff = */ -1);
  }
}
// OOL path for LArrayPush: grow the dense elements via
// NativeObject::addDenseElementPure, bail out on failure, then rejoin the
// inline path which performs the store.
void CodeGenerator::visitOutOfLineArrayPush(OutOfLineArrayPush* ool) {
  LArrayPush* ins = ool->ins();

  Register object = ToRegister(ins->object());
  Register temp = ToRegister(ins->temp0());

  // Save all live volatile registers except |temp|; the output register and
  // the pushed value are still needed after the call, so keep them too.
  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  liveRegs.takeUnchecked(temp);
  liveRegs.addUnchecked(ToRegister(ins->output()));
  liveRegs.addUnchecked(ToValue(ins, LArrayPush::ValueIndex));

  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject* obj);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  bailoutIfFalseBool(temp, ins->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  masm.jump(ool->rejoin());
}
// Array.prototype.slice on a packed array: try to pre-allocate the result
// object inline (passing nullptr to the VM on allocation failure so it
// allocates instead), then call ArraySliceDense to do the copy.
void CodeGenerator::visitArraySlice(LArraySlice* lir) {
  Register object = ToRegister(lir->object());
  Register begin = ToRegister(lir->begin());
  Register end = ToRegister(lir->end());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label call, fail;

  // Bail out if the array isn't packed (may contain holes).
  Label bail;
  masm.branchArrayIsNotPacked(object, temp0, temp1, &bail);
  bailoutFrom(&bail, lir->snapshot());

  // Try to allocate an object.
  TemplateObject templateObject(lir->mir()->templateObj());
  masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
                      &fail);

  masm.jump(&call);

  // Allocation failed: pass nullptr and let the VM allocate the result.
  masm.bind(&fail);
  masm.movePtr(ImmPtr(nullptr), temp0);

  masm.bind(&call);

  pushArg(temp0);
  pushArg(end);
  pushArg(begin);
  pushArg(object);

  using Fn =
      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
  callVM<Fn, ArraySliceDense>(lir);
}
// Array.prototype.slice applied to an arguments object: same structure as
// visitArraySlice (pre-allocate the result or pass nullptr), but calls
// ArgumentsSliceDense and needs no packed-array check.
void CodeGenerator::visitArgumentsSlice(LArgumentsSlice* lir) {
  Register object = ToRegister(lir->object());
  Register begin = ToRegister(lir->begin());
  Register end = ToRegister(lir->end());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label call, fail;

  // Try to allocate an object.
  TemplateObject templateObject(lir->mir()->templateObj());
  masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
                      &fail);

  masm.jump(&call);

  // Allocation failed: pass nullptr and let the VM allocate the result.
  masm.bind(&fail);
  masm.movePtr(ImmPtr(nullptr), temp0);

  masm.bind(&call);

  pushArg(temp0);
  pushArg(end);
  pushArg(begin);
  pushArg(object);

  using Fn =
      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
  callVM<Fn, ArgumentsSliceDense>(lir);
}
#ifdef DEBUG
// Debug-only runtime assertions validating an arguments-slice request:
// 0 <= begin, 0 <= count, begin <= numActualArgs, count <= numActualArgs,
// and begin + count <= numActualArgs. |begin| and |count| may each be a
// register or a compile-time constant; |numActualArgs| is clobbered.
void CodeGenerator::emitAssertArgumentsSliceBounds(const RegisterOrInt32& begin,
                                                   const RegisterOrInt32& count,
                                                   Register numActualArgs) {
  // |begin| must be positive or zero.
  if (begin.is<Register>()) {
    Label beginOk;
    masm.branch32(Assembler::GreaterThanOrEqual, begin.as<Register>(), Imm32(0),
                  &beginOk);
    masm.assumeUnreachable("begin < 0");
    masm.bind(&beginOk);
  } else {
    MOZ_ASSERT(begin.as<int32_t>() >= 0);
  }

  // |count| must be positive or zero.
  if (count.is<Register>()) {
    Label countOk;
    masm.branch32(Assembler::GreaterThanOrEqual, count.as<Register>(), Imm32(0),
                  &countOk);
    masm.assumeUnreachable("count < 0");
    masm.bind(&countOk);
  } else {
    MOZ_ASSERT(count.as<int32_t>() >= 0);
  }

  // |begin| must be less-or-equal to |numActualArgs|.
  Label argsBeginOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginOk);
  }
  masm.assumeUnreachable("begin <= numActualArgs");
  masm.bind(&argsBeginOk);

  // |count| must be less-or-equal to |numActualArgs|.
  Label argsCountOk;
  if (count.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, count.as<Register>(),
                   &argsCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(count.as<int32_t>()), &argsCountOk);
  }
  masm.assumeUnreachable("count <= numActualArgs");
  masm.bind(&argsCountOk);

  // |begin| and |count| must be preserved, but |numActualArgs| can be changed.
  //
  // Pre-condition: |count| <= |numActualArgs|
  // Condition to test: |begin + count| <= |numActualArgs|
  // Transform to: |begin| <= |numActualArgs - count|
  if (count.is<Register>()) {
    masm.subPtr(count.as<Register>(), numActualArgs);
  } else {
    masm.subPtr(Imm32(count.as<int32_t>()), numActualArgs);
  }

  // |begin + count| must be less-or-equal to |numActualArgs|.
  Label argsBeginCountOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginCountOk);
  }
  masm.assumeUnreachable("begin + count <= numActualArgs");
  masm.bind(&argsBeginCountOk);
}
#endif
// Allocate and initialize the result array for an arguments-slice operation.
// |count| is the element count, either in a register or as a compile-time
// constant; |output| receives the new ArrayObject and |temp| is a scratch
// register. Falls back to a VM call when the template object's capacity is
// too small or inline allocation fails.
template <class ArgumentsSlice>
void CodeGenerator::emitNewArray(ArgumentsSlice* lir,
                                 const RegisterOrInt32& count, Register output,
                                 Register temp) {
  using Fn = ArrayObject* (*)(JSContext*, int32_t);
  // Out-of-line path: call into the VM to allocate an array with |count|
  // dense initialized elements.
  auto* ool = count.match(
      [&](Register count) {
        return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
            lir, ArgList(count), StoreRegisterTo(output));
      },
      [&](int32_t count) {
        return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
            lir, ArgList(Imm32(count)), StoreRegisterTo(output));
      });

  TemplateObject templateObject(lir->mir()->templateObj());
  MOZ_ASSERT(templateObject.isArrayObject());

  // The template must be an empty array with fixed (inline) elements.
  auto templateNativeObj = templateObject.asTemplateNativeObject();
  MOZ_ASSERT(templateNativeObj.getArrayLength() == 0);
  MOZ_ASSERT(templateNativeObj.getDenseInitializedLength() == 0);
  MOZ_ASSERT(!templateNativeObj.hasDynamicElements());

  // Check array capacity. Call into the VM if the template object's capacity
  // is too small.
  bool tryAllocate = count.match(
      [&](Register count) {
        masm.branch32(Assembler::Above, count,
                      Imm32(templateNativeObj.getDenseCapacity()),
                      ool->entry());
        return true;
      },
      [&](int32_t count) {
        MOZ_ASSERT(count >= 0);
        if (uint32_t(count) > templateNativeObj.getDenseCapacity()) {
          // Known at compile time to exceed the capacity: always take the
          // VM path, so no inline allocation code is needed at all.
          masm.jump(ool->entry());
          return false;
        }
        return true;
      });

  if (tryAllocate) {
    // Try to allocate an object.
    masm.createGCObject(output, temp, templateObject, lir->mir()->initialHeap(),
                        ool->entry());

    auto setInitializedLengthAndLength = [&](auto count) {
      const int elementsOffset = NativeObject::offsetOfFixedElements();

      // Update initialized length.
      Address initLength(
          output, elementsOffset + ObjectElements::offsetOfInitializedLength());
      masm.store32(count, initLength);

      // Update length.
      Address length(output, elementsOffset + ObjectElements::offsetOfLength());
      masm.store32(count, length);
    };

    // The array object was successfully created. Set the length and initialized
    // length and then proceed to fill the elements.
    count.match([&](Register count) { setInitializedLengthAndLength(count); },
                [&](int32_t count) {
                  // A zero-length result needs no updates: the template
                  // object is already empty.
                  if (count > 0) {
                    setInitializedLengthAndLength(Imm32(count));
                  }
                });
  }

  masm.bind(ool->rejoin());
}
// Create an array containing the frame's actual arguments in the range
// [begin, begin + count). Both |begin| and |count| are dynamic (registers).
void CodeGenerator::visitFrameArgumentsSlice(LFrameArgumentsSlice* lir) {
  Register begin = ToRegister(lir->begin());
  Register count = ToRegister(lir->count());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  masm.loadNumActualArgs(FramePointer, temp);
  emitAssertArgumentsSliceBounds(RegisterOrInt32(begin), RegisterOrInt32(count),
                                 temp);
#endif

  // Allocate the result array and set its length/initialized length.
  emitNewArray(lir, RegisterOrInt32(count), output, temp);

  // Nothing to copy for an empty slice.
  Label done;
  masm.branch32(Assembler::Equal, count, Imm32(0), &done);

  // Grab a free Value register for the copy loop; everything already in use
  // is excluded from the set.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  allRegs.take(begin);
  allRegs.take(count);
  allRegs.take(temp);
  allRegs.take(output);

  ValueOperand value = allRegs.takeAnyValue();

  // |output|, |begin| and |value| are clobbered below; save and restore them
  // around the loop.
  LiveRegisterSet liveRegs;
  liveRegs.add(output);
  liveRegs.add(begin);
  liveRegs.add(value);

  masm.PushRegsInMask(liveRegs);

  // Initialize all elements.

  // Reuse |output| to hold the elements pointer during the loop; it is
  // restored by PopRegsInMask below.
  Register elements = output;
  masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

  // |argIndex| walks the frame's actuals starting at |begin|.
  Register argIndex = begin;

  // |index| walks the destination array starting at zero.
  Register index = temp;
  masm.move32(Imm32(0), index);

  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
  BaseValueIndex argPtr(FramePointer, argIndex, argvOffset);

  Label loop;
  masm.bind(&loop);
  {
    masm.loadValue(argPtr, value);

    // We don't need a pre-barrier, because the element at |index| is guaranteed
    // to be a non-GC thing (either uninitialized memory or the magic hole
    // value).
    masm.storeValue(value, BaseObjectElementIndex(elements, index));

    masm.add32(Imm32(1), index);
    masm.add32(Imm32(1), argIndex);
  }
  masm.branch32(Assembler::LessThan, index, count, &loop);

  masm.PopRegsInMask(liveRegs);

  // Emit a post-write barrier if |output| is tenured.
  //
  // We expect that |output| is nursery allocated, so it isn't worth the
  // trouble to check if no frame argument is a nursery thing, which would
  // allow to omit the post-write barrier.
  masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);

  LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
  volatileRegs.takeUnchecked(temp);
  if (output.volatile_()) {
    volatileRegs.addUnchecked(output);
  }

  masm.PushRegsInMask(volatileRegs);
  emitPostWriteBarrier(output);
  masm.PopRegsInMask(volatileRegs);

  masm.bind(&done);
}
13411 CodeGenerator::RegisterOrInt32 CodeGenerator::ToRegisterOrInt32(
13412 const LAllocation* allocation) {
13413 if (allocation->isConstant()) {
13414 return RegisterOrInt32(allocation->toConstant()->toInt32());
13416 return RegisterOrInt32(ToRegister(allocation));
// Create an array containing a [begin, begin + count) slice of an inlined
// call's arguments. |begin| and |count| may each be a register or a
// compile-time int32 constant; the arguments themselves are LIR operands
// (constants or registers), not frame memory.
void CodeGenerator::visitInlineArgumentsSlice(LInlineArgumentsSlice* lir) {
  RegisterOrInt32 begin = ToRegisterOrInt32(lir->begin());
  RegisterOrInt32 count = ToRegisterOrInt32(lir->count());
  Register temp = ToRegister(lir->temp());
  Register output = ToRegister(lir->output());

  uint32_t numActuals = lir->mir()->numActuals();

#ifdef DEBUG
  masm.move32(Imm32(numActuals), temp);

  emitAssertArgumentsSliceBounds(begin, count, temp);
#endif

  // Allocate the result array and set its length/initialized length.
  emitNewArray(lir, count, output, temp);

  // We're done if there are no actual arguments.
  if (numActuals == 0) {
    return;
  }

  // Check if any arguments have to be copied.
  Label done;
  if (count.is<Register>()) {
    masm.branch32(Assembler::Equal, count.as<Register>(), Imm32(0), &done);
  } else if (count.as<int32_t>() == 0) {
    return;
  }

  auto getArg = [&](uint32_t i) {
    return toConstantOrRegister(lir, LInlineArgumentsSlice::ArgIndex(i),
                                lir->mir()->getArg(i)->type());
  };

  auto storeArg = [&](uint32_t i, auto dest) {
    // We don't need a pre-barrier because the element at |index| is guaranteed
    // to be a non-GC thing (either uninitialized memory or the magic hole
    // value).
    masm.storeConstantOrRegister(getArg(i), dest);
  };

  // Initialize all elements.
  if (numActuals == 1) {
    // There's exactly one argument. We've checked that |count| is non-zero,
    // which implies that |begin| must be zero.
    MOZ_ASSERT_IF(begin.is<int32_t>(), begin.as<int32_t>() == 0);

    Register elements = temp;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    storeArg(0, Address(elements, 0));
  } else if (begin.is<Register>()) {
    // There is more than one argument and |begin| isn't a compile-time
    // constant. Iterate through 0..numActuals to search for |begin| and then
    // start copying |count| arguments from that index.
    //
    // The arguments aren't addressable by a dynamic index, so this emits an
    // unrolled dispatch: for each possible index |i|, a guard that skips the
    // copy unless |argIndex| == i.

    LiveGeneralRegisterSet liveRegs;
    liveRegs.add(output);
    liveRegs.add(begin.as<Register>());

    masm.PushRegsInMask(liveRegs);

    // |output| is clobbered to hold the elements pointer; restored by
    // PopRegsInMask below.
    Register elements = output;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    Register argIndex = begin.as<Register>();

    Register index = temp;
    masm.move32(Imm32(0), index);

    Label doneLoop;
    for (uint32_t i = 0; i < numActuals; ++i) {
      Label next;
      masm.branch32(Assembler::NotEqual, argIndex, Imm32(i), &next);

      storeArg(i, BaseObjectElementIndex(elements, index));

      masm.add32(Imm32(1), index);
      masm.add32(Imm32(1), argIndex);

      // Stop once |count| elements were copied.
      if (count.is<Register>()) {
        masm.branch32(Assembler::GreaterThanOrEqual, index,
                      count.as<Register>(), &doneLoop);
      } else {
        masm.branch32(Assembler::GreaterThanOrEqual, index,
                      Imm32(count.as<int32_t>()), &doneLoop);
      }

      masm.bind(&next);
    }
    masm.bind(&doneLoop);

    masm.PopRegsInMask(liveRegs);
  } else {
    // There is more than one argument and |begin| is a compile-time constant.

    Register elements = temp;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    int32_t argIndex = begin.as<int32_t>();

    int32_t index = 0;

    Label doneLoop;
    for (uint32_t i = argIndex; i < numActuals; ++i) {
      storeArg(i, Address(elements, index * sizeof(Value)));

      index += 1;

      // Stop once |count| elements were copied; with a constant count this
      // ends the unrolled copy at compile time.
      if (count.is<Register>()) {
        masm.branch32(Assembler::LessThanOrEqual, count.as<Register>(),
                      Imm32(index), &doneLoop);
      } else {
        if (index >= count.as<int32_t>()) {
          break;
        }
      }
    }
    masm.bind(&doneLoop);
  }

  // Determine if we have to emit post-write barrier.
  //
  // If either |begin| or |count| is a constant, use their value directly.
  // Otherwise assume we copy all inline arguments from 0..numActuals.
  bool postWriteBarrier = false;
  uint32_t actualBegin = begin.match([](Register) { return 0; },
                                     [](int32_t value) { return value; });
  uint32_t actualCount =
      count.match([=](Register) { return numActuals; },
                  [](int32_t value) -> uint32_t { return value; });
  for (uint32_t i = 0; i < actualCount; ++i) {
    ConstantOrRegister arg = getArg(actualBegin + i);
    if (arg.constant()) {
      Value v = arg.value();
      if (v.isGCThing() && IsInsideNursery(v.toGCThing())) {
        postWriteBarrier = true;
      }
    } else {
      MIRType type = arg.reg().type();
      if (type == MIRType::Value || NeedsPostBarrier(type)) {
        postWriteBarrier = true;
      }
    }
  }

  // Emit a post-write barrier if |output| is tenured and we couldn't
  // determine at compile-time that no barrier is needed.
  if (postWriteBarrier) {
    masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(temp);
    if (output.volatile_()) {
      volatileRegs.addUnchecked(output);
    }

    masm.PushRegsInMask(volatileRegs);
    emitPostWriteBarrier(output);
    masm.PopRegsInMask(volatileRegs);
  }

  masm.bind(&done);
}
13584 void CodeGenerator::visitNormalizeSliceTerm(LNormalizeSliceTerm* lir) {
13585 Register value = ToRegister(lir->value());
13586 Register length = ToRegister(lir->length());
13587 Register output = ToRegister(lir->output());
13589 masm.move32(value, output);
13591 Label positive;
13592 masm.branch32(Assembler::GreaterThanOrEqual, value, Imm32(0), &positive);
13594 Label done;
13595 masm.add32(length, output);
13596 masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(0), &done);
13597 masm.move32(Imm32(0), output);
13598 masm.jump(&done);
13600 masm.bind(&positive);
13601 masm.cmp32Move32(Assembler::LessThan, length, value, length, output);
13603 masm.bind(&done);
// Implements the Array.prototype.join fast paths: an empty array joins to
// the empty atom, and a single-element array whose element is already a
// string joins to that string. Everything else calls into the VM.
void CodeGenerator::visitArrayJoin(LArrayJoin* lir) {
  Label skipCall;

  Register output = ToRegister(lir->output());
  Register sep = ToRegister(lir->separator());
  Register array = ToRegister(lir->array());
  Register temp = ToRegister(lir->temp0());

  // Fast path for simple length <= 1 cases.

  masm.loadPtr(Address(array, NativeObject::offsetOfElements()), temp);
  Address length(temp, ObjectElements::offsetOfLength());
  Address initLength(temp, ObjectElements::offsetOfInitializedLength());

  // Check for length == 0
  Label notEmpty;
  masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmpty);
  // Empty arrays join to "" regardless of the separator.
  const JSAtomState& names = gen->runtime->names();
  masm.movePtr(ImmGCPtr(names.empty_), output);
  masm.jump(&skipCall);

  masm.bind(&notEmpty);
  Label notSingleString;
  // Check for length == 1, initializedLength >= 1, arr[0].isString()
  masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleString);
  masm.branch32(Assembler::LessThan, initLength, Imm32(1), &notSingleString);

  Address elem0(temp, 0);
  masm.branchTestString(Assembler::NotEqual, elem0, &notSingleString);

  // At this point, 'output' can be used as a scratch register, since we're
  // guaranteed to succeed.
  masm.unboxString(elem0, output);
  masm.jump(&skipCall);
  masm.bind(&notSingleString);

  // Generic case: call into the VM.
  pushArg(sep);
  pushArg(array);

  using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
  callVM<Fn, jit::ArrayJoin>(lir);
  masm.bind(&skipCall);
}
13651 void CodeGenerator::visitGetIteratorCache(LGetIteratorCache* lir) {
13652 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
13653 TypedOrValueRegister val =
13654 toConstantOrRegister(lir, LGetIteratorCache::ValueIndex,
13655 lir->mir()->value()->type())
13656 .reg();
13657 Register output = ToRegister(lir->output());
13658 Register temp0 = ToRegister(lir->temp0());
13659 Register temp1 = ToRegister(lir->temp1());
13661 IonGetIteratorIC ic(liveRegs, val, output, temp0, temp1);
13662 addIC(lir, allocateIC(ic));
13665 void CodeGenerator::visitOptimizeSpreadCallCache(
13666 LOptimizeSpreadCallCache* lir) {
13667 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
13668 ValueOperand val = ToValue(lir, LOptimizeSpreadCallCache::ValueIndex);
13669 ValueOperand output = ToOutValue(lir);
13670 Register temp = ToRegister(lir->temp0());
13672 IonOptimizeSpreadCallIC ic(liveRegs, val, output, temp);
13673 addIC(lir, allocateIC(ic));
13676 void CodeGenerator::visitCloseIterCache(LCloseIterCache* lir) {
13677 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
13678 Register iter = ToRegister(lir->iter());
13679 Register temp = ToRegister(lir->temp0());
13680 CompletionKind kind = CompletionKind(lir->mir()->completionKind());
13682 IonCloseIterIC ic(liveRegs, iter, temp, kind);
13683 addIC(lir, allocateIC(ic));
13686 void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
13687 const Register obj = ToRegister(lir->iterator());
13688 const ValueOperand output = ToOutValue(lir);
13689 const Register temp = ToRegister(lir->temp0());
13691 masm.iteratorMore(obj, output, temp);
13694 void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
13695 ValueOperand input = ToValue(lir, LIsNoIterAndBranch::Input);
13696 Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
13697 Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
13699 masm.branchTestMagic(Assembler::Equal, input, ifTrue);
13701 if (!isNextBlock(lir->ifFalse()->lir())) {
13702 masm.jump(ifFalse);
13706 void CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) {
13707 const Register obj = ToRegister(lir->object());
13708 const Register temp0 = ToRegister(lir->temp0());
13709 const Register temp1 = ToRegister(lir->temp1());
13710 const Register temp2 = ToRegister(lir->temp2());
13712 masm.iteratorClose(obj, temp0, temp1, temp2);
13715 void CodeGenerator::visitArgumentsLength(LArgumentsLength* lir) {
13716 // read number of actual arguments from the JS frame.
13717 Register argc = ToRegister(lir->output());
13718 masm.loadNumActualArgs(FramePointer, argc);
// Load an argument Value from the JS frame into the output. |index| may be a
// compile-time constant or a register; debug builds assert it is in bounds.
void CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir) {
  ValueOperand result = ToOutValue(lir);
  const LAllocation* index = lir->index();
  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();

  // This instruction is used to access actual arguments and formal arguments.
  // The number of Values on the stack is |max(numFormals, numActuals)|, so we
  // assert |index < numFormals || index < numActuals| in debug builds.
  DebugOnly<size_t> numFormals = gen->outerInfo().script()->function()->nargs();

  if (index->isConstant()) {
    int32_t i = index->toConstant()->toInt32();
#ifdef DEBUG
    // A constant index below numFormals is trivially in bounds; only check
    // against numActuals otherwise.
    if (uint32_t(i) >= numFormals) {
      Label ok;
      Register argc = result.scratchReg();
      masm.loadNumActualArgs(FramePointer, argc);
      masm.branch32(Assembler::Above, argc, Imm32(i), &ok);
      masm.assumeUnreachable("Invalid argument index");
      masm.bind(&ok);
    }
#endif
    Address argPtr(FramePointer, sizeof(Value) * i + argvOffset);
    masm.loadValue(argPtr, result);
  } else {
    Register i = ToRegister(index);
#ifdef DEBUG
    Label ok;
    Register argc = result.scratchReg();
    masm.branch32(Assembler::Below, i, Imm32(numFormals), &ok);
    masm.loadNumActualArgs(FramePointer, argc);
    masm.branch32(Assembler::Above, argc, i, &ok);
    masm.assumeUnreachable("Invalid argument index");
    masm.bind(&ok);
#endif
    BaseValueIndex argPtr(FramePointer, i, argvOffset);
    masm.loadValue(argPtr, result);
  }
}
// Load the frame argument at |index|, producing |undefined| when |index| is
// in [length, INT32_MAX]. Negative indices take the bailout path. The bounds
// check is Spectre-hardened.
void CodeGenerator::visitGetFrameArgumentHole(LGetFrameArgumentHole* lir) {
  ValueOperand result = ToOutValue(lir);
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();

  Label outOfBounds, done;
  masm.spectreBoundsCheck32(index, length, spectreTemp, &outOfBounds);

  BaseValueIndex argPtr(FramePointer, index, argvOffset);
  masm.loadValue(argPtr, result);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  // Only indices >= length produce |undefined|; a negative index bails out.
  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
  masm.moveValue(UndefinedValue(), result);

  masm.bind(&done);
}
// Create the rest-parameter array. When a shape is available, try to
// inline-allocate the array object; the VM call then fills it with the
// actual arguments beyond the formals. A nullptr array is passed to the VM
// when inline allocation fails or no shape is available.
void CodeGenerator::visitRest(LRest* lir) {
  Register numActuals = ToRegister(lir->numActuals());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  unsigned numFormals = lir->mir()->numFormals();

  if (Shape* shape = lir->mir()->shape()) {
    // Allocate an empty array with a small fixed elements capacity.
    uint32_t arrayLength = 0;
    uint32_t arrayCapacity = 2;
    gc::AllocKind allocKind = GuessArrayGCKind(arrayCapacity);
    MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
    allocKind = ForegroundToBackgroundAllocKind(allocKind);
    MOZ_ASSERT(GetGCKindSlots(allocKind) ==
               arrayCapacity + ObjectElements::VALUES_PER_HEADER);

    Label joinAlloc, failAlloc;
    masm.movePtr(ImmGCPtr(shape), temp0);
    masm.createArrayWithFixedElements(temp2, temp0, temp1, InvalidReg,
                                      arrayLength, arrayCapacity, 0, 0,
                                      allocKind, gc::Heap::Default, &failAlloc);
    masm.jump(&joinAlloc);
    {
      // Inline allocation failed: fall back to passing nullptr to the VM.
      masm.bind(&failAlloc);
      masm.movePtr(ImmPtr(nullptr), temp2);
    }
    masm.bind(&joinAlloc);
  } else {
    masm.movePtr(ImmPtr(nullptr), temp2);
  }

  // Set temp1 to the address of the first actual argument.
  size_t actualsOffset = JitFrameLayout::offsetOfActualArgs();
  masm.computeEffectiveAddress(Address(FramePointer, actualsOffset), temp1);

  // Compute array length: max(numActuals - numFormals, 0).
  Register lengthReg;
  if (numFormals) {
    lengthReg = temp0;
    Label emptyLength, joinLength;
    masm.branch32(Assembler::LessThanOrEqual, numActuals, Imm32(numFormals),
                  &emptyLength);
    {
      masm.move32(numActuals, lengthReg);
      masm.sub32(Imm32(numFormals), lengthReg);

      // Skip formal arguments.
      masm.addPtr(Imm32(sizeof(Value) * numFormals), temp1);

      masm.jump(&joinLength);
    }
    masm.bind(&emptyLength);
    {
      masm.move32(Imm32(0), lengthReg);

      // Leave temp1 pointed to the start of actuals() when the rest-array
      // length is zero. We don't use |actuals() + numFormals| because
      // |numFormals| can be any non-negative int32 value when this MRest was
      // created from scalar replacement optimizations. And it seems
      // questionable to compute a Value* pointer which points to who knows
      // where.
    }
    masm.bind(&joinLength);
  } else {
    // Use numActuals directly when there are no formals.
    lengthReg = numActuals;
  }

  // VM call arguments (pushed in reverse): length, rest-start pointer, and
  // the pre-allocated array (or nullptr).
  pushArg(temp2);
  pushArg(temp1);
  pushArg(lengthReg);

  using Fn = JSObject* (*)(JSContext*, uint32_t, Value*, HandleObject);
  callVM<Fn, InitRestParameter>(lir);
}
13858 // Create a stackmap from the given safepoint, with the structure:
13860 // <reg dump area, if trap>
13861 // | ++ <body (general spill)>
13862 // | ++ <space for Frame>
13863 // | ++ <inbound args>
13864 // | |
13865 // Lowest Addr Highest Addr
13867 // The caller owns the resulting stackmap. This assumes a grow-down stack.
13869 // For non-debug builds, if the stackmap would contain no pointers, no
13870 // stackmap is created, and nullptr is returned. For a debug build, a
13871 // stackmap is always created and returned.
// See the block comment above for the stackmap layout this produces.
// On success, *result holds the new map (owned by the caller) or nullptr
// when no map was needed; returns false only on OOM.
static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
                                         const RegisterOffsets& trapExitLayout,
                                         size_t trapExitLayoutNumWords,
                                         size_t nInboundStackArgBytes,
                                         wasm::StackMap** result) {
  // Ensure this is defined on all return paths.
  *result = nullptr;

  // The size of the wasm::Frame itself.
  const size_t nFrameBytes = sizeof(wasm::Frame);

  // This is the number of bytes in the general spill area, below the Frame.
  const size_t nBodyBytes = safepoint.framePushedAtStackMapBase();

  // This is the number of bytes in the general spill area, the Frame, and the
  // incoming args, but not including any trap (register dump) area.
  const size_t nNonTrapBytes = nBodyBytes + nFrameBytes + nInboundStackArgBytes;
  MOZ_ASSERT(nNonTrapBytes % sizeof(void*) == 0);

  // This is the total number of bytes covered by the map.
  const DebugOnly<size_t> nTotalBytes =
      nNonTrapBytes +
      (safepoint.isWasmTrap() ? (trapExitLayoutNumWords * sizeof(void*)) : 0);

  // Create the stackmap initially in this vector. Since most frames will
  // contain 128 or fewer words, heap allocation is avoided in the majority of
  // cases. vec[0] is for the lowest address in the map, vec[N-1] is for the
  // highest address in the map.
  wasm::StackMapBoolVector vec;

  // Keep track of whether we've actually seen any refs.
  bool hasRefs = false;

  // REG DUMP AREA, if any.
  const LiveGeneralRegisterSet wasmAnyRefRegs = safepoint.wasmAnyRefRegs();
  GeneralRegisterForwardIterator wasmAnyRefRegsIter(wasmAnyRefRegs);
  if (safepoint.isWasmTrap()) {
    // Deal with roots in registers. This can only happen for safepoints
    // associated with a trap. For safepoints associated with a call, we
    // don't expect to have any live values in registers, hence no roots in
    // registers.
    if (!vec.appendN(false, trapExitLayoutNumWords)) {
      return false;
    }
    for (; wasmAnyRefRegsIter.more(); ++wasmAnyRefRegsIter) {
      Register reg = *wasmAnyRefRegsIter;
      size_t offsetFromTop = trapExitLayout.getOffset(reg);

      // If this doesn't hold, the associated register wasn't saved by
      // the trap exit stub. Better to crash now than much later, in
      // some obscure place, and possibly with security consequences.
      MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);

      // offsetFromTop is an offset in words down from the highest
      // address in the exit stub save area. Switch it around to be an
      // offset up from the bottom of the (integer register) save area.
      size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;

      vec[offsetFromBottom] = true;
      hasRefs = true;
    }
  } else {
    // This map is associated with a call instruction. We expect there to be
    // no live ref-carrying registers, and if there are we're in deep trouble.
    MOZ_RELEASE_ASSERT(!wasmAnyRefRegsIter.more());
  }

  // BODY (GENERAL SPILL) AREA and FRAME and INCOMING ARGS
  // Deal with roots on the stack.
  size_t wordsSoFar = vec.length();
  if (!vec.appendN(false, nNonTrapBytes / sizeof(void*))) {
    return false;
  }
  const LSafepoint::SlotList& wasmAnyRefSlots = safepoint.wasmAnyRefSlots();
  for (SafepointSlotEntry wasmAnyRefSlot : wasmAnyRefSlots) {
    // The following needs to correspond with JitFrameLayout::slotRef
    // wasmAnyRefSlot.stack == 0 means the slot is in the args area
    if (wasmAnyRefSlot.stack) {
      // It's a slot in the body allocation, so .slot is interpreted
      // as an index downwards from the Frame*
      MOZ_ASSERT(wasmAnyRefSlot.slot <= nBodyBytes);
      uint32_t offsetInBytes = nBodyBytes - wasmAnyRefSlot.slot;
      MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
      vec[wordsSoFar + offsetInBytes / sizeof(void*)] = true;
    } else {
      // It's an argument slot
      MOZ_ASSERT(wasmAnyRefSlot.slot < nInboundStackArgBytes);
      uint32_t offsetInBytes = nBodyBytes + nFrameBytes + wasmAnyRefSlot.slot;
      MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
      vec[wordsSoFar + offsetInBytes / sizeof(void*)] = true;
    }
    hasRefs = true;
  }

#ifndef DEBUG
  // We saw no references, and this is a non-debug build, so don't bother
  // building the stackmap.
  if (!hasRefs) {
    return true;
  }
#endif

  // Convert vec into a wasm::StackMap.
  MOZ_ASSERT(vec.length() * sizeof(void*) == nTotalBytes);
  wasm::StackMap* stackMap =
      wasm::ConvertStackMapBoolVectorToStackMap(vec, hasRefs);
  if (!stackMap) {
    return false;
  }
  if (safepoint.isWasmTrap()) {
    stackMap->setExitStubWords(trapExitLayoutNumWords);
  }

  // Record in the map, how far down from the highest address the Frame* is.
  // Take the opportunity to check that we haven't marked any part of the
  // Frame itself as a pointer.
  stackMap->setFrameOffsetFromTop((nInboundStackArgBytes + nFrameBytes) /
                                  sizeof(void*));
#ifdef DEBUG
  for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
    MOZ_ASSERT(stackMap->getBit(stackMap->header.numMappedWords -
                                stackMap->header.frameOffsetFromTop + i) == 0);
  }
#endif

  *result = stackMap;
  return true;
}
// Emit the machine code for a wasm function: prologue (with optional
// stack-overflow check), body, epilogue and out-of-line code, then convert
// all collected safepoints into stackmaps. Returns false on OOM or when the
// frame is too large.
bool CodeGenerator::generateWasm(
    wasm::CallIndirectId callIndirectId, wasm::BytecodeOffset trapOffset,
    const wasm::ArgTypeVector& argTypes, const RegisterOffsets& trapExitLayout,
    size_t trapExitLayoutNumWords, wasm::FuncOffsets* offsets,
    wasm::StackMaps* stackMaps, wasm::Decoder* decoder) {
  AutoCreatedBy acb(masm, "CodeGenerator::generateWasm");

  JitSpew(JitSpew_Codegen, "# Emitting wasm code");

  size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(argTypes);
  inboundStackArgBytes_ = nInboundStackArgBytes;

  wasm::GenerateFunctionPrologue(masm, callIndirectId, mozilla::Nothing(),
                                 offsets);

  MOZ_ASSERT(masm.framePushed() == 0);

  // Very large frames are implausible, probably an attack.
  if (frameSize() > wasm::MaxFrameSize) {
    return decoder->fail(decoder->beginOffset(), "stack frame is too large");
  }

  if (omitOverRecursedCheck()) {
    masm.reserveStack(frameSize());
  } else {
    std::pair<CodeOffset, uint32_t> pair =
        masm.wasmReserveStackChecked(frameSize(), trapOffset);
    CodeOffset trapInsnOffset = pair.first;
    size_t nBytesReservedBeforeTrap = pair.second;

    // The stack-check trap needs its own stackmap, built from the bytes
    // reserved before the trap instruction.
    wasm::StackMap* functionEntryStackMap = nullptr;
    if (!CreateStackMapForFunctionEntryTrap(
            argTypes, trapExitLayout, trapExitLayoutNumWords,
            nBytesReservedBeforeTrap, nInboundStackArgBytes,
            &functionEntryStackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map, even if there are no
    // refs to track.
    MOZ_ASSERT(functionEntryStackMap);

    if (functionEntryStackMap &&
        !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
                        functionEntryStackMap)) {
      functionEntryStackMap->destroy();
      return false;
    }
  }

  MOZ_ASSERT(masm.framePushed() == frameSize());

  if (!generateBody()) {
    return false;
  }

  masm.bind(&returnLabel_);
  wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);

  if (!generateOutOfLineCode()) {
    return false;
  }

  masm.flush();
  if (masm.oom()) {
    return false;
  }

  offsets->end = masm.currentOffset();

  // Wasm compilation must not have used any of the Ion-only side tables.
  MOZ_ASSERT(!masm.failureLabel()->used());
  MOZ_ASSERT(snapshots_.listSize() == 0);
  MOZ_ASSERT(snapshots_.RVATableSize() == 0);
  MOZ_ASSERT(recovers_.size() == 0);
  MOZ_ASSERT(graph.numConstants() == 0);
  MOZ_ASSERT(osiIndices_.empty());
  MOZ_ASSERT(icList_.empty());
  MOZ_ASSERT(safepoints_.size() == 0);
  MOZ_ASSERT(!scriptCounts_);

  // Convert the safepoints to stackmaps and add them to our running
  // collection thereof.
  for (CodegenSafepointIndex& index : safepointIndices_) {
    wasm::StackMap* stackMap = nullptr;
    if (!CreateStackMapFromLSafepoint(*index.safepoint(), trapExitLayout,
                                      trapExitLayoutNumWords,
                                      nInboundStackArgBytes, &stackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map.
    MOZ_ASSERT(stackMap);
    if (!stackMap) {
      // Nothing to track for this safepoint (non-debug build, no refs).
      continue;
    }

    if (!stackMaps->add((uint8_t*)(uintptr_t)index.displacement(), stackMap)) {
      stackMap->destroy();
      return false;
    }
  }

  return true;
}
// Emit the machine code for an Ion-compiled script: prologue, body, epilogue,
// invalidation epilogue, and out-of-line code. The native => bytecode table
// is re-anchored to the top-level script's entry around each phase, and
// safepoints are encoded last. Returns false on failure or OOM.
bool CodeGenerator::generate() {
  AutoCreatedBy acb(masm, "CodeGenerator::generate");

  JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
          gen->outerInfo().script()->filename(),
          gen->outerInfo().script()->lineno(),
          gen->outerInfo().script()->column().zeroOriginValue());

  // Initialize native code table with an entry to the start of
  // top-level script.
  InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
  jsbytecode* startPC = tree->script()->code();
  BytecodeSite* startSite = new (gen->alloc()) BytecodeSite(tree, startPC);
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  if (!safepoints_.init(gen->alloc())) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "Prologue");
  if (!generatePrologue()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  if (!generateBody()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "Epilogue");
  if (!generateEpilogue()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "InvalidateEpilogue");
  generateInvalidateEpilogue();

  // native => bytecode entries for OOL code will be added
  // by CodeGeneratorShared::generateOutOfLineCode
  perfSpewer_.recordOffset(masm, "OOLCode");
  if (!generateOutOfLineCode()) {
    return false;
  }

  // Add terminal entry.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  // Dump Native to bytecode entries to spew.
  dumpNativeToBytecodeEntries();

  // We encode safepoints after the OSI-point offsets have been determined.
  if (!encodeSafepoints()) {
    return false;
  }

  return !masm.oom();
}
// Register every script inlined by this compilation with the JitZone, so the
// compilation is invalidated if any of them needs recompilation. Sets
// *isValid to false (without reporting failure) when an inlined script has
// become a debuggee; returns false only on OOM.
static bool AddInlinedCompilations(JSContext* cx, HandleScript script,
                                   IonCompilationId compilationId,
                                   const WarpSnapshot* snapshot,
                                   bool* isValid) {
  MOZ_ASSERT(!*isValid);
  RecompileInfo recompileInfo(script, compilationId);

  JitZone* jitZone = cx->zone()->jitZone();

  for (const auto* scriptSnapshot : snapshot->scripts()) {
    JSScript* inlinedScript = scriptSnapshot->script();
    // The outer script itself is tracked elsewhere; skip it here.
    if (inlinedScript == script) {
      continue;
    }

    // TODO(post-Warp): This matches FinishCompilation and is necessary to
    // ensure in-progress compilations are canceled when an inlined function
    // becomes a debuggee. See the breakpoint-14.js jit-test.
    // When TI is gone, try to clean this up by moving AddInlinedCompilations to
    // WarpOracle so that we can handle this as part of addPendingRecompile
    // instead of requiring this separate check.
    if (inlinedScript->isDebuggee()) {
      *isValid = false;
      return true;
    }

    if (!jitZone->addInlinedCompilation(recompileInfo, inlinedScript)) {
      return false;
    }
  }

  *isValid = true;
  return true;
}
// Finish an Ion compilation: allocate the IonScript, link the generated
// machine code, patch embedded IonScript/IC/nursery-object pointers, copy
// the side tables (safepoints, snapshots, recovers, constants), and attach
// the result to |script|'s JitScript. Returns false only on OOM; returns
// true without attaching anything if the compilation was invalidated while
// in flight.
bool CodeGenerator::link(JSContext* cx, const WarpSnapshot* snapshot) {
  AutoCreatedBy acb(masm, "CodeGenerator::link");

  // We cancel off-thread Ion compilations in a few places during GC, but if
  // this compilation was performed off-thread it will already have been
  // removed from the relevant lists by this point. Don't allow GC here.
  JS::AutoAssertNoGC nogc(cx);

  RootedScript script(cx, gen->outerInfo().script());
  MOZ_ASSERT(!script->hasIonScript());

  // Perform any read barriers which were skipped while compiling the
  // script, which may have happened off-thread.
  JitZone* jitZone = cx->zone()->jitZone();
  jitZone->performStubReadBarriers(zoneStubsToReadBarrier_);

  if (scriptCounts_ && !script->hasScriptCounts() &&
      !script->initScriptCounts(cx)) {
    return false;
  }

  IonCompilationId compilationId =
      cx->runtime()->jitRuntime()->nextCompilationId();
  jitZone->currentCompilationIdRef().emplace(compilationId);
  auto resetCurrentId = mozilla::MakeScopeExit(
      [jitZone] { jitZone->currentCompilationIdRef().reset(); });

  // Record constraints. If an error occurred, returns false and potentially
  // prevents future compilations. Otherwise, if an invalidation occurred,
  // then skip the current compilation.
  bool isValid = false;

  // If an inlined script is invalidated (for example, by attaching
  // a debugger), we must also invalidate the parent IonScript.
  if (!AddInlinedCompilations(cx, script, compilationId, snapshot, &isValid)) {
    return false;
  }
  if (!isValid) {
    return true;
  }

  // +1 for |this|.
  uint32_t argumentSlots = (gen->outerInfo().nargs() + 1) * sizeof(Value);

  size_t numNurseryObjects = snapshot->nurseryObjects().length();

  IonScript* ionScript = IonScript::New(
      cx, compilationId, graph.localSlotsSize(), argumentSlots, frameDepth_,
      snapshots_.listSize(), snapshots_.RVATableSize(), recovers_.size(),
      graph.numConstants(), numNurseryObjects, safepointIndices_.length(),
      osiIndices_.length(), icList_.length(), runtimeData_.length(),
      safepoints_.size());
  if (!ionScript) {
    return false;
  }
#ifdef DEBUG
  ionScript->setICHash(snapshot->icHash());
#endif

  auto freeIonScript = mozilla::MakeScopeExit([&ionScript] {
    // Use js_free instead of IonScript::Destroy: the cache list is still
    // uninitialized.
    js_free(ionScript);
  });

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Ion);
  if (!code) {
    return false;
  }

  // Encode native to bytecode map if profiling is enabled.
  if (isProfilerInstrumentationEnabled()) {
    // Generate native-to-bytecode main table.
    IonEntry::ScriptList scriptList;
    if (!generateCompactNativeToBytecodeMap(cx, code, scriptList)) {
      return false;
    }

    uint8_t* ionTableAddr =
        ((uint8_t*)nativeToBytecodeMap_.get()) + nativeToBytecodeTableOffset_;
    JitcodeIonTable* ionTable = (JitcodeIonTable*)ionTableAddr;

    // Construct the IonEntry that will go into the global table.
    auto entry = MakeJitcodeGlobalEntry<IonEntry>(
        cx, code, code->raw(), code->rawEnd(), std::move(scriptList), ionTable);
    if (!entry) {
      return false;
    }
    (void)nativeToBytecodeMap_.release();  // Table is now owned by |entry|.

    // Add entry to the global table.
    JitcodeGlobalTable* globalTable =
        cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    if (!globalTable->addEntry(std::move(entry))) {
      return false;
    }

    // Mark the jitcode as having a bytecode map.
    code->setHasBytecodeMap();
  } else {
    // Add a dummy jitcodeGlobalTable entry.
    auto entry = MakeJitcodeGlobalEntry<DummyEntry>(cx, code, code->raw(),
                                                    code->rawEnd());
    if (!entry) {
      return false;
    }

    // Add entry to the global table.
    JitcodeGlobalTable* globalTable =
        cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    if (!globalTable->addEntry(std::move(entry))) {
      return false;
    }

    // Mark the jitcode as having a bytecode map.
    code->setHasBytecodeMap();
  }

  ionScript->setMethod(code);

  // If the Gecko Profiler is enabled, mark IonScript as having been
  // instrumented accordingly.
  if (isProfilerInstrumentationEnabled()) {
    ionScript->setHasProfilingInstrumentation();
  }

  // Patch the placeholder (-1) written next to the invalidation epilogue
  // with the now-known IonScript pointer.
  Assembler::PatchDataWithValueCheck(
      CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript),
      ImmPtr((void*)-1));

  for (CodeOffset offset : ionScriptLabels_) {
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, offset),
                                       ImmPtr(ionScript), ImmPtr((void*)-1));
  }

  // Patch in the addresses of the IonScript's nursery-object slots.
  for (NurseryObjectLabel label : ionNurseryObjectLabels_) {
    void* entry = ionScript->addressOfNurseryObject(label.nurseryIndex);
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
                                       ImmPtr(entry), ImmPtr((void*)-1));
  }

  // Copy the data used for generating inline caches during execution.
  if (runtimeData_.length()) {
    ionScript->copyRuntimeData(&runtimeData_[0]);
  }
  if (icList_.length()) {
    ionScript->copyICEntries(&icList_[0]);
  }

  // Patch each IC site's jump target and pushed IC pointer.
  for (size_t i = 0; i < icInfo_.length(); i++) {
    IonIC& ic = ionScript->getICFromIndex(i);
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(code, icInfo_[i].icOffsetForJump),
        ImmPtr(ic.codeRawPtr()), ImmPtr((void*)-1));
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(code, icInfo_[i].icOffsetForPush), ImmPtr(&ic),
        ImmPtr((void*)-1));
  }

  JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)", (void*)ionScript,
          (void*)code->raw());

  ionScript->setInvalidationEpilogueDataOffset(
      invalidateEpilogueData_.offset());
  if (jsbytecode* osrPc = gen->outerInfo().osrPc()) {
    ionScript->setOsrPc(osrPc);
    ionScript->setOsrEntryOffset(getOsrEntryOffset());
  }
  ionScript->setInvalidationEpilogueOffset(invalidate_.offset());

  perfSpewer_.saveProfile(cx, script, code);

#ifdef MOZ_VTUNE
  vtune::MarkScript(code, script, "ion");
#endif

  // Set an Ion counter hint for this script.
  if (cx->runtime()->jitRuntime()->hasJitHintsMap()) {
    JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
    jitHints->recordIonCompilation(script);
  }

  // Tables used for marking during GC.
  if (safepointIndices_.length()) {
    ionScript->copySafepointIndices(&safepointIndices_[0]);
  }
  if (safepoints_.size()) {
    ionScript->copySafepoints(&safepoints_);
  }

  // Tables used for recovering from an Ion frame.
  if (osiIndices_.length()) {
    ionScript->copyOsiIndices(&osiIndices_[0]);
  }
  if (snapshots_.listSize()) {
    ionScript->copySnapshots(&snapshots_);
  }
  MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
  if (recovers_.size()) {
    ionScript->copyRecovers(&recovers_);
  }
  if (graph.numConstants()) {
    const Value* vp = graph.constantPool();
    ionScript->copyConstants(vp);
    // If any constant is a nursery-allocated GC thing, register |script| in
    // the store buffer once so the constants get traced on minor GC.
    for (size_t i = 0; i < graph.numConstants(); i++) {
      const Value& v = vp[i];
      if (v.isGCThing()) {
        if (gc::StoreBuffer* sb = v.toGCThing()->storeBuffer()) {
          sb->putWholeCell(script);
          break;
        }
      }
    }
  }

  // Attach any generated script counts to the script.
  if (IonScriptCounts* counts = extractScriptCounts()) {
    script->addIonCounts(counts);
  }
  // WARNING: Code after this point must be infallible!

  // Copy the list of nursery objects. Note that the store buffer can add
  // HeapPtr edges that must be cleared in IonScript::Destroy. See the
  // infallibility warning above.
  const auto& nurseryObjects = snapshot->nurseryObjects();
  for (size_t i = 0; i < nurseryObjects.length(); i++) {
    ionScript->nurseryObjects()[i].init(nurseryObjects[i]);
  }

  // Transfer ownership of the IonScript to the JitScript. At this point enough
  // of the IonScript must be initialized for IonScript::Destroy to work.
  freeIonScript.release();
  script->jitScript()->setIonScript(script, ionScript);

  return true;
}
// An out-of-line path to convert a boxed int32 to either a float or double.
class OutOfLineUnboxFloatingPoint : public OutOfLineCodeBase<CodeGenerator> {
  // The LIR instruction whose int32 input needs converting on this path.
  LUnboxFloatingPoint* unboxFloatingPoint_;

 public:
  explicit OutOfLineUnboxFloatingPoint(LUnboxFloatingPoint* unboxFloatingPoint)
      : unboxFloatingPoint_(unboxFloatingPoint) {}

  // Dispatch to CodeGenerator::visitOutOfLineUnboxFloatingPoint.
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineUnboxFloatingPoint(this);
  }

  LUnboxFloatingPoint* unboxFloatingPoint() const {
    return unboxFloatingPoint_;
  }
};
// Unbox a Value into a float/double register. The inline path handles a
// double payload directly; non-double values take the out-of-line path,
// which converts int32 (or bails out if the instruction is fallible).
void CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir) {
  const ValueOperand box = ToValue(lir, LUnboxFloatingPoint::Input);
  const LDefinition* result = lir->output();

  // Out-of-line path to convert int32 to double or bailout
  // if this instruction is fallible.
  OutOfLineUnboxFloatingPoint* ool =
      new (alloc()) OutOfLineUnboxFloatingPoint(lir);
  addOutOfLineCode(ool, lir->mir());

  FloatRegister resultReg = ToFloatRegister(result);
  masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
  masm.unboxDouble(box, resultReg);
  // Narrow to float32 when that is the requested output type.
  if (lir->type() == MIRType::Float32) {
    masm.convertDoubleToFloat32(resultReg, resultReg);
  }
  masm.bind(ool->rejoin());
}
// Out-of-line path for visitUnboxFloatingPoint: the value was not a double.
// If fallible, bail out unless it is an int32; otherwise convert the int32
// payload to the requested floating-point type and rejoin.
void CodeGenerator::visitOutOfLineUnboxFloatingPoint(
    OutOfLineUnboxFloatingPoint* ool) {
  LUnboxFloatingPoint* ins = ool->unboxFloatingPoint();
  const ValueOperand value = ToValue(ins, LUnboxFloatingPoint::Input);

  if (ins->mir()->fallible()) {
    Label bail;
    masm.branchTestInt32(Assembler::NotEqual, value, &bail);
    bailoutFrom(&bail, ins->snapshot());
  }
  masm.int32ValueToFloatingPoint(value, ToFloatRegister(ins->output()),
                                 ins->type());
  masm.jump(ool->rejoin());
}
14506 void CodeGenerator::visitCallBindVar(LCallBindVar* lir) {
14507 pushArg(ToRegister(lir->environmentChain()));
14509 using Fn = JSObject* (*)(JSContext*, JSObject*);
14510 callVM<Fn, BindVarOperation>(lir);
// Megamorphic obj[id] = value. Tries the inline megamorphic set-slot cache
// first (when Watchtower megamorphic caching is enabled); on a miss, falls
// back to a VM call. The cache-hit path still needs a post-write barrier
// when a nursery value is stored into a tenured object.
void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
  Register obj = ToRegister(lir->getOperand(0));
  ValueOperand idVal = ToValue(lir, LMegamorphicSetElement::IndexIndex);
  ValueOperand value = ToValue(lir, LMegamorphicSetElement::ValueIndex);

  Register temp0 = ToRegister(lir->temp0());
  // See comment in LIROps.yaml (x86 is short on registers)
#ifndef JS_CODEGEN_X86
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  Label cacheHit, done;
  if (JitOptions.enableWatchtowerMegamorphic) {
#ifdef JS_CODEGEN_X86
    masm.emitMegamorphicCachedSetSlot(
        idVal, obj, temp0, value, &cacheHit,
        [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
          EmitPreBarrier(masm, addr, mirType);
        });
#else
    masm.emitMegamorphicCachedSetSlot(
        idVal, obj, temp0, temp1, temp2, value, &cacheHit,
        [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
          EmitPreBarrier(masm, addr, mirType);
        });
#endif
  }

  // Slow path: VM call. Arguments are pushed in reverse order.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(ToValue(lir, LMegamorphicSetElement::ValueIndex));
  pushArg(ToValue(lir, LMegamorphicSetElement::IndexIndex));
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, js::jit::SetElementMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // Post-write barrier: only needed if |obj| is tenured and |value| is a
  // nursery cell.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  saveVolatile(temp0);
  emitPostWriteBarrier(obj);
  restoreVolatile(temp0);

  masm.bind(&done);
}
// Load the scripted proxy's handler object: follow the proxy's reserved
// slots pointer, then read the HANDLER_EXTRA slot into the output Value.
void CodeGenerator::visitLoadScriptedProxyHandler(
    LLoadScriptedProxyHandler* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  ValueOperand output = ToOutValue(ins);

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
               output.scratchReg());
  masm.loadValue(
      Address(output.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
                                       ScriptedProxyHandler::HANDLER_EXTRA)),
      output);
}
#ifdef JS_PUNBOX64
// Validate the result of a scripted proxy [[Get]]: if the (unboxed) target
// object is flagged as needing proxy result validation, call into the VM to
// check the returned |value| against the target's own property.
void CodeGenerator::visitCheckScriptedProxyGetResult(
    LCheckScriptedProxyGetResult* ins) {
  ValueOperand target = ToValue(ins, LCheckScriptedProxyGetResult::TargetIndex);
  ValueOperand value = ToValue(ins, LCheckScriptedProxyGetResult::ValueIndex);
  ValueOperand id = ToValue(ins, LCheckScriptedProxyGetResult::IdIndex);
  Register scratch = ToRegister(ins->temp0());
  Register scratch2 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckProxyGetByValueResult>(
      ins, ArgList(scratch, id, value), StoreValueTo(value));

  masm.unboxObject(target, scratch);
  masm.branchTestObjectNeedsProxyResultValidation(Assembler::NonZero, scratch,
                                                  scratch2, ool->entry());
  masm.bind(ool->rejoin());
}
#endif
// Convert an id Value to a string or symbol. Strings and symbols pass
// through unchanged; int32 ids are converted to strings (inline fast path
// with an out-of-line VM fallback); any other type bails out.
void CodeGenerator::visitIdToStringOrSymbol(LIdToStringOrSymbol* ins) {
  ValueOperand id = ToValue(ins, LIdToStringOrSymbol::IdIndex);
  ValueOperand output = ToOutValue(ins);
  Register scratch = ToRegister(ins->temp0());

  masm.moveValue(id, output);

  Label done, callVM;
  Maybe<Label> bail;

  // If the MIR input is statically known to be int32, the tag tests below
  // are unnecessary and the instruction cannot bail.
  MDefinition* idDef = ins->mir()->idVal();
  if (idDef->isBox()) {
    idDef = idDef->toBox()->input();
  }
  if (idDef->type() != MIRType::Int32) {
    bail.emplace();
    ScratchTagScope tag(masm, output);
    masm.splitTagForTest(output, tag);
    masm.branchTestString(Assembler::Equal, tag, &done);
    masm.branchTestSymbol(Assembler::Equal, tag, &done);
    masm.branchTestInt32(Assembler::NotEqual, tag, &*bail);
  }

  masm.unboxInt32(output, scratch);

  using Fn = JSLinearString* (*)(JSContext*, int);
  OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
      ins, ArgList(scratch), StoreRegisterTo(output.scratchReg()));

  emitIntToString(scratch, output.scratchReg(), ool->entry());

  masm.bind(ool->rejoin());
  masm.tagValue(JSVAL_TYPE_STRING, output.scratchReg(), output);
  masm.bind(&done);
  if (bail) {
    bailoutFrom(&*bail, ins->snapshot());
  }
}
14636 void CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins) {
14637 const Register obj = ToRegister(ins->getOperand(0));
14638 size_t slot = ins->mir()->slot();
14639 ValueOperand result = ToOutValue(ins);
14641 masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
14644 void CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins) {
14645 const Register obj = ToRegister(ins->getOperand(0));
14646 size_t slot = ins->mir()->slot();
14647 AnyRegister result = ToAnyRegister(ins->getDef(0));
14648 MIRType type = ins->mir()->type();
14650 masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)),
14651 type, result);
// Load the Value at |src| and unbox it into |dest| according to |type|.
// When |fallible| is true, jump to |fail| if the value's tag does not match
// |type|; when false, the unbox is unchecked.
template <typename T>
static void EmitLoadAndUnbox(MacroAssembler& masm, const T& src, MIRType type,
                             bool fallible, AnyRegister dest, Label* fail) {
  // Double is special: ensureDouble also accepts an int32 payload (converting
  // it), so it covers both the fallible and infallible cases.
  if (type == MIRType::Double) {
    MOZ_ASSERT(dest.isFloat());
    masm.ensureDouble(src, dest.fpu(), fail);
    return;
  }
  if (fallible) {
    switch (type) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(src, dest.gpr(), fail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(src, dest.gpr(), fail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(src, dest.gpr(), fail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(src, dest.gpr(), fail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(src, dest.gpr(), fail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(src, dest.gpr(), fail);
        break;
      default:
        MOZ_CRASH("Unexpected MIRType");
    }
    return;
  }
  // Infallible: the tag is statically known to match.
  masm.loadUnboxedValue(src, type, dest);
}
14690 void CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins) {
14691 const MLoadFixedSlotAndUnbox* mir = ins->mir();
14692 MIRType type = mir->type();
14693 Register input = ToRegister(ins->object());
14694 AnyRegister result = ToAnyRegister(ins->output());
14695 size_t slot = mir->slot();
14697 Address address(input, NativeObject::getFixedSlotOffset(slot));
14699 Label bail;
14700 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
14701 if (mir->fallible()) {
14702 bailoutFrom(&bail, ins->snapshot());
14706 void CodeGenerator::visitLoadDynamicSlotAndUnbox(
14707 LLoadDynamicSlotAndUnbox* ins) {
14708 const MLoadDynamicSlotAndUnbox* mir = ins->mir();
14709 MIRType type = mir->type();
14710 Register input = ToRegister(ins->slots());
14711 AnyRegister result = ToAnyRegister(ins->output());
14712 size_t slot = mir->slot();
14714 Address address(input, slot * sizeof(JS::Value));
14716 Label bail;
14717 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
14718 if (mir->fallible()) {
14719 bailoutFrom(&bail, ins->snapshot());
// Fused load-and-unbox of a dense element. The index may be a compile-time
// constant (plain Address) or a register (scaled BaseObjectElementIndex).
void CodeGenerator::visitLoadElementAndUnbox(LLoadElementAndUnbox* ins) {
  const MLoadElementAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register elements = ToRegister(ins->elements());
  AnyRegister result = ToAnyRegister(ins->output());

  Label bail;
  if (ins->index()->isConstant()) {
    // The static assert guarantees index * sizeof(Value) can't overflow.
    NativeObject::elementsSizeMustNotOverflow();
    int32_t offset = ToInt32(ins->index()) * sizeof(Value);
    Address address(elements, offset);
    EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  } else {
    BaseObjectElementIndex address(elements, ToRegister(ins->index()));
    EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  }

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Add a property by storing the new shape (with a pre-barrier on the old
// shape) and writing |value| into the new slot, which may be fixed or
// dynamic depending on the MIR kind.
void CodeGenerator::visitAddAndStoreSlot(LAddAndStoreSlot* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  const ValueOperand value = ToValue(ins, LAddAndStoreSlot::ValueIndex);
  const Register maybeTemp = ToTempRegisterOrInvalid(ins->temp0());

  Shape* shape = ins->mir()->shape();
  masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
    EmitPreBarrier(masm, addr, MIRType::Shape);
  });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.

  uint32_t offset = ins->mir()->slotOffset();
  if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
    Address slot(obj, offset);
    masm.storeValue(value, slot);
  } else {
    // Dynamic slot: indirect through the object's slots pointer.
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), maybeTemp);
    Address slot(maybeTemp, offset);
    masm.storeValue(value, slot);
  }
}
// Add a property that requires growing the object's dynamic slots: call
// NativeObject::growSlotsPure via the ABI (bailing out on failure), then
// store the new shape and write |value| into the freshly allocated slot.
void CodeGenerator::visitAllocateAndStoreSlot(LAllocateAndStoreSlot* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  const ValueOperand value = ToValue(ins, LAllocateAndStoreSlot::ValueIndex);
  const Register temp0 = ToRegister(ins->temp0());
  const Register temp1 = ToRegister(ins->temp1());

  // Preserve |obj| and |value| across the ABI call.
  masm.Push(obj);
  masm.Push(value);

  using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(obj);
  masm.move32(Imm32(ins->mir()->numNewSlots()), temp1);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, NativeObject::growSlotsPure>();
  masm.storeCallPointerResult(temp0);

  masm.Pop(value);
  masm.Pop(obj);

  // growSlotsPure returned false: bail out to the interpreter.
  bailoutIfFalseBool(temp0, ins->snapshot());

  masm.storeObjShape(ins->mir()->shape(), obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp0);
  Address slot(temp0, ins->mir()->slotOffset());
  masm.storeValue(value, slot);
}
14805 void CodeGenerator::visitAddSlotAndCallAddPropHook(
14806 LAddSlotAndCallAddPropHook* ins) {
14807 const Register obj = ToRegister(ins->object());
14808 const ValueOperand value =
14809 ToValue(ins, LAddSlotAndCallAddPropHook::ValueIndex);
14811 pushArg(ImmGCPtr(ins->mir()->shape()));
14812 pushArg(value);
14813 pushArg(obj);
14815 using Fn =
14816 bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
14817 callVM<Fn, AddSlotAndCallAddPropHook>(ins);
14820 void CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins) {
14821 const Register obj = ToRegister(ins->getOperand(0));
14822 size_t slot = ins->mir()->slot();
14824 const ValueOperand value = ToValue(ins, LStoreFixedSlotV::ValueIndex);
14826 Address address(obj, NativeObject::getFixedSlotOffset(slot));
14827 if (ins->mir()->needsBarrier()) {
14828 emitPreBarrier(address);
14831 masm.storeValue(value, address);
// Store a typed (unboxed) value into a fixed slot. The value may be a
// compile-time constant or a typed register; a pre-barrier is emitted on
// the old slot contents when required.
void CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();

  const LAllocation* value = ins->value();
  MIRType valueType = ins->mir()->value()->type();

  Address address(obj, NativeObject::getFixedSlotOffset(slot));
  if (ins->mir()->needsBarrier()) {
    emitPreBarrier(address);
  }

  ConstantOrRegister nvalue =
      value->isConstant()
          ? ConstantOrRegister(value->toConstant()->toJSValue())
          : TypedOrValueRegister(valueType, ToAnyRegister(value));
  masm.storeConstantOrRegister(nvalue, address);
}
14853 void CodeGenerator::visitGetNameCache(LGetNameCache* ins) {
14854 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
14855 Register envChain = ToRegister(ins->envObj());
14856 ValueOperand output = ToOutValue(ins);
14857 Register temp = ToRegister(ins->temp0());
14859 IonGetNameIC ic(liveRegs, envChain, output, temp);
14860 addIC(ins, allocateIC(ic));
14863 void CodeGenerator::addGetPropertyCache(LInstruction* ins,
14864 LiveRegisterSet liveRegs,
14865 TypedOrValueRegister value,
14866 const ConstantOrRegister& id,
14867 ValueOperand output) {
14868 CacheKind kind = CacheKind::GetElem;
14869 if (id.constant() && id.value().isString()) {
14870 JSString* idString = id.value().toString();
14871 if (idString->isAtom() && !idString->asAtom().isIndex()) {
14872 kind = CacheKind::GetProp;
14875 IonGetPropertyIC cache(kind, liveRegs, value, id, output);
14876 addIC(ins, allocateIC(cache));
14879 void CodeGenerator::addSetPropertyCache(LInstruction* ins,
14880 LiveRegisterSet liveRegs,
14881 Register objReg, Register temp,
14882 const ConstantOrRegister& id,
14883 const ConstantOrRegister& value,
14884 bool strict) {
14885 CacheKind kind = CacheKind::SetElem;
14886 if (id.constant() && id.value().isString()) {
14887 JSString* idString = id.value().toString();
14888 if (idString->isAtom() && !idString->asAtom().isIndex()) {
14889 kind = CacheKind::SetProp;
14892 IonSetPropertyIC cache(kind, liveRegs, objReg, temp, id, value, strict);
14893 addIC(ins, allocateIC(cache));
14896 ConstantOrRegister CodeGenerator::toConstantOrRegister(LInstruction* lir,
14897 size_t n, MIRType type) {
14898 if (type == MIRType::Value) {
14899 return TypedOrValueRegister(ToValue(lir, n));
14902 const LAllocation* value = lir->getOperand(n);
14903 if (value->isConstant()) {
14904 return ConstantOrRegister(value->toConstant()->toJSValue());
14907 return TypedOrValueRegister(type, ToAnyRegister(value));
// Emit a GetProp/GetElem IC for obj[id] / obj.name reads.
void CodeGenerator::visitGetPropertyCache(LGetPropertyCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  // The receiver value is always in a register (.reg() asserts this).
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LGetPropertyCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCache::IdIndex,
                                               ins->mir()->idval()->type());
  ValueOperand output = ToOutValue(ins);
  addGetPropertyCache(ins, liveRegs, value, id, output);
}
// Emit a GetPropSuper/GetElemSuper IC for |super.name| / |super[id]| reads.
// A constant non-index atom id selects the specialized GetPropSuper kind.
void CodeGenerator::visitGetPropSuperCache(LGetPropSuperCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register obj = ToRegister(ins->obj());
  TypedOrValueRegister receiver =
      toConstantOrRegister(ins, LGetPropSuperCache::ReceiverIndex,
                           ins->mir()->receiver()->type())
          .reg();
  ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCache::IdIndex,
                                               ins->mir()->idval()->type());
  ValueOperand output = ToOutValue(ins);

  CacheKind kind = CacheKind::GetElemSuper;
  if (id.constant() && id.value().isString()) {
    JSString* idString = id.value().toString();
    if (idString->isAtom() && !idString->asAtom().isIndex()) {
      kind = CacheKind::GetPropSuper;
    }
  }

  IonGetPropSuperIC cache(kind, liveRegs, obj, receiver, id, output);
  addIC(ins, allocateIC(cache));
}
14945 void CodeGenerator::visitBindNameCache(LBindNameCache* ins) {
14946 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
14947 Register envChain = ToRegister(ins->environmentChain());
14948 Register output = ToRegister(ins->output());
14949 Register temp = ToRegister(ins->temp0());
14951 IonBindNameIC ic(liveRegs, envChain, output, temp);
14952 addIC(ins, allocateIC(ic));
// Emit a HasOwn IC for |Object.hasOwn|-style own-property tests.
void CodeGenerator::visitHasOwnCache(LHasOwnCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  // Both operands are in registers (.reg() asserts this).
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LHasOwnCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  TypedOrValueRegister id = toConstantOrRegister(ins, LHasOwnCache::IdIndex,
                                                 ins->mir()->idval()->type())
                                .reg();
  Register output = ToRegister(ins->output());

  IonHasOwnIC cache(liveRegs, value, id, output);
  addIC(ins, allocateIC(cache));
}
// Emit a CheckPrivateField IC for |#field in obj| checks.
void CodeGenerator::visitCheckPrivateFieldCache(LCheckPrivateFieldCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  // Both operands are in registers (.reg() asserts this).
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LCheckPrivateFieldCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  TypedOrValueRegister id =
      toConstantOrRegister(ins, LCheckPrivateFieldCache::IdIndex,
                           ins->mir()->idval()->type())
          .reg();
  Register output = ToRegister(ins->output());

  IonCheckPrivateFieldIC cache(liveRegs, value, id, output);
  addIC(ins, allocateIC(cache));
}
14986 void CodeGenerator::visitNewPrivateName(LNewPrivateName* ins) {
14987 pushArg(ImmGCPtr(ins->mir()->name()));
14989 using Fn = JS::Symbol* (*)(JSContext*, Handle<JSAtom*>);
14990 callVM<Fn, NewPrivateName>(ins);
// VM call: |delete obj.name|. The strict/sloppy distinction selects the
// VMFunction template instantiation at compile time.
void CodeGenerator::visitCallDeleteProperty(LCallDeleteProperty* lir) {
  // Arguments are pushed in reverse order.
  pushArg(ImmGCPtr(lir->mir()->name()));
  pushArg(ToValue(lir, LCallDeleteProperty::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, Handle<PropertyName*>, bool*);
  if (lir->mir()->strict()) {
    callVM<Fn, DelPropOperation<true>>(lir);
  } else {
    callVM<Fn, DelPropOperation<false>>(lir);
  }
}
// VM call: |delete obj[index]|. The strict/sloppy distinction selects the
// VMFunction template instantiation at compile time.
void CodeGenerator::visitCallDeleteElement(LCallDeleteElement* lir) {
  // Arguments are pushed in reverse order.
  pushArg(ToValue(lir, LCallDeleteElement::IndexIndex));
  pushArg(ToValue(lir, LCallDeleteElement::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
  if (lir->mir()->strict()) {
    callVM<Fn, DelElemOperation<true>>(lir);
  } else {
    callVM<Fn, DelElemOperation<false>>(lir);
  }
}
// Get a for-in iterator for |obj|: try the shape-keyed iterator cache
// inline, falling back to a VM call (with or without indices, per the MIR).
// On the fast path the cached iterator is activated, registered with the
// realm's enumerators list, and a post-write barrier is emitted for storing
// |obj| into the (tenured) iterator object.
void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
  Register obj = ToRegister(lir->object());
  Register iterObj = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  Register temp3 = ToRegister(lir->temp2());

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
  OutOfLineCode* ool = (lir->mir()->wantsIndices())
                           ? oolCallVM<Fn, GetIteratorWithIndices>(
                                 lir, ArgList(obj), StoreRegisterTo(iterObj))
                           : oolCallVM<Fn, GetIterator>(
                                 lir, ArgList(obj), StoreRegisterTo(iterObj));

  masm.maybeLoadIteratorFromShape(obj, iterObj, temp, temp2, temp3,
                                  ool->entry());

  Register nativeIter = temp;
  masm.loadPrivate(
      Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
      nativeIter);

  if (lir->mir()->wantsIndices()) {
    // At least one consumer of the output of this iterator has been optimized
    // to use iterator indices. If the cached iterator doesn't include indices,
    // but it was marked to indicate that we can create them if needed, then we
    // do a VM call to replace the cached iterator with a fresh iterator
    // including indices.
    masm.branchNativeIteratorIndices(Assembler::Equal, nativeIter, temp2,
                                     NativeIteratorIndices::AvailableOnRequest,
                                     ool->entry());
  }

  // Activate the cached iterator for |obj|.
  Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
  masm.storePtr(
      obj, Address(nativeIter, NativeIterator::offsetOfObjectBeingIterated()));
  masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);

  Register enumeratorsAddr = temp2;
  masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
  masm.registerIterator(enumeratorsAddr, nativeIter, temp3);

  // Generate post-write barrier for storing to |iterObj->objectBeingIterated_|.
  // We already know that |iterObj| is tenured, so we only have to check |obj|.
  Label skipBarrier;
  masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);
  {
    // The temps hold no live values across the barrier call, so don't
    // bother saving them; |iterObj| must survive if it is volatile.
    LiveRegisterSet save = liveVolatileRegs(lir);
    save.takeUnchecked(temp);
    save.takeUnchecked(temp2);
    save.takeUnchecked(temp3);
    if (iterObj.volatile_()) {
      save.addUnchecked(iterObj);
    }

    masm.PushRegsInMask(save);
    emitPostWriteBarrier(iterObj);
    masm.PopRegsInMask(save);
  }
  masm.bind(&skipBarrier);

  masm.bind(ool->rejoin());
}
15081 void CodeGenerator::visitValueToIterator(LValueToIterator* lir) {
15082 pushArg(ToValue(lir, LValueToIterator::ValueIndex));
15084 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
15085 callVM<Fn, ValueToIterator>(lir);
// Branch on whether the iterator has valid indices for |object|: take the
// false branch if indices are unavailable or if the object's shape no
// longer matches the first shape recorded by the iterator.
void CodeGenerator::visitIteratorHasIndicesAndBranch(
    LIteratorHasIndicesAndBranch* lir) {
  Register iterator = ToRegister(lir->iterator());
  Register object = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp());
  Register temp2 = ToRegister(lir->temp2());
  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());

  // Check that the iterator has indices available.
  Address nativeIterAddr(iterator,
                         PropertyIteratorObject::offsetOfIteratorSlot());
  masm.loadPrivate(nativeIterAddr, temp);
  masm.branchNativeIteratorIndices(Assembler::NotEqual, temp, temp2,
                                   NativeIteratorIndices::Valid, ifFalse);

  // Guard that the first shape stored in the iterator matches the current
  // shape of the iterated object.
  Address firstShapeAddr(temp, NativeIterator::offsetOfFirstShape());
  masm.loadPtr(firstShapeAddr, temp);
  masm.branchTestObjShape(Assembler::NotEqual, object, temp, temp2, object,
                          ifFalse);

  // Fall through to the true block when it is the next one; otherwise jump.
  if (!isNextBlock(lir->ifTrue()->lir())) {
    masm.jump(ifTrue);
  }
}
// Load a property value from |object| using the current index stored in a
// property iterator. The index encodes both a slot/element number and a
// PropertyIndex::Kind (dynamic slot, fixed slot, or dense element), which
// determines how the value is addressed.
void CodeGenerator::visitLoadSlotByIteratorIndex(
    LLoadSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  ValueOperand result = ToOutValue(lir);

  // temp <- index, temp2 <- PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  // Dynamic slot: value lives in the out-of-line slots array.
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.loadValue(BaseValueIndex(temp2, temp), result);
  masm.jump(&done);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.loadValue(BaseValueIndex(object, temp, sizeof(NativeObject)), result);
  masm.jump(&done);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  // The only remaining kind must be Element.
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
  // Debug-only bounds check against the initialized length.
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  masm.loadValue(BaseObjectElementIndex(temp2, temp), result);
  masm.bind(&done);
}
// Store |value| into |object| at the location identified by the current
// iterator index (dynamic slot, fixed slot, or dense element). Computes the
// effective address for each kind, then performs one shared store with the
// required pre- and post-write barriers.
void CodeGenerator::visitStoreSlotByIteratorIndex(
    LStoreSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  ValueOperand value = ToValue(lir, LStoreSlotByIteratorIndex::ValueIndex);
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // temp <- index, temp2 <- PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done, doStore;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  // Dynamic slot: address within the out-of-line slots array.
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.computeEffectiveAddress(BaseValueIndex(temp2, temp), temp);
  masm.jump(&doStore);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.computeEffectiveAddress(
      BaseValueIndex(object, temp, sizeof(NativeObject)), temp);
  masm.jump(&doStore);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  // The only remaining kind must be Element.
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
  // Debug-only bounds check against the initialized length.
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  BaseObjectElementIndex elementAddress(temp2, temp);
  masm.computeEffectiveAddress(elementAddress, temp);

  // All three paths funnel here with the destination address in |temp|.
  masm.bind(&doStore);
  Address storeAddress(temp, 0);
  emitPreBarrier(storeAddress);
  masm.storeValue(value, storeAddress);

  // Post-write barrier: only needed when a tenured |object| now references a
  // nursery-allocated cell.
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp2, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp2, &done);

  saveVolatile(temp2);
  emitPostWriteBarrier(object);
  restoreVolatile(temp2);

  masm.bind(&done);
}
15223 void CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins) {
15224 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
15225 Register objReg = ToRegister(ins->object());
15226 Register temp = ToRegister(ins->temp0());
15228 ConstantOrRegister id = toConstantOrRegister(ins, LSetPropertyCache::IdIndex,
15229 ins->mir()->idval()->type());
15230 ConstantOrRegister value = toConstantOrRegister(
15231 ins, LSetPropertyCache::ValueIndex, ins->mir()->value()->type());
15233 addSetPropertyCache(ins, liveRegs, objReg, temp, id, value,
15234 ins->mir()->strict());
15237 void CodeGenerator::visitThrow(LThrow* lir) {
15238 pushArg(ToValue(lir, LThrow::ValueIndex));
15240 using Fn = bool (*)(JSContext*, HandleValue);
15241 callVM<Fn, js::ThrowOperation>(lir);
15244 class OutOfLineTypeOfV : public OutOfLineCodeBase<CodeGenerator> {
15245 LTypeOfV* ins_;
15247 public:
15248 explicit OutOfLineTypeOfV(LTypeOfV* ins) : ins_(ins) {}
15250 void accept(CodeGenerator* codegen) override {
15251 codegen->visitOutOfLineTypeOfV(this);
15253 LTypeOfV* ins() const { return ins_; }
15256 void CodeGenerator::emitTypeOfJSType(JSValueType type, Register output) {
15257 switch (type) {
15258 case JSVAL_TYPE_OBJECT:
15259 masm.move32(Imm32(JSTYPE_OBJECT), output);
15260 break;
15261 case JSVAL_TYPE_DOUBLE:
15262 case JSVAL_TYPE_INT32:
15263 masm.move32(Imm32(JSTYPE_NUMBER), output);
15264 break;
15265 case JSVAL_TYPE_BOOLEAN:
15266 masm.move32(Imm32(JSTYPE_BOOLEAN), output);
15267 break;
15268 case JSVAL_TYPE_UNDEFINED:
15269 masm.move32(Imm32(JSTYPE_UNDEFINED), output);
15270 break;
15271 case JSVAL_TYPE_NULL:
15272 masm.move32(Imm32(JSTYPE_OBJECT), output);
15273 break;
15274 case JSVAL_TYPE_STRING:
15275 masm.move32(Imm32(JSTYPE_STRING), output);
15276 break;
15277 case JSVAL_TYPE_SYMBOL:
15278 masm.move32(Imm32(JSTYPE_SYMBOL), output);
15279 break;
15280 case JSVAL_TYPE_BIGINT:
15281 masm.move32(Imm32(JSTYPE_BIGINT), output);
15282 break;
15283 default:
15284 MOZ_CRASH("Unsupported JSValueType");
// Emit one typeof test against the value's |tag|: on a match, write the
// corresponding JSType to |output| and jump to |done|; otherwise fall
// through. Object-tagged values branch to |oolObject| instead, since their
// result can't be determined from the tag alone.
void CodeGenerator::emitTypeOfCheck(JSValueType type, Register tag,
                                    Register output, Label* done,
                                    Label* oolObject) {
  Label notMatch;
  switch (type) {
    case JSVAL_TYPE_OBJECT:
      // The input may be a callable object (result is "function") or
      // may emulate undefined (result is "undefined"). Use an OOL path.
      masm.branchTestObject(Assembler::Equal, tag, oolObject);
      return;
    case JSVAL_TYPE_DOUBLE:
    case JSVAL_TYPE_INT32:
      // Int32 and double both report "number"; test the combined number tag.
      masm.branchTestNumber(Assembler::NotEqual, tag, &notMatch);
      break;
    default:
      masm.branchTestType(Assembler::NotEqual, tag, type, &notMatch);
      break;
  }

  emitTypeOfJSType(type, output);
  masm.jump(done);
  masm.bind(&notMatch);
}
// Lower MTypeOf for a boxed Value: emit a chain of tag checks that writes the
// JSType to |output|. Previously observed types (most frequent first) are
// tested before the remaining types; object inputs take an out-of-line path.
void CodeGenerator::visitTypeOfV(LTypeOfV* lir) {
  const ValueOperand value = ToValue(lir, LTypeOfV::InputIndex);
  Register output = ToRegister(lir->output());
  Register tag = masm.extractTag(value, output);

  Label done;

  auto* ool = new (alloc()) OutOfLineTypeOfV(lir);
  addOutOfLineCode(ool, lir->mir());

  // Fallback test order when no profiling data singles a type out.
  const std::initializer_list<JSValueType> defaultOrder = {
      JSVAL_TYPE_OBJECT, JSVAL_TYPE_DOUBLE, JSVAL_TYPE_UNDEFINED,
      JSVAL_TYPE_NULL,   JSVAL_TYPE_BOOLEAN, JSVAL_TYPE_STRING,
      JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};

  // Tracks which types still need a check emitted.
  mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);

  // Generate checks for previously observed types first.
  // The TypeDataList is sorted by descending frequency.
  for (auto& observed : lir->mir()->observedTypes()) {
    JSValueType type = observed.type();

    // Unify number types.
    if (type == JSVAL_TYPE_INT32) {
      type = JSVAL_TYPE_DOUBLE;
    }

    remaining -= type;

    emitTypeOfCheck(type, tag, output, &done, ool->entry());
  }

  // Generate checks for remaining types.
  for (auto type : defaultOrder) {
    if (!remaining.contains(type)) {
      continue;
    }
    remaining -= type;

    if (remaining.isEmpty() && type != JSVAL_TYPE_OBJECT) {
      // We can skip the check for the last remaining type, unless the type is
      // JSVAL_TYPE_OBJECT, which may have to go through the OOL path.
#ifdef DEBUG
      // In debug builds still emit the check so a type mismatch is caught.
      emitTypeOfCheck(type, tag, output, &done, ool->entry());
      masm.assumeUnreachable("Unexpected Value type in visitTypeOfV");
#else
      emitTypeOfJSType(type, output);
#endif
    } else {
      emitTypeOfCheck(type, tag, output, &done, ool->entry());
    }
  }
  MOZ_ASSERT(remaining.isEmpty());

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Compute typeof for an object in |obj|, writing the JSType to |output| and
// jumping to |done|. Fast paths handle plain objects, callables ("function"),
// and objects emulating undefined; anything else falls back to a C++ call to
// js::TypeOfObject.
void CodeGenerator::emitTypeOfObject(Register obj, Register output,
                                     Label* done) {
  Label slowCheck, isObject, isCallable, isUndefined;
  masm.typeOfObject(obj, output, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.move32(Imm32(JSTYPE_FUNCTION), output);
  masm.jump(done);

  masm.bind(&isUndefined);
  masm.move32(Imm32(JSTYPE_UNDEFINED), output);
  masm.jump(done);

  masm.bind(&isObject);
  masm.move32(Imm32(JSTYPE_OBJECT), output);
  masm.jump(done);

  // Slow path: ask the VM. Volatile registers are preserved around the ABI
  // call; the JSType result is left in |output|.
  masm.bind(&slowCheck);

  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);
}
15399 void CodeGenerator::visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool) {
15400 LTypeOfV* ins = ool->ins();
15402 ValueOperand input = ToValue(ins, LTypeOfV::InputIndex);
15403 Register temp = ToTempUnboxRegister(ins->temp0());
15404 Register output = ToRegister(ins->output());
15406 Register obj = masm.extractObject(input, temp);
15407 emitTypeOfObject(obj, output, ool->rejoin());
15408 masm.jump(ool->rejoin());
15411 void CodeGenerator::visitTypeOfO(LTypeOfO* lir) {
15412 Register obj = ToRegister(lir->object());
15413 Register output = ToRegister(lir->output());
15415 Label done;
15416 emitTypeOfObject(obj, output, &done);
15417 masm.bind(&done);
// Map a JSType value in |input| to its interned name string by indexing into
// the runtime's names table, which lays out the typeof names contiguously
// starting at |undefined|.
void CodeGenerator::visitTypeOfName(LTypeOfName* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  // Debug-only range check: |input| must be a valid JSType.
  Label ok;
  masm.branch32(Assembler::Below, input, Imm32(JSTYPE_LIMIT), &ok);
  masm.assumeUnreachable("bad JSType");
  masm.bind(&ok);
#endif

  // The table below is indexed relative to the "undefined" name, so
  // JSTYPE_UNDEFINED must be the first entry.
  static_assert(JSTYPE_UNDEFINED == 0);

  masm.movePtr(ImmPtr(&gen->runtime->names().undefined), output);
  masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
}
15437 class OutOfLineTypeOfIsNonPrimitiveV : public OutOfLineCodeBase<CodeGenerator> {
15438 LTypeOfIsNonPrimitiveV* ins_;
15440 public:
15441 explicit OutOfLineTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* ins)
15442 : ins_(ins) {}
15444 void accept(CodeGenerator* codegen) override {
15445 codegen->visitOutOfLineTypeOfIsNonPrimitiveV(this);
15447 auto* ins() const { return ins_; }
15450 class OutOfLineTypeOfIsNonPrimitiveO : public OutOfLineCodeBase<CodeGenerator> {
15451 LTypeOfIsNonPrimitiveO* ins_;
15453 public:
15454 explicit OutOfLineTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* ins)
15455 : ins_(ins) {}
15457 void accept(CodeGenerator* codegen) override {
15458 codegen->visitOutOfLineTypeOfIsNonPrimitiveO(this);
15460 auto* ins() const { return ins_; }
// Slow path for typeof-is comparisons on objects: call js::TypeOfObject via
// the ABI, then compare the resulting JSType against the expected constant,
// leaving the boolean comparison result in |output|.
void CodeGenerator::emitTypeOfIsObjectOOL(MTypeOfIs* mir, Register obj,
                                          Register output) {
  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);

  // Compare the computed JSType to the one the MIR node tests for, honoring
  // the comparison operator (==/!=/===/!==).
  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
  masm.cmp32Set(cond, output, Imm32(mir->jstype()), output);
}
15477 void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV(
15478 OutOfLineTypeOfIsNonPrimitiveV* ool) {
15479 auto* ins = ool->ins();
15480 ValueOperand input = ToValue(ins, LTypeOfIsNonPrimitiveV::InputIndex);
15481 Register output = ToRegister(ins->output());
15482 Register temp = ToTempUnboxRegister(ins->temp0());
15484 Register obj = masm.extractObject(input, temp);
15486 emitTypeOfIsObjectOOL(ins->mir(), obj, output);
15488 masm.jump(ool->rejoin());
15491 void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO(
15492 OutOfLineTypeOfIsNonPrimitiveO* ool) {
15493 auto* ins = ool->ins();
15494 Register input = ToRegister(ins->input());
15495 Register output = ToRegister(ins->output());
15497 emitTypeOfIsObjectOOL(ins->mir(), input, output);
15499 masm.jump(ool->rejoin());
// Emit the typeof-is comparison for an object |obj|: route the three possible
// typeof outcomes for objects ("object", "function", "undefined") to either
// |success| or |fail| depending on the type the MIR node tests for, then
// materialize the boolean result according to the comparison operator.
// Objects needing the VM helper jump to |slowCheck|.
void CodeGenerator::emitTypeOfIsObject(MTypeOfIs* mir, Register obj,
                                       Register output, Label* success,
                                       Label* fail, Label* slowCheck) {
  // By default every outcome fails; the tested type's label is redirected to
  // |success| below.
  Label* isObject = fail;
  Label* isFunction = fail;
  Label* isUndefined = fail;
  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED:
      isUndefined = success;
      break;

    case JSTYPE_OBJECT:
      isObject = success;
      break;

    case JSTYPE_FUNCTION:
      isFunction = success;
      break;

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Primitive type");
  }

  masm.typeOfObject(obj, output, slowCheck, isObject, isFunction, isUndefined);

  auto op = mir->jsop();

  // Translate success/fail into the boolean the comparison operator implies.
  Label done;
  masm.bind(fail);
  masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  masm.jump(&done);
  masm.bind(success);
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
  masm.bind(&done);
}
// Lower a typeof-is comparison against a non-primitive type name for a boxed
// Value: filter out non-object tags (which resolve immediately), then run the
// object path via emitTypeOfIsObject with an OOL slow check.
void CodeGenerator::visitTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* lir) {
  ValueOperand input = ToValue(lir, LTypeOfIsNonPrimitiveV::InputIndex);
  Register output = ToRegister(lir->output());
  Register temp = ToTempUnboxRegister(lir->temp0());

  auto* mir = lir->mir();

  auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveV(lir);
  addOutOfLineCode(ool, mir);

  Label success, fail;

  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED: {
      // |undefined| matches immediately; only objects (which may emulate
      // undefined) need further checking.
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      masm.branchTestUndefined(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_OBJECT: {
      // typeof null is "object", so null matches immediately.
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      masm.branchTestNull(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_FUNCTION: {
      // Only objects can be callable; everything else fails.
      masm.branchTestObject(Assembler::NotEqual, input, &fail);
      break;
    }

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Primitive type");
  }

  // At this point |input| is known to hold an object.
  Register obj = masm.extractObject(input, temp);

  emitTypeOfIsObject(mir, obj, output, &success, &fail, ool->entry());

  masm.bind(ool->rejoin());
}
15604 void CodeGenerator::visitTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* lir) {
15605 Register input = ToRegister(lir->input());
15606 Register output = ToRegister(lir->output());
15608 auto* mir = lir->mir();
15610 auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveO(lir);
15611 addOutOfLineCode(ool, mir);
15613 Label success, fail;
15614 emitTypeOfIsObject(mir, input, output, &success, &fail, ool->entry());
15616 masm.bind(ool->rejoin());
// Lower a typeof-is comparison against a primitive type name: a single tag
// test on the boxed value sets |output| to the comparison result. Only
// primitive types reach here; object/function/undefined use the
// NonPrimitive lowering.
void CodeGenerator::visitTypeOfIsPrimitive(LTypeOfIsPrimitive* lir) {
  ValueOperand input = ToValue(lir, LTypeOfIsPrimitive::InputIndex);
  Register output = ToRegister(lir->output());

  auto* mir = lir->mir();
  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);

  switch (mir->jstype()) {
    case JSTYPE_STRING:
      masm.testStringSet(cond, input, output);
      break;
    case JSTYPE_NUMBER:
      masm.testNumberSet(cond, input, output);
      break;
    case JSTYPE_BOOLEAN:
      masm.testBooleanSet(cond, input, output);
      break;
    case JSTYPE_SYMBOL:
      masm.testSymbolSet(cond, input, output);
      break;
    case JSTYPE_BIGINT:
      masm.testBigIntSet(cond, input, output);
      break;

    case JSTYPE_UNDEFINED:
    case JSTYPE_OBJECT:
    case JSTYPE_FUNCTION:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Non-primitive type");
  }
}
15655 void CodeGenerator::visitToAsyncIter(LToAsyncIter* lir) {
15656 pushArg(ToValue(lir, LToAsyncIter::NextMethodIndex));
15657 pushArg(ToRegister(lir->iterator()));
15659 using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
15660 callVM<Fn, js::CreateAsyncFromSyncIterator>(lir);
15663 void CodeGenerator::visitToPropertyKeyCache(LToPropertyKeyCache* lir) {
15664 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
15665 ValueOperand input = ToValue(lir, LToPropertyKeyCache::InputIndex);
15666 ValueOperand output = ToOutValue(lir);
15668 IonToPropertyKeyIC ic(liveRegs, input, output);
15669 addIC(lir, allocateIC(ic));
// Load a boxed Value from a dense-elements vector, bailing out if the slot
// holds the magic hole value.
void CodeGenerator::visitLoadElementV(LLoadElementV* load) {
  Register elements = ToRegister(load->elements());
  const ValueOperand out = ToOutValue(load);

  if (load->index()->isConstant()) {
    // Constant index: fold the offset; the static assert-style helper
    // guarantees the multiplication can't overflow.
    NativeObject::elementsSizeMustNotOverflow();
    int32_t offset = ToInt32(load->index()) * sizeof(Value);
    masm.loadValue(Address(elements, offset), out);
  } else {
    masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index())),
                   out);
  }

  // A magic value marks a hole; deoptimize in that case.
  Label testMagic;
  masm.branchTestMagic(Assembler::Equal, out, &testMagic);
  bailoutFrom(&testMagic, load->snapshot());
}
// Load a dense element, producing |undefined| for out-of-bounds indices and
// holes. Negative indices bail out when the MIR node requires it.
void CodeGenerator::visitLoadElementHole(LLoadElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register initLength = ToRegister(lir->initLength());
  const ValueOperand out = ToOutValue(lir);

  const MLoadElementHole* mir = lir->mir();

  // If the index is out of bounds, load |undefined|. Otherwise, load the
  // value.
  Label outOfBounds, done;
  masm.spectreBoundsCheck32(index, initLength, out.scratchReg(), &outOfBounds);

  masm.loadValue(BaseObjectElementIndex(elements, index), out);

  // If the value wasn't a hole, we're done. Otherwise, we'll load undefined.
  masm.branchTestMagic(Assembler::NotEqual, out, &done);

  if (mir->needsNegativeIntCheck()) {
    // The hole path skips over the negative-index bailout below; only the
    // out-of-bounds path performs it.
    Label loadUndefined;
    masm.jump(&loadUndefined);

    masm.bind(&outOfBounds);

    bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

    masm.bind(&loadUndefined);
  } else {
    masm.bind(&outOfBounds);
  }
  // Both the hole case and the in-range out-of-bounds case end up here.
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
// Load a scalar element from a typed array's data buffer into an unboxed
// register. Loads that can't be represented (per loadFromTypedArray's fail
// path) trigger a bailout.
void CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToTempRegisterOrInvalid(lir->temp0());
  AnyRegister out = ToAnyRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  Label fail;
  if (lir->index()->isConstant()) {
    // Constant index: use an absolute address within the buffer.
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.loadFromTypedArray(storageType, source, out, temp, &fail);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.loadFromTypedArray(storageType, source, out, temp, &fail);
  }

  // |fail| is only bound when the load can actually fail for this type.
  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }
}
// Load a 64-bit element from a BigInt64/BigUint64 typed array and box it
// into a freshly created BigInt object in |out|.
void CodeGenerator::visitLoadUnboxedBigInt(LLoadUnboxedBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToRegister(lir->temp());
  Register64 temp64 = ToRegister64(lir->temp64());
  Register out = ToRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  if (lir->index()->isConstant()) {
    // Constant index: use an absolute address within the buffer.
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.load64(source, temp64);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.load64(source, temp64);
  }

  // Allocate/initialize the BigInt result from the raw 64-bit payload.
  emitCreateBigInt(lir, storageType, temp64, out, temp);
}
// Load an element from a DataView. Unlike typed arrays, DataView accesses may
// be unaligned and may request an explicit endianness, so the general path
// loads into GPRs, conditionally byte-swaps, and then moves into the output.
void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp = ToTempRegisterOrInvalid(lir->temp());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
  AnyRegister out = ToAnyRegister(lir->output());

  const MLoadDataViewElement* mir = lir->mir();
  Scalar::Type storageType = mir->storageType();

  // DataView offsets are byte offsets, hence TimesOne scaling.
  BaseIndex source(elements, ToRegister(lir->index()), TimesOne);

  // No swap needed when the requested endianness is a constant that matches
  // the host's endianness.
  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly load if no byte swap is needed and the platform supports unaligned
  // accesses for the access. (Such support is assumed for integer types.)
  if (noSwap && (!Scalar::isFloatingType(storageType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    if (!Scalar::isBigIntType(storageType)) {
      Label fail;
      masm.loadFromTypedArray(storageType, source, out, temp, &fail);

      if (fail.used()) {
        bailoutFrom(&fail, lir->snapshot());
      }
    } else {
      masm.load64(source, temp64);

      emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
    }
    return;
  }

  // Load the value into a gpr register.
  switch (storageType) {
    case Scalar::Int16:
      masm.load16UnalignedSignExtend(source, out.gpr());
      break;
    case Scalar::Uint16:
      masm.load16UnalignedZeroExtend(source, out.gpr());
      break;
    case Scalar::Int32:
      masm.load32Unaligned(source, out.gpr());
      break;
    case Scalar::Uint32:
      // A float output means the value must be converted later; stage it in
      // |temp| in that case.
      masm.load32Unaligned(source, out.isFloat() ? temp : out.gpr());
      break;
    case Scalar::Float32:
      masm.load32Unaligned(source, temp);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.load64Unaligned(source, temp64);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Runtime endianness flag: skip the swap when it matches the host.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (storageType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(out.gpr());
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(out.gpr());
        break;
      case Scalar::Int32:
        masm.byteSwap32(out.gpr());
        break;
      case Scalar::Uint32:
        masm.byteSwap32(out.isFloat() ? temp : out.gpr());
        break;
      case Scalar::Float32:
        masm.byteSwap32(temp);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Move the value into the output register.
  switch (storageType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      // Already loaded directly into the output GPR.
      break;
    case Scalar::Uint32:
      if (out.isFloat()) {
        masm.convertUInt32ToDouble(temp, out.fpu());
      } else {
        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadDataViewElement to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        bailoutTest32(Assembler::Signed, out.gpr(), out.gpr(), lir->snapshot());
      }
      break;
    case Scalar::Float32:
      masm.moveGPRToFloat32(temp, out.fpu());
      masm.canonicalizeFloat(out.fpu());
      break;
    case Scalar::Float64:
      masm.moveGPR64ToDouble(temp64, out.fpu());
      masm.canonicalizeDouble(out.fpu());
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Load a typed-array element as a boxed Value, producing |undefined| for
// out-of-bounds indices instead of bailing out.
void CodeGenerator::visitLoadTypedArrayElementHole(
    LLoadTypedArrayElementHole* lir) {
  Register object = ToRegister(lir->object());
  const ValueOperand out = ToOutValue(lir);

  // Load the length.
  Register scratch = out.scratchReg();
  Register scratch2 = ToRegister(lir->temp0());
  Register index = ToRegister(lir->index());
  masm.loadArrayBufferViewLengthIntPtr(object, scratch);

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, scratch, scratch2, &outOfBounds);

  // Load the elements vector.
  masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);

  Scalar::Type arrayType = lir->mir()->arrayType();
  Label fail;
  BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
  // Uint32 values that don't fit in int32 either become doubles or fail,
  // depending on the MIR node's forceDouble flag.
  MacroAssembler::Uint32Mode uint32Mode =
      lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
                                : MacroAssembler::Uint32Mode::FailOnDouble;
  masm.loadFromTypedArray(arrayType, source, out, uint32Mode, out.scratchReg(),
                          &fail);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }

  masm.bind(&done);
}
// Load a BigInt64/BigUint64 typed-array element as a boxed Value, producing
// |undefined| for out-of-bounds indices. The raw 64-bit payload is boxed
// into a newly created BigInt.
void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
    LLoadTypedArrayElementHoleBigInt* lir) {
  Register object = ToRegister(lir->object());
  const ValueOperand out = ToOutValue(lir);

  // On x86 there are not enough registers. In that case reuse the output's
  // type register as temporary.
#ifdef JS_CODEGEN_X86
  MOZ_ASSERT(lir->temp()->isBogusTemp());
  Register temp = out.typeReg();
#else
  Register temp = ToRegister(lir->temp());
#endif
  Register64 temp64 = ToRegister64(lir->temp64());

  // Load the length.
  Register scratch = out.scratchReg();
  Register index = ToRegister(lir->index());
  masm.loadArrayBufferViewLengthIntPtr(object, scratch);

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, scratch, temp, &outOfBounds);

  // Load the elements vector.
  masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);

  Scalar::Type arrayType = lir->mir()->arrayType();
  BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
  masm.load64(source, temp64);

  // Box the payload into a BigInt and tag the result value.
  Register bigInt = out.scratchReg();
  emitCreateBigInt(lir, arrayType, temp64, bigInt, temp);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
// Helper for emitting jump tables for switch-like constructs. The table can
// live inline with the code (ARM, PC-relative) or out-of-line (other
// architectures, addressed through a CodeLabel). The table entries are code
// pointers patched to the registered code labels after codegen completes.
template <SwitchTableType tableType>
class OutOfLineSwitch : public OutOfLineCodeBase<CodeGenerator> {
  using LabelsVector = Vector<Label, 0, JitAllocPolicy>;
  using CodeLabelsVector = Vector<CodeLabel, 0, JitAllocPolicy>;
  // Labels for each case's code, bound via addCodeEntry().
  LabelsVector labels_;
  // Code labels reserved in the table, patched to |labels_| later.
  CodeLabelsVector codeLabels_;
  // Marks the start of the (out-of-line) jump table itself.
  CodeLabel start_;
  // Set once visitOutOfLineSwitch runs, switching addTableEntry's behavior.
  bool isOutOfLine_;

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineSwitch(this);
  }

 public:
  explicit OutOfLineSwitch(TempAllocator& alloc)
      : labels_(alloc), codeLabels_(alloc), isOutOfLine_(false) {}

  CodeLabel* start() { return &start_; }

  CodeLabelsVector& codeLabels() { return codeLabels_; }
  LabelsVector& labels() { return labels_; }

  // Emit an indirect jump through the table entry selected by |index|.
  void jumpToCodeEntries(MacroAssembler& masm, Register index, Register temp) {
    Register base;
    if (tableType == SwitchTableType::Inline) {
#if defined(JS_CODEGEN_ARM)
      base = ::js::jit::pc;
#else
      MOZ_CRASH("NYI: SwitchTableType::Inline");
#endif
    } else {
#if defined(JS_CODEGEN_ARM)
      MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
#else
      masm.mov(start(), temp);
      base = temp;
#endif
    }
    BaseIndex jumpTarget(base, index, ScalePointer);
    masm.branchToComputedAddress(jumpTarget);
  }

  // Register an entry in the switch table.
  void addTableEntry(MacroAssembler& masm) {
    if ((!isOutOfLine_ && tableType == SwitchTableType::Inline) ||
        (isOutOfLine_ && tableType == SwitchTableType::OutOfLine)) {
      CodeLabel cl;
      masm.writeCodePointer(&cl);
      masm.propagateOOM(codeLabels_.append(std::move(cl)));
    }
  }
  // Register the code, to which the table will jump to.
  void addCodeEntry(MacroAssembler& masm) {
    Label entry;
    masm.bind(&entry);
    masm.propagateOOM(labels_.append(std::move(entry)));
  }

  void setOutOfLine() { isOutOfLine_ = true; }
};
// Emit the out-of-line portion of a jump table: reserve pointer-sized slots
// for the table (when stored out-of-line) and bind each reserved code label
// to its case's code offset so the linker can patch absolute addresses.
template <SwitchTableType tableType>
void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<tableType>* jumpTable) {
  jumpTable->setOutOfLine();
  auto& labels = jumpTable->labels();

  if (tableType == SwitchTableType::OutOfLine) {
#if defined(JS_CODEGEN_ARM)
    MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
#elif defined(JS_CODEGEN_NONE)
    MOZ_CRASH();
#else

#  if defined(JS_CODEGEN_ARM64)
    // Keep constant-pool/nop emission from splitting the table.
    AutoForbidPoolsAndNops afp(
        &masm,
        (labels.length() + 1) * (sizeof(void*) / vixl::kInstructionSize));
#  endif

    masm.haltingAlign(sizeof(void*));

    // Bind the address of the jump table and reserve the space for code
    // pointers to jump in the newly generated code.
    masm.bind(jumpTable->start());
    masm.addCodeLabel(*jumpTable->start());
    for (size_t i = 0, e = labels.length(); i < e; i++) {
      jumpTable->addTableEntry(masm);
    }
#endif
  }

  // Register all reserved pointers of the jump table to target labels. The
  // entries of the jump table need to be absolute addresses and thus must be
  // patched after codegen is finished.
  auto& codeLabels = jumpTable->codeLabels();
  for (size_t i = 0, e = codeLabels.length(); i < e; i++) {
    auto& cl = codeLabels[i];
    cl.target()->bind(labels[i].offset());
    masm.addCodeLabel(cl);
  }
}

// Explicit instantiations for both table placements.
template void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<SwitchTableType::Inline>* jumpTable);
template void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<SwitchTableType::OutOfLine>* jumpTable);
// Store |value| into a typed-array slot |dest| (Address or BaseIndex),
// dispatching on the element type: float types go through the FP store path,
// integer types through the int path (with an immediate fast path for
// constant values).
template <typename T>
static inline void StoreToTypedArray(MacroAssembler& masm,
                                     Scalar::Type writeType,
                                     const LAllocation* value, const T& dest) {
  if (writeType == Scalar::Float32 || writeType == Scalar::Float64) {
    masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
  } else {
    if (value->isConstant()) {
      masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
    } else {
      masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
    }
  }
}
// Store an unboxed scalar into typed-array elements. The index may be a
// compile-time constant (folded into an Address) or a register (BaseIndex
// scaled by the element size).
void CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();

  const MStoreUnboxedScalar* mir = lir->mir();

  Scalar::Type writeType = mir->writeType();

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), writeType);
    StoreToTypedArray(masm, writeType, value, dest);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(writeType));
    StoreToTypedArray(masm, writeType, value, dest);
  }
}
// Store a BigInt into a 64-bit typed-array element: unbox the BigInt's
// 64-bit digits into |temp| first, then store to the (constant or dynamic)
// index.
void CodeGenerator::visitStoreUnboxedBigInt(LStoreUnboxedBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp = ToRegister64(lir->temp());

  Scalar::Type writeType = lir->mir()->writeType();

  masm.loadBigInt64(value, temp);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), writeType);
    masm.storeToTypedBigIntArray(writeType, temp, dest);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(writeType));
    masm.storeToTypedBigIntArray(writeType, temp, dest);
  }
}
// Store into a DataView at a byte offset (TimesOne index, so the access may
// be unaligned). Fast path: when the requested endianness matches the host
// and unaligned accesses are supported, store directly. Slow path: move the
// value into a GPR (or GPR pair), byte-swap it if the runtime endianness
// flag requires it, then emit an explicitly-unaligned store.
void CodeGenerator::visitStoreDataViewElement(LStoreDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp = ToTempRegisterOrInvalid(lir->temp());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());

  const MStoreDataViewElement* mir = lir->mir();
  Scalar::Type writeType = mir->writeType();

  BaseIndex dest(elements, ToRegister(lir->index()), TimesOne);

  // No swap needed when the littleEndian flag is a constant matching the
  // host byte order.
  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly store if no byte swap is needed and the platform supports
  // unaligned accesses for the access. (Such support is assumed for integer
  // types.)
  if (noSwap && (!Scalar::isFloatingType(writeType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    if (!Scalar::isBigIntType(writeType)) {
      StoreToTypedArray(masm, writeType, value, dest);
    } else {
      masm.loadBigInt64(ToRegister(value), temp64);
      masm.storeToTypedBigIntArray(writeType, temp64, dest);
    }
    return;
  }

  // Load the value into a gpr register.
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      if (value->isConstant()) {
        masm.move32(Imm32(ToInt32(value)), temp);
      } else {
        masm.move32(ToRegister(value), temp);
      }
      break;
    case Scalar::Float32: {
      FloatRegister fvalue = ToFloatRegister(value);
      // Canonicalize NaNs before the raw bits become observable in memory.
      masm.canonicalizeFloatIfDeterministic(fvalue);
      masm.moveFloat32ToGPR(fvalue, temp);
      break;
    }
    case Scalar::Float64: {
      FloatRegister fvalue = ToFloatRegister(value);
      masm.canonicalizeDoubleIfDeterministic(fvalue);
      masm.moveDoubleToGPR64(fvalue, temp64);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.loadBigInt64(ToRegister(value), temp64);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      // 8-bit types never need a swap and are handled by the fast path.
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Runtime endianness flag: skip the swap when it already matches the
      // host byte order.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (writeType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(temp);
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(temp);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
        masm.byteSwap32(temp);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Store the value into the destination.
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
      masm.store16Unaligned(temp, dest);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.store32Unaligned(temp, dest);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.store64Unaligned(temp64, dest);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Store into a typed array with a hole check: if the index is out of bounds
// (Spectre-safe comparison against the length), skip the store entirely
// instead of writing.
void CodeGenerator::visitStoreTypedArrayElementHole(
    LStoreTypedArrayElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();

  Scalar::Type arrayType = lir->mir()->arrayType();

  Register index = ToRegister(lir->index());
  const LAllocation* length = lir->length();
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());

  Label skip;
  if (length->isRegister()) {
    masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
  } else {
    masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
  }

  BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
  StoreToTypedArray(masm, arrayType, value, dest);

  masm.bind(&skip);
}
// BigInt variant of the hole-checked typed-array store: bounds-check first
// (skipping on failure), then unbox the BigInt to 64 bits and store it.
void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
    LStoreTypedArrayElementHoleBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp = ToRegister64(lir->temp());

  Scalar::Type arrayType = lir->mir()->arrayType();

  Register index = ToRegister(lir->index());
  const LAllocation* length = lir->length();
  // The 64-bit temp's scratch half doubles as the Spectre bounds-check temp.
  Register spectreTemp = temp.scratchReg();

  Label skip;
  if (length->isRegister()) {
    masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
  } else {
    masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
  }

  masm.loadBigInt64(value, temp);

  BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
  masm.storeToTypedBigIntArray(arrayType, temp, dest);

  masm.bind(&skip);
}
// Implement Atomics.isLockFree: compute the JS-level lock-free answer for
// the byte size in |value| into |output|.
void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
  Register value = ToRegister(lir->value());
  Register output = ToRegister(lir->output());

  masm.atomicIsLockFreeJS(value, output);
}
// Clamp an int32 to the [0, 255] range in place (input reuses the output
// register, as asserted).
void CodeGenerator::visitClampIToUint8(LClampIToUint8* lir) {
  Register output = ToRegister(lir->output());
  MOZ_ASSERT(output == ToRegister(lir->input()));
  masm.clampIntToUint8(output);
}
// Clamp a double to a uint8 value (used for Uint8Clamped array stores).
void CodeGenerator::visitClampDToUint8(LClampDToUint8* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());
  masm.clampDoubleToUint8(input, output);
}
// Clamp an arbitrary Value to a uint8. Strings take an out-of-line VM call
// (StringToNumber) before clamping; inputs that cannot be converted bail out.
void CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) {
  ValueOperand operand = ToValue(lir, LClampVToUint8::InputIndex);
  FloatRegister tempFloat = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  using Fn = bool (*)(JSContext*, JSString*, double*);
  OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
      lir, ArgList(output), StoreFloatRegisterTo(tempFloat));
  Label* stringEntry = oolString->entry();
  Label* stringRejoin = oolString->rejoin();

  Label fails;
  masm.clampValueToUint8(operand, stringEntry, stringRejoin, output, tempFloat,
                         output, &fails);

  bailoutFrom(&fails, lir->snapshot());
}
// Emit an inline cache for the `in` operator (key in object).
void CodeGenerator::visitInCache(LInCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();

  ConstantOrRegister key =
      toConstantOrRegister(ins, LInCache::LhsIndex, ins->mir()->key()->type());
  Register object = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  IonInIC cache(liveRegs, key, object, output, temp);
  addIC(ins, allocateIC(cache));
}
// Implement `index in array` for dense elements: true iff the index is below
// the initialized length and the element is not a hole (magic value).
// Negative indices bail out when the MIR requires the check.
void CodeGenerator::visitInArray(LInArray* lir) {
  const MInArray* mir = lir->mir();
  Register elements = ToRegister(lir->elements());
  Register initLength = ToRegister(lir->initLength());
  Register output = ToRegister(lir->output());

  Label falseBranch, done, trueBranch;

  if (lir->index()->isConstant()) {
    int32_t index = ToInt32(lir->index());

    if (index < 0) {
      // Constant negative index: always bail.
      MOZ_ASSERT(mir->needsNegativeIntCheck());
      bailout(lir->snapshot());
      return;
    }

    masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index),
                  &falseBranch);

    NativeObject::elementsSizeMustNotOverflow();
    Address address = Address(elements, index * sizeof(Value));
    masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
  } else {
    Register index = ToRegister(lir->index());

    // An unsigned comparison against initLength treats a negative index as a
    // huge value; route that failure to the negative-int check when needed.
    Label negativeIntCheck;
    Label* failedInitLength = &falseBranch;
    if (mir->needsNegativeIntCheck()) {
      failedInitLength = &negativeIntCheck;
    }

    masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);

    BaseObjectElementIndex address(elements, index);
    masm.branchTestMagic(Assembler::Equal, address, &falseBranch);

    if (mir->needsNegativeIntCheck()) {
      masm.jump(&trueBranch);
      masm.bind(&negativeIntCheck);

      // Genuinely negative indices bail out; merely out-of-range ones are
      // false.
      bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

      masm.jump(&falseBranch);
    }
  }

  masm.bind(&trueBranch);
  masm.move32(Imm32(1), output);
  masm.jump(&done);

  masm.bind(&falseBranch);
  masm.move32(Imm32(0), output);
  masm.bind(&done);
}
// Guard that a dense element is not a hole (magic value); bail out if it is.
void CodeGenerator::visitGuardElementNotHole(LGuardElementNotHole* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* index = lir->index();

  Label testMagic;
  if (index->isConstant()) {
    Address address(elements, ToInt32(index) * sizeof(js::Value));
    masm.branchTestMagic(Assembler::Equal, address, &testMagic);
  } else {
    BaseObjectElementIndex address(elements, ToRegister(index));
    masm.branchTestMagic(Assembler::Equal, address, &testMagic);
  }
  bailoutFrom(&testMagic, lir->snapshot());
}
// instanceof with an object LHS; shares the prototype-chain walk in
// emitInstanceOf.
void CodeGenerator::visitInstanceOfO(LInstanceOfO* ins) {
  Register protoReg = ToRegister(ins->rhs());
  emitInstanceOf(ins, protoReg);
}
// instanceof with a boxed-Value LHS; shares the prototype-chain walk in
// emitInstanceOf.
void CodeGenerator::visitInstanceOfV(LInstanceOfV* ins) {
  Register protoReg = ToRegister(ins->rhs());
  emitInstanceOf(ins, protoReg);
}
// Shared codegen for LInstanceOfO/LInstanceOfV when the RHS function's
// .prototype object is known (in protoReg): walk the LHS's prototype chain
// inline, falling back to an IsPrototypeOf VM call only when a lazy proto
// (e.g. a cross-compartment wrapper) is encountered.
void CodeGenerator::emitInstanceOf(LInstruction* ins, Register protoReg) {
  // This path implements fun_hasInstance when the function's prototype is
  // known to be the object in protoReg
  Label done;
  Register output = ToRegister(ins->getDef(0));

  // If the lhs is a primitive, the result is false.
  Register objReg;
  if (ins->isInstanceOfV()) {
    Label isObject;
    ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
    masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
    masm.mov(ImmWord(0), output);
    masm.jump(&done);
    masm.bind(&isObject);
    objReg = masm.extractObject(lhsValue, output);
  } else {
    objReg = ToRegister(ins->toInstanceOfO()->lhs());
  }

  // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
  // This follows the main loop of js::IsPrototypeOf, though additionally breaks
  // out of the loop on Proxy::LazyProto.

  // Load the lhs's prototype.
  masm.loadObjProto(objReg, output);

  Label testLazy;
  {
    Label loopPrototypeChain;
    masm.bind(&loopPrototypeChain);

    // Test for the target prototype object.
    Label notPrototypeObject;
    masm.branchPtr(Assembler::NotEqual, output, protoReg, &notPrototypeObject);
    masm.mov(ImmWord(1), output);
    masm.jump(&done);
    masm.bind(&notPrototypeObject);

    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

    // Test for nullptr or Proxy::LazyProto
    masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);

    // Load the current object's prototype.
    masm.loadObjProto(output, output);

    masm.jump(&loopPrototypeChain);
  }

  // Make a VM call if an object with a lazy proto was found on the prototype
  // chain. This currently occurs only for cross compartment wrappers, which
  // we do not expect to be compared with non-wrapper functions from this
  // compartment. Otherwise, we stopped on a nullptr prototype and the output
  // register is already correct.

  using Fn = bool (*)(JSContext*, HandleObject, JSObject*, bool*);
  auto* ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(protoReg, objReg),
                                           StoreRegisterTo(output));

  // Regenerate the original lhs object for the VM call.
  Label regenerate, *lazyEntry;
  if (objReg != output) {
    // objReg was not clobbered by the chain walk; call the OOL path directly.
    lazyEntry = ool->entry();
  } else {
    // objReg aliases output (which the loop clobbered): re-extract the lhs
    // object before entering the OOL path.
    masm.bind(&regenerate);
    lazyEntry = &regenerate;
    if (ins->isInstanceOfV()) {
      ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
      objReg = masm.extractObject(lhsValue, output);
    } else {
      objReg = ToRegister(ins->toInstanceOfO()->lhs());
    }
    MOZ_ASSERT(objReg == output);
    masm.jump(ool->entry());
  }

  masm.bind(&testLazy);
  masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Emit an inline cache for instanceof when the RHS prototype is not known
// statically.
void CodeGenerator::visitInstanceOfCache(LInstanceOfCache* ins) {
  // The Lowering ensures that RHS is an object, and that LHS is a value.
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(ins, LInstanceOfCache::LHS));
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());

  IonInstanceOfIC ic(liveRegs, lhs, rhs, output);
  addIC(ins, allocateIC(ic));
}
// Call a DOM getter via its JSJitGetterOp ABI: optionally try a fixed/dynamic
// slot fast path first, then build a fake IonDOMGetter exit frame, switch
// realms if the getter lives elsewhere, and perform the ABI call with the
// result returned through a stack outparam.
void CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins) {
  const Register JSContextReg = ToRegister(ins->getJSContextReg());
  const Register ObjectReg = ToRegister(ins->getObjectReg());
  const Register PrivateReg = ToRegister(ins->getPrivReg());
  const Register ValueReg = ToRegister(ins->getValueReg());

  Label haveValue;
  if (ins->mir()->valueMayBeInSlot()) {
    size_t slot = ins->mir()->domMemberSlotIndex();
    // It's a bit annoying to redo these slot calculations, which duplicate
    // LSlots and a few other things like that, but I'm not sure there's a
    // way to reuse those here.
    //
    // If this ever gets fixed to work with proxies (by not assuming that
    // reserved slot indices, which is what domMemberSlotIndex() returns,
    // match fixed slot indices), we can reenable MGetDOMProperty for
    // proxies in IonBuilder.
    if (slot < NativeObject::MAX_FIXED_SLOTS) {
      masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
                     JSReturnOperand);
    } else {
      // It's a dynamic slot.
      slot -= NativeObject::MAX_FIXED_SLOTS;
      // Use PrivateReg as a scratch register for the slots pointer.
      masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
                   PrivateReg);
      masm.loadValue(Address(PrivateReg, slot * sizeof(js::Value)),
                     JSReturnOperand);
    }
    // A non-undefined slot value is the cached result; skip the getter call.
    masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
  }

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Make space for the outparam. Pre-initialize it to UndefinedValue so we
  // can trace it at GC time.
  masm.Push(UndefinedValue());
  // We pass the pointer to our out param as an instance of
  // JSJitGetterCallArgs, since on the binary level it's the same thing.
  static_assert(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
  masm.moveStackPtrTo(ValueReg);

  masm.Push(ObjectReg);

  LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());

  // Rooting will happen at GC time.
  masm.moveStackPtrTo(ObjectReg);

  Realm* getterRealm = ins->mir()->getterRealm();
  if (gen->realm->realmPtr() != getterRealm) {
    // We use JSContextReg as scratch register here.
    masm.switchToRealm(getterRealm, JSContextReg);
  }

  uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
  masm.loadJSContext(JSContextReg);
  masm.enterFakeExitFrame(JSContextReg, JSContextReg,
                          ExitFrameType::IonDOMGetter);

  markSafepointAt(safepointOffset, ins);

  masm.setupAlignedABICall();
  masm.loadJSContext(JSContextReg);
  masm.passABIArg(JSContextReg);
  masm.passABIArg(ObjectReg);
  masm.passABIArg(PrivateReg);
  masm.passABIArg(ValueReg);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitGetterOp>(ins->mir()->fun()),
                   MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (ins->mir()->isInfallible()) {
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    // Fallible getter: a false return means an exception is pending.
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  // Switch back to the current realm if needed. Note: if the getter threw an
  // exception, the exception handler will do this.
  if (gen->realm->realmPtr() != getterRealm) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "Clobbering ReturnReg should not affect the return value");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && ins->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  masm.adjustStack(IonDOMExitFrameLayout::Size());

  masm.bind(&haveValue);

  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Load a DOM member stored in a fixed slot, producing a boxed Value.
void CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins) {
  // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
  // use an LLoadFixedSlotV or some subclass of it for this case: that would
  // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
  // we'd have to duplicate a bunch of stuff we now get for free from
  // MGetDOMProperty.
  //
  // If this ever gets fixed to work with proxies (by not assuming that
  // reserved slot indices, which is what domMemberSlotIndex() returns,
  // match fixed slot indices), we can reenable MGetDOMMember for
  // proxies in IonBuilder.
  Register object = ToRegister(ins->object());
  size_t slot = ins->mir()->domMemberSlotIndex();
  ValueOperand result = ToOutValue(ins);

  masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
                 result);
}
// Load a DOM member stored in a fixed slot, producing an unboxed typed value.
void CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins) {
  // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
  // use an LLoadFixedSlotT or some subclass of it for this case: that would
  // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
  // we'd have to duplicate a bunch of stuff we now get for free from
  // MGetDOMProperty.
  //
  // If this ever gets fixed to work with proxies (by not assuming that
  // reserved slot indices, which is what domMemberSlotIndex() returns,
  // match fixed slot indices), we can reenable MGetDOMMember for
  // proxies in IonBuilder.
  Register object = ToRegister(ins->object());
  size_t slot = ins->mir()->domMemberSlotIndex();
  AnyRegister result = ToAnyRegister(ins->getDef(0));
  MIRType type = ins->mir()->type();

  masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
                        type, result);
}
// Call a DOM setter via its JSJitSetterOp ABI: push the argument Value,
// build a fake IonDOMSetter exit frame, switch realms if needed, and make
// the ABI call. Mirrors visitGetDOMProperty minus the slot fast path.
void CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins) {
  const Register JSContextReg = ToRegister(ins->getJSContextReg());
  const Register ObjectReg = ToRegister(ins->getObjectReg());
  const Register PrivateReg = ToRegister(ins->getPrivReg());
  const Register ValueReg = ToRegister(ins->getValueReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Push the argument. Rooting will happen at GC time.
  ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
  masm.Push(argVal);
  // We pass the pointer to our out param as an instance of
  // JSJitGetterCallArgs, since on the binary level it's the same thing.
  static_assert(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
  masm.moveStackPtrTo(ValueReg);

  masm.Push(ObjectReg);

  LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());

  // Rooting will happen at GC time.
  masm.moveStackPtrTo(ObjectReg);

  Realm* setterRealm = ins->mir()->setterRealm();
  if (gen->realm->realmPtr() != setterRealm) {
    // We use JSContextReg as scratch register here.
    masm.switchToRealm(setterRealm, JSContextReg);
  }

  uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
  masm.loadJSContext(JSContextReg);
  masm.enterFakeExitFrame(JSContextReg, JSContextReg,
                          ExitFrameType::IonDOMSetter);

  markSafepointAt(safepointOffset, ins);

  masm.setupAlignedABICall();
  masm.loadJSContext(JSContextReg);
  masm.passABIArg(JSContextReg);
  masm.passABIArg(ObjectReg);
  masm.passABIArg(PrivateReg);
  masm.passABIArg(ValueReg);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitSetterOp>(ins->mir()->fun()),
                   MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // A false return means an exception is pending.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Switch back to the current realm if needed. Note: if the setter threw an
  // exception, the exception handler will do this.
  if (gen->realm->realmPtr() != setterRealm) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  masm.adjustStack(IonDOMExitFrameLayout::Size());

  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Load a DOM proxy's expando private Value: reserved-slots pointer first,
// then the private slot it contains.
void CodeGenerator::visitLoadDOMExpandoValue(LLoadDOMExpandoValue* ins) {
  Register proxy = ToRegister(ins->proxy());
  ValueOperand out = ToOutValue(ins);

  masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
               out.scratchReg());
  masm.loadValue(Address(out.scratchReg(),
                         js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
                 out);
}
// Load a DOM proxy's expando value while guarding that the
// ExpandoAndGeneration's generation still matches the compile-time value;
// bail out on mismatch.
void CodeGenerator::visitLoadDOMExpandoValueGuardGeneration(
    LLoadDOMExpandoValueGuardGeneration* ins) {
  Register proxy = ToRegister(ins->proxy());
  ValueOperand out = ToOutValue(ins);

  Label bail;
  masm.loadDOMExpandoValueGuardGeneration(proxy, out,
                                          ins->mir()->expandoAndGeneration(),
                                          ins->mir()->generation(), &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Load a DOM proxy's expando value through its ExpandoAndGeneration without
// checking the generation.
void CodeGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
    LLoadDOMExpandoValueIgnoreGeneration* ins) {
  Register proxy = ToRegister(ins->proxy());
  ValueOperand out = ToOutValue(ins);

  masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
               out.scratchReg());

  // Load the ExpandoAndGeneration* from the PrivateValue.
  masm.loadPrivate(
      Address(out.scratchReg(),
              js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
      out.scratchReg());

  // Load expandoAndGeneration->expando into the output Value register.
  masm.loadValue(
      Address(out.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), out);
}
// Guard that the expando Value is either undefined (missing) or an object
// with the expected shape; otherwise bail out.
void CodeGenerator::visitGuardDOMExpandoMissingOrGuardShape(
    LGuardDOMExpandoMissingOrGuardShape* ins) {
  Register temp = ToRegister(ins->temp0());
  ValueOperand input =
      ToValue(ins, LGuardDOMExpandoMissingOrGuardShape::InputIndex);

  Label done;
  masm.branchTestUndefined(Assembler::Equal, input, &done);

  masm.debugAssertIsObject(input);
  masm.unboxObject(input, temp);
  // The expando object is not used in this case, so we don't need Spectre
  // mitigations.
  Label bail;
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, temp,
                                              ins->mir()->shape(), &bail);
  bailoutFrom(&bail, ins->snapshot());

  masm.bind(&done);
}
// Out-of-line path for IsCallable: holds the object/output registers for the
// slow VM-side ObjectIsCallable check emitted in visitOutOfLineIsCallable.
class OutOfLineIsCallable : public OutOfLineCodeBase<CodeGenerator> {
  Register object_;
  Register output_;

 public:
  OutOfLineIsCallable(Register object, Register output)
      : object_(object), output_(output) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineIsCallable(this);
  }
  Register object() const { return object_; }
  Register output() const { return output_; }
};
// IsCallable on a known object: inline fast path, OOL ABI call fallback.
void CodeGenerator::visitIsCallableO(LIsCallableO* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(object, output);
  addOutOfLineCode(ool, ins->mir());

  masm.isCallable(object, output, ool->entry());

  masm.bind(ool->rejoin());
}
// IsCallable on a boxed Value: non-objects are trivially false; objects take
// the same inline/OOL path as visitIsCallableO.
void CodeGenerator::visitIsCallableV(LIsCallableV* ins) {
  ValueOperand val = ToValue(ins, LIsCallableV::ObjectIndex);
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  Label notObject;
  masm.fallibleUnboxObject(val, temp, &notObject);

  OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(temp, output);
  addOutOfLineCode(ool, ins->mir());

  masm.isCallable(temp, output, ool->entry());
  masm.jump(ool->rejoin());

  masm.bind(&notObject);
  masm.move32(Imm32(0), output);

  masm.bind(ool->rejoin());
}
// Slow path for IsCallable: preserve volatile registers and call the C++
// ObjectIsCallable helper via the ABI.
void CodeGenerator::visitOutOfLineIsCallable(OutOfLineIsCallable* ool) {
  Register object = ool->object();
  Register output = ool->output();

  saveVolatile(output);
  using Fn = bool (*)(JSObject* obj);
  masm.setupAlignedABICall();
  masm.passABIArg(object);
  masm.callWithABI<Fn, ObjectIsCallable>();
  masm.storeCallBoolResult(output);
  restoreVolatile(output);
  masm.jump(ool->rejoin());
}
// Out-of-line path for IsConstructor: keeps the LIR node so the slow path
// can re-derive its registers.
class OutOfLineIsConstructor : public OutOfLineCodeBase<CodeGenerator> {
  LIsConstructor* ins_;

 public:
  explicit OutOfLineIsConstructor(LIsConstructor* ins) : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineIsConstructor(this);
  }
  LIsConstructor* ins() const { return ins_; }
};
// IsConstructor on a known object: inline fast path, OOL ABI call fallback.
void CodeGenerator::visitIsConstructor(LIsConstructor* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  OutOfLineIsConstructor* ool = new (alloc()) OutOfLineIsConstructor(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.isConstructor(object, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Slow path for IsConstructor: preserve volatile registers and call the C++
// ObjectIsConstructor helper via the ABI.
void CodeGenerator::visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool) {
  LIsConstructor* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  saveVolatile(output);
  using Fn = bool (*)(JSObject* obj);
  masm.setupAlignedABICall();
  masm.passABIArg(object);
  masm.callWithABI<Fn, ObjectIsConstructor>();
  masm.storeCallBoolResult(output);
  restoreVolatile(output);
  masm.jump(ool->rejoin());
}
// Set |output| iff |object| is the Array constructor of a different realm.
void CodeGenerator::visitIsCrossRealmArrayConstructor(
    LIsCrossRealmArrayConstructor* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  masm.setIsCrossRealmArrayConstructor(object, output);
}
// Shared Array.isArray codegen: plain ArrayObjects answer true inline,
// proxies defer to the OOL VM call (which handles unwrapping), everything
// else is false. |notArray| is an optional pre-bound "known not array" entry.
// NOTE: clobbers |output| with the object's class pointer before producing
// the boolean result.
static void EmitObjectIsArray(MacroAssembler& masm, OutOfLineCode* ool,
                              Register obj, Register output,
                              Label* notArray = nullptr) {
  masm.loadObjClassUnsafe(obj, output);

  Label isArray;
  masm.branchPtr(Assembler::Equal, output, ImmPtr(&ArrayObject::class_),
                 &isArray);

  // Branch to OOL path if it's a proxy.
  masm.branchTestClassIsProxy(true, output, ool->entry());

  if (notArray) {
    masm.bind(notArray);
  }
  masm.move32(Imm32(0), output);
  masm.jump(ool->rejoin());

  masm.bind(&isArray);
  masm.move32(Imm32(1), output);

  masm.bind(ool->rejoin());
}
// Array.isArray on a known object; proxies fall back to js::IsArrayFromJit.
void CodeGenerator::visitIsArrayO(LIsArrayO* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  using Fn = bool (*)(JSContext*, HandleObject, bool*);
  OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
      lir, ArgList(object), StoreRegisterTo(output));
  EmitObjectIsArray(masm, ool, object, output);
}
// Array.isArray on a boxed Value: non-objects are routed to the shared
// helper's notArray label (result false).
void CodeGenerator::visitIsArrayV(LIsArrayV* lir) {
  ValueOperand val = ToValue(lir, LIsArrayV::ValueIndex);
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  Label notArray;
  masm.fallibleUnboxObject(val, temp, &notArray);

  using Fn = bool (*)(JSContext*, HandleObject, bool*);
  OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
      lir, ArgList(temp), StoreRegisterTo(output));
  EmitObjectIsArray(masm, ool, temp, output, &notArray);
}
// Test whether an object is a TypedArray. When the MIR says the object may
// be a (cross-compartment) wrapper, proxies take an OOL VM call that can
// unwrap; otherwise the class check alone decides.
void CodeGenerator::visitIsTypedArray(LIsTypedArray* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;
  if (lir->mir()->isPossiblyWrapped()) {
    using Fn = bool (*)(JSContext*, JSObject*, bool*);
    ool = oolCallVM<Fn, jit::IsPossiblyWrappedTypedArray>(
        lir, ArgList(object), StoreRegisterTo(output));
  }

  Label notTypedArray;
  Label done;

  // |output| temporarily holds the class pointer for the checks below.
  masm.loadObjClassUnsafe(object, output);
  masm.branchIfClassIsNotTypedArray(output, &notTypedArray);

  masm.move32(Imm32(1), output);
  masm.jump(&done);
  masm.bind(&notTypedArray);
  if (ool) {
    masm.branchTestClassIsProxy(true, output, ool->entry());
  }
  masm.move32(Imm32(0), output);
  masm.bind(&done);
  if (ool) {
    masm.bind(ool->rejoin());
  }
}
// Set |output| to whether the Value holds an object.
void CodeGenerator::visitIsObject(LIsObject* ins) {
  Register output = ToRegister(ins->output());
  ValueOperand value = ToValue(ins, LIsObject::ObjectIndex);
  masm.testObjectSet(Assembler::Equal, value, output);
}
// Branch form of IsObject: jump straight to the true/false successor.
void CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins) {
  ValueOperand value = ToValue(ins, LIsObjectAndBranch::Input);
  testObjectEmitBranch(Assembler::Equal, value, ins->ifTrue(), ins->ifFalse());
}
// Set |output| to whether the Value is null or undefined.
void CodeGenerator::visitIsNullOrUndefined(LIsNullOrUndefined* ins) {
  Register output = ToRegister(ins->output());
  ValueOperand value = ToValue(ins, LIsNullOrUndefined::InputIndex);

  Label isNotNull, done;
  masm.branchTestNull(Assembler::NotEqual, value, &isNotNull);

  masm.move32(Imm32(1), output);
  masm.jump(&done);

  masm.bind(&isNotNull);
  masm.testUndefinedSet(Assembler::Equal, value, output);

  masm.bind(&done);
}
// Branch to ifTrue when the value is null or undefined, else to ifFalse.
void CodeGenerator::visitIsNullOrUndefinedAndBranch(
    LIsNullOrUndefinedAndBranch* ins) {
  Label* ifTrue = getJumpLabelForBranch(ins->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(ins->ifFalse());
  ValueOperand value = ToValue(ins, LIsNullOrUndefinedAndBranch::Input);

  // Extract the tag once and test it twice instead of re-splitting the value.
  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  masm.branchTestNull(Assembler::Equal, tag, ifTrue);
  masm.branchTestUndefined(Assembler::Equal, tag, ifTrue);

  // Fall through when the false target is the next block in emission order.
  if (!isNextBlock(ins->ifFalse()->lir())) {
    masm.jump(ifFalse);
  }
}
void CodeGenerator::loadOutermostJSScript(Register reg) {
  // The "outermost" JSScript means the script that we are compiling
  // basically; this is not always the script associated with the
  // current basic block, which might be an inlined script.

  MIRGraph& graph = current->mir()->graph();
  MBasicBlock* entryBlock = graph.entryBlock();
  // The entry block of the MIR graph always belongs to the outermost script.
  masm.movePtr(ImmGCPtr(entryBlock->info().script()), reg);
}
17072 void CodeGenerator::loadJSScriptForBlock(MBasicBlock* block, Register reg) {
17073 // The current JSScript means the script for the current
17074 // basic block. This may be an inlined script.
17076 JSScript* script = block->info().script();
17077 masm.movePtr(ImmGCPtr(script), reg);
17080 void CodeGenerator::visitHasClass(LHasClass* ins) {
17081 Register lhs = ToRegister(ins->lhs());
17082 Register output = ToRegister(ins->output());
17084 masm.loadObjClassUnsafe(lhs, output);
17085 masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()),
17086 output);
// Bail out of Ion code if the object's class is not the expected one.
void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp0());

  // branchTestObjClass may zero the object register on speculative paths
  // (we should have a defineReuseInput allocation in this case).
  Register spectreRegToZero = lhs;

  Label notEqual;

  masm.branchTestObjClass(Assembler::NotEqual, lhs, ins->mir()->getClass(),
                          temp, spectreRegToZero, &notEqual);

  // Can't return null-return here, so bail.
  bailoutFrom(&notEqual, ins->snapshot());
}
// Bail out of Ion code if the object is not a function.
void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp0());

  // branchTestObjIsFunction may zero the object register on speculative paths
  // (we should have a defineReuseInput allocation in this case).
  Register spectreRegToZero = lhs;

  Label notEqual;

  masm.branchTestObjIsFunction(Assembler::NotEqual, lhs, temp, spectreRegToZero,
                               &notEqual);

  // Can't return null-return here, so bail.
  bailoutFrom(&notEqual, ins->snapshot());
}
// Call js::ObjectClassToString via the ABI and bail out if it returned null
// (the helper reports failure by returning a null JSString*).
void CodeGenerator::visitObjectClassToString(LObjectClassToString* lir) {
  Register obj = ToRegister(lir->lhs());
  Register temp = ToRegister(lir->temp0());

  using Fn = JSString* (*)(JSContext*, JSObject*);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::ObjectClassToString>();

  bailoutCmpPtr(Assembler::Equal, ReturnReg, ImmWord(0), lir->snapshot());
}
// Wasm parameters already live in their ABI-assigned locations; nothing to do.
void CodeGenerator::visitWasmParameter(LWasmParameter* lir) {}
// 64-bit wasm parameters likewise require no code.
void CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir) {}
void CodeGenerator::visitWasmReturn(LWasmReturn* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}
void CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}
void CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}
// Debug-only: emit runtime checks that an integer register's value lies
// within the bounds computed by range analysis; crash with a message if not.
void CodeGenerator::emitAssertRangeI(MIRType type, const Range* r,
                                     Register input) {
  // Check the lower bound. Skip when the bound is INT32_MIN: every int32
  // trivially satisfies it.
  if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }

  // Check the upper bound; skip the trivial INT32_MAX bound likewise.
  if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
  // r->exponent(), there's nothing to check, because if we ended up in the
  // integer range checking code, the value is already in an integer register
  // in the integer range.
}
// Debug-only: emit runtime checks that a double register's value lies within
// the bounds computed by range analysis (bounds, negative zero, NaN/Inf,
// exponent); crash with a message on violation. |temp| is clobbered.
void CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input,
                                     FloatRegister temp) {
  // Check the lower bound.
  if (r->hasInt32LowerBound()) {
    Label success;
    masm.loadConstantDouble(r->lower(), temp);
    if (r->canBeNaN()) {
      // NaN compares unordered with everything; accept it explicitly.
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &success);
    masm.assumeUnreachable(
        "Double input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }
  // Check the upper bound.
  if (r->hasInt32UpperBound()) {
    Label success;
    masm.loadConstantDouble(r->upper(), temp);
    if (r->canBeNaN()) {
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
    masm.assumeUnreachable(
        "Double input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // This code does not yet check r->canHaveFractionalPart(). This would require
  // new assembler interfaces to make rounding instructions available.

  if (!r->canBeNegativeZero()) {
    Label success;

    // First, test for being equal to 0.0, which also includes -0.0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp,
                      &success);

    // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
    // -Infinity instead of Infinity.
    masm.loadConstantDouble(1.0, temp);
    masm.divDouble(input, temp);
    masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);

    masm.assumeUnreachable("Input shouldn't be negative zero.");

    masm.bind(&success);
  }

  if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
      r->exponent() < FloatingPoint<double>::kExponentBias) {
    // Check the bounds implied by the maximum exponent.
    Label exponentLoOk;
    masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp,
                      &exponentLoOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentLoOk);

    Label exponentHiOk;
    masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &exponentHiOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentHiOk);
  } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
    // If we think the value can't be NaN, check that it isn't.
    Label notnan;
    masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
    masm.assumeUnreachable("Input shouldn't be NaN.");
    masm.bind(&notnan);

    // If we think the value also can't be an infinity, check that it isn't.
    if (!r->canBeInfiniteOrNaN()) {
      Label notposinf;
      masm.loadConstantDouble(PositiveInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
      masm.assumeUnreachable("Input shouldn't be +Inf.");
      masm.bind(&notposinf);

      Label notneginf;
      masm.loadConstantDouble(NegativeInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
      masm.assumeUnreachable("Input shouldn't be -Inf.");
      masm.bind(&notneginf);
    }
  }
}
// Debug-only: crash at runtime if the object's class doesn't match the
// KnownClass recorded on the MIR node.
void CodeGenerator::visitAssertClass(LAssertClass* ins) {
  Register obj = ToRegister(ins->input());
  Register temp = ToRegister(ins->getTemp(0));

  Label success;
  if (ins->mir()->getClass() == &FunctionClass) {
    // Allow both possible function classes here.
    masm.branchTestObjIsFunctionNoSpectreMitigations(Assembler::Equal, obj,
                                                     temp, &success);
  } else {
    masm.branchTestObjClassNoSpectreMitigations(
        Assembler::Equal, obj, ins->mir()->getClass(), temp, &success);
  }
  masm.assumeUnreachable("Wrong KnownClass during run-time");
  masm.bind(&success);
}
// Debug-only: crash at runtime if the object's shape doesn't match the shape
// recorded on the MIR node.
void CodeGenerator::visitAssertShape(LAssertShape* ins) {
  Register obj = ToRegister(ins->input());

  Label success;
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::Equal, obj,
                                              ins->mir()->shape(), &success);
  masm.assumeUnreachable("Wrong Shape during run-time");
  masm.bind(&success);
}
17321 void CodeGenerator::visitAssertRangeI(LAssertRangeI* ins) {
17322 Register input = ToRegister(ins->input());
17323 const Range* r = ins->range();
17325 emitAssertRangeI(ins->mir()->input()->type(), r, input);
// Debug-only: assert a double value is inside its computed range.
void CodeGenerator::visitAssertRangeD(LAssertRangeD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister temp = ToFloatRegister(ins->temp());
  const Range* r = ins->range();

  emitAssertRangeD(r, input, temp);
}
// Debug-only: assert a float32 value is inside its computed range by widening
// it to double and reusing the double range-check emitter.
void CodeGenerator::visitAssertRangeF(LAssertRangeF* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister temp = ToFloatRegister(ins->temp());
  FloatRegister temp2 = ToFloatRegister(ins->temp2());

  const Range* r = ins->range();

  masm.convertFloat32ToDouble(input, temp);
  emitAssertRangeD(r, temp, temp2);
}
// Debug-only: assert a boxed Value is inside its computed range. Dispatches on
// the value's tag: int32 and double payloads are checked; any other tag means
// the range was wrong.
void CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) {
  const Range* r = ins->range();
  const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
  Label done;

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    {
      Label isNotInt32;
      masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
      {
        // Release the tag scratch while we unbox and check the payload.
        ScratchTagScopeRelease _(&tag);
        Register unboxInt32 = ToTempUnboxRegister(ins->temp());
        Register input = masm.extractInt32(value, unboxInt32);
        emitAssertRangeI(MIRType::Int32, r, input);
        masm.jump(&done);
      }
      masm.bind(&isNotInt32);
    }

    {
      Label isNotDouble;
      masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
      {
        ScratchTagScopeRelease _(&tag);
        FloatRegister input = ToFloatRegister(ins->floatTemp1());
        FloatRegister temp = ToFloatRegister(ins->floatTemp2());
        masm.unboxDouble(value, input);
        emitAssertRangeD(r, input, temp);
        masm.jump(&done);
      }
      masm.bind(&isNotDouble);
    }
  }

  masm.assumeUnreachable("Incorrect range for Value.");
  masm.bind(&done);
}
// Poll the runtime's interrupt bits and call InterruptCheck out-of-line when
// any bit is set.
void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) {
  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, InterruptCheck>(lir, ArgList(), StoreNothing());

  const void* interruptAddr = gen->runtime->addressOfInterruptBits();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0),
                ool->entry());
  masm.bind(ool->rejoin());
}
// Out-of-line path for a resumable wasm trap: emit the trap instruction with a
// safepoint so execution can continue afterwards, then rejoin.
void CodeGenerator::visitOutOfLineResumableWasmTrap(
    OutOfLineResumableWasmTrap* ool) {
  LInstruction* lir = ool->lir();
  masm.wasmTrap(ool->trap(), ool->bytecodeOffset());

  markSafepointAt(masm.currentOffset(), lir);

  // Note that masm.framePushed() doesn't include the register dump area.
  // That will be taken into account when the StackMap is created from the
  // LSafepoint.
  lir->safepoint()->setFramePushedAtStackMapBase(ool->framePushed());
  lir->safepoint()->setIsWasmTrap();

  masm.jump(ool->rejoin());
}
// Out-of-line path for a non-resumable wasm trap: just emit the trap; control
// never returns, so no rejoin jump is needed.
void CodeGenerator::visitOutOfLineAbortingWasmTrap(
    OutOfLineAbortingWasmTrap* ool) {
  masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
}
// Poll the wasm instance's interrupt flag and take a resumable CheckInterrupt
// trap out-of-line when it is set.
void CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  OutOfLineResumableWasmTrap* ool = new (alloc()) OutOfLineResumableWasmTrap(
      lir, masm.framePushed(), lir->mir()->bytecodeOffset(),
      wasm::Trap::CheckInterrupt);
  addOutOfLineCode(ool, lir->mir());
  masm.branch32(
      Assembler::NotEqual,
      Address(ToRegister(lir->instance()), wasm::Instance::offsetOfInterrupt()),
      Imm32(0), ool->entry());
  masm.bind(ool->rejoin());
}
// Emit an unconditional wasm trap.
void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  const MWasmTrap* mir = lir->mir();

  masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
}
// Trap when the wasm reference is null; fall through otherwise.
void CodeGenerator::visitWasmTrapIfNull(LWasmTrapIfNull* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  const MWasmTrapIfNull* mir = lir->mir();
  Label nonNull;
  Register ref = ToRegister(lir->ref());

  masm.branchWasmAnyRefIsNull(false, ref, &nonNull);
  masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
  masm.bind(&nonNull);
}
// Branch to |label| when |ref| (of static type |sourceType|) is a subtype of
// |destType|, dispatching to the per-hierarchy MacroAssembler helper (any /
// func / extern). |superSTV| and the scratches are only needed by some paths
// and may be Invalid when the caller knows they're unused.
static void BranchWasmRefIsSubtype(MacroAssembler& masm, Register ref,
                                   const wasm::RefType& sourceType,
                                   const wasm::RefType& destType, Label* label,
                                   Register superSTV, Register scratch1,
                                   Register scratch2) {
  if (destType.isAnyHierarchy()) {
    masm.branchWasmRefIsSubtypeAny(ref, sourceType, destType, label,
                                   /*onSuccess=*/true, superSTV, scratch1,
                                   scratch2);
  } else if (destType.isFuncHierarchy()) {
    masm.branchWasmRefIsSubtypeFunc(ref, sourceType, destType, label,
                                    /*onSuccess=*/true, superSTV, scratch1,
                                    scratch2);
  } else if (destType.isExternHierarchy()) {
    masm.branchWasmRefIsSubtypeExtern(ref, sourceType, destType, label,
                                      /*onSuccess=*/true);
  } else {
    MOZ_CRASH("could not generate casting code for unknown type hierarchy");
  }
}
// Emit `result = ref <: destType` for an abstract (non-concrete) destination
// type; no supertype vector is required.
void CodeGenerator::visitWasmRefIsSubtypeOfAbstract(
    LWasmRefIsSubtypeOfAbstract* ins) {
  MOZ_ASSERT(gen->compilingWasm());

  const MWasmRefIsSubtypeOfAbstract* mir = ins->mir();
  MOZ_ASSERT(!mir->destType().isTypeRef());

  Register ref = ToRegister(ins->ref());
  Register superSTV = Register::Invalid();
  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  Register scratch2 = Register::Invalid();
  Register result = ToRegister(ins->output());
  Label onSuccess;
  Label onFail;
  Label join;
  BranchWasmRefIsSubtype(masm, ref, mir->sourceType(), mir->destType(),
                         &onSuccess, superSTV, scratch1, scratch2);
  masm.bind(&onFail);
  masm.xor32(result, result);
  masm.jump(&join);
  masm.bind(&onSuccess);
  masm.move32(Imm32(1), result);
  masm.bind(&join);
}
// Emit `result = ref <: destType` for a concrete destination type; the
// supertype vector register is consulted for the subtype walk.
void CodeGenerator::visitWasmRefIsSubtypeOfConcrete(
    LWasmRefIsSubtypeOfConcrete* ins) {
  MOZ_ASSERT(gen->compilingWasm());

  const MWasmRefIsSubtypeOfConcrete* mir = ins->mir();
  MOZ_ASSERT(mir->destType().isTypeRef());

  Register ref = ToRegister(ins->ref());
  Register superSTV = ToRegister(ins->superSTV());
  Register scratch1 = ToRegister(ins->temp0());
  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  Register result = ToRegister(ins->output());
  Label onSuccess;
  Label join;
  BranchWasmRefIsSubtype(masm, ref, mir->sourceType(), mir->destType(),
                         &onSuccess, superSTV, scratch1, scratch2);
  masm.move32(Imm32(0), result);
  masm.jump(&join);
  masm.bind(&onSuccess);
  masm.move32(Imm32(1), result);
  masm.bind(&join);
}
// Branch on `ref <: destType` for an abstract destination type.
void CodeGenerator::visitWasmRefIsSubtypeOfAbstractAndBranch(
    LWasmRefIsSubtypeOfAbstractAndBranch* ins) {
  MOZ_ASSERT(gen->compilingWasm());
  Register ref = ToRegister(ins->ref());
  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  BranchWasmRefIsSubtype(masm, ref, ins->sourceType(), ins->destType(),
                         onSuccess, Register::Invalid(), scratch1,
                         Register::Invalid());
  masm.jump(onFail);
}
// Branch on `ref <: destType` for a concrete destination type.
void CodeGenerator::visitWasmRefIsSubtypeOfConcreteAndBranch(
    LWasmRefIsSubtypeOfConcreteAndBranch* ins) {
  MOZ_ASSERT(gen->compilingWasm());
  Register ref = ToRegister(ins->ref());
  Register superSTV = ToRegister(ins->superSTV());
  Register scratch1 = ToRegister(ins->temp0());
  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  BranchWasmRefIsSubtype(masm, ref, ins->sourceType(), ins->destType(),
                         onSuccess, superSTV, scratch1, scratch2);
  masm.jump(onFail);
}
// Copy the pinned wasm heap base register into the output. Only valid on
// platforms that reserve a HeapReg.
void CodeGenerator::visitWasmHeapReg(LWasmHeapReg* ins) {
#ifdef WASM_HAS_HEAPREG
  masm.movePtr(HeapReg, ToRegister(ins->output()));
#else
  MOZ_CRASH();
#endif
}
// Emit a 32-bit wasm memory bounds check; out-of-bounds traps.
void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
  // When there are no spectre mitigations in place, branching out-of-line to
  // the trap is a big performance win, but with mitigations it's trickier. See
  // bug 1680243.
  if (JitOptions.spectreIndexMasking) {
    Label ok;
    masm.wasmBoundsCheck32(Assembler::Below, ptr, boundsCheckLimit, &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
    masm.bind(&ok);
  } else {
    OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
        mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
                           ool->entry());
  }
}
// Emit a 64-bit wasm memory bounds check; out-of-bounds traps. See the 32-bit
// variant for the spectre-vs-out-of-line trade-off.
void CodeGenerator::visitWasmBoundsCheck64(LWasmBoundsCheck64* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
  Register64 boundsCheckLimit = ToRegister64(ins->boundsCheckLimit());
  // See above.
  if (JitOptions.spectreIndexMasking) {
    Label ok;
    masm.wasmBoundsCheck64(Assembler::Below, ptr, boundsCheckLimit, &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
    masm.bind(&ok);
  } else {
    OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
        mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck64(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
                           ool->entry());
  }
}
// Trap with UnalignedAccess when the pointer isn't a multiple of the access
// size (byteSize is a power of two, so (size - 1) masks the low bits).
void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
      mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
  addOutOfLineCode(ool, mir);
  masm.branchTest32(Assembler::NonZero, ptr, Imm32(mir->byteSize() - 1),
                    ool->entry());
}
// 64-bit variant of the alignment check. Only the low word can be misaligned,
// so testing the low 32 bits (or the whole register on 64-bit) suffices.
void CodeGenerator::visitWasmAlignmentCheck64(LWasmAlignmentCheck64* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
#ifdef JS_64BIT
  Register r = ptr.reg;
#else
  Register r = ptr.low;
#endif
  OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
      mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
  addOutOfLineCode(ool, mir);
  masm.branchTestPtr(Assembler::NonZero, r, Imm32(mir->byteSize() - 1),
                     ool->entry());
}
// Load a field at a fixed offset from the wasm Instance, using the load width
// implied by the MIR result type.
void CodeGenerator::visitWasmLoadInstance(LWasmLoadInstance* ins) {
  switch (ins->mir()->type()) {
    case MIRType::WasmAnyRef:
    case MIRType::Pointer:
      masm.loadPtr(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                   ToRegister(ins->output()));
      break;
    case MIRType::Int32:
      masm.load32(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                  ToRegister(ins->output()));
      break;
    default:
      MOZ_CRASH("MIRType not supported in WasmLoadInstance");
  }
}
// Load a 64-bit field at a fixed offset from the wasm Instance.
void CodeGenerator::visitWasmLoadInstance64(LWasmLoadInstance64* ins) {
  MOZ_ASSERT(ins->mir()->type() == MIRType::Int64);
  masm.load64(Address(ToRegister(ins->instance()), ins->mir()->offset()),
              ToOutRegister64(ins));
}
// Emit a non-atomic load/add/store that bumps the script's warm-up counter at
// the given absolute address. |tmp| is clobbered.
void CodeGenerator::incrementWarmUpCounter(AbsoluteAddress warmUpCount,
                                           JSScript* script, Register tmp) {
  // The code depends on the JitScript* not being discarded without also
  // invalidating Ion code. Assert this.
#ifdef DEBUG
  Label ok;
  masm.movePtr(ImmGCPtr(script), tmp);
  masm.loadJitScript(tmp, tmp);
  masm.branchPtr(Assembler::Equal, tmp, ImmPtr(script->jitScript()), &ok);
  masm.assumeUnreachable("Didn't find JitScript?");
  masm.bind(&ok);
#endif

  masm.load32(warmUpCount, tmp);
  masm.add32(Imm32(1), tmp);
  masm.store32(tmp, warmUpCount);
}
// Bump the warm-up counter stored in the script's JitScript.
void CodeGenerator::visitIncrementWarmUpCounter(LIncrementWarmUpCounter* ins) {
  Register tmp = ToRegister(ins->temp0());

  AbsoluteAddress warmUpCount =
      AbsoluteAddress(ins->mir()->script()->jitScript())
          .offset(JitScript::offsetOfWarmUpCount());
  incrementWarmUpCounter(warmUpCount, ins->mir()->script(), tmp);
}
// Bail out when the value is the JS_UNINITIALIZED_LEXICAL magic (TDZ read).
void CodeGenerator::visitLexicalCheck(LLexicalCheck* ins) {
  ValueOperand inputValue = ToValue(ins, LLexicalCheck::InputIndex);
  Label bail;
  masm.branchTestMagicValue(Assembler::Equal, inputValue,
                            JS_UNINITIALIZED_LEXICAL, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Unconditionally call into the VM to throw the recorded lexical error.
void CodeGenerator::visitThrowRuntimeLexicalError(
    LThrowRuntimeLexicalError* ins) {
  pushArg(Imm32(ins->mir()->errorNumber()));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, jit::ThrowRuntimeLexicalError>(ins);
}
// Unconditionally call into the VM to throw the recorded message kind.
void CodeGenerator::visitThrowMsg(LThrowMsg* ins) {
  pushArg(Imm32(static_cast<int32_t>(ins->mir()->throwMsgKind())));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, js::ThrowMsgOperation>(ins);
}
// Call into the VM to instantiate global declarations for this script at the
// recorded bytecode pc.
void CodeGenerator::visitGlobalDeclInstantiation(
    LGlobalDeclInstantiation* ins) {
  // Arguments are pushed in reverse order of the callee's parameter list.
  pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
  pushArg(ImmGCPtr(ins->mir()->block()->info().script()));

  using Fn = bool (*)(JSContext*, HandleScript, const jsbytecode*);
  callVM<Fn, GlobalDeclInstantiationFromIon>(ins);
}
// Implement the |debugger| statement: ask the VM whether a live Debugger wants
// the statement to hit, and bail out to the interpreter if so.
void CodeGenerator::visitDebugger(LDebugger* ins) {
  Register cx = ToRegister(ins->temp0());

  masm.loadJSContext(cx);
  using Fn = bool (*)(JSContext* cx);
  masm.setupAlignedABICall();
  masm.passABIArg(cx);
  masm.callWithABI<Fn, GlobalHasLiveOnDebuggerStatement>();

  Label bail;
  masm.branchIfTrueBool(ReturnReg, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Load |new.target| from the frame: when constructing it is the extra Value
// stored after the arguments; otherwise it is undefined.
void CodeGenerator::visitNewTarget(LNewTarget* ins) {
  ValueOperand output = ToOutValue(ins);

  // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
  Label notConstructing, done;
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.branchTestPtr(Assembler::Zero, calleeToken,
                     Imm32(CalleeToken_FunctionConstructing), &notConstructing);

  Register argvLen = output.scratchReg();
  masm.loadNumActualArgs(FramePointer, argvLen);

  Label useNFormals;

  size_t numFormalArgs = ins->mirRaw()->block()->info().nargs();
  masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs), &useNFormals);

  size_t argsOffset = JitFrameLayout::offsetOfActualArgs();
  {
    // More actuals than formals: new.target follows the actual arguments.
    BaseValueIndex newTarget(FramePointer, argvLen, argsOffset);
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  masm.bind(&useNFormals);

  {
    // Underflow: new.target sits after the formal-argument slots.
    Address newTarget(FramePointer,
                      argsOffset + (numFormalArgs * sizeof(Value)));
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  // else output = undefined
  masm.bind(&notConstructing);
  masm.moveValue(UndefinedValue(), output);
  masm.bind(&done);
}
// Validate a derived-class constructor's return value: an object is returned
// as-is; undefined falls back to |this| (which must not be the uninitialized
// magic); anything else throws via the out-of-line VM call.
void CodeGenerator::visitCheckReturn(LCheckReturn* ins) {
  ValueOperand returnValue = ToValue(ins, LCheckReturn::ReturnValueIndex);
  ValueOperand thisValue = ToValue(ins, LCheckReturn::ThisValueIndex);
  ValueOperand output = ToOutValue(ins);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>(
      ins, ArgList(returnValue), StoreNothing());

  Label noChecks;
  masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
  masm.branchTestUndefined(Assembler::NotEqual, returnValue, ool->entry());
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.moveValue(thisValue, output);
  masm.jump(ool->rejoin());
  masm.bind(&noChecks);
  masm.moveValue(returnValue, output);
  masm.bind(ool->rejoin());
}
// Unbox the value as an object into |output|, throwing via the VM when the
// value is not an object.
void CodeGenerator::visitCheckIsObj(LCheckIsObj* ins) {
  ValueOperand value = ToValue(ins, LCheckIsObj::ValueIndex);
  Register output = ToRegister(ins->output());

  using Fn = bool (*)(JSContext*, CheckIsObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowCheckIsObject>(
      ins, ArgList(Imm32(ins->mir()->checkKind())), StoreNothing());

  masm.fallibleUnboxObject(value, output, ool->entry());
  masm.bind(ool->rejoin());
}
// Throw via the VM when the value is null or undefined (not object-coercible).
void CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins) {
  ValueOperand checkValue = ToValue(ins, LCheckObjCoercible::ValueIndex);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowObjectCoercible>(
      ins, ArgList(checkValue), StoreNothing());
  masm.branchTestNull(Assembler::Equal, checkValue, ool->entry());
  masm.branchTestUndefined(Assembler::Equal, checkValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Validate a class |extends| clause: null is allowed, otherwise the heritage
// must be a constructor object; anything else throws via the VM call.
void CodeGenerator::visitCheckClassHeritage(LCheckClassHeritage* ins) {
  ValueOperand heritage = ToValue(ins, LCheckClassHeritage::HeritageIndex);
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckClassHeritageOperation>(
      ins, ArgList(heritage), StoreNothing());

  // `extends null` is valid; skip the remaining checks.
  masm.branchTestNull(Assembler::Equal, heritage, ool->rejoin());
  masm.fallibleUnboxObject(heritage, temp0, ool->entry());

  masm.isConstructor(temp0, temp1, ool->entry());
  masm.branchTest32(Assembler::Zero, temp1, temp1, ool->entry());

  masm.bind(ool->rejoin());
}
// Throw when |this| is still the uninitialized-lexical magic value (derived
// constructor used |this| before calling super()).
void CodeGenerator::visitCheckThis(LCheckThis* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThis::ValueIndex);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowUninitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Throw when |this| is already initialized (super() called twice): the value
// must still be the magic sentinel here.
void CodeGenerator::visitCheckThisReinit(LCheckThisReinit* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThisReinit::ThisValueIndex);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowInitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::NotEqual, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Call into the VM to allocate a generator object for the current frame.
void CodeGenerator::visitGenerator(LGenerator* lir) {
  Register callee = ToRegister(lir->callee());
  Register environmentChain = ToRegister(lir->environmentChain());
  Register argsObject = ToRegister(lir->argsObject());

  // Arguments are pushed in reverse order of the callee's parameter list.
  pushArg(argsObject);
  pushArg(environmentChain);
  pushArg(ImmGCPtr(current->mir()->info().script()));
  pushArg(callee);

  using Fn = JSObject* (*)(JSContext* cx, HandleFunction, HandleScript,
                           HandleObject, HandleObject);
  callVM<Fn, CreateGenerator>(lir);
}
// Call into the VM to resolve or reject an async function's promise.
void CodeGenerator::visitAsyncResolve(LAsyncResolve* lir) {
  Register generator = ToRegister(lir->generator());
  ValueOperand valueOrReason = ToValue(lir, LAsyncResolve::ValueOrReasonIndex);
  AsyncFunctionResolveKind resolveKind = lir->mir()->resolveKind();

  pushArg(Imm32(static_cast<int32_t>(resolveKind)));
  pushArg(valueOrReason);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
                           HandleValue, AsyncFunctionResolveKind);
  callVM<Fn, js::AsyncFunctionResolve>(lir);
}
// Call into the VM to perform the await operation on an async generator.
void CodeGenerator::visitAsyncAwait(LAsyncAwait* lir) {
  ValueOperand value = ToValue(lir, LAsyncAwait::ValueIndex);
  Register generator = ToRegister(lir->generator());

  pushArg(value);
  pushArg(generator);

  using Fn =
      JSObject* (*)(JSContext* cx, Handle<AsyncFunctionGeneratorObject*> genObj,
                    HandleValue value);
  callVM<Fn, js::AsyncFunctionAwait>(lir);
}
// Ask the VM whether the awaited value lets us skip the microtask suspension.
void CodeGenerator::visitCanSkipAwait(LCanSkipAwait* lir) {
  ValueOperand value = ToValue(lir, LCanSkipAwait::ValueIndex);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
  callVM<Fn, js::CanSkipAwait>(lir);
}
// When the skip-await check succeeded, call the VM to extract the awaited
// value; otherwise forward the input value unchanged.
void CodeGenerator::visitMaybeExtractAwaitValue(LMaybeExtractAwaitValue* lir) {
  ValueOperand value = ToValue(lir, LMaybeExtractAwaitValue::ValueIndex);
  ValueOperand output = ToOutValue(lir);
  Register canSkip = ToRegister(lir->canSkip());

  Label cantExtract, finished;
  masm.branchIfFalseBool(canSkip, &cantExtract);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
  callVM<Fn, js::ExtractAwaitValue>(lir);
  masm.jump(&finished);
  masm.bind(&cantExtract);

  masm.moveValue(value, output);

  masm.bind(&finished);
}
// Debug-only VM call verifying a value is permitted in self-hosted code.
void CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins) {
  ValueOperand checkValue = ToValue(ins, LDebugCheckSelfHosted::ValueIndex);
  pushArg(checkValue);
  using Fn = bool (*)(JSContext*, HandleValue);
  callVM<Fn, js::Debug_CheckSelfHosted>(ins);
}
// Implement Math.random() inline using the realm's XorShift128+ generator.
void CodeGenerator::visitRandom(LRandom* ins) {
  using mozilla::non_crypto::XorShift128PlusRNG;

  FloatRegister output = ToFloatRegister(ins->output());
  Register rngReg = ToRegister(ins->temp0());

  Register64 temp1 = ToRegister64(ins->temp1());
  Register64 temp2 = ToRegister64(ins->temp2());

  const XorShift128PlusRNG* rng = gen->realm->addressOfRandomNumberGenerator();
  masm.movePtr(ImmPtr(rng), rngReg);

  masm.randomDouble(rngReg, output, temp1, temp2);
  // Differential testing needs deterministic output; still advance the RNG
  // above so state evolution matches, then overwrite the result.
  if (js::SupportDifferentialTesting()) {
    masm.loadConstantDouble(0.0, output);
  }
}
// Sign-extend the low 8 or 16 bits of an int32 into the full register.
void CodeGenerator::visitSignExtendInt32(LSignExtendInt32* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  switch (ins->mode()) {
    case MSignExtendInt32::Byte:
      masm.move8SignExtend(input, output);
      break;
    case MSignExtendInt32::Half:
      masm.move16SignExtend(input, output);
      break;
  }
}
// 32-bit rotate left/right; the count may be a compile-time constant
// (masked to 0..31) or a register.
void CodeGenerator::visitRotate(LRotate* ins) {
  MRotate* mir = ins->mir();
  Register input = ToRegister(ins->input());
  Register dest = ToRegister(ins->output());

  const LAllocation* count = ins->count();
  if (count->isConstant()) {
    // Only the low five bits of the count are meaningful for a 32-bit rotate.
    int32_t c = ToInt32(count) & 0x1F;
    if (mir->isLeftRotate()) {
      masm.rotateLeft(Imm32(c), input, dest);
    } else {
      masm.rotateRight(Imm32(c), input, dest);
    }
  } else {
    Register creg = ToRegister(count);
    if (mir->isLeftRotate()) {
      masm.rotateLeft(creg, input, dest);
    } else {
      masm.rotateRight(creg, input, dest);
    }
  }
}
// Out-of-line path for LNaNToZero: reached when the input is NaN (or
// negative zero); loads +0.0 into the output and rejoins.
class OutOfLineNaNToZero : public OutOfLineCodeBase<CodeGenerator> {
  LNaNToZero* lir_;

 public:
  explicit OutOfLineNaNToZero(LNaNToZero* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineNaNToZero(this);
  }
  LNaNToZero* lir() const { return lir_; }
};
// OOL continuation of visitNaNToZero: replace the value with +0.0.
void CodeGenerator::visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool) {
  FloatRegister output = ToFloatRegister(ool->lir()->output());
  masm.loadConstantDouble(0.0, output);
  masm.jump(ool->rejoin());
}
// Canonicalize a double: NaN (and, unless proven absent, -0.0) becomes +0.0.
// Input and output share a register; the OOL path overwrites it with zero.
void CodeGenerator::visitNaNToZero(LNaNToZero* lir) {
  FloatRegister input = ToFloatRegister(lir->input());

  OutOfLineNaNToZero* ool = new (alloc()) OutOfLineNaNToZero(lir);
  addOutOfLineCode(ool, lir->mir());

  if (lir->mir()->operandIsNeverNegativeZero()) {
    // Only NaN needs fixing: NaN compares unordered with itself.
    masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
  } else {
    // Catch both -0.0 (== 0.0) and NaN (unordered) with one compare.
    FloatRegister scratch = ToFloatRegister(lir->temp0());
    masm.loadConstantDouble(0.0, scratch);
    masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch,
                      ool->entry());
  }
  masm.bind(ool->rejoin());
}
18004 void CodeGenerator::visitIsPackedArray(LIsPackedArray* lir) {
18005 Register obj = ToRegister(lir->object());
18006 Register output = ToRegister(lir->output());
18007 Register temp = ToRegister(lir->temp0());
18009 masm.setIsPackedArray(obj, output, temp);
18012 void CodeGenerator::visitGuardArrayIsPacked(LGuardArrayIsPacked* lir) {
18013 Register array = ToRegister(lir->array());
18014 Register temp0 = ToRegister(lir->temp0());
18015 Register temp1 = ToRegister(lir->temp1());
18017 Label bail;
18018 masm.branchArrayIsNotPacked(array, temp0, temp1, &bail);
18019 bailoutFrom(&bail, lir->snapshot());
// Object.getPrototypeOf: load the proto slot inline, falling back to a VM
// call only for lazy (proxy) protos.
void CodeGenerator::visitGetPrototypeOf(LGetPrototypeOf* lir) {
  Register target = ToRegister(lir->target());
  ValueOperand out = ToOutValue(lir);
  Register scratch = out.scratchReg();

  using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, jit::GetPrototypeOf>(lir, ArgList(target),
                                                          StoreValueTo(out));

  // The proto slot encodes: 0 = null proto, 1 = lazy proto, else real object.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  masm.loadObjProto(target, scratch);

  Label hasProto;
  masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);

  // Call into the VM for lazy prototypes.
  masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), ool->entry());

  // Remaining case: slot value 0, i.e. a null prototype.
  masm.moveValue(NullValue(), out);
  masm.jump(ool->rejoin());

  masm.bind(&hasProto);
  masm.tagValue(JSVAL_TYPE_OBJECT, scratch, out);

  masm.bind(ool->rejoin());
}
18050 void CodeGenerator::visitObjectWithProto(LObjectWithProto* lir) {
18051 pushArg(ToValue(lir, LObjectWithProto::PrototypeIndex));
18053 using Fn = PlainObject* (*)(JSContext*, HandleValue);
18054 callVM<Fn, js::ObjectWithProtoOperation>(lir);
18057 void CodeGenerator::visitObjectStaticProto(LObjectStaticProto* lir) {
18058 Register obj = ToRegister(lir->input());
18059 Register output = ToRegister(lir->output());
18061 masm.loadObjProto(obj, output);
18063 #ifdef DEBUG
18064 // We shouldn't encounter a null or lazy proto.
18065 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
18067 Label done;
18068 masm.branchPtr(Assembler::Above, output, ImmWord(1), &done);
18069 masm.assumeUnreachable("Unexpected null or lazy proto in MObjectStaticProto");
18070 masm.bind(&done);
18071 #endif
18074 void CodeGenerator::visitBuiltinObject(LBuiltinObject* lir) {
18075 pushArg(Imm32(static_cast<int32_t>(lir->mir()->builtinObjectKind())));
18077 using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
18078 callVM<Fn, js::BuiltinObjectOperation>(lir);
// JSOp::SuperFun: the super "function" is the prototype of the callee.
// Returns the proto boxed as an object value, or |null| for a null proto.
void CodeGenerator::visitSuperFunction(LSuperFunction* lir) {
  Register callee = ToRegister(lir->callee());
  ValueOperand out = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

#ifdef DEBUG
  Label classCheckDone;
  masm.branchTestObjIsFunction(Assembler::Equal, callee, temp, callee,
                               &classCheckDone);
  masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
  masm.bind(&classCheckDone);
#endif

  // Load prototype of callee
  masm.loadObjProto(callee, temp);

#ifdef DEBUG
  // We won't encounter a lazy proto, because |callee| is guaranteed to be a
  // JSFunction and only proxy objects can have a lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label proxyCheckDone;
  masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
  masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
  masm.bind(&proxyCheckDone);
#endif

  Label nullProto, done;
  masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);

  // Box prototype and return
  masm.tagValue(JSVAL_TYPE_OBJECT, temp, out);
  masm.jump(&done);

  masm.bind(&nullProto);
  masm.moveValue(NullValue(), out);

  masm.bind(&done);
}
18121 void CodeGenerator::visitInitHomeObject(LInitHomeObject* lir) {
18122 Register func = ToRegister(lir->function());
18123 ValueOperand homeObject = ToValue(lir, LInitHomeObject::HomeObjectIndex);
18125 masm.assertFunctionIsExtended(func);
18127 Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
18129 emitPreBarrier(addr);
18130 masm.storeValue(homeObject, addr);
18133 void CodeGenerator::visitIsTypedArrayConstructor(
18134 LIsTypedArrayConstructor* lir) {
18135 Register object = ToRegister(lir->object());
18136 Register output = ToRegister(lir->output());
18138 masm.setIsDefinitelyTypedArrayConstructor(object, output);
18141 void CodeGenerator::visitLoadValueTag(LLoadValueTag* lir) {
18142 ValueOperand value = ToValue(lir, LLoadValueTag::ValueIndex);
18143 Register output = ToRegister(lir->output());
18145 Register tag = masm.extractTag(value, output);
18146 if (tag != output) {
18147 masm.mov(tag, output);
// Guard that two Value tags are unequal. Also bails out when both tags are
// number tags, because int32 and double tags differ yet represent the same
// logical type, so tag inequality would be meaningless.
void CodeGenerator::visitGuardTagNotEqual(LGuardTagNotEqual* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());

  bailoutCmp32(Assembler::Equal, lhs, rhs, lir->snapshot());

  // If both lhs and rhs are numbers, can't use tag comparison to do inequality
  // comparison
  Label done;
  masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
  masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
  bailout(lir->snapshot());

  masm.bind(&done);
}
18167 void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
18168 Register object = ToRegister(lir->object());
18169 Register output = ToRegister(lir->output());
18171 masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);
18172 masm.unboxObject(
18173 Address(output, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
18174 output);
// Guard that |object| still has the expected GetterSetter for the given
// property id, using a pure (non-GC, infallible-alloc-free) ABI call; a
// false return bails out of Ion code.
void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
  Register object = ToRegister(lir->object());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());

  // Materialize the jsid and expected GetterSetter* before the call.
  masm.movePropertyKey(lir->mir()->propId(), temp1);
  masm.movePtr(ImmGCPtr(lir->mir()->getterSetter()), temp2);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
                      GetterSetter* getterSetter);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(object);
  masm.passABIArg(temp1);
  masm.passABIArg(temp2);
  masm.callWithABI<Fn, ObjectHasGetterSetterPure>();

  bailoutIfFalseBool(ReturnReg, lir->snapshot());
}
18199 void CodeGenerator::visitGuardIsExtensible(LGuardIsExtensible* lir) {
18200 Register object = ToRegister(lir->object());
18201 Register temp = ToRegister(lir->temp0());
18203 Label bail;
18204 masm.branchIfObjectNotExtensible(object, temp, &bail);
18205 bailoutFrom(&bail, lir->snapshot());
18208 void CodeGenerator::visitGuardInt32IsNonNegative(
18209 LGuardInt32IsNonNegative* lir) {
18210 Register index = ToRegister(lir->index());
18212 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
18215 void CodeGenerator::visitGuardInt32Range(LGuardInt32Range* lir) {
18216 Register input = ToRegister(lir->input());
18218 bailoutCmp32(Assembler::LessThan, input, Imm32(lir->mir()->minimum()),
18219 lir->snapshot());
18220 bailoutCmp32(Assembler::GreaterThan, input, Imm32(lir->mir()->maximum()),
18221 lir->snapshot());
// Guard that |index| does NOT refer to an existing dense element of |object|
// (i.e. it is >= initializedLength or the slot holds the hole magic value);
// otherwise bail out.
void CodeGenerator::visitGuardIndexIsNotDenseElement(
    LGuardIndexIsNotDenseElement* lir) {
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  // Load obj->elements.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  // Ensure index >= initLength or the element is a hole.
  Label notDense;
  Address capacity(temp, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, capacity, spectreTemp, &notDense);

  BaseValueIndex element(temp, index);
  masm.branchTestMagic(Assembler::Equal, element, &notDense);

  // In-bounds, non-hole element: the guard fails.
  bailout(lir->snapshot());

  masm.bind(&notDense);
}
// Guard that storing at |index| is a valid dense-element update or append:
// always allowed when the array length is writable; otherwise the index must
// be below the current length. Bails out otherwise.
void CodeGenerator::visitGuardIndexIsValidUpdateOrAdd(
    LGuardIndexIsValidUpdateOrAdd* lir) {
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  // Load obj->elements.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  Label success;

  // If length is writable, branch to &success.  All indices are writable.
  Address flags(temp, ObjectElements::offsetOfFlags());
  masm.branchTest32(Assembler::Zero, flags,
                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
                    &success);

  // Otherwise, ensure index is in bounds.
  Label bail;
  Address length(temp, ObjectElements::offsetOfLength());
  masm.spectreBoundsCheck32(index, length, spectreTemp, &bail);
  masm.bind(&success);

  bailoutFrom(&bail, lir->snapshot());
}
18274 void CodeGenerator::visitCallAddOrUpdateSparseElement(
18275 LCallAddOrUpdateSparseElement* lir) {
18276 Register object = ToRegister(lir->object());
18277 Register index = ToRegister(lir->index());
18278 ValueOperand value = ToValue(lir, LCallAddOrUpdateSparseElement::ValueIndex);
18280 pushArg(Imm32(lir->mir()->strict()));
18281 pushArg(value);
18282 pushArg(index);
18283 pushArg(object);
18285 using Fn =
18286 bool (*)(JSContext*, Handle<NativeObject*>, int32_t, HandleValue, bool);
18287 callVM<Fn, js::AddOrUpdateSparseElementHelper>(lir);
18290 void CodeGenerator::visitCallGetSparseElement(LCallGetSparseElement* lir) {
18291 Register object = ToRegister(lir->object());
18292 Register index = ToRegister(lir->index());
18294 pushArg(index);
18295 pushArg(object);
18297 using Fn =
18298 bool (*)(JSContext*, Handle<NativeObject*>, int32_t, MutableHandleValue);
18299 callVM<Fn, js::GetSparseElementHelper>(lir);
18302 void CodeGenerator::visitCallNativeGetElement(LCallNativeGetElement* lir) {
18303 Register object = ToRegister(lir->object());
18304 Register index = ToRegister(lir->index());
18306 pushArg(index);
18307 pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
18308 pushArg(object);
18310 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
18311 MutableHandleValue);
18312 callVM<Fn, js::NativeGetElement>(lir);
18315 void CodeGenerator::visitCallNativeGetElementSuper(
18316 LCallNativeGetElementSuper* lir) {
18317 Register object = ToRegister(lir->object());
18318 Register index = ToRegister(lir->index());
18319 ValueOperand receiver =
18320 ToValue(lir, LCallNativeGetElementSuper::ReceiverIndex);
18322 pushArg(index);
18323 pushArg(receiver);
18324 pushArg(object);
18326 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
18327 MutableHandleValue);
18328 callVM<Fn, js::NativeGetElement>(lir);
// Pure ABI call asking whether |object| has a sparse element at |index|.
// A Value-sized stack slot serves as the out-param; on failure the slot is
// popped and we bail out, on success the boolean result is unboxed from it.
void CodeGenerator::visitCallObjectHasSparseElement(
    LCallObjectHasSparseElement* lir) {
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register output = ToRegister(lir->output());

  // Reserve stack space for the result Value and pass its address.
  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(temp1);

  using Fn = bool (*)(JSContext*, NativeObject*, int32_t, Value*);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(object);
  masm.passABIArg(index);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, HasNativeElementPure>();
  masm.storeCallPointerResult(temp0);

  Label bail, ok;
  // Remember framePushed before the failure path pops the slot, so the
  // success path can restore consistent bookkeeping after the bind.
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(temp0, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(&bail);

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

  bailoutFrom(&bail, lir->snapshot());
}
18366 void CodeGenerator::visitBigIntAsIntN(LBigIntAsIntN* ins) {
18367 Register bits = ToRegister(ins->bits());
18368 Register input = ToRegister(ins->input());
18370 pushArg(bits);
18371 pushArg(input);
18373 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
18374 callVM<Fn, jit::BigIntAsIntN>(ins);
// BigInt.asIntN(64, x): reuse the input BigInt when its value already fits
// int64 with a matching sign; otherwise allocate a new BigInt from the
// truncated 64-bit value.
void CodeGenerator::visitBigIntAsIntN64(LBigIntAsIntN64* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  // Optimistically reuse the input; overwritten on the |create| path.
  masm.movePtr(input, output);

  // Load the BigInt value as an int64.
  masm.loadBigInt64(input, temp64);

  // Create a new BigInt when the input exceeds the int64 range.
  masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
                Imm32(64 / BigInt::DigitBits), &create);

  // And create a new BigInt when the value and the BigInt have different signs.
  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &create);
  masm.jump(&done);

  masm.bind(&nonNegative);
  masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &done);

  masm.bind(&create);
  emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);

  masm.bind(&done);
}
// BigInt.asIntN(32, x): reuse the input BigInt when it is a single digit in
// int32 range; otherwise rebuild from the sign-adjusted low 32 bits.
void CodeGenerator::visitBigIntAsIntN32(LBigIntAsIntN32* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  // Optimistically reuse the input; overwritten on the |create| path.
  masm.movePtr(input, output);

  // Load the absolute value of the first digit.
  masm.loadFirstBigIntDigitOrZero(input, temp);

  // If the absolute value exceeds the int32 range, create a new BigInt.
  masm.branchPtr(Assembler::Above, temp, Imm32(INT32_MAX), &create);

  // Also create a new BigInt if we have more than one digit.
  masm.branch32(Assembler::BelowOrEqual,
                Address(input, BigInt::offsetOfLength()), Imm32(1), &done);

  masm.bind(&create);

  // |temp| stores the absolute value, negate it when the sign flag is set.
  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  masm.negPtr(temp);
  masm.bind(&nonNegative);

  masm.move32To64SignExtend(temp, temp64);
  emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);

  masm.bind(&done);
}
18443 void CodeGenerator::visitBigIntAsUintN(LBigIntAsUintN* ins) {
18444 Register bits = ToRegister(ins->bits());
18445 Register input = ToRegister(ins->input());
18447 pushArg(bits);
18448 pushArg(input);
18450 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
18451 callVM<Fn, jit::BigIntAsUintN>(ins);
// BigInt.asUintN(64, x): reuse the input BigInt when it is a non-negative
// value within uint64 range; otherwise allocate a new unsigned BigInt.
void CodeGenerator::visitBigIntAsUintN64(LBigIntAsUintN64* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  // Optimistically reuse the input; overwritten on the |create| path.
  masm.movePtr(input, output);

  // Load the BigInt value as an uint64.
  masm.loadBigInt64(input, temp64);

  // Create a new BigInt when the input exceeds the uint64 range.
  masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
                Imm32(64 / BigInt::DigitBits), &create);

  // And create a new BigInt when the input has the sign flag set.
  masm.branchIfBigIntIsNonNegative(input, &done);

  masm.bind(&create);
  emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);

  masm.bind(&done);
}
// BigInt.asUintN(32, x): reuse the input BigInt when it is a non-negative
// single digit within uint32 range; otherwise rebuild from the sign-adjusted
// low 32 bits, zero-extended.
void CodeGenerator::visitBigIntAsUintN32(LBigIntAsUintN32* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  // Optimistically reuse the input; overwritten on the |create| path.
  masm.movePtr(input, output);

  // Load the absolute value of the first digit.
  masm.loadFirstBigIntDigitOrZero(input, temp);

  // If the absolute value exceeds the uint32 range, create a new BigInt.
  // (On 32-bit platforms a single digit can never exceed uint32.)
#if JS_PUNBOX64
  masm.branchPtr(Assembler::Above, temp, ImmWord(UINT32_MAX), &create);
#endif

  // Also create a new BigInt if we have more than one digit.
  masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
                Imm32(1), &create);

  // And create a new BigInt when the input has the sign flag set.
  masm.branchIfBigIntIsNonNegative(input, &done);

  masm.bind(&create);

  // |temp| stores the absolute value, negate it when the sign flag is set.
  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  masm.negPtr(temp);
  masm.bind(&nonNegative);

  masm.move32To64ZeroExtend(temp, temp64);
  emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);

  masm.bind(&done);
}
18519 void CodeGenerator::visitGuardNonGCThing(LGuardNonGCThing* ins) {
18520 ValueOperand input = ToValue(ins, LGuardNonGCThing::InputIndex);
18522 Label bail;
18523 masm.branchTestGCThing(Assembler::Equal, input, &bail);
18524 bailoutFrom(&bail, ins->snapshot());
18527 void CodeGenerator::visitToHashableNonGCThing(LToHashableNonGCThing* ins) {
18528 ValueOperand input = ToValue(ins, LToHashableNonGCThing::InputIndex);
18529 FloatRegister tempFloat = ToFloatRegister(ins->temp0());
18530 ValueOperand output = ToOutValue(ins);
18532 masm.toHashableNonGCThing(input, output, tempFloat);
// Canonicalize a string for hashing: atoms pass through unchanged; anything
// else is atomized via an out-of-line VM call.
void CodeGenerator::visitToHashableString(LToHashableString* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  auto* ool = oolCallVM<Fn, js::AtomizeString>(ins, ArgList(input),
                                               StoreRegisterTo(output));

  // Fast path: already an atom (ATOM_BIT set in the string's flags).
  masm.branchTest32(Assembler::Zero, Address(input, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), ool->entry());
  masm.movePtr(input, output);
  masm.bind(ool->rejoin());
}
// Canonicalize an arbitrary value for hashing. toHashableValue handles the
// inline cases itself and jumps to the OOL atomize call for non-atom
// strings; the string pointer is communicated through the output's scratch
// register.
void CodeGenerator::visitToHashableValue(LToHashableValue* ins) {
  ValueOperand input = ToValue(ins, LToHashableValue::InputIndex);
  FloatRegister tempFloat = ToFloatRegister(ins->temp0());
  ValueOperand output = ToOutValue(ins);

  Register str = output.scratchReg();

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  auto* ool =
      oolCallVM<Fn, js::AtomizeString>(ins, ArgList(str), StoreRegisterTo(str));

  masm.toHashableValue(input, output, tempFloat, ool->entry(), ool->rejoin());
}
18563 void CodeGenerator::visitHashNonGCThing(LHashNonGCThing* ins) {
18564 ValueOperand input = ToValue(ins, LHashNonGCThing::InputIndex);
18565 Register temp = ToRegister(ins->temp0());
18566 Register output = ToRegister(ins->output());
18568 masm.prepareHashNonGCThing(input, output, temp);
18571 void CodeGenerator::visitHashString(LHashString* ins) {
18572 Register input = ToRegister(ins->input());
18573 Register temp = ToRegister(ins->temp0());
18574 Register output = ToRegister(ins->output());
18576 masm.prepareHashString(input, output, temp);
18579 void CodeGenerator::visitHashSymbol(LHashSymbol* ins) {
18580 Register input = ToRegister(ins->input());
18581 Register output = ToRegister(ins->output());
18583 masm.prepareHashSymbol(input, output);
18586 void CodeGenerator::visitHashBigInt(LHashBigInt* ins) {
18587 Register input = ToRegister(ins->input());
18588 Register temp0 = ToRegister(ins->temp0());
18589 Register temp1 = ToRegister(ins->temp1());
18590 Register temp2 = ToRegister(ins->temp2());
18591 Register output = ToRegister(ins->output());
18593 masm.prepareHashBigInt(input, output, temp0, temp1, temp2);
18596 void CodeGenerator::visitHashObject(LHashObject* ins) {
18597 Register setObj = ToRegister(ins->setObject());
18598 ValueOperand input = ToValue(ins, LHashObject::InputIndex);
18599 Register temp0 = ToRegister(ins->temp0());
18600 Register temp1 = ToRegister(ins->temp1());
18601 Register temp2 = ToRegister(ins->temp2());
18602 Register temp3 = ToRegister(ins->temp3());
18603 Register output = ToRegister(ins->output());
18605 masm.prepareHashObject(setObj, input, output, temp0, temp1, temp2, temp3);
18608 void CodeGenerator::visitHashValue(LHashValue* ins) {
18609 Register setObj = ToRegister(ins->setObject());
18610 ValueOperand input = ToValue(ins, LHashValue::InputIndex);
18611 Register temp0 = ToRegister(ins->temp0());
18612 Register temp1 = ToRegister(ins->temp1());
18613 Register temp2 = ToRegister(ins->temp2());
18614 Register temp3 = ToRegister(ins->temp3());
18615 Register output = ToRegister(ins->output());
18617 masm.prepareHashValue(setObj, input, output, temp0, temp1, temp2, temp3);
18620 void CodeGenerator::visitSetObjectHasNonBigInt(LSetObjectHasNonBigInt* ins) {
18621 Register setObj = ToRegister(ins->setObject());
18622 ValueOperand input = ToValue(ins, LSetObjectHasNonBigInt::InputIndex);
18623 Register hash = ToRegister(ins->hash());
18624 Register temp0 = ToRegister(ins->temp0());
18625 Register temp1 = ToRegister(ins->temp1());
18626 Register output = ToRegister(ins->output());
18628 masm.setObjectHasNonBigInt(setObj, input, hash, output, temp0, temp1);
18631 void CodeGenerator::visitSetObjectHasBigInt(LSetObjectHasBigInt* ins) {
18632 Register setObj = ToRegister(ins->setObject());
18633 ValueOperand input = ToValue(ins, LSetObjectHasBigInt::InputIndex);
18634 Register hash = ToRegister(ins->hash());
18635 Register temp0 = ToRegister(ins->temp0());
18636 Register temp1 = ToRegister(ins->temp1());
18637 Register temp2 = ToRegister(ins->temp2());
18638 Register temp3 = ToRegister(ins->temp3());
18639 Register output = ToRegister(ins->output());
18641 masm.setObjectHasBigInt(setObj, input, hash, output, temp0, temp1, temp2,
18642 temp3);
18645 void CodeGenerator::visitSetObjectHasValue(LSetObjectHasValue* ins) {
18646 Register setObj = ToRegister(ins->setObject());
18647 ValueOperand input = ToValue(ins, LSetObjectHasValue::InputIndex);
18648 Register hash = ToRegister(ins->hash());
18649 Register temp0 = ToRegister(ins->temp0());
18650 Register temp1 = ToRegister(ins->temp1());
18651 Register temp2 = ToRegister(ins->temp2());
18652 Register temp3 = ToRegister(ins->temp3());
18653 Register output = ToRegister(ins->output());
18655 masm.setObjectHasValue(setObj, input, hash, output, temp0, temp1, temp2,
18656 temp3);
18659 void CodeGenerator::visitSetObjectHasValueVMCall(
18660 LSetObjectHasValueVMCall* ins) {
18661 pushArg(ToValue(ins, LSetObjectHasValueVMCall::InputIndex));
18662 pushArg(ToRegister(ins->setObject()));
18664 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
18665 callVM<Fn, jit::SetObjectHas>(ins);
18668 void CodeGenerator::visitSetObjectSize(LSetObjectSize* ins) {
18669 Register setObj = ToRegister(ins->setObject());
18670 Register output = ToRegister(ins->output());
18672 masm.loadSetObjectSize(setObj, output);
18675 void CodeGenerator::visitMapObjectHasNonBigInt(LMapObjectHasNonBigInt* ins) {
18676 Register mapObj = ToRegister(ins->mapObject());
18677 ValueOperand input = ToValue(ins, LMapObjectHasNonBigInt::InputIndex);
18678 Register hash = ToRegister(ins->hash());
18679 Register temp0 = ToRegister(ins->temp0());
18680 Register temp1 = ToRegister(ins->temp1());
18681 Register output = ToRegister(ins->output());
18683 masm.mapObjectHasNonBigInt(mapObj, input, hash, output, temp0, temp1);
18686 void CodeGenerator::visitMapObjectHasBigInt(LMapObjectHasBigInt* ins) {
18687 Register mapObj = ToRegister(ins->mapObject());
18688 ValueOperand input = ToValue(ins, LMapObjectHasBigInt::InputIndex);
18689 Register hash = ToRegister(ins->hash());
18690 Register temp0 = ToRegister(ins->temp0());
18691 Register temp1 = ToRegister(ins->temp1());
18692 Register temp2 = ToRegister(ins->temp2());
18693 Register temp3 = ToRegister(ins->temp3());
18694 Register output = ToRegister(ins->output());
18696 masm.mapObjectHasBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
18697 temp3);
18700 void CodeGenerator::visitMapObjectHasValue(LMapObjectHasValue* ins) {
18701 Register mapObj = ToRegister(ins->mapObject());
18702 ValueOperand input = ToValue(ins, LMapObjectHasValue::InputIndex);
18703 Register hash = ToRegister(ins->hash());
18704 Register temp0 = ToRegister(ins->temp0());
18705 Register temp1 = ToRegister(ins->temp1());
18706 Register temp2 = ToRegister(ins->temp2());
18707 Register temp3 = ToRegister(ins->temp3());
18708 Register output = ToRegister(ins->output());
18710 masm.mapObjectHasValue(mapObj, input, hash, output, temp0, temp1, temp2,
18711 temp3);
18714 void CodeGenerator::visitMapObjectHasValueVMCall(
18715 LMapObjectHasValueVMCall* ins) {
18716 pushArg(ToValue(ins, LMapObjectHasValueVMCall::InputIndex));
18717 pushArg(ToRegister(ins->mapObject()));
18719 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
18720 callVM<Fn, jit::MapObjectHas>(ins);
18723 void CodeGenerator::visitMapObjectGetNonBigInt(LMapObjectGetNonBigInt* ins) {
18724 Register mapObj = ToRegister(ins->mapObject());
18725 ValueOperand input = ToValue(ins, LMapObjectGetNonBigInt::InputIndex);
18726 Register hash = ToRegister(ins->hash());
18727 Register temp0 = ToRegister(ins->temp0());
18728 Register temp1 = ToRegister(ins->temp1());
18729 ValueOperand output = ToOutValue(ins);
18731 masm.mapObjectGetNonBigInt(mapObj, input, hash, output, temp0, temp1,
18732 output.scratchReg());
18735 void CodeGenerator::visitMapObjectGetBigInt(LMapObjectGetBigInt* ins) {
18736 Register mapObj = ToRegister(ins->mapObject());
18737 ValueOperand input = ToValue(ins, LMapObjectGetBigInt::InputIndex);
18738 Register hash = ToRegister(ins->hash());
18739 Register temp0 = ToRegister(ins->temp0());
18740 Register temp1 = ToRegister(ins->temp1());
18741 Register temp2 = ToRegister(ins->temp2());
18742 Register temp3 = ToRegister(ins->temp3());
18743 ValueOperand output = ToOutValue(ins);
18745 masm.mapObjectGetBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
18746 temp3, output.scratchReg());
18749 void CodeGenerator::visitMapObjectGetValue(LMapObjectGetValue* ins) {
18750 Register mapObj = ToRegister(ins->mapObject());
18751 ValueOperand input = ToValue(ins, LMapObjectGetValue::InputIndex);
18752 Register hash = ToRegister(ins->hash());
18753 Register temp0 = ToRegister(ins->temp0());
18754 Register temp1 = ToRegister(ins->temp1());
18755 Register temp2 = ToRegister(ins->temp2());
18756 Register temp3 = ToRegister(ins->temp3());
18757 ValueOperand output = ToOutValue(ins);
18759 masm.mapObjectGetValue(mapObj, input, hash, output, temp0, temp1, temp2,
18760 temp3, output.scratchReg());
18763 void CodeGenerator::visitMapObjectGetValueVMCall(
18764 LMapObjectGetValueVMCall* ins) {
18765 pushArg(ToValue(ins, LMapObjectGetValueVMCall::InputIndex));
18766 pushArg(ToRegister(ins->mapObject()));
18768 using Fn =
18769 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
18770 callVM<Fn, jit::MapObjectGet>(ins);
18773 void CodeGenerator::visitMapObjectSize(LMapObjectSize* ins) {
18774 Register mapObj = ToRegister(ins->mapObject());
18775 Register output = ToRegister(ins->output());
18777 masm.loadMapObjectSize(mapObj, output);
18780 template <size_t NumDefs>
18781 void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
18782 wasm::JitCallStackArgVector stackArgs;
18783 masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
18784 if (masm.oom()) {
18785 return;
18788 MIonToWasmCall* mir = lir->mir();
18789 const wasm::FuncExport& funcExport = mir->funcExport();
18790 const wasm::FuncType& sig =
18791 mir->instance()->metadata().getFuncExportType(funcExport);
18793 WasmABIArgGenerator abi;
18794 for (size_t i = 0; i < lir->numOperands(); i++) {
18795 MIRType argMir;
18796 switch (sig.args()[i].kind()) {
18797 case wasm::ValType::I32:
18798 case wasm::ValType::I64:
18799 case wasm::ValType::F32:
18800 case wasm::ValType::F64:
18801 argMir = sig.args()[i].toMIRType();
18802 break;
18803 case wasm::ValType::V128:
18804 MOZ_CRASH("unexpected argument type when calling from ion to wasm");
18805 case wasm::ValType::Ref:
18806 // temporarilyUnsupportedReftypeForEntry() restricts args to externref
18807 MOZ_RELEASE_ASSERT(sig.args()[i].refType().isExtern());
18808 // Argument is boxed on the JS side to an anyref, so passed as a
18809 // pointer here.
18810 argMir = sig.args()[i].toMIRType();
18811 break;
18814 ABIArg arg = abi.next(argMir);
18815 switch (arg.kind()) {
18816 case ABIArg::GPR:
18817 case ABIArg::FPU: {
18818 MOZ_ASSERT(ToAnyRegister(lir->getOperand(i)) == arg.reg());
18819 stackArgs.infallibleEmplaceBack(wasm::JitCallStackArg());
18820 break;
18822 case ABIArg::Stack: {
18823 const LAllocation* larg = lir->getOperand(i);
18824 if (larg->isConstant()) {
18825 stackArgs.infallibleEmplaceBack(ToInt32(larg));
18826 } else if (larg->isGeneralReg()) {
18827 stackArgs.infallibleEmplaceBack(ToRegister(larg));
18828 } else if (larg->isFloatReg()) {
18829 stackArgs.infallibleEmplaceBack(ToFloatRegister(larg));
18830 } else {
18831 // Always use the stack pointer here because GenerateDirectCallFromJit
18832 // depends on this.
18833 Address addr = ToAddress<BaseRegForAddress::SP>(larg);
18834 stackArgs.infallibleEmplaceBack(addr);
18836 break;
18838 #ifdef JS_CODEGEN_REGISTER_PAIR
18839 case ABIArg::GPR_PAIR: {
18840 MOZ_CRASH(
18841 "no way to pass i64, and wasm uses hardfp for function calls");
18843 #endif
18844 case ABIArg::Uninitialized: {
18845 MOZ_CRASH("Uninitialized ABIArg kind");
18850 const wasm::ValTypeVector& results = sig.results();
18851 if (results.length() == 0) {
18852 MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
18853 } else {
18854 MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
18855 switch (results[0].kind()) {
18856 case wasm::ValType::I32:
18857 MOZ_ASSERT(lir->mir()->type() == MIRType::Int32);
18858 MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
18859 break;
18860 case wasm::ValType::I64:
18861 MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
18862 MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
18863 break;
18864 case wasm::ValType::F32:
18865 MOZ_ASSERT(lir->mir()->type() == MIRType::Float32);
18866 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnFloat32Reg);
18867 break;
18868 case wasm::ValType::F64:
18869 MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
18870 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
18871 break;
18872 case wasm::ValType::V128:
18873 MOZ_CRASH("unexpected return type when calling from ion to wasm");
18874 case wasm::ValType::Ref:
18875 // The wasm stubs layer unboxes anything that needs to be unboxed
18876 // and leaves it in a Value. A FuncRef/EqRef we could in principle
18877 // leave it as a raw object pointer but for now it complicates the
18878 // API to do so.
18879 MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
18880 break;
18884 WasmInstanceObject* instObj = lir->mir()->instanceObject();
18886 Register scratch = ToRegister(lir->temp());
18888 uint32_t callOffset;
18889 ensureOsiSpace();
18890 GenerateDirectCallFromJit(masm, funcExport, instObj->instance(), stackArgs,
18891 scratch, &callOffset);
18893 // Add the instance object to the constant pool, so it is transferred to
18894 // the owning IonScript and so that it gets traced as long as the IonScript
18895 // lives.
18897 uint32_t unused;
18898 masm.propagateOOM(graph.addConstantToPool(ObjectValue(*instObj), &unused));
18900 markSafepointAt(callOffset, lir);
18903 void CodeGenerator::visitIonToWasmCall(LIonToWasmCall* lir) {
18904 emitIonToWasmCallBase(lir);
18906 void CodeGenerator::visitIonToWasmCallV(LIonToWasmCallV* lir) {
18907 emitIonToWasmCallBase(lir);
18909 void CodeGenerator::visitIonToWasmCallI64(LIonToWasmCallI64* lir) {
18910 emitIonToWasmCallBase(lir);
18913 void CodeGenerator::visitWasmNullConstant(LWasmNullConstant* lir) {
18914 masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
18917 void CodeGenerator::visitWasmFence(LWasmFence* lir) {
18918 MOZ_ASSERT(gen->compilingWasm());
18919 masm.memoryBarrier(MembarFull);
18922 void CodeGenerator::visitWasmAnyRefFromJSValue(LWasmAnyRefFromJSValue* lir) {
18923 ValueOperand input = ToValue(lir, LWasmAnyRefFromJSValue::InputIndex);
18924 Register output = ToRegister(lir->output());
18925 FloatRegister tempFloat = ToFloatRegister(lir->temp0());
18927 using Fn = JSObject* (*)(JSContext* cx, HandleValue value);
18928 OutOfLineCode* oolBoxValue = oolCallVM<Fn, wasm::AnyRef::boxValue>(
18929 lir, ArgList(input), StoreRegisterTo(output));
18930 masm.convertValueToWasmAnyRef(input, output, tempFloat, oolBoxValue->entry());
18931 masm.bind(oolBoxValue->rejoin());
18934 void CodeGenerator::visitWasmAnyRefFromJSObject(LWasmAnyRefFromJSObject* lir) {
18935 Register input = ToRegister(lir->input());
18936 Register output = ToRegister(lir->output());
18937 masm.convertObjectToWasmAnyRef(input, output);
18940 void CodeGenerator::visitWasmAnyRefFromJSString(LWasmAnyRefFromJSString* lir) {
18941 Register input = ToRegister(lir->input());
18942 Register output = ToRegister(lir->output());
18943 masm.convertStringToWasmAnyRef(input, output);
18946 void CodeGenerator::visitWasmNewI31Ref(LWasmNewI31Ref* lir) {
18947 Register value = ToRegister(lir->value());
18948 Register output = ToRegister(lir->output());
18949 masm.truncate32ToWasmI31Ref(value, output);
18952 void CodeGenerator::visitWasmI31RefGet(LWasmI31RefGet* lir) {
18953 Register value = ToRegister(lir->value());
18954 Register output = ToRegister(lir->output());
18955 if (lir->mir()->wideningOp() == wasm::FieldWideningOp::Signed) {
18956 masm.convertWasmI31RefTo32Signed(value, output);
18957 } else {
18958 masm.convertWasmI31RefTo32Unsigned(value, output);
18962 #ifdef FUZZING_JS_FUZZILLI
18963 void CodeGenerator::emitFuzzilliHashDouble(FloatRegister floatDouble,
18964 Register scratch, Register output) {
18965 # ifdef JS_PUNBOX64
18966 Register64 reg64_1(scratch);
18967 Register64 reg64_2(output);
18968 masm.moveDoubleToGPR64(floatDouble, reg64_1);
18969 masm.move64(reg64_1, reg64_2);
18970 masm.rshift64(Imm32(32), reg64_2);
18971 masm.add32(scratch, output);
18972 # else
18973 Register64 reg64(scratch, output);
18974 masm.moveDoubleToGPR64(floatDouble, reg64);
18975 masm.add32(scratch, output);
18976 # endif
// Hash an object for Fuzzilli by calling FuzzilliHashObjectInl in the VM;
// the 32-bit result is stored into |output|.
void CodeGenerator::emitFuzzilliHashObject(LInstruction* lir, Register obj,
                                           Register output) {
  using Fn = void (*)(JSContext* cx, JSObject* obj, uint32_t* out);
  OutOfLineCode* ool = oolCallVM<Fn, FuzzilliHashObjectInl>(
      lir, ArgList(obj), StoreRegisterTo(output));

  // There is no inline fast path: unconditionally take the OOL VM call and
  // fall through at its rejoin point.
  masm.jump(ool->entry());
  masm.bind(ool->rejoin());
}
18989 void CodeGenerator::emitFuzzilliHashBigInt(Register bigInt, Register output) {
18990 LiveRegisterSet volatileRegs(GeneralRegisterSet::All(),
18991 FloatRegisterSet::All());
18992 volatileRegs.takeUnchecked(output);
18993 masm.PushRegsInMask(volatileRegs);
18995 using Fn = uint32_t (*)(BigInt* bigInt);
18996 masm.setupUnalignedABICall(output);
18997 masm.passABIArg(bigInt);
18998 masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
18999 masm.storeCallInt32Result(output);
19001 masm.PopRegsInMask(volatileRegs);
// Compute the Fuzzilli hash of a boxed Value. The tag is tested type by
// type; each type is reduced to a double (then hashed via
// emitFuzzilliHashDouble), an object, or a BigInt (each hashed by its
// helper). Any tag not handled below hashes to 0.
void CodeGenerator::visitFuzzilliHashV(LFuzzilliHashV* ins) {
  MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Value);

  ValueOperand value = ToValue(ins, 0);

  Label isDouble, isObject, isBigInt, done;

  FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
  Register scratch = ToRegister(ins->getTemp(0));
  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

# ifdef JS_PUNBOX64
  // NOTE: on 64-bit, |tagReg| aliases |scratch| (both are getTemp(0)).
  // This is safe because each path below only clobbers |scratch| after its
  // own tag test has succeeded and then jumps away.
  Register tagReg = ToRegister(ins->getTemp(0));
  masm.splitTag(value, tagReg);
# else
  Register tagReg = value.typeReg();
# endif

  Label noBigInt;
  masm.branchTestBigInt(Assembler::NotEqual, tagReg, &noBigInt);
  masm.unboxBigInt(value, scratch);
  masm.jump(&isBigInt);
  masm.bind(&noBigInt);

  Label noObject;
  masm.branchTestObject(Assembler::NotEqual, tagReg, &noObject);
  masm.unboxObject(value, scratch);
  masm.jump(&isObject);
  masm.bind(&noObject);

  // Int32 is hashed through its double representation.
  Label noInt32;
  masm.branchTestInt32(Assembler::NotEqual, tagReg, &noInt32);
  masm.unboxInt32(value, scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noInt32);

  // Null hashes as the double 1.0.
  Label noNull;
  masm.branchTestNull(Assembler::NotEqual, tagReg, &noNull);
  masm.move32(Imm32(1), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noNull);

  // Undefined hashes as the double 2.0.
  Label noUndefined;
  masm.branchTestUndefined(Assembler::NotEqual, tagReg, &noUndefined);
  masm.move32(Imm32(2), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noUndefined);

  // Booleans hash as the doubles 3.0 (false) and 4.0 (true).
  Label noBoolean;
  masm.branchTestBoolean(Assembler::NotEqual, tagReg, &noBoolean);
  masm.unboxBoolean(value, scratch);
  masm.add32(Imm32(3), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noBoolean);

  // Doubles are canonicalized (when deterministic mode requires it) before
  // hashing so equal values hash equally.
  Label noDouble;
  masm.branchTestDouble(Assembler::NotEqual, tagReg, &noDouble);
  masm.unboxDouble(value, scratchFloat);
  masm.canonicalizeDoubleIfDeterministic(scratchFloat);

  masm.jump(&isDouble);
  masm.bind(&noDouble);
  // Every other tag falls through here and hashes to 0.
  masm.move32(Imm32(0), output);
  masm.jump(&done);

  masm.bind(&isBigInt);
  emitFuzzilliHashBigInt(scratch, output);
  masm.jump(&done);

  masm.bind(&isObject);
  emitFuzzilliHashObject(ins, scratch, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  emitFuzzilliHashDouble(scratchFloat, scratch, output);

  masm.bind(&done);
}
19088 void CodeGenerator::visitFuzzilliHashT(LFuzzilliHashT* ins) {
19089 const LAllocation* value = ins->value();
19090 MIRType mirType = ins->mir()->getOperand(0)->type();
19092 FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
19093 Register scratch = ToRegister(ins->getTemp(0));
19094 Register output = ToRegister(ins->output());
19095 MOZ_ASSERT(scratch != output);
19097 if (mirType == MIRType::Object) {
19098 MOZ_ASSERT(value->isGeneralReg());
19099 masm.mov(value->toGeneralReg()->reg(), scratch);
19100 emitFuzzilliHashObject(ins, scratch, output);
19101 } else if (mirType == MIRType::BigInt) {
19102 MOZ_ASSERT(value->isGeneralReg());
19103 masm.mov(value->toGeneralReg()->reg(), scratch);
19104 emitFuzzilliHashBigInt(scratch, output);
19105 } else if (mirType == MIRType::Double) {
19106 MOZ_ASSERT(value->isFloatReg());
19107 masm.moveDouble(value->toFloatReg()->reg(), scratchFloat);
19108 masm.canonicalizeDoubleIfDeterministic(scratchFloat);
19109 emitFuzzilliHashDouble(scratchFloat, scratch, output);
19110 } else if (mirType == MIRType::Float32) {
19111 MOZ_ASSERT(value->isFloatReg());
19112 masm.convertFloat32ToDouble(value->toFloatReg()->reg(), scratchFloat);
19113 masm.canonicalizeDoubleIfDeterministic(scratchFloat);
19114 emitFuzzilliHashDouble(scratchFloat, scratch, output);
19115 } else if (mirType == MIRType::Int32) {
19116 MOZ_ASSERT(value->isGeneralReg());
19117 masm.mov(value->toGeneralReg()->reg(), scratch);
19118 masm.convertInt32ToDouble(scratch, scratchFloat);
19119 emitFuzzilliHashDouble(scratchFloat, scratch, output);
19120 } else if (mirType == MIRType::Null) {
19121 MOZ_ASSERT(value->isBogus());
19122 masm.move32(Imm32(1), scratch);
19123 masm.convertInt32ToDouble(scratch, scratchFloat);
19124 emitFuzzilliHashDouble(scratchFloat, scratch, output);
19125 } else if (mirType == MIRType::Undefined) {
19126 MOZ_ASSERT(value->isBogus());
19127 masm.move32(Imm32(2), scratch);
19128 masm.convertInt32ToDouble(scratch, scratchFloat);
19129 emitFuzzilliHashDouble(scratchFloat, scratch, output);
19130 } else if (mirType == MIRType::Boolean) {
19131 MOZ_ASSERT(value->isGeneralReg());
19132 masm.mov(value->toGeneralReg()->reg(), scratch);
19133 masm.add32(Imm32(3), scratch);
19134 masm.convertInt32ToDouble(scratch, scratchFloat);
19135 emitFuzzilliHashDouble(scratchFloat, scratch, output);
19136 } else {
19137 MOZ_CRASH("unexpected type");
19141 void CodeGenerator::visitFuzzilliHashStore(LFuzzilliHashStore* ins) {
19142 const LAllocation* value = ins->value();
19143 MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Int32);
19144 MOZ_ASSERT(value->isGeneralReg());
19146 Register scratchJSContext = ToRegister(ins->getTemp(0));
19147 Register scratch = ToRegister(ins->getTemp(1));
19149 masm.loadJSContext(scratchJSContext);
19151 // stats
19152 Address addrExecHashInputs(scratchJSContext,
19153 offsetof(JSContext, executionHashInputs));
19154 masm.load32(addrExecHashInputs, scratch);
19155 masm.add32(Imm32(1), scratch);
19156 masm.store32(scratch, addrExecHashInputs);
19158 Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
19159 masm.load32(addrExecHash, scratch);
19160 masm.add32(value->toGeneralReg()->reg(), scratch);
19161 masm.rotateLeft(Imm32(1), scratch, scratch);
19162 masm.store32(scratch, addrExecHash);
19164 #endif
// Compile-time guard: fail the build if anyone adds a virtual method to
// CodeGenerator (a polymorphic class would grow a vtable).
static_assert(!std::is_polymorphic_v<CodeGenerator>,
              "CodeGenerator should not have any virtual methods");
19169 } // namespace jit
19170 } // namespace js