Bug 1873042 - Part 8: Optimise single- and two-character strings for LSubstr. r=jandem
[gecko.git] / js / src / jit / CodeGenerator.cpp
blob0b73162ae9fbb7494de3a95969d94014525d888c
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/CodeGenerator.h"
9 #include "mozilla/Assertions.h"
10 #include "mozilla/Casting.h"
11 #include "mozilla/DebugOnly.h"
12 #include "mozilla/EndianUtils.h"
13 #include "mozilla/EnumeratedArray.h"
14 #include "mozilla/EnumeratedRange.h"
15 #include "mozilla/EnumSet.h"
16 #include "mozilla/IntegerTypeTraits.h"
17 #include "mozilla/Latin1.h"
18 #include "mozilla/MathAlgorithms.h"
19 #include "mozilla/ScopeExit.h"
20 #include "mozilla/SIMD.h"
22 #include <limits>
23 #include <type_traits>
24 #include <utility>
26 #include "jslibmath.h"
27 #include "jsmath.h"
28 #include "jsnum.h"
30 #include "builtin/MapObject.h"
31 #include "builtin/RegExp.h"
32 #include "builtin/String.h"
33 #include "irregexp/RegExpTypes.h"
34 #include "jit/ABIArgGenerator.h"
35 #include "jit/CompileInfo.h"
36 #include "jit/InlineScriptTree.h"
37 #include "jit/Invalidation.h"
38 #include "jit/IonGenericCallStub.h"
39 #include "jit/IonIC.h"
40 #include "jit/IonScript.h"
41 #include "jit/JitcodeMap.h"
42 #include "jit/JitFrames.h"
43 #include "jit/JitRuntime.h"
44 #include "jit/JitSpewer.h"
45 #include "jit/JitZone.h"
46 #include "jit/Linker.h"
47 #include "jit/MIRGenerator.h"
48 #include "jit/MoveEmitter.h"
49 #include "jit/RangeAnalysis.h"
50 #include "jit/RegExpStubConstants.h"
51 #include "jit/SafepointIndex.h"
52 #include "jit/SharedICHelpers.h"
53 #include "jit/SharedICRegisters.h"
54 #include "jit/VMFunctions.h"
55 #include "jit/WarpSnapshot.h"
56 #include "js/ColumnNumber.h" // JS::LimitedColumnNumberOneOrigin
57 #include "js/experimental/JitInfo.h" // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
58 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
59 #include "js/RegExpFlags.h" // JS::RegExpFlag
60 #include "js/ScalarType.h" // js::Scalar::Type
61 #include "proxy/DOMProxy.h"
62 #include "proxy/ScriptedProxyHandler.h"
63 #include "util/CheckedArithmetic.h"
64 #include "util/Unicode.h"
65 #include "vm/ArrayBufferViewObject.h"
66 #include "vm/AsyncFunction.h"
67 #include "vm/AsyncIteration.h"
68 #include "vm/BuiltinObjectKind.h"
69 #include "vm/FunctionFlags.h" // js::FunctionFlags
70 #include "vm/Interpreter.h"
71 #include "vm/JSAtomUtils.h" // AtomizeString
72 #include "vm/MatchPairs.h"
73 #include "vm/RegExpObject.h"
74 #include "vm/RegExpStatics.h"
75 #include "vm/StaticStrings.h"
76 #include "vm/StringObject.h"
77 #include "vm/StringType.h"
78 #include "vm/TypedArrayObject.h"
79 #include "wasm/WasmCodegenConstants.h"
80 #include "wasm/WasmValType.h"
81 #ifdef MOZ_VTUNE
82 # include "vtune/VTuneWrapper.h"
83 #endif
84 #include "wasm/WasmBinary.h"
85 #include "wasm/WasmGC.h"
86 #include "wasm/WasmGcObject.h"
87 #include "wasm/WasmStubs.h"
89 #include "builtin/Boolean-inl.h"
90 #include "jit/MacroAssembler-inl.h"
91 #include "jit/shared/CodeGenerator-shared-inl.h"
92 #include "jit/TemplateObject-inl.h"
93 #include "jit/VMFunctionList-inl.h"
94 #include "vm/JSScript-inl.h"
95 #include "wasm/WasmInstance-inl.h"
97 using namespace js;
98 using namespace js::jit;
100 using JS::GenericNaN;
101 using mozilla::AssertedCast;
102 using mozilla::DebugOnly;
103 using mozilla::FloatingPoint;
104 using mozilla::Maybe;
105 using mozilla::NegativeInfinity;
106 using mozilla::PositiveInfinity;
108 using JS::ExpandoAndGeneration;
110 namespace js {
111 namespace jit {
113 #ifdef CHECK_OSIPOINT_REGISTERS
114 template <class Op>
115 static void HandleRegisterDump(Op op, MacroAssembler& masm,
116 LiveRegisterSet liveRegs, Register activation,
117 Register scratch) {
118 const size_t baseOffset = JitActivation::offsetOfRegs();
120 // Handle live GPRs.
121 for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
122 Register reg = *iter;
123 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
125 if (reg == activation) {
126 // To use the original value of the activation register (that's
127 // now on top of the stack), we need the scratch register.
128 masm.push(scratch);
129 masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
130 op(scratch, dump);
131 masm.pop(scratch);
132 } else {
133 op(reg, dump);
137 // Handle live FPRs.
138 for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
139 FloatRegister reg = *iter;
140 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
141 op(reg, dump);
145 class StoreOp {
146 MacroAssembler& masm;
148 public:
149 explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
151 void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
152 void operator()(FloatRegister reg, Address dump) {
153 if (reg.isDouble()) {
154 masm.storeDouble(reg, dump);
155 } else if (reg.isSingle()) {
156 masm.storeFloat32(reg, dump);
157 } else if (reg.isSimd128()) {
158 MOZ_CRASH("Unexpected case for SIMD");
159 } else {
160 MOZ_CRASH("Unexpected register type.");
165 class VerifyOp {
166 MacroAssembler& masm;
167 Label* failure_;
169 public:
170 VerifyOp(MacroAssembler& masm, Label* failure)
171 : masm(masm), failure_(failure) {}
173 void operator()(Register reg, Address dump) {
174 masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
176 void operator()(FloatRegister reg, Address dump) {
177 if (reg.isDouble()) {
178 ScratchDoubleScope scratch(masm);
179 masm.loadDouble(dump, scratch);
180 masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
181 } else if (reg.isSingle()) {
182 ScratchFloat32Scope scratch(masm);
183 masm.loadFloat32(dump, scratch);
184 masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
185 } else if (reg.isSimd128()) {
186 MOZ_CRASH("Unexpected case for SIMD");
187 } else {
188 MOZ_CRASH("Unexpected register type.");
193 void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
194 // Ensure the live registers stored by callVM did not change between
195 // the call and this OsiPoint. Try-catch relies on this invariant.
197 // Load pointer to the JitActivation in a scratch register.
198 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
199 Register scratch = allRegs.takeAny();
200 masm.push(scratch);
201 masm.loadJitActivation(scratch);
203 // If we should not check registers (because the instruction did not call
204 // into the VM, or a GC happened), we're done.
205 Label failure, done;
206 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
207 masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
209 // Having more than one VM function call made in one visit function at
210 // runtime is a sec-ciritcal error, because if we conservatively assume that
211 // one of the function call can re-enter Ion, then the invalidation process
212 // will potentially add a call at a random location, by patching the code
213 // before the return address.
214 masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
216 // Set checkRegs to 0, so that we don't try to verify registers after we
217 // return from this script to the caller.
218 masm.store32(Imm32(0), checkRegs);
220 // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
221 // temps after calling into the VM. This is fine because no other
222 // instructions (including this OsiPoint) will depend on them. Also
223 // backtracking can also use the same register for an input and an output.
224 // These are marked as clobbered and shouldn't get checked.
225 LiveRegisterSet liveRegs;
226 liveRegs.set() = RegisterSet::Intersect(
227 safepoint->liveRegs().set(),
228 RegisterSet::Not(safepoint->clobberedRegs().set()));
230 VerifyOp op(masm, &failure);
231 HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
233 masm.jump(&done);
235 // Do not profile the callWithABI that occurs below. This is to avoid a
236 // rare corner case that occurs when profiling interacts with itself:
238 // When slow profiling assertions are turned on, FunctionBoundary ops
239 // (which update the profiler pseudo-stack) may emit a callVM, which
240 // forces them to have an osi point associated with them. The
241 // FunctionBoundary for inline function entry is added to the caller's
242 // graph with a PC from the caller's code, but during codegen it modifies
243 // Gecko Profiler instrumentation to add the callee as the current top-most
244 // script. When codegen gets to the OSIPoint, and the callWithABI below is
245 // emitted, the codegen thinks that the current frame is the callee, but
246 // the PC it's using from the OSIPoint refers to the caller. This causes
247 // the profiler instrumentation of the callWithABI below to ASSERT, since
248 // the script and pc are mismatched. To avoid this, we simply omit
249 // instrumentation for these callWithABIs.
251 // Any live register captured by a safepoint (other than temp registers)
252 // must remain unchanged between the call and the OsiPoint instruction.
253 masm.bind(&failure);
254 masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
256 masm.bind(&done);
257 masm.pop(scratch);
260 bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
261 if (!checkOsiPointRegisters) {
262 return false;
265 if (safepoint->liveRegs().emptyGeneral() &&
266 safepoint->liveRegs().emptyFloat()) {
267 return false; // No registers to check.
270 return true;
273 void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
274 if (!shouldVerifyOsiPointRegs(safepoint)) {
275 return;
278 // Set checkRegs to 0. If we perform a VM call, the instruction
279 // will set it to 1.
280 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
281 Register scratch = allRegs.takeAny();
282 masm.push(scratch);
283 masm.loadJitActivation(scratch);
284 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
285 masm.store32(Imm32(0), checkRegs);
286 masm.pop(scratch);
289 static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
290 // Store a copy of all live registers before performing the call.
291 // When we reach the OsiPoint, we can use this to check nothing
292 // modified them in the meantime.
294 // Load pointer to the JitActivation in a scratch register.
295 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
296 Register scratch = allRegs.takeAny();
297 masm.push(scratch);
298 masm.loadJitActivation(scratch);
300 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
301 masm.add32(Imm32(1), checkRegs);
303 StoreOp op(masm);
304 HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
306 masm.pop(scratch);
308 #endif // CHECK_OSIPOINT_REGISTERS
310 // Before doing any call to Cpp, you should ensure that volatile
311 // registers are evicted by the register allocator.
312 void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins) {
313 TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
314 const VMFunctionData& fun = GetVMFunction(id);
316 // Stack is:
317 // ... frame ...
318 // [args]
319 #ifdef DEBUG
320 MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
321 pushedArgs_ = 0;
322 #endif
324 #ifdef CHECK_OSIPOINT_REGISTERS
325 if (shouldVerifyOsiPointRegs(ins->safepoint())) {
326 StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
328 #endif
330 #ifdef DEBUG
331 if (ins->mirRaw()) {
332 MOZ_ASSERT(ins->mirRaw()->isInstruction());
333 MInstruction* mir = ins->mirRaw()->toInstruction();
334 MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
336 // If this MIR instruction has an overridden AliasSet, set the JitRuntime's
337 // disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
338 // RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
339 // interrupt callbacks can call JS (chrome JS or shell testing functions).
340 bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
341 if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
342 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
343 masm.move32(Imm32(1), ReturnReg);
344 masm.store32(ReturnReg, AbsoluteAddress(addr));
347 #endif
349 // Push an exit frame descriptor.
350 masm.PushFrameDescriptor(FrameType::IonJS);
352 // Call the wrapper function. The wrapper is in charge to unwind the stack
353 // when returning from the call. Failures are handled with exceptions based
354 // on the return value of the C functions. To guard the outcome of the
355 // returned value, use another LIR instruction.
356 ensureOsiSpace();
357 uint32_t callOffset = masm.callJit(code);
358 markSafepointAt(callOffset, ins);
360 #ifdef DEBUG
361 // Reset the disallowArbitraryCode flag after the call.
363 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
364 masm.push(ReturnReg);
365 masm.move32(Imm32(0), ReturnReg);
366 masm.store32(ReturnReg, AbsoluteAddress(addr));
367 masm.pop(ReturnReg);
369 #endif
371 // Pop rest of the exit frame and the arguments left on the stack.
372 int framePop =
373 sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
374 masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
376 // Stack is:
377 // ... frame ...
380 template <typename Fn, Fn fn>
381 void CodeGenerator::callVM(LInstruction* ins) {
382 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
383 callVMInternal(id, ins);
// ArgSeq stores the arguments for an OutOfLineCallVM.
//
// OutOfLineCallVM instances are created with the "oolCallVM" function. The
// third argument of that function is an instance of a class which provides a
// "generate" method in charge of pushing the arguments, with "pushArg", for a
// VMFunction.
//
// Such a list of arguments can be created by using the "ArgList" function,
// which creates one instance of "ArgSeq", where the types of the arguments are
// inferred from the types of the arguments.
//
// The list of arguments must be written in the same order as if you were
// calling the function in C++.
//
// Example:
//   ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
402 template <typename... ArgTypes>
403 class ArgSeq {
404 std::tuple<std::remove_reference_t<ArgTypes>...> args_;
406 template <std::size_t... ISeq>
407 inline void generate(CodeGenerator* codegen,
408 std::index_sequence<ISeq...>) const {
409 // Arguments are pushed in reverse order, from last argument to first
410 // argument.
411 (codegen->pushArg(std::get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
414 public:
415 explicit ArgSeq(ArgTypes&&... args)
416 : args_(std::forward<ArgTypes>(args)...) {}
418 inline void generate(CodeGenerator* codegen) const {
419 generate(codegen, std::index_sequence_for<ArgTypes...>{});
422 #ifdef DEBUG
423 static constexpr size_t numArgs = sizeof...(ArgTypes);
424 #endif
427 template <typename... ArgTypes>
428 inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
429 return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
432 // Store wrappers, to generate the right move of data after the VM call.
434 struct StoreNothing {
435 inline void generate(CodeGenerator* codegen) const {}
436 inline LiveRegisterSet clobbered() const {
437 return LiveRegisterSet(); // No register gets clobbered
441 class StoreRegisterTo {
442 private:
443 Register out_;
445 public:
446 explicit StoreRegisterTo(Register out) : out_(out) {}
448 inline void generate(CodeGenerator* codegen) const {
449 // It's okay to use storePointerResultTo here - the VMFunction wrapper
450 // ensures the upper bytes are zero for bool/int32 return values.
451 codegen->storePointerResultTo(out_);
453 inline LiveRegisterSet clobbered() const {
454 LiveRegisterSet set;
455 set.add(out_);
456 return set;
460 class StoreFloatRegisterTo {
461 private:
462 FloatRegister out_;
464 public:
465 explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
467 inline void generate(CodeGenerator* codegen) const {
468 codegen->storeFloatResultTo(out_);
470 inline LiveRegisterSet clobbered() const {
471 LiveRegisterSet set;
472 set.add(out_);
473 return set;
477 template <typename Output>
478 class StoreValueTo_ {
479 private:
480 Output out_;
482 public:
483 explicit StoreValueTo_(const Output& out) : out_(out) {}
485 inline void generate(CodeGenerator* codegen) const {
486 codegen->storeResultValueTo(out_);
488 inline LiveRegisterSet clobbered() const {
489 LiveRegisterSet set;
490 set.add(out_);
491 return set;
495 template <typename Output>
496 StoreValueTo_<Output> StoreValueTo(const Output& out) {
497 return StoreValueTo_<Output>(out);
500 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
501 class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
502 private:
503 LInstruction* lir_;
504 ArgSeq args_;
505 StoreOutputTo out_;
507 public:
508 OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
509 const StoreOutputTo& out)
510 : lir_(lir), args_(args), out_(out) {}
512 void accept(CodeGenerator* codegen) override {
513 codegen->visitOutOfLineCallVM(this);
516 LInstruction* lir() const { return lir_; }
517 const ArgSeq& args() const { return args_; }
518 const StoreOutputTo& out() const { return out_; }
521 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
522 OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
523 const StoreOutputTo& out) {
524 MOZ_ASSERT(lir->mirRaw());
525 MOZ_ASSERT(lir->mirRaw()->isInstruction());
527 #ifdef DEBUG
528 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
529 const VMFunctionData& fun = GetVMFunction(id);
530 MOZ_ASSERT(fun.explicitArgs == args.numArgs);
531 MOZ_ASSERT(fun.returnsData() !=
532 (std::is_same_v<StoreOutputTo, StoreNothing>));
533 #endif
535 OutOfLineCode* ool = new (alloc())
536 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
537 addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
538 return ool;
541 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
542 void CodeGenerator::visitOutOfLineCallVM(
543 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
544 LInstruction* lir = ool->lir();
546 #ifdef JS_JITSPEW
547 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
548 lir->opName());
549 if (const char* extra = lir->getExtraName()) {
550 JitSpewCont(JitSpew_Codegen, ":%s", extra);
552 JitSpewFin(JitSpew_Codegen);
553 #endif
554 perfSpewer_.recordInstruction(masm, lir);
555 saveLive(lir);
556 ool->args().generate(this);
557 callVM<Fn, fn>(lir);
558 ool->out().generate(this);
559 restoreLiveIgnore(lir, ool->out().clobbered());
560 masm.jump(ool->rejoin());
563 class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
564 private:
565 LInstruction* lir_;
566 size_t cacheIndex_;
567 size_t cacheInfoIndex_;
569 public:
570 OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
571 size_t cacheInfoIndex)
572 : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
574 void bind(MacroAssembler* masm) override {
575 // The binding of the initial jump is done in
576 // CodeGenerator::visitOutOfLineICFallback.
579 size_t cacheIndex() const { return cacheIndex_; }
580 size_t cacheInfoIndex() const { return cacheInfoIndex_; }
581 LInstruction* lir() const { return lir_; }
583 void accept(CodeGenerator* codegen) override {
584 codegen->visitOutOfLineICFallback(this);
588 void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
589 if (cacheIndex == SIZE_MAX) {
590 masm.setOOM();
591 return;
594 DataPtr<IonIC> cache(this, cacheIndex);
595 MInstruction* mir = lir->mirRaw()->toInstruction();
596 cache->setScriptedLocation(mir->block()->info().script(),
597 mir->resumePoint()->pc());
599 Register temp = cache->scratchRegisterForEntryJump();
600 icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
601 masm.jump(Address(temp, 0));
603 MOZ_ASSERT(!icInfo_.empty());
605 OutOfLineICFallback* ool =
606 new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
607 addOutOfLineCode(ool, mir);
609 masm.bind(ool->rejoin());
610 cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
613 void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
614 LInstruction* lir = ool->lir();
615 size_t cacheIndex = ool->cacheIndex();
616 size_t cacheInfoIndex = ool->cacheInfoIndex();
618 DataPtr<IonIC> ic(this, cacheIndex);
620 // Register the location of the OOL path in the IC.
621 ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
623 switch (ic->kind()) {
624 case CacheKind::GetProp:
625 case CacheKind::GetElem: {
626 IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
628 saveLive(lir);
630 pushArg(getPropIC->id());
631 pushArg(getPropIC->value());
632 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
633 pushArg(ImmGCPtr(gen->outerInfo().script()));
635 using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
636 HandleValue, HandleValue, MutableHandleValue);
637 callVM<Fn, IonGetPropertyIC::update>(lir);
639 StoreValueTo(getPropIC->output()).generate(this);
640 restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
642 masm.jump(ool->rejoin());
643 return;
645 case CacheKind::GetPropSuper:
646 case CacheKind::GetElemSuper: {
647 IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
649 saveLive(lir);
651 pushArg(getPropSuperIC->id());
652 pushArg(getPropSuperIC->receiver());
653 pushArg(getPropSuperIC->object());
654 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
655 pushArg(ImmGCPtr(gen->outerInfo().script()));
657 using Fn =
658 bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
659 HandleValue, HandleValue, MutableHandleValue);
660 callVM<Fn, IonGetPropSuperIC::update>(lir);
662 StoreValueTo(getPropSuperIC->output()).generate(this);
663 restoreLiveIgnore(lir,
664 StoreValueTo(getPropSuperIC->output()).clobbered());
666 masm.jump(ool->rejoin());
667 return;
669 case CacheKind::SetProp:
670 case CacheKind::SetElem: {
671 IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
673 saveLive(lir);
675 pushArg(setPropIC->rhs());
676 pushArg(setPropIC->id());
677 pushArg(setPropIC->object());
678 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
679 pushArg(ImmGCPtr(gen->outerInfo().script()));
681 using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
682 HandleObject, HandleValue, HandleValue);
683 callVM<Fn, IonSetPropertyIC::update>(lir);
685 restoreLive(lir);
687 masm.jump(ool->rejoin());
688 return;
690 case CacheKind::GetName: {
691 IonGetNameIC* getNameIC = ic->asGetNameIC();
693 saveLive(lir);
695 pushArg(getNameIC->environment());
696 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
697 pushArg(ImmGCPtr(gen->outerInfo().script()));
699 using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
700 MutableHandleValue);
701 callVM<Fn, IonGetNameIC::update>(lir);
703 StoreValueTo(getNameIC->output()).generate(this);
704 restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
706 masm.jump(ool->rejoin());
707 return;
709 case CacheKind::BindName: {
710 IonBindNameIC* bindNameIC = ic->asBindNameIC();
712 saveLive(lir);
714 pushArg(bindNameIC->environment());
715 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
716 pushArg(ImmGCPtr(gen->outerInfo().script()));
718 using Fn =
719 JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
720 callVM<Fn, IonBindNameIC::update>(lir);
722 StoreRegisterTo(bindNameIC->output()).generate(this);
723 restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
725 masm.jump(ool->rejoin());
726 return;
728 case CacheKind::GetIterator: {
729 IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
731 saveLive(lir);
733 pushArg(getIteratorIC->value());
734 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
735 pushArg(ImmGCPtr(gen->outerInfo().script()));
737 using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
738 HandleValue);
739 callVM<Fn, IonGetIteratorIC::update>(lir);
741 StoreRegisterTo(getIteratorIC->output()).generate(this);
742 restoreLiveIgnore(lir,
743 StoreRegisterTo(getIteratorIC->output()).clobbered());
745 masm.jump(ool->rejoin());
746 return;
748 case CacheKind::OptimizeSpreadCall: {
749 auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();
751 saveLive(lir);
753 pushArg(optimizeSpreadCallIC->value());
754 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
755 pushArg(ImmGCPtr(gen->outerInfo().script()));
757 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
758 HandleValue, MutableHandleValue);
759 callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);
761 StoreValueTo(optimizeSpreadCallIC->output()).generate(this);
762 restoreLiveIgnore(
763 lir, StoreValueTo(optimizeSpreadCallIC->output()).clobbered());
765 masm.jump(ool->rejoin());
766 return;
768 case CacheKind::In: {
769 IonInIC* inIC = ic->asInIC();
771 saveLive(lir);
773 pushArg(inIC->object());
774 pushArg(inIC->key());
775 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
776 pushArg(ImmGCPtr(gen->outerInfo().script()));
778 using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
779 HandleObject, bool*);
780 callVM<Fn, IonInIC::update>(lir);
782 StoreRegisterTo(inIC->output()).generate(this);
783 restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
785 masm.jump(ool->rejoin());
786 return;
788 case CacheKind::HasOwn: {
789 IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
791 saveLive(lir);
793 pushArg(hasOwnIC->id());
794 pushArg(hasOwnIC->value());
795 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
796 pushArg(ImmGCPtr(gen->outerInfo().script()));
798 using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
799 HandleValue, int32_t*);
800 callVM<Fn, IonHasOwnIC::update>(lir);
802 StoreRegisterTo(hasOwnIC->output()).generate(this);
803 restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
805 masm.jump(ool->rejoin());
806 return;
808 case CacheKind::CheckPrivateField: {
809 IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
811 saveLive(lir);
813 pushArg(checkPrivateFieldIC->id());
814 pushArg(checkPrivateFieldIC->value());
816 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
817 pushArg(ImmGCPtr(gen->outerInfo().script()));
819 using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
820 HandleValue, HandleValue, bool*);
821 callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
823 StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
824 restoreLiveIgnore(
825 lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
827 masm.jump(ool->rejoin());
828 return;
830 case CacheKind::InstanceOf: {
831 IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
833 saveLive(lir);
835 pushArg(hasInstanceOfIC->rhs());
836 pushArg(hasInstanceOfIC->lhs());
837 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
838 pushArg(ImmGCPtr(gen->outerInfo().script()));
840 using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
841 HandleValue lhs, HandleObject rhs, bool* res);
842 callVM<Fn, IonInstanceOfIC::update>(lir);
844 StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
845 restoreLiveIgnore(lir,
846 StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
848 masm.jump(ool->rejoin());
849 return;
851 case CacheKind::UnaryArith: {
852 IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
854 saveLive(lir);
856 pushArg(unaryArithIC->input());
857 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
858 pushArg(ImmGCPtr(gen->outerInfo().script()));
860 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
861 IonUnaryArithIC* stub, HandleValue val,
862 MutableHandleValue res);
863 callVM<Fn, IonUnaryArithIC::update>(lir);
865 StoreValueTo(unaryArithIC->output()).generate(this);
866 restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
868 masm.jump(ool->rejoin());
869 return;
871 case CacheKind::ToPropertyKey: {
872 IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
874 saveLive(lir);
876 pushArg(toPropertyKeyIC->input());
877 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
878 pushArg(ImmGCPtr(gen->outerInfo().script()));
880 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
881 IonToPropertyKeyIC* ic, HandleValue val,
882 MutableHandleValue res);
883 callVM<Fn, IonToPropertyKeyIC::update>(lir);
885 StoreValueTo(toPropertyKeyIC->output()).generate(this);
886 restoreLiveIgnore(lir,
887 StoreValueTo(toPropertyKeyIC->output()).clobbered());
889 masm.jump(ool->rejoin());
890 return;
892 case CacheKind::BinaryArith: {
893 IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
895 saveLive(lir);
897 pushArg(binaryArithIC->rhs());
898 pushArg(binaryArithIC->lhs());
899 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
900 pushArg(ImmGCPtr(gen->outerInfo().script()));
902 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
903 IonBinaryArithIC* stub, HandleValue lhs,
904 HandleValue rhs, MutableHandleValue res);
905 callVM<Fn, IonBinaryArithIC::update>(lir);
907 StoreValueTo(binaryArithIC->output()).generate(this);
908 restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
910 masm.jump(ool->rejoin());
911 return;
913 case CacheKind::Compare: {
914 IonCompareIC* compareIC = ic->asCompareIC();
916 saveLive(lir);
918 pushArg(compareIC->rhs());
919 pushArg(compareIC->lhs());
920 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
921 pushArg(ImmGCPtr(gen->outerInfo().script()));
923 using Fn =
924 bool (*)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub,
925 HandleValue lhs, HandleValue rhs, bool* res);
926 callVM<Fn, IonCompareIC::update>(lir);
928 StoreRegisterTo(compareIC->output()).generate(this);
929 restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
931 masm.jump(ool->rejoin());
932 return;
934 case CacheKind::CloseIter: {
935 IonCloseIterIC* closeIterIC = ic->asCloseIterIC();
937 saveLive(lir);
939 pushArg(closeIterIC->iter());
940 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
941 pushArg(ImmGCPtr(gen->outerInfo().script()));
943 using Fn =
944 bool (*)(JSContext*, HandleScript, IonCloseIterIC*, HandleObject);
945 callVM<Fn, IonCloseIterIC::update>(lir);
947 restoreLive(lir);
949 masm.jump(ool->rejoin());
950 return;
952 case CacheKind::OptimizeGetIterator: {
953 auto* optimizeGetIteratorIC = ic->asOptimizeGetIteratorIC();
955 saveLive(lir);
957 pushArg(optimizeGetIteratorIC->value());
958 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
959 pushArg(ImmGCPtr(gen->outerInfo().script()));
961 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeGetIteratorIC*,
962 HandleValue, bool* res);
963 callVM<Fn, IonOptimizeGetIteratorIC::update>(lir);
965 StoreRegisterTo(optimizeGetIteratorIC->output()).generate(this);
966 restoreLiveIgnore(
967 lir, StoreRegisterTo(optimizeGetIteratorIC->output()).clobbered());
969 masm.jump(ool->rejoin());
970 return;
972 case CacheKind::Call:
973 case CacheKind::TypeOf:
974 case CacheKind::ToBool:
975 case CacheKind::GetIntrinsic:
976 case CacheKind::NewArray:
977 case CacheKind::NewObject:
978 MOZ_CRASH("Unsupported IC");
980 MOZ_CRASH();
983 StringObject* MNewStringObject::templateObj() const {
984 return &templateObj_->as<StringObject>();
// Construct the code generator for one compilation: label lists are allocated
// from the MIR generator's arena; script counts and the zone-stub read-barrier
// mask start out empty.
987 CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
988 MacroAssembler* masm)
989 : CodeGeneratorSpecific(gen, graph, masm),
990 ionScriptLabels_(gen->alloc()),
991 ionNurseryObjectLabels_(gen->alloc()),
992 scriptCounts_(nullptr),
993 zoneStubsToReadBarrier_(0) {}
995 CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
// Convert a boxed Value to an int32. In TRUNCATE mode, doubles are truncated
// through oolTruncateDouble and strings are first converted to a double via a
// StringToNumber VM call; in NORMAL mode, masm.convertValueToInt32 handles the
// conversion and fails on inexact inputs (including -0 when a negative-zero
// check is requested). Any failure path bails out through the snapshot.
997 void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
998 ValueOperand operand = ToValue(lir, LValueToInt32::Input);
999 Register output = ToRegister(lir->output());
1000 FloatRegister temp = ToFloatRegister(lir->tempFloat());
1002 Label fails;
1003 if (lir->mode() == LValueToInt32::TRUNCATE) {
1004 OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());
1006 // We can only handle strings in truncation contexts, like bitwise
1007 // operations.
1008 Register stringReg = ToRegister(lir->temp());
1009 using Fn = bool (*)(JSContext*, JSString*, double*);
1010 auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
1011 StoreFloatRegisterTo(temp));
1012 Label* stringEntry = oolString->entry();
1013 Label* stringRejoin = oolString->rejoin();
1015 masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
1016 oolDouble->entry(), stringReg, temp, output,
1017 &fails);
1018 masm.bind(oolDouble->rejoin());
1019 } else {
1020 MOZ_ASSERT(lir->mode() == LValueToInt32::NORMAL);
1021 masm.convertValueToInt32(operand, temp, output, &fails,
1022 lir->mirNormal()->needsNegativeZeroCheck(),
1023 lir->mirNormal()->conversion());
1026 bailoutFrom(&fails, lir->snapshot());
// Convert a boxed Value to a double. Doubles are unboxed and int32s converted
// inline. Booleans, undefined (-> NaN) and null (-> 0.0) are also handled
// inline, but only when the MIR conversion mode allows non-string primitives;
// every other tag bails out.
1029 void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
1030 ValueOperand operand = ToValue(lir, LValueToDouble::InputIndex);
1031 FloatRegister output = ToFloatRegister(lir->output());
1033 // Set if we can handle other primitives beside strings, as long as they're
1034 // guaranteed to never throw. This rules out symbols and BigInts, but allows
1035 // booleans, undefined, and null.
1036 bool hasNonStringPrimitives =
1037 lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;
1039 Label isDouble, isInt32, isBool, isNull, isUndefined, done;
1042 ScratchTagScope tag(masm, operand);
1043 masm.splitTagForTest(operand, tag);
1045 masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
1046 masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
1048 if (hasNonStringPrimitives) {
1049 masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
1050 masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
1051 masm.branchTestNull(Assembler::Equal, tag, &isNull);
1055 bailout(lir->snapshot());
1057 if (hasNonStringPrimitives) {
1058 masm.bind(&isNull);
1059 masm.loadConstantDouble(0.0, output);
1060 masm.jump(&done);
1063 if (hasNonStringPrimitives) {
1064 masm.bind(&isUndefined);
1065 masm.loadConstantDouble(GenericNaN(), output);
1066 masm.jump(&done);
1069 if (hasNonStringPrimitives) {
1070 masm.bind(&isBool);
1071 masm.boolValueToDouble(operand, output);
1072 masm.jump(&done);
1075 masm.bind(&isInt32);
1076 masm.int32ValueToDouble(operand, output);
1077 masm.jump(&done);
1079 masm.bind(&isDouble);
1080 masm.unboxDouble(operand, output);
1081 masm.bind(&done);
// Convert a boxed Value to a float32. Mirrors visitValueToDouble, with the
// extra wrinkle that on ARM/MIPS32 a scratch double register is needed to
// unbox a double before narrowing it, since the output may only be a float32
// register.
1084 void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
1085 ValueOperand operand = ToValue(lir, LValueToFloat32::InputIndex);
1086 FloatRegister output = ToFloatRegister(lir->output());
1088 // Set if we can handle other primitives beside strings, as long as they're
1089 // guaranteed to never throw. This rules out symbols and BigInts, but allows
1090 // booleans, undefined, and null.
1091 bool hasNonStringPrimitives =
1092 lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;
1094 Label isDouble, isInt32, isBool, isNull, isUndefined, done;
1097 ScratchTagScope tag(masm, operand);
1098 masm.splitTagForTest(operand, tag);
1100 masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
1101 masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
1103 if (hasNonStringPrimitives) {
1104 masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
1105 masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
1106 masm.branchTestNull(Assembler::Equal, tag, &isNull);
1110 bailout(lir->snapshot());
1112 if (hasNonStringPrimitives) {
1113 masm.bind(&isNull);
1114 masm.loadConstantFloat32(0.0f, output);
1115 masm.jump(&done);
1118 if (hasNonStringPrimitives) {
1119 masm.bind(&isUndefined);
1120 masm.loadConstantFloat32(float(GenericNaN()), output);
1121 masm.jump(&done);
1124 if (hasNonStringPrimitives) {
1125 masm.bind(&isBool);
1126 masm.boolValueToFloat32(operand, output);
1127 masm.jump(&done);
1130 masm.bind(&isInt32);
1131 masm.int32ValueToFloat32(operand, output);
1132 masm.jump(&done);
1134 masm.bind(&isDouble);
1135 // ARM and MIPS may not have a double register available if we've
1136 // allocated output as a float32.
1137 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
1138 ScratchDoubleScope fpscratch(masm);
1139 masm.unboxDouble(operand, fpscratch);
1140 masm.convertDoubleToFloat32(fpscratch, output);
1141 #else
1142 masm.unboxDouble(operand, output);
1143 masm.convertDoubleToFloat32(output, output);
1144 #endif
1145 masm.bind(&done);
// Convert a boxed Value to a BigInt. BigInts are unboxed inline; booleans and
// strings are sent to the ToBigInt VM call; all remaining tags bail out (see
// the comment below on objects vs. other types).
1148 void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
1149 ValueOperand operand = ToValue(lir, LValueToBigInt::InputIndex);
1150 Register output = ToRegister(lir->output());
1152 using Fn = BigInt* (*)(JSContext*, HandleValue);
1153 auto* ool =
1154 oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));
1156 Register tag = masm.extractTag(operand, output);
1158 Label notBigInt, done;
1159 masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
1160 masm.unboxBigInt(operand, output);
1161 masm.jump(&done);
1162 masm.bind(&notBigInt);
1164 masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
1165 masm.branchTestString(Assembler::Equal, tag, ool->entry());
1167 // ToBigInt(object) can have side-effects; all other types throw a TypeError.
1168 bailout(lir->snapshot());
1170 masm.bind(ool->rejoin());
1171 masm.bind(&done);
// Straight int32 -> double conversion; cannot fail.
1174 void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
1175 masm.convertInt32ToDouble(ToRegister(lir->input()),
1176 ToFloatRegister(lir->output()));
// Widening float32 -> double conversion; exact, cannot fail.
1179 void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
1180 masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
1181 ToFloatRegister(lir->output()));
// Narrowing double -> float32 conversion (may round); cannot fail.
1184 void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
1185 masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
1186 ToFloatRegister(lir->output()));
// int32 -> float32 conversion (may round for large magnitudes); cannot fail.
1189 void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
1190 masm.convertInt32ToFloat32(ToRegister(lir->input()),
1191 ToFloatRegister(lir->output()));
// Checked double -> int32 conversion: bails out when the double is not
// exactly representable as an int32 (and, when requested, when it is -0).
1194 void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
1195 Label fail;
1196 FloatRegister input = ToFloatRegister(lir->input());
1197 Register output = ToRegister(lir->output());
1198 masm.convertDoubleToInt32(input, output, &fail,
1199 lir->mir()->needsNegativeZeroCheck());
1200 bailoutFrom(&fail, lir->snapshot());
// Checked float32 -> int32 conversion; same failure semantics as
// visitDoubleToInt32 above.
1203 void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
1204 Label fail;
1205 FloatRegister input = ToFloatRegister(lir->input());
1206 Register output = ToRegister(lir->output());
1207 masm.convertFloat32ToInt32(input, output, &fail,
1208 lir->mir()->needsNegativeZeroCheck());
1209 bailoutFrom(&fail, lir->snapshot());
// Sign-extend an int32 to pointer width. 64-bit only; on 32-bit platforms
// int32 and intptr coincide so this instruction is never emitted. The LIR
// node is only created when the input can be negative (asserted).
1213 void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
1214 #ifdef JS_64BIT
1215 // This LIR instruction is only used if the input can be negative.
1216 MOZ_ASSERT(lir->mir()->canBeNegative());
1217 Register output = ToRegister(lir->output());
1218 const LAllocation* input = lir->input();
1219 if (input->isRegister()) {
1220 masm.move32SignExtendToPtr(ToRegister(input), output);
1221 } else {
1222 masm.load32SignExtendToPtr(ToAddress(input), output);
1224 #else
1225 MOZ_CRASH("Not used on 32-bit platforms");
1226 #endif
// Narrow a non-negative IntPtr value to int32, bailing out if it does not
// fit. 64-bit only; the input must already be in the output register
// (asserted) so the guard can operate in place.
1229 void CodeGenerator::visitNonNegativeIntPtrToInt32(
1230 LNonNegativeIntPtrToInt32* lir) {
1231 #ifdef JS_64BIT
1232 Register output = ToRegister(lir->output());
1233 MOZ_ASSERT(ToRegister(lir->input()) == output);
1235 Label bail;
1236 masm.guardNonNegativeIntPtrToInt32(output, &bail);
1237 bailoutFrom(&bail, lir->snapshot());
1238 #else
1239 MOZ_CRASH("Not used on 32-bit platforms");
1240 #endif
// IntPtr -> double conversion; cannot fail.
1243 void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
1244 Register input = ToRegister(lir->input());
1245 FloatRegister output = ToFloatRegister(lir->output());
1246 masm.convertIntPtrToDouble(input, output);
// Compute the adjusted DataView length by subtracting (byteSize - 1) in
// place, bailing out if the subtraction goes negative (Signed). A debug
// assert checks the incoming length is non-negative.
1249 void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
1250 Register output = ToRegister(lir->output());
1251 MOZ_ASSERT(ToRegister(lir->input()) == output);
1253 uint32_t byteSize = lir->mir()->byteSize();
1255 #ifdef DEBUG
1256 Label ok;
1257 masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
1258 masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
1259 masm.bind(&ok);
1260 #endif
1262 Label bail;
1263 masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
1264 bailoutFrom(&bail, lir->snapshot());
// Out-of-line slow path for object truthiness: call js::EmulatesUndefined via
// an aligned ABI call (saving/restoring volatile registers around it, except
// |scratch| which receives the boolean result) and branch to the appropriate
// target label.
1267 void CodeGenerator::emitOOLTestObject(Register objreg,
1268 Label* ifEmulatesUndefined,
1269 Label* ifDoesntEmulateUndefined,
1270 Register scratch) {
1271 saveVolatile(scratch);
1272 using Fn = bool (*)(JSObject* obj);
1273 masm.setupAlignedABICall();
1274 masm.passABIArg(objreg);
1275 masm.callWithABI<Fn, js::EmulatesUndefined>();
1276 masm.storeCallPointerResult(scratch);
1277 restoreVolatile(scratch);
1279 masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
1280 masm.jump(ifDoesntEmulateUndefined);
1283 // Base out-of-line code generator for all tests of the truthiness of an
1284 // object, where the object might not be truthy. (Recall that per spec all
1285 // objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
1286 // flag to permit objects to look like |undefined| in certain contexts,
1287 // including in object truthiness testing.) We check truthiness inline except
1288 // when we're testing it on a proxy, in which case out-of-line code will call
1289 // EmulatesUndefined for a conclusive answer.
1290 class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
1291 Register objreg_;  // Register holding the object under test.
1292 Register scratch_;  // Scratch register for the out-of-line ABI call.
1294 Label* ifEmulatesUndefined_;  // Jump target when the object is falsy.
1295 Label* ifDoesntEmulateUndefined_;  // Jump target when the object is truthy.
1297 #ifdef DEBUG
1298 // setInputAndTargets must be called exactly once before code is emitted;
1299 // a non-null ifEmulatesUndefined_ marks that it has been.
1298 bool initialized() { return ifEmulatesUndefined_ != nullptr; }
1299 #endif
1301 public:
1302 OutOfLineTestObject()
1303 : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}
1305 void accept(CodeGenerator* codegen) final {
1306 MOZ_ASSERT(initialized());
1307 codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
1308 ifDoesntEmulateUndefined_, scratch_);
1311 // Specify the register where the object to be tested is found, labels to
1312 // jump to if the object is truthy or falsy, and a scratch register for
1313 // use in the out-of-line path.
1314 void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
1315 Label* ifDoesntEmulateUndefined, Register scratch) {
1316 MOZ_ASSERT(!initialized());
1317 MOZ_ASSERT(ifEmulatesUndefined);
1318 objreg_ = objreg;
1319 scratch_ = scratch;
1320 ifEmulatesUndefined_ = ifEmulatesUndefined;
1321 ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
1325 // A subclass of OutOfLineTestObject containing two extra labels, for use when
1326 // the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
1327 // code. The user should bind these labels in inline code, and specify them as
1328 // targets via setInputAndTargets, as appropriate.
1329 class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
1330 Label label1_;
1331 Label label2_;
1333 public:
1334 OutOfLineTestObjectWithLabels() = default;
1336 Label* label1() { return &label1_; }
1337 Label* label2() { return &label2_; }
// Shared kernel for the two wrappers below: wire up the out-of-line path and
// emit the inline class-flag fast check. On return, control either branched
// to one of the labels / the OOL entry, or falls through without branching to
// ifDoesntEmulateUndefined (callers handle that).
1340 void CodeGenerator::testObjectEmulatesUndefinedKernel(
1341 Register objreg, Label* ifEmulatesUndefined,
1342 Label* ifDoesntEmulateUndefined, Register scratch,
1343 OutOfLineTestObject* ool) {
1344 ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
1345 scratch);
1347 // Perform a fast-path check of the object's class flags if the object's
1348 // not a proxy. Let out-of-line code handle the slow cases that require
1349 // saving registers, making a function call, and restoring registers.
1350 masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
1351 ifEmulatesUndefined);
// Variant of testObjectEmulatesUndefined for when ifDoesntEmulateUndefined is
// the fall-through path: binds that label here instead of jumping to it.
1354 void CodeGenerator::branchTestObjectEmulatesUndefined(
1355 Register objreg, Label* ifEmulatesUndefined,
1356 Label* ifDoesntEmulateUndefined, Register scratch,
1357 OutOfLineTestObject* ool) {
1358 MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
1359 "ifDoesntEmulateUndefined will be bound to the fallthrough path");
1361 testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
1362 ifDoesntEmulateUndefined, scratch, ool);
1363 masm.bind(ifDoesntEmulateUndefined);
// Variant for when ifDoesntEmulateUndefined is bound elsewhere: emits an
// explicit jump to it after the kernel's inline check.
1366 void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
1367 Label* ifEmulatesUndefined,
1368 Label* ifDoesntEmulateUndefined,
1369 Register scratch,
1370 OutOfLineTestObject* ool) {
1371 testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
1372 ifDoesntEmulateUndefined, scratch, ool);
1373 masm.jump(ifDoesntEmulateUndefined);
// Emit the truthiness test for a single JSValueType of a boxed Value.
// |skipTypeTest| is set when this is the last possible type, so the tag check
// can be elided (a debug assert still verifies the tag). Undefined/null are
// unconditionally falsy, symbols unconditionally truthy, objects dispatch
// through testObjectEmulatesUndefined, and the remaining types use the
// per-type branchTest*Truthy helpers. Falls through on truthy when this is
// the last type; otherwise continues to the next type's test.
1376 void CodeGenerator::testValueTruthyForType(
1377 JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
1378 Register tempToUnbox, Register temp, FloatRegister floatTemp,
1379 Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool,
1380 bool skipTypeTest) {
1381 #ifdef DEBUG
1382 if (skipTypeTest) {
1383 Label expected;
1384 masm.branchTestType(Assembler::Equal, tag, type, &expected);
1385 masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
1386 masm.bind(&expected);
1388 #endif
1390 // Handle irregular types first.
1391 switch (type) {
1392 case JSVAL_TYPE_UNDEFINED:
1393 case JSVAL_TYPE_NULL:
1394 // Undefined and null are falsy.
1395 if (!skipTypeTest) {
1396 masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
1397 } else {
1398 masm.jump(ifFalsy);
1400 return;
1401 case JSVAL_TYPE_SYMBOL:
1402 // Symbols are truthy.
1403 if (!skipTypeTest) {
1404 masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
1405 } else {
1406 masm.jump(ifTruthy);
1408 return;
1409 case JSVAL_TYPE_OBJECT: {
1410 Label notObject;
1411 if (!skipTypeTest) {
1412 masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
1414 // Release the tag scratch while unboxing and testing the object.
1414 ScratchTagScopeRelease _(&tag);
1415 Register objreg = masm.extractObject(value, tempToUnbox);
1416 testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, temp, ool);
1417 masm.bind(&notObject);
1418 return;
1420 default:
1421 break;
1424 // Check the type of the value (unless this is the last possible type).
1425 Label differentType;
1426 if (!skipTypeTest) {
1427 masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
1430 // Branch if the value is falsy.
1431 ScratchTagScopeRelease _(&tag);
1432 switch (type) {
1433 case JSVAL_TYPE_BOOLEAN: {
1434 masm.branchTestBooleanTruthy(false, value, ifFalsy);
1435 break;
1437 case JSVAL_TYPE_INT32: {
1438 masm.branchTestInt32Truthy(false, value, ifFalsy);
1439 break;
1441 case JSVAL_TYPE_STRING: {
1442 masm.branchTestStringTruthy(false, value, ifFalsy);
1443 break;
1445 case JSVAL_TYPE_BIGINT: {
1446 masm.branchTestBigIntTruthy(false, value, ifFalsy);
1447 break;
1449 case JSVAL_TYPE_DOUBLE: {
1450 masm.unboxDouble(value, floatTemp);
1451 masm.branchTestDoubleTruthy(false, floatTemp, ifFalsy);
1452 break;
1454 default:
1455 MOZ_CRASH("Unexpected value type");
1458 // If we reach this point, the value is truthy. We fall through for
1459 // truthy on the last test; otherwise, branch.
1460 if (!skipTypeTest) {
1461 masm.jump(ifTruthy);
1464 masm.bind(&differentType);
// Emit a full truthiness test for a boxed Value: types observed by Baseline
// (most frequent first) are tested before the remaining types in a fixed
// default order, with the type check elided on the final possibility. Control
// falls through when the value is truthy; otherwise it reaches ifFalsy (or
// ifTruthy via an explicit branch on non-final tests).
1467 void CodeGenerator::testValueTruthy(const ValueOperand& value,
1468 Register tempToUnbox, Register temp,
1469 FloatRegister floatTemp,
1470 const TypeDataList& observedTypes,
1471 Label* ifTruthy, Label* ifFalsy,
1472 OutOfLineTestObject* ool) {
1473 ScratchTagScope tag(masm, value);
1474 masm.splitTagForTest(value, tag);
1476 const std::initializer_list<JSValueType> defaultOrder = {
1477 JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN,
1478 JSVAL_TYPE_INT32, JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
1479 JSVAL_TYPE_DOUBLE, JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
1481 mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);
1483 // Generate tests for previously observed types first.
1484 // The TypeDataList is sorted by descending frequency.
1485 for (auto& observed : observedTypes) {
1486 JSValueType type = observed.type();
1487 remaining -= type;
1489 testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
1490 ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
1493 // Generate tests for remaining types.
1494 for (auto type : defaultOrder) {
1495 if (!remaining.contains(type)) {
1496 continue;
1498 remaining -= type;
1500 // We don't need a type test for the last possible type.
1501 bool skipTypeTest = remaining.isEmpty();
1502 testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
1503 ifTruthy, ifFalsy, ool, skipTypeTest);
1505 MOZ_ASSERT(remaining.isEmpty());
1507 // We fall through if the final test is truthy.
// Branch on the truthiness of a BigInt (zero is falsy), choosing the cheapest
// branch shape depending on which successor block immediately follows.
1510 void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
1511 Label* ifTrueLabel = getJumpLabelForBranch(lir->ifTrue());
1512 Label* ifFalseLabel = getJumpLabelForBranch(lir->ifFalse());
1513 Register input = ToRegister(lir->input());
1515 if (isNextBlock(lir->ifFalse()->lir())) {
1516 masm.branchIfBigIntIsNonZero(input, ifTrueLabel);
1517 } else if (isNextBlock(lir->ifTrue()->lir())) {
1518 masm.branchIfBigIntIsZero(input, ifFalseLabel);
1519 } else {
1520 masm.branchIfBigIntIsZero(input, ifFalseLabel);
1521 jumpToBlock(lir->ifTrue());
// Branch on the truthiness of an object: truthy unless it emulates undefined
// (slow proxy cases are handled by the OutOfLineTestObject).
1525 void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
1526 Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
1527 Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
1528 Register input = ToRegister(lir->input());
1530 auto* ool = new (alloc()) OutOfLineTestObject();
1531 addOutOfLineCode(ool, lir->mir());
1533 testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
1534 ool);
// Branch on the truthiness of an arbitrary boxed Value via testValueTruthy;
// the trailing jump handles the truthy fall-through of the final type test.
1537 void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
1538 auto* ool = new (alloc()) OutOfLineTestObject();
1539 addOutOfLineCode(ool, lir->mir());
1541 Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
1542 Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
1544 ValueOperand input = ToValue(lir, LTestVAndBranch::Input);
1545 Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
1546 Register temp = ToRegister(lir->temp2());
1547 FloatRegister floatTemp = ToFloatRegister(lir->tempFloat());
1548 const TypeDataList& observedTypes = lir->mir()->observedTypes();
1550 testValueTruthy(input, tempToUnbox, temp, floatTemp, observedTypes, truthy,
1551 falsy, ool);
1552 masm.jump(truthy);
// Convert a boolean to the interned "true"/"false" atoms; no VM call needed.
1555 void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
1556 Register input = ToRegister(lir->input());
1557 Register output = ToRegister(lir->output());
1558 const JSAtomState& names = gen->runtime->names();
1559 Label true_, done;
1561 masm.branchTest32(Assembler::NonZero, input, input, &true_);
1562 masm.movePtr(ImmGCPtr(names.false_), output);
1563 masm.jump(&done);
1565 masm.bind(&true_);
1566 masm.movePtr(ImmGCPtr(names.true_), output);
1568 masm.bind(&done);
// Convert an int32 to a string: fast path through the static-strings table,
// falling back to an Int32ToString VM call for values not covered by it.
1571 void CodeGenerator::visitIntToString(LIntToString* lir) {
1572 Register input = ToRegister(lir->input());
1573 Register output = ToRegister(lir->output());
1575 using Fn = JSLinearString* (*)(JSContext*, int);
1576 OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
1577 lir, ArgList(input), StoreRegisterTo(output));
1579 masm.lookupStaticIntString(input, output, gen->runtime->staticStrings(),
1580 ool->entry());
1582 masm.bind(ool->rejoin());
// Convert a double to a string: if it converts exactly to an int32, reuse the
// static-strings fast path; otherwise fall back to NumberToString.
1585 void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
1586 FloatRegister input = ToFloatRegister(lir->input());
1587 Register temp = ToRegister(lir->temp0());
1588 Register output = ToRegister(lir->output());
1590 using Fn = JSString* (*)(JSContext*, double);
1591 OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
1592 lir, ArgList(input), StoreRegisterTo(output));
1594 // Try double to integer conversion and run integer to string code.
1595 masm.convertDoubleToInt32(input, temp, ool->entry(), false);
1596 masm.lookupStaticIntString(temp, output, gen->runtime->staticStrings(),
1597 ool->entry());
1599 masm.bind(ool->rejoin());
// Convert an arbitrary boxed Value to a string, dispatching on the tag:
// strings unbox directly; int32s use the static-strings table; undefined,
// null and booleans load the corresponding atoms; doubles, BigInts and (when
// side effects are allowed) objects/symbols go through the ToStringSlow VM
// call; otherwise objects/symbols bail out.
1602 void CodeGenerator::visitValueToString(LValueToString* lir) {
1603 ValueOperand input = ToValue(lir, LValueToString::InputIndex);
1604 Register output = ToRegister(lir->output());
1606 using Fn = JSString* (*)(JSContext*, HandleValue);
1607 OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
1608 lir, ArgList(input), StoreRegisterTo(output));
1610 Label done;
1611 Register tag = masm.extractTag(input, output);
1612 const JSAtomState& names = gen->runtime->names();
1614 // String
1616 Label notString;
1617 masm.branchTestString(Assembler::NotEqual, tag, &notString);
1618 masm.unboxString(input, output);
1619 masm.jump(&done);
1620 masm.bind(&notString);
1623 // Integer
1625 Label notInteger;
1626 masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
1627 Register unboxed = ToTempUnboxRegister(lir->temp0());
1628 unboxed = masm.extractInt32(input, unboxed);
1629 masm.lookupStaticIntString(unboxed, output, gen->runtime->staticStrings(),
1630 ool->entry());
1631 masm.jump(&done);
1632 masm.bind(&notInteger);
1635 // Double
1637 // Note: no fastpath. Need two extra registers and can only convert doubles
1638 // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
1639 masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
1642 // Undefined
1644 Label notUndefined;
1645 masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
1646 masm.movePtr(ImmGCPtr(names.undefined), output);
1647 masm.jump(&done);
1648 masm.bind(&notUndefined);
1651 // Null
1653 Label notNull;
1654 masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
1655 masm.movePtr(ImmGCPtr(names.null), output);
1656 masm.jump(&done);
1657 masm.bind(&notNull);
1660 // Boolean
1662 Label notBoolean, true_;
1663 masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
1664 masm.branchTestBooleanTruthy(true, input, &true_);
1665 masm.movePtr(ImmGCPtr(names.false_), output);
1666 masm.jump(&done);
1667 masm.bind(&true_);
1668 masm.movePtr(ImmGCPtr(names.true_), output);
1669 masm.jump(&done);
1670 masm.bind(&notBoolean);
1673 // Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
1674 if (lir->mir()->mightHaveSideEffects()) {
1675 // Object
1676 if (lir->mir()->supportSideEffects()) {
1677 masm.branchTestObject(Assembler::Equal, tag, ool->entry());
1678 } else {
1679 // Bail.
1680 MOZ_ASSERT(lir->mir()->needsSnapshot());
1681 Label bail;
1682 masm.branchTestObject(Assembler::Equal, tag, &bail);
1683 bailoutFrom(&bail, lir->snapshot());
1686 // Symbol
1687 if (lir->mir()->supportSideEffects()) {
1688 masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
1689 } else {
1690 // Bail.
1691 MOZ_ASSERT(lir->mir()->needsSnapshot());
1692 Label bail;
1693 masm.branchTestSymbol(Assembler::Equal, tag, &bail);
1694 bailoutFrom(&bail, lir->snapshot());
1698 // BigInt
1700 // No fastpath currently implemented.
1701 masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
1704 masm.assumeUnreachable("Unexpected type for LValueToString.");
1706 masm.bind(&done);
1707 masm.bind(ool->rejoin());
1710 using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);
// Emit a call to a store-buffer mutation function (add/remove a cell address)
// for the cell slot at holder+offset. Volatile registers in liveVolatiles are
// preserved across the ABI call; if no volatile register remains free for the
// call setup, |holder| is temporarily pushed and reused.
1712 static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
1713 size_t offset, Register buffer,
1714 LiveGeneralRegisterSet& liveVolatiles,
1715 StoreBufferMutationFn fun) {
1716 Label callVM;
1717 Label exit;
1719 // Call into the VM to barrier the write. The only registers that need to
1720 // be preserved are those in liveVolatiles, so once they are saved on the
1721 // stack all volatile registers are available for use.
1722 masm.bind(&callVM);
1723 masm.PushRegsInMask(liveVolatiles);
1725 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
1726 regs.takeUnchecked(buffer);
1727 regs.takeUnchecked(holder);
1728 Register addrReg = regs.takeAny();
1730 masm.computeEffectiveAddress(Address(holder, offset), addrReg);
1732 bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
1733 if (needExtraReg) {
1734 masm.push(holder);
1735 masm.setupUnalignedABICall(holder);
1736 } else {
1737 masm.setupUnalignedABICall(regs.takeAny());
1739 masm.passABIArg(buffer);
1740 masm.passABIArg(addrReg);
1741 masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun), MoveOp::GENERAL,
1742 CheckUnsafeCallWithABI::DontCheckOther);
1744 if (needExtraReg) {
1745 masm.pop(holder);
1747 masm.PopRegsInMask(liveVolatiles);
1748 masm.bind(&exit);
1751 // Warning: this function modifies prev and next.
// Emit the post-write barrier for storing string |next| over string |prev| in
// the cell slot at holder+offset: if the new value is in the nursery and the
// old one was not, add the slot address to the store buffer; if the old value
// was in the nursery and the new one is not, remove it. Callers never pass
// nullptr for next (see comment below).
1752 static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
1753 size_t offset, Register prev, Register next,
1754 LiveGeneralRegisterSet& liveVolatiles) {
1755 Label exit;
1756 Label checkRemove, putCell;
1758 // if (next && (buffer = next->storeBuffer()))
1759 // but we never pass in nullptr for next.
1760 Register storebuffer = next;
1761 masm.loadStoreBuffer(next, storebuffer);
1762 masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);
1764 // if (prev && prev->storeBuffer())
1765 masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
1766 masm.loadStoreBuffer(prev, prev);
1767 masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);
1769 // buffer->putCell(cellp)
1770 masm.bind(&putCell);
1771 EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
1772 JSString::addCellAddressToStoreBuffer);
1773 masm.jump(&exit);
1775 // if (prev && (buffer = prev->storeBuffer()))
1776 masm.bind(&checkRemove);
1777 masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
1778 masm.loadStoreBuffer(prev, storebuffer);
1779 masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
1780 EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
1781 JSString::removeCellAddressFromStoreBuffer);
1783 masm.bind(&exit);
// Materialize a RegExp literal: when the MIR node has a RegExpShared, clone
// the source object inline with createGCObject (falling back to the VM on
// allocation failure); otherwise always call CloneRegExpObject.
1786 void CodeGenerator::visitRegExp(LRegExp* lir) {
1787 Register output = ToRegister(lir->output());
1788 Register temp = ToRegister(lir->temp0());
1789 JSObject* source = lir->mir()->source();
1791 using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
1792 OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
1793 lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
1794 if (lir->mir()->hasShared()) {
1795 TemplateObject templateObject(source);
1796 masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
1797 ool->entry());
1798 } else {
1799 masm.jump(ool->entry());
1801 masm.bind(ool->rejoin());
// Frame-pointer-relative offset of the MatchPairs pairs vector, which sits
// after the InputOutputData and the MatchPairs header on the stack (see the
// stack-layout SMDOC in PrepareAndExecuteRegExp).
1804 static constexpr int32_t RegExpPairsVectorStartOffset(
1805 int32_t inputOutputDataStartOffset) {
1806 return inputOutputDataStartOffset + int32_t(InputOutputDataSize) +
1807 int32_t(sizeof(MatchPairs));
// Frame-pointer-relative address of the MatchPairs::pairCount_ field within
// the stack-allocated RegExp data (see the stack-layout SMDOC in
// PrepareAndExecuteRegExp).
1810 static Address RegExpPairCountAddress(MacroAssembler& masm,
1811 int32_t inputOutputDataStartOffset) {
1812 return Address(FramePointer, inputOutputDataStartOffset +
1813 int32_t(InputOutputDataSize) +
1814 MatchPairs::offsetOfPairCount());
1817 // When the unicode flag is set, if lastIndex points to a trail
1818 // surrogate, we should step back to the corresponding lead surrogate.
1819 // See ExecuteRegExp in builtin/RegExp.cpp for more detail.
// Preconditions established below: the regexp has the Unicode flag, the input
// is a two-byte string, and 0 < lastIndex < input->length(). Clobbers temp1
// (string length, then chars pointer) and temp2 (character scratch).
1820 static void StepBackToLeadSurrogate(MacroAssembler& masm, Register regexpShared,
1821 Register input, Register lastIndex,
1822 Register temp1, Register temp2) {
1823 Label done;
1825 // If the unicode flag is not set, there is nothing to do.
1826 masm.branchTest32(Assembler::Zero,
1827 Address(regexpShared, RegExpShared::offsetOfFlags()),
1828 Imm32(int32_t(JS::RegExpFlag::Unicode)), &done);
1830 // If the input is latin1, there can't be any surrogates.
1831 masm.branchLatin1String(input, &done);
1833 // Check if |lastIndex > 0 && lastIndex < input->length()|.
1834 // lastIndex should already have no sign here.
1835 masm.branchTest32(Assembler::Zero, lastIndex, lastIndex, &done);
1836 masm.loadStringLength(input, temp1);
1837 masm.branch32(Assembler::AboveOrEqual, lastIndex, temp1, &done);
1839 // For TrailSurrogateMin ≤ x ≤ TrailSurrogateMax and
1840 // LeadSurrogateMin ≤ x ≤ LeadSurrogateMax, the following
1841 // equations hold.
1843 // SurrogateMin ≤ x ≤ SurrogateMax
1844 // <> SurrogateMin ≤ x ≤ SurrogateMin + 2^10 - 1
1845 // <> ((x - SurrogateMin) >>> 10) = 0 where >>> is an unsigned-shift
1846 // See Hacker's Delight, section 4-1 for details.
1848 // ((x - SurrogateMin) >>> 10) = 0
1849 // <> floor((x - SurrogateMin) / 1024) = 0
1850 // <> floor((x / 1024) - (SurrogateMin / 1024)) = 0
1851 // <> floor(x / 1024) = SurrogateMin / 1024
1852 // <> floor(x / 1024) * 1024 = SurrogateMin
1853 // <> (x >>> 10) << 10 = SurrogateMin
1854 // <> x & ~(2^10 - 1) = SurrogateMin
1856 constexpr char16_t SurrogateMask = 0xFC00;
1858 Register charsReg = temp1;
1859 masm.loadStringChars(input, charsReg, CharEncoding::TwoByte);
1861 // Check if input[lastIndex] is trail surrogate.
1862 masm.loadChar(charsReg, lastIndex, temp2, CharEncoding::TwoByte);
1863 masm.and32(Imm32(SurrogateMask), temp2);
1864 masm.branch32(Assembler::NotEqual, temp2, Imm32(unicode::TrailSurrogateMin),
1865 &done);
1867 // Check if input[lastIndex-1] is lead surrogate.
1868 masm.loadChar(charsReg, lastIndex, temp2, CharEncoding::TwoByte,
1869 -int32_t(sizeof(char16_t)));
1870 masm.and32(Imm32(SurrogateMask), temp2);
1871 masm.branch32(Assembler::NotEqual, temp2, Imm32(unicode::LeadSurrogateMin),
1872 &done);
1874 // Move lastIndex back to lead surrogate.
1875 masm.sub32(Imm32(1), lastIndex);
1877 masm.bind(&done);
// Record a match in the RegExpStatics for lazy evaluation: pre-barrier the
// old pendingInput/matchesInput/lazySource values, store the new input string
// (post-barriering the slots when strings may be nursery-allocated, i.e. when
// initialStringHeap is gc::Heap::Default), then record lastIndex, set the
// pendingLazyEvaluation flag, and copy the source and flags out of the
// regexp's RegExpShared. Clobbers temp1 and temp2.
1880 static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
1881 Register input, Register lastIndex,
1882 Register staticsReg, Register temp1,
1883 Register temp2, gc::Heap initialStringHeap,
1884 LiveGeneralRegisterSet& volatileRegs) {
1885 Address pendingInputAddress(staticsReg,
1886 RegExpStatics::offsetOfPendingInput());
1887 Address matchesInputAddress(staticsReg,
1888 RegExpStatics::offsetOfMatchesInput());
1889 Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
1890 Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());
1892 masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
1893 masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
1894 masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);
1896 if (initialStringHeap == gc::Heap::Default) {
1897 // Writing into RegExpStatics tenured memory; must post-barrier.
1898 if (staticsReg.volatile_()) {
1899 volatileRegs.add(staticsReg);
1902 masm.loadPtr(pendingInputAddress, temp1);
1903 masm.storePtr(input, pendingInputAddress);
1904 masm.movePtr(input, temp2);
1905 EmitPostWriteBarrierS(masm, staticsReg,
1906 RegExpStatics::offsetOfPendingInput(),
1907 temp1 /* prev */, temp2 /* next */, volatileRegs);
1909 masm.loadPtr(matchesInputAddress, temp1);
1910 masm.storePtr(input, matchesInputAddress);
1911 masm.movePtr(input, temp2);
1912 EmitPostWriteBarrierS(masm, staticsReg,
1913 RegExpStatics::offsetOfMatchesInput(),
1914 temp1 /* prev */, temp2 /* next */, volatileRegs);
1915 } else {
1916 masm.debugAssertGCThingIsTenured(input, temp1);
1917 masm.storePtr(input, pendingInputAddress);
1918 masm.storePtr(input, matchesInputAddress);
1921 masm.storePtr(lastIndex,
1922 Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
1923 masm.store32(
1924 Imm32(1),
1925 Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));
1927 masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
1928 RegExpObject::SHARED_SLOT)),
1929 temp1, JSVAL_TYPE_PRIVATE_GCTHING);
1930 masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
1931 masm.storePtr(temp2, lazySourceAddress);
1932 static_assert(sizeof(JS::RegExpFlags) == 1, "load size must match flag size");
1933 masm.load8ZeroExtend(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
1934 masm.store8(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
1937 // Prepare an InputOutputData and optional MatchPairs which space has been
1938 // allocated for on the stack, and try to execute a RegExp on a string input.
1939 // If the RegExp was successfully executed and matched the input, fallthrough.
1940 // Otherwise, jump to notFound or failure.
1942 // inputOutputDataStartOffset is the offset relative to the frame pointer
1943 // register. This offset is negative for the RegExpExecTest stub.
1944 static bool PrepareAndExecuteRegExp(MacroAssembler& masm, Register regexp,
1945 Register input, Register lastIndex,
1946 Register temp1, Register temp2,
1947 Register temp3,
1948 int32_t inputOutputDataStartOffset,
1949 gc::Heap initialStringHeap, Label* notFound,
1950 Label* failure) {
1951 JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");
1953 using irregexp::InputOutputData;
1956 * [SMDOC] Stack layout for PrepareAndExecuteRegExp
1958 * Before this function is called, the caller is responsible for
1959 * allocating enough stack space for the following data:
1961 * inputOutputDataStartOffset +-----> +---------------+
1962 * |InputOutputData|
1963 * inputStartAddress +----------> inputStart|
1964 * inputEndAddress +----------> inputEnd|
1965 * startIndexAddress +----------> startIndex|
1966 * matchesAddress +----------> matches|-----+
1967 * +---------------+ |
1968 * matchPairs(Address|Offset) +-----> +---------------+ <--+
1969 * | MatchPairs |
1970 * pairCountAddress +----------> count |
1971 * pairsPointerAddress +----------> pairs |-----+
1972 * +---------------+ |
1973 * pairsArray(Address|Offset) +-----> +---------------+ <--+
1974 * | MatchPair |
1975 * firstMatchStartAddress +----------> start | <--+
1976 * | limit | |
1977 * +---------------+ |
1978 * . |
1979 * . Reserved space for
1980 * . RegExpObject::MaxPairCount
1981 * . MatchPair objects
1982 * . |
1983 * +---------------+ |
1984 * | MatchPair | |
1985 * | start | |
1986 * | limit | <--+
1987 * +---------------+
// Compute frame-pointer-relative offsets/addresses for the three stack
// regions described in the diagram above.
1990 int32_t ioOffset = inputOutputDataStartOffset;
1991 int32_t matchPairsOffset = ioOffset + int32_t(sizeof(InputOutputData));
1992 int32_t pairsArrayOffset = matchPairsOffset + int32_t(sizeof(MatchPairs));
1994 Address inputStartAddress(FramePointer,
1995 ioOffset + InputOutputData::offsetOfInputStart());
1996 Address inputEndAddress(FramePointer,
1997 ioOffset + InputOutputData::offsetOfInputEnd());
1998 Address startIndexAddress(FramePointer,
1999 ioOffset + InputOutputData::offsetOfStartIndex());
2000 Address matchesAddress(FramePointer,
2001 ioOffset + InputOutputData::offsetOfMatches());
2003 Address matchPairsAddress(FramePointer, matchPairsOffset);
2004 Address pairCountAddress(FramePointer,
2005 matchPairsOffset + MatchPairs::offsetOfPairCount());
2006 Address pairsPointerAddress(FramePointer,
2007 matchPairsOffset + MatchPairs::offsetOfPairs());
2009 Address pairsArrayAddress(FramePointer, pairsArrayOffset);
2010 Address firstMatchStartAddress(FramePointer,
2011 pairsArrayOffset + MatchPair::offsetOfStart());
2013 // First, fill in a skeletal MatchPairs instance on the stack. This will be
2014 // passed to the OOL stub in the caller if we aren't able to execute the
2015 // RegExp inline, and that stub needs to be able to determine whether the
2016 // execution finished successfully.
2018 // Initialize MatchPairs::pairCount to 1. The correct value can only
2019 // be determined after loading the RegExpShared. If the RegExpShared
2020 // has Kind::Atom, this is the correct pairCount.
2021 masm.store32(Imm32(1), pairCountAddress);
2023 // Initialize MatchPairs::pairs pointer
2024 masm.computeEffectiveAddress(pairsArrayAddress, temp1);
2025 masm.storePtr(temp1, pairsPointerAddress);
2027 // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch
2028 masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);
2030 // Determine the set of volatile inputs to save when calling into C++ or
2031 // regexp code.
2032 LiveGeneralRegisterSet volatileRegs;
2033 if (lastIndex.volatile_()) {
2034 volatileRegs.add(lastIndex);
2036 if (input.volatile_()) {
2037 volatileRegs.add(input);
2039 if (regexp.volatile_()) {
2040 volatileRegs.add(regexp);
2043 // Ensure the input string is not a rope.
2044 Label isLinear;
2045 masm.branchIfNotRope(input, &isLinear);
// Flatten the rope via a pure (non-GC) C++ helper; a null result means
// linearization could not be done here, so bail to the OOL path.
2047 masm.PushRegsInMask(volatileRegs);
2049 using Fn = JSLinearString* (*)(JSString*);
2050 masm.setupUnalignedABICall(temp1);
2051 masm.passABIArg(input);
2052 masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
2054 MOZ_ASSERT(!volatileRegs.has(temp1));
2055 masm.storeCallPointerResult(temp1);
2056 masm.PopRegsInMask(volatileRegs);
2058 masm.branchTestPtr(Assembler::Zero, temp1, temp1, failure);
2060 masm.bind(&isLinear);
2062 // Load the RegExpShared.
2063 Register regexpReg = temp1;
2064 Address sharedSlot = Address(
2065 regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
2066 masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
2067 masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);
2069 // Handle Atom matches
// Fast path: if the pattern is a literal atom, match it with a direct C++
// call instead of running the compiled regexp code.
2070 Label notAtom, checkSuccess;
2071 masm.branchPtr(Assembler::Equal,
2072 Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
2073 ImmWord(0), &notAtom);
2075 masm.computeEffectiveAddress(matchPairsAddress, temp3);
2077 masm.PushRegsInMask(volatileRegs);
2078 using Fn = RegExpRunStatus (*)(RegExpShared* re, JSLinearString* input,
2079 size_t start, MatchPairs* matchPairs);
2080 masm.setupUnalignedABICall(temp2);
2081 masm.passABIArg(regexpReg);
2082 masm.passABIArg(input);
2083 masm.passABIArg(lastIndex);
2084 masm.passABIArg(temp3);
2085 masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();
2087 MOZ_ASSERT(!volatileRegs.has(temp1));
2088 masm.storeCallInt32Result(temp1);
2089 masm.PopRegsInMask(volatileRegs);
2091 masm.jump(&checkSuccess);
2093 masm.bind(&notAtom);
2095 // Don't handle regexps with too many capture pairs.
2096 masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
2097 masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
2098 failure);
2100 // Fill in the pair count in the MatchPairs on the stack.
2101 masm.store32(temp2, pairCountAddress);
2103 // Update lastIndex if necessary.
2104 StepBackToLeadSurrogate(masm, regexpReg, input, lastIndex, temp2, temp3);
2106 // Load code pointer and length of input (in bytes).
2107 // Store the input start in the InputOutputData.
2108 Register codePointer = temp1; // Note: temp1 was previously regexpReg.
2109 Register byteLength = temp3;
2111 Label isLatin1, done;
2112 masm.loadStringLength(input, byteLength);
2114 masm.branchLatin1String(input, &isLatin1);
2116 // Two-byte input
2117 masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
2118 masm.storePtr(temp2, inputStartAddress);
2119 masm.loadPtr(
2120 Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
2121 codePointer);
// Two bytes per char: convert the char count into a byte count.
2122 masm.lshiftPtr(Imm32(1), byteLength);
2123 masm.jump(&done);
2125 // Latin1 input
2126 masm.bind(&isLatin1);
2127 masm.loadStringChars(input, temp2, CharEncoding::Latin1);
2128 masm.storePtr(temp2, inputStartAddress);
2129 masm.loadPtr(
2130 Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
2131 codePointer);
2133 masm.bind(&done);
2135 // Store end pointer
2136 masm.addPtr(byteLength, temp2);
2137 masm.storePtr(temp2, inputEndAddress);
2140 // Guard that the RegExpShared has been compiled for this type of input.
2141 // If it has not been compiled, we fall back to the OOL case, which will
2142 // do a VM call into the interpreter.
2143 // TODO: add an interpreter trampoline?
2144 masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
2145 masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);
2147 // Finish filling in the InputOutputData instance on the stack
2148 masm.computeEffectiveAddress(matchPairsAddress, temp2);
2149 masm.storePtr(temp2, matchesAddress);
2150 masm.storePtr(lastIndex, startIndexAddress);
2152 // Execute the RegExp.
2153 masm.computeEffectiveAddress(
2154 Address(FramePointer, inputOutputDataStartOffset), temp2);
2155 masm.PushRegsInMask(volatileRegs);
2156 masm.setupUnalignedABICall(temp3);
2157 masm.passABIArg(temp2);
2158 masm.callWithABI(codePointer);
2159 masm.storeCallInt32Result(temp1);
2160 masm.PopRegsInMask(volatileRegs);
// Both the atom fast path and the compiled-code path merge here with the
// RegExpRunStatus in temp1.
2162 masm.bind(&checkSuccess);
2163 masm.branch32(Assembler::Equal, temp1,
2164 Imm32(RegExpRunStatus_Success_NotFound), notFound);
2165 masm.branch32(Assembler::Equal, temp1, Imm32(RegExpRunStatus_Error), failure);
2167 // Lazily update the RegExpStatics.
2168 size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
2169 RegExpRealm::offsetOfRegExpStatics();
2170 masm.loadGlobalObjectData(temp1);
2171 masm.loadPtr(Address(temp1, offset), temp1);
2172 UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
2173 initialStringHeap, volatileRegs);
2175 return true;
// Forward declaration; the definition is not visible in this chunk.
// Copies |len| characters (of the given encoding) from |from| to |to|,
// using |byteOpScratch| as a scratch register for the per-unit copies.
2178 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
2179 Register len, Register byteOpScratch,
2180 CharEncoding encoding);
// Helper that emits the inline code to create a substring result (for the
// match-string loop in the RegExp stubs). Depending on the substring length
// it produces the empty atom, the base string itself, a static string, a
// (fat) inline string, or a dependent string; allocation failures jump to
// per-kind fallback labels that generateFallback() stitches up with VM calls.
2182 class CreateDependentString {
2183 CharEncoding encoding_;
// Register that receives the resulting JSString*.
2184 Register string_;
2185 Register temp1_;
2186 Register temp2_;
// Jumped to if even the fallback allocation fails.
2187 Label* failure_;
// The three allocation strategies that have OOL fallbacks.
2189 enum class FallbackKind : uint8_t {
2190 InlineString,
2191 FatInlineString,
2192 NotInlineString,
2193 Count
// Per-kind fallback entry labels and the labels to rejoin the main path.
2195 mozilla::EnumeratedArray<FallbackKind, FallbackKind::Count, Label> fallbacks_,
2196 joins_;
2198 public:
2199 CreateDependentString(CharEncoding encoding, Register string, Register temp1,
2200 Register temp2, Label* failure)
2201 : encoding_(encoding),
2202 string_(string),
2203 temp1_(temp1),
2204 temp2_(temp2),
2205 failure_(failure) {}
2207 Register string() const { return string_; }
2208 CharEncoding encoding() const { return encoding_; }
2210 // Generate code that creates DependentString.
2211 // Caller should call generateFallback after masm.ret(), to generate
2212 // fallback path.
2213 void generate(MacroAssembler& masm, const JSAtomState& names,
2214 CompileRuntime* runtime, Register base,
2215 BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
2216 gc::Heap initialStringHeap);
2218 // Generate fallback path for creating DependentString.
2219 void generateFallback(MacroAssembler& masm);
// Emit the main-path code that builds the substring [start, limit) of |base|.
// The result lands in string_; allocation failures branch to fallbacks_.
2222 void CreateDependentString::generate(MacroAssembler& masm,
2223 const JSAtomState& names,
2224 CompileRuntime* runtime, Register base,
2225 BaseIndex startIndexAddress,
2226 BaseIndex limitIndexAddress,
2227 gc::Heap initialStringHeap) {
2228 JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
2229 (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
// Allocate a string of the given kind and initialize its flags word; a
// failed allocation jumps to the matching fallback label.
2231 auto newGCString = [&](FallbackKind kind) {
2232 uint32_t flags = kind == FallbackKind::InlineString
2233 ? JSString::INIT_THIN_INLINE_FLAGS
2234 : kind == FallbackKind::FatInlineString
2235 ? JSString::INIT_FAT_INLINE_FLAGS
2236 : JSString::INIT_DEPENDENT_FLAGS;
2237 if (encoding_ == CharEncoding::Latin1) {
2238 flags |= JSString::LATIN1_CHARS_BIT;
2241 if (kind != FallbackKind::FatInlineString) {
2242 masm.newGCString(string_, temp2_, initialStringHeap, &fallbacks_[kind]);
2243 } else {
2244 masm.newGCFatInlineString(string_, temp2_, initialStringHeap,
2245 &fallbacks_[kind]);
2247 masm.bind(&joins_[kind]);
2248 masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
2251 // Compute the string length.
// temp1_ = limit - start; temp2_ keeps the start index for later use.
2252 masm.load32(startIndexAddress, temp2_);
2253 masm.load32(limitIndexAddress, temp1_);
2254 masm.sub32(temp2_, temp1_);
2256 Label done, nonEmpty;
2258 // Zero length matches use the empty string.
2259 masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
2260 masm.movePtr(ImmGCPtr(names.empty_), string_);
2261 masm.jump(&done);
2263 masm.bind(&nonEmpty);
2265 // Complete matches use the base string.
// i.e. start == 0 and limit == base->length(): reuse |base| directly.
2266 Label nonBaseStringMatch;
2267 masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
2268 masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
2269 temp1_, &nonBaseStringMatch);
2270 masm.movePtr(base, string_);
2271 masm.jump(&done);
2273 masm.bind(&nonBaseStringMatch);
2275 Label notInline;
2277 int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
2278 ? JSFatInlineString::MAX_LENGTH_LATIN1
2279 : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
2280 masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
2282 // Make a thin or fat inline string.
2283 Label stringAllocated, fatInline;
2285 int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
2286 ? JSThinInlineString::MAX_LENGTH_LATIN1
2287 : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
2288 masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
2289 &fatInline);
2290 if (encoding_ == CharEncoding::Latin1) {
2291 // One character Latin-1 strings can be loaded directly from the
2292 // static strings table.
2293 Label thinInline;
2294 masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
2296 static_assert(
2297 StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
2298 "Latin-1 strings can be loaded from static strings");
// Load the single char at |start| and look up the unit static string.
2300 masm.loadStringChars(base, temp1_, encoding_);
2301 masm.loadChar(temp1_, temp2_, temp1_, encoding_);
2303 masm.lookupStaticString(temp1_, string_, runtime->staticStrings());
2305 masm.jump(&done);
2307 masm.bind(&thinInline);
2310 newGCString(FallbackKind::InlineString);
2311 masm.jump(&stringAllocated);
2313 masm.bind(&fatInline);
2314 { newGCString(FallbackKind::FatInlineString); }
2315 masm.bind(&stringAllocated);
2317 masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
// Spill string_ and base so they can double as scratch during the copy.
2319 masm.push(string_);
2320 masm.push(base);
2322 MOZ_ASSERT(startIndexAddress.base == FramePointer,
2323 "startIndexAddress is still valid after stack pushes");
2325 // Load chars pointer for the new string.
2326 masm.loadInlineStringCharsForStore(string_, string_);
2328 // Load the source characters pointer.
2329 masm.loadStringChars(base, temp2_, encoding_);
2330 masm.load32(startIndexAddress, base);
2331 masm.addToCharPtr(temp2_, base, encoding_);
2333 CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);
2335 masm.pop(base);
2336 masm.pop(string_);
2338 masm.jump(&done);
2341 masm.bind(&notInline);
2344 // Make a dependent string.
2345 // Warning: string may be tenured (if the fallback case is hit), so
2346 // stores into it must be post barriered.
2347 newGCString(FallbackKind::NotInlineString);
2349 masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
// Point the dependent string's chars at base's chars + start.
2351 masm.loadNonInlineStringChars(base, temp1_, encoding_);
2352 masm.load32(startIndexAddress, temp2_);
2353 masm.addToCharPtr(temp1_, temp2_, encoding_);
2354 masm.storeNonInlineStringChars(temp1_, string_);
2355 masm.storeDependentStringBase(base, string_);
2356 masm.movePtr(base, temp1_);
2358 // Follow any base pointer if the input is itself a dependent string.
2359 // Watch for undepended strings, which have a base pointer but don't
2360 // actually share their characters with it.
2361 Label noBase;
2362 masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
2363 masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
2364 masm.branchTest32(Assembler::Zero, temp2_, Imm32(JSString::DEPENDENT_BIT),
2365 &noBase);
2366 masm.loadDependentStringBase(base, temp1_);
2367 masm.storeDependentStringBase(temp1_, string_);
2368 masm.bind(&noBase);
2370 // Post-barrier the base store, whether it was the direct or indirect
2371 // base (both will end up in temp1 here).
// Skip the barrier when string_ is in the nursery or the base is not.
2372 masm.branchPtrInNurseryChunk(Assembler::Equal, string_, temp2_, &done);
2373 masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp1_, temp2_, &done);
2375 LiveRegisterSet regsToSave(RegisterSet::Volatile());
2376 regsToSave.takeUnchecked(temp1_);
2377 regsToSave.takeUnchecked(temp2_);
2379 masm.PushRegsInMask(regsToSave);
2381 masm.mov(ImmPtr(runtime), temp1_);
2383 using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
2384 masm.setupUnalignedABICall(temp2_);
2385 masm.passABIArg(temp1_);
2386 masm.passABIArg(string_);
2387 masm.callWithABI<Fn, PostWriteBarrier>();
2389 masm.PopRegsInMask(regsToSave);
2392 masm.bind(&done);
// Emit the out-of-line allocation fallbacks for generate(): one VM-helper
// call per FallbackKind, each rejoining the main path at joins_[kind].
2395 void CreateDependentString::generateFallback(MacroAssembler& masm) {
2396 JitSpew(JitSpew_Codegen,
2397 "# Emitting CreateDependentString fallback (encoding=%s)",
2398 (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
// string_ receives the result and temp2_ is reinitialized by the join
// path, so neither needs to be preserved across the call.
2400 LiveRegisterSet regsToSave(RegisterSet::Volatile());
2401 regsToSave.takeUnchecked(string_);
2402 regsToSave.takeUnchecked(temp2_);
2404 for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
2405 masm.bind(&fallbacks_[kind]);
2407 masm.PushRegsInMask(regsToSave);
2409 using Fn = void* (*)(JSContext* cx);
2410 masm.setupUnalignedABICall(string_);
2411 masm.loadJSContext(string_);
2412 masm.passABIArg(string_);
2413 if (kind == FallbackKind::FatInlineString) {
2414 masm.callWithABI<Fn, AllocateFatInlineString>();
2415 } else {
2416 masm.callWithABI<Fn, AllocateDependentString>();
2418 masm.storeCallPointerResult(string_);
2420 masm.PopRegsInMask(regsToSave);
// Null result means the VM allocation also failed: give up entirely.
2422 masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);
2424 masm.jump(&joins_[kind]);
2428 // Generate the RegExpMatcher and RegExpExecMatch stubs. These are very similar,
2429 // but RegExpExecMatch also has to load and update .lastIndex for global/sticky
2430 // regular expressions.
// Returns nullptr on OOM. On the slow paths the stub returns |undefined| to
// signal its caller (the LIR visitor) to take the out-of-line VM call.
2431 static JitCode* GenerateRegExpMatchStubShared(JSContext* cx,
2432 gc::Heap initialStringHeap,
2433 bool isExecMatch) {
2434 if (isExecMatch) {
2435 JitSpew(JitSpew_Codegen, "# Emitting RegExpExecMatch stub");
2436 } else {
2437 JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
2440 // |initialStringHeap| could be stale after a GC.
2441 JS::AutoCheckCannotGC nogc(cx);
2443 Register regexp = RegExpMatcherRegExpReg;
2444 Register input = RegExpMatcherStringReg;
2445 Register lastIndex = RegExpMatcherLastIndexReg;
2446 ValueOperand result = JSReturnOperand;
2448 // We are free to clobber all registers, as LRegExpMatcher is a call
2449 // instruction.
2450 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2451 regs.take(input);
2452 regs.take(regexp);
2453 regs.take(lastIndex);
2455 Register temp1 = regs.takeAny();
2456 Register temp2 = regs.takeAny();
2457 Register temp3 = regs.takeAny();
2458 Register maybeTemp4 = InvalidReg;
2459 if (!regs.empty()) {
2460 // There are not enough registers on x86.
2461 maybeTemp4 = regs.takeAny();
2463 Register maybeTemp5 = InvalidReg;
2464 if (!regs.empty()) {
2465 // There are not enough registers on x86.
2466 maybeTemp5 = regs.takeAny();
2469 Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
2470 Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
2472 TempAllocator temp(&cx->tempLifoAlloc());
2473 JitContext jcx(cx);
2474 StackMacroAssembler masm(cx, temp);
2475 AutoCreatedBy acb(masm, "GenerateRegExpMatchStubShared");
2477 #ifdef JS_USE_LINK_REGISTER
2478 masm.pushReturnAddress();
2479 #endif
2480 masm.push(FramePointer);
2481 masm.moveStackPtrTo(FramePointer);
2483 Label notFoundZeroLastIndex;
2484 if (isExecMatch) {
2485 masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
2488 // The InputOutputData is placed above the frame pointer and return address on
2489 // the stack.
2490 int32_t inputOutputDataStartOffset = 2 * sizeof(void*);
2492 Label notFound, oolEntry;
2493 if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
2494 temp3, inputOutputDataStartOffset,
2495 initialStringHeap, &notFound, &oolEntry)) {
2496 return nullptr;
2499 // If a regexp has named captures, fall back to the OOL stub, which
2500 // will end up calling CreateRegExpMatchResults.
2501 Register shared = temp2;
2502 masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
2503 RegExpObject::SHARED_SLOT)),
2504 shared, JSVAL_TYPE_PRIVATE_GCTHING);
2505 masm.branchPtr(Assembler::NotEqual,
2506 Address(shared, RegExpShared::offsetOfGroupsTemplate()),
2507 ImmWord(0), &oolEntry);
2509 // Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
2510 masm.branchTest32(Assembler::NonZero,
2511 Address(shared, RegExpShared::offsetOfFlags()),
2512 Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);
2514 Address pairCountAddress =
2515 RegExpPairCountAddress(masm, inputOutputDataStartOffset);
2517 // Construct the result.
2518 Register object = temp1;
2520 // In most cases, the array will have just 1-2 elements, so we optimize for
2521 // that by emitting separate code paths for capacity 2/6/14 (= 4/8/16 slots
2522 // because two slots are used for the elements header).
2524 // Load the array length in temp2 and the shape in temp3.
2525 Label allocated;
2526 masm.load32(pairCountAddress, temp2);
2527 size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
2528 RegExpRealm::offsetOfNormalMatchResultShape();
2529 masm.loadGlobalObjectData(temp3);
2530 masm.loadPtr(Address(temp3, offset), temp3);
// Allocate the match-result array with a fixed element capacity; jumps to
// oolEntry if nursery allocation fails.
2532 auto emitAllocObject = [&](size_t elementCapacity) {
2533 gc::AllocKind kind = GuessArrayGCKind(elementCapacity);
2534 MOZ_ASSERT(CanChangeToBackgroundAllocKind(kind, &ArrayObject::class_));
2535 kind = ForegroundToBackgroundAllocKind(kind);
2537 #ifdef DEBUG
2538 // Assert all of the available slots are used for |elementCapacity|
2539 // elements.
2540 size_t usedSlots = ObjectElements::VALUES_PER_HEADER + elementCapacity;
2541 MOZ_ASSERT(usedSlots == GetGCKindSlots(kind));
2542 #endif
2544 constexpr size_t numUsedDynamicSlots =
2545 RegExpRealm::MatchResultObjectSlotSpan;
2546 constexpr size_t numDynamicSlots =
2547 RegExpRealm::MatchResultObjectNumDynamicSlots;
2548 constexpr size_t arrayLength = 1;
2549 masm.createArrayWithFixedElements(object, temp3, temp2, temp3,
2550 arrayLength, elementCapacity,
2551 numUsedDynamicSlots, numDynamicSlots,
2552 kind, gc::Heap::Default, &oolEntry);
// Pick the smallest capacity bucket (2, 6, or MaxPairCount) that fits.
2555 Label moreThan2;
2556 masm.branch32(Assembler::Above, temp2, Imm32(2), &moreThan2);
2557 emitAllocObject(2);
2558 masm.jump(&allocated);
2560 Label moreThan6;
2561 masm.bind(&moreThan2);
2562 masm.branch32(Assembler::Above, temp2, Imm32(6), &moreThan6);
2563 emitAllocObject(6);
2564 masm.jump(&allocated);
2566 masm.bind(&moreThan6);
2567 static_assert(RegExpObject::MaxPairCount == 14);
2568 emitAllocObject(RegExpObject::MaxPairCount);
2570 masm.bind(&allocated);
2573 // clang-format off
2575 * [SMDOC] Stack layout for the RegExpMatcher stub
2577 * +---------------+
2578 * FramePointer +-----> |Caller-FramePtr|
2579 * +---------------+
2580 * |Return-Address |
2581 * +---------------+
2582 * inputOutputDataStartOffset +-----> +---------------+
2583 * |InputOutputData|
2584 * +---------------+
2585 * +---------------+
2586 * | MatchPairs |
2587 * pairsCountAddress +-----------> count |
2588 * | pairs |
2589 * | |
2590 * +---------------+
2591 * pairsVectorStartOffset +-----> +---------------+
2592 * | MatchPair |
2593 * matchPairStart +------------> start | <-------+
2594 * matchPairLimit +------------> limit | | Reserved space for
2595 * +---------------+ | `RegExpObject::MaxPairCount`
2596 * . | MatchPair objects.
2597 * . |
2598 * . | `count` objects will be
2599 * +---------------+ | initialized and can be
2600 * | MatchPair | | accessed below.
2601 * | start | <-------+
2602 * | limit |
2603 * +---------------+
2605 // clang-format on
2607 static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
2608 "MatchPair consists of two int32 values representing the start"
2609 "and the end offset of the match");
2611 int32_t pairsVectorStartOffset =
2612 RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
2614 // Incremented by one below for each match pair.
2615 Register matchIndex = temp2;
2616 masm.move32(Imm32(0), matchIndex);
2618 // The element in which to store the result of the current match.
2619 size_t elementsOffset = NativeObject::offsetOfFixedElements();
2620 BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);
2622 // The current match pair's "start" and "limit" member.
2623 BaseIndex matchPairStart(FramePointer, matchIndex, TimesEight,
2624 pairsVectorStartOffset + MatchPair::offsetOfStart());
2625 BaseIndex matchPairLimit(FramePointer, matchIndex, TimesEight,
2626 pairsVectorStartOffset + MatchPair::offsetOfLimit());
2628 Label* depStrFailure = &oolEntry;
2629 Label restoreRegExpAndLastIndex;
// On register-starved targets, |regexp| and/or |lastIndex| are pushed and
// reused as temporaries; failures must then restore them first.
2631 Register temp4;
2632 if (maybeTemp4 == InvalidReg) {
2633 depStrFailure = &restoreRegExpAndLastIndex;
2635 // We don't have enough registers for a fourth temporary. Reuse |regexp|
2636 // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
2637 masm.push(regexp);
2638 temp4 = regexp;
2639 } else {
2640 temp4 = maybeTemp4;
2643 Register temp5;
2644 if (maybeTemp5 == InvalidReg) {
2645 depStrFailure = &restoreRegExpAndLastIndex;
2647 // We don't have enough registers for a fifth temporary. Reuse |lastIndex|
2648 // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
2649 masm.push(lastIndex);
2650 temp5 = lastIndex;
2651 } else {
2652 temp5 = maybeTemp5;
2655 auto maybeRestoreRegExpAndLastIndex = [&]() {
2656 if (maybeTemp5 == InvalidReg) {
2657 masm.pop(lastIndex);
2659 if (maybeTemp4 == InvalidReg) {
2660 masm.pop(regexp);
2664 // Loop to construct the match strings. There are two different loops,
2665 // depending on whether the input is a Two-Byte or a Latin-1 string.
2666 CreateDependentString depStrs[]{
2667 {CharEncoding::TwoByte, temp3, temp4, temp5, depStrFailure},
2668 {CharEncoding::Latin1, temp3, temp4, temp5, depStrFailure},
2672 Label isLatin1, done;
2673 masm.branchLatin1String(input, &isLatin1);
2675 for (auto& depStr : depStrs) {
2676 if (depStr.encoding() == CharEncoding::Latin1) {
2677 masm.bind(&isLatin1);
2680 Label matchLoop;
2681 masm.bind(&matchLoop);
2683 static_assert(MatchPair::NoMatch == -1,
2684 "MatchPair::start is negative if no match was found");
2686 Label isUndefined, storeDone;
2687 masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
2688 &isUndefined);
2690 depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
2691 input, matchPairStart, matchPairLimit,
2692 initialStringHeap);
2694 // Storing into nursery-allocated results object's elements; no post
2695 // barrier.
2696 masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
2697 masm.jump(&storeDone);
2699 masm.bind(&isUndefined);
2700 { masm.storeValue(UndefinedValue(), objectMatchElement); }
2701 masm.bind(&storeDone);
2703 masm.add32(Imm32(1), matchIndex);
2704 masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
2705 &done);
2706 masm.jump(&matchLoop);
2709 #ifdef DEBUG
2710 masm.assumeUnreachable("The match string loop doesn't fall through.");
2711 #endif
2713 masm.bind(&done);
2716 maybeRestoreRegExpAndLastIndex();
2718 // Fill in the rest of the output object.
2719 masm.store32(
2720 matchIndex,
2721 Address(object,
2722 elementsOffset + ObjectElements::offsetOfInitializedLength()));
2723 masm.store32(
2724 matchIndex,
2725 Address(object, elementsOffset + ObjectElements::offsetOfLength()));
2727 Address firstMatchPairStartAddress(
2728 FramePointer, pairsVectorStartOffset + MatchPair::offsetOfStart());
2729 Address firstMatchPairLimitAddress(
2730 FramePointer, pairsVectorStartOffset + MatchPair::offsetOfLimit());
2732 static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
2733 "First slot holds the 'index' property");
2734 static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
2735 "Second slot holds the 'input' property");
2737 masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
// 'index' property: start offset of the whole-match pair.
2739 masm.load32(firstMatchPairStartAddress, temp3);
2740 masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));
2742 // No post barrier needed (address is within nursery object.)
2743 masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));
2745 // For the ExecMatch stub, if the regular expression is global or sticky, we
2746 // have to update its .lastIndex slot.
2747 if (isExecMatch) {
2748 MOZ_ASSERT(object != lastIndex);
2749 Label notGlobalOrSticky;
2750 masm.branchTest32(Assembler::Zero, flagsSlot,
2751 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
2752 &notGlobalOrSticky);
2753 masm.load32(firstMatchPairLimitAddress, lastIndex);
2754 masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
2755 masm.bind(&notGlobalOrSticky);
2758 // All done!
2759 masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
2760 masm.pop(FramePointer);
2761 masm.ret();
// No-match path: ExecMatch additionally resets .lastIndex to 0 for
// global/sticky regexps, then both stubs return null.
2763 masm.bind(&notFound);
2764 if (isExecMatch) {
2765 Label notGlobalOrSticky;
2766 masm.branchTest32(Assembler::Zero, flagsSlot,
2767 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
2768 &notGlobalOrSticky);
2769 masm.bind(&notFoundZeroLastIndex);
2770 masm.storeValue(Int32Value(0), lastIndexSlot);
2771 masm.bind(&notGlobalOrSticky);
2773 masm.moveValue(NullValue(), result);
2774 masm.pop(FramePointer);
2775 masm.ret();
2777 // Fallback paths for CreateDependentString.
2778 for (auto& depStr : depStrs) {
2779 depStr.generateFallback(masm);
2782 // Fall-through to the ool entry after restoring the registers.
2783 masm.bind(&restoreRegExpAndLastIndex);
2784 maybeRestoreRegExpAndLastIndex();
2786 // Use an undefined value to signal to the caller that the OOL stub needs to
2787 // be called.
2788 masm.bind(&oolEntry);
2789 masm.moveValue(UndefinedValue(), result);
2790 masm.pop(FramePointer);
2791 masm.ret();
2793 Linker linker(masm);
2794 JitCode* code = linker.newCode(cx, CodeKind::Other);
2795 if (!code) {
2796 return nullptr;
2799 const char* name = isExecMatch ? "RegExpExecMatchStub" : "RegExpMatcherStub";
2800 CollectPerfSpewerJitCodeProfile(code, name);
2801 #ifdef MOZ_VTUNE
2802 vtune::MarkStub(code, name);
2803 #endif
2805 return code;
// Thin wrapper: build the matcher variant of the shared RegExp match stub.
2808 JitCode* JitZone::generateRegExpMatcherStub(JSContext* cx) {
2809 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2810 /* isExecMatch = */ false);
// Thin wrapper: build the exec-match variant (also updates .lastIndex).
2813 JitCode* JitZone::generateRegExpExecMatchStub(JSContext* cx) {
2814 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2815 /* isExecMatch = */ true);
// Out-of-line path for LRegExpMatcher: taken when the inline stub returns
// |undefined|, dispatching to visitOutOfLineRegExpMatcher for a VM call.
2818 class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator> {
2819 LRegExpMatcher* lir_;
2821 public:
2822 explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) {}
2824 void accept(CodeGenerator* codegen) override {
2825 codegen->visitOutOfLineRegExpMatcher(this);
2828 LRegExpMatcher* lir() const { return lir_; }
// Slow path for LRegExpMatcher: call RegExpMatcherRaw in the VM with the
// MatchPairs that the inline stub already reserved on the stack.
2831 void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) {
2832 LRegExpMatcher* lir = ool->lir();
2833 Register lastIndex = ToRegister(lir->lastIndex());
2834 Register input = ToRegister(lir->string());
2835 Register regexp = ToRegister(lir->regexp());
// Grab any scratch register that isn't one of the three fixed inputs.
2837 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2838 regs.take(lastIndex);
2839 regs.take(input);
2840 regs.take(regexp);
2841 Register temp = regs.takeAny();
// The MatchPairs sit just above the InputOutputData on the stack.
2843 masm.computeEffectiveAddress(
2844 Address(masm.getStackPointer(), InputOutputDataSize), temp);
2846 pushArg(temp);
2847 pushArg(lastIndex);
2848 pushArg(input);
2849 pushArg(regexp);
2851 // We are not using oolCallVM because we are in a Call, and the live
2852 // registers are already saved by the register allocator.
2853 using Fn =
2854 bool (*)(JSContext*, HandleObject regexp, HandleString input,
2855 int32_t lastIndex, MatchPairs* pairs, MutableHandleValue output);
2856 callVM<Fn, RegExpMatcherRaw>(lir);
2858 masm.jump(ool->rejoin());
// Emit the inline call to the per-zone RegExpMatcher stub, falling back to
// the OOL VM call when the stub reports |undefined|.
2861 void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
// The stub hard-codes these registers; the register allocator must have
// pinned the operands accordingly.
2862 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
2863 MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
2864 MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
2865 MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
// Inputs must not alias the return value registers.
2867 #if defined(JS_NUNBOX32)
2868 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
2869 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
2870 static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
2871 static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
2872 static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
2873 static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
2874 #elif defined(JS_PUNBOX64)
2875 static_assert(RegExpMatcherRegExpReg != JSReturnReg);
2876 static_assert(RegExpMatcherStringReg != JSReturnReg);
2877 static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
2878 #endif
// Reserve the InputOutputData/MatchPairs scratch area the stub expects.
2880 masm.reserveStack(RegExpReservedStack);
2882 OutOfLineRegExpMatcher* ool = new (alloc()) OutOfLineRegExpMatcher(lir);
2883 addOutOfLineCode(ool, lir->mir());
2885 const JitZone* jitZone = gen->realm->zone()->jitZone();
2886 JitCode* regExpMatcherStub =
2887 jitZone->regExpMatcherStubNoBarrier(&zoneStubsToReadBarrier_);
2888 masm.call(regExpMatcherStub);
// |undefined| is the stub's "take the slow path" sentinel.
2889 masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
2890 masm.bind(ool->rejoin());
2892 masm.freeStack(RegExpReservedStack);
// Out-of-line fallback for LRegExpExecMatch: taken when the fast-path exec
// stub returns the Undefined sentinel, and performs the exec with a VM call.
class OutOfLineRegExpExecMatch : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpExecMatch* lir_;

 public:
  explicit OutOfLineRegExpExecMatch(LRegExpExecMatch* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpExecMatch(this);
  }

  LRegExpExecMatch* lir() const { return lir_; }
};
// Slow path for LRegExpExecMatch: call RegExpBuiltinExecMatchFromJit in the
// VM with a pointer to the stack-reserved MatchPairs.
void CodeGenerator::visitOutOfLineRegExpExecMatch(
    OutOfLineRegExpExecMatch* ool) {
  LRegExpExecMatch* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Pick any scratch register that doesn't alias the two inputs.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // The MatchPairs sit just above the InputOutputData in the reserved stack
  // space (see the [SMDOC] stack layout in generateRegExpSearcherStub).
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  pushArg(temp);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
               MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpBuiltinExecMatchFromJit>(lir);
  masm.jump(ool->rejoin());
}
// Fast path for LRegExpExecMatch: call the per-Zone exec-match stub; fall back
// to the out-of-line VM call if it returns the Undefined sentinel.
void CodeGenerator::visitRegExpExecMatch(LRegExpExecMatch* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // Input registers must not alias the return operand (tested below).
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
#endif

  // Reserve space for the InputOutputData and MatchPairs used by the stub.
  masm.reserveStack(RegExpReservedStack);

  auto* ool = new (alloc()) OutOfLineRegExpExecMatch(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpExecMatchStub =
      jitZone->regExpExecMatchStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpExecMatchStub);
  // Undefined means the stub could not handle the exec; retry in C++.
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());

  masm.bind(ool->rejoin());
  masm.freeStack(RegExpReservedStack);
}
// Generate the per-Zone RegExpSearcher stub. On a successful match it returns
// the match's start index and stores the match limit into
// cx->regExpSearcherLastLimit; on no-match it returns
// RegExpSearcherResultNotFound; on failure (retry in C++) it returns
// RegExpSearcherResultFailed.
JitCode* JitZone::generateRegExpSearcherStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");

  Register regexp = RegExpSearcherRegExpReg;
  Register input = RegExpSearcherStringReg;
  Register lastIndex = RegExpSearcherLastIndexReg;
  Register result = ReturnReg;

  // We are free to clobber all registers, as LRegExpSearcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpSearcherStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

#ifdef DEBUG
  // Store sentinel value to cx->regExpSearcherLastLimit.
  // See comment in RegExpSearcherImpl.
  masm.loadJSContext(temp1);
  masm.store32(Imm32(RegExpSearcherLastLimitSentinel),
               Address(temp1, JSContext::offsetOfRegExpSearcherLastLimit()));
#endif

  // The InputOutputData is placed above the frame pointer and return address on
  // the stack.
  int32_t inputOutputDataStartOffset = 2 * sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // clang-format off
    /*
     * [SMDOC] Stack layout for the RegExpSearcher stub
     *
     *                                    +---------------+
     *               FramePointer +-----> |Caller-FramePtr|
     *                                    +---------------+
     *                                    |Return-Address |
     *                                    +---------------+
     * inputOutputDataStartOffset +-----> +---------------+
     *                                    |InputOutputData|
     *                                    +---------------+
     *                                    +---------------+
     *                                    |  MatchPairs   |
     *                                    |       count   |
     *                                    |       pairs   |
     *                                    |               |
     *                                    +---------------+
     *     pairsVectorStartOffset +-----> +---------------+
     *                                    |   MatchPair   |
     *             matchPairStart +------------>  start   |  <-------+
     *             matchPairLimit +------------>  limit   |          | Reserved space for
     *                                    +---------------+          | `RegExpObject::MaxPairCount`
     *                                           .                   | MatchPair objects.
     *                                           .                   |
     *                                           .                   | Only a single object will
     *                                    +---------------+          | be initialized and can be
     *                                    |   MatchPair   |          | accessed below.
     *                                    |     start     |  <-------+
     *                                    |     limit     |
     *                                    +---------------+
     */
  // clang-format on

  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairStart(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfStart());
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Store match limit to cx->regExpSearcherLastLimit and return the index.
  // |input| is dead at this point and reused as a scratch for the context.
  masm.load32(matchPairLimit, result);
  masm.loadJSContext(input);
  masm.store32(result,
               Address(input, JSContext::offsetOfRegExpSearcherLastLimit()));
  masm.load32(matchPairStart, result);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&notFound);
  masm.move32(Imm32(RegExpSearcherResultNotFound), result);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpSearcherResultFailed), result);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpSearcherStub");
#endif

  return code;
}
// Out-of-line fallback for LRegExpSearcher: taken when the searcher stub
// returns RegExpSearcherResultFailed, and redoes the search via a VM call.
class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpSearcher* lir_;

 public:
  explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpSearcher(this);
  }

  LRegExpSearcher* lir() const { return lir_; }
};
// Slow path for LRegExpSearcher: call RegExpSearcherRaw in the VM with a
// pointer to the stack-reserved MatchPairs.
void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) {
  LRegExpSearcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Pick any scratch register that doesn't alias the three inputs.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // The MatchPairs sit just above the InputOutputData in the reserved stack
  // space (see the [SMDOC] stack layout in generateRegExpSearcherStub).
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call, and the live
  // registers are already saved by the register allocator.
  using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
                      int32_t lastIndex, MatchPairs* pairs, int32_t* result);
  callVM<Fn, RegExpSearcherRaw>(lir);

  masm.jump(ool->rejoin());
}
// Fast path for LRegExpSearcher: call the per-Zone searcher stub; fall back to
// the out-of-line VM call if it returns RegExpSearcherResultFailed.
void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpSearcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpSearcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpSearcherLastIndexReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // Input registers must not alias the result register (tested below).
  static_assert(RegExpSearcherRegExpReg != ReturnReg);
  static_assert(RegExpSearcherStringReg != ReturnReg);
  static_assert(RegExpSearcherLastIndexReg != ReturnReg);

  // Reserve space for the InputOutputData and MatchPairs used by the stub.
  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpSearcher* ool = new (alloc()) OutOfLineRegExpSearcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpSearcherStub =
      jitZone->regExpSearcherStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpSearcherStub);
  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
                ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
3156 void CodeGenerator::visitRegExpSearcherLastLimit(
3157 LRegExpSearcherLastLimit* lir) {
3158 Register result = ToRegister(lir->output());
3159 Register scratch = ToRegister(lir->temp0());
3161 masm.loadAndClearRegExpSearcherLastLimit(result, scratch);
// Generate the per-Zone RegExpExecTest stub. Returns 1/0 for found/not-found
// (updating the regexp's .lastIndex slot when the global or sticky flag is
// set), or RegExpExecTestResultFailed when the caller must retry in C++.
JitCode* JitZone::generateRegExpExecTestStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpExecTest stub");

  Register regexp = RegExpExecTestRegExpReg;
  Register input = RegExpExecTestStringReg;
  Register result = ReturnReg;

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpExecTestStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // We are free to clobber all registers, as LRegExpExecTest is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);

  // Ensure lastIndex != result.
  regs.take(result);
  Register lastIndex = regs.takeAny();
  regs.add(result);
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());

  masm.reserveStack(RegExpReservedStack);

  // Load lastIndex and skip RegExp execution if needed.
  Label notFoundZeroLastIndex;
  masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);

  // In visitRegExpMatcher and visitRegExpSearcher, we reserve stack space
  // before calling the stub. For RegExpExecTest we call the stub before
  // reserving stack space, so the offset of the InputOutputData relative to the
  // frame pointer is negative.
  constexpr int32_t inputOutputDataStartOffset = -int32_t(RegExpReservedStack);

  // On ARM64, load/store instructions can encode an immediate offset in the
  // range [-256, 4095]. If we ever fail this assertion, it would be more
  // efficient to store the data above the frame pointer similar to
  // RegExpMatcher and RegExpSearcher.
  static_assert(inputOutputDataStartOffset >= -256);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // Set `result` to true/false to indicate found/not-found, or to
  // RegExpExecTestResultFailed if we have to retry in C++. If the regular
  // expression is global or sticky, we also have to update its .lastIndex slot.

  Label done;
  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Found: result = 1; update .lastIndex to the match limit if global/sticky.
  masm.move32(Imm32(1), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.load32(matchPairLimit, lastIndex);
  masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
  masm.jump(&done);

  // Not found: result = 0; reset .lastIndex to 0 if global/sticky.
  masm.bind(&notFound);
  masm.move32(Imm32(0), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  // Execution was skipped entirely because of lastIndex; still reset it.
  masm.bind(&notFoundZeroLastIndex);
  masm.move32(Imm32(0), result);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpExecTestResultFailed), result);

  masm.bind(&done);
  masm.freeStack(RegExpReservedStack);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpExecTestStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpExecTestStub");
#endif

  return code;
}
// Out-of-line fallback for LRegExpExecTest: taken when the exec-test stub
// returns RegExpExecTestResultFailed, and redoes the test via a VM call.
class OutOfLineRegExpExecTest : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpExecTest* lir_;

 public:
  explicit OutOfLineRegExpExecTest(LRegExpExecTest* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpExecTest(this);
  }

  LRegExpExecTest* lir() const { return lir_; }
};
// Slow path for LRegExpExecTest: call RegExpBuiltinExecTestFromJit in the VM.
void CodeGenerator::visitOutOfLineRegExpExecTest(OutOfLineRegExpExecTest* ool) {
  LRegExpExecTest* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn = bool (*)(JSContext* cx, Handle<RegExpObject*> regexp,
                      HandleString input, bool* result);
  callVM<Fn, RegExpBuiltinExecTestFromJit>(lir);

  masm.jump(ool->rejoin());
}
// Fast path for LRegExpExecTest: call the per-Zone exec-test stub; fall back
// to the out-of-line VM call if it returns RegExpExecTestResultFailed.
void CodeGenerator::visitRegExpExecTest(LRegExpExecTest* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpExecTestRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpExecTestStringReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // Input registers must not alias the result register (tested below).
  static_assert(RegExpExecTestRegExpReg != ReturnReg);
  static_assert(RegExpExecTestStringReg != ReturnReg);

  auto* ool = new (alloc()) OutOfLineRegExpExecTest(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpExecTestStub =
      jitZone->regExpExecTestStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpExecTestStub);

  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpExecTestResultFailed),
                ool->entry());

  masm.bind(ool->rejoin());
}
3329 void CodeGenerator::visitRegExpHasCaptureGroups(LRegExpHasCaptureGroups* ins) {
3330 Register regexp = ToRegister(ins->regexp());
3331 Register input = ToRegister(ins->input());
3332 Register output = ToRegister(ins->output());
3334 using Fn =
3335 bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
3336 auto* ool = oolCallVM<Fn, js::RegExpHasCaptureGroups>(
3337 ins, ArgList(regexp, input), StoreRegisterTo(output));
3339 // Load RegExpShared in |output|.
3340 Label vmCall;
3341 masm.loadParsedRegExpShared(regexp, output, ool->entry());
3343 // Return true iff pairCount > 1.
3344 Label returnTrue;
3345 masm.branch32(Assembler::Above,
3346 Address(output, RegExpShared::offsetOfPairCount()), Imm32(1),
3347 &returnTrue);
3348 masm.move32(Imm32(0), output);
3349 masm.jump(ool->rejoin());
3351 masm.bind(&returnTrue);
3352 masm.move32(Imm32(1), output);
3354 masm.bind(ool->rejoin());
// Out-of-line fallback for LRegExpPrototypeOptimizable: answers the question
// with an ABI call when the inline shape checks fail.
class OutOfLineRegExpPrototypeOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpPrototypeOptimizable* ins_;

 public:
  explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
  }

  LRegExpPrototypeOptimizable* ins() const { return ins_; }
};
// Fast path: inline check whether RegExp.prototype is in its optimizable
// (unmodified) state; result is 1 on the fast path, otherwise computed OOL.
void CodeGenerator::visitRegExpPrototypeOptimizable(
    LRegExpPrototypeOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  OutOfLineRegExpPrototypeOptimizable* ool =
      new (alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  const GlobalObject* global = gen->realm->maybeGlobal();
  MOZ_ASSERT(global);
  masm.branchIfNotRegExpPrototypeOptimizable(object, temp, global,
                                             ool->entry());
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}
// Slow path: ask RegExpPrototypeOptimizableRaw via an ABI call. |output|
// doubles as the JSContext scratch register before receiving the result.
void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(
    OutOfLineRegExpPrototypeOptimizable* ool) {
  LRegExpPrototypeOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  saveVolatile(output);

  using Fn = bool (*)(JSContext* cx, JSObject* proto);
  masm.setupAlignedABICall();
  masm.loadJSContext(output);
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}
// Out-of-line fallback for LRegExpInstanceOptimizable: answers the question
// with an ABI call when the inline checks fail.
class OutOfLineRegExpInstanceOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpInstanceOptimizable* ins_;

 public:
  explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpInstanceOptimizable(this);
  }

  LRegExpInstanceOptimizable* ins() const { return ins_; }
};
// Fast path: inline check whether a regexp instance is in its optimizable
// (unmodified) state; result is 1 on the fast path, otherwise computed OOL.
void CodeGenerator::visitRegExpInstanceOptimizable(
    LRegExpInstanceOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  OutOfLineRegExpInstanceOptimizable* ool =
      new (alloc()) OutOfLineRegExpInstanceOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  const GlobalObject* global = gen->realm->maybeGlobal();
  MOZ_ASSERT(global);
  masm.branchIfNotRegExpInstanceOptimizable(object, temp, global, ool->entry());
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}
// Slow path: ask RegExpInstanceOptimizableRaw via an ABI call. |output|
// doubles as the JSContext scratch register before receiving the result.
void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(
    OutOfLineRegExpInstanceOptimizable* ool) {
  LRegExpInstanceOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register proto = ToRegister(ins->proto());
  Register output = ToRegister(ins->output());

  saveVolatile(output);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
  masm.setupAlignedABICall();
  masm.loadJSContext(output);
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.passABIArg(proto);
  masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}
// Emit a linear scan over |str|'s characters (already known to be linear with
// the given encoding) that leaves the index of the first '$' in |output|, or
// -1 if no '$' is found. |len| must be > 0 (asserted in DEBUG builds).
static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
                                 Register len, Register temp0, Register temp1,
                                 Register output, CharEncoding encoding) {
#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
  masm.assumeUnreachable("Length should be greater than 0.");
  masm.bind(&ok);
#endif

  Register chars = temp0;
  masm.loadStringChars(str, chars, encoding);

  // |output| is the loop index, starting at 0.
  masm.move32(Imm32(0), output);

  Label start, done;
  masm.bind(&start);

  Register currentChar = temp1;
  masm.loadChar(chars, output, currentChar, encoding);
  masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);

  masm.add32(Imm32(1), output);
  masm.branch32(Assembler::NotEqual, output, len, &start);

  // Fell off the end without finding '$'.
  masm.move32(Imm32(-1), output);

  masm.bind(&done);
}
// Compute the index of the first '$' in a string, dispatching on the string's
// character encoding. Ropes are handled with a VM call since their characters
// aren't contiguous.
void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
  Register str = ToRegister(ins->str());
  Register output = ToRegister(ins->output());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register len = ToRegister(ins->temp2());

  using Fn = bool (*)(JSContext*, JSString*, int32_t*);
  OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
      ins, ArgList(str), StoreRegisterTo(output));

  masm.branchIfRope(str, ool->entry());
  masm.loadStringLength(str, len);

  Label isLatin1, done;
  masm.branchLatin1String(str, &isLatin1);
  {
    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                         CharEncoding::TwoByte);
    masm.jump(&done);
  }
  masm.bind(&isLatin1);
  {
    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                         CharEncoding::Latin1);
  }
  masm.bind(&done);
  masm.bind(ool->rejoin());
}
3526 void CodeGenerator::visitStringReplace(LStringReplace* lir) {
3527 if (lir->replacement()->isConstant()) {
3528 pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
3529 } else {
3530 pushArg(ToRegister(lir->replacement()));
3533 if (lir->pattern()->isConstant()) {
3534 pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
3535 } else {
3536 pushArg(ToRegister(lir->pattern()));
3539 if (lir->string()->isConstant()) {
3540 pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
3541 } else {
3542 pushArg(ToRegister(lir->string()));
3545 using Fn =
3546 JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
3547 if (lir->mir()->isFlatReplacement()) {
3548 callVM<Fn, StringFlatReplaceString>(lir);
3549 } else {
3550 callVM<Fn, StringReplace>(lir);
// Attach an IonBinaryArithIC for a value-producing binary op; the JSOp is
// recovered from the bytecode at the instruction's resume point.
void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::LhsIndex));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::RhsIndex));
  ValueOperand output = ToOutValue(lir);

  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOp::Add:
    case JSOp::Sub:
    case JSOp::Mul:
    case JSOp::Div:
    case JSOp::Mod:
    case JSOp::Pow:
    case JSOp::BitAnd:
    case JSOp::BitOr:
    case JSOp::BitXor:
    case JSOp::Lsh:
    case JSOp::Rsh:
    case JSOp::Ursh: {
      IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }
    default:
      MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
  }
}
// Attach an IonCompareIC for a boolean-producing comparison op; the JSOp is
// recovered from the bytecode at the instruction's resume point.
void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::LhsIndex));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::RhsIndex));
  Register output = ToRegister(lir->output());

  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOp::Lt:
    case JSOp::Le:
    case JSOp::Gt:
    case JSOp::Ge:
    case JSOp::Eq:
    case JSOp::Ne:
    case JSOp::StrictEq:
    case JSOp::StrictNe: {
      IonCompareIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }
    default:
      MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
  }
}
// Attach an IonUnaryArithIC for a unary arithmetic op.
void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister input =
      TypedOrValueRegister(ToValue(lir, LUnaryCache::InputIndex));
  ValueOperand output = ToOutValue(lir);

  IonUnaryArithIC ic(liveRegs, input, output);
  addIC(lir, allocateIC(ic));
}
// Emit the VM call that returns (creating if necessary) the import.meta
// object for this module.
void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
  pushArg(ImmPtr(lir->mir()->module()));

  using Fn = JSObject* (*)(JSContext*, HandleObject);
  callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
}
// Emit the VM call implementing dynamic import(). Arguments are pushed in
// reverse order of the callee's signature.
void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
  pushArg(ToValue(lir, LDynamicImport::OptionsIndex));
  pushArg(ToValue(lir, LDynamicImport::SpecifierIndex));
  pushArg(ImmGCPtr(current->mir()->info().script()));

  using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
  callVM<Fn, js::StartDynamicModuleImport>(lir);
}
// Clone a function object from its template, filling in the environment
// chain. Falls back to js::Lambda in the VM when nursery allocation fails.
void CodeGenerator::visitLambda(LLambda* lir) {
  Register envChain = ToRegister(lir->environmentChain());
  Register output = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  JSFunction* fun = lir->mir()->templateFunction();

  using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
  OutOfLineCode* ool = oolCallVM<Fn, js::Lambda>(
      lir, ArgList(ImmGCPtr(fun), envChain), StoreRegisterTo(output));

  TemplateObject templateObject(fun);
  masm.createGCObject(output, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.storeValue(JSVAL_TYPE_OBJECT, envChain,
                  Address(output, JSFunction::offsetOfEnvironment()));
  // No post barrier needed because output is guaranteed to be allocated in
  // the nursery.

  masm.bind(ool->rejoin());
}
// Emit the VM call that clones a function with an explicit prototype.
// Arguments are pushed in reverse order of the callee's signature.
void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
  Register envChain = ToRegister(lir->envChain());
  Register prototype = ToRegister(lir->prototype());

  pushArg(prototype);
  pushArg(envChain);
  pushArg(ImmGCPtr(lir->mir()->function()));

  using Fn =
      JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
  callVM<Fn, js::FunWithProtoOperation>(lir);
}
// Emit the VM call that assigns a computed name (with optional get/set
// prefix) to a function. Arguments are pushed in reverse order.
void CodeGenerator::visitSetFunName(LSetFunName* lir) {
  pushArg(Imm32(lir->mir()->prefixKind()));
  pushArg(ToValue(lir, LSetFunName::NameIndex));
  pushArg(ToRegister(lir->fun()));

  using Fn =
      bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
  callVM<Fn, js::SetFunctionName>(lir);
}
// Record an OSI (on-stack invalidation) point and link it to the preceding
// instruction's safepoint so invalidation can patch the call site.
void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
  // Note: markOsiPoint ensures enough space exists between the last
  // LOsiPoint and this one to patch adjacent call instructions.

  MOZ_ASSERT(masm.framePushed() == frameSize());

  uint32_t osiCallPointOffset = markOsiPoint(lir);

  LSafepoint* safepoint = lir->associatedSafepoint();
  MOZ_ASSERT(!safepoint->osiCallPointOffset());
  safepoint->setOsiCallPointOffset(osiCallPointOffset);

#ifdef DEBUG
  // There should be no movegroups or other instructions between
  // an instruction and its OsiPoint. This is necessary because
  // we use the OsiPoint's snapshot from within VM calls.
  for (LInstructionReverseIterator iter(current->rbegin(lir));
       iter != current->rend(); iter++) {
    if (*iter == lir) {
      continue;
    }
    MOZ_ASSERT(!iter->isMoveGroup());
    MOZ_ASSERT(iter->safepoint() == safepoint);
    break;
  }
#endif

#ifdef CHECK_OSIPOINT_REGISTERS
  if (shouldVerifyOsiPointRegs(safepoint)) {
    verifyOsiPointRegs(safepoint);
  }
#endif
}
// Phis are resolved by the register allocator into move groups; none should
// survive to code generation.
void CodeGenerator::visitPhi(LPhi* lir) {
  MOZ_CRASH("Unexpected LPhi in CodeGenerator");
}
3724 void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }
// Emit a table switch on an int32 (or double-coerced-to-int32) index.
void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
  const LAllocation* temp;

  if (mir->getOperand(0)->type() != MIRType::Int32) {
    temp = ins->tempInt()->output();

    // The input is a double, so try and convert it to an integer.
    // If it does not fit in an integer, take the default case.
    masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp),
                              defaultcase, false);
  } else {
    temp = ins->index();
  }

  emitTableSwitchDispatch(mir, ToRegister(temp),
                          ToRegisterOrInvalid(ins->tempPointer()));
}
// Emit a table switch on a boxed Value index: non-numbers take the default
// case; doubles are converted to int32 (default case if inexact).
void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  Register index = ToRegister(ins->tempInt());
  ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
  Register tag = masm.extractTag(value, index);
  masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);

  Label unboxInt, isInt;
  masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
  {
    FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
    masm.unboxDouble(value, floatIndex);
    masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
    masm.jump(&isInt);
  }

  masm.bind(&unboxInt);
  masm.unboxInt32(value, index);

  masm.bind(&isInt);

  emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
}
// Parameters live in the caller-pushed frame; nothing to emit.
void CodeGenerator::visitParameter(LParameter* lir) {}
// Load the callee function from the frame's callee token.
void CodeGenerator::visitCallee(LCallee* lir) {
  Register callee = ToRegister(lir->output());
  Address ptr(FramePointer, JitFrameLayout::offsetOfCalleeToken());

  masm.loadFunctionFromCalleeToken(ptr, callee);
}
// Produce 1 if the current call is a construct call, 0 otherwise, by masking
// the constructing bit out of the frame's callee token.
void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
  Register output = ToRegister(lir->output());
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.loadPtr(calleeToken, output);

  // We must be inside a function.
  MOZ_ASSERT(current->mir()->info().script()->function());

  // The low bit indicates whether this call is constructing, just clear the
  // other bits.
  static_assert(CalleeToken_Function == 0x0,
                "CalleeTokenTag value should match");
  static_assert(CalleeToken_FunctionConstructing == 0x1,
                "CalleeTokenTag value should match");
  masm.andPtr(Imm32(0x1), output);
}
// Jump to the shared return label (epilogue). The return value is already in
// the JS return register(s); this only verifies that in DEBUG builds.
void CodeGenerator::visitReturn(LReturn* lir) {
#if defined(JS_NUNBOX32)
  DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
  DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
  MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
  MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  DebugOnly<LAllocation*> result = lir->getOperand(0);
  MOZ_ASSERT(ToRegister(result) == JSReturnReg);
#endif
  // Don't emit a jump to the return label if this is the last block, as
  // it'll fall through to the epilogue.
  //
  // This is -not- true however for a Generator-return, which may appear in the
  // middle of the last block, so we should always emit the jump there.
  if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
    masm.jump(&returnLabel_);
  }
}
// Emit the on-stack-replacement entry point: record its code offset and set
// up the Ion frame on top of the BaselineFrame we are entering from.
void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
  Register temp = ToRegister(lir->temp());

  // Remember the OSR entry offset into the code buffer.
  masm.flushBuffer();
  setOsrEntryOffset(masm.size());

  // Allocate the full frame for this function
  // Note we have a new entry here. So we reset MacroAssembler::framePushed()
  // to 0, before reserving the stack.
  MOZ_ASSERT(masm.framePushed() == frameSize());
  masm.setFramePushed(0);

  // The Baseline code ensured both the frame pointer and stack pointer point to
  // the JitFrameLayout on the stack.

  // If profiling, save the current frame pointer to a per-thread global field.
  if (isProfilerInstrumentationEnabled()) {
    masm.profilerEnterFrame(FramePointer, temp);
  }

  masm.reserveStack(frameSize());
  MOZ_ASSERT(masm.framePushed() == frameSize());

  // Ensure that the Ion frame is properly aligned.
  masm.assertStackAlignment(JitStackAlignment, 0);
}
3846 void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
3847 const LAllocation* frame = lir->getOperand(0);
3848 const LDefinition* object = lir->getDef(0);
3850 const ptrdiff_t frameOffset =
3851 BaselineFrame::reverseOffsetOfEnvironmentChain();
3853 masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
3856 void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
3857 const LAllocation* frame = lir->getOperand(0);
3858 const LDefinition* object = lir->getDef(0);
3860 const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();
3862 masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
3865 void CodeGenerator::visitOsrValue(LOsrValue* value) {
3866 const LAllocation* frame = value->getOperand(0);
3867 const ValueOperand out = ToOutValue(value);
3869 const ptrdiff_t frameOffset = value->mir()->frameOffset();
3871 masm.loadValue(Address(ToRegister(frame), frameOffset), out);
// Load the BaselineFrame's return value if one was set (HAS_RVAL flag),
// otherwise produce |undefined|.
void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
  const LAllocation* frame = lir->getOperand(0);
  const ValueOperand out = ToOutValue(lir);

  Address flags =
      Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
  Address retval =
      Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());

  // Default to |undefined|; only overwrite when the flag says a return value
  // is present.
  masm.moveValue(UndefinedValue(), out);

  Label done;
  masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
                    &done);
  masm.loadValue(retval, out);
  masm.bind(&done);
}
// Store a typed (unboxed) argument into its outgoing argument slot, boxing it
// appropriately for the slot's Value layout.
void CodeGenerator::visitStackArgT(LStackArgT* lir) {
  const LAllocation* arg = lir->arg();
  MIRType argType = lir->type();
  uint32_t argslot = lir->argslot();
  MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());

  Address dest = AddressOfPassedArg(argslot);

  if (arg->isFloatReg()) {
    // Doubles are boxed directly from the FP register.
    masm.boxDouble(ToFloatRegister(arg), dest);
  } else if (arg->isRegister()) {
    masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
  } else {
    // Constant arguments are stored as fully-formed JS values.
    masm.storeValue(arg->toConstant()->toJSValue(), dest);
  }
}
3909 void CodeGenerator::visitStackArgV(LStackArgV* lir) {
3910 ValueOperand val = ToValue(lir, 0);
3911 uint32_t argslot = lir->argslot();
3912 MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3914 masm.storeValue(val, AddressOfPassedArg(argslot));
// Emit a parallel move group: collect all moves into the MoveResolver, let it
// order them (handling cycles), then emit the resolved sequence.
void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
  if (!group->numMoves()) {
    return;
  }

  MoveResolver& resolver = masm.moveResolver();

  for (size_t i = 0; i < group->numMoves(); i++) {
    const LMove& move = group->getMove(i);

    LAllocation from = move.from();
    LAllocation to = move.to();
    LDefinition::Type type = move.type();

    // No bogus moves.
    MOZ_ASSERT(from != to);
    MOZ_ASSERT(!from.isConstant());
    // Map the LIR definition type onto the move-emitter's operand kind.
    MoveOp::Type moveType;
    switch (type) {
      case LDefinition::OBJECT:
      case LDefinition::SLOTS:
      case LDefinition::WASM_ANYREF:
#ifdef JS_NUNBOX32
      case LDefinition::TYPE:
      case LDefinition::PAYLOAD:
#else
      case LDefinition::BOX:
#endif
      case LDefinition::GENERAL:
      case LDefinition::STACKRESULTS:
        moveType = MoveOp::GENERAL;
        break;
      case LDefinition::INT32:
        moveType = MoveOp::INT32;
        break;
      case LDefinition::FLOAT32:
        moveType = MoveOp::FLOAT32;
        break;
      case LDefinition::DOUBLE:
        moveType = MoveOp::DOUBLE;
        break;
      case LDefinition::SIMD128:
        moveType = MoveOp::SIMD128;
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }

    // addMove can fail on OOM; record it on the masm instead of handling it
    // here.
    masm.propagateOOM(
        resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
  }

  masm.propagateOOM(resolver.resolve());
  if (masm.oom()) {
    return;
  }

  MoveEmitter emitter(masm);

#ifdef JS_CODEGEN_X86
  // x86 is register-starved: use an explicit scratch register when the
  // allocator provided one, otherwise sort moves so memory-to-memory moves
  // can share cycles efficiently.
  if (group->maybeScratchRegister().isGeneralReg()) {
    emitter.setScratchRegister(
        group->maybeScratchRegister().toGeneralReg()->reg());
  } else {
    resolver.sortMemoryToMemoryMoves();
  }
#endif

  emitter.emit(resolver);
  emitter.finish();
}
3989 void CodeGenerator::visitInteger(LInteger* lir) {
3990 masm.move32(Imm32(lir->i32()), ToRegister(lir->output()));
3993 void CodeGenerator::visitInteger64(LInteger64* lir) {
3994 masm.move64(Imm64(lir->i64()), ToOutRegister64(lir));
3997 void CodeGenerator::visitPointer(LPointer* lir) {
3998 masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
// Load a nursery-allocated object. The object's address isn't known at
// compile time, so emit a patchable mov that will be fixed up to point at the
// IonScript's nursery-object list entry, then load through it.
void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
  Register output = ToRegister(lir->output());
  uint32_t nurseryIndex = lir->mir()->nurseryIndex();

  // Load a pointer to the entry in IonScript's nursery objects list.
  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
  // Record the patch location; emplaceBack can OOM, which is propagated via
  // the masm.
  masm.propagateOOM(ionNurseryObjectLabels_.emplaceBack(label, nurseryIndex));

  // Load the JSObject*.
  masm.loadPtr(Address(output, 0), output);
}
// No code is emitted; presumably this instruction only extends the operand's
// live range for the register allocator — TODO confirm against MIR docs.
void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
  // No-op.
}
// Debug-mode bookkeeping: increment the JSContext's GC-unsafe-region counter
// and assert it stayed positive (i.e. enters/leaves are balanced).
void CodeGenerator::visitDebugEnterGCUnsafeRegion(
    LDebugEnterGCUnsafeRegion* lir) {
  Register temp = ToRegister(lir->temp0());

  masm.loadJSContext(temp);

  Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
  masm.add32(Imm32(1), inUnsafeRegion);

  Label ok;
  masm.branch32(Assembler::GreaterThan, inUnsafeRegion, Imm32(0), &ok);
  masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
  masm.bind(&ok);
}
// Debug-mode bookkeeping: decrement the JSContext's GC-unsafe-region counter
// and assert it never goes negative (i.e. no leave without a matching enter).
void CodeGenerator::visitDebugLeaveGCUnsafeRegion(
    LDebugLeaveGCUnsafeRegion* lir) {
  Register temp = ToRegister(lir->temp0());

  masm.loadJSContext(temp);

  Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
  masm.add32(Imm32(-1), inUnsafeRegion);

  Label ok;
  masm.branch32(Assembler::GreaterThanOrEqual, inUnsafeRegion, Imm32(0), &ok);
  masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
  masm.bind(&ok);
}
4047 void CodeGenerator::visitSlots(LSlots* lir) {
4048 Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
4049 masm.loadPtr(slots, ToRegister(lir->output()));
4052 void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
4053 ValueOperand dest = ToOutValue(lir);
4054 Register base = ToRegister(lir->input());
4055 int32_t offset = lir->mir()->slot() * sizeof(js::Value);
4057 masm.loadValue(Address(base, offset), dest);
4060 static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
4061 MIRType valueType) {
4062 if (value->isConstant()) {
4063 return ConstantOrRegister(value->toConstant()->toJSValue());
4065 return TypedOrValueRegister(valueType, ToAnyRegister(value));
// Store a typed (unboxed) value into a dynamic slot, emitting a pre-barrier
// first when the MIR says one is needed.
void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(js::Value);
  Address dest(base, offset);

  // The pre-barrier must run before the old value is overwritten.
  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(dest);
  }

  MIRType valueType = lir->mir()->value()->type();
  ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
  masm.storeUnboxedValue(value, valueType, dest);
}
// Store a boxed Value into a dynamic slot, emitting a pre-barrier first when
// the MIR says one is needed.
void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(Value);

  const ValueOperand value = ToValue(lir, LStoreDynamicSlotV::ValueIndex);

  // The pre-barrier must run before the old value is overwritten.
  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(Address(base, offset));
  }

  masm.storeValue(value, Address(base, offset));
}
4095 void CodeGenerator::visitElements(LElements* lir) {
4096 Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
4097 masm.loadPtr(elements, ToRegister(lir->output()));
4100 void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
4101 Address environment(ToRegister(lir->function()),
4102 JSFunction::offsetOfEnvironment());
4103 masm.unboxObject(environment, ToRegister(lir->output()));
// Load the [[HomeObject]] of a method from its extended-function slot.
void CodeGenerator::visitHomeObject(LHomeObject* lir) {
  Register func = ToRegister(lir->function());
  Address homeObject(func, FunctionExtended::offsetOfMethodHomeObjectSlot());

  // Only extended functions have this slot; assert before reading it.
  masm.assertFunctionIsExtended(func);
#ifdef DEBUG
  Label isObject;
  masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
  masm.assumeUnreachable("[[HomeObject]] must be Object");
  masm.bind(&isObject);
#endif

  masm.unboxObject(homeObject, ToRegister(lir->output()));
}
// Compute the super base: the prototype of the method's [[HomeObject]],
// boxed as an object Value, or |null| when the proto chain ends.
void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
  Register homeObject = ToRegister(lir->homeObject());
  ValueOperand output = ToOutValue(lir);
  Register temp = output.scratchReg();

  masm.loadObjProto(homeObject, temp);

#ifdef DEBUG
  // We won't encounter a lazy proto, because the prototype is guaranteed to
  // either be a JSFunction or a PlainObject, and only proxy objects can have a
  // lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label proxyCheckDone;
  masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
  masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
  masm.bind(&proxyCheckDone);
#endif

  // A null (0) proto means the chain ended; return |null| in that case.
  Label nullProto, done;
  masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);

  // Box prototype and return
  masm.tagValue(JSVAL_TYPE_OBJECT, temp, output);
  masm.jump(&done);

  masm.bind(&nullProto);
  masm.moveValue(NullValue(), output);

  masm.bind(&done);
}
4153 template <class T>
4154 static T* ToConstantObject(MDefinition* def) {
4155 MOZ_ASSERT(def->isConstant());
4156 return &def->toConstant()->toObject().as<T>();
// Allocate a BlockLexicalEnvironmentObject inline from the template object,
// falling back to a VM call on allocation failure.
void CodeGenerator::visitNewLexicalEnvironmentObject(
    LNewLexicalEnvironmentObject* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj = ToConstantObject<BlockLexicalEnvironmentObject>(
      lir->mir()->templateObj());
  auto* scope = &templateObj->scope();
  gc::Heap initialHeap = gc::Heap::Default;

  // Out-of-line VM fallback: create the object without an enclosing env.
  using Fn =
      BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>);
  auto* ool =
      oolCallVM<Fn, BlockLexicalEnvironmentObject::createWithoutEnclosing>(
          lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
// Allocate a ClassBodyLexicalEnvironmentObject inline from the template
// object, falling back to a VM call on allocation failure.
void CodeGenerator::visitNewClassBodyEnvironmentObject(
    LNewClassBodyEnvironmentObject* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj = ToConstantObject<ClassBodyLexicalEnvironmentObject>(
      lir->mir()->templateObj());
  auto* scope = &templateObj->scope();
  gc::Heap initialHeap = gc::Heap::Default;

  // Out-of-line VM fallback: create the object without an enclosing env.
  using Fn = ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
                                                    Handle<ClassBodyScope*>);
  auto* ool =
      oolCallVM<Fn, ClassBodyLexicalEnvironmentObject::createWithoutEnclosing>(
          lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
// Allocate a VarEnvironmentObject inline from the template object, falling
// back to a VM call on allocation failure.
void CodeGenerator::visitNewVarEnvironmentObject(
    LNewVarEnvironmentObject* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj =
      ToConstantObject<VarEnvironmentObject>(lir->mir()->templateObj());
  auto* scope = &templateObj->scope().as<VarScope>();
  gc::Heap initialHeap = gc::Heap::Default;

  // Out-of-line VM fallback: create the object without an enclosing env.
  using Fn = VarEnvironmentObject* (*)(JSContext*, Handle<VarScope*>);
  auto* ool = oolCallVM<Fn, VarEnvironmentObject::createWithoutEnclosing>(
      lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
// Bail out of Ion code unless the object has the expected shape.
void CodeGenerator::visitGuardShape(LGuardShape* guard) {
  Register obj = ToRegister(guard->input());
  // The temp may be invalid on platforms where the shape check needs no
  // scratch register.
  Register temp = ToTempRegisterOrInvalid(guard->temp0());
  Label bail;
  masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
                          obj, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion code when the realm fuse's word is no longer nullptr.
void CodeGenerator::visitGuardFuse(LGuardFuse* guard) {
  Register temp = ToRegister(guard->temp0());
  Label bail;

  // Bake specific fuse address for Ion code, because we won't share this code
  // across realms.
  GuardFuse* fuse =
      mirGen().realm->realmFuses().getFuseByIndex(guard->mir()->fuseIndex());
  masm.loadPtr(AbsoluteAddress(fuse->fuseRef()), temp);
  // A non-null fuse word triggers the bailout path.
  masm.branchPtr(Assembler::NotEqual, temp, ImmPtr(nullptr), &bail);

  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion code unless the object's shape is contained in the given
// shape-list object's elements.
void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {
  Register obj = ToRegister(guard->object());
  Register shapeList = ToRegister(guard->shapeList());
  Register temp = ToRegister(guard->temp0());
  Register temp2 = ToRegister(guard->temp1());
  Register temp3 = ToRegister(guard->temp2());
  // Spectre mitigation register; invalid when mitigations are disabled.
  Register spectre = ToTempRegisterOrInvalid(guard->temp3());

  Label bail;
  // The shapes are stored in the list object's dense elements.
  masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
  masm.branchTestObjShapeList(Assembler::NotEqual, obj, temp, temp2, temp3,
                              spectre, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
4261 void CodeGenerator::visitGuardProto(LGuardProto* guard) {
4262 Register obj = ToRegister(guard->object());
4263 Register expected = ToRegister(guard->expected());
4264 Register temp = ToRegister(guard->temp0());
4266 masm.loadObjProto(obj, temp);
4268 Label bail;
4269 masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
4270 bailoutFrom(&bail, guard->snapshot());
// Bail out of Ion code unless the object's prototype is null.
void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  masm.loadObjProto(obj, temp);

  Label bail;
  // Testing the register against itself: NonZero means the proto pointer is
  // not null, so bail.
  masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion code unless the object is a native object.
void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchIfNonNativeObj(obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion code when the global's generation counter no longer matches
// the value baked in at compile time.
void CodeGenerator::visitGuardGlobalGeneration(LGuardGlobalGeneration* guard) {
  Register temp = ToRegister(guard->temp0());
  Label bail;

  masm.load32(AbsoluteAddress(guard->mir()->generationAddr()), temp);
  masm.branch32(Assembler::NotEqual, temp, Imm32(guard->mir()->expected()),
                &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion code unless the object is a proxy. The leading |false|
// selects the branch-if-not-proxy direction (cf. visitGuardIsNotProxy below,
// which passes |true|).
void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestObjectIsProxy(false, obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion code when the object is a proxy (branch direction |true|).
void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestObjectIsProxy(true, obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion code when the proxy's handler belongs to the DOM proxy
// handler family.
void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
  Register proxy = ToRegister(guard->proxy());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
                                    GetDOMProxyHandlerFamily(), &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Call into the VM to perform a proxy [[Get]] with a constant property id.
void CodeGenerator::visitProxyGet(LProxyGet* lir) {
  Register proxy = ToRegister(lir->proxy());
  Register temp = ToRegister(lir->temp0());

  // VM-call arguments are pushed in reverse order (id, then receiver).
  pushArg(lir->mir()->id(), temp);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
  callVM<Fn, ProxyGetProperty>(lir);
}
4342 void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
4343 Register proxy = ToRegister(lir->proxy());
4344 ValueOperand idVal = ToValue(lir, LProxyGetByValue::IdIndex);
4346 pushArg(idVal);
4347 pushArg(proxy);
4349 using Fn =
4350 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
4351 callVM<Fn, ProxyGetPropertyByValue>(lir);
// Call into the VM for a proxy [[HasProperty]] / [[GetOwnProperty]]-based
// |in| test; hasOwn() selects which VM function to use.
void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir, LProxyHasProp::IdIndex);

  // VM-call arguments are pushed in reverse order.
  pushArg(idVal);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  if (lir->mir()->hasOwn()) {
    callVM<Fn, ProxyHasOwn>(lir);
  } else {
    callVM<Fn, ProxyHas>(lir);
  }
}
// Call into the VM to perform a proxy [[Set]] with a constant property id.
void CodeGenerator::visitProxySet(LProxySet* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand rhs = ToValue(lir, LProxySet::RhsIndex);
  Register temp = ToRegister(lir->temp0());

  // VM-call arguments are pushed in reverse order (strict flag last in the
  // signature, so first pushed).
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(lir->mir()->id(), temp);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, ProxySetProperty>(lir);
}
4383 void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
4384 Register proxy = ToRegister(lir->proxy());
4385 ValueOperand idVal = ToValue(lir, LProxySetByValue::IdIndex);
4386 ValueOperand rhs = ToValue(lir, LProxySetByValue::RhsIndex);
4388 pushArg(Imm32(lir->mir()->strict()));
4389 pushArg(rhs);
4390 pushArg(idVal);
4391 pushArg(proxy);
4393 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
4394 callVM<Fn, ProxySetPropertyByValue>(lir);
// Call into the VM to set an array's |length| property.
void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
  Register obj = ToRegister(lir->obj());
  ValueOperand rhs = ToValue(lir, LCallSetArrayLength::RhsIndex);

  // VM-call arguments are pushed in reverse order.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
  callVM<Fn, jit::SetArrayLength>(lir);
}
// Megamorphic property load with a constant key: try the megamorphic cache
// inline, then fall back to a pure (non-GC) C++ lookup; bail out to the
// interpreter if that fails too.
void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
  Register obj = ToRegister(lir->object());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register temp3 = ToRegister(lir->temp3());
  ValueOperand output = ToOutValue(lir);

  Label bail, cacheHit;
  // Fast path: hit in the megamorphic cache jumps straight to cacheHit with
  // the result already in |output|.
  masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
                                  output, &cacheHit);

  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // Reserve a stack Value as the out-param for the C++ helper; temp3 points
  // at it.
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(temp3);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(obj);
  masm.movePropertyKey(lir->mir()->name(), temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(temp2);
  masm.passABIArg(temp3);

  masm.callWithABI<Fn, GetNativeDataPropertyPure>();

  // Pop the out-param into |output| before inspecting the call's bool result.
  MOZ_ASSERT(!output.aliases(ReturnReg));
  masm.Pop(output);

  masm.branchIfFalseBool(ReturnReg, &bail);

  masm.bind(&cacheHit);
  bailoutFrom(&bail, lir->snapshot());
}
// Megamorphic property load with a dynamic key: try the megamorphic cache
// inline, then fall back to a pure (non-GC) C++ lookup; bail out to the
// interpreter if that fails too.
void CodeGenerator::visitMegamorphicLoadSlotByValue(
    LMegamorphicLoadSlotByValue* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand idVal = ToValue(lir, LMegamorphicLoadSlotByValue::IdIndex);
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  ValueOperand output = ToOutValue(lir);

  Label bail, cacheHit;
  masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
                                         output, &cacheHit);

  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(temp0);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(obj);
  masm.passABIArg(temp2);
  masm.passABIArg(temp0);
  masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();

  // Restore idVal (the helper may have mutated vp[0] into a different form);
  // keep the bool result in temp0.
  MOZ_ASSERT(!idVal.aliases(temp0));
  masm.storeCallPointerResult(temp0);
  masm.Pop(idVal);

  // On failure, discard the result slot before jumping to the bail path;
  // restore framePushed afterwards since only one of the two paths runs.
  uint32_t framePushed = masm.framePushed();
  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  masm.freeStack(sizeof(Value));  // Discard result Value.
  masm.jump(&bail);

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.Pop(output);

  masm.bind(&cacheHit);
  bailoutFrom(&bail, lir->snapshot());
}
// Megamorphic property store: try the megamorphic set-slot cache inline
// (with a pre-barrier callback), fall back to a VM call, and emit a
// post-write barrier on the cache-hit path when storing a nursery cell into
// a tenured object.
void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand value = ToValue(lir, LMegamorphicStoreSlot::RhsIndex);

  Register temp0 = ToRegister(lir->temp0());
#ifndef JS_CODEGEN_X86
  // x86 is too register-starved for the extra temps.
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  Label cacheHit, done;
#ifdef JS_CODEGEN_X86
  masm.emitMegamorphicCachedSetSlot(
      lir->mir()->name(), obj, temp0, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#else
  masm.emitMegamorphicCachedSetSlot(
      lir->mir()->name(), obj, temp0, temp1, temp2, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#endif

  // Slow path: full VM property set. Arguments are pushed in reverse order.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(value);
  pushArg(lir->mir()->name(), temp0);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, SetPropertyMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // Post-write barrier: only needed when a tenured object now references a
  // nursery cell.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  saveVolatile(temp0);
  emitPostWriteBarrier(obj);
  restoreVolatile(temp0);

  masm.bind(&done);
}
// Megamorphic |in| / hasOwnProperty test: try the megamorphic cache inline,
// then fall back to a pure (non-GC) C++ lookup; bail out to the interpreter
// if that fails too.
void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand idVal = ToValue(lir, LMegamorphicHasProp::IdIndex);
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label bail, cacheHit;
  masm.emitMegamorphicCacheLookupExists(idVal, obj, temp0, temp1, temp2, output,
                                        &cacheHit, lir->mir()->hasOwn());

  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(temp0);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(obj);
  masm.passABIArg(temp2);
  masm.passABIArg(temp0);
  // hasOwn() selects the template instantiation of the helper.
  if (lir->mir()->hasOwn()) {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
  } else {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
  }

  // Restore idVal; keep the bool call result in temp0.
  MOZ_ASSERT(!idVal.aliases(temp0));
  masm.storeCallPointerResult(temp0);
  masm.Pop(idVal);

  // On failure, discard the result slot before jumping to the bail path;
  // restore framePushed afterwards since only one of the two paths runs.
  uint32_t framePushed = masm.framePushed();
  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  masm.freeStack(sizeof(Value));  // Discard result Value.
  masm.jump(&bail);

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
  masm.freeStack(sizeof(Value));
  masm.bind(&cacheHit);

  bailoutFrom(&bail, lir->snapshot());
}
// Bail out of Ion code when the object is an ArrayBuffer or a
// SharedArrayBuffer (checked by class pointer).
void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
    LGuardIsNotArrayBufferMaybeShared* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchPtr(Assembler::Equal, temp, ImmPtr(&ArrayBufferObject::class_),
                 &bail);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&SharedArrayBufferObject::class_), &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion code unless the object's class is a typed-array class.
void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out of Ion code unless the proxy's handler pointer equals the one
// baked in at compile time.
void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
  Register obj = ToRegister(guard->input());

  Label bail;

  Address handlerAddr(obj, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::NotEqual, handlerAddr,
                 ImmPtr(guard->mir()->handler()), &bail);

  bailoutFrom(&bail, guard->snapshot());
}
4630 void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
4631 Register input = ToRegister(guard->input());
4632 Register expected = ToRegister(guard->expected());
4634 Assembler::Condition cond =
4635 guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
4636 bailoutCmpPtr(cond, input, expected, guard->snapshot());
4639 void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
4640 Register input = ToRegister(guard->input());
4641 Register expected = ToRegister(guard->expected());
4643 bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
// Bail out of Ion code unless the string equals the expected atom. The
// comparison may call out of line, so exclude the scratch register from the
// saved volatile set.
void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
  Register str = ToRegister(guard->str());
  Register scratch = ToRegister(guard->temp0());

  LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
  volatileRegs.takeUnchecked(scratch);

  Label bail;
  masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
                         &bail);
  bailoutFrom(&bail, guard->snapshot());
}
4659 void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
4660 Register symbol = ToRegister(guard->symbol());
4662 bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
4663 guard->snapshot());
4666 void CodeGenerator::visitGuardSpecificInt32(LGuardSpecificInt32* guard) {
4667 Register num = ToRegister(guard->num());
4669 bailoutCmp32(Assembler::NotEqual, num, Imm32(guard->mir()->expected()),
4670 guard->snapshot());
// Convert a string to an array index: use the string's cached index value
// when present, otherwise call GetIndexFromString and bail out on failure
// (negative result).
void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
  Register str = ToRegister(lir->string());
  Register output = ToRegister(lir->output());

  Label vmCall, done;
  // Fast path: the index may be cached in the string's header.
  masm.loadStringIndexValue(str, output, &vmCall);
  masm.jump(&done);

  {
    masm.bind(&vmCall);

    // The helper is a plain ABI call; preserve live volatiles except the
    // output, which receives the result.
    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = int32_t (*)(JSString* str);
    masm.setupAlignedABICall();
    masm.passABIArg(str);
    masm.callWithABI<Fn, GetIndexFromString>();
    masm.storeCallInt32Result(output);

    masm.PopRegsInMask(volatileRegs);

    // GetIndexFromString returns a negative value on failure.
    bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
  }

  masm.bind(&done);
}
// Convert a string to an int32 via the masm helper, bailing out of Ion code
// when the string is not a valid int32.
void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
  Register str = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  LiveRegisterSet volatileRegs = liveVolatileRegs(lir);

  Label bail;
  masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Convert a string to a double: use the string's cached index value when
// present, otherwise call StringToNumberPure with a stack out-param and bail
// out of Ion code on failure.
void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
  Register str = ToRegister(lir->string());
  FloatRegister output = ToFloatRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label vmCall, done;
  // Use indexed value as fast path if possible.
  masm.loadStringIndexValue(str, temp0, &vmCall);
  masm.convertInt32ToDouble(temp0, output);
  masm.jump(&done);
  {
    masm.bind(&vmCall);

    // Reserve stack for holding the result value of the call.
    masm.reserveStack(sizeof(double));
    masm.moveStackPtrTo(temp0);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(temp0);
    volatileRegs.takeUnchecked(temp1);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
    masm.setupAlignedABICall();
    masm.loadJSContext(temp1);
    masm.passABIArg(temp1);
    masm.passABIArg(str);
    masm.passABIArg(temp0);
    masm.callWithABI<Fn, StringToNumberPure>();
    masm.storeCallPointerResult(temp0);

    masm.PopRegsInMask(volatileRegs);

    Label ok;
    masm.branchIfTrueBool(temp0, &ok);
    {
      // OOM path, recovered by StringToNumberPure.
      //
      // Use addToStackPtr instead of freeStack as freeStack tracks stack height
      // flow-insensitively, and using it here would confuse the stack height
      // tracking.
      masm.addToStackPtr(Imm32(sizeof(double)));
      bailout(lir->snapshot());
    }
    masm.bind(&ok);
    masm.Pop(output);
  }
  masm.bind(&done);
}
// Bail out of Ion code when the object has any initialized dense elements.
void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);

  // Make sure there are no dense elements.
  Address initLength(temp, ObjectElements::offsetOfInitializedLength());
  bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
}
4778 void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
4779 Register input = ToRegister(lir->input());
4780 Register64 output = ToOutRegister64(lir);
4782 masm.move32To64ZeroExtend(input, output);
// Shared helper: convert a string to an int64 via the DoStringToInt64 VM
// call, using a stack slot as the out-param.
void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
                                      Register64 output) {
  Register temp = output.scratchReg();

  // This is a manual VM call (not oolCallVM), so save/restore live registers
  // around it explicitly.
  saveLive(lir);

  masm.reserveStack(sizeof(uint64_t));
  masm.moveStackPtrTo(temp);
  pushArg(temp);
  pushArg(input);

  using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
  callVM<Fn, DoStringToInt64>(lir);

  masm.load64(Address(masm.getStackPointer(), 0), output);
  masm.freeStack(sizeof(uint64_t));

  // Don't restore the registers that hold the result.
  restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
}
// String-to-int64 conversion; delegates to the shared emitStringToInt64
// helper (which performs a VM call).
4805 void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
4806 Register input = ToRegister(lir->input());
4807 Register64 output = ToOutRegister64(lir);
4809 emitStringToInt64(lir, input, output);
// Convert a boxed Value to an int64. Three value types are accepted, tested
// in order: BigInt (truncated to 64 bits), Boolean (zero-extended), and
// String (via a VM call). Any other type bails out.
4812 void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
4813 ValueOperand input = ToValue(lir, LValueToInt64::InputIndex);
4814 Register temp = ToRegister(lir->temp0());
4815 Register64 output = ToOutRegister64(lir);
// Number of type checks emitted below; used to wire up the fallthrough
// labels so only the last failed check jumps straight to |fail|.
4817 int checks = 3;
4819 Label fail, done;
4820 // Jump to fail if this is the last check and we fail it,
4821 // otherwise to the next test.
4822 auto emitTestAndUnbox = [&](auto testAndUnbox) {
4823 MOZ_ASSERT(checks > 0);
4825 checks--;
4826 Label notType;
4827 Label* target = checks ? &notType : &fail;
4829 testAndUnbox(target);
// Non-final checks jump to |done| on success and bind the label the
// next check starts from.
4831 if (checks) {
4832 masm.jump(&done);
4833 masm.bind(&notType);
4837 Register tag = masm.extractTag(input, temp);
4839 // BigInt.
4840 emitTestAndUnbox([&](Label* target) {
4841 masm.branchTestBigInt(Assembler::NotEqual, tag, target);
4842 masm.unboxBigInt(input, temp);
4843 masm.loadBigInt64(temp, output);
4846 // Boolean
4847 emitTestAndUnbox([&](Label* target) {
4848 masm.branchTestBoolean(Assembler::NotEqual, tag, target);
4849 masm.unboxBoolean(input, temp);
4850 masm.move32To64ZeroExtend(temp, output);
4853 // String
4854 emitTestAndUnbox([&](Label* target) {
4855 masm.branchTestString(Assembler::NotEqual, tag, target);
4856 masm.unboxString(input, temp);
4857 emitStringToInt64(lir, temp, output);
4860 MOZ_ASSERT(checks == 0);
4862 bailoutFrom(&fail, lir->snapshot());
4863 masm.bind(&done);
// Truncate a BigInt to int64 by loading its low 64 bits of digit data.
4866 void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
4867 Register operand = ToRegister(lir->input());
4868 Register64 output = ToOutRegister64(lir);
4870 masm.loadBigInt64(operand, output);
// Create the out-of-line VM-call fallback used when inline BigInt allocation
// fails. Picks the signed (CreateBigIntFromInt64) or unsigned
// (CreateBigIntFromUint64) VM function based on the scalar type.
4873 OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
4874 Scalar::Type type,
4875 Register64 input,
4876 Register output) {
// On 32-bit targets a Register64 is a (low, high) register pair, so the VM
// function takes the two halves as separate uint32_t arguments.
4877 #if JS_BITS_PER_WORD == 32
4878 using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
4879 auto args = ArgList(input.low, input.high);
4880 #else
4881 using Fn = BigInt* (*)(JSContext*, uint64_t);
4882 auto args = ArgList(input);
4883 #endif
4885 if (type == Scalar::BigInt64) {
4886 return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
4887 StoreRegisterTo(output));
4889 MOZ_ASSERT(type == Scalar::BigUint64);
4890 return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
4891 StoreRegisterTo(output));
// Allocate a BigInt for |input| and initialize it. Tries inline nursery
// allocation first (newGCBigInt) and falls back to the out-of-line VM call
// from createBigIntOutOfLine on failure. If the caller did not provide a
// temp register, one is borrowed and preserved via push/pop.
4894 void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
4895 Register64 input, Register output,
4896 Register maybeTemp) {
4897 OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);
4899 if (maybeTemp != InvalidReg) {
4900 masm.newGCBigInt(output, maybeTemp, initialBigIntHeap(), ool->entry());
4901 } else {
// No temp was supplied: grab any register not aliasing input/output and
// spill it around the inline allocation.
4902 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
4903 regs.take(input);
4904 regs.take(output);
4906 Register temp = regs.takeAny();
4908 masm.push(temp);
// Both paths must pop |temp| before continuing: the success path jumps
// over the failure path's pop, and the failure path pops before jumping
// to the OOL entry so the stack is balanced there.
4910 Label fail, ok;
4911 masm.newGCBigInt(output, temp, initialBigIntHeap(), &fail);
4912 masm.pop(temp);
4913 masm.jump(&ok);
4914 masm.bind(&fail);
4915 masm.pop(temp);
4916 masm.jump(ool->entry());
4917 masm.bind(&ok);
// Store the 64-bit payload into the freshly allocated (or OOL-created)
// BigInt; the OOL path rejoins after this point with output already set.
4919 masm.initializeBigInt64(type, output, input);
4920 masm.bind(ool->rejoin());
// Box an int64 into a new BigInt via the shared emitCreateBigInt helper.
4923 void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
4924 Register64 input = ToRegister64(lir->input());
4925 Register temp = ToRegister(lir->temp0());
4926 Register output = ToRegister(lir->output());
4928 emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp);
// Guard that the input Value is bitwise-equal to a statically known Value;
// bail out otherwise.
4931 void CodeGenerator::visitGuardValue(LGuardValue* lir) {
4932 ValueOperand input = ToValue(lir, LGuardValue::InputIndex);
4933 Value expected = lir->mir()->expected();
4934 Label bail;
4935 masm.branchTestValue(Assembler::NotEqual, input, expected, &bail);
4936 bailoutFrom(&bail, lir->snapshot());
// Guard that the input Value is null or undefined; any other tag bails out.
4939 void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
4940 ValueOperand input = ToValue(lir, LGuardNullOrUndefined::InputIndex);
4942 ScratchTagScope tag(masm, input);
4943 masm.splitTagForTest(input, tag);
// Null passes immediately; otherwise the tag must be undefined.
4945 Label done;
4946 masm.branchTestNull(Assembler::Equal, tag, &done);
4948 Label bail;
4949 masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
4950 bailoutFrom(&bail, lir->snapshot());
4952 masm.bind(&done);
// Guard that the input Value is not an object; bail out if it is one.
4955 void CodeGenerator::visitGuardIsNotObject(LGuardIsNotObject* lir) {
4956 ValueOperand input = ToValue(lir, LGuardIsNotObject::InputIndex);
4958 Label bail;
4959 masm.branchTestObject(Assembler::Equal, input, &bail);
4960 bailoutFrom(&bail, lir->snapshot());
// Guard on JSFunction flag bits: bail out if any expected flag is clear
// (Zero condition) or any unexpected flag is set (NonZero condition).
4963 void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
4964 Register function = ToRegister(lir->function());
4966 Label bail;
4967 if (uint16_t flags = lir->mir()->expectedFlags()) {
4968 masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
4970 if (uint16_t flags = lir->mir()->unexpectedFlags()) {
4971 masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
4973 bailoutFrom(&bail, lir->snapshot());
// Guard that |function| is a constructor and not a built-in one; bail
// otherwise (check is delegated to the MacroAssembler helper).
4976 void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
4977 LGuardFunctionIsNonBuiltinCtor* lir) {
4978 Register function = ToRegister(lir->function());
4979 Register temp = ToRegister(lir->temp0());
4981 Label bail;
4982 masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
4983 bailoutFrom(&bail, lir->snapshot());
// Guard on the function's kind. Depending on bailOnEquality(), either
// matching the expected kind or failing to match triggers the bailout.
4986 void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
4987 Register function = ToRegister(lir->function());
4988 Register temp = ToRegister(lir->temp0());
4990 Assembler::Condition cond =
4991 lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
4993 Label bail;
4994 masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
4995 bailoutFrom(&bail, lir->snapshot());
// Guard that the function's script slot holds the expected BaseScript; a
// mismatch bails out.
4998 void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
4999 Register function = ToRegister(lir->function());
5001 Address scriptAddr(function, JSFunction::offsetOfJitInfoOrScript());
5002 bailoutCmpPtr(Assembler::NotEqual, scriptAddr,
5003 ImmGCPtr(lir->mir()->expected()), lir->snapshot());
5006 // Out-of-line path to update the store buffer.
// Carries the LIR instruction (for live-register save/restore) and the
// written-to object allocation; emitted code lives in
// visitOutOfLineCallPostWriteBarrier.
5007 class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
5008 LInstruction* lir_;
5009 const LAllocation* object_;
5011 public:
5012 OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
5013 : lir_(lir), object_(object) {}
5015 void accept(CodeGenerator* codegen) override {
5016 codegen->visitOutOfLineCallPostWriteBarrier(this);
5019 LInstruction* lir() const { return lir_; }
5020 const LAllocation* object() const { return object_; }
// For a compile-time-known tenured cell, check (and update) the arena's
// buffered-cells bitmap directly instead of calling into the VM. Jumps to
// |exit| when the cell is already recorded, to |callVM| when the arena
// still has the sentinel cell set; otherwise sets the bit inline and exits.
5023 static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
5024 const gc::TenuredCell* cell,
5025 AllocatableGeneralRegisterSet& regs,
5026 Label* exit, Label* callVM) {
5027 Register temp = regs.takeAny();
5029 gc::Arena* arena = cell->arena();
5031 Register cells = temp;
5032 masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);
// Compute the cell's bit position inside the ArenaCellSet at compile time.
5034 size_t index = gc::ArenaCellSet::getCellIndex(cell);
5035 size_t word;
5036 uint32_t mask;
5037 gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
5038 size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);
// Already buffered? Then nothing to do.
5040 masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
5041 exit);
5043 // Check whether this is the sentinel set and if so call the VM to allocate
5044 // one for this arena.
5045 masm.branchPtr(Assembler::Equal,
5046 Address(cells, gc::ArenaCellSet::offsetOfArena()),
5047 ImmPtr(nullptr), callVM);
5049 // Add the cell to the set.
5050 masm.or32(Imm32(mask), Address(cells, offset));
5051 masm.jump(exit);
// Return the scratch register to the caller's pool.
5053 regs.add(temp);
// Emit the generic post-write barrier: try cheap inline checks (store-buffer
// bitmap for known constants, last-buffered-whole-cell cache otherwise) and
// fall back to an ABI call into PostWriteBarrier/PostGlobalWriteBarrier.
5056 static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
5057 Register objreg, JSObject* maybeConstant,
5058 bool isGlobal,
5059 AllocatableGeneralRegisterSet& regs) {
5060 MOZ_ASSERT_IF(isGlobal, maybeConstant);
5062 Label callVM;
5063 Label exit;
5065 Register temp = regs.takeAny();
5067 // We already have a fast path to check whether a global is in the store
5068 // buffer.
5069 if (!isGlobal) {
5070 if (maybeConstant) {
5071 // Check store buffer bitmap directly for known object.
5072 EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
5073 &exit, &callVM);
5074 } else {
5075 // Check one element cache to avoid VM call.
5076 masm.branchPtr(Assembler::Equal,
5077 AbsoluteAddress(runtime->addressOfLastBufferedWholeCell()),
5078 objreg, &exit);
5082 // Call into the VM to barrier the write.
5083 masm.bind(&callVM);
5085 Register runtimereg = temp;
5086 masm.mov(ImmPtr(runtime), runtimereg);
5088 masm.setupAlignedABICall();
5089 masm.passABIArg(runtimereg);
5090 masm.passABIArg(objreg);
// Globals use a dedicated barrier entry point; everything else goes
// through the generic whole-cell barrier.
5091 if (isGlobal) {
5092 using Fn = void (*)(JSRuntime* rt, GlobalObject* obj);
5093 masm.callWithABI<Fn, PostGlobalWriteBarrier>();
5094 } else {
5095 using Fn = void (*)(JSRuntime* rt, js::gc::Cell* obj);
5096 masm.callWithABI<Fn, PostWriteBarrier>();
5099 masm.bind(&exit);
// Post-write barrier for an LAllocation-described object: materializes a
// constant object into a register (noting whether it is the global) and
// delegates to EmitPostWriteBarrier.
5102 void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
5103 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5105 Register objreg;
5106 JSObject* object = nullptr;
5107 bool isGlobal = false;
5108 if (obj->isConstant()) {
5109 object = &obj->toConstant()->toObject();
5110 isGlobal = isGlobalObject(object);
5111 objreg = regs.takeAny();
5112 masm.movePtr(ImmGCPtr(object), objreg);
5113 } else {
5114 objreg = ToRegister(obj);
5115 regs.takeUnchecked(objreg);
5118 EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
5121 // Returns true if `def` might be allocated in the nursery.
// Unwraps a box first; an untyped Value is conservatively assumed to need
// a barrier, otherwise defer to NeedsPostBarrier on the concrete type.
5122 static bool ValueNeedsPostBarrier(MDefinition* def) {
5123 if (def->isBox()) {
5124 def = def->toBox()->input();
5126 if (def->type() == MIRType::Value) {
5127 return true;
5129 return NeedsPostBarrier(def->type());
// Out-of-line path for an element post-write barrier. Stores everything the
// slow path needs: the volatile registers live across it, the object, the
// element index (plus a constant adjustment), and a scratch register.
5132 class OutOfLineElementPostWriteBarrier
5133 : public OutOfLineCodeBase<CodeGenerator> {
5134 LiveRegisterSet liveVolatileRegs_;
5135 const LAllocation* index_;
5136 int32_t indexDiff_;
5137 Register obj_;
5138 Register scratch_;
5140 public:
5141 OutOfLineElementPostWriteBarrier(const LiveRegisterSet& liveVolatileRegs,
5142 Register obj, const LAllocation* index,
5143 Register scratch, int32_t indexDiff)
5144 : liveVolatileRegs_(liveVolatileRegs),
5145 index_(index),
5146 indexDiff_(indexDiff),
5147 obj_(obj),
5148 scratch_(scratch) {}
5150 void accept(CodeGenerator* codegen) override {
5151 codegen->visitOutOfLineElementPostWriteBarrier(this);
5154 const LiveRegisterSet& liveVolatileRegs() const { return liveVolatileRegs_; }
5155 const LAllocation* index() const { return index_; }
5156 int32_t indexDiff() const { return indexDiff_; }
5158 Register object() const { return obj_; }
5159 Register scratch() const { return scratch_; }
// Emit a post-write barrier for storing |val| into an element of |obj|.
// Skips the barrier entirely for constants and typed values that can never
// be nursery-allocated; otherwise emits inline nursery checks that branch
// to the out-of-line VM path only when obj is tenured and val is in the
// nursery.
5162 void CodeGenerator::emitElementPostWriteBarrier(
5163 MInstruction* mir, const LiveRegisterSet& liveVolatileRegs, Register obj,
5164 const LAllocation* index, Register scratch, const ConstantOrRegister& val,
5165 int32_t indexDiff) {
5166 if (val.constant()) {
// Constants are either non-GC things or tenured; no barrier needed.
5167 MOZ_ASSERT_IF(val.value().isGCThing(),
5168 !IsInsideNursery(val.value().toGCThing()));
5169 return;
5172 TypedOrValueRegister reg = val.reg();
5173 if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
5174 return;
5177 auto* ool = new (alloc()) OutOfLineElementPostWriteBarrier(
5178 liveVolatileRegs, obj, index, scratch, indexDiff);
5179 addOutOfLineCode(ool, mir);
// If the object itself is in the nursery, no barrier is required.
5181 masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, ool->rejoin());
// Only a nursery-resident value stored into a tenured object needs the
// slow path.
5183 if (reg.hasValue()) {
5184 masm.branchValueIsNurseryCell(Assembler::Equal, reg.valueReg(), scratch,
5185 ool->entry());
5186 } else {
5187 masm.branchPtrInNurseryChunk(Assembler::Equal, reg.typedReg().gpr(),
5188 scratch, ool->entry());
5191 masm.bind(ool->rejoin());
// Slow path for emitElementPostWriteBarrier: saves live volatile registers,
// materializes the (possibly adjusted) element index, and ABI-calls
// PostWriteElementBarrier.
5194 void CodeGenerator::visitOutOfLineElementPostWriteBarrier(
5195 OutOfLineElementPostWriteBarrier* ool) {
5196 Register obj = ool->object();
5197 Register scratch = ool->scratch();
5198 const LAllocation* index = ool->index();
5199 int32_t indexDiff = ool->indexDiff();
5201 masm.PushRegsInMask(ool->liveVolatileRegs());
5203 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5204 regs.takeUnchecked(obj);
5205 regs.takeUnchecked(scratch);
5207 Register indexReg;
5208 if (index->isConstant()) {
// Fold the constant index and the adjustment into an immediate move.
5209 indexReg = regs.takeAny();
5210 masm.move32(Imm32(ToInt32(index) + indexDiff), indexReg);
5211 } else {
5212 indexReg = ToRegister(index);
5213 regs.takeUnchecked(indexReg);
5214 if (indexDiff != 0) {
5215 masm.add32(Imm32(indexDiff), indexReg);
5219 masm.setupUnalignedABICall(scratch);
5220 masm.movePtr(ImmPtr(gen->runtime), scratch);
5221 masm.passABIArg(scratch);
5222 masm.passABIArg(obj);
5223 masm.passABIArg(indexReg);
5224 using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
5225 masm.callWithABI<Fn, PostWriteElementBarrier>();
5227 // We don't need a sub32 here because indexReg must be in liveVolatileRegs
5228 // if indexDiff is not zero, so it will be restored below.
5229 MOZ_ASSERT_IF(indexDiff != 0, ool->liveVolatileRegs().has(indexReg));
5231 masm.PopRegsInMask(ool->liveVolatileRegs());
5233 masm.jump(ool->rejoin());
// Post-write barrier for an object already held in a register (no constant,
// not known to be the global).
5236 void CodeGenerator::emitPostWriteBarrier(Register objreg) {
5237 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5238 regs.takeUnchecked(objreg);
5239 EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
// Slow path for the whole-cell post-write barrier: save volatile registers
// that are live, run the barrier, restore, and rejoin the inline code.
5242 void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
5243 OutOfLineCallPostWriteBarrier* ool) {
5244 saveLiveVolatile(ool->lir());
5245 const LAllocation* obj = ool->object();
5246 emitPostWriteBarrier(obj);
5247 restoreLiveVolatile(ool->lir());
5249 masm.jump(ool->rejoin());
// If |maybeGlobal| is this script's global object, skip the OOL barrier when
// the realm's globalWriteBarriered flag is already set (i.e. the global was
// barriered before).
5252 void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
5253 OutOfLineCode* ool) {
5254 // Check whether an object is a global that we have already barriered before
5255 // calling into the VM.
5257 // We only check for the script's global, not other globals within the same
5258 // compartment, because we bake in a pointer to realm->globalWriteBarriered
5259 // and doing that would be invalid for other realms because they could be
5260 // collected before the Ion code is discarded.
5262 if (!maybeGlobal->isConstant()) {
5263 return;
5266 JSObject* obj = &maybeGlobal->toConstant()->toObject();
5267 if (gen->realm->maybeGlobal() != obj) {
5268 return;
5271 const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
// Non-zero flag means the global is already in the store buffer; jump
// straight to the rejoin point.
5272 masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
5273 ool->rejoin());
// Shared inline code for typed (object/string/BigInt) post-write barriers:
// skip if the written-to object is itself in the nursery, skip for an
// already-barriered global, and take the OOL path only when the stored
// value is a nursery cell.
5276 template <class LPostBarrierType, MIRType nurseryType>
5277 void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
5278 OutOfLineCode* ool) {
5279 static_assert(NeedsPostBarrier(nurseryType));
5281 addOutOfLineCode(ool, lir->mir());
5283 Register temp = ToTempRegisterOrInvalid(lir->temp0());
5285 if (lir->object()->isConstant()) {
5286 // Constant nursery objects cannot appear here, see
5287 // LIRGenerator::visitPostWriteElementBarrier.
5288 MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
5289 } else {
5290 masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
5291 temp, ool->rejoin());
5294 maybeEmitGlobalBarrierCheck(lir->object(), ool);
5296 Register value = ToRegister(lir->value());
// Compile-time check that the LIR value type matches the instantiated
// nursery type.
5297 if constexpr (nurseryType == MIRType::Object) {
5298 MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
5299 } else if constexpr (nurseryType == MIRType::String) {
5300 MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
5301 } else {
5302 static_assert(nurseryType == MIRType::BigInt);
5303 MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
5305 masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());
5307 masm.bind(ool->rejoin());
// Value-typed variant of visitPostWriteBarrierCommon: same object/global
// fast paths, but the nursery check unboxes a boxed Value.
5310 template <class LPostBarrierType>
5311 void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
5312 OutOfLineCode* ool) {
5313 addOutOfLineCode(ool, lir->mir());
5315 Register temp = ToTempRegisterOrInvalid(lir->temp0());
5317 if (lir->object()->isConstant()) {
5318 // Constant nursery objects cannot appear here, see
5319 // LIRGenerator::visitPostWriteElementBarrier.
5320 MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
5321 } else {
5322 masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
5323 temp, ool->rejoin());
5326 maybeEmitGlobalBarrierCheck(lir->object(), ool);
5328 ValueOperand value = ToValue(lir, LPostBarrierType::ValueIndex);
5329 masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());
5331 masm.bind(ool->rejoin());
// Post-write barrier for storing an object value.
5334 void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
5335 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5336 visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
// Post-write barrier for storing a string value.
5339 void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
5340 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5341 visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
// Post-write barrier for storing a BigInt value.
5344 void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
5345 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5346 visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
// Post-write barrier for storing a boxed Value.
5349 void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
5350 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5351 visitPostWriteBarrierCommonV(lir, ool);
5354 // Out-of-line path to update the store buffer.
// Element-store flavor: additionally records the element index allocation
// so the slow path can pass it to PostWriteElementBarrier.
5355 class OutOfLineCallPostWriteElementBarrier
5356 : public OutOfLineCodeBase<CodeGenerator> {
5357 LInstruction* lir_;
5358 const LAllocation* object_;
5359 const LAllocation* index_;
5361 public:
5362 OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
5363 const LAllocation* object,
5364 const LAllocation* index)
5365 : lir_(lir), object_(object), index_(index) {}
5367 void accept(CodeGenerator* codegen) override {
5368 codegen->visitOutOfLineCallPostWriteElementBarrier(this);
5371 LInstruction* lir() const { return lir_; }
5373 const LAllocation* object() const { return object_; }
5375 const LAllocation* index() const { return index_; }
// Slow path for element post-write barriers attached via
// OutOfLineCallPostWriteElementBarrier: saves live volatile registers,
// materializes a constant object if needed, and ABI-calls
// PostWriteElementBarrier(runtime, obj, index).
5378 void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
5379 OutOfLineCallPostWriteElementBarrier* ool) {
5380 saveLiveVolatile(ool->lir());
5382 const LAllocation* obj = ool->object();
5383 const LAllocation* index = ool->index();
5385 Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
5386 Register indexreg = ToRegister(index);
5388 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5389 regs.takeUnchecked(indexreg);
5391 if (obj->isConstant()) {
// Constant object: load its GC pointer into a free volatile register.
5392 objreg = regs.takeAny();
5393 masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
5394 } else {
5395 regs.takeUnchecked(objreg);
5398 Register runtimereg = regs.takeAny();
5399 using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
5400 masm.setupAlignedABICall();
5401 masm.mov(ImmPtr(gen->runtime), runtimereg);
5402 masm.passABIArg(runtimereg);
5403 masm.passABIArg(objreg);
5404 masm.passABIArg(indexreg);
5405 masm.callWithABI<Fn, PostWriteElementBarrier>();
5407 restoreLiveVolatile(ool->lir());
5409 masm.jump(ool->rejoin());
// Element post-write barrier for an object value.
5412 void CodeGenerator::visitPostWriteElementBarrierO(
5413 LPostWriteElementBarrierO* lir) {
5414 auto ool = new (alloc())
5415 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5416 visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
5417 ool);
// Element post-write barrier for a string value.
5420 void CodeGenerator::visitPostWriteElementBarrierS(
5421 LPostWriteElementBarrierS* lir) {
5422 auto ool = new (alloc())
5423 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5424 visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
5425 ool);
// Element post-write barrier for a BigInt value.
5428 void CodeGenerator::visitPostWriteElementBarrierBI(
5429 LPostWriteElementBarrierBI* lir) {
5430 auto ool = new (alloc())
5431 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5432 visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
5433 ool);
// Element post-write barrier for a boxed Value.
5436 void CodeGenerator::visitPostWriteElementBarrierV(
5437 LPostWriteElementBarrierV* lir) {
5438 auto ool = new (alloc())
5439 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5440 visitPostWriteBarrierCommonV(lir, ool);
// Debug assertion: verify that eliding a post-write barrier was valid,
// i.e. either the object is in the nursery or the value is not a nursery
// cell. Otherwise trap with assumeUnreachable.
5443 void CodeGenerator::visitAssertCanElidePostWriteBarrier(
5444 LAssertCanElidePostWriteBarrier* lir) {
5445 Register object = ToRegister(lir->object());
5446 ValueOperand value =
5447 ToValue(lir, LAssertCanElidePostWriteBarrier::ValueIndex);
5448 Register temp = ToRegister(lir->temp0());
5450 Label ok;
5451 masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, &ok);
5452 masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp, &ok);
5454 masm.assumeUnreachable("Unexpected missing post write barrier");
5456 masm.bind(&ok);
// Shared emitter for calls to native (C++) functions, instantiated for both
// LCallNative (known single target) and LCallClassHook (callee in a
// register). Builds the JSNative vp array on the stack, enters a fake exit
// frame, makes the ABI call, checks for failure, and loads the result.
5459 template <typename LCallIns>
5460 void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
5461 MCallBase* mir = call->mir();
5463 uint32_t unusedStack = UnusedStackBytesForCall(mir->paddedNumStackArgs());
5465 // Registers used for callWithABI() argument-passing.
5466 const Register argContextReg = ToRegister(call->getArgContextReg());
5467 const Register argUintNReg = ToRegister(call->getArgUintNReg());
5468 const Register argVpReg = ToRegister(call->getArgVpReg());
5470 // Misc. temporary registers.
5471 const Register tempReg = ToRegister(call->getTempReg());
5473 DebugOnly<uint32_t> initialStack = masm.framePushed();
5475 masm.checkStackAlignment();
5477 // Native functions have the signature:
5478 // bool (*)(JSContext*, unsigned, Value* vp)
5479 // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
5480 // are the function arguments.
5482 // Allocate space for the outparam, moving the StackPointer to what will be
5483 // &vp[1].
5484 masm.adjustStack(unusedStack);
5486 // Push a Value containing the callee object: natives are allowed to access
5487 // their callee before setting the return value. The StackPointer is moved
5488 // to &vp[0].
// Class hooks take the callee from a register (and may need a realm
// switch based on it); plain natives know the callee at compile time.
5489 if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
5490 Register calleeReg = ToRegister(call->getCallee());
5491 masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
5493 if (call->mir()->maybeCrossRealm()) {
5494 masm.switchToObjectRealm(calleeReg, tempReg);
5496 } else {
5497 WrappedFunction* target = call->getSingleTarget();
5498 masm.Push(ObjectValue(*target->rawNativeJSFunction()));
5500 if (call->mir()->maybeCrossRealm()) {
5501 masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
5502 masm.switchToObjectRealm(tempReg, tempReg);
5506 // Preload arguments into registers.
5507 masm.loadJSContext(argContextReg);
5508 masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
5509 masm.moveStackPtrTo(argVpReg);
5511 masm.Push(argUintNReg);
5513 // Construct native exit frame.
5514 uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
5515 masm.enterFakeExitFrameForNative(argContextReg, tempReg,
5516 call->mir()->isConstructing());
5518 markSafepointAt(safepointOffset, call);
5520 // Construct and execute call.
5521 masm.setupAlignedABICall();
5522 masm.passABIArg(argContextReg);
5523 masm.passABIArg(argUintNReg);
5524 masm.passABIArg(argVpReg);
5526 ensureOsiSpace();
5527 // If we're using a simulator build, `native` will already point to the
5528 // simulator's call-redirection code for LCallClassHook. Load the address in
5529 // a register first so that we don't try to redirect it a second time.
5530 bool emittedCall = false;
5531 #ifdef JS_SIMULATOR
5532 if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
5533 masm.movePtr(ImmPtr(native), tempReg);
5534 masm.callWithABI(tempReg);
5535 emittedCall = true;
5537 #endif
5538 if (!emittedCall) {
5539 masm.callWithABI(DynamicFunction<JSNative>(native), MoveOp::GENERAL,
5540 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
5543 // Test for failure.
// Natives return false on error/exception; jump to the shared failure
// label in that case.
5544 masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
5546 if (call->mir()->maybeCrossRealm()) {
5547 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5550 // Load the outparam vp[0] into output register(s).
5551 masm.loadValue(
5552 Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
5553 JSReturnOperand);
5555 // Until C++ code is instrumented against Spectre, prevent speculative
5556 // execution from returning any private data.
5557 if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
5558 mir->hasLiveDefUses()) {
5559 masm.speculationBarrier();
5562 // The next instruction is removing the footer of the exit frame, so there
5563 // is no need for leaveFakeExitFrame.
5565 // Move the StackPointer back to its original location, unwinding the native
5566 // exit frame.
5567 masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
5568 MOZ_ASSERT(masm.framePushed() == initialStack);
// Call a known native function. If the call result is unused and the target
// provides an ignores-return-value entry point in its JitInfo, call that
// variant instead.
5571 void CodeGenerator::visitCallNative(LCallNative* call) {
5572 WrappedFunction* target = call->getSingleTarget();
5573 MOZ_ASSERT(target);
5574 MOZ_ASSERT(target->isNativeWithoutJitEntry());
5576 JSNative native = target->native();
5577 if (call->ignoresReturnValue() && target->hasJitInfo()) {
5578 const JSJitInfo* jitInfo = target->jitInfo();
5579 if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
5580 native = jitInfo->ignoresReturnValueMethod;
5583 emitCallNative(call, native);
// Call a class hook (callee only known at runtime); shares emitCallNative.
5586 void CodeGenerator::visitCallClassHook(LCallClassHook* call) {
5587 emitCallNative(call, call->mir()->target());
// Load a DOM object's private pointer into |priv|. Native DOM objects keep
// it in fixed slot 0; DOM proxies keep it in reserved slot 0 behind the
// proxy's reserved-slots pointer.
5590 static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
5591 DOMObjectKind kind) {
5592 // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
5593 // will be in the first slot but may be fixed or non-fixed.
5594 MOZ_ASSERT(obj != priv);
5596 switch (kind) {
5597 case DOMObjectKind::Native:
5598 // If it's a native object, the value must be in a fixed slot.
5599 // See CanAttachDOMCall in CacheIR.cpp.
5600 masm.debugAssertObjHasFixedSlots(obj, priv);
5601 masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
5602 break;
5603 case DOMObjectKind::Proxy: {
5604 #ifdef DEBUG
5605 // Sanity check: it must be a DOM proxy.
5606 Label isDOMProxy;
5607 masm.branchTestProxyHandlerFamily(
5608 Assembler::Equal, obj, priv, GetDOMProxyHandlerFamily(), &isDOMProxy);
5609 masm.assumeUnreachable("Expected a DOM proxy");
5610 masm.bind(&isDOMProxy);
5611 #endif
// Two loads: reserved-slots array pointer, then the private in slot 0.
5612 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
5613 masm.loadPrivate(
5614 Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
5615 break;
// Call a DOM method through its JSJitInfo entry point. Sets up the
// JSJitMethodCallArgs/vp layout on the stack, extracts the |this| object
// and its DOM private, enters an IonDOMMethod exit frame, and makes the
// ABI call. Infallible methods skip the ReturnReg failure check.
5620 void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
5621 WrappedFunction* target = call->getSingleTarget();
5622 MOZ_ASSERT(target);
5623 MOZ_ASSERT(target->isNativeWithoutJitEntry());
5624 MOZ_ASSERT(target->hasJitInfo());
5625 MOZ_ASSERT(call->mir()->isCallDOMNative());
5627 int unusedStack = UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5629 // Registers used for callWithABI() argument-passing.
5630 const Register argJSContext = ToRegister(call->getArgJSContext());
5631 const Register argObj = ToRegister(call->getArgObj());
5632 const Register argPrivate = ToRegister(call->getArgPrivate());
5633 const Register argArgs = ToRegister(call->getArgArgs());
5635 DebugOnly<uint32_t> initialStack = masm.framePushed();
5637 masm.checkStackAlignment();
5639 // DOM methods have the signature:
5640 // bool (*)(JSContext*, HandleObject, void* private, const
5641 // JSJitMethodCallArgs& args)
5642 // Where args is initialized from an argc and a vp, vp[0] is space for an
5643 // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
5644 // function arguments. Note that args stores the argv, not the vp, and
5645 // argv == vp + 2.
5647 // Nestle the stack up against the pushed arguments, leaving StackPointer at
5648 // &vp[1]
5649 masm.adjustStack(unusedStack);
5650 // argObj is filled with the extracted object, then returned.
5651 Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
5652 MOZ_ASSERT(obj == argObj);
5654 // Push a Value containing the callee object: natives are allowed to access
5655 // their callee before setting the return value. After this the StackPointer
5656 // points to &vp[0].
5657 masm.Push(ObjectValue(*target->rawNativeJSFunction()));
5659 // Now compute the argv value. Since StackPointer is pointing to &vp[0] and
5660 // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
5661 // StackPointer.
5662 static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
5663 static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
5664 IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
5665 masm.computeEffectiveAddress(
5666 Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);
5668 LoadDOMPrivate(masm, obj, argPrivate,
5669 static_cast<MCallDOMNative*>(call->mir())->objectKind());
5671 // Push argc from the call instruction into what will become the IonExitFrame
5672 masm.Push(Imm32(call->numActualArgs()));
5674 // Push our argv onto the stack
5675 masm.Push(argArgs);
5676 // And store our JSJitMethodCallArgs* in argArgs.
5677 masm.moveStackPtrTo(argArgs);
5679 // Push |this| object for passing HandleObject. We push after argc to
5680 // maintain the same sp-relative location of the object pointer with other
5681 // DOMExitFrames.
5682 masm.Push(argObj);
5683 masm.moveStackPtrTo(argObj);
5685 if (call->mir()->maybeCrossRealm()) {
5686 // We use argJSContext as scratch register here.
5687 masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
5688 masm.switchToObjectRealm(argJSContext, argJSContext);
5691 // Construct native exit frame.
5692 uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
5693 masm.loadJSContext(argJSContext);
5694 masm.enterFakeExitFrame(argJSContext, argJSContext,
5695 ExitFrameType::IonDOMMethod);
5697 markSafepointAt(safepointOffset, call);
5699 // Construct and execute call.
5700 masm.setupAlignedABICall();
5701 masm.loadJSContext(argJSContext);
5702 masm.passABIArg(argJSContext);
5703 masm.passABIArg(argObj);
5704 masm.passABIArg(argPrivate);
5705 masm.passABIArg(argArgs);
5706 ensureOsiSpace();
5707 masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
5708 MoveOp::GENERAL,
5709 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
// Infallible DOM methods cannot fail, so their result can be loaded
// without testing ReturnReg.
5711 if (target->jitInfo()->isInfallible) {
5712 masm.loadValue(Address(masm.getStackPointer(),
5713 IonDOMMethodExitFrameLayout::offsetOfResult()),
5714 JSReturnOperand);
5715 } else {
5716 // Test for failure.
5717 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
5719 // Load the outparam vp[0] into output register(s).
5720 masm.loadValue(Address(masm.getStackPointer(),
5721 IonDOMMethodExitFrameLayout::offsetOfResult()),
5722 JSReturnOperand);
5725 // Switch back to the current realm if needed. Note: if the DOM method threw
5726 // an exception, the exception handler will do this.
5727 if (call->mir()->maybeCrossRealm()) {
5728 static_assert(!JSReturnOperand.aliases(ReturnReg),
5729 "Clobbering ReturnReg should not affect the return value");
5730 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5733 // Until C++ code is instrumented against Spectre, prevent speculative
5734 // execution from returning any private data.
5735 if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
5736 masm.speculationBarrier();
5739 // The next instruction is removing the footer of the exit frame, so there
5740 // is no need for leaveFakeExitFrame.
5742 // Move the StackPointer back to its original location, unwinding the native
5743 // exit frame.
5744 masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
5745 MOZ_ASSERT(masm.framePushed() == initialStack);
// Fetch a self-hosting intrinsic value by name via the GetIntrinsicValue
// VM function.
5748 void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
5749 pushArg(ImmGCPtr(lir->mir()->name()));
5751 using Fn = bool (*)(JSContext* cx, Handle<PropertyName*>, MutableHandleValue);
5752 callVM<Fn, GetIntrinsicValue>(lir);
5755 void CodeGenerator::emitCallInvokeFunction(
5756 LInstruction* call, Register calleereg, bool constructing,
5757 bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
5758 // Nestle %esp up to the argument vector.
5759 // Each path must account for framePushed_ separately, for callVM to be valid.
5760 masm.freeStack(unusedStack);
5762 pushArg(masm.getStackPointer()); // argv.
5763 pushArg(Imm32(argc)); // argc.
5764 pushArg(Imm32(ignoresReturnValue));
5765 pushArg(Imm32(constructing)); // constructing.
5766 pushArg(calleereg); // JSFunction*.
5768 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5769 MutableHandleValue);
5770 callVM<Fn, jit::InvokeFunction>(call);
5772 // Un-nestle %esp from the argument vector. No prefix was pushed.
5773 masm.reserveStack(unusedStack);
5776 void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
5777 // The callee is passed straight through to the trampoline.
5778 MOZ_ASSERT(ToRegister(call->getCallee()) == IonGenericCallCalleeReg);
5780 Register argcReg = ToRegister(call->getArgc());
5781 uint32_t unusedStack =
5782 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5784 // Known-target case is handled by LCallKnown.
5785 MOZ_ASSERT(!call->hasSingleTarget());
5787 masm.checkStackAlignment();
5789 masm.move32(Imm32(call->numActualArgs()), argcReg);
5791 // Nestle the StackPointer up to the argument vector.
5792 masm.freeStack(unusedStack);
5793 ensureOsiSpace();
5795 auto kind = call->mir()->isConstructing() ? IonGenericCallKind::Construct
5796 : IonGenericCallKind::Call;
5798 TrampolinePtr genericCallStub =
5799 gen->jitRuntime()->getIonGenericCallStub(kind);
5800 uint32_t callOffset = masm.callJit(genericCallStub);
5801 markSafepointAt(callOffset, call);
5803 if (call->mir()->maybeCrossRealm()) {
5804 static_assert(!JSReturnOperand.aliases(ReturnReg),
5805 "ReturnReg available as scratch after scripted calls");
5806 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5809 // Restore stack pointer.
5810 masm.setFramePushed(frameSize());
5811 emitRestoreStackPointerFromFP();
5813 // If the return value of the constructing function is Primitive,
5814 // replace the return value with the Object from CreateThis.
5815 if (call->mir()->isConstructing()) {
5816 Label notPrimitive;
5817 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5818 &notPrimitive);
5819 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
5820 JSReturnOperand);
5821 #ifdef DEBUG
5822 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5823 &notPrimitive);
5824 masm.assumeUnreachable("CreateThis creates an object");
5825 #endif
5826 masm.bind(&notPrimitive);
5830 void JitRuntime::generateIonGenericCallArgumentsShift(
5831 MacroAssembler& masm, Register argc, Register curr, Register end,
5832 Register scratch, Label* done) {
5833 static_assert(sizeof(Value) == 8);
5834 // There are |argc| Values on the stack. Shift them all down by 8 bytes,
5835 // overwriting the first value.
5837 // Initialize `curr` to the destination of the first copy, and `end` to the
5838 // final value of curr.
5839 masm.moveStackPtrTo(curr);
5840 masm.computeEffectiveAddress(BaseValueIndex(curr, argc), end);
5842 Label loop;
5843 masm.bind(&loop);
5844 masm.branchPtr(Assembler::Equal, curr, end, done);
5845 masm.loadPtr(Address(curr, 8), scratch);
5846 masm.storePtr(scratch, Address(curr, 0));
5847 masm.addPtr(Imm32(sizeof(uintptr_t)), curr);
5848 masm.jump(&loop);
5851 void JitRuntime::generateIonGenericCallStub(MacroAssembler& masm,
5852 IonGenericCallKind kind) {
5853 AutoCreatedBy acb(masm, "JitRuntime::generateIonGenericCallStub");
5854 ionGenericCallStubOffset_[kind] = startTrampolineCode(masm);
5856 // This code is tightly coupled with visitCallGeneric.
5858 // Upon entry:
5859 // IonGenericCallCalleeReg contains a pointer to the callee object.
5860 // IonGenericCallArgcReg contains the number of actual args.
5861 // The arguments have been pushed onto the stack:
5862 // [newTarget] (iff isConstructing)
5863 // [argN]
5864 // ...
5865 // [arg1]
5866 // [arg0]
5867 // [this]
5868 // <return address> (if not JS_USE_LINK_REGISTER)
5870 // This trampoline is responsible for entering the callee's realm,
5871 // massaging the stack into the right shape, and then performing a
5872 // tail call. We will return directly to the Ion code from the
5873 // callee.
5875 // To do a tail call, we keep the return address in a register, even
5876 // on platforms that don't normally use a link register, and push it
5877 // just before jumping to the callee, after we are done setting up
5878 // the stack.
5880 // The caller is responsible for switching back to the caller's
5881 // realm and cleaning up the stack.
5883 Register calleeReg = IonGenericCallCalleeReg;
5884 Register argcReg = IonGenericCallArgcReg;
5885 Register scratch = IonGenericCallScratch;
5886 Register scratch2 = IonGenericCallScratch2;
5888 #ifndef JS_USE_LINK_REGISTER
5889 Register returnAddrReg = IonGenericCallReturnAddrReg;
5890 masm.pop(returnAddrReg);
5891 #endif
5893 #ifdef JS_CODEGEN_ARM
5894 // The default second scratch register on arm is lr, which we need
5895 // preserved for tail calls.
5896 AutoNonDefaultSecondScratchRegister andssr(masm, IonGenericSecondScratchReg);
5897 #endif
5899 bool isConstructing = kind == IonGenericCallKind::Construct;
5901 Label entry, notFunction, noJitEntry, vmCall;
5902 masm.bind(&entry);
5904 // Guard that the callee is actually a function.
5905 masm.branchTestObjIsFunction(Assembler::NotEqual, calleeReg, scratch,
5906 calleeReg, &notFunction);
5908 // Guard that the callee supports the [[Call]] or [[Construct]] operation.
5909 // If these tests fail, we will call into the VM to throw an exception.
5910 if (isConstructing) {
5911 masm.branchTestFunctionFlags(calleeReg, FunctionFlags::CONSTRUCTOR,
5912 Assembler::Zero, &vmCall);
5913 } else {
5914 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
5915 calleeReg, scratch, &vmCall);
5918 if (isConstructing) {
5919 // Use the slow path if CreateThis was unable to create the |this| object.
5920 Address thisAddr(masm.getStackPointer(), 0);
5921 masm.branchTestNull(Assembler::Equal, thisAddr, &vmCall);
5924 masm.switchToObjectRealm(calleeReg, scratch);
5926 // Load jitCodeRaw for callee if it exists.
5927 masm.branchIfFunctionHasNoJitEntry(calleeReg, isConstructing, &noJitEntry);
5929 // ****************************
5930 // * Functions with jit entry *
5931 // ****************************
5932 masm.loadJitCodeRaw(calleeReg, scratch2);
5934 // Construct the JitFrameLayout.
5935 masm.PushCalleeToken(calleeReg, isConstructing);
5936 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcReg, scratch);
5937 #ifndef JS_USE_LINK_REGISTER
5938 masm.push(returnAddrReg);
5939 #endif
5941 // Check whether we need a rectifier frame.
5942 Label noRectifier;
5943 masm.loadFunctionArgCount(calleeReg, scratch);
5944 masm.branch32(Assembler::BelowOrEqual, scratch, argcReg, &noRectifier);
5946 // Tail-call the arguments rectifier.
5947 // Because all trampolines are created at the same time,
5948 // we can't create a TrampolinePtr for the arguments rectifier,
5949 // because it hasn't been linked yet. We can, however, directly
5950 // encode its offset.
5951 Label rectifier;
5952 bindLabelToOffset(&rectifier, argumentsRectifierOffset_);
5954 masm.jump(&rectifier);
5957 // Tail call the jit entry.
5958 masm.bind(&noRectifier);
5959 masm.jump(scratch2);
5961 // ********************
5962 // * Native functions *
5963 // ********************
5964 masm.bind(&noJitEntry);
5965 if (!isConstructing) {
5966 generateIonGenericCallFunCall(masm, &entry, &vmCall);
5968 generateIonGenericCallNativeFunction(masm, isConstructing);
5970 // *******************
5971 // * Bound functions *
5972 // *******************
5973 // TODO: support class hooks?
5974 masm.bind(&notFunction);
5975 if (!isConstructing) {
5976 // TODO: support generic bound constructors?
5977 generateIonGenericCallBoundFunction(masm, &entry, &vmCall);
5980 // ********************
5981 // * Fallback VM call *
5982 // ********************
5983 masm.bind(&vmCall);
5985 masm.push(masm.getStackPointer()); // argv
5986 masm.push(argcReg); // argc
5987 masm.push(Imm32(false)); // ignores return value
5988 masm.push(Imm32(isConstructing)); // constructing
5989 masm.push(calleeReg); // callee
5991 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5992 MutableHandleValue);
5993 VMFunctionId id = VMFunctionToId<Fn, jit::InvokeFunction>::id;
5994 uint32_t invokeFunctionOffset = functionWrapperOffsets_[size_t(id)];
5995 Label invokeFunctionVMEntry;
5996 bindLabelToOffset(&invokeFunctionVMEntry, invokeFunctionOffset);
5998 masm.pushFrameDescriptor(FrameType::IonJS);
5999 #ifndef JS_USE_LINK_REGISTER
6000 masm.push(returnAddrReg);
6001 #endif
6002 masm.jump(&invokeFunctionVMEntry);
6005 void JitRuntime::generateIonGenericCallNativeFunction(MacroAssembler& masm,
6006 bool isConstructing) {
6007 Register calleeReg = IonGenericCallCalleeReg;
6008 Register argcReg = IonGenericCallArgcReg;
6009 Register scratch = IonGenericCallScratch;
6010 Register scratch2 = IonGenericCallScratch2;
6011 Register contextReg = IonGenericCallScratch3;
6012 #ifndef JS_USE_LINK_REGISTER
6013 Register returnAddrReg = IonGenericCallReturnAddrReg;
6014 #endif
6016 // Push a value containing the callee, which will become argv[0].
6017 masm.pushValue(JSVAL_TYPE_OBJECT, calleeReg);
6019 // Load the callee address into calleeReg.
6020 #ifdef JS_SIMULATOR
6021 masm.movePtr(ImmPtr(RedirectedCallAnyNative()), calleeReg);
6022 #else
6023 masm.loadPrivate(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6024 calleeReg);
6025 #endif
6027 // Load argv into scratch2.
6028 masm.moveStackPtrTo(scratch2);
6030 // Push argc.
6031 masm.push(argcReg);
6033 masm.loadJSContext(contextReg);
6035 // Construct native exit frame. Note that unlike other cases in this
6036 // trampoline, this code does not use a tail call.
6037 masm.pushFrameDescriptor(FrameType::IonJS);
6038 #ifdef JS_USE_LINK_REGISTER
6039 masm.pushReturnAddress();
6040 #else
6041 masm.push(returnAddrReg);
6042 #endif
6044 masm.push(FramePointer);
6045 masm.moveStackPtrTo(FramePointer);
6046 masm.enterFakeExitFrameForNative(contextReg, scratch, isConstructing);
6048 masm.setupUnalignedABICall(scratch);
6049 masm.passABIArg(contextReg); // cx
6050 masm.passABIArg(argcReg); // argc
6051 masm.passABIArg(scratch2); // argv
6053 masm.callWithABI(calleeReg);
6055 // Test for failure.
6056 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
6058 masm.loadValue(
6059 Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
6060 JSReturnOperand);
6062 // Leave the exit frame.
6063 masm.moveToStackPtr(FramePointer);
6064 masm.pop(FramePointer);
6066 // Return.
6067 masm.ret();
6070 void JitRuntime::generateIonGenericCallFunCall(MacroAssembler& masm,
6071 Label* entry, Label* vmCall) {
6072 Register calleeReg = IonGenericCallCalleeReg;
6073 Register argcReg = IonGenericCallArgcReg;
6074 Register scratch = IonGenericCallScratch;
6075 Register scratch2 = IonGenericCallScratch2;
6076 Register scratch3 = IonGenericCallScratch3;
6078 Label notFunCall;
6079 masm.branchPtr(Assembler::NotEqual,
6080 Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6081 ImmPtr(js::fun_call), &notFunCall);
6083 // In general, we can implement fun_call by replacing calleeReg with
6084 // |this|, sliding all the other arguments down, and decrementing argc.
6086 // *BEFORE* *AFTER*
6087 // [argN] argc = N+1 <padding>
6088 // ... [argN] argc = N
6089 // [arg1] ...
6090 // [arg0] [arg1] <- now arg0
6091 // [this] <- top of stack (aligned) [arg0] <- now this
6093 // The only exception is when argc is already 0, in which case instead
6094 // of shifting arguments down we replace [this] with UndefinedValue():
6096 // *BEFORE* *AFTER*
6097 // [this] argc = 0 [undef] argc = 0
6099 // After making this transformation, we can jump back to the beginning
6100 // of this trampoline to handle the inner call.
6102 // Guard that |this| is an object. If it is, replace calleeReg.
6103 masm.fallibleUnboxObject(Address(masm.getStackPointer(), 0), scratch, vmCall);
6104 masm.movePtr(scratch, calleeReg);
6106 Label hasArgs;
6107 masm.branch32(Assembler::NotEqual, argcReg, Imm32(0), &hasArgs);
6109 // No arguments. Replace |this| with |undefined| and start from the top.
6110 masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), 0));
6111 masm.jump(entry);
6113 masm.bind(&hasArgs);
6115 Label doneSliding;
6116 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6117 scratch3, &doneSliding);
6118 masm.bind(&doneSliding);
6119 masm.sub32(Imm32(1), argcReg);
6121 masm.jump(entry);
6123 masm.bind(&notFunCall);
6126 void JitRuntime::generateIonGenericCallBoundFunction(MacroAssembler& masm,
6127 Label* entry,
6128 Label* vmCall) {
6129 Register calleeReg = IonGenericCallCalleeReg;
6130 Register argcReg = IonGenericCallArgcReg;
6131 Register scratch = IonGenericCallScratch;
6132 Register scratch2 = IonGenericCallScratch2;
6133 Register scratch3 = IonGenericCallScratch3;
6135 masm.branchTestObjClass(Assembler::NotEqual, calleeReg,
6136 &BoundFunctionObject::class_, scratch, calleeReg,
6137 vmCall);
6139 Address targetSlot(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
6140 Address flagsSlot(calleeReg, BoundFunctionObject::offsetOfFlagsSlot());
6141 Address thisSlot(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
6142 Address firstInlineArgSlot(
6143 calleeReg, BoundFunctionObject::offsetOfFirstInlineBoundArg());
6145 // Check that we won't be pushing too many arguments.
6146 masm.load32(flagsSlot, scratch);
6147 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6148 masm.add32(argcReg, scratch);
6149 masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX), vmCall);
6151 // The stack is currently correctly aligned for a jit call. We will
6152 // be updating the `this` value and potentially adding additional
6153 // arguments. On platforms with 16-byte alignment, if the number of
6154 // bound arguments is odd, we have to move the arguments that are
6155 // currently on the stack. For example, with one bound argument:
6157 // *BEFORE* *AFTER*
6158 // [argN] <padding>
6159 // ... [argN] |
6160 // [arg1] ... | These arguments have been
6161 // [arg0] [arg1] | shifted down 8 bytes.
6162 // [this] <- top of stack (aligned) [arg0] v
6163 // [bound0] <- one bound argument (odd)
6164 // [boundThis] <- top of stack (aligned)
6166 Label poppedThis;
6167 if (JitStackValueAlignment > 1) {
6168 Label alreadyAligned;
6169 masm.branchTest32(Assembler::Zero, flagsSlot,
6170 Imm32(1 << BoundFunctionObject::NumBoundArgsShift),
6171 &alreadyAligned);
6173 // We have an odd number of bound arguments. Shift the existing arguments
6174 // down by 8 bytes.
6175 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6176 scratch3, &poppedThis);
6177 masm.bind(&alreadyAligned);
6180 // Pop the current `this`. It will be replaced with the bound `this`.
6181 masm.freeStack(sizeof(Value));
6182 masm.bind(&poppedThis);
6184 // Load the number of bound arguments in scratch
6185 masm.load32(flagsSlot, scratch);
6186 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6188 Label donePushingBoundArguments;
6189 masm.branch32(Assembler::Equal, scratch, Imm32(0),
6190 &donePushingBoundArguments);
6192 // Update argc to include bound arguments.
6193 masm.add32(scratch, argcReg);
6195 // Load &boundArgs[0] in scratch2.
6196 Label outOfLineBoundArguments, haveBoundArguments;
6197 masm.branch32(Assembler::Above, scratch,
6198 Imm32(BoundFunctionObject::MaxInlineBoundArgs),
6199 &outOfLineBoundArguments);
6200 masm.computeEffectiveAddress(firstInlineArgSlot, scratch2);
6201 masm.jump(&haveBoundArguments);
6203 masm.bind(&outOfLineBoundArguments);
6204 masm.unboxObject(firstInlineArgSlot, scratch2);
6205 masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2);
6207 masm.bind(&haveBoundArguments);
6209 // Load &boundArgs[numBoundArgs] in scratch.
6210 BaseObjectElementIndex lastBoundArg(scratch2, scratch);
6211 masm.computeEffectiveAddress(lastBoundArg, scratch);
6213 // Push the bound arguments, starting with the last one.
6214 // Copying pre-decrements scratch until scratch2 is reached.
6215 Label boundArgumentsLoop;
6216 masm.bind(&boundArgumentsLoop);
6217 masm.subPtr(Imm32(sizeof(Value)), scratch);
6218 masm.pushValue(Address(scratch, 0));
6219 masm.branchPtr(Assembler::Above, scratch, scratch2, &boundArgumentsLoop);
6220 masm.bind(&donePushingBoundArguments);
6222 // Push the bound `this`.
6223 masm.pushValue(thisSlot);
6225 // Load the target in calleeReg.
6226 masm.unboxObject(targetSlot, calleeReg);
6228 // At this point, all preconditions for entering the trampoline are met:
6229 // - calleeReg contains a pointer to the callee object
6230 // - argcReg contains the number of actual args (now including bound args)
6231 // - the arguments are on the stack with the correct alignment.
6232 // Instead of generating more code, we can jump back to the entry point
6233 // of the trampoline to call the bound target.
6234 masm.jump(entry);
6237 void CodeGenerator::visitCallKnown(LCallKnown* call) {
6238 Register calleereg = ToRegister(call->getFunction());
6239 Register objreg = ToRegister(call->getTempObject());
6240 uint32_t unusedStack =
6241 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
6242 WrappedFunction* target = call->getSingleTarget();
6244 // Native single targets (except wasm) are handled by LCallNative.
6245 MOZ_ASSERT(target->hasJitEntry());
6247 // Missing arguments must have been explicitly appended by WarpBuilder.
6248 DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
6249 MOZ_ASSERT(target->nargs() <=
6250 call->mir()->numStackArgs() - numNonArgsOnStack);
6252 MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());
6254 masm.checkStackAlignment();
6256 if (target->isClassConstructor() && !call->isConstructing()) {
6257 emitCallInvokeFunction(call, calleereg, call->isConstructing(),
6258 call->ignoresReturnValue(), call->numActualArgs(),
6259 unusedStack);
6260 return;
6263 MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());
6265 MOZ_ASSERT(!call->mir()->needsThisCheck());
6267 if (call->mir()->maybeCrossRealm()) {
6268 masm.switchToObjectRealm(calleereg, objreg);
6271 masm.loadJitCodeRaw(calleereg, objreg);
6273 // Nestle the StackPointer up to the argument vector.
6274 masm.freeStack(unusedStack);
6276 // Construct the JitFrameLayout.
6277 masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
6278 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, call->numActualArgs());
6280 // Finally call the function in objreg.
6281 ensureOsiSpace();
6282 uint32_t callOffset = masm.callJit(objreg);
6283 markSafepointAt(callOffset, call);
6285 if (call->mir()->maybeCrossRealm()) {
6286 static_assert(!JSReturnOperand.aliases(ReturnReg),
6287 "ReturnReg available as scratch after scripted calls");
6288 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6291 // Restore stack pointer: pop JitFrameLayout fields still left on the stack
6292 // and undo the earlier |freeStack(unusedStack)|.
6293 int prefixGarbage =
6294 sizeof(JitFrameLayout) - JitFrameLayout::bytesPoppedAfterCall();
6295 masm.adjustStack(prefixGarbage - unusedStack);
6297 // If the return value of the constructing function is Primitive,
6298 // replace the return value with the Object from CreateThis.
6299 if (call->mir()->isConstructing()) {
6300 Label notPrimitive;
6301 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6302 &notPrimitive);
6303 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
6304 JSReturnOperand);
6305 #ifdef DEBUG
6306 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6307 &notPrimitive);
6308 masm.assumeUnreachable("CreateThis creates an object");
6309 #endif
6310 masm.bind(&notPrimitive);
6314 template <typename T>
6315 void CodeGenerator::emitCallInvokeFunction(T* apply) {
6316 Register objreg = ToRegister(apply->getTempObject());
6318 // Push the space used by the arguments.
6319 masm.moveStackPtrTo(objreg);
6321 pushArg(objreg); // argv.
6322 pushArg(ToRegister(apply->getArgc())); // argc.
6323 pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
6324 pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
6325 pushArg(ToRegister(apply->getFunction())); // JSFunction*.
6327 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6328 MutableHandleValue);
6329 callVM<Fn, jit::InvokeFunction>(apply);
6332 // Do not bailout after the execution of this function since the stack no longer
6333 // correspond to what is expected by the snapshots.
6334 void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
6335 Register scratch) {
6336 // Use scratch register to calculate stack space (including padding).
6337 masm.movePtr(argcreg, scratch);
6339 // Align the JitFrameLayout on the JitStackAlignment.
6340 if (JitStackValueAlignment > 1) {
6341 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6342 "Stack padding assumes that the frameSize is correct");
6343 MOZ_ASSERT(JitStackValueAlignment == 2);
6344 Label noPaddingNeeded;
6345 // if the number of arguments is odd, then we do not need any padding.
6346 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6347 masm.addPtr(Imm32(1), scratch);
6348 masm.bind(&noPaddingNeeded);
6351 // Reserve space for copying the arguments.
6352 NativeObject::elementsSizeMustNotOverflow();
6353 masm.lshiftPtr(Imm32(ValueShift), scratch);
6354 masm.subFromStackPtr(scratch);
6356 #ifdef DEBUG
6357 // Put a magic value in the space reserved for padding. Note, this code
6358 // cannot be merged with the previous test, as not all architectures can
6359 // write below their stack pointers.
6360 if (JitStackValueAlignment > 1) {
6361 MOZ_ASSERT(JitStackValueAlignment == 2);
6362 Label noPaddingNeeded;
6363 // if the number of arguments is odd, then we do not need any padding.
6364 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6365 BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
6366 masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
6367 masm.bind(&noPaddingNeeded);
6369 #endif
6372 // Do not bailout after the execution of this function since the stack no longer
6373 // correspond to what is expected by the snapshots.
6374 void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
6375 Register argcreg, Register newTargetAndScratch) {
6376 // Align the JitFrameLayout on the JitStackAlignment. Contrary to
6377 // |emitAllocateSpaceForApply()|, we're always pushing a magic value, because
6378 // we can't write to |newTargetAndScratch| before |new.target| has
6379 // been pushed onto the stack.
6380 if (JitStackValueAlignment > 1) {
6381 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6382 "Stack padding assumes that the frameSize is correct");
6383 MOZ_ASSERT(JitStackValueAlignment == 2);
6385 Label noPaddingNeeded;
6386 // If the number of arguments is even, then we do not need any padding.
6387 masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
6388 masm.pushValue(MagicValue(JS_ARG_POISON));
6389 masm.bind(&noPaddingNeeded);
6392 // Push |new.target| after the padding value, but before any arguments.
6393 masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndScratch);
6395 // Use newTargetAndScratch to calculate stack space (including padding).
6396 masm.movePtr(argcreg, newTargetAndScratch);
6398 // Reserve space for copying the arguments.
6399 NativeObject::elementsSizeMustNotOverflow();
6400 masm.lshiftPtr(Imm32(ValueShift), newTargetAndScratch);
6401 masm.subFromStackPtr(newTargetAndScratch);
6404 // Destroys argvIndex and copyreg.
6405 void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
6406 Register argvIndex, Register copyreg,
6407 size_t argvSrcOffset,
6408 size_t argvDstOffset) {
6409 Label loop;
6410 masm.bind(&loop);
6412 // As argvIndex is off by 1, and we use the decBranchPtr instruction
6413 // to loop back, we have to substract the size of the word which are
6414 // copied.
6415 BaseValueIndex srcPtr(argvSrcBase, argvIndex,
6416 int32_t(argvSrcOffset) - sizeof(void*));
6417 BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
6418 int32_t(argvDstOffset) - sizeof(void*));
6419 masm.loadPtr(srcPtr, copyreg);
6420 masm.storePtr(copyreg, dstPtr);
6422 // Handle 32 bits architectures.
6423 if (sizeof(Value) == 2 * sizeof(void*)) {
6424 BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
6425 int32_t(argvSrcOffset) - 2 * sizeof(void*));
6426 BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
6427 int32_t(argvDstOffset) - 2 * sizeof(void*));
6428 masm.loadPtr(srcPtrLow, copyreg);
6429 masm.storePtr(copyreg, dstPtrLow);
6432 masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
6435 void CodeGenerator::emitRestoreStackPointerFromFP() {
6436 // This is used to restore the stack pointer after a call with a dynamic
6437 // number of arguments.
6439 MOZ_ASSERT(masm.framePushed() == frameSize());
6441 int32_t offset = -int32_t(frameSize());
6442 masm.computeEffectiveAddress(Address(FramePointer, offset),
6443 masm.getStackPointer());
6446 void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
6447 Register copyreg, uint32_t extraFormals) {
6448 Label end;
6450 // Skip the copy of arguments if there are none.
6451 masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);
6453 // clang-format off
6455 // We are making a copy of the arguments which are above the JitFrameLayout
6456 // of the current Ion frame.
6458 // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
6460 // clang-format on
6462 // Compute the source and destination offsets into the stack.
6463 Register argvSrcBase = FramePointer;
6464 size_t argvSrcOffset =
6465 JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
6466 size_t argvDstOffset = 0;
6468 Register argvIndex = scratch;
6469 masm.move32(argcreg, argvIndex);
6471 // Copy arguments.
6472 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6473 argvDstOffset);
6475 // Join with all arguments copied and the extra stack usage computed.
6476 masm.bind(&end);
6479 void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
6480 Register scratch) {
6481 // Holds the function nargs. Initially the number of args to the caller.
6482 Register argcreg = ToRegister(apply->getArgc());
6483 Register copyreg = ToRegister(apply->getTempObject());
6484 uint32_t extraFormals = apply->numExtraFormals();
6486 emitAllocateSpaceForApply(argcreg, scratch);
6488 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6490 // Push |this|.
6491 masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
6494 void CodeGenerator::emitPushArguments(LApplyArgsObj* apply, Register scratch) {
6495 // argc and argsObj are mapped to the same calltemp register.
6496 MOZ_ASSERT(apply->getArgsObj() == apply->getArgc());
6498 Register tmpArgc = ToRegister(apply->getTempObject());
6499 Register argsObj = ToRegister(apply->getArgsObj());
6501 // Load argc into tmpArgc.
6502 Address lengthAddr(argsObj, ArgumentsObject::getInitialLengthSlotOffset());
6503 masm.unboxInt32(lengthAddr, tmpArgc);
6504 masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), tmpArgc);
6506 // Allocate space on the stack for arguments. This modifies scratch.
6507 emitAllocateSpaceForApply(tmpArgc, scratch);
6509 // Load arguments data
6510 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
6511 argsObj);
6512 size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
6514 // This is the end of the lifetime of argsObj.
6515 // After this call, the argsObj register holds the argument count instead.
6516 emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);
6518 masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
6521 void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
6522 Register srcBaseAndArgc,
6523 Register scratch,
6524 size_t argvSrcOffset) {
6525 // Preconditions:
6526 // 1. |tmpArgc| * sizeof(Value) bytes have been allocated at the top of
6527 // the stack to hold arguments.
6528 // 2. |srcBaseAndArgc| + |srcOffset| points to an array of |tmpArgc| values.
6530 // Postconditions:
6531 // 1. The arguments at |srcBaseAndArgc| + |srcOffset| have been copied into
6532 // the allocated space.
6533 // 2. |srcBaseAndArgc| now contains the original value of |tmpArgc|.
6535 // |scratch| is used as a temp register within this function and clobbered.
6537 Label noCopy, epilogue;
6539 // Skip the copy of arguments if there are none.
6540 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
6542 // Copy the values. This code is skipped entirely if there are
6543 // no values.
6544 size_t argvDstOffset = 0;
6546 Register argvSrcBase = srcBaseAndArgc;
6547 Register copyreg = scratch;
6549 masm.push(tmpArgc);
6550 Register argvIndex = tmpArgc;
6551 argvDstOffset += sizeof(void*);
6553 // Copy
6554 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6555 argvDstOffset);
6557 // Restore.
6558 masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
6559 masm.jump(&epilogue);
6561 // Clear argc if we skipped the copy step.
6562 masm.bind(&noCopy);
6563 masm.movePtr(ImmWord(0), srcBaseAndArgc);
6565 // Join with all arguments copied and the extra stack usage computed.
6566 // Note, "srcBase" has become "argc".
6567 masm.bind(&epilogue);
6570 void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply,
6571 Register scratch) {
6572 Register tmpArgc = ToRegister(apply->getTempObject());
6573 Register elementsAndArgc = ToRegister(apply->getElements());
6575 // Invariants guarded in the caller:
6576 // - the array is not too long
6577 // - the array length equals its initialized length
6579 // The array length is our argc for the purposes of allocating space.
6580 Address length(ToRegister(apply->getElements()),
6581 ObjectElements::offsetOfLength());
6582 masm.load32(length, tmpArgc);
6584 // Allocate space for the values.
6585 emitAllocateSpaceForApply(tmpArgc, scratch);
6587 // After this call "elements" has become "argc".
6588 size_t elementsOffset = 0;
6589 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6591 // Push |this|.
6592 masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
6595 void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct,
6596 Register scratch) {
6597 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6599 // Holds the function nargs. Initially the number of args to the caller.
6600 Register argcreg = ToRegister(construct->getArgc());
6601 Register copyreg = ToRegister(construct->getTempObject());
6602 uint32_t extraFormals = construct->numExtraFormals();
6604 // Allocate space for the values.
6605 // After this call "newTarget" has become "scratch".
6606 emitAllocateSpaceForConstructAndPushNewTarget(argcreg, scratch);
6608 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6610 // Push |this|.
6611 masm.pushValue(ToValue(construct, LConstructArgsGeneric::ThisIndex));
6614 void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct,
6615 Register scratch) {
6616 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6618 Register tmpArgc = ToRegister(construct->getTempObject());
6619 Register elementsAndArgc = ToRegister(construct->getElements());
6621 // Invariants guarded in the caller:
6622 // - the array is not too long
6623 // - the array length equals its initialized length
6625 // The array length is our argc for the purposes of allocating space.
6626 Address length(ToRegister(construct->getElements()),
6627 ObjectElements::offsetOfLength());
6628 masm.load32(length, tmpArgc);
6630 // Allocate space for the values.
6631 emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, scratch);
6633 // After this call "elements" has become "argc" and "newTarget" has become
6634 // "scratch".
6635 size_t elementsOffset = 0;
6636 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6638 // Push |this|.
6639 masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
6642 template <typename T>
6643 void CodeGenerator::emitApplyGeneric(T* apply) {
6644 // Holds the function object.
6645 Register calleereg = ToRegister(apply->getFunction());
6647 // Temporary register for modifying the function object.
6648 Register objreg = ToRegister(apply->getTempObject());
6649 Register scratch = ToRegister(apply->getTempForArgCopy());
6651 // Holds the function nargs, computed in the invoker or (for ApplyArray,
6652 // ConstructArray, or ApplyArgsObj) in the argument pusher.
6653 Register argcreg = ToRegister(apply->getArgc());
6655 // Copy the arguments of the current function.
6657 // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also
6658 // compute argc. The argc register and the elements/argsObj register
6659 // are the same; argc must not be referenced before the call to
6660 // emitPushArguments() and elements/argsObj must not be referenced
6661 // after it returns.
6663 // In the case of ConstructArray or ConstructArgs, also overwrite newTarget
6664 // with scratch; newTarget must not be referenced after this point.
6666 // objreg is dead across this call.
6667 emitPushArguments(apply, scratch);
6669 masm.checkStackAlignment();
6671 bool constructing = apply->mir()->isConstructing();
6673 // If the function is native, only emit the call to InvokeFunction.
6674 if (apply->hasSingleTarget() &&
6675 apply->getSingleTarget()->isNativeWithoutJitEntry()) {
6676 emitCallInvokeFunction(apply);
6678 #ifdef DEBUG
6679 // Native constructors are guaranteed to return an Object value, so we never
6680 // have to replace a primitive result with the previously allocated Object
6681 // from CreateThis.
6682 if (constructing) {
6683 Label notPrimitive;
6684 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6685 &notPrimitive);
6686 masm.assumeUnreachable("native constructors don't return primitives");
6687 masm.bind(&notPrimitive);
6689 #endif
6691 emitRestoreStackPointerFromFP();
6692 return;
6695 Label end, invoke;
6697 // Unless already known, guard that calleereg is actually a function object.
6698 if (!apply->hasSingleTarget()) {
6699 masm.branchTestObjIsFunction(Assembler::NotEqual, calleereg, objreg,
6700 calleereg, &invoke);
6703 // Guard that calleereg is an interpreted function with a JSScript.
6704 masm.branchIfFunctionHasNoJitEntry(calleereg, constructing, &invoke);
6706 // Guard that callee allows the [[Call]] or [[Construct]] operation required.
6707 if (constructing) {
6708 masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
6709 Assembler::Zero, &invoke);
6710 } else {
6711 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
6712 calleereg, objreg, &invoke);
6715 // Use the slow path if CreateThis was unable to create the |this| object.
6716 if (constructing) {
6717 Address thisAddr(masm.getStackPointer(), 0);
6718 masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
6721 // Call with an Ion frame or a rectifier frame.
6723 if (apply->mir()->maybeCrossRealm()) {
6724 masm.switchToObjectRealm(calleereg, objreg);
6727 // Knowing that calleereg is a non-native function, load jitcode.
6728 masm.loadJitCodeRaw(calleereg, objreg);
6730 masm.PushCalleeToken(calleereg, constructing);
6731 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcreg, scratch);
6733 Label underflow, rejoin;
6735 // Check whether the provided arguments satisfy target argc.
6736 if (!apply->hasSingleTarget()) {
6737 Register nformals = scratch;
6738 masm.loadFunctionArgCount(calleereg, nformals);
6739 masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
6740 } else {
6741 masm.branch32(Assembler::Below, argcreg,
6742 Imm32(apply->getSingleTarget()->nargs()), &underflow);
6745 // Skip the construction of the rectifier frame because we have no
6746 // underflow.
6747 masm.jump(&rejoin);
6749 // Argument fixup needed. Get ready to call the argumentsRectifier.
6751 masm.bind(&underflow);
6753 // Hardcode the address of the argumentsRectifier code.
6754 TrampolinePtr argumentsRectifier =
6755 gen->jitRuntime()->getArgumentsRectifier();
6756 masm.movePtr(argumentsRectifier, objreg);
6759 masm.bind(&rejoin);
6761 // Finally call the function in objreg, as assigned by one of the paths
6762 // above.
6763 ensureOsiSpace();
6764 uint32_t callOffset = masm.callJit(objreg);
6765 markSafepointAt(callOffset, apply);
6767 if (apply->mir()->maybeCrossRealm()) {
6768 static_assert(!JSReturnOperand.aliases(ReturnReg),
6769 "ReturnReg available as scratch after scripted calls");
6770 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6773 // Discard JitFrameLayout fields still left on the stack.
6774 masm.freeStack(sizeof(JitFrameLayout) -
6775 JitFrameLayout::bytesPoppedAfterCall());
6776 masm.jump(&end);
6779 // Handle uncompiled or native functions.
6781 masm.bind(&invoke);
6782 emitCallInvokeFunction(apply);
6785 masm.bind(&end);
6787 // If the return value of the constructing function is Primitive,
6788 // replace the return value with the Object from CreateThis.
6789 if (constructing) {
6790 Label notPrimitive;
6791 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6792 &notPrimitive);
6793 masm.loadValue(Address(masm.getStackPointer(), 0), JSReturnOperand);
6795 #ifdef DEBUG
6796 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6797 &notPrimitive);
6798 masm.assumeUnreachable("CreateThis creates an object");
6799 #endif
6801 masm.bind(&notPrimitive);
6804 // Pop arguments and continue.
6805 emitRestoreStackPointerFromFP();
6808 void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
6809 LSnapshot* snapshot = apply->snapshot();
6810 Register argcreg = ToRegister(apply->getArgc());
6812 // Ensure that we have a reasonable number of arguments.
6813 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6815 emitApplyGeneric(apply);
6818 void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
6819 Register argsObj = ToRegister(apply->getArgsObj());
6820 Register temp = ToRegister(apply->getTempObject());
6822 Label bail;
6823 masm.loadArgumentsObjectLength(argsObj, temp, &bail);
6824 masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
6825 bailoutFrom(&bail, apply->snapshot());
6827 emitApplyGeneric(apply);
6830 void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
6831 LSnapshot* snapshot = apply->snapshot();
6832 Register tmp = ToRegister(apply->getTempObject());
6834 Address length(ToRegister(apply->getElements()),
6835 ObjectElements::offsetOfLength());
6836 masm.load32(length, tmp);
6838 // Ensure that we have a reasonable number of arguments.
6839 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6841 // Ensure that the array does not contain an uninitialized tail.
6843 Address initializedLength(ToRegister(apply->getElements()),
6844 ObjectElements::offsetOfInitializedLength());
6845 masm.sub32(initializedLength, tmp);
6846 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
6848 emitApplyGeneric(apply);
6851 void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
6852 LSnapshot* snapshot = lir->snapshot();
6853 Register argcreg = ToRegister(lir->getArgc());
6855 // Ensure that we have a reasonable number of arguments.
6856 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6858 emitApplyGeneric(lir);
6861 void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
6862 LSnapshot* snapshot = lir->snapshot();
6863 Register tmp = ToRegister(lir->getTempObject());
6865 Address length(ToRegister(lir->getElements()),
6866 ObjectElements::offsetOfLength());
6867 masm.load32(length, tmp);
6869 // Ensure that we have a reasonable number of arguments.
6870 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6872 // Ensure that the array does not contain an uninitialized tail.
6874 Address initializedLength(ToRegister(lir->getElements()),
6875 ObjectElements::offsetOfInitializedLength());
6876 masm.sub32(initializedLength, tmp);
6877 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
6879 emitApplyGeneric(lir);
6882 void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
6884 void CodeGenerator::visitUnreachable(LUnreachable* lir) {
6885 masm.assumeUnreachable("end-of-block assumed unreachable");
6888 void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) {
6889 encode(lir->snapshot());
6892 void CodeGenerator::visitUnreachableResultV(LUnreachableResultV* lir) {
6893 masm.assumeUnreachable("must be unreachable");
6896 void CodeGenerator::visitUnreachableResultT(LUnreachableResultT* lir) {
6897 masm.assumeUnreachable("must be unreachable");
6900 // Out-of-line path to report over-recursed error and fail.
6901 class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> {
6902 LInstruction* lir_;
6904 public:
6905 explicit CheckOverRecursedFailure(LInstruction* lir) : lir_(lir) {}
6907 void accept(CodeGenerator* codegen) override {
6908 codegen->visitCheckOverRecursedFailure(this);
6911 LInstruction* lir() const { return lir_; }
6914 void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) {
6915 // If we don't push anything on the stack, skip the check.
6916 if (omitOverRecursedCheck()) {
6917 return;
6920 // Ensure that this frame will not cross the stack limit.
6921 // This is a weak check, justified by Ion using the C stack: we must always
6922 // be some distance away from the actual limit, since if the limit is
6923 // crossed, an error must be thrown, which requires more frames.
6925 // It must always be possible to trespass past the stack limit.
6926 // Ion may legally place frames very close to the limit. Calling additional
6927 // C functions may then violate the limit without any checking.
6929 // Since Ion frames exist on the C stack, the stack limit may be
6930 // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().
6932 CheckOverRecursedFailure* ool = new (alloc()) CheckOverRecursedFailure(lir);
6933 addOutOfLineCode(ool, lir->mir());
6935 // Conditional forward (unlikely) branch to failure.
6936 const void* limitAddr = gen->runtime->addressOfJitStackLimit();
6937 masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
6938 ool->entry());
6939 masm.bind(ool->rejoin());
6942 void CodeGenerator::visitCheckOverRecursedFailure(
6943 CheckOverRecursedFailure* ool) {
6944 // The OOL path is hit if the recursion depth has been exceeded.
6945 // Throw an InternalError for over-recursion.
6947 // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
6948 // to save all live registers to avoid crashes if CheckOverRecursed triggers
6949 // a GC.
6950 saveLive(ool->lir());
6952 using Fn = bool (*)(JSContext*);
6953 callVM<Fn, CheckOverRecursed>(ool->lir());
6955 restoreLive(ool->lir());
6956 masm.jump(ool->rejoin());
6959 IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() {
6960 // If scripts are being profiled, create a new IonScriptCounts for the
6961 // profiling data, which will be attached to the associated JSScript or
6962 // wasm module after code generation finishes.
6963 if (!gen->hasProfilingScripts()) {
6964 return nullptr;
6967 // This test inhibits IonScriptCount creation for wasm code which is
6968 // currently incompatible with wasm codegen for two reasons: (1) wasm code
6969 // must be serializable and script count codegen bakes in absolute
6970 // addresses, (2) wasm code does not have a JSScript with which to associate
6971 // code coverage data.
6972 JSScript* script = gen->outerInfo().script();
6973 if (!script) {
6974 return nullptr;
6977 auto counts = MakeUnique<IonScriptCounts>();
6978 if (!counts || !counts->init(graph.numBlocks())) {
6979 return nullptr;
6982 for (size_t i = 0; i < graph.numBlocks(); i++) {
6983 MBasicBlock* block = graph.getBlock(i)->mir();
6985 uint32_t offset = 0;
6986 char* description = nullptr;
6987 if (MResumePoint* resume = block->entryResumePoint()) {
6988 // Find a PC offset in the outermost script to use. If this
6989 // block is from an inlined script, find a location in the
6990 // outer script to associate information about the inlining
6991 // with.
6992 while (resume->caller()) {
6993 resume = resume->caller();
6995 offset = script->pcToOffset(resume->pc());
6997 if (block->entryResumePoint()->caller()) {
6998 // Get the filename and line number of the inner script.
6999 JSScript* innerScript = block->info().script();
7000 description = js_pod_calloc<char>(200);
7001 if (description) {
7002 snprintf(description, 200, "%s:%u", innerScript->filename(),
7003 innerScript->lineno());
7008 if (!counts->block(i).init(block->id(), offset, description,
7009 block->numSuccessors())) {
7010 return nullptr;
7013 for (size_t j = 0; j < block->numSuccessors(); j++) {
7014 counts->block(i).setSuccessor(
7015 j, skipTrivialBlocks(block->getSuccessor(j))->id());
7019 scriptCounts_ = counts.release();
7020 return scriptCounts_;
7023 // Structure for managing the state tracked for a block by script counters.
7024 struct ScriptCountBlockState {
7025 IonBlockCounts& block;
7026 MacroAssembler& masm;
7028 Sprinter printer;
7030 public:
7031 ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
7032 : block(*block), masm(*masm), printer(GetJitContext()->cx, false) {}
7034 bool init() {
7035 if (!printer.init()) {
7036 return false;
7039 // Bump the hit count for the block at the start. This code is not
7040 // included in either the text for the block or the instruction byte
7041 // counts.
7042 masm.inc64(AbsoluteAddress(block.addressOfHitCount()));
7044 // Collect human readable assembly for the code generated in the block.
7045 masm.setPrinter(&printer);
7047 return true;
7050 void visitInstruction(LInstruction* ins) {
7051 #ifdef JS_JITSPEW
7052 // Prefix stream of assembly instructions with their LIR instruction
7053 // name and any associated high level info.
7054 if (const char* extra = ins->getExtraName()) {
7055 printer.printf("[%s:%s]\n", ins->opName(), extra);
7056 } else {
7057 printer.printf("[%s]\n", ins->opName());
7059 #endif
7062 ~ScriptCountBlockState() {
7063 masm.setPrinter(nullptr);
7065 if (JS::UniqueChars str = printer.release()) {
7066 block.setCode(str.get());
7071 void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) {
7072 CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
7073 masm.propagateOOM(ionScriptLabels_.append(label));
7075 // If IonScript::invalidationCount_ != 0, the script has been invalidated.
7076 masm.branch32(Assembler::NotEqual,
7077 Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0),
7078 invalidated);
7081 #ifdef DEBUG
7082 void CodeGenerator::emitAssertGCThingResult(Register input,
7083 const MDefinition* mir) {
7084 MIRType type = mir->type();
7085 MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
7086 type == MIRType::Symbol || type == MIRType::BigInt);
7088 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7089 regs.take(input);
7091 Register temp = regs.takeAny();
7092 masm.push(temp);
7094 // Don't check if the script has been invalidated. In that case invalid
7095 // types are expected (until we reach the OsiPoint and bailout).
7096 Label done;
7097 branchIfInvalidated(temp, &done);
7099 # ifndef JS_SIMULATOR
7100 // Check that we have a valid GC pointer.
7101 // Disable for wasm because we don't have a context on wasm compilation
7102 // threads and this needs a context.
7103 // Also disable for simulator builds because the C++ call is a lot slower
7104 // there than on actual hardware.
7105 if (JitOptions.fullDebugChecks && !IsCompilingWasm()) {
7106 saveVolatile();
7107 masm.setupUnalignedABICall(temp);
7108 masm.loadJSContext(temp);
7109 masm.passABIArg(temp);
7110 masm.passABIArg(input);
7112 switch (type) {
7113 case MIRType::Object: {
7114 using Fn = void (*)(JSContext* cx, JSObject* obj);
7115 masm.callWithABI<Fn, AssertValidObjectPtr>();
7116 break;
7118 case MIRType::String: {
7119 using Fn = void (*)(JSContext* cx, JSString* str);
7120 masm.callWithABI<Fn, AssertValidStringPtr>();
7121 break;
7123 case MIRType::Symbol: {
7124 using Fn = void (*)(JSContext* cx, JS::Symbol* sym);
7125 masm.callWithABI<Fn, AssertValidSymbolPtr>();
7126 break;
7128 case MIRType::BigInt: {
7129 using Fn = void (*)(JSContext* cx, JS::BigInt* bi);
7130 masm.callWithABI<Fn, AssertValidBigIntPtr>();
7131 break;
7133 default:
7134 MOZ_CRASH();
7137 restoreVolatile();
7139 # endif
7141 masm.bind(&done);
7142 masm.pop(temp);
7145 void CodeGenerator::emitAssertResultV(const ValueOperand input,
7146 const MDefinition* mir) {
7147 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7148 regs.take(input);
7150 Register temp1 = regs.takeAny();
7151 Register temp2 = regs.takeAny();
7152 masm.push(temp1);
7153 masm.push(temp2);
7155 // Don't check if the script has been invalidated. In that case invalid
7156 // types are expected (until we reach the OsiPoint and bailout).
7157 Label done;
7158 branchIfInvalidated(temp1, &done);
7160 // Check that we have a valid GC pointer.
7161 if (JitOptions.fullDebugChecks) {
7162 saveVolatile();
7164 masm.pushValue(input);
7165 masm.moveStackPtrTo(temp1);
7167 using Fn = void (*)(JSContext* cx, Value* v);
7168 masm.setupUnalignedABICall(temp2);
7169 masm.loadJSContext(temp2);
7170 masm.passABIArg(temp2);
7171 masm.passABIArg(temp1);
7172 masm.callWithABI<Fn, AssertValidValue>();
7173 masm.popValue(input);
7174 restoreVolatile();
7177 masm.bind(&done);
7178 masm.pop(temp2);
7179 masm.pop(temp1);
7182 void CodeGenerator::emitGCThingResultChecks(LInstruction* lir,
7183 MDefinition* mir) {
7184 if (lir->numDefs() == 0) {
7185 return;
7188 MOZ_ASSERT(lir->numDefs() == 1);
7189 if (lir->getDef(0)->isBogusTemp()) {
7190 return;
7193 Register output = ToRegister(lir->getDef(0));
7194 emitAssertGCThingResult(output, mir);
7197 void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) {
7198 if (lir->numDefs() == 0) {
7199 return;
7202 MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
7203 if (!lir->getDef(0)->output()->isRegister()) {
7204 return;
7207 ValueOperand output = ToOutValue(lir);
7209 emitAssertResultV(output, mir);
7212 void CodeGenerator::emitDebugResultChecks(LInstruction* ins) {
7213 // In debug builds, check that LIR instructions return valid values.
7215 MDefinition* mir = ins->mirRaw();
7216 if (!mir) {
7217 return;
7220 switch (mir->type()) {
7221 case MIRType::Object:
7222 case MIRType::String:
7223 case MIRType::Symbol:
7224 case MIRType::BigInt:
7225 emitGCThingResultChecks(ins, mir);
7226 break;
7227 case MIRType::Value:
7228 emitValueResultChecks(ins, mir);
7229 break;
7230 default:
7231 break;
7235 void CodeGenerator::emitDebugForceBailing(LInstruction* lir) {
7236 if (MOZ_LIKELY(!gen->options.ionBailAfterEnabled())) {
7237 return;
7239 if (!lir->snapshot()) {
7240 return;
7242 if (lir->isOsiPoint()) {
7243 return;
7246 masm.comment("emitDebugForceBailing");
7247 const void* bailAfterCounterAddr =
7248 gen->runtime->addressOfIonBailAfterCounter();
7250 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7252 Label done, notBail;
7253 masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterCounterAddr),
7254 Imm32(0), &done);
7256 Register temp = regs.takeAny();
7258 masm.push(temp);
7259 masm.load32(AbsoluteAddress(bailAfterCounterAddr), temp);
7260 masm.sub32(Imm32(1), temp);
7261 masm.store32(temp, AbsoluteAddress(bailAfterCounterAddr));
7263 masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
7265 masm.pop(temp);
7266 bailout(lir->snapshot());
7268 masm.bind(&notBail);
7269 masm.pop(temp);
7271 masm.bind(&done);
7273 #endif
7275 bool CodeGenerator::generateBody() {
7276 JitSpewCont(JitSpew_Codegen, "\n");
7277 AutoCreatedBy acb(masm, "CodeGenerator::generateBody");
7279 JitSpew(JitSpew_Codegen, "==== BEGIN CodeGenerator::generateBody ====");
7280 IonScriptCounts* counts = maybeCreateScriptCounts();
7282 const bool compilingWasm = gen->compilingWasm();
7284 for (size_t i = 0; i < graph.numBlocks(); i++) {
7285 current = graph.getBlock(i);
7287 // Don't emit any code for trivial blocks, containing just a goto. Such
7288 // blocks are created to split critical edges, and if we didn't end up
7289 // putting any instructions in them, we can skip them.
7290 if (current->isTrivial()) {
7291 continue;
7294 #ifdef JS_JITSPEW
7295 const char* filename = nullptr;
7296 size_t lineNumber = 0;
7297 JS::LimitedColumnNumberOneOrigin columnNumber;
7298 if (current->mir()->info().script()) {
7299 filename = current->mir()->info().script()->filename();
7300 if (current->mir()->pc()) {
7301 lineNumber = PCToLineNumber(current->mir()->info().script(),
7302 current->mir()->pc(), &columnNumber);
7305 JitSpew(JitSpew_Codegen, "--------------------------------");
7306 JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", i,
7307 filename ? filename : "?", lineNumber,
7308 columnNumber.oneOriginValue(),
7309 current->mir()->isLoopHeader() ? " (loop header)" : "");
7310 #endif
7312 if (current->mir()->isLoopHeader() && compilingWasm) {
7313 masm.nopAlign(CodeAlignment);
7316 masm.bind(current->label());
7318 mozilla::Maybe<ScriptCountBlockState> blockCounts;
7319 if (counts) {
7320 blockCounts.emplace(&counts->block(i), &masm);
7321 if (!blockCounts->init()) {
7322 return false;
7326 for (LInstructionIterator iter = current->begin(); iter != current->end();
7327 iter++) {
7328 if (!alloc().ensureBallast()) {
7329 return false;
7332 perfSpewer_.recordInstruction(masm, *iter);
7333 #ifdef JS_JITSPEW
7334 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
7335 iter->opName());
7336 if (const char* extra = iter->getExtraName()) {
7337 JitSpewCont(JitSpew_Codegen, ":%s", extra);
7339 JitSpewFin(JitSpew_Codegen);
7340 #endif
7342 if (counts) {
7343 blockCounts->visitInstruction(*iter);
7346 #ifdef CHECK_OSIPOINT_REGISTERS
7347 if (iter->safepoint() && !compilingWasm) {
7348 resetOsiPointRegs(iter->safepoint());
7350 #endif
7352 if (!compilingWasm) {
7353 if (MDefinition* mir = iter->mirRaw()) {
7354 if (!addNativeToBytecodeEntry(mir->trackedSite())) {
7355 return false;
7360 setElement(*iter); // needed to encode correct snapshot location.
7362 #ifdef DEBUG
7363 emitDebugForceBailing(*iter);
7364 #endif
7366 switch (iter->op()) {
7367 #ifndef JS_CODEGEN_NONE
7368 # define LIROP(op) \
7369 case LNode::Opcode::op: \
7370 visit##op(iter->to##op()); \
7371 break;
7372 LIR_OPCODE_LIST(LIROP)
7373 # undef LIROP
7374 #endif
7375 case LNode::Opcode::Invalid:
7376 default:
7377 MOZ_CRASH("Invalid LIR op");
7380 #ifdef DEBUG
7381 if (!counts) {
7382 emitDebugResultChecks(*iter);
7384 #endif
7386 if (masm.oom()) {
7387 return false;
7391 JitSpew(JitSpew_Codegen, "==== END CodeGenerator::generateBody ====\n");
7392 return true;
7395 // Out-of-line object allocation for LNewArray.
7396 class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator> {
7397 LNewArray* lir_;
7399 public:
7400 explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) {}
7402 void accept(CodeGenerator* codegen) override {
7403 codegen->visitOutOfLineNewArray(this);
7406 LNewArray* lir() const { return lir_; }
7409 void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) {
7410 Register objReg = ToRegister(lir->output());
7412 MOZ_ASSERT(!lir->isCall());
7413 saveLive(lir);
7415 JSObject* templateObject = lir->mir()->templateObject();
7417 if (templateObject) {
7418 pushArg(ImmGCPtr(templateObject->shape()));
7419 pushArg(Imm32(lir->mir()->length()));
7421 using Fn = ArrayObject* (*)(JSContext*, uint32_t, Handle<Shape*>);
7422 callVM<Fn, NewArrayWithShape>(lir);
7423 } else {
7424 pushArg(Imm32(GenericObject));
7425 pushArg(Imm32(lir->mir()->length()));
7427 using Fn = ArrayObject* (*)(JSContext*, uint32_t, NewObjectKind);
7428 callVM<Fn, NewArrayOperation>(lir);
7431 masm.storeCallPointerResult(objReg);
7433 MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
7434 restoreLive(lir);
7437 void CodeGenerator::visitAtan2D(LAtan2D* lir) {
7438 FloatRegister y = ToFloatRegister(lir->y());
7439 FloatRegister x = ToFloatRegister(lir->x());
7441 using Fn = double (*)(double x, double y);
7442 masm.setupAlignedABICall();
7443 masm.passABIArg(y, MoveOp::DOUBLE);
7444 masm.passABIArg(x, MoveOp::DOUBLE);
7445 masm.callWithABI<Fn, ecmaAtan2>(MoveOp::DOUBLE);
7447 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
7450 void CodeGenerator::visitHypot(LHypot* lir) {
7451 uint32_t numArgs = lir->numArgs();
7452 masm.setupAlignedABICall();
7454 for (uint32_t i = 0; i < numArgs; ++i) {
7455 masm.passABIArg(ToFloatRegister(lir->getOperand(i)), MoveOp::DOUBLE);
7458 switch (numArgs) {
7459 case 2: {
7460 using Fn = double (*)(double x, double y);
7461 masm.callWithABI<Fn, ecmaHypot>(MoveOp::DOUBLE);
7462 break;
7464 case 3: {
7465 using Fn = double (*)(double x, double y, double z);
7466 masm.callWithABI<Fn, hypot3>(MoveOp::DOUBLE);
7467 break;
7469 case 4: {
7470 using Fn = double (*)(double x, double y, double z, double w);
7471 masm.callWithABI<Fn, hypot4>(MoveOp::DOUBLE);
7472 break;
7474 default:
7475 MOZ_CRASH("Unexpected number of arguments to hypot function.");
7477 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
7480 void CodeGenerator::visitNewArray(LNewArray* lir) {
7481 Register objReg = ToRegister(lir->output());
7482 Register tempReg = ToRegister(lir->temp());
7483 DebugOnly<uint32_t> length = lir->mir()->length();
7485 MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);
7487 if (lir->mir()->isVMCall()) {
7488 visitNewArrayCallVM(lir);
7489 return;
7492 OutOfLineNewArray* ool = new (alloc()) OutOfLineNewArray(lir);
7493 addOutOfLineCode(ool, lir->mir());
7495 TemplateObject templateObject(lir->mir()->templateObject());
7496 #ifdef DEBUG
7497 size_t numInlineElements = gc::GetGCKindSlots(templateObject.getAllocKind()) -
7498 ObjectElements::VALUES_PER_HEADER;
7499 MOZ_ASSERT(length <= numInlineElements,
7500 "Inline allocation only supports inline elements");
7501 #endif
7502 masm.createGCObject(objReg, tempReg, templateObject,
7503 lir->mir()->initialHeap(), ool->entry());
7505 masm.bind(ool->rejoin());
7508 void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) {
7509 visitNewArrayCallVM(ool->lir());
7510 masm.jump(ool->rejoin());
7513 void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) {
7514 Register lengthReg = ToRegister(lir->length());
7515 Register objReg = ToRegister(lir->output());
7516 Register tempReg = ToRegister(lir->temp0());
7518 JSObject* templateObject = lir->mir()->templateObject();
7519 gc::Heap initialHeap = lir->mir()->initialHeap();
7521 using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
7522 OutOfLineCode* ool = oolCallVM<Fn, ArrayConstructorOneArg>(
7523 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
7524 StoreRegisterTo(objReg));
7526 bool canInline = true;
7527 size_t inlineLength = 0;
7528 if (templateObject->as<ArrayObject>().hasFixedElements()) {
7529 size_t numSlots =
7530 gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
7531 inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
7532 } else {
7533 canInline = false;
7536 if (canInline) {
7537 // Try to do the allocation inline if the template object is big enough
7538 // for the length in lengthReg. If the length is bigger we could still
7539 // use the template object and not allocate the elements, but it's more
7540 // efficient to do a single big allocation than (repeatedly) reallocating
7541 // the array later on when filling it.
7542 masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength),
7543 ool->entry());
7545 TemplateObject templateObj(templateObject);
7546 masm.createGCObject(objReg, tempReg, templateObj, initialHeap,
7547 ool->entry());
7549 size_t lengthOffset = NativeObject::offsetOfFixedElements() +
7550 ObjectElements::offsetOfLength();
7551 masm.store32(lengthReg, Address(objReg, lengthOffset));
7552 } else {
7553 masm.jump(ool->entry());
7556 masm.bind(ool->rejoin());
7559 void CodeGenerator::visitNewIterator(LNewIterator* lir) {
7560 Register objReg = ToRegister(lir->output());
7561 Register tempReg = ToRegister(lir->temp0());
7563 OutOfLineCode* ool;
7564 switch (lir->mir()->type()) {
7565 case MNewIterator::ArrayIterator: {
7566 using Fn = ArrayIteratorObject* (*)(JSContext*);
7567 ool = oolCallVM<Fn, NewArrayIterator>(lir, ArgList(),
7568 StoreRegisterTo(objReg));
7569 break;
7571 case MNewIterator::StringIterator: {
7572 using Fn = StringIteratorObject* (*)(JSContext*);
7573 ool = oolCallVM<Fn, NewStringIterator>(lir, ArgList(),
7574 StoreRegisterTo(objReg));
7575 break;
7577 case MNewIterator::RegExpStringIterator: {
7578 using Fn = RegExpStringIteratorObject* (*)(JSContext*);
7579 ool = oolCallVM<Fn, NewRegExpStringIterator>(lir, ArgList(),
7580 StoreRegisterTo(objReg));
7581 break;
7583 default:
7584 MOZ_CRASH("unexpected iterator type");
7587 TemplateObject templateObject(lir->mir()->templateObject());
7588 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
7589 ool->entry());
7591 masm.bind(ool->rejoin());
7594 void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) {
7595 Register objReg = ToRegister(lir->output());
7596 Register tempReg = ToRegister(lir->temp0());
7597 Register lengthReg = ToRegister(lir->temp1());
7598 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
7600 JSObject* templateObject = lir->mir()->templateObject();
7601 gc::Heap initialHeap = lir->mir()->initialHeap();
7603 TypedArrayObject* ttemplate = &templateObject->as<TypedArrayObject>();
7605 size_t n = ttemplate->length();
7606 MOZ_ASSERT(n <= INT32_MAX,
7607 "Template objects are only created for int32 lengths");
7609 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
7610 OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
7611 lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
7612 StoreRegisterTo(objReg));
7614 TemplateObject templateObj(templateObject);
7615 masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
7617 masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
7618 ttemplate, MacroAssembler::TypedArrayLength::Fixed);
7620 masm.bind(ool->rejoin());
7623 void CodeGenerator::visitNewTypedArrayDynamicLength(
7624 LNewTypedArrayDynamicLength* lir) {
7625 Register lengthReg = ToRegister(lir->length());
7626 Register objReg = ToRegister(lir->output());
7627 Register tempReg = ToRegister(lir->temp0());
7628 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
7630 JSObject* templateObject = lir->mir()->templateObject();
7631 gc::Heap initialHeap = lir->mir()->initialHeap();
7633 TypedArrayObject* ttemplate = &templateObject->as<TypedArrayObject>();
7635 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
7636 OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
7637 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
7638 StoreRegisterTo(objReg));
7640 // Volatile |lengthReg| is saved across the ABI call in |initTypedArraySlots|.
7641 MOZ_ASSERT_IF(lengthReg.volatile_(), liveRegs.has(lengthReg));
7643 TemplateObject templateObj(templateObject);
7644 masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
7646 masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
7647 ttemplate,
7648 MacroAssembler::TypedArrayLength::Dynamic);
7650 masm.bind(ool->rejoin());
7653 void CodeGenerator::visitNewTypedArrayFromArray(LNewTypedArrayFromArray* lir) {
7654 pushArg(ToRegister(lir->array()));
7655 pushArg(ImmGCPtr(lir->mir()->templateObject()));
7657 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
7658 callVM<Fn, js::NewTypedArrayWithTemplateAndArray>(lir);
7661 void CodeGenerator::visitNewTypedArrayFromArrayBuffer(
7662 LNewTypedArrayFromArrayBuffer* lir) {
7663 pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::LengthIndex));
7664 pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::ByteOffsetIndex));
7665 pushArg(ToRegister(lir->arrayBuffer()));
7666 pushArg(ImmGCPtr(lir->mir()->templateObject()));
7668 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
7669 HandleValue, HandleValue);
7670 callVM<Fn, js::NewTypedArrayWithTemplateAndBuffer>(lir);
7673 void CodeGenerator::visitBindFunction(LBindFunction* lir) {
7674 Register target = ToRegister(lir->target());
7675 Register temp1 = ToRegister(lir->temp0());
7676 Register temp2 = ToRegister(lir->temp1());
7678 // Try to allocate a new BoundFunctionObject we can pass to the VM function.
7679 // If this fails, we set temp1 to nullptr so we do the allocation in C++.
7680 TemplateObject templateObject(lir->mir()->templateObject());
7681 Label allocOk, allocFailed;
7682 masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
7683 &allocFailed);
7684 masm.jump(&allocOk);
7686 masm.bind(&allocFailed);
7687 masm.movePtr(ImmWord(0), temp1);
7689 masm.bind(&allocOk);
7691 // Set temp2 to the address of the first argument on the stack.
7692 // Note that the Value slots used for arguments are currently aligned for a
7693 // JIT call, even though that's not strictly necessary for calling into C++.
7694 uint32_t argc = lir->mir()->numStackArgs();
7695 if (JitStackValueAlignment > 1) {
7696 argc = AlignBytes(argc, JitStackValueAlignment);
7698 uint32_t unusedStack = UnusedStackBytesForCall(argc);
7699 masm.computeEffectiveAddress(Address(masm.getStackPointer(), unusedStack),
7700 temp2);
7702 pushArg(temp1);
7703 pushArg(Imm32(lir->mir()->numStackArgs()));
7704 pushArg(temp2);
7705 pushArg(target);
7707 using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
7708 uint32_t, Handle<BoundFunctionObject*>);
7709 callVM<Fn, js::BoundFunctionObject::functionBindImpl>(lir);
7712 void CodeGenerator::visitNewBoundFunction(LNewBoundFunction* lir) {
7713 Register output = ToRegister(lir->output());
7714 Register temp = ToRegister(lir->temp0());
7716 JSObject* templateObj = lir->mir()->templateObj();
7718 using Fn = BoundFunctionObject* (*)(JSContext*, Handle<BoundFunctionObject*>);
7719 OutOfLineCode* ool = oolCallVM<Fn, BoundFunctionObject::createWithTemplate>(
7720 lir, ArgList(ImmGCPtr(templateObj)), StoreRegisterTo(output));
7722 TemplateObject templateObject(templateObj);
7723 masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
7724 ool->entry());
7726 masm.bind(ool->rejoin());
7729 // Out-of-line object allocation for JSOp::NewObject.
7730 class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator> {
7731 LNewObject* lir_;
7733 public:
7734 explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) {}
7736 void accept(CodeGenerator* codegen) override {
7737 codegen->visitOutOfLineNewObject(this);
7740 LNewObject* lir() const { return lir_; }
7743 void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) {
7744 Register objReg = ToRegister(lir->output());
7746 MOZ_ASSERT(!lir->isCall());
7747 saveLive(lir);
7749 JSObject* templateObject = lir->mir()->templateObject();
7751 // If we're making a new object with a class prototype (that is, an object
7752 // that derives its class from its prototype instead of being
7753 // PlainObject::class_'d) from self-hosted code, we need a different init
7754 // function.
7755 switch (lir->mir()->mode()) {
7756 case MNewObject::ObjectLiteral: {
7757 MOZ_ASSERT(!templateObject);
7758 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
7759 pushArg(ImmGCPtr(lir->mir()->block()->info().script()));
7761 using Fn = JSObject* (*)(JSContext*, HandleScript, const jsbytecode* pc);
7762 callVM<Fn, NewObjectOperation>(lir);
7763 break;
7765 case MNewObject::ObjectCreate: {
7766 pushArg(ImmGCPtr(templateObject));
7768 using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
7769 callVM<Fn, ObjectCreateWithTemplate>(lir);
7770 break;
7774 masm.storeCallPointerResult(objReg);
7776 MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
7777 restoreLive(lir);
7780 static bool ShouldInitFixedSlots(LNewPlainObject* lir, const Shape* shape,
7781 uint32_t nfixed) {
7782 // Look for StoreFixedSlot instructions following an object allocation
7783 // that write to this object before a GC is triggered or this object is
7784 // passed to a VM call. If all fixed slots will be initialized, the
7785 // allocation code doesn't need to set the slots to |undefined|.
7787 if (nfixed == 0) {
7788 return false;
7791 // Keep track of the fixed slots that are initialized. initializedSlots is
7792 // a bit mask with a bit for each slot.
7793 MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
7794 static_assert(NativeObject::MAX_FIXED_SLOTS <= 32,
7795 "Slot bits must fit in 32 bits");
7796 uint32_t initializedSlots = 0;
7797 uint32_t numInitialized = 0;
7799 MInstruction* allocMir = lir->mir();
7800 MBasicBlock* block = allocMir->block();
7802 // Skip the allocation instruction.
7803 MInstructionIterator iter = block->begin(allocMir);
7804 MOZ_ASSERT(*iter == allocMir);
7805 iter++;
7807 // Handle the leading shape guard, if present.
7808 for (; iter != block->end(); iter++) {
7809 if (iter->isConstant()) {
7810 // This instruction won't trigger a GC or read object slots.
7811 continue;
7813 if (iter->isGuardShape()) {
7814 auto* guard = iter->toGuardShape();
7815 if (guard->object() != allocMir || guard->shape() != shape) {
7816 return true;
7818 allocMir = guard;
7819 iter++;
7821 break;
7824 for (; iter != block->end(); iter++) {
7825 if (iter->isConstant() || iter->isPostWriteBarrier()) {
7826 // These instructions won't trigger a GC or read object slots.
7827 continue;
7830 if (iter->isStoreFixedSlot()) {
7831 MStoreFixedSlot* store = iter->toStoreFixedSlot();
7832 if (store->object() != allocMir) {
7833 return true;
7836 // We may not initialize this object slot on allocation, so the
7837 // pre-barrier could read uninitialized memory. Simply disable
7838 // the barrier for this store: the object was just initialized
7839 // so the barrier is not necessary.
7840 store->setNeedsBarrier(false);
7842 uint32_t slot = store->slot();
7843 MOZ_ASSERT(slot < nfixed);
7844 if ((initializedSlots & (1 << slot)) == 0) {
7845 numInitialized++;
7846 initializedSlots |= (1 << slot);
7848 if (numInitialized == nfixed) {
7849 // All fixed slots will be initialized.
7850 MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
7851 return false;
7854 continue;
7857 // Unhandled instruction, assume it bails or reads object slots.
7858 return true;
7861 MOZ_CRASH("Shouldn't get here");
7864 void CodeGenerator::visitNewObject(LNewObject* lir) {
7865 Register objReg = ToRegister(lir->output());
7866 Register tempReg = ToRegister(lir->temp());
7868 if (lir->mir()->isVMCall()) {
7869 visitNewObjectVMCall(lir);
7870 return;
7873 OutOfLineNewObject* ool = new (alloc()) OutOfLineNewObject(lir);
7874 addOutOfLineCode(ool, lir->mir());
7876 TemplateObject templateObject(lir->mir()->templateObject());
7878 masm.createGCObject(objReg, tempReg, templateObject,
7879 lir->mir()->initialHeap(), ool->entry());
7881 masm.bind(ool->rejoin());
7884 void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) {
7885 visitNewObjectVMCall(ool->lir());
7886 masm.jump(ool->rejoin());
7889 void CodeGenerator::visitNewPlainObject(LNewPlainObject* lir) {
7890 Register objReg = ToRegister(lir->output());
7891 Register temp0Reg = ToRegister(lir->temp0());
7892 Register temp1Reg = ToRegister(lir->temp1());
7893 Register shapeReg = ToRegister(lir->temp2());
7895 auto* mir = lir->mir();
7896 const Shape* shape = mir->shape();
7897 gc::Heap initialHeap = mir->initialHeap();
7898 gc::AllocKind allocKind = mir->allocKind();
7900 using Fn =
7901 JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind, gc::Heap);
7902 OutOfLineCode* ool = oolCallVM<Fn, NewPlainObjectOptimizedFallback>(
7903 lir,
7904 ArgList(ImmGCPtr(shape), Imm32(int32_t(allocKind)),
7905 Imm32(int32_t(initialHeap))),
7906 StoreRegisterTo(objReg));
7908 bool initContents = ShouldInitFixedSlots(lir, shape, mir->numFixedSlots());
7910 masm.movePtr(ImmGCPtr(shape), shapeReg);
7911 masm.createPlainGCObject(
7912 objReg, shapeReg, temp0Reg, temp1Reg, mir->numFixedSlots(),
7913 mir->numDynamicSlots(), allocKind, initialHeap, ool->entry(),
7914 AllocSiteInput(gc::CatchAllAllocSite::Optimized), initContents);
7916 #ifdef DEBUG
7917 // ShouldInitFixedSlots expects that the leading GuardShape will never fail,
7918 // so ensure the newly created object has the correct shape. Should the guard
7919 // ever fail, we may end up with uninitialized fixed slots, which can confuse
7920 // the GC.
7921 Label ok;
7922 masm.branchTestObjShape(Assembler::Equal, objReg, shape, temp0Reg, objReg,
7923 &ok);
7924 masm.assumeUnreachable("Newly created object has the correct shape");
7925 masm.bind(&ok);
7926 #endif
7928 masm.bind(ool->rejoin());
7931 void CodeGenerator::visitNewArrayObject(LNewArrayObject* lir) {
7932 Register objReg = ToRegister(lir->output());
7933 Register temp0Reg = ToRegister(lir->temp0());
7934 Register shapeReg = ToRegister(lir->temp1());
7936 auto* mir = lir->mir();
7937 uint32_t arrayLength = mir->length();
7939 gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
7940 MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
7941 allocKind = ForegroundToBackgroundAllocKind(allocKind);
7943 uint32_t slotCount = GetGCKindSlots(allocKind);
7944 MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
7945 uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;
7947 const Shape* shape = mir->shape();
7949 NewObjectKind objectKind =
7950 mir->initialHeap() == gc::Heap::Tenured ? TenuredObject : GenericObject;
7952 using Fn =
7953 ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, NewObjectKind);
7954 OutOfLineCode* ool = oolCallVM<Fn, NewArrayObjectOptimizedFallback>(
7955 lir,
7956 ArgList(Imm32(arrayLength), Imm32(int32_t(allocKind)), Imm32(objectKind)),
7957 StoreRegisterTo(objReg));
7959 masm.movePtr(ImmPtr(shape), shapeReg);
7960 masm.createArrayWithFixedElements(
7961 objReg, shapeReg, temp0Reg, InvalidReg, arrayLength, arrayCapacity, 0, 0,
7962 allocKind, mir->initialHeap(), ool->entry(),
7963 AllocSiteInput(gc::CatchAllAllocSite::Optimized));
7964 masm.bind(ool->rejoin());
7967 void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) {
7968 Register objReg = ToRegister(lir->output());
7969 Register tempReg = ToRegister(lir->temp0());
7970 const CompileInfo& info = lir->mir()->block()->info();
7972 using Fn = js::NamedLambdaObject* (*)(JSContext*, HandleFunction);
7973 OutOfLineCode* ool = oolCallVM<Fn, NamedLambdaObject::createWithoutEnclosing>(
7974 lir, ArgList(info.funMaybeLazy()), StoreRegisterTo(objReg));
7976 TemplateObject templateObject(lir->mir()->templateObj());
7978 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
7979 ool->entry());
7981 masm.bind(ool->rejoin());
7984 void CodeGenerator::visitNewCallObject(LNewCallObject* lir) {
7985 Register objReg = ToRegister(lir->output());
7986 Register tempReg = ToRegister(lir->temp0());
7988 CallObject* templateObj = lir->mir()->templateObject();
7990 using Fn = CallObject* (*)(JSContext*, Handle<SharedShape*>);
7991 OutOfLineCode* ool = oolCallVM<Fn, CallObject::createWithShape>(
7992 lir, ArgList(ImmGCPtr(templateObj->sharedShape())),
7993 StoreRegisterTo(objReg));
7995 // Inline call object creation, using the OOL path only for tricky cases.
7996 TemplateObject templateObject(templateObj);
7997 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
7998 ool->entry());
8000 masm.bind(ool->rejoin());
8003 void CodeGenerator::visitNewStringObject(LNewStringObject* lir) {
8004 Register input = ToRegister(lir->input());
8005 Register output = ToRegister(lir->output());
8006 Register temp = ToRegister(lir->temp0());
8008 StringObject* templateObj = lir->mir()->templateObj();
8010 using Fn = JSObject* (*)(JSContext*, HandleString);
8011 OutOfLineCode* ool = oolCallVM<Fn, NewStringObject>(lir, ArgList(input),
8012 StoreRegisterTo(output));
8014 TemplateObject templateObject(templateObj);
8015 masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
8016 ool->entry());
8018 masm.loadStringLength(input, temp);
8020 masm.storeValue(JSVAL_TYPE_STRING, input,
8021 Address(output, StringObject::offsetOfPrimitiveValue()));
8022 masm.storeValue(JSVAL_TYPE_INT32, temp,
8023 Address(output, StringObject::offsetOfLength()));
8025 masm.bind(ool->rejoin());
8028 void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) {
8029 Register obj = ToRegister(lir->object());
8030 Register value = ToRegister(lir->value());
8032 pushArg(value);
8033 pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex));
8034 pushArg(obj);
8035 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
8037 using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
8038 HandleObject);
8039 callVM<Fn, InitElemGetterSetterOperation>(lir);
8042 void CodeGenerator::visitMutateProto(LMutateProto* lir) {
8043 Register objReg = ToRegister(lir->object());
8045 pushArg(ToValue(lir, LMutateProto::ValueIndex));
8046 pushArg(objReg);
8048 using Fn =
8049 bool (*)(JSContext* cx, Handle<PlainObject*> obj, HandleValue value);
8050 callVM<Fn, MutatePrototype>(lir);
8053 void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) {
8054 Register obj = ToRegister(lir->object());
8055 Register value = ToRegister(lir->value());
8057 pushArg(value);
8058 pushArg(ImmGCPtr(lir->mir()->name()));
8059 pushArg(obj);
8060 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
8062 using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject,
8063 Handle<PropertyName*>, HandleObject);
8064 callVM<Fn, InitPropGetterSetterOperation>(lir);
8067 void CodeGenerator::visitCreateThis(LCreateThis* lir) {
8068 const LAllocation* callee = lir->callee();
8069 const LAllocation* newTarget = lir->newTarget();
8071 if (newTarget->isConstant()) {
8072 pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
8073 } else {
8074 pushArg(ToRegister(newTarget));
8077 if (callee->isConstant()) {
8078 pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
8079 } else {
8080 pushArg(ToRegister(callee));
8083 using Fn = bool (*)(JSContext* cx, HandleObject callee,
8084 HandleObject newTarget, MutableHandleValue rval);
8085 callVM<Fn, jit::CreateThisFromIon>(lir);
8088 void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) {
8089 // This should be getting constructed in the first block only, and not any OSR
8090 // entry blocks.
8091 MOZ_ASSERT(lir->mir()->block()->id() == 0);
8093 Register callObj = ToRegister(lir->callObject());
8094 Register temp0 = ToRegister(lir->temp0());
8095 Label done;
8097 if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
8098 Register objTemp = ToRegister(lir->temp1());
8099 Register cxTemp = ToRegister(lir->temp2());
8101 masm.Push(callObj);
8103 // Try to allocate an arguments object. This will leave the reserved
8104 // slots uninitialized, so it's important we don't GC until we
8105 // initialize these slots in ArgumentsObject::finishForIonPure.
8106 Label failure;
8107 TemplateObject templateObject(templateObj);
8108 masm.createGCObject(objTemp, temp0, templateObject, gc::Heap::Default,
8109 &failure,
8110 /* initContents = */ false);
8112 masm.moveStackPtrTo(temp0);
8113 masm.addPtr(Imm32(masm.framePushed()), temp0);
8115 using Fn = ArgumentsObject* (*)(JSContext* cx, jit::JitFrameLayout* frame,
8116 JSObject* scopeChain, ArgumentsObject* obj);
8117 masm.setupAlignedABICall();
8118 masm.loadJSContext(cxTemp);
8119 masm.passABIArg(cxTemp);
8120 masm.passABIArg(temp0);
8121 masm.passABIArg(callObj);
8122 masm.passABIArg(objTemp);
8124 masm.callWithABI<Fn, ArgumentsObject::finishForIonPure>();
8125 masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
8127 // Discard saved callObj on the stack.
8128 masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
8129 masm.jump(&done);
8131 masm.bind(&failure);
8132 masm.Pop(callObj);
8135 masm.moveStackPtrTo(temp0);
8136 masm.addPtr(Imm32(frameSize()), temp0);
8138 pushArg(callObj);
8139 pushArg(temp0);
8141 using Fn = ArgumentsObject* (*)(JSContext*, JitFrameLayout*, HandleObject);
8142 callVM<Fn, ArgumentsObject::createForIon>(lir);
8144 masm.bind(&done);
8147 void CodeGenerator::visitCreateInlinedArgumentsObject(
8148 LCreateInlinedArgumentsObject* lir) {
8149 Register callObj = ToRegister(lir->getCallObject());
8150 Register callee = ToRegister(lir->getCallee());
8151 Register argsAddress = ToRegister(lir->temp1());
8152 Register argsObj = ToRegister(lir->temp2());
8154 // TODO: Do we have to worry about alignment here?
8156 // Create a contiguous array of values for ArgumentsObject::create
8157 // by pushing the arguments onto the stack in reverse order.
8158 uint32_t argc = lir->mir()->numActuals();
8159 for (uint32_t i = 0; i < argc; i++) {
8160 uint32_t argNum = argc - i - 1;
8161 uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(argNum);
8162 ConstantOrRegister arg =
8163 toConstantOrRegister(lir, index, lir->mir()->getArg(argNum)->type());
8164 masm.Push(arg);
8166 masm.moveStackPtrTo(argsAddress);
8168 Label done;
8169 if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
8170 LiveRegisterSet liveRegs;
8171 liveRegs.add(callObj);
8172 liveRegs.add(callee);
8174 masm.PushRegsInMask(liveRegs);
8176 // We are free to clobber all registers, as LCreateInlinedArgumentsObject is
8177 // a call instruction.
8178 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
8179 allRegs.take(callObj);
8180 allRegs.take(callee);
8181 allRegs.take(argsObj);
8182 allRegs.take(argsAddress);
8184 Register temp3 = allRegs.takeAny();
8185 Register temp4 = allRegs.takeAny();
8187 // Try to allocate an arguments object. This will leave the reserved slots
8188 // uninitialized, so it's important we don't GC until we initialize these
8189 // slots in ArgumentsObject::finishForIonPure.
8190 Label failure;
8191 TemplateObject templateObject(templateObj);
8192 masm.createGCObject(argsObj, temp3, templateObject, gc::Heap::Default,
8193 &failure,
8194 /* initContents = */ false);
8196 Register numActuals = temp3;
8197 masm.move32(Imm32(argc), numActuals);
8199 using Fn = ArgumentsObject* (*)(JSContext*, JSObject*, JSFunction*, Value*,
8200 uint32_t, ArgumentsObject*);
8201 masm.setupAlignedABICall();
8202 masm.loadJSContext(temp4);
8203 masm.passABIArg(temp4);
8204 masm.passABIArg(callObj);
8205 masm.passABIArg(callee);
8206 masm.passABIArg(argsAddress);
8207 masm.passABIArg(numActuals);
8208 masm.passABIArg(argsObj);
8210 masm.callWithABI<Fn, ArgumentsObject::finishInlineForIonPure>();
8211 masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
8213 // Discard saved callObj, callee, and values array on the stack.
8214 masm.addToStackPtr(
8215 Imm32(MacroAssembler::PushRegsInMaskSizeInBytes(liveRegs) +
8216 argc * sizeof(Value)));
8217 masm.jump(&done);
8219 masm.bind(&failure);
8220 masm.PopRegsInMask(liveRegs);
8222 // Reload argsAddress because it may have been overridden.
8223 masm.moveStackPtrTo(argsAddress);
8226 pushArg(Imm32(argc));
8227 pushArg(callObj);
8228 pushArg(callee);
8229 pushArg(argsAddress);
8231 using Fn = ArgumentsObject* (*)(JSContext*, Value*, HandleFunction,
8232 HandleObject, uint32_t);
8233 callVM<Fn, ArgumentsObject::createForInlinedIon>(lir);
8235 // Discard the array of values.
8236 masm.freeStack(argc * sizeof(Value));
8238 masm.bind(&done);
8241 template <class GetInlinedArgument>
8242 void CodeGenerator::emitGetInlinedArgument(GetInlinedArgument* lir,
8243 Register index,
8244 ValueOperand output) {
8245 uint32_t numActuals = lir->mir()->numActuals();
8246 MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);
8248 // The index has already been bounds-checked, so the code we
8249 // generate here should be unreachable. We can end up in this
8250 // situation in self-hosted code using GetArgument(), or in a
8251 // monomorphically inlined function if we've inlined some CacheIR
8252 // that was created for a different caller.
8253 if (numActuals == 0) {
8254 masm.assumeUnreachable("LGetInlinedArgument: invalid index");
8255 return;
8258 // Check the first n-1 possible indices.
8259 Label done;
8260 for (uint32_t i = 0; i < numActuals - 1; i++) {
8261 Label skip;
8262 ConstantOrRegister arg = toConstantOrRegister(
8263 lir, GetInlinedArgument::ArgIndex(i), lir->mir()->getArg(i)->type());
8264 masm.branch32(Assembler::NotEqual, index, Imm32(i), &skip);
8265 masm.moveValue(arg, output);
8267 masm.jump(&done);
8268 masm.bind(&skip);
8271 #ifdef DEBUG
8272 Label skip;
8273 masm.branch32(Assembler::Equal, index, Imm32(numActuals - 1), &skip);
8274 masm.assumeUnreachable("LGetInlinedArgument: invalid index");
8275 masm.bind(&skip);
8276 #endif
8278 // The index has already been bounds-checked, so load the last argument.
8279 uint32_t lastIdx = numActuals - 1;
8280 ConstantOrRegister arg =
8281 toConstantOrRegister(lir, GetInlinedArgument::ArgIndex(lastIdx),
8282 lir->mir()->getArg(lastIdx)->type());
8283 masm.moveValue(arg, output);
8284 masm.bind(&done);
8287 void CodeGenerator::visitGetInlinedArgument(LGetInlinedArgument* lir) {
8288 Register index = ToRegister(lir->getIndex());
8289 ValueOperand output = ToOutValue(lir);
8291 emitGetInlinedArgument(lir, index, output);
8294 void CodeGenerator::visitGetInlinedArgumentHole(LGetInlinedArgumentHole* lir) {
8295 Register index = ToRegister(lir->getIndex());
8296 ValueOperand output = ToOutValue(lir);
8298 uint32_t numActuals = lir->mir()->numActuals();
8300 if (numActuals == 0) {
8301 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
8302 masm.moveValue(UndefinedValue(), output);
8303 return;
8306 Label outOfBounds, done;
8307 masm.branch32(Assembler::AboveOrEqual, index, Imm32(numActuals),
8308 &outOfBounds);
8310 emitGetInlinedArgument(lir, index, output);
8311 masm.jump(&done);
8313 masm.bind(&outOfBounds);
8314 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
8315 masm.moveValue(UndefinedValue(), output);
8317 masm.bind(&done);
8320 void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) {
8321 Register temp = ToRegister(lir->temp0());
8322 Register argsObj = ToRegister(lir->argsObject());
8323 ValueOperand out = ToOutValue(lir);
8325 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
8326 temp);
8327 Address argAddr(temp, ArgumentsData::offsetOfArgs() +
8328 lir->mir()->argno() * sizeof(Value));
8329 masm.loadValue(argAddr, out);
8330 #ifdef DEBUG
8331 Label success;
8332 masm.branchTestMagic(Assembler::NotEqual, out, &success);
8333 masm.assumeUnreachable(
8334 "Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
8335 masm.bind(&success);
8336 #endif
8339 void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) {
8340 Register temp = ToRegister(lir->getTemp(0));
8341 Register argsObj = ToRegister(lir->argsObject());
8342 ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);
8344 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
8345 temp);
8346 Address argAddr(temp, ArgumentsData::offsetOfArgs() +
8347 lir->mir()->argno() * sizeof(Value));
8348 emitPreBarrier(argAddr);
8349 #ifdef DEBUG
8350 Label success;
8351 masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
8352 masm.assumeUnreachable(
8353 "Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
8354 masm.bind(&success);
8355 #endif
8356 masm.storeValue(value, argAddr);
8359 void CodeGenerator::visitLoadArgumentsObjectArg(LLoadArgumentsObjectArg* lir) {
8360 Register temp = ToRegister(lir->temp0());
8361 Register argsObj = ToRegister(lir->argsObject());
8362 Register index = ToRegister(lir->index());
8363 ValueOperand out = ToOutValue(lir);
8365 Label bail;
8366 masm.loadArgumentsObjectElement(argsObj, index, out, temp, &bail);
8367 bailoutFrom(&bail, lir->snapshot());
8370 void CodeGenerator::visitLoadArgumentsObjectArgHole(
8371 LLoadArgumentsObjectArgHole* lir) {
8372 Register temp = ToRegister(lir->temp0());
8373 Register argsObj = ToRegister(lir->argsObject());
8374 Register index = ToRegister(lir->index());
8375 ValueOperand out = ToOutValue(lir);
8377 Label bail;
8378 masm.loadArgumentsObjectElementHole(argsObj, index, out, temp, &bail);
8379 bailoutFrom(&bail, lir->snapshot());
8382 void CodeGenerator::visitInArgumentsObjectArg(LInArgumentsObjectArg* lir) {
8383 Register temp = ToRegister(lir->temp0());
8384 Register argsObj = ToRegister(lir->argsObject());
8385 Register index = ToRegister(lir->index());
8386 Register out = ToRegister(lir->output());
8388 Label bail;
8389 masm.loadArgumentsObjectElementExists(argsObj, index, out, temp, &bail);
8390 bailoutFrom(&bail, lir->snapshot());
8393 void CodeGenerator::visitArgumentsObjectLength(LArgumentsObjectLength* lir) {
8394 Register argsObj = ToRegister(lir->argsObject());
8395 Register out = ToRegister(lir->output());
8397 Label bail;
8398 masm.loadArgumentsObjectLength(argsObj, out, &bail);
8399 bailoutFrom(&bail, lir->snapshot());
8402 void CodeGenerator::visitArrayFromArgumentsObject(
8403 LArrayFromArgumentsObject* lir) {
8404 pushArg(ToRegister(lir->argsObject()));
8406 using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
8407 callVM<Fn, js::ArrayFromArgumentsObject>(lir);
8410 void CodeGenerator::visitGuardArgumentsObjectFlags(
8411 LGuardArgumentsObjectFlags* lir) {
8412 Register argsObj = ToRegister(lir->argsObject());
8413 Register temp = ToRegister(lir->temp0());
8415 Label bail;
8416 masm.branchTestArgumentsObjectFlags(argsObj, temp, lir->mir()->flags(),
8417 Assembler::NonZero, &bail);
8418 bailoutFrom(&bail, lir->snapshot());
8421 void CodeGenerator::visitBoundFunctionNumArgs(LBoundFunctionNumArgs* lir) {
8422 Register obj = ToRegister(lir->object());
8423 Register output = ToRegister(lir->output());
8425 masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
8426 output);
8427 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
8430 void CodeGenerator::visitGuardBoundFunctionIsConstructor(
8431 LGuardBoundFunctionIsConstructor* lir) {
8432 Register obj = ToRegister(lir->object());
8434 Label bail;
8435 Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
8436 masm.branchTest32(Assembler::Zero, flagsSlot,
8437 Imm32(BoundFunctionObject::IsConstructorFlag), &bail);
8438 bailoutFrom(&bail, lir->snapshot());
8441 void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) {
8442 ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
8443 Register obj = ToRegister(lir->object());
8444 Register output = ToRegister(lir->output());
8446 Label valueIsObject, end;
8448 masm.branchTestObject(Assembler::Equal, value, &valueIsObject);
8450 // Value is not an object. Return that other object.
8451 masm.movePtr(obj, output);
8452 masm.jump(&end);
8454 // Value is an object. Return unbox(Value).
8455 masm.bind(&valueIsObject);
8456 Register payload = masm.extractObject(value, output);
8457 if (payload != output) {
8458 masm.movePtr(payload, output);
8461 masm.bind(&end);
8464 class OutOfLineBoxNonStrictThis : public OutOfLineCodeBase<CodeGenerator> {
8465 LBoxNonStrictThis* ins_;
8467 public:
8468 explicit OutOfLineBoxNonStrictThis(LBoxNonStrictThis* ins) : ins_(ins) {}
8469 void accept(CodeGenerator* codegen) override {
8470 codegen->visitOutOfLineBoxNonStrictThis(this);
8472 LBoxNonStrictThis* ins() const { return ins_; }
8475 void CodeGenerator::visitBoxNonStrictThis(LBoxNonStrictThis* lir) {
8476 ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
8477 Register output = ToRegister(lir->output());
8479 auto* ool = new (alloc()) OutOfLineBoxNonStrictThis(lir);
8480 addOutOfLineCode(ool, lir->mir());
8482 masm.fallibleUnboxObject(value, output, ool->entry());
8483 masm.bind(ool->rejoin());
8486 void CodeGenerator::visitOutOfLineBoxNonStrictThis(
8487 OutOfLineBoxNonStrictThis* ool) {
8488 LBoxNonStrictThis* lir = ool->ins();
8490 ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
8491 Register output = ToRegister(lir->output());
8493 Label notNullOrUndefined;
8495 Label isNullOrUndefined;
8496 ScratchTagScope tag(masm, value);
8497 masm.splitTagForTest(value, tag);
8498 masm.branchTestUndefined(Assembler::Equal, tag, &isNullOrUndefined);
8499 masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
8500 masm.bind(&isNullOrUndefined);
8501 masm.movePtr(ImmGCPtr(lir->mir()->globalThis()), output);
8502 masm.jump(ool->rejoin());
8505 masm.bind(&notNullOrUndefined);
8507 saveLive(lir);
8509 pushArg(value);
8510 using Fn = JSObject* (*)(JSContext*, HandleValue);
8511 callVM<Fn, BoxNonStrictThis>(lir);
8513 StoreRegisterTo(output).generate(this);
8514 restoreLiveIgnore(lir, StoreRegisterTo(output).clobbered());
8516 masm.jump(ool->rejoin());
8519 void CodeGenerator::visitImplicitThis(LImplicitThis* lir) {
8520 pushArg(ImmGCPtr(lir->mir()->name()));
8521 pushArg(ToRegister(lir->env()));
8523 using Fn = bool (*)(JSContext*, HandleObject, Handle<PropertyName*>,
8524 MutableHandleValue);
8525 callVM<Fn, ImplicitThisOperation>(lir);
8528 void CodeGenerator::visitArrayLength(LArrayLength* lir) {
8529 Register elements = ToRegister(lir->elements());
8530 Register output = ToRegister(lir->output());
8532 Address length(elements, ObjectElements::offsetOfLength());
8533 masm.load32(length, output);
8535 // Bail out if the length doesn't fit in int32.
8536 bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
8539 static void SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index,
8540 const Address& length) {
8541 if (index->isConstant()) {
8542 masm.store32(Imm32(ToInt32(index) + 1), length);
8543 } else {
8544 Register newLength = ToRegister(index);
8545 masm.add32(Imm32(1), newLength);
8546 masm.store32(newLength, length);
8547 masm.sub32(Imm32(1), newLength);
8551 void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) {
8552 Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
8553 SetLengthFromIndex(masm, lir->index(), length);
8556 void CodeGenerator::visitFunctionLength(LFunctionLength* lir) {
8557 Register function = ToRegister(lir->function());
8558 Register output = ToRegister(lir->output());
8560 Label bail;
8562 // Get the JSFunction flags.
8563 masm.load32(Address(function, JSFunction::offsetOfFlagsAndArgCount()),
8564 output);
8566 // Functions with a SelfHostedLazyScript must be compiled with the slow-path
8567 // before the function length is known. If the length was previously resolved,
8568 // the length property may be shadowed.
8569 masm.branchTest32(
8570 Assembler::NonZero, output,
8571 Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
8572 &bail);
8574 masm.loadFunctionLength(function, output, output, &bail);
8576 bailoutFrom(&bail, lir->snapshot());
// Load a function's name, substituting the empty atom when the function has
// no name; bail out when the name cannot be loaded inline.
void CodeGenerator::visitFunctionName(LFunctionName* lir) {
  Register function = ToRegister(lir->function());
  Register output = ToRegister(lir->output());

  Label bail;

  const JSAtomState& names = gen->runtime->names();
  masm.loadFunctionName(function, output, ImmGCPtr(names.empty_), &bail);

  bailoutFrom(&bail, lir->snapshot());
}
// Load a pointer to the current front entry of an OrderedHashTable range into
// |front|, given the range pointer and the current index |i|. Specialized
// below for ValueMap and ValueSet.
template <class OrderedHashTable>
static void RangeFront(MacroAssembler&, Register, Register, Register);
template <>
void RangeFront<ValueMap>(MacroAssembler& masm, Register range, Register i,
                          Register front) {
  masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
  masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);

  MOZ_ASSERT(ValueMap::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
  // Compute |i * 24| as |(i * 3) << 3| and add it to the data pointer to
  // select the i-th entry. Note: this clobbers |i|.
  masm.mulBy3(i, i);
  masm.lshiftPtr(Imm32(3), i);
  masm.addPtr(i, front);
}
template <>
void RangeFront<ValueSet>(MacroAssembler& masm, Register range, Register i,
                          Register front) {
  masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
  masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);

  MOZ_ASSERT(ValueSet::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
  // Compute |i * 16| as |i << 4| and add it to the data pointer to select the
  // i-th entry. Note: this clobbers |i|.
  masm.lshiftPtr(Imm32(4), i);
  masm.addPtr(i, front);
}
// Advance a range past its current front entry: bump the popped-entry count,
// then scan forward from index |i| until either the end of the data is
// reached or a non-empty (non-tombstone) entry is found.
template <class OrderedHashTable>
static void RangePopFront(MacroAssembler& masm, Register range, Register front,
                          Register dataLength, Register temp) {
  Register i = temp;

  masm.add32(Imm32(1),
             Address(range, OrderedHashTable::Range::offsetOfCount()));

  masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);

  Label done, seek;
  masm.bind(&seek);
  masm.add32(Imm32(1), i);
  masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);

  // We can add sizeof(Data) to |front| to select the next element, because
  // |front| and |range.ht.data[i]| point to the same location.
  MOZ_ASSERT(OrderedHashTable::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);

  // Entries whose key is the empty magic value are deleted slots; skip them.
  masm.branchTestMagic(Assembler::Equal,
                       Address(front, OrderedHashTable::offsetOfEntryKey()),
                       JS_HASH_KEY_EMPTY, &seek);

  masm.bind(&done);
  masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
}
// Unlink |range| from the hash table's doubly-linked list of live ranges and
// free its storage, unless the iterator object is nursery-allocated (in which
// case the range storage is not freed here).
template <class OrderedHashTable>
static inline void RangeDestruct(MacroAssembler& masm, Register iter,
                                 Register range, Register temp0,
                                 Register temp1) {
  Register next = temp0;
  Register prevp = temp1;

  // Splice the range out of the list: *prevp = next.
  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
  masm.storePtr(next, Address(prevp, 0));

  // If there is a next range, fix up its back-pointer.
  Label hasNoNext;
  masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);

  masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));

  masm.bind(&hasNoNext);

  Label nurseryAllocated;
  masm.branchPtrInNurseryChunk(Assembler::Equal, iter, temp0,
                               &nurseryAllocated);

  masm.callFreeStub(range);

  masm.bind(&nurseryAllocated);
}
// Copy the current map entry's key and value from |front| into the first two
// fixed elements of the |result| array, with the required pre- and
// post-write barriers.
template <>
void CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result,
                                                     Register temp,
                                                     Register front) {
  size_t elementsOffset = NativeObject::offsetOfFixedElements();

  Address keyAddress(front, ValueMap::Entry::offsetOfKey());
  Address valueAddress(front, ValueMap::Entry::offsetOfValue());
  Address keyElemAddress(result, elementsOffset);
  Address valueElemAddress(result, elementsOffset + sizeof(Value));
  // Pre-barrier the slots being overwritten, then store both values.
  masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
  masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
  masm.storeValue(keyAddress, keyElemAddress, temp);
  masm.storeValue(valueAddress, valueElemAddress, temp);

  // Emit the post-write barrier when either the key or the value is a
  // nursery cell.
  Label emitBarrier, skipBarrier;
  masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp,
                                &emitBarrier);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp,
                                &skipBarrier);

  masm.bind(&emitBarrier);
  saveVolatile(temp);
  emitPostWriteBarrier(result);
  restoreVolatile(temp);

  masm.bind(&skipBarrier);
}
// Copy the current set entry's key from |front| into the first fixed element
// of the |result| array, with the required pre- and post-write barriers.
template <>
void CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result,
                                                     Register temp,
                                                     Register front) {
  size_t elementsOffset = NativeObject::offsetOfFixedElements();

  Address keyAddress(front, ValueSet::offsetOfEntryKey());
  Address keyElemAddress(result, elementsOffset);
  // Pre-barrier the slot being overwritten, then store the key.
  masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
  masm.storeValue(keyAddress, keyElemAddress, temp);

  // Emit the post-write barrier only when the key is a nursery cell.
  Label skipBarrier;
  masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp,
                                &skipBarrier);

  saveVolatile(temp);
  emitPostWriteBarrier(result);
  restoreVolatile(temp);

  masm.bind(&skipBarrier);
}
// Shared implementation of GetNextEntryForIterator for Map and Set
// iterators. Loads the iterator's private Range, copies the current entry
// into |result|, and advances the range. Sets |output| to 0 when an entry
// was produced and to 1 when iteration is done (the range is then destroyed
// and the slot nulled out).
template <class IteratorObject, class OrderedHashTable>
void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) {
  Register iter = ToRegister(lir->iter());
  Register result = ToRegister(lir->result());
  Register temp = ToRegister(lir->temp0());
  Register dataLength = ToRegister(lir->temp1());
  Register range = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
  // only called with the correct iterator class. Assert here all self-
  // hosted callers of GetNextEntryForIterator perform this class check.
  // No Spectre mitigations are needed because this is DEBUG-only code.
  Label success;
  masm.branchTestObjClassNoSpectreMitigations(
      Assembler::Equal, iter, &IteratorObject::class_, temp, &success);
  masm.assumeUnreachable("Iterator object should have the correct class.");
  masm.bind(&success);
#endif

  masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(
                                     IteratorObject::RangeSlot)),
                   range);

  // A null range means the iterator was already exhausted earlier.
  Label iterAlreadyDone, iterDone, done;
  masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);

  masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()),
               dataLength);
  masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()),
              dataLength);
  masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);

  // |iter| is repurposed as the |front| register below, so preserve it.
  masm.Push(iter);

  Register front = iter;
  RangeFront<OrderedHashTable>(masm, range, temp, front);

  emitLoadIteratorValues<OrderedHashTable>(result, temp, front);

  RangePopFront<OrderedHashTable>(masm, range, front, dataLength, temp);

  masm.Pop(iter);
  // 0 signals "an entry was produced".
  masm.move32(Imm32(0), output);

  masm.jump(&done);

  masm.bind(&iterDone);

  RangeDestruct<OrderedHashTable>(masm, iter, range, temp, dataLength);

  // Null out the range slot so subsequent calls take the fast
  // already-done path.
  masm.storeValue(PrivateValue(nullptr),
                  Address(iter, NativeObject::getFixedSlotOffset(
                                    IteratorObject::RangeSlot)));

  masm.bind(&iterAlreadyDone);

  // 1 signals "iteration is complete".
  masm.move32(Imm32(1), output);

  masm.bind(&done);
}
// Dispatch to the Map or Set instantiation of emitGetNextEntryForIterator.
void CodeGenerator::visitGetNextEntryForIterator(
    LGetNextEntryForIterator* lir) {
  if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
    emitGetNextEntryForIterator<MapIteratorObject, ValueMap>(lir);
  } else {
    MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
    emitGetNextEntryForIterator<SetIteratorObject, ValueSet>(lir);
  }
}
// The point of these is to inform Ion of where these values already are; they
// don't normally generate (much) code.
void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
// Initialize a wasm stack-result area: any reference-typed results are
// zeroed so the GC never sees uninitialized pointers in the area.
void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
  LAllocation* output = lir->getDef(0)->output();
  MOZ_ASSERT(output->isStackArea());
  // Lazily zero the temp register only if at least one ref result exists.
  bool tempInit = false;
  for (auto iter = output->toStackArea()->results(); iter; iter.next()) {
    // Zero out ref stack results.
    if (iter.isWasmAnyRef()) {
      Register temp = ToRegister(lir->temp0());
      if (!tempInit) {
        masm.xorPtr(temp, temp);
        tempInit = true;
      }
      masm.storePtr(temp, ToAddress(iter.alloc()));
    }
  }
}
// On 64-bit targets, ensure an Int32 wasm result register is widened (upper
// bits normalized); otherwise this generates no code.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
#ifdef JS_64BIT
  if (MWasmRegisterResult* mir = lir->mir()) {
    if (mir->type() == MIRType::Int32) {
      masm.widenInt32(ToRegister(lir->output()));
    }
  }
#endif
}
// Emit a wasm call (direct, import, indirect via table, builtin, or funcref),
// including tail ("return") calls, try-note bookkeeping for catchable calls,
// safepoint/stackmap recording, and post-call instance/realm restoration.
void CodeGenerator::visitWasmCall(LWasmCall* lir) {
  const MWasmCallBase* callBase = lir->callBase();
  bool isReturnCall = lir->isReturnCall();

  // If this call is in Wasm try code block, initialise a wasm::TryNote for this
  // call.
  bool inTry = callBase->inTry();
  if (inTry) {
    size_t tryNoteIndex = callBase->tryNoteIndex();
    wasm::TryNoteVector& tryNotes = masm.tryNotes();
    wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
    tryNote.setTryBodyBegin(masm.currentOffset());
  }

  MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment ==
             0);
  static_assert(
      WasmStackAlignment >= ABIStackAlignment &&
          WasmStackAlignment % ABIStackAlignment == 0,
      "The wasm stack alignment should subsume the ABI-required alignment");

#ifdef DEBUG
  Label ok;
  masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif

  // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
  // instance and pinned regs. The only case where we don't have to
  // reload the instance and pinned regs is when the callee preserves them.
  bool reloadRegs = true;
  bool switchRealm = true;

  const wasm::CallSiteDesc& desc = callBase->desc();
  const wasm::CalleeDesc& callee = callBase->callee();
  CodeOffset retOffset;
  CodeOffset secondRetOffset;
  switch (callee.which()) {
    case wasm::CalleeDesc::Func:
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCall(desc, callee.funcIndex(), retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      // Direct intra-module calls preserve the instance and pinned regs.
      retOffset = masm.call(desc, callee.funcIndex());
      reloadRegs = false;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::Import:
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallImport(desc, callee, retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      retOffset = masm.wasmCallImport(desc, callee);
      break;
    case wasm::CalleeDesc::AsmJSTable:
      retOffset = masm.asmCallIndirect(desc, callee);
      break;
    case wasm::CalleeDesc::WasmTable: {
      // Out-of-line traps for table-index bounds failure and (without a
      // reserved heap register) null-entry failure.
      Label* boundsCheckFailed = nullptr;
      if (lir->needsBoundsCheck()) {
        OutOfLineAbortingWasmTrap* ool =
            new (alloc()) OutOfLineAbortingWasmTrap(
                wasm::BytecodeOffset(desc.lineOrBytecode()),
                wasm::Trap::OutOfBounds);
        if (lir->isCatchable()) {
          addOutOfLineCode(ool, lir->mirCatchable());
        } else if (isReturnCall) {
#ifdef ENABLE_WASM_TAIL_CALLS
          addOutOfLineCode(ool, lir->mirReturnCall());
#else
          MOZ_CRASH("Return calls are disabled.");
#endif
        } else {
          addOutOfLineCode(ool, lir->mirUncatchable());
        }
        boundsCheckFailed = ool->entry();
      }
      Label* nullCheckFailed = nullptr;
#ifndef WASM_HAS_HEAPREG
      {
        OutOfLineAbortingWasmTrap* ool =
            new (alloc()) OutOfLineAbortingWasmTrap(
                wasm::BytecodeOffset(desc.lineOrBytecode()),
                wasm::Trap::IndirectCallToNull);
        if (lir->isCatchable()) {
          addOutOfLineCode(ool, lir->mirCatchable());
        } else if (isReturnCall) {
# ifdef ENABLE_WASM_TAIL_CALLS
          addOutOfLineCode(ool, lir->mirReturnCall());
# else
          MOZ_CRASH("Return calls are disabled.");
# endif
        } else {
          addOutOfLineCode(ool, lir->mirUncatchable());
        }
        nullCheckFailed = ool->entry();
      }
#endif
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallIndirect(desc, callee, boundsCheckFailed,
                                    nullCheckFailed, mozilla::Nothing(),
                                    retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      masm.wasmCallIndirect(desc, callee, boundsCheckFailed, nullCheckFailed,
                            lir->tableSize(), &retOffset, &secondRetOffset);
      // Register reloading and realm switching are handled dynamically inside
      // wasmCallIndirect. There are two return offsets, one for each call
      // instruction (fast path and slow path).
      reloadRegs = false;
      switchRealm = false;
      break;
    }
    case wasm::CalleeDesc::Builtin:
      retOffset = masm.call(desc, callee.builtin());
      reloadRegs = false;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::BuiltinInstanceMethod:
      retOffset = masm.wasmCallBuiltinInstanceMethod(
          desc, callBase->instanceArg(), callee.builtin(),
          callBase->builtinMethodFailureMode());
      switchRealm = false;
      break;
    case wasm::CalleeDesc::FuncRef:
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallRef(desc, callee, retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      // Register reloading and realm switching are handled dynamically inside
      // wasmCallRef. There are two return offsets, one for each call
      // instruction (fast path and slow path).
      masm.wasmCallRef(desc, callee, &retOffset, &secondRetOffset);
      reloadRegs = false;
      switchRealm = false;
      break;
  }

  // Note the assembler offset for the associated LSafePoint.
  MOZ_ASSERT(!isReturnCall);
  markSafepointAt(retOffset.offset(), lir);

  // Now that all the outbound in-memory args are on the stack, note the
  // required lower boundary point of the associated StackMap.
  uint32_t framePushedAtStackMapBase =
      masm.framePushed() - callBase->stackArgAreaSizeUnaligned();
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAtStackMapBase);
  MOZ_ASSERT(lir->safepoint()->wasmSafepointKind() ==
             WasmSafepointKind::LirCall);

  // Note the assembler offset and framePushed for use by the adjunct
  // LSafePoint, see visitor for LWasmCallIndirectAdjunctSafepoint below.
  if (callee.which() == wasm::CalleeDesc::WasmTable) {
    lir->adjunctSafepoint()->recordSafepointInfo(secondRetOffset,
                                                 framePushedAtStackMapBase);
  }

  if (reloadRegs) {
    masm.loadPtr(
        Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
        InstanceReg);
    masm.loadWasmPinnedRegsFromInstance();
    if (switchRealm) {
      masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
    }
  } else {
    MOZ_ASSERT(!switchRealm);
  }

#ifdef ENABLE_WASM_TAIL_CALLS
  switch (callee.which()) {
    case wasm::CalleeDesc::Func:
    case wasm::CalleeDesc::Import:
    case wasm::CalleeDesc::WasmTable:
    case wasm::CalleeDesc::FuncRef:
      // Stack allocation could change during Wasm (return) calls,
      // recover pre-call state.
      masm.freeStackTo(masm.framePushed());
      break;
    default:
      break;
  }
#endif  // ENABLE_WASM_TAIL_CALLS

  if (inTry) {
    // Set the end of the try note range
    size_t tryNoteIndex = callBase->tryNoteIndex();
    wasm::TryNoteVector& tryNotes = masm.tryNotes();
    wasm::TryNote& tryNote = tryNotes[tryNoteIndex];

    // Don't set the end of the try note if we've OOM'ed, as the above
    // instructions may not have been emitted, which will trigger an assert
    // about zero-length try-notes. This is okay as this compilation will be
    // thrown away.
    if (!masm.oom()) {
      tryNote.setTryBodyEnd(masm.currentOffset());
    }

    // This instruction or the adjunct safepoint must be the last instruction
    // in the block. No other instructions may be inserted.
    LBlock* block = lir->block();
    MOZ_RELEASE_ASSERT(*block->rbegin() == lir ||
                       (block->rbegin()->isWasmCallIndirectAdjunctSafepoint() &&
                        *(++block->rbegin()) == lir));

    // Jump to the fallthrough block
    jumpToBlock(lir->mirCatchable()->getSuccessor(
        MWasmCallCatchable::FallthroughBranchIndex));
  }
}
// Record the exception landing pad for a catchable wasm call: this block is
// the call block's pre-pad successor, and its label becomes the try note's
// landing-pad offset.
void CodeGenerator::visitWasmCallLandingPrePad(LWasmCallLandingPrePad* lir) {
  LBlock* block = lir->block();
  MWasmCallLandingPrePad* mir = lir->mir();
  MBasicBlock* mirBlock = mir->block();
  MBasicBlock* callMirBlock = mir->callBlock();

  // This block must be the pre-pad successor of the call block. No blocks may
  // be inserted between us, such as for critical edge splitting.
  MOZ_RELEASE_ASSERT(mirBlock == callMirBlock->getSuccessor(
                                     MWasmCallCatchable::PrePadBranchIndex));

  // This instruction or a move group must be the first instruction in the
  // block. No other instructions may be inserted.
  MOZ_RELEASE_ASSERT(*block->begin() == lir || (block->begin()->isMoveGroup() &&
                                                *(++block->begin()) == lir));

  wasm::TryNoteVector& tryNotes = masm.tryNotes();
  wasm::TryNote& tryNote = tryNotes[mir->tryNoteIndex()];
  // Set the entry point for the call try note to be the beginning of this
  // block. The above assertions (and assertions in visitWasmCall) guarantee
  // that we are not skipping over instructions that should be executed.
  tryNote.setLandingPad(block->label()->offset(), masm.framePushed());
}
// Record the safepoint for the second (slow-path) call instruction emitted by
// wasmCallIndirect; the location was captured in visitWasmCall above.
void CodeGenerator::visitWasmCallIndirectAdjunctSafepoint(
    LWasmCallIndirectAdjunctSafepoint* lir) {
  markSafepointAt(lir->safepointLocation().offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(
      lir->framePushedAtStackMapBase());
}
// If |ins| carries trap metadata, register a NullPointerDereference trap site
// for the machine instruction at |fco|, so a signal-handler fault there is
// reported as a wasm null-pointer trap at the recorded bytecode offset.
// No-op when the instruction cannot trap.
template <typename InstructionWithMaybeTrapSite>
void EmitSignalNullCheckTrapSite(MacroAssembler& masm,
                                 InstructionWithMaybeTrapSite* ins,
                                 FaultingCodeOffset fco,
                                 wasm::TrapMachineInsn tmi) {
  if (!ins->maybeTrap()) {
    return;
  }
  wasm::BytecodeOffset trapOffset(ins->maybeTrap()->offset);
  masm.append(wasm::Trap::NullPointerDereference,
              wasm::TrapSite(tmi, fco, trapOffset));
}
// Load a value of the given MIRType from a slot in a wasm object (e.g. a
// struct field), applying any requested integer widening and registering a
// null-check trap site for each emitted load.
void CodeGenerator::visitWasmLoadSlot(LWasmLoadSlot* ins) {
  MIRType type = ins->type();
  MWideningOp wideningOp = ins->wideningOp();
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  AnyRegister dst = ToAnyRegister(ins->output());

  FaultingCodeOffset fco;
  switch (type) {
    case MIRType::Int32:
      // Sub-word fields are widened to Int32 with the matching sign- or
      // zero-extending load.
      switch (wideningOp) {
        case MWideningOp::None:
          fco = masm.load32(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load32);
          break;
        case MWideningOp::FromU16:
          fco = masm.load16ZeroExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load16);
          break;
        case MWideningOp::FromS16:
          fco = masm.load16SignExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load16);
          break;
        case MWideningOp::FromU8:
          fco = masm.load8ZeroExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load8);
          break;
        case MWideningOp::FromS8:
          fco = masm.load8SignExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load8);
          break;
        default:
          MOZ_CRASH("unexpected widening op in ::visitWasmLoadSlot");
      }
      break;
    case MIRType::Float32:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadFloat32(addr, dst.fpu());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Load32);
      break;
    case MIRType::Double:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadDouble(addr, dst.fpu());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Load64);
      break;
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadPtr(addr, dst.gpr());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsnForLoadWord());
      break;
#ifdef ENABLE_WASM_SIMD
    case MIRType::Simd128:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadUnalignedSimd128(addr, dst.fpu());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Load128);
      break;
#endif
    default:
      MOZ_CRASH("unexpected type in ::visitWasmLoadSlot");
  }
}
// Store a value of the given MIRType into a slot in a wasm object, applying
// any requested integer narrowing and registering a null-check trap site for
// each emitted store. Reference-typed stores must use LWasmStoreRef instead
// (they need barriers).
void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
  MIRType type = ins->type();
  MNarrowingOp narrowingOp = ins->narrowingOp();
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  AnyRegister src = ToAnyRegister(ins->value());
  // Narrowing is only meaningful for Int32 sources.
  if (type != MIRType::Int32) {
    MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
  }

  FaultingCodeOffset fco;
  switch (type) {
    case MIRType::Int32:
      switch (narrowingOp) {
        case MNarrowingOp::None:
          fco = masm.store32(src.gpr(), addr);
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Store32);
          break;
        case MNarrowingOp::To16:
          fco = masm.store16(src.gpr(), addr);
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Store16);
          break;
        case MNarrowingOp::To8:
          fco = masm.store8(src.gpr(), addr);
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Store8);
          break;
        default:
          MOZ_CRASH();
      }
      break;
    case MIRType::Float32:
      fco = masm.storeFloat32(src.fpu(), addr);
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Store32);
      break;
    case MIRType::Double:
      fco = masm.storeDouble(src.fpu(), addr);
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Store64);
      break;
    case MIRType::Pointer:
      // This could be correct, but it would be a new usage, so check carefully.
      MOZ_CRASH("Unexpected type in visitWasmStoreSlot.");
    case MIRType::WasmAnyRef:
      MOZ_CRASH("Bad type in visitWasmStoreSlot. Use LWasmStoreRef.");
#ifdef ENABLE_WASM_SIMD
    case MIRType::Simd128:
      fco = masm.storeUnalignedSimd128(src.fpu(), addr);
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Store128);
      break;
#endif
    default:
      MOZ_CRASH("unexpected type in StorePrimitiveValue");
  }
}
9247 void CodeGenerator::visitWasmLoadTableElement(LWasmLoadTableElement* ins) {
9248 Register elements = ToRegister(ins->elements());
9249 Register index = ToRegister(ins->index());
9250 Register output = ToRegister(ins->output());
9251 masm.loadPtr(BaseIndex(elements, index, ScalePointer), output);
// Compute |base + offset| into the output register (a pointer derived from a
// wasm object; kept as two ops so output may alias base).
void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
  masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
  masm.addPtr(Imm32(int32_t(ins->offset())), ToRegister(ins->output()));
}
9259 void CodeGenerator::visitWasmDerivedIndexPointer(
9260 LWasmDerivedIndexPointer* ins) {
9261 Register base = ToRegister(ins->base());
9262 Register index = ToRegister(ins->index());
9263 Register output = ToRegister(ins->output());
9264 masm.computeEffectiveAddress(BaseIndex(base, index, ins->scale()), output);
// Store a reference value into a wasm object slot, emitting the pre-write
// barrier (when requested) before the store and registering a null-check trap
// site on the store itself. The post-write barrier is a separate LIR node.
void CodeGenerator::visitWasmStoreRef(LWasmStoreRef* ins) {
  Register instance = ToRegister(ins->instance());
  Register valueBase = ToRegister(ins->valueBase());
  size_t offset = ins->offset();
  Register value = ToRegister(ins->value());
  Register temp = ToRegister(ins->temp0());

  if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
    Label skipPreBarrier;
    wasm::EmitWasmPreBarrierGuard(
        masm, instance, temp, valueBase, offset, &skipPreBarrier,
        ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
    wasm::EmitWasmPreBarrierCall(masm, instance, temp, valueBase, offset);
    masm.bind(&skipPreBarrier);
  }

  FaultingCodeOffset fco = masm.storePtr(value, Address(valueBase, offset));
  EmitSignalNullCheckTrapSite(masm, ins, fco,
                              wasm::TrapMachineInsnForStoreWord());
  // The postbarrier is handled separately.
}
// Out-of-line path to update the store buffer for wasm references.
// Captures the instruction (for live-register restoration), the base register
// and offset locating the stored slot, and a temp register.
class OutOfLineWasmCallPostWriteBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;
  Register valueBase_;
  Register temp_;
  uint32_t valueOffset_;

 public:
  OutOfLineWasmCallPostWriteBarrier(LInstruction* lir, Register valueBase,
                                    Register temp, uint32_t valueOffset)
      : lir_(lir),
        valueBase_(valueBase),
        temp_(temp),
        valueOffset_(valueOffset) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineWasmCallPostWriteBarrier(this);
  }

  LInstruction* lir() const { return lir_; }
  Register valueBase() const { return valueBase_; }
  Register temp() const { return temp_; }
  uint32_t valueOffset() const { return valueOffset_; }
};
// Out-of-line post-write barrier: saves live volatile registers, then calls
// Instance::postBarrier via the wasm ABI with the address of the stored slot.
void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrier(
    OutOfLineWasmCallPostWriteBarrier* ool) {
  saveLiveVolatile(ool->lir());
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();

  // Fold the value offset into the value base
  Register valueAddr = ool->valueBase();
  Register temp = ool->temp();
  masm.computeEffectiveAddress(Address(valueAddr, ool->valueOffset()), temp);

  // Call Instance::postBarrier
  masm.setupWasmABICall();
  masm.passABIArg(InstanceReg);
  masm.passABIArg(temp);
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
                   mozilla::Some(instanceOffset), MoveOp::GENERAL);

  masm.Pop(InstanceReg);
  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
// Inline post-write-barrier guard for a wasm reference store: skip the
// out-of-line store-buffer update when no generational barrier is needed.
void CodeGenerator::visitWasmPostWriteBarrier(LWasmPostWriteBarrier* lir) {
  Register object = ToRegister(lir->object());
  Register value = ToRegister(lir->value());
  Register valueBase = ToRegister(lir->valueBase());
  Register temp = ToRegister(lir->temp0());
  MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
  auto ool = new (alloc()) OutOfLineWasmCallPostWriteBarrier(
      lir, valueBase, temp, lir->valueOffset());
  addOutOfLineCode(ool, lir->mir());

  // Jumps straight to rejoin when the barrier is unnecessary (e.g. the
  // stored value is not a nursery cell).
  wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
                                 ool->rejoin());
  masm.jump(ool->entry());
  masm.bind(ool->rejoin());
}
// Load a 64-bit integer from a wasm object slot, registering a null-check
// trap site for each machine load emitted.
void CodeGenerator::visitWasmLoadSlotI64(LWasmLoadSlotI64* ins) {
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  Register64 output = ToOutRegister64(ins);
  // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
  // transaction will always trap before the other, so it seems safest to
  // register both of them as potentially trapping.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
#else
  FaultingCodeOffsetPair fcop = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Load32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Load32);
#endif
}
// Store a 64-bit integer to a wasm object slot, registering a null-check
// trap site for each machine store emitted.
void CodeGenerator::visitWasmStoreSlotI64(LWasmStoreSlotI64* ins) {
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  Register64 value = ToRegister64(ins->value());
  // Either 1 or 2 words. As above we register both transactions in the
  // 2-word case.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
#else
  FaultingCodeOffsetPair fcop = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Store32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Store32);
#endif
}
9393 void CodeGenerator::visitArrayBufferByteLength(LArrayBufferByteLength* lir) {
9394 Register obj = ToRegister(lir->object());
9395 Register out = ToRegister(lir->output());
9396 masm.loadArrayBufferByteLengthIntPtr(obj, out);
9399 void CodeGenerator::visitArrayBufferViewLength(LArrayBufferViewLength* lir) {
9400 Register obj = ToRegister(lir->object());
9401 Register out = ToRegister(lir->output());
9402 masm.loadArrayBufferViewLengthIntPtr(obj, out);
9405 void CodeGenerator::visitArrayBufferViewByteOffset(
9406 LArrayBufferViewByteOffset* lir) {
9407 Register obj = ToRegister(lir->object());
9408 Register out = ToRegister(lir->output());
9409 masm.loadArrayBufferViewByteOffsetIntPtr(obj, out);
9412 void CodeGenerator::visitArrayBufferViewElements(
9413 LArrayBufferViewElements* lir) {
9414 Register obj = ToRegister(lir->object());
9415 Register out = ToRegister(lir->output());
9416 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), out);
9419 void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
9420 Register obj = ToRegister(lir->object());
9421 Register out = ToRegister(lir->output());
9423 masm.typedArrayElementSize(obj, out);
9426 void CodeGenerator::visitGuardHasAttachedArrayBuffer(
9427 LGuardHasAttachedArrayBuffer* lir) {
9428 Register obj = ToRegister(lir->object());
9429 Register temp = ToRegister(lir->temp0());
9431 Label bail;
9432 masm.branchIfHasDetachedArrayBuffer(obj, temp, &bail);
9433 bailoutFrom(&bail, lir->snapshot());
// Out-of-line path for LGuardNumberToIntPtrIndex when out-of-bounds doubles
// are allowed: substitutes a sentinel index instead of bailing out.
class OutOfLineGuardNumberToIntPtrIndex
    : public OutOfLineCodeBase<CodeGenerator> {
  LGuardNumberToIntPtrIndex* lir_;

 public:
  explicit OutOfLineGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex* lir)
      : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineGuardNumberToIntPtrIndex(this);
  }

  LGuardNumberToIntPtrIndex* lir() const { return lir_; }
};
// Convert a double to an intptr index. When OOB accesses are not supported,
// a non-convertible double bails out; otherwise it jumps to the out-of-line
// path which substitutes an out-of-bounds sentinel index.
void CodeGenerator::visitGuardNumberToIntPtrIndex(
    LGuardNumberToIntPtrIndex* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());

  if (!lir->mir()->supportOOB()) {
    Label bail;
    masm.convertDoubleToPtr(input, output, &bail, false);
    bailoutFrom(&bail, lir->snapshot());
    return;
  }

  auto* ool = new (alloc()) OutOfLineGuardNumberToIntPtrIndex(lir);
  addOutOfLineCode(ool, lir->mir());

  masm.convertDoubleToPtr(input, output, ool->entry(), false);
  masm.bind(ool->rejoin());
}
void CodeGenerator::visitOutOfLineGuardNumberToIntPtrIndex(
    OutOfLineGuardNumberToIntPtrIndex* ool) {
  // Substitute the invalid index with an arbitrary out-of-bounds index.
  masm.movePtr(ImmWord(-1), ToRegister(ool->lir()->output()));
  masm.jump(ool->rejoin());
}
9476 void CodeGenerator::visitStringLength(LStringLength* lir) {
9477 Register input = ToRegister(lir->string());
9478 Register output = ToRegister(lir->output());
9480 masm.loadStringLength(input, output);
// Compute min or max of two int32 values into the output register (which is
// tied to the first operand by lowering).
void CodeGenerator::visitMinMaxI(LMinMaxI* ins) {
  Register first = ToRegister(ins->first());
  Register output = ToRegister(ins->output());

  MOZ_ASSERT(first == output);

  // For max we keep |first| when it is greater; for min, when it is less.
  Assembler::Condition cond =
      ins->mir()->isMax() ? Assembler::GreaterThan : Assembler::LessThan;

  if (ins->second()->isConstant()) {
    // Branch-over form: only overwrite the output when the constant wins.
    Label done;
    masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done);
    masm.move32(Imm32(ToInt32(ins->second())), output);
    masm.bind(&done);
  } else {
    // Register form: conditional move selects the winner branchlessly.
    Register second = ToRegister(ins->second());
    masm.cmp32Move32(cond, second, first, second, output);
  }
}
9503 void CodeGenerator::visitMinMaxArrayI(LMinMaxArrayI* ins) {
9504 Register array = ToRegister(ins->array());
9505 Register output = ToRegister(ins->output());
9506 Register temp1 = ToRegister(ins->temp1());
9507 Register temp2 = ToRegister(ins->temp2());
9508 Register temp3 = ToRegister(ins->temp3());
9509 bool isMax = ins->isMax();
9511 Label bail;
9512 masm.minMaxArrayInt32(array, output, temp1, temp2, temp3, isMax, &bail);
9513 bailoutFrom(&bail, ins->snapshot());
9516 void CodeGenerator::visitMinMaxArrayD(LMinMaxArrayD* ins) {
9517 Register array = ToRegister(ins->array());
9518 FloatRegister output = ToFloatRegister(ins->output());
9519 Register temp1 = ToRegister(ins->temp1());
9520 Register temp2 = ToRegister(ins->temp2());
9521 FloatRegister floatTemp = ToFloatRegister(ins->floatTemp());
9522 bool isMax = ins->isMax();
9524 Label bail;
9525 masm.minMaxArrayNumber(array, output, floatTemp, temp1, temp2, isMax, &bail);
9526 bailoutFrom(&bail, ins->snapshot());
9529 // For Abs*, lowering will have tied input to output on platforms where that is
9530 // sensible, and otherwise left them untied.
9532 void CodeGenerator::visitAbsI(LAbsI* ins) {
9533 Register input = ToRegister(ins->input());
9534 Register output = ToRegister(ins->output());
9536 if (ins->mir()->fallible()) {
9537 Label positive;
9538 if (input != output) {
9539 masm.move32(input, output);
9541 masm.branchTest32(Assembler::NotSigned, output, output, &positive);
9542 Label bail;
9543 masm.branchNeg32(Assembler::Overflow, output, &bail);
9544 bailoutFrom(&bail, ins->snapshot());
9545 masm.bind(&positive);
9546 } else {
9547 masm.abs32(input, output);
9551 void CodeGenerator::visitAbsD(LAbsD* ins) {
9552 masm.absDouble(ToFloatRegister(ins->input()), ToFloatRegister(ins->output()));
9555 void CodeGenerator::visitAbsF(LAbsF* ins) {
9556 masm.absFloat32(ToFloatRegister(ins->input()),
9557 ToFloatRegister(ins->output()));
9560 void CodeGenerator::visitPowII(LPowII* ins) {
9561 Register value = ToRegister(ins->value());
9562 Register power = ToRegister(ins->power());
9563 Register output = ToRegister(ins->output());
9564 Register temp0 = ToRegister(ins->temp0());
9565 Register temp1 = ToRegister(ins->temp1());
9567 Label bailout;
9568 masm.pow32(value, power, output, temp0, temp1, &bailout);
9569 bailoutFrom(&bailout, ins->snapshot());
9572 void CodeGenerator::visitPowI(LPowI* ins) {
9573 FloatRegister value = ToFloatRegister(ins->value());
9574 Register power = ToRegister(ins->power());
9576 using Fn = double (*)(double x, int32_t y);
9577 masm.setupAlignedABICall();
9578 masm.passABIArg(value, MoveOp::DOUBLE);
9579 masm.passABIArg(power);
9581 masm.callWithABI<Fn, js::powi>(MoveOp::DOUBLE);
9582 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9585 void CodeGenerator::visitPowD(LPowD* ins) {
9586 FloatRegister value = ToFloatRegister(ins->value());
9587 FloatRegister power = ToFloatRegister(ins->power());
9589 using Fn = double (*)(double x, double y);
9590 masm.setupAlignedABICall();
9591 masm.passABIArg(value, MoveOp::DOUBLE);
9592 masm.passABIArg(power, MoveOp::DOUBLE);
9593 masm.callWithABI<Fn, ecmaPow>(MoveOp::DOUBLE);
9595 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9598 void CodeGenerator::visitPowOfTwoI(LPowOfTwoI* ins) {
9599 Register power = ToRegister(ins->power());
9600 Register output = ToRegister(ins->output());
9602 uint32_t base = ins->base();
9603 MOZ_ASSERT(mozilla::IsPowerOfTwo(base));
9605 uint32_t n = mozilla::FloorLog2(base);
9606 MOZ_ASSERT(n != 0);
9608 // Hacker's Delight, 2nd edition, theorem D2.
9609 auto ceilingDiv = [](uint32_t x, uint32_t y) { return (x + y - 1) / y; };
9611 // Take bailout if |power| is greater-or-equals |log_y(2^31)| or is negative.
9612 // |2^(n*y) < 2^31| must hold, hence |n*y < 31| resp. |y < 31/n|.
9614 // Note: it's important for this condition to match the code in CacheIR.cpp
9615 // (CanAttachInt32Pow) to prevent failure loops.
9616 bailoutCmp32(Assembler::AboveOrEqual, power, Imm32(ceilingDiv(31, n)),
9617 ins->snapshot());
9619 // Compute (2^n)^y as 2^(n*y) using repeated shifts. We could directly scale
9620 // |power| and perform a single shift, but due to the lack of necessary
9621 // MacroAssembler functionality, like multiplying a register with an
9622 // immediate, we restrict the number of generated shift instructions when
9623 // lowering this operation.
9624 masm.move32(Imm32(1), output);
9625 do {
9626 masm.lshift32(power, output);
9627 n--;
9628 } while (n > 0);
9631 void CodeGenerator::visitSqrtD(LSqrtD* ins) {
9632 FloatRegister input = ToFloatRegister(ins->input());
9633 FloatRegister output = ToFloatRegister(ins->output());
9634 masm.sqrtDouble(input, output);
9637 void CodeGenerator::visitSqrtF(LSqrtF* ins) {
9638 FloatRegister input = ToFloatRegister(ins->input());
9639 FloatRegister output = ToFloatRegister(ins->output());
9640 masm.sqrtFloat32(input, output);
9643 void CodeGenerator::visitSignI(LSignI* ins) {
9644 Register input = ToRegister(ins->input());
9645 Register output = ToRegister(ins->output());
9646 masm.signInt32(input, output);
9649 void CodeGenerator::visitSignD(LSignD* ins) {
9650 FloatRegister input = ToFloatRegister(ins->input());
9651 FloatRegister output = ToFloatRegister(ins->output());
9652 masm.signDouble(input, output);
9655 void CodeGenerator::visitSignDI(LSignDI* ins) {
9656 FloatRegister input = ToFloatRegister(ins->input());
9657 FloatRegister temp = ToFloatRegister(ins->temp0());
9658 Register output = ToRegister(ins->output());
9660 Label bail;
9661 masm.signDoubleToInt32(input, output, temp, &bail);
9662 bailoutFrom(&bail, ins->snapshot());
9665 void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) {
9666 FloatRegister input = ToFloatRegister(ins->input());
9667 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9669 UnaryMathFunction fun = ins->mir()->function();
9670 UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);
9672 masm.setupAlignedABICall();
9674 masm.passABIArg(input, MoveOp::DOUBLE);
9675 masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
9676 MoveOp::DOUBLE);
9679 void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) {
9680 FloatRegister input = ToFloatRegister(ins->input());
9681 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);
9683 masm.setupAlignedABICall();
9684 masm.passABIArg(input, MoveOp::FLOAT32);
9686 using Fn = float (*)(float x);
9687 Fn funptr = nullptr;
9688 CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check;
9689 switch (ins->mir()->function()) {
9690 case UnaryMathFunction::Floor:
9691 funptr = floorf;
9692 check = CheckUnsafeCallWithABI::DontCheckOther;
9693 break;
9694 case UnaryMathFunction::Round:
9695 funptr = math_roundf_impl;
9696 break;
9697 case UnaryMathFunction::Trunc:
9698 funptr = math_truncf_impl;
9699 break;
9700 case UnaryMathFunction::Ceil:
9701 funptr = ceilf;
9702 check = CheckUnsafeCallWithABI::DontCheckOther;
9703 break;
9704 default:
9705 MOZ_CRASH("Unknown or unsupported float32 math function");
9708 masm.callWithABI(DynamicFunction<Fn>(funptr), MoveOp::FLOAT32, check);
9711 void CodeGenerator::visitModD(LModD* ins) {
9712 MOZ_ASSERT(!gen->compilingWasm());
9714 FloatRegister lhs = ToFloatRegister(ins->lhs());
9715 FloatRegister rhs = ToFloatRegister(ins->rhs());
9717 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9719 using Fn = double (*)(double a, double b);
9720 masm.setupAlignedABICall();
9721 masm.passABIArg(lhs, MoveOp::DOUBLE);
9722 masm.passABIArg(rhs, MoveOp::DOUBLE);
9723 masm.callWithABI<Fn, NumberMod>(MoveOp::DOUBLE);
// Inline double modulo where the divisor is a known power of two.
void CodeGenerator::visitModPowTwoD(LModPowTwoD* ins) {
  FloatRegister lhs = ToFloatRegister(ins->lhs());
  uint32_t divisor = ins->divisor();
  MOZ_ASSERT(mozilla::IsPowerOfTwo(divisor));

  FloatRegister output = ToFloatRegister(ins->output());

  // Compute |n % d| using |copysign(n - (d * trunc(n / d)), n)|.
  //
  // This doesn't work if |d| isn't a power of two, because we may lose too much
  // precision. For example |Number.MAX_VALUE % 3 == 2|, but
  // |3 * trunc(Number.MAX_VALUE / 3) == Infinity|.

  Label done;
  {
    ScratchDoubleScope scratch(masm);

    // Subnormals can lead to performance degradation, which can make calling
    // |fmod| faster than this inline implementation. Work around this issue by
    // directly returning the input for any value in the interval ]-1, +1[.
    // (|n % d == n| holds for any |n| with |abs(n) < 1| and integral |d|.)
    Label notSubnormal;
    masm.loadConstantDouble(1.0, scratch);
    masm.loadConstantDouble(-1.0, output);
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, lhs, scratch,
                      &notSubnormal);
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, lhs, output,
                      &notSubnormal);

    masm.moveDouble(lhs, output);
    masm.jump(&done);

    masm.bind(&notSubnormal);

    if (divisor == 1) {
      // The pattern |n % 1 == 0| is used to detect integer numbers. We can skip
      // the multiplication by one in this case.
      masm.moveDouble(lhs, output);
      masm.nearbyIntDouble(RoundingMode::TowardsZero, output, scratch);
      masm.subDouble(scratch, output);
    } else {
      // scratch = trunc(n / d); output = n - d * scratch.
      masm.loadConstantDouble(1.0 / double(divisor), scratch);
      masm.loadConstantDouble(double(divisor), output);

      masm.mulDouble(lhs, scratch);
      masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
      masm.mulDouble(output, scratch);

      masm.moveDouble(lhs, output);
      masm.subDouble(scratch, output);
    }
  }

  // Restore the sign of the dividend, so e.g. |-0 % 2| stays |-0|.
  masm.copySignDouble(output, lhs, output);
  masm.bind(&done);
}
9782 void CodeGenerator::visitWasmBuiltinModD(LWasmBuiltinModD* ins) {
9783 masm.Push(InstanceReg);
9784 int32_t framePushedAfterInstance = masm.framePushed();
9786 FloatRegister lhs = ToFloatRegister(ins->lhs());
9787 FloatRegister rhs = ToFloatRegister(ins->rhs());
9789 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9791 masm.setupWasmABICall();
9792 masm.passABIArg(lhs, MoveOp::DOUBLE);
9793 masm.passABIArg(rhs, MoveOp::DOUBLE);
9795 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
9796 masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD,
9797 mozilla::Some(instanceOffset), MoveOp::DOUBLE);
9799 masm.Pop(InstanceReg);
// BigInt addition, inlined for the common case where both operands fit into
// a single pointer-sized digit; everything else goes through the VM call.
void CodeGenerator::visitBigIntAdd(LBigIntAdd* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::add>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n + x == x
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x + 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // Signed pointer-sized overflow also falls back to the VM call.
  masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt subtraction, inlined for single-digit operands; falls back to the
// VM for multi-digit values or on overflow.
void CodeGenerator::visitBigIntSub(LBigIntSub* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::sub>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x - 0n == x
  // (Note: |0n - x| can't reuse |rhs|, because the result needs a new sign.)
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigInt(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // Signed pointer-sized overflow also falls back to the VM call.
  masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt multiplication, inlined for single-digit operands; falls back to
// the VM for multi-digit values or on overflow.
void CodeGenerator::visitBigIntMul(LBigIntMul* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::mul>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n * x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x * 0n == 0n
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // Signed pointer-sized overflow also falls back to the VM call.
  masm.branchMulPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt division, inlined for single-digit operands. Division by zero and
// the INT_MIN / -1 overflow case are delegated to the VM call, which also
// reports the RangeError for the former.
void CodeGenerator::visitBigIntDiv(LBigIntDiv* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::div>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x / 0 throws an error.
  if (ins->mir()->canBeDivideByZero()) {
    masm.branchIfBigIntIsZero(rhs, ool->entry());
  }

  // 0n / x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // |BigInt::div()| returns |lhs| for |lhs / 1n|, which means there's no
  // allocation which might trigger a minor GC to free up nursery space. This
  // requires us to apply the same optimization here, otherwise we'd end up with
  // always entering the OOL call, because the nursery is never evicted.
  Label notOne;
  masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(1), &notOne);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&notOne);

  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Handle an integer overflow from INT{32,64}_MIN / -1.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::Equal, temp2, ImmWord(-1), ool->entry());
  masm.bind(&notOverflow);

  // Emit the platform-specific division and allocate the result BigInt.
  emitBigIntDiv(ins, temp1, temp2, output, ool->entry());

  masm.bind(ool->rejoin());
}
// BigInt remainder, inlined for single-digit operands. Modulo by zero is
// delegated to the VM call, which reports the RangeError.
void CodeGenerator::visitBigIntMod(LBigIntMod* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::mod>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x % 0 throws an error.
  if (ins->mir()->canBeDivideByZero()) {
    masm.branchIfBigIntIsZero(rhs, ool->entry());
  }

  // 0n % x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register. Absolute values suffice for the magnitude comparison below.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
  masm.loadBigIntAbsolute(rhs, temp2, ool->entry());

  // Similar to the case for BigInt division, we must apply the same allocation
  // optimizations as performed in |BigInt::mod()|: |abs(x) < abs(y)| implies
  // |x % y == x|, returned without allocating.
  Label notBelow;
  masm.branchPtr(Assembler::AboveOrEqual, temp1, temp2, &notBelow);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&notBelow);

  // Convert both digits to signed pointer-sized values.
  masm.bigIntDigitToSignedPtr(lhs, temp1, ool->entry());
  masm.bigIntDigitToSignedPtr(rhs, temp2, ool->entry());

  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Handle an integer overflow from INT{32,64}_MIN / -1: the remainder is 0
  // in that case, so replace the dividend with 0 before dividing.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(-1), &notOverflow);
  masm.movePtr(ImmWord(0), temp1);
  masm.bind(&notOverflow);

  // Emit the platform-specific remainder and allocate the result BigInt.
  emitBigIntMod(ins, temp1, temp2, output, ool->entry());

  masm.bind(ool->rejoin());
}
// BigInt exponentiation, inlined for single-digit operands using
// square-and-multiply. Negative exponents and anything overflowing a single
// digit fall back to the VM call (which also reports the RangeError).
void CodeGenerator::visitBigIntPow(LBigIntPow* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::pow>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x ** -y throws an error.
  if (ins->mir()->canBeNegativeExponent()) {
    masm.branchIfBigIntIsNegative(rhs, ool->entry());
  }

  // Register aliases for the square-and-multiply loop below.
  Register dest = temp1;
  Register base = temp2;
  Register exponent = output;

  Label done;
  masm.movePtr(ImmWord(1), dest);  // p = 1

  // 1n ** y == 1n
  // -1n ** y == 1n when y is even
  // -1n ** y == -1n when y is odd
  Label lhsNotOne;
  masm.branch32(Assembler::Above, Address(lhs, BigInt::offsetOfLength()),
                Imm32(1), &lhsNotOne);
  masm.loadFirstBigIntDigitOrZero(lhs, base);
  masm.branchPtr(Assembler::NotEqual, base, Imm32(1), &lhsNotOne);
  {
    masm.loadFirstBigIntDigitOrZero(rhs, exponent);

    Label lhsNonNegative;
    masm.branchIfBigIntIsNonNegative(lhs, &lhsNonNegative);
    // -1n with an even exponent: result is 1n (|dest| already holds 1).
    masm.branchTestPtr(Assembler::Zero, exponent, Imm32(1), &done);
    masm.bind(&lhsNonNegative);
    masm.movePtr(lhs, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&lhsNotOne);

  // x ** 0n == 1n
  masm.branchIfBigIntIsZero(rhs, &done);

  // 0n ** y == 0n with y != 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  {
    masm.movePtr(lhs, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&lhsNonZero);

  // Call into the VM when the exponent can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntAbsolute(rhs, exponent, ool->entry());

  // x ** y with x > 1 and y >= DigitBits can't be pointer-sized.
  masm.branchPtr(Assembler::AboveOrEqual, exponent, Imm32(BigInt::DigitBits),
                 ool->entry());

  // x ** 1n == x
  Label rhsNotOne;
  masm.branch32(Assembler::NotEqual, exponent, Imm32(1), &rhsNotOne);
  {
    masm.movePtr(lhs, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&rhsNotOne);

  // Call into the VM when the base operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, base, ool->entry());

  // MacroAssembler::pow32() adjusted to work on pointer-sized registers.
  //
  // m = base
  // n = exponent
  {
    Label start, loop;
    masm.jump(&start);
    masm.bind(&loop);

    // m *= m
    masm.branchMulPtr(Assembler::Overflow, base, base, ool->entry());

    masm.bind(&start);

    // if ((n & 1) != 0) p *= m
    Label even;
    masm.branchTest32(Assembler::Zero, exponent, Imm32(1), &even);
    masm.branchMulPtr(Assembler::Overflow, base, dest, ool->entry());
    masm.bind(&even);

    // n >>= 1
    // if (n == 0) return p
    masm.branchRshift32(Assembler::NonZero, Imm32(1), exponent, &loop);
  }

  MOZ_ASSERT(temp1 == dest);

  // Create and return the result.
  masm.bind(&done);
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt bitwise AND, inlined for single-digit operands.
void CodeGenerator::visitBigIntBitAnd(LBigIntBitAnd* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitAnd>(ins, ArgList(lhs, rhs),
                                            StoreRegisterTo(output));

  // 0n & x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x & 0n == 0n
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register. The load produces the two's complement representation, so the
  // plain pointer-sized AND below is correct for negative values too.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.andPtr(temp2, temp1);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt bitwise OR, inlined for single-digit operands.
void CodeGenerator::visitBigIntBitOr(LBigIntBitOr* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitOr>(ins, ArgList(lhs, rhs),
                                           StoreRegisterTo(output));

  // 0n | x == x
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x | 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register. The load produces the two's complement representation, so the
  // plain pointer-sized OR below is correct for negative values too.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.orPtr(temp2, temp1);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt bitwise XOR, inlined for single-digit operands.
void CodeGenerator::visitBigIntBitXor(LBigIntBitXor* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitXor>(ins, ArgList(lhs, rhs),
                                            StoreRegisterTo(output));

  // 0n ^ x == x
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x ^ 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register. The load produces the two's complement representation, so the
  // plain pointer-sized XOR below is correct for negative values too.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.xorPtr(temp2, temp1);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt left shift, inlined for the case when |lhs| contains a single
// digit; everything wider falls back to the VM call.
void CodeGenerator::visitBigIntLsh(LBigIntLsh* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::lsh>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n << x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x << 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Inline |BigInt::lsh| for the case when |lhs| contains a single digit.

  Label rhsTooLarge;
  masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);

  // Call into the VM when the left-hand side operand can't be loaded into a
  // pointer-sized register.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());

  // Handle shifts exceeding |BigInt::DigitBits| first.
  Label shift, create;
  masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
  {
    masm.bind(&rhsTooLarge);

    // x << DigitBits with x != 0n always exceeds pointer-sized storage.
    masm.branchIfBigIntIsNonNegative(rhs, ool->entry());

    // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
    // |temp1| holds the magnitude; the sign bit is applied at |create|.
    masm.move32(Imm32(0), temp1);
    masm.branchIfBigIntIsNonNegative(lhs, &create);
    masm.move32(Imm32(1), temp1);
    masm.jump(&create);
  }
  masm.bind(&shift);

  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
  {
    // Save the original digit for the rounding check below.
    masm.movePtr(temp1, temp3);

    // |x << -y| is computed as |x >> y|.
    masm.rshiftPtr(temp2, temp1);

    // For negative numbers, round down if any bit was shifted out.
    masm.branchIfBigIntIsNonNegative(lhs, &create);

    // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
    masm.movePtr(ImmWord(-1), output);
    masm.lshiftPtr(temp2, output);
    masm.notPtr(output);

    // Add plus one when |(lhs.digit(0) & mask) != 0|.
    masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
    masm.addPtr(ImmWord(1), temp1);
    masm.jump(&create);
  }
  masm.bind(&nonNegative);
  {
    // Save the shift amount; |temp2| is clobbered by the grow computation.
    masm.movePtr(temp2, temp3);

    // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
    masm.negPtr(temp2);
    masm.addPtr(Imm32(BigInt::DigitBits), temp2);
    masm.movePtr(temp1, output);
    masm.rshiftPtr(temp2, output);

    // Call into the VM when any bit will be shifted out.
    masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());

    masm.movePtr(temp3, temp2);
    masm.lshiftPtr(temp2, temp1);
  }
  masm.bind(&create);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the left-hand side is negative.
  masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// BigInt right shift, inlined for the case when |lhs| contains a single
// digit; everything wider falls back to the VM call. Mirrors visitBigIntLsh
// with the shift directions swapped.
void CodeGenerator::visitBigIntRsh(LBigIntRsh* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::rsh>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n >> x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x >> 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Inline |BigInt::rsh| for the case when |lhs| contains a single digit.

  Label rhsTooLarge;
  masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);

  // Call into the VM when the left-hand side operand can't be loaded into a
  // pointer-sized register.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());

  // Handle shifts exceeding |BigInt::DigitBits| first.
  Label shift, create;
  masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
  {
    masm.bind(&rhsTooLarge);

    // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
    masm.branchIfBigIntIsNegative(rhs, ool->entry());

    // x >> DigitBits is either 0n or -1n.
    // |temp1| holds the magnitude; the sign bit is applied at |create|.
    masm.move32(Imm32(0), temp1);
    masm.branchIfBigIntIsNonNegative(lhs, &create);
    masm.move32(Imm32(1), temp1);
    masm.jump(&create);
  }
  masm.bind(&shift);

  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
  {
    // Save the shift amount; |temp2| is clobbered by the grow computation.
    masm.movePtr(temp2, temp3);

    // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
    masm.negPtr(temp2);
    masm.addPtr(Imm32(BigInt::DigitBits), temp2);
    masm.movePtr(temp1, output);
    masm.rshiftPtr(temp2, output);

    // Call into the VM when any bit will be shifted out.
    masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());

    // |x >> -y| is computed as |x << y|.
    masm.movePtr(temp3, temp2);
    masm.lshiftPtr(temp2, temp1);
    masm.jump(&create);
  }
  masm.bind(&nonNegative);
  {
    // Save the original digit for the rounding check below.
    masm.movePtr(temp1, temp3);

    masm.rshiftPtr(temp2, temp1);

    // For negative numbers, round down if any bit was shifted out.
    masm.branchIfBigIntIsNonNegative(lhs, &create);

    // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
    masm.movePtr(ImmWord(-1), output);
    masm.lshiftPtr(temp2, output);
    masm.notPtr(output);

    // Add plus one when |(lhs.digit(0) & mask) != 0|.
    masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
    masm.addPtr(ImmWord(1), temp1);
  }
  masm.bind(&create);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the left-hand side is negative.
  masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emit |input + 1n| for a BigInt operand.
//
// Fast path: the BigInt's value fits into a single pointer-sized register.
// Everything else (heap digits, overflow of the digit) calls BigInt::inc in
// the VM via out-of-line code.
void CodeGenerator::visitBigIntIncrement(LBigIntIncrement* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::inc>(ins, ArgList(input), StoreRegisterTo(output));

  // Call into the VM when the input can't be loaded into a pointer-sized
  // register.
  masm.loadBigInt(input, temp1, ool->entry());
  masm.movePtr(ImmWord(1), temp2);

  // Call into the VM when the addition overflows the pointer-sized digit.
  masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit |input - 1n| for a BigInt operand.
//
// Mirrors visitBigIntIncrement: inline when the value fits into a
// pointer-sized register and the subtraction doesn't overflow, otherwise
// call BigInt::dec in the VM via out-of-line code.
void CodeGenerator::visitBigIntDecrement(LBigIntDecrement* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::dec>(ins, ArgList(input), StoreRegisterTo(output));

  // Call into the VM when the input can't be loaded into a pointer-sized
  // register.
  masm.loadBigInt(input, temp1, ool->entry());
  masm.movePtr(ImmWord(1), temp2);

  // Call into the VM when the subtraction overflows the pointer-sized digit.
  masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit |-input| for a BigInt operand.
//
// Zero is returned unchanged (-0n == 0n). Otherwise the BigInt is copied
// inline (falling back to BigInt::neg in the VM for heap digits) and the
// copy's sign bit is flipped.
void CodeGenerator::visitBigIntNegate(LBigIntNegate* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::neg>(ins, ArgList(input), StoreRegisterTo(output));

  // -0n == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(input, &lhsNonZero);
  masm.movePtr(input, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when the input uses heap digits.
  masm.copyBigIntWithInlineDigits(input, output, temp, initialBigIntHeap(),
                                  ool->entry());

  // Flip the sign bit.
  masm.xor32(Imm32(BigInt::signBitMask()),
             Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emit |~input| for a BigInt operand.
//
// Works on the absolute value: |~(-x)| is |x - 1| and |~x| is |-(x + 1)|,
// so the result's magnitude is computed first and the sign bit is set
// afterwards. Falls back to BigInt::bitNot in the VM when the magnitude
// doesn't fit into a pointer-sized register or the increment carries out.
void CodeGenerator::visitBigIntBitNot(LBigIntBitNot* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitNot>(ins, ArgList(input),
                                            StoreRegisterTo(output));

  masm.loadBigIntAbsolute(input, temp1, ool->entry());

  // This follows the C++ implementation because it lets us support the full
  // range [-2^64, 2^64 - 1] on 64-bit resp. [-2^32, 2^32 - 1] on 32-bit.
  Label nonNegative, done;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);

  // ~(-x) == ~(~(x-1)) == x-1
  masm.subPtr(Imm32(1), temp1);
  masm.jump(&done);

  masm.bind(&nonNegative);

  // ~x == -x-1 == -(x+1)
  masm.movePtr(ImmWord(1), temp2);
  masm.branchAddPtr(Assembler::CarrySet, temp2, temp1, ool->entry());

  masm.bind(&done);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the input is positive.
  masm.branchIfBigIntIsNegative(input, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Convert an int32 to its string representation in a given base.
//
// The base is either a run-time register value or a compile-time int32
// constant; both variants use the inline masm helper and fall back to
// js::Int32ToStringWithBase through out-of-line code.
void CodeGenerator::visitInt32ToStringWithBase(LInt32ToStringWithBase* lir) {
  Register input = ToRegister(lir->input());
  RegisterOrInt32 base = ToRegisterOrInt32(lir->base());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  using Fn = JSString* (*)(JSContext*, int32_t, int32_t);
  if (base.is<Register>()) {
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, base.as<Register>()), StoreRegisterTo(output));

    // The register-base helper needs the live volatile set because it can
    // spill around an internal call.
    LiveRegisterSet liveRegs = liveVolatileRegs(lir);
    masm.loadInt32ToStringWithBase(input, base.as<Register>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   liveRegs, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, Imm32(base.as<int32_t>())),
        StoreRegisterTo(output));

    masm.loadInt32ToStringWithBase(input, base.as<int32_t>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   ool->entry());
    masm.bind(ool->rejoin());
  }
}
// Emit parseInt(string, radix) where the radix is known to be 0 or 10.
//
// Fast path: strings that cache an index value produce that int32 directly.
// All other strings call js::NumberParseInt in the VM.
void CodeGenerator::visitNumberParseInt(LNumberParseInt* lir) {
  Register string = ToRegister(lir->string());
  Register radix = ToRegister(lir->radix());
  ValueOperand output = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
  masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
  masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
  masm.bind(&ok);
#endif

  // Use indexed value as fast path if possible.
  Label vmCall, done;
  masm.loadStringIndexValue(string, temp, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, temp, output);
  masm.jump(&done);
  {
    masm.bind(&vmCall);

    pushArg(radix);
    pushArg(string);

    using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
    callVM<Fn, js::NumberParseInt>(lir);
  }
  masm.bind(&done);
}
// Emit parseInt(double) by truncating towards zero.
//
// Bails out for NaN, for doubles that don't truncate to an int32, and for
// non-zero inputs so small that the double-to-string conversion would use
// exponential notation (which parseInt would parse differently).
void CodeGenerator::visitDoubleParseInt(LDoubleParseInt* lir) {
  FloatRegister number = ToFloatRegister(lir->number());
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->temp0());

  Label bail;
  // NaN never compares equal to itself (DoubleUnordered).
  masm.branchDouble(Assembler::DoubleUnordered, number, number, &bail);
  masm.branchTruncateDoubleToInt32(number, output, &bail);

  Label ok;
  masm.branch32(Assembler::NotEqual, output, Imm32(0), &ok);
  {
    // Accept both +0 and -0 and return 0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleEqual, number, temp, &ok);

    // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
    masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, temp);
    masm.branchDouble(Assembler::DoubleLessThan, number, temp, &bail);
  }
  masm.bind(&ok);

  bailoutFrom(&bail, lir->snapshot());
}
10654 void CodeGenerator::visitFloor(LFloor* lir) {
10655 FloatRegister input = ToFloatRegister(lir->input());
10656 Register output = ToRegister(lir->output());
10658 Label bail;
10659 masm.floorDoubleToInt32(input, output, &bail);
10660 bailoutFrom(&bail, lir->snapshot());
10663 void CodeGenerator::visitFloorF(LFloorF* lir) {
10664 FloatRegister input = ToFloatRegister(lir->input());
10665 Register output = ToRegister(lir->output());
10667 Label bail;
10668 masm.floorFloat32ToInt32(input, output, &bail);
10669 bailoutFrom(&bail, lir->snapshot());
10672 void CodeGenerator::visitCeil(LCeil* lir) {
10673 FloatRegister input = ToFloatRegister(lir->input());
10674 Register output = ToRegister(lir->output());
10676 Label bail;
10677 masm.ceilDoubleToInt32(input, output, &bail);
10678 bailoutFrom(&bail, lir->snapshot());
10681 void CodeGenerator::visitCeilF(LCeilF* lir) {
10682 FloatRegister input = ToFloatRegister(lir->input());
10683 Register output = ToRegister(lir->output());
10685 Label bail;
10686 masm.ceilFloat32ToInt32(input, output, &bail);
10687 bailoutFrom(&bail, lir->snapshot());
10690 void CodeGenerator::visitRound(LRound* lir) {
10691 FloatRegister input = ToFloatRegister(lir->input());
10692 FloatRegister temp = ToFloatRegister(lir->temp0());
10693 Register output = ToRegister(lir->output());
10695 Label bail;
10696 masm.roundDoubleToInt32(input, output, temp, &bail);
10697 bailoutFrom(&bail, lir->snapshot());
10700 void CodeGenerator::visitRoundF(LRoundF* lir) {
10701 FloatRegister input = ToFloatRegister(lir->input());
10702 FloatRegister temp = ToFloatRegister(lir->temp0());
10703 Register output = ToRegister(lir->output());
10705 Label bail;
10706 masm.roundFloat32ToInt32(input, output, temp, &bail);
10707 bailoutFrom(&bail, lir->snapshot());
10710 void CodeGenerator::visitTrunc(LTrunc* lir) {
10711 FloatRegister input = ToFloatRegister(lir->input());
10712 Register output = ToRegister(lir->output());
10714 Label bail;
10715 masm.truncDoubleToInt32(input, output, &bail);
10716 bailoutFrom(&bail, lir->snapshot());
10719 void CodeGenerator::visitTruncF(LTruncF* lir) {
10720 FloatRegister input = ToFloatRegister(lir->input());
10721 Register output = ToRegister(lir->output());
10723 Label bail;
10724 masm.truncFloat32ToInt32(input, output, &bail);
10725 bailoutFrom(&bail, lir->snapshot());
// Compare two strings with any equality or relational operator.
//
// The inline path (masm.compareStrings) handles the cases it can and jumps
// to the out-of-line VM call otherwise. The VM callee is chosen per
// operator; Le and Gt swap their operands so only LessThan and
// GreaterThanOrEqual comparators are needed.
void CodeGenerator::visitCompareS(LCompareS* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Lt) {
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Le) {
    // Push the operands in reverse order for JSOp::Le:
    // - |left <= right| is implemented as |right >= left|.
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(right, left), StoreRegisterTo(output));
  } else if (op == JSOp::Gt) {
    // Push the operands in reverse order for JSOp::Gt:
    // - |left > right| is implemented as |right < left|.
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(right, left), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ge);
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(left, right), StoreRegisterTo(output));
  }

  masm.compareStrings(op, left, right, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Equality-compare a string against a known, non-empty constant linear
// string.
//
// Cheap inline rejections run first — pointer equality, atom identity,
// Latin-1 vs two-byte encoding, and length — before falling through to a
// character-by-character comparison against the constant. The VM call is
// only taken when the characters can't be accessed inline.
void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsEqualityOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() > 0);

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ne || op == JSOp::StrictNe);
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  }

  Label compareChars;
  {
    Label notPointerEqual;

    // If operands point to the same instance, the strings are trivially equal.
    masm.branchPtr(Assembler::NotEqual, input, ImmGCPtr(str), &notPointerEqual);
    masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
    masm.jump(ool->rejoin());

    masm.bind(&notPointerEqual);

    Label setNotEqualResult;
    if (str->isAtom()) {
      // Atoms cannot be equal to each other if they point to different strings.
      Imm32 atomBit(JSString::ATOM_BIT);
      masm.branchTest32(Assembler::NonZero,
                        Address(input, JSString::offsetOfFlags()), atomBit,
                        &setNotEqualResult);
    }

    if (str->hasTwoByteChars()) {
      // Pure two-byte strings can't be equal to Latin-1 strings.
      JS::AutoCheckCannotGC nogc;
      if (!mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
        masm.branchLatin1String(input, &setNotEqualResult);
      }
    }

    // Strings of different length can never be equal.
    masm.branch32(Assembler::Equal, Address(input, JSString::offsetOfLength()),
                  Imm32(str->length()), &compareChars);

    masm.bind(&setNotEqualResult);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&compareChars);

  // Load the input string's characters.
  Register stringChars = output;
  masm.loadStringCharsForCompare(input, str, stringChars, ool->entry());

  // Start comparing character by character.
  masm.compareStringChars(op, stringChars, str, output);

  masm.bind(ool->rejoin());
}
// Relationally compare a string against a known single-character constant
// string, entirely inline (no VM fallback).
//
// Strategy: the empty string sorts purely by length; otherwise the input's
// first character (found by unwinding into the left-most rope child) decides
// the comparison, except when it equals the constant's character — then the
// string length (1 vs. longer) decides.
void CodeGenerator::visitCompareSSingle(LCompareSSingle* lir) {
  JSOp op = lir->jsop();
  MOZ_ASSERT(IsRelationalOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() == 1);

  char16_t ch = str->latin1OrTwoByteChar(0);

  masm.movePtr(input, temp);

  // Check if the string is empty.
  Label compareLength;
  masm.branch32(Assembler::Equal, Address(temp, JSString::offsetOfLength()),
                Imm32(0), &compareLength);

  // The first character is in the left-most rope child.
  Label notRope;
  masm.branchIfNotRope(temp, &notRope);
  {
    // Unwind ropes at the start if possible.
    Label unwindRope;
    masm.bind(&unwindRope);
    masm.loadRopeLeftChild(temp, output);
    masm.movePtr(output, temp);

#ifdef DEBUG
    Label notEmpty;
    masm.branch32(Assembler::NotEqual,
                  Address(temp, JSString::offsetOfLength()), Imm32(0),
                  &notEmpty);
    masm.assumeUnreachable("rope children are non-empty");
    masm.bind(&notEmpty);
#endif

    // Otherwise keep unwinding ropes.
    masm.branchIfRope(temp, &unwindRope);
  }
  masm.bind(&notRope);

  // Load the first character into |output|.
  auto loadFirstChar = [&](auto encoding) {
    masm.loadStringChars(temp, output, encoding);
    masm.loadChar(Address(output, 0), output, encoding);
  };

  Label done;
  if (ch <= JSString::MAX_LATIN1_CHAR) {
    // Handle both encodings when the search character is Latin-1.
    Label twoByte, compare;
    masm.branchTwoByteString(temp, &twoByte);

    loadFirstChar(CharEncoding::Latin1);
    masm.jump(&compare);

    masm.bind(&twoByte);
    loadFirstChar(CharEncoding::TwoByte);

    masm.bind(&compare);
  } else {
    // The search character is a two-byte character, so it can't be equal to any
    // character of a Latin-1 string.
    masm.move32(Imm32(int32_t(op == JSOp::Lt || op == JSOp::Le)), output);
    masm.branchLatin1String(temp, &done);

    loadFirstChar(CharEncoding::TwoByte);
  }

  // Compare the string length when the search character is equal to the
  // input's first character.
  masm.branch32(Assembler::Equal, output, Imm32(ch), &compareLength);

  // Otherwise compute the result and jump to the end.
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false), output, Imm32(ch),
                output);
  masm.jump(&done);

  // Compare the string length to compute the overall result.
  masm.bind(&compareLength);
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                Address(temp, JSString::offsetOfLength()), Imm32(1), output);

  masm.bind(&done);
}
// Compare two BigInts with any equality or relational operator.
//
// masm.equalBigInts falls through when the operands are equal; otherwise it
// jumps to one of three targets depending on whether the signs, the digit
// lengths, or a single digit pair differed. For equality ops all three fold
// into one "not same" outcome; relational ops resolve each case separately
// and invert the partial result when the operands are negative.
void CodeGenerator::visitCompareBigInt(LCompareBigInt* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label notSame;
  Label compareSign;
  Label compareLength;
  Label compareDigit;

  Label* notSameSign;
  Label* notSameLength;
  Label* notSameDigit;
  if (IsEqualityOp(op)) {
    notSameSign = &notSame;
    notSameLength = &notSame;
    notSameDigit = &notSame;
  } else {
    notSameSign = &compareSign;
    notSameLength = &compareLength;
    notSameDigit = &compareDigit;
  }

  masm.equalBigInts(left, right, temp0, temp1, temp2, output, notSameSign,
                    notSameLength, notSameDigit);

  // Operands compared equal: Eq/StrictEq/Le/Ge are true, the rest false.
  Label done;
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
                    op == JSOp::Ge),
              output);
  masm.jump(&done);

  if (IsEqualityOp(op)) {
    masm.bind(&notSame);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  } else {
    Label invertWhenNegative;

    // There are two cases when sign(left) != sign(right):
    // 1. sign(left) = positive and sign(right) = negative,
    // 2. or the dual case with reversed signs.
    //
    // For case 1, |left| <cmp> |right| is true for cmp=Gt or cmp=Ge and false
    // for cmp=Lt or cmp=Le. Initialize the result for case 1 and handle case 2
    // with |invertWhenNegative|.
    masm.bind(&compareSign);
    masm.move32(Imm32(op == JSOp::Gt || op == JSOp::Ge), output);
    masm.jump(&invertWhenNegative);

    // For sign(left) = sign(right) and len(digits(left)) != len(digits(right)),
    // we have to consider the two cases:
    // 1. len(digits(left)) < len(digits(right))
    // 2. len(digits(left)) > len(digits(right))
    //
    // For |left| <cmp> |right| with cmp=Lt:
    // Assume both BigInts are positive, then |left < right| is true for case 1
    // and false for case 2. When both are negative, the result is reversed.
    //
    // The other comparison operators can be handled similarly.
    //
    // |temp0| holds the digits length of the right-hand side operand.
    masm.bind(&compareLength);
    masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                  Address(left, BigInt::offsetOfLength()), temp0, output);
    masm.jump(&invertWhenNegative);

    // Similar to the case above, compare the current digit to determine the
    // overall comparison result.
    //
    // |temp1| points to the current digit of the left-hand side operand.
    // |output| holds the current digit of the right-hand side operand.
    masm.bind(&compareDigit);
    masm.cmpPtrSet(JSOpToCondition(op, /* isSigned = */ false),
                   Address(temp1, 0), output, output);

    Label nonNegative;
    masm.bind(&invertWhenNegative);
    masm.branchIfBigIntIsNonNegative(left, &nonNegative);
    masm.xor32(Imm32(1), output);
    masm.bind(&nonNegative);
  }

  masm.bind(&done);
}
11018 void CodeGenerator::visitCompareBigIntInt32(LCompareBigIntInt32* lir) {
11019 JSOp op = lir->mir()->jsop();
11020 Register left = ToRegister(lir->left());
11021 Register right = ToRegister(lir->right());
11022 Register temp0 = ToRegister(lir->temp0());
11023 Register temp1 = ToRegister(lir->temp1());
11024 Register output = ToRegister(lir->output());
11026 Label ifTrue, ifFalse;
11027 masm.compareBigIntAndInt32(op, left, right, temp0, temp1, &ifTrue, &ifFalse);
11029 Label done;
11030 masm.bind(&ifFalse);
11031 masm.move32(Imm32(0), output);
11032 masm.jump(&done);
11033 masm.bind(&ifTrue);
11034 masm.move32(Imm32(1), output);
11035 masm.bind(&done);
// Compare a BigInt against a double via an ABI call.
//
// Le and Gt are implemented by swapping the operands and calling the
// number-vs-BigInt variant, so only Equal/NotEqual/LessThan/
// GreaterThanOrEqual comparators are needed.
void CodeGenerator::visitCompareBigIntDouble(LCompareBigIntDouble* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  FloatRegister right = ToFloatRegister(lir->right());
  Register output = ToRegister(lir->output());

  masm.setupAlignedABICall();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(right, MoveOp::DOUBLE);
    masm.passABIArg(left);
  } else {
    masm.passABIArg(left);
    masm.passABIArg(right, MoveOp::DOUBLE);
  }

  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(output);
}
// Compare a BigInt against a string by calling into the VM.
//
// Like the BigInt/double case, Le and Gt swap their operands and use the
// string-vs-BigInt VM function so only LessThan and GreaterThanOrEqual
// comparators are needed.
void CodeGenerator::visitCompareBigIntString(LCompareBigIntString* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    pushArg(left);
    pushArg(right);
  } else {
    pushArg(right);
    pushArg(left);
  }

  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);

  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callVM<FnBigIntString, BigIntStringEqual<Equal>>(lir);
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callVM<FnBigIntString, BigIntStringEqual<NotEqual>>(lir);
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnBigIntString, BigIntStringCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Gt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnStringBigInt, StringBigIntCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Le: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    default:
      MOZ_CRASH("Unexpected compare op");
  }
}
// Loose equality of a boxed Value against null/undefined, producing a
// boolean.
//
// null and undefined compare equal immediately; objects additionally need
// the "emulates undefined" check (out-of-line). All other types are not
// loosely equal to null/undefined.
void CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::ValueIndex);
  Register output = ToRegister(lir->output());

  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* nullOrLikeUndefined = ool->label1();
  Label* notNullOrLikeUndefined = ool->label2();

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
    masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);

    // Check whether it's a truthy object or a falsy object that emulates
    // undefined.
    masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
  }

  Register objreg =
      masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
  branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined,
                                    notNullOrLikeUndefined, output, ool);
  // fall through

  Label done;

  // It's not null or undefined, and if it's an object it doesn't
  // emulate undefined, so it's not like undefined.
  masm.move32(Imm32(op == JSOp::Ne), output);
  masm.jump(&done);

  masm.bind(nullOrLikeUndefined);
  masm.move32(Imm32(op == JSOp::Eq), output);

  // Both branches meet here.
  masm.bind(&done);
}
// Branching variant of visitIsNullOrLikeUndefinedV: instead of materializing
// a boolean, jump straight to the true/false successor blocks. JSOp::Ne is
// handled by swapping the successors up front.
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(
    LIsNullOrLikeUndefinedAndBranchV* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  const ValueOperand value =
      ToValue(lir, LIsNullOrLikeUndefinedAndBranchV::Value);

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  auto* ool = new (alloc()) OutOfLineTestObject();
  addOutOfLineCode(ool, lir->cmpMir());

  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
    masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);

    masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
  }

  // Objects that emulate undefined are loosely equal to null/undefined.
  Register objreg =
      masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
  Register scratch = ToRegister(lir->temp());
  testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch, ool);
}
// Loose equality of a known-object operand against null/undefined.
//
// An object is only loosely equal to null/undefined when it emulates
// undefined, so the result reduces to that single (out-of-line) test.
void CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);
  MOZ_ASSERT(lir->mir()->lhs()->type() == MIRType::Object);

  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");

  Register objreg = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* emulatesUndefined = ool->label1();
  Label* doesntEmulateUndefined = ool->label2();

  branchTestObjectEmulatesUndefined(objreg, emulatesUndefined,
                                    doesntEmulateUndefined, output, ool);

  Label done;

  masm.move32(Imm32(op == JSOp::Ne), output);
  masm.jump(&done);

  masm.bind(emulatesUndefined);
  masm.move32(Imm32(op == JSOp::Eq), output);
  masm.bind(&done);
}
// Branching variant of visitIsNullOrLikeUndefinedT for a known-object
// operand: jump to the true/false successors based on the "emulates
// undefined" test. JSOp::Ne is handled by swapping the successors up front.
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(
    LIsNullOrLikeUndefinedAndBranchT* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);
  MOZ_ASSERT(lir->cmpMir()->lhs()->type() == MIRType::Object);

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  Register input = ToRegister(lir->getOperand(0));

  auto* ool = new (alloc()) OutOfLineTestObject();
  addOutOfLineCode(ool, lir->cmpMir());

  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  // Objects that emulate undefined are loosely equal to null/undefined.
  Register scratch = ToRegister(lir->temp());
  testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
}
11306 void CodeGenerator::visitIsNull(LIsNull* lir) {
11307 MCompare::CompareType compareType = lir->mir()->compareType();
11308 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11310 JSOp op = lir->mir()->jsop();
11311 MOZ_ASSERT(IsStrictEqualityOp(op));
11313 const ValueOperand value = ToValue(lir, LIsNull::ValueIndex);
11314 Register output = ToRegister(lir->output());
11316 Assembler::Condition cond = JSOpToCondition(compareType, op);
11317 masm.testNullSet(cond, value, output);
11320 void CodeGenerator::visitIsUndefined(LIsUndefined* lir) {
11321 MCompare::CompareType compareType = lir->mir()->compareType();
11322 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
11324 JSOp op = lir->mir()->jsop();
11325 MOZ_ASSERT(IsStrictEqualityOp(op));
11327 const ValueOperand value = ToValue(lir, LIsUndefined::ValueIndex);
11328 Register output = ToRegister(lir->output());
11330 Assembler::Condition cond = JSOpToCondition(compareType, op);
11331 masm.testUndefinedSet(cond, value, output);
11334 void CodeGenerator::visitIsNullAndBranch(LIsNullAndBranch* lir) {
11335 MCompare::CompareType compareType = lir->cmpMir()->compareType();
11336 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11338 JSOp op = lir->cmpMir()->jsop();
11339 MOZ_ASSERT(IsStrictEqualityOp(op));
11341 const ValueOperand value = ToValue(lir, LIsNullAndBranch::Value);
11343 Assembler::Condition cond = JSOpToCondition(compareType, op);
11344 testNullEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
11347 void CodeGenerator::visitIsUndefinedAndBranch(LIsUndefinedAndBranch* lir) {
11348 MCompare::CompareType compareType = lir->cmpMir()->compareType();
11349 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
11351 JSOp op = lir->cmpMir()->jsop();
11352 MOZ_ASSERT(IsStrictEqualityOp(op));
11354 const ValueOperand value = ToValue(lir, LIsUndefinedAndBranch::Value);
11356 Assembler::Condition cond = JSOpToCondition(compareType, op);
11357 testUndefinedEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
11360 void CodeGenerator::visitSameValueDouble(LSameValueDouble* lir) {
11361 FloatRegister left = ToFloatRegister(lir->left());
11362 FloatRegister right = ToFloatRegister(lir->right());
11363 FloatRegister temp = ToFloatRegister(lir->temp0());
11364 Register output = ToRegister(lir->output());
11366 masm.sameValueDouble(left, right, temp, output);
// SameValue on two boxed Values. Fast path: if the two 64-bit boxed
// representations are bit-identical the answer is true; otherwise fall back
// to the js::SameValue VM call out of line.
11369 void CodeGenerator::visitSameValue(LSameValue* lir) {
11370 ValueOperand lhs = ToValue(lir, LSameValue::LhsIndex);
11371 ValueOperand rhs = ToValue(lir, LSameValue::RhsIndex);
11372 Register output = ToRegister(lir->output());
11374 using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
11375 OutOfLineCode* ool =
11376 oolCallVM<Fn, SameValue>(lir, ArgList(lhs, rhs), StoreRegisterTo(output));
11378 // First check to see if the values have identical bits.
11379 // This is correct for SameValue because SameValue(NaN,NaN) is true,
11380 // and SameValue(0,-0) is false.
// (NaN payload bits are canonicalized in boxed Values, so equal bits imply
// SameValue; 0 and -0 box to different bits, so they fall to the VM call.)
11381 masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
11382 ool->entry());
11383 masm.move32(Imm32(1), output);
11385 // If this fails, call SameValue.
11386 masm.bind(ool->rejoin());
// Emit a call to the per-zone string concatenation stub for |lhs| + |rhs|.
// The stub leaves the result string in |output|, or nullptr on failure, in
// which case we call ConcatStrings<CanGC> out of line.
11389 void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs,
11390 Register output) {
11391 using Fn =
11392 JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
11393 OutOfLineCode* ool = oolCallVM<Fn, ConcatStrings<CanGC>>(
11394 lir, ArgList(lhs, rhs, static_cast<Imm32>(int32_t(gc::Heap::Default))),
11395 StoreRegisterTo(output));
// The stub pointer is read with a barrier-tracking accessor so the zone stub
// gets read-barriered once per compilation.
11397 const JitZone* jitZone = gen->realm->zone()->jitZone();
11398 JitCode* stringConcatStub =
11399 jitZone->stringConcatStubNoBarrier(&zoneStubsToReadBarrier_);
11400 masm.call(stringConcatStub);
// Stub signals failure by returning nullptr in |output|.
11401 masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());
11403 masm.bind(ool->rejoin());
// LConcat uses a fixed register assignment because the shared concat stub
// (generateStringConcatStub) hard-codes CallTempReg0..5; the asserts below
// verify the register allocator honored that contract.
11406 void CodeGenerator::visitConcat(LConcat* lir) {
11407 Register lhs = ToRegister(lir->lhs());
11408 Register rhs = ToRegister(lir->rhs());
11410 Register output = ToRegister(lir->output());
11412 MOZ_ASSERT(lhs == CallTempReg0);
11413 MOZ_ASSERT(rhs == CallTempReg1);
11414 MOZ_ASSERT(ToRegister(lir->temp0()) == CallTempReg0);
11415 MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg1);
11416 MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg2);
11417 MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg3);
11418 MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg4);
11419 MOZ_ASSERT(output == CallTempReg5);
11421 emitConcat(lir, lhs, rhs, output);
// Emit a character-by-character copy loop, optionally inflating Latin-1
// source chars into two-byte destination chars. NOTE: clobbers |from|, |to|
// and |len| (the pointers are advanced and |len| counts down to zero).
11424 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
11425 Register len, Register byteOpScratch,
11426 CharEncoding fromEncoding,
11427 CharEncoding toEncoding) {
11428 // Copy |len| char16_t code units from |from| to |to|. Assumes len > 0
11429 // (checked below in debug builds), and when done |to| must point to the
11430 // next available char.
11432 #ifdef DEBUG
11433 Label ok;
11434 masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
11435 masm.assumeUnreachable("Length should be greater than 0.");
11436 masm.bind(&ok);
11437 #endif
// Deflating TwoByte -> Latin1 is not supported by this loop.
11439 MOZ_ASSERT_IF(toEncoding == CharEncoding::Latin1,
11440 fromEncoding == CharEncoding::Latin1);
11442 size_t fromWidth =
11443 fromEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
11444 size_t toWidth =
11445 toEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
11447 Label start;
11448 masm.bind(&start);
11449 masm.loadChar(Address(from, 0), byteOpScratch, fromEncoding);
11450 masm.storeChar(byteOpScratch, Address(to, 0), toEncoding);
11451 masm.addPtr(Imm32(fromWidth), from);
11452 masm.addPtr(Imm32(toWidth), to);
// Loop until |len| hits zero.
11453 masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
// Convenience overload: copy without changing encoding (source and
// destination use the same CharEncoding).
11456 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
11457 Register len, Register byteOpScratch,
11458 CharEncoding encoding) {
11459 CopyStringChars(masm, to, from, len, byteOpScratch, encoding, encoding);
// Copy all chars of linear string |input| into the two-byte buffer
// |destChars|, inflating when |input| is Latin-1. NOTE: clobbers |input|
// (it is repurposed as the source-chars pointer) as well as temp1/temp2.
11462 static void CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input,
11463 Register destChars, Register temp1,
11464 Register temp2) {
11465 // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
11466 // have to inflate.
11468 Label isLatin1, done;
11469 masm.loadStringLength(input, temp1);
11470 masm.branchLatin1String(input, &isLatin1);
11472 masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
11473 masm.movePtr(temp2, input);
11474 CopyStringChars(masm, destChars, input, temp1, temp2,
11475 CharEncoding::TwoByte);
11476 masm.jump(&done);
11478 masm.bind(&isLatin1);
11480 masm.loadStringChars(input, temp2, CharEncoding::Latin1);
11481 masm.movePtr(temp2, input);
// Latin1 -> TwoByte copy inflates each char to a char16_t.
11482 CopyStringChars(masm, destChars, input, temp1, temp2, CharEncoding::Latin1,
11483 CharEncoding::TwoByte);
11485 masm.bind(&done);
// Allocate a JSThinInlineString (when |length| fits) or a JSFatInlineString
// in |output|, initialize its flags and length, and jump to |failure| if GC
// allocation fails. |length| must already fit a fat inline string (asserted
// in debug builds below).
11488 static void AllocateThinOrFatInlineString(MacroAssembler& masm, Register output,
11489 Register length, Register temp,
11490 gc::Heap initialStringHeap,
11491 Label* failure,
11492 CharEncoding encoding) {
11493 #ifdef DEBUG
11494 size_t maxInlineLength;
11495 if (encoding == CharEncoding::Latin1) {
11496 maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
11497 } else {
11498 maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
11501 Label ok;
11502 masm.branch32(Assembler::BelowOrEqual, length, Imm32(maxInlineLength), &ok);
11503 masm.assumeUnreachable("string length too large to be allocated as inline");
11504 masm.bind(&ok);
11505 #endif
// Choose thin vs fat by comparing against the thin-inline capacity for this
// encoding.
11507 size_t maxThinInlineLength;
11508 if (encoding == CharEncoding::Latin1) {
11509 maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
11510 } else {
11511 maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;
11514 Label isFat, allocDone;
11515 masm.branch32(Assembler::Above, length, Imm32(maxThinInlineLength), &isFat);
11517 uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
11518 if (encoding == CharEncoding::Latin1) {
11519 flags |= JSString::LATIN1_CHARS_BIT;
11521 masm.newGCString(output, temp, initialStringHeap, failure);
11522 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
11523 masm.jump(&allocDone);
11525 masm.bind(&isFat);
11527 uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
11528 if (encoding == CharEncoding::Latin1) {
11529 flags |= JSString::LATIN1_CHARS_BIT;
11531 masm.newGCFatInlineString(output, temp, initialStringHeap, failure);
11532 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
11534 masm.bind(&allocDone);
11536 // Store length.
11537 masm.store32(length, Address(output, JSString::offsetOfLength()));
// Concatenate two linear strings into a freshly allocated inline string.
// On entry the combined result length is in temp2 (see caller). Jumps to
// |failure| for ropes or on allocation failure. NOTE: clobbers |lhs| and
// |rhs| (used as char pointers during the copies).
11540 static void ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs,
11541 Register output, Register temp1, Register temp2,
11542 Register temp3, gc::Heap initialStringHeap,
11543 Label* failure, CharEncoding encoding) {
11544 JitSpew(JitSpew_Codegen, "# Emitting ConcatInlineString (encoding=%s)",
11545 (encoding == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
11547 // State: result length in temp2.
11549 // Ensure both strings are linear.
11550 masm.branchIfRope(lhs, failure);
11551 masm.branchIfRope(rhs, failure);
11553 // Allocate a JSThinInlineString or JSFatInlineString.
11554 AllocateThinOrFatInlineString(masm, output, temp2, temp1, initialStringHeap,
11555 failure, encoding);
11557 // Load chars pointer in temp2.
11558 masm.loadInlineStringCharsForStore(output, temp2);
// For two-byte results the source may still be Latin-1, so inflate on the
// fly; for Latin-1 results both inputs must already be Latin-1.
11560 auto copyChars = [&](Register src) {
11561 if (encoding == CharEncoding::TwoByte) {
11562 CopyStringCharsMaybeInflate(masm, src, temp2, temp1, temp3);
11563 } else {
11564 masm.loadStringLength(src, temp3);
11565 masm.loadStringChars(src, temp1, CharEncoding::Latin1);
11566 masm.movePtr(temp1, src);
11567 CopyStringChars(masm, temp2, src, temp3, temp1, CharEncoding::Latin1);
11571 // Copy lhs chars. Note that this advances temp2 to point to the next
11572 // char. This also clobbers the lhs register.
11573 copyChars(lhs);
11575 // Copy rhs chars. Clobbers the rhs register.
11576 copyChars(rhs);
// Emit the substring fast paths for LSubstr. In order of preference:
//  1. zero length        -> the empty atom
//  2. whole input string -> the input itself
//  3. length 1 or 2      -> a shared static string (this patch's addition)
//  4. short result       -> a new thin/fat inline string (chars copied)
//  5. otherwise          -> a dependent string sharing the input's chars
// Ropes and allocation failures go to the SubstringKernel VM call.
11579 void CodeGenerator::visitSubstr(LSubstr* lir) {
11580 Register string = ToRegister(lir->string());
11581 Register begin = ToRegister(lir->begin());
11582 Register length = ToRegister(lir->length());
11583 Register output = ToRegister(lir->output());
11584 Register temp0 = ToRegister(lir->temp0());
11585 Register temp2 = ToRegister(lir->temp2());
11587 // On x86 there are not enough registers. In that case reuse the string
11588 // register as temporary.
11589 Register temp1 =
11590 lir->temp1()->isBogusTemp() ? string : ToRegister(lir->temp1());
11592 Label isLatin1, notInline, nonZero, nonInput, isInlinedLatin1;
11594 // For every edge case use the C++ variant.
11595 // Note: we also use this upon allocation failure in newGCString and
11596 // newGCFatInlineString. To squeeze out even more performance those failures
11597 // can be handled by allocate in ool code and returning to jit code to fill
11598 // in all data.
11599 using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
11600 int32_t len);
11601 OutOfLineCode* ool = oolCallVM<Fn, SubstringKernel>(
11602 lir, ArgList(string, begin, length), StoreRegisterTo(output));
11603 Label* slowPath = ool->entry();
11604 Label* done = ool->rejoin();
11606 // Zero length, return emptystring.
11607 masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
11608 const JSAtomState& names = gen->runtime->names();
11609 masm.movePtr(ImmGCPtr(names.empty_), output);
11610 masm.jump(done);
11612 // Substring from 0..|str.length|, return str.
11613 masm.bind(&nonZero);
11614 masm.branch32(Assembler::NotEqual,
11615 Address(string, JSString::offsetOfLength()), length, &nonInput);
11616 #ifdef DEBUG
11618 Label ok;
11619 masm.branchTest32(Assembler::Zero, begin, begin, &ok);
11620 masm.assumeUnreachable("length == str.length implies begin == 0");
11621 masm.bind(&ok);
11623 #endif
11624 masm.movePtr(string, output);
11625 masm.jump(done);
11627 // Use slow path for ropes.
11628 masm.bind(&nonInput);
11629 masm.branchIfRope(string, slowPath);
11631 // Optimize one and two character strings.
11632 Label nonStatic;
11633 masm.branch32(Assembler::Above, length, Imm32(2), &nonStatic);
11635 Label loadLengthOne, loadLengthTwo;
// Load the first char into temp2 and (for length two) the second char into
// temp0; these feed the static-strings table lookups below.
11637 auto loadChars = [&](CharEncoding encoding, bool fallthru) {
11638 size_t size = encoding == CharEncoding::Latin1 ? sizeof(JS::Latin1Char)
11639 : sizeof(char16_t);
11641 masm.loadStringChars(string, temp0, encoding);
11642 masm.loadChar(temp0, begin, temp2, encoding);
11643 masm.branch32(Assembler::Equal, length, Imm32(1), &loadLengthOne);
11644 masm.loadChar(temp0, begin, temp0, encoding, int32_t(size));
11645 if (!fallthru) {
11646 masm.jump(&loadLengthTwo);
11650 Label isLatin1;
11651 masm.branchLatin1String(string, &isLatin1);
11652 loadChars(CharEncoding::TwoByte, /* fallthru = */ false);
11654 masm.bind(&isLatin1);
11655 loadChars(CharEncoding::Latin1, /* fallthru = */ true);
11657 // Try to load a length-two static string.
11658 masm.bind(&loadLengthTwo);
11659 masm.lookupStaticString(temp2, temp0, output, gen->runtime->staticStrings(),
11660 &nonStatic);
11661 masm.jump(done);
11663 // Try to load a length-one static string.
11664 masm.bind(&loadLengthOne);
11665 masm.lookupStaticString(temp2, output, gen->runtime->staticStrings(),
11666 &nonStatic);
11667 masm.jump(done);
// Chars not covered by the static-strings tables fall through to the
// inline-string allocation below.
11669 masm.bind(&nonStatic);
11671 // Allocate either a JSThinInlineString or JSFatInlineString, or jump to
11672 // notInline if we need a dependent string.
11674 static_assert(JSThinInlineString::MAX_LENGTH_LATIN1 <
11675 JSFatInlineString::MAX_LENGTH_LATIN1);
11676 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <
11677 JSFatInlineString::MAX_LENGTH_TWO_BYTE);
11679 // Use temp2 to store the JS(Thin|Fat)InlineString flags. This avoids having
11680 // duplicate newGCString/newGCFatInlineString codegen for Latin1 vs TwoByte
11681 // strings.
11683 Label isLatin1, allocFat, allocThin, allocDone;
11684 masm.branchLatin1String(string, &isLatin1);
11686 masm.branch32(Assembler::Above, length,
11687 Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE), &notInline);
11688 masm.move32(Imm32(0), temp2);
11689 masm.branch32(Assembler::Above, length,
11690 Imm32(JSThinInlineString::MAX_LENGTH_TWO_BYTE), &allocFat);
11691 masm.jump(&allocThin);
11694 masm.bind(&isLatin1);
11696 masm.branch32(Assembler::Above, length,
11697 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &notInline);
11698 masm.move32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
11699 masm.branch32(Assembler::Above, length,
11700 Imm32(JSThinInlineString::MAX_LENGTH_LATIN1), &allocFat);
11703 masm.bind(&allocThin);
11705 masm.newGCString(output, temp0, initialStringHeap(), slowPath);
11706 masm.or32(Imm32(JSString::INIT_THIN_INLINE_FLAGS), temp2);
11707 masm.jump(&allocDone);
11709 masm.bind(&allocFat);
11711 masm.newGCFatInlineString(output, temp0, initialStringHeap(), slowPath);
11712 masm.or32(Imm32(JSString::INIT_FAT_INLINE_FLAGS), temp2);
11715 masm.bind(&allocDone);
11716 masm.store32(temp2, Address(output, JSString::offsetOfFlags()));
11717 masm.store32(length, Address(output, JSString::offsetOfLength()));
// Copy the substring's chars into the new inline string. When temp1 aliases
// |string| (x86), preserve the string register across the copy with
// push/pop.
11720 auto initializeInlineString = [&](CharEncoding encoding) {
11721 masm.loadStringChars(string, temp0, encoding);
11722 masm.addToCharPtr(temp0, begin, encoding);
11723 if (temp1 == string) {
11724 masm.push(string);
11726 masm.loadInlineStringCharsForStore(output, temp1);
11727 CopyStringChars(masm, temp1, temp0, length, temp2, encoding);
// |length| was clobbered by the copy loop; reload it from the output.
11728 masm.loadStringLength(output, length);
11729 if (temp1 == string) {
11730 masm.pop(string);
11732 masm.jump(done);
11735 masm.branchLatin1String(string, &isInlinedLatin1);
11736 initializeInlineString(CharEncoding::TwoByte);
11738 masm.bind(&isInlinedLatin1);
11739 initializeInlineString(CharEncoding::Latin1);
11741 // Handle other cases with a DependentString.
11742 masm.bind(&notInline);
11743 masm.newGCString(output, temp0, gen->initialStringHeap(), slowPath);
11744 masm.store32(length, Address(output, JSString::offsetOfLength()));
11745 masm.storeDependentStringBase(string, output);
// A dependent string points into the base string's char buffer instead of
// owning its own copy.
11747 auto initializeDependentString = [&](CharEncoding encoding) {
11748 uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
11749 if (encoding == CharEncoding::Latin1) {
11750 flags |= JSString::LATIN1_CHARS_BIT;
11753 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
11754 masm.loadNonInlineStringChars(string, temp0, encoding);
11755 masm.addToCharPtr(temp0, begin, encoding);
11756 masm.storeNonInlineStringChars(temp0, output);
11757 masm.jump(done);
11760 masm.branchLatin1String(string, &isLatin1);
11761 initializeDependentString(CharEncoding::TwoByte);
11763 masm.bind(&isLatin1);
11764 initializeDependentString(CharEncoding::Latin1);
11766 masm.bind(done);
// Generate the per-zone string-concatenation stub called by emitConcat.
// Inputs in CallTempReg0/1 (lhs/rhs); result in CallTempReg5 (output), or
// nullptr in output on failure so the caller can take its OOL VM path.
// Fast paths: empty lhs/rhs return the other operand; short results build an
// inline string; everything else becomes a rope.
11769 JitCode* JitZone::generateStringConcatStub(JSContext* cx) {
11770 JitSpew(JitSpew_Codegen, "# Emitting StringConcat stub");
11772 TempAllocator temp(&cx->tempLifoAlloc());
11773 JitContext jcx(cx);
11774 StackMacroAssembler masm(cx, temp);
11775 AutoCreatedBy acb(masm, "JitZone::generateStringConcatStub");
11777 Register lhs = CallTempReg0;
11778 Register rhs = CallTempReg1;
11779 Register temp1 = CallTempReg2;
11780 Register temp2 = CallTempReg3;
11781 Register temp3 = CallTempReg4;
11782 Register output = CallTempReg5;
11784 Label failure;
11785 #ifdef JS_USE_LINK_REGISTER
11786 masm.pushReturnAddress();
11787 #endif
11788 masm.Push(FramePointer);
11789 masm.moveStackPtrTo(FramePointer);
11791 // If lhs is empty, return rhs.
11792 Label leftEmpty;
11793 masm.loadStringLength(lhs, temp1);
11794 masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);
11796 // If rhs is empty, return lhs.
11797 Label rightEmpty;
11798 masm.loadStringLength(rhs, temp2);
11799 masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);
// temp2 now holds the combined result length.
11801 masm.add32(temp1, temp2);
11803 // Check if we can use a JSInlineString. The result is a Latin1 string if
11804 // lhs and rhs are both Latin1, so we AND the flags.
11805 Label isInlineTwoByte, isInlineLatin1;
11806 masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
11807 masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);
11809 Label isLatin1, notInline;
11810 masm.branchTest32(Assembler::NonZero, temp1,
11811 Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
11813 masm.branch32(Assembler::BelowOrEqual, temp2,
11814 Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
11815 &isInlineTwoByte);
11816 masm.jump(&notInline);
11818 masm.bind(&isLatin1);
11820 masm.branch32(Assembler::BelowOrEqual, temp2,
11821 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &isInlineLatin1);
11823 masm.bind(&notInline);
11825 // Keep AND'ed flags in temp1.
11827 // Ensure result length <= JSString::MAX_LENGTH.
11828 masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);
11830 // Allocate a new rope, guaranteed to be in the nursery if initialStringHeap
11831 // == gc::Heap::Default. (As a result, no post barriers are needed below.)
11832 masm.newGCString(output, temp3, initialStringHeap, &failure);
11834 // Store rope length and flags. temp1 still holds the result of AND'ing the
11835 // lhs and rhs flags, so we just have to clear the other flags to get our rope
11836 // flags (Latin1 if both lhs and rhs are Latin1).
11837 static_assert(JSString::INIT_ROPE_FLAGS == 0,
11838 "Rope type flags must have no bits set");
11839 masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
11840 masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
11841 masm.store32(temp2, Address(output, JSString::offsetOfLength()));
11843 // Store left and right nodes.
11844 masm.storeRopeChildren(lhs, rhs, output);
11845 masm.pop(FramePointer);
11846 masm.ret();
11848 masm.bind(&leftEmpty);
11849 masm.mov(rhs, output);
11850 masm.pop(FramePointer);
11851 masm.ret();
11853 masm.bind(&rightEmpty);
11854 masm.mov(lhs, output);
11855 masm.pop(FramePointer);
11856 masm.ret();
11858 masm.bind(&isInlineTwoByte);
11859 ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
11860 initialStringHeap, &failure, CharEncoding::TwoByte);
11861 masm.pop(FramePointer);
11862 masm.ret();
11864 masm.bind(&isInlineLatin1);
11865 ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
11866 initialStringHeap, &failure, CharEncoding::Latin1);
11867 masm.pop(FramePointer);
11868 masm.ret();
// Failure protocol: nullptr in |output|, checked by emitConcat.
11870 masm.bind(&failure);
11871 masm.movePtr(ImmPtr(nullptr), output);
11872 masm.pop(FramePointer);
11873 masm.ret();
11875 Linker linker(masm);
11876 JitCode* code = linker.newCode(cx, CodeKind::Other);
11878 CollectPerfSpewerJitCodeProfile(code, "StringConcatStub");
11879 #ifdef MOZ_VTUNE
11880 vtune::MarkStub(code, "StringConcatStub");
11881 #endif
11883 return code;
// Trampoline that calls js_free on the pointer passed in CallTempReg0,
// preserving all other volatile registers around the ABI call.
11886 void JitRuntime::generateFreeStub(MacroAssembler& masm) {
11887 AutoCreatedBy acb(masm, "JitRuntime::generateFreeStub");
11889 const Register regSlots = CallTempReg0;
11891 freeStubOffset_ = startTrampolineCode(masm);
11893 #ifdef JS_USE_LINK_REGISTER
11894 masm.pushReturnAddress();
11895 #endif
// Save every volatile register except the argument itself.
11896 AllocatableRegisterSet regs(RegisterSet::Volatile());
11897 regs.takeUnchecked(regSlots);
11898 LiveRegisterSet save(regs.asLiveSet());
11899 masm.PushRegsInMask(save);
11901 const Register regTemp = regs.takeAnyGeneral();
11902 MOZ_ASSERT(regTemp != regSlots);
11904 using Fn = void (*)(void* p);
11905 masm.setupUnalignedABICall(regTemp);
11906 masm.passABIArg(regSlots);
11907 masm.callWithABI<Fn, js_free>(MoveOp::GENERAL,
11908 CheckUnsafeCallWithABI::DontCheckOther);
11910 masm.PopRegsInMask(save);
11912 masm.ret();
// Trampoline that lazily links the current Ion script: it builds a LazyLink
// fake exit frame, calls LazyLinkTopActivation, then tail-jumps to the code
// pointer that call returned.
11915 void JitRuntime::generateLazyLinkStub(MacroAssembler& masm) {
11916 AutoCreatedBy acb(masm, "JitRuntime::generateLazyLinkStub");
11918 lazyLinkStubOffset_ = startTrampolineCode(masm);
11920 #ifdef JS_USE_LINK_REGISTER
11921 masm.pushReturnAddress();
11922 #endif
11923 masm.Push(FramePointer);
11924 masm.moveStackPtrTo(FramePointer);
11926 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
11927 Register temp0 = regs.takeAny();
11928 Register temp1 = regs.takeAny();
11929 Register temp2 = regs.takeAny();
11931 masm.loadJSContext(temp0);
11932 masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::LazyLink);
// temp1 = pointer to the exit frame we just pushed.
11933 masm.moveStackPtrTo(temp1);
11935 using Fn = uint8_t* (*)(JSContext* cx, LazyLinkExitFrameLayout* frame);
11936 masm.setupUnalignedABICall(temp2);
11937 masm.passABIArg(temp0);
11938 masm.passABIArg(temp1);
11939 masm.callWithABI<Fn, LazyLinkTopActivation>(
11940 MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
11942 // Discard exit frame and restore frame pointer.
11943 masm.leaveExitFrame(0);
11944 masm.pop(FramePointer);
11946 #ifdef JS_USE_LINK_REGISTER
11947 // Restore the return address such that the emitPrologue function of the
11948 // CodeGenerator can push it back on the stack with pushReturnAddress.
11949 masm.popReturnAddress();
11950 #endif
// Jump into the freshly linked code (address returned in ReturnReg).
11951 masm.jump(ReturnReg);
// Trampoline that runs a call in the C++ interpreter via
// InvokeFromInterpreterStub, using an InterpreterStub fake exit frame, and
// returns the result Value in JSReturnOperand.
11954 void JitRuntime::generateInterpreterStub(MacroAssembler& masm) {
11955 AutoCreatedBy acb(masm, "JitRuntime::generateInterpreterStub");
11957 interpreterStubOffset_ = startTrampolineCode(masm);
11959 #ifdef JS_USE_LINK_REGISTER
11960 masm.pushReturnAddress();
11961 #endif
11962 masm.Push(FramePointer);
11963 masm.moveStackPtrTo(FramePointer);
11965 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
11966 Register temp0 = regs.takeAny();
11967 Register temp1 = regs.takeAny();
11968 Register temp2 = regs.takeAny();
11970 masm.loadJSContext(temp0);
11971 masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::InterpreterStub);
11972 masm.moveStackPtrTo(temp1);
11974 using Fn = bool (*)(JSContext* cx, InterpreterStubExitFrameLayout* frame);
11975 masm.setupUnalignedABICall(temp2);
11976 masm.passABIArg(temp0);
11977 masm.passABIArg(temp1);
11978 masm.callWithABI<Fn, InvokeFromInterpreterStub>(
11979 MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
// A false return means an exception is pending; go to the failure label.
11981 masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
11983 // Discard exit frame and restore frame pointer.
11984 masm.leaveExitFrame(0);
11985 masm.pop(FramePointer);
11987 // InvokeFromInterpreterStub stores the return value in argv[0], where the
11988 // caller stored |this|. Subtract |sizeof(void*)| for the frame pointer we
11989 // just popped.
11990 masm.loadValue(Address(masm.getStackPointer(),
11991 JitFrameLayout::offsetOfThis() - sizeof(void*)),
11992 JSReturnOperand);
11993 masm.ret();
// Stub that narrows a double Value in R0 to an int32 Value when the
// conversion is exact (no fractional part; -0 allowed since the negative
// zero check is disabled). Non-doubles and inexact doubles leave R0 as-is.
11996 void JitRuntime::generateDoubleToInt32ValueStub(MacroAssembler& masm) {
11997 AutoCreatedBy acb(masm, "JitRuntime::generateDoubleToInt32ValueStub");
11998 doubleToInt32ValueStubOffset_ = startTrampolineCode(masm);
12000 Label done;
12001 masm.branchTestDouble(Assembler::NotEqual, R0, &done);
12003 masm.unboxDouble(R0, FloatReg0);
// convertDoubleToInt32 jumps to |done| (leaving R0 untouched) when the
// double is not exactly representable as int32.
12004 masm.convertDoubleToInt32(FloatReg0, R1.scratchReg(), &done,
12005 /* negativeZeroCheck = */ false);
12006 masm.tagValue(JSVAL_TYPE_INT32, R1.scratchReg(), R0);
12008 masm.bind(&done);
12009 masm.abiret();
// Ensure |str| is a linear string: non-ropes are returned unchanged; ropes
// are flattened out of line via LinearizeForCharAccess.
12012 void CodeGenerator::visitLinearizeString(LLinearizeString* lir) {
12013 Register str = ToRegister(lir->str());
12014 Register output = ToRegister(lir->output());
12016 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12017 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12018 lir, ArgList(str), StoreRegisterTo(output));
12020 masm.branchIfRope(str, ool->entry());
12022 masm.movePtr(str, output);
12023 masm.bind(ool->rejoin());
// Like visitLinearizeString, but only linearizes when the char at |index|
// cannot be loaded directly (branchIfNotCanLoadStringChar decides).
12026 void CodeGenerator::visitLinearizeForCharAccess(LLinearizeForCharAccess* lir) {
12027 Register str = ToRegister(lir->str());
12028 Register index = ToRegister(lir->index());
12029 Register output = ToRegister(lir->output());
12031 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12032 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12033 lir, ArgList(str), StoreRegisterTo(output));
12035 masm.branchIfNotCanLoadStringChar(str, index, output, ool->entry());
12037 masm.movePtr(str, output);
12038 masm.bind(ool->rejoin());
// String.prototype.charCodeAt: load the char code at a constant index 0 (the
// bogus-index case) or at a register index, falling back to the
// jit::CharCodeAt VM call when the inline load fails.
12041 void CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) {
12042 Register str = ToRegister(lir->str());
12043 Register output = ToRegister(lir->output());
12044 Register temp0 = ToRegister(lir->temp0());
12045 Register temp1 = ToRegister(lir->temp1());
12047 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
// A bogus index operand means the index was constant-folded to 0.
12049 if (lir->index()->isBogus()) {
12050 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
12051 StoreRegisterTo(output));
12052 masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
12053 masm.bind(ool->rejoin());
12054 } else {
12055 Register index = ToRegister(lir->index());
12057 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
12058 StoreRegisterTo(output));
12059 masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
12060 masm.bind(ool->rejoin());
// charCodeAt variant that yields -1 instead of throwing for out-of-bounds
// indices: |output| is preset to -1 and the bounds check branches straight
// to the rejoin point, skipping the char load.
12064 void CodeGenerator::visitCharCodeAtOrNegative(LCharCodeAtOrNegative* lir) {
12065 Register str = ToRegister(lir->str());
12066 Register output = ToRegister(lir->output());
12067 Register temp0 = ToRegister(lir->temp0());
12068 Register temp1 = ToRegister(lir->temp1());
12070 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12072 // Return -1 for out-of-bounds access.
12073 masm.move32(Imm32(-1), output);
12075 if (lir->index()->isBogus()) {
12076 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
12077 StoreRegisterTo(output));
// Index 0 is out of bounds only for the empty string.
12079 masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
12080 Imm32(0), ool->rejoin());
12081 masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
12082 masm.bind(ool->rejoin());
12083 } else {
12084 Register index = ToRegister(lir->index());
12086 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
12087 StoreRegisterTo(output));
// Spectre-safe bounds check; OOB keeps the preset -1 and rejoins.
12089 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
12090 temp0, ool->rejoin());
12091 masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
12092 masm.bind(ool->rejoin());
// Box an int32 as a Value, but produce NaN when the input is negative
// (sign bit set).
12096 void CodeGenerator::visitNegativeToNaN(LNegativeToNaN* lir) {
12097 Register input = ToRegister(lir->input());
12098 ValueOperand output = ToOutValue(lir);
12100 masm.tagValue(JSVAL_TYPE_INT32, input, output);
12102 Label done;
12103 masm.branchTest32(Assembler::NotSigned, input, input, &done);
12104 masm.moveValue(JS::NaNValue(), output);
12105 masm.bind(&done);
// String.fromCharCode: fetch the interned unit string from the runtime's
// static-strings table; codes outside the table go to the
// StringFromCharCode VM call.
12108 void CodeGenerator::visitFromCharCode(LFromCharCode* lir) {
12109 Register code = ToRegister(lir->code());
12110 Register output = ToRegister(lir->output());
12112 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12113 OutOfLineCode* ool = oolCallVM<Fn, jit::StringFromCharCode>(
12114 lir, ArgList(code), StoreRegisterTo(output));
12116 // OOL path if code >= UNIT_STATIC_LIMIT.
12117 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
12118 ool->entry());
12120 masm.bind(ool->rejoin());
// Like visitFromCharCode, but negative codes yield the empty string instead
// of taking the VM path: |output| is preset to the empty atom and a signed
// input jumps straight to the rejoin point.
12123 void CodeGenerator::visitFromCharCodeEmptyIfNegative(
12124 LFromCharCodeEmptyIfNegative* lir) {
12125 Register code = ToRegister(lir->code());
12126 Register output = ToRegister(lir->output());
12128 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12129 auto* ool = oolCallVM<Fn, jit::StringFromCharCode>(lir, ArgList(code),
12130 StoreRegisterTo(output));
12132 // Return the empty string for negative inputs.
12133 const JSAtomState& names = gen->runtime->names();
12134 masm.movePtr(ImmGCPtr(names.empty_), output);
12135 masm.branchTest32(Assembler::Signed, code, code, ool->rejoin());
12137 // OOL path if code >= UNIT_STATIC_LIMIT.
12138 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
12139 ool->entry());
12141 masm.bind(ool->rejoin());
// String.fromCodePoint: small code points come from the static-strings
// table; larger ones get a fresh thin inline string holding either one BMP
// char16_t or a two-unit surrogate pair. Invalid code points (> NonBMPMax)
// trigger a bailout, and inline-allocation failure falls back to the
// StringFromCodePoint VM call.
12144 void CodeGenerator::visitFromCodePoint(LFromCodePoint* lir) {
12145 Register codePoint = ToRegister(lir->codePoint());
12146 Register output = ToRegister(lir->output());
12147 Register temp0 = ToRegister(lir->temp0());
12148 Register temp1 = ToRegister(lir->temp1());
12149 LSnapshot* snapshot = lir->snapshot();
12151 // The OOL path is only taken when we can't allocate the inline string.
12152 using Fn = JSString* (*)(JSContext*, int32_t);
12153 OutOfLineCode* ool = oolCallVM<Fn, jit::StringFromCodePoint>(
12154 lir, ArgList(codePoint), StoreRegisterTo(output));
12156 Label isTwoByte;
12157 Label* done = ool->rejoin();
12159 static_assert(
12160 StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
12161 "Latin-1 strings can be loaded from static strings");
12164 masm.lookupStaticString(codePoint, output, gen->runtime->staticStrings(),
12165 &isTwoByte);
12166 masm.jump(done);
12168 masm.bind(&isTwoByte);
12170 // Use a bailout if the input is not a valid code point, because
12171 // MFromCodePoint is movable and it'd be observable when a moved
12172 // fromCodePoint throws an exception before its actual call site.
12173 bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax),
12174 snapshot);
12176 // Allocate a JSThinInlineString.
12178 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE >= 2,
12179 "JSThinInlineString can hold a supplementary code point");
12181 uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
12182 masm.newGCString(output, temp0, gen->initialStringHeap(), ool->entry());
12183 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
// Code points >= NonBMPMin need a surrogate pair (length 2); the rest are
// single BMP code units (length 1).
12186 Label isSupplementary;
12187 masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(unicode::NonBMPMin),
12188 &isSupplementary);
12190 // Store length.
12191 masm.store32(Imm32(1), Address(output, JSString::offsetOfLength()));
12193 // Load chars pointer in temp0.
12194 masm.loadInlineStringCharsForStore(output, temp0);
12196 masm.store16(codePoint, Address(temp0, 0));
12198 masm.jump(done);
12200 masm.bind(&isSupplementary);
12202 // Store length.
12203 masm.store32(Imm32(2), Address(output, JSString::offsetOfLength()));
12205 // Load chars pointer in temp0.
12206 masm.loadInlineStringCharsForStore(output, temp0);
12208 // Inlined unicode::LeadSurrogate(uint32_t).
12209 masm.move32(codePoint, temp1);
12210 masm.rshift32(Imm32(10), temp1);
12211 masm.add32(Imm32(unicode::LeadSurrogateMin - (unicode::NonBMPMin >> 10)),
12212 temp1);
12214 masm.store16(temp1, Address(temp0, 0));
12216 // Inlined unicode::TrailSurrogate(uint32_t).
12217 masm.move32(codePoint, temp1);
12218 masm.and32(Imm32(0x3FF), temp1);
12219 masm.or32(Imm32(unicode::TrailSurrogateMin), temp1);
12221 masm.store16(temp1, Address(temp0, sizeof(char16_t)));
12225 masm.bind(done);
// String.prototype.includes without a fast path: push both operands and call
// js::StringIncludes in the VM.
12228 void CodeGenerator::visitStringIncludes(LStringIncludes* lir) {
12229 pushArg(ToRegister(lir->searchString()));
12230 pushArg(ToRegister(lir->string()));
12232 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12233 callVM<Fn, js::StringIncludes>(lir);
12236 template <typename LIns>
12237 static void CallStringMatch(MacroAssembler& masm, LIns* lir, OutOfLineCode* ool,
12238 LiveRegisterSet volatileRegs) {
12239 Register string = ToRegister(lir->string());
12240 Register output = ToRegister(lir->output());
12241 Register tempLength = ToRegister(lir->temp0());
12242 Register tempChars = ToRegister(lir->temp1());
12243 Register maybeTempPat = ToTempRegisterOrInvalid(lir->temp2());
12245 const JSLinearString* searchString = lir->searchString();
12246 size_t length = searchString->length();
12247 MOZ_ASSERT(length == 1 || length == 2);
12249 // The additional temp register is only needed when searching for two
12250 // pattern characters.
12251 MOZ_ASSERT_IF(length == 2, maybeTempPat != InvalidReg);
12253 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
12254 masm.move32(Imm32(0), output);
12255 } else {
12256 masm.move32(Imm32(-1), output);
12259 masm.loadStringLength(string, tempLength);
12261 // Can't be a substring when the string is smaller than the search string.
12262 Label done;
12263 masm.branch32(Assembler::Below, tempLength, Imm32(length), ool->rejoin());
12265 bool searchStringIsPureTwoByte = false;
12266 if (searchString->hasTwoByteChars()) {
12267 JS::AutoCheckCannotGC nogc;
12268 searchStringIsPureTwoByte =
12269 !mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc));
12272 // Pure two-byte strings can't occur in a Latin-1 string.
12273 if (searchStringIsPureTwoByte) {
12274 masm.branchLatin1String(string, ool->rejoin());
12277 // Slow path when we need to linearize the string.
12278 masm.branchIfRope(string, ool->entry());
12280 Label restoreVolatile;
12282 auto callMatcher = [&](CharEncoding encoding) {
12283 masm.loadStringChars(string, tempChars, encoding);
12285 LiveGeneralRegisterSet liveRegs;
12286 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
12287 // Save |tempChars| to compute the result index.
12288 liveRegs.add(tempChars);
12290 #ifdef DEBUG
12291 // Save |tempLength| in debug-mode for assertions.
12292 liveRegs.add(tempLength);
12293 #endif
12295 // Exclude non-volatile registers.
12296 liveRegs.set() = GeneralRegisterSet::Intersect(
12297 liveRegs.set(), GeneralRegisterSet::Volatile());
12299 masm.PushRegsInMask(liveRegs);
12302 if (length == 1) {
12303 char16_t pat = searchString->latin1OrTwoByteChar(0);
12304 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
12305 pat <= JSString::MAX_LATIN1_CHAR);
12307 masm.move32(Imm32(pat), output);
12309 masm.setupAlignedABICall();
12310 masm.passABIArg(tempChars);
12311 masm.passABIArg(output);
12312 masm.passABIArg(tempLength);
12313 if (encoding == CharEncoding::Latin1) {
12314 using Fn = const char* (*)(const char*, char, size_t);
12315 masm.callWithABI<Fn, mozilla::SIMD::memchr8>(
12316 MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
12317 } else {
12318 using Fn = const char16_t* (*)(const char16_t*, char16_t, size_t);
12319 masm.callWithABI<Fn, mozilla::SIMD::memchr16>(
12320 MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
12322 } else {
12323 char16_t pat0 = searchString->latin1OrTwoByteChar(0);
12324 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
12325 pat0 <= JSString::MAX_LATIN1_CHAR);
12327 char16_t pat1 = searchString->latin1OrTwoByteChar(1);
12328 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
12329 pat1 <= JSString::MAX_LATIN1_CHAR);
12331 masm.move32(Imm32(pat0), output);
12332 masm.move32(Imm32(pat1), maybeTempPat);
12334 masm.setupAlignedABICall();
12335 masm.passABIArg(tempChars);
12336 masm.passABIArg(output);
12337 masm.passABIArg(maybeTempPat);
12338 masm.passABIArg(tempLength);
12339 if (encoding == CharEncoding::Latin1) {
12340 using Fn = const char* (*)(const char*, char, char, size_t);
12341 masm.callWithABI<Fn, mozilla::SIMD::memchr2x8>(
12342 MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
12343 } else {
12344 using Fn =
12345 const char16_t* (*)(const char16_t*, char16_t, char16_t, size_t);
12346 masm.callWithABI<Fn, mozilla::SIMD::memchr2x16>(
12347 MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
12351 masm.storeCallPointerResult(output);
12353 // Convert to string index for `indexOf`.
12354 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
12355 // Restore |tempChars|. (And in debug mode |tempLength|.)
12356 masm.PopRegsInMask(liveRegs);
12358 Label found;
12359 masm.branchPtr(Assembler::NotEqual, output, ImmPtr(nullptr), &found);
12361 masm.move32(Imm32(-1), output);
12362 masm.jump(&restoreVolatile);
12364 masm.bind(&found);
12366 #ifdef DEBUG
12367 // Check lower bound.
12368 Label lower;
12369 masm.branchPtr(Assembler::AboveOrEqual, output, tempChars, &lower);
12370 masm.assumeUnreachable("result pointer below string chars");
12371 masm.bind(&lower);
12373 // Compute the end position of the characters.
12374 auto scale = encoding == CharEncoding::Latin1 ? TimesOne : TimesTwo;
12375 masm.computeEffectiveAddress(BaseIndex(tempChars, tempLength, scale),
12376 tempLength);
12378 // Check upper bound.
12379 Label upper;
12380 masm.branchPtr(Assembler::Below, output, tempLength, &upper);
12381 masm.assumeUnreachable("result pointer above string chars");
12382 masm.bind(&upper);
12383 #endif
12385 masm.subPtr(tempChars, output);
12387 if (encoding == CharEncoding::TwoByte) {
12388 masm.rshiftPtr(Imm32(1), output);
12393 volatileRegs.takeUnchecked(output);
12394 volatileRegs.takeUnchecked(tempLength);
12395 volatileRegs.takeUnchecked(tempChars);
12396 if (maybeTempPat != InvalidReg) {
12397 volatileRegs.takeUnchecked(maybeTempPat);
12399 masm.PushRegsInMask(volatileRegs);
12401 // Handle the case when the input is a Latin-1 string.
12402 if (!searchStringIsPureTwoByte) {
12403 Label twoByte;
12404 masm.branchTwoByteString(string, &twoByte);
12406 callMatcher(CharEncoding::Latin1);
12407 masm.jump(&restoreVolatile);
12409 masm.bind(&twoByte);
12412 // Handle the case when the input is a two-byte string.
12413 callMatcher(CharEncoding::TwoByte);
12415 masm.bind(&restoreVolatile);
12416 masm.PopRegsInMask(volatileRegs);
12418 // Convert to bool for `includes`.
12419 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
12420 masm.cmpPtrSet(Assembler::NotEqual, output, ImmPtr(nullptr), output);
12423 masm.bind(ool->rejoin());
12426 void CodeGenerator::visitStringIncludesSIMD(LStringIncludesSIMD* lir) {
12427 Register string = ToRegister(lir->string());
12428 Register output = ToRegister(lir->output());
12429 const JSLinearString* searchString = lir->searchString();
12431 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12432 auto* ool = oolCallVM<Fn, js::StringIncludes>(
12433 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
12435 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
12438 void CodeGenerator::visitStringIndexOf(LStringIndexOf* lir) {
12439 pushArg(ToRegister(lir->searchString()));
12440 pushArg(ToRegister(lir->string()));
12442 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
12443 callVM<Fn, js::StringIndexOf>(lir);
12446 void CodeGenerator::visitStringIndexOfSIMD(LStringIndexOfSIMD* lir) {
12447 Register string = ToRegister(lir->string());
12448 Register output = ToRegister(lir->output());
12449 const JSLinearString* searchString = lir->searchString();
12451 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
12452 auto* ool = oolCallVM<Fn, js::StringIndexOf>(
12453 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
12455 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
12458 void CodeGenerator::visitStringLastIndexOf(LStringLastIndexOf* lir) {
12459 pushArg(ToRegister(lir->searchString()));
12460 pushArg(ToRegister(lir->string()));
12462 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
12463 callVM<Fn, js::StringLastIndexOf>(lir);
12466 void CodeGenerator::visitStringStartsWith(LStringStartsWith* lir) {
12467 pushArg(ToRegister(lir->searchString()));
12468 pushArg(ToRegister(lir->string()));
12470 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12471 callVM<Fn, js::StringStartsWith>(lir);
12474 void CodeGenerator::visitStringStartsWithInline(LStringStartsWithInline* lir) {
12475 Register string = ToRegister(lir->string());
12476 Register output = ToRegister(lir->output());
12477 Register temp = ToRegister(lir->temp0());
12479 const JSLinearString* searchString = lir->searchString();
12481 size_t length = searchString->length();
12482 MOZ_ASSERT(length > 0);
12484 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12485 auto* ool = oolCallVM<Fn, js::StringStartsWith>(
12486 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
12488 masm.move32(Imm32(0), output);
12490 // Can't be a prefix when the string is smaller than the search string.
12491 masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
12492 Imm32(length), ool->rejoin());
12494 // Unwind ropes at the start if possible.
12495 Label compare;
12496 masm.movePtr(string, temp);
12497 masm.branchIfNotRope(temp, &compare);
12499 Label unwindRope;
12500 masm.bind(&unwindRope);
12501 masm.loadRopeLeftChild(temp, output);
12502 masm.movePtr(output, temp);
12504 // If the left child is smaller than the search string, jump into the VM to
12505 // linearize the string.
12506 masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
12507 Imm32(length), ool->entry());
12509 // Otherwise keep unwinding ropes.
12510 masm.branchIfRope(temp, &unwindRope);
12512 masm.bind(&compare);
12514 // If operands point to the same instance, it's trivially a prefix.
12515 Label notPointerEqual;
12516 masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
12517 &notPointerEqual);
12518 masm.move32(Imm32(1), output);
12519 masm.jump(ool->rejoin());
12520 masm.bind(&notPointerEqual);
12522 if (searchString->hasTwoByteChars()) {
12523 // Pure two-byte strings can't be a prefix of Latin-1 strings.
12524 JS::AutoCheckCannotGC nogc;
12525 if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
12526 Label compareChars;
12527 masm.branchTwoByteString(temp, &compareChars);
12528 masm.move32(Imm32(0), output);
12529 masm.jump(ool->rejoin());
12530 masm.bind(&compareChars);
12534 // Load the input string's characters.
12535 Register stringChars = output;
12536 masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());
12538 // Start comparing character by character.
12539 masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);
12541 masm.bind(ool->rejoin());
12544 void CodeGenerator::visitStringEndsWith(LStringEndsWith* lir) {
12545 pushArg(ToRegister(lir->searchString()));
12546 pushArg(ToRegister(lir->string()));
12548 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12549 callVM<Fn, js::StringEndsWith>(lir);
12552 void CodeGenerator::visitStringEndsWithInline(LStringEndsWithInline* lir) {
12553 Register string = ToRegister(lir->string());
12554 Register output = ToRegister(lir->output());
12555 Register temp = ToRegister(lir->temp0());
12557 const JSLinearString* searchString = lir->searchString();
12559 size_t length = searchString->length();
12560 MOZ_ASSERT(length > 0);
12562 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12563 auto* ool = oolCallVM<Fn, js::StringEndsWith>(
12564 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
12566 masm.move32(Imm32(0), output);
12568 // Can't be a suffix when the string is smaller than the search string.
12569 masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
12570 Imm32(length), ool->rejoin());
12572 // Unwind ropes at the end if possible.
12573 Label compare;
12574 masm.movePtr(string, temp);
12575 masm.branchIfNotRope(temp, &compare);
12577 Label unwindRope;
12578 masm.bind(&unwindRope);
12579 masm.loadRopeRightChild(temp, output);
12580 masm.movePtr(output, temp);
12582 // If the right child is smaller than the search string, jump into the VM to
12583 // linearize the string.
12584 masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
12585 Imm32(length), ool->entry());
12587 // Otherwise keep unwinding ropes.
12588 masm.branchIfRope(temp, &unwindRope);
12590 masm.bind(&compare);
12592 // If operands point to the same instance, it's trivially a suffix.
12593 Label notPointerEqual;
12594 masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
12595 &notPointerEqual);
12596 masm.move32(Imm32(1), output);
12597 masm.jump(ool->rejoin());
12598 masm.bind(&notPointerEqual);
12600 CharEncoding encoding = searchString->hasLatin1Chars()
12601 ? CharEncoding::Latin1
12602 : CharEncoding::TwoByte;
12603 if (encoding == CharEncoding::TwoByte) {
12604 // Pure two-byte strings can't be a suffix of Latin-1 strings.
12605 JS::AutoCheckCannotGC nogc;
12606 if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
12607 Label compareChars;
12608 masm.branchTwoByteString(temp, &compareChars);
12609 masm.move32(Imm32(0), output);
12610 masm.jump(ool->rejoin());
12611 masm.bind(&compareChars);
12615 // Load the input string's characters.
12616 Register stringChars = output;
12617 masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());
12619 // Move string-char pointer to the suffix string.
12620 masm.loadStringLength(temp, temp);
12621 masm.sub32(Imm32(length), temp);
12622 masm.addToCharPtr(stringChars, temp, encoding);
12624 // Start comparing character by character.
12625 masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);
12627 masm.bind(ool->rejoin());
12630 void CodeGenerator::visitStringToLowerCase(LStringToLowerCase* lir) {
12631 Register string = ToRegister(lir->string());
12632 Register output = ToRegister(lir->output());
12633 Register temp0 = ToRegister(lir->temp0());
12634 Register temp1 = ToRegister(lir->temp1());
12635 Register temp2 = ToRegister(lir->temp2());
12637 // On x86 there are not enough registers. In that case reuse the string
12638 // register as a temporary.
12639 Register temp3 =
12640 lir->temp3()->isBogusTemp() ? string : ToRegister(lir->temp3());
12641 Register temp4 = ToRegister(lir->temp4());
12643 using Fn = JSString* (*)(JSContext*, HandleString);
12644 OutOfLineCode* ool = oolCallVM<Fn, js::StringToLowerCase>(
12645 lir, ArgList(string), StoreRegisterTo(output));
12647 // Take the slow path if the string isn't a linear Latin-1 string.
12648 Imm32 linearLatin1Bits(JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT);
12649 Register flags = temp0;
12650 masm.load32(Address(string, JSString::offsetOfFlags()), flags);
12651 masm.and32(linearLatin1Bits, flags);
12652 masm.branch32(Assembler::NotEqual, flags, linearLatin1Bits, ool->entry());
12654 Register length = temp0;
12655 masm.loadStringLength(string, length);
12657 // Return the input if it's the empty string.
12658 Label notEmptyString;
12659 masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmptyString);
12661 masm.movePtr(string, output);
12662 masm.jump(ool->rejoin());
12664 masm.bind(&notEmptyString);
12666 Register inputChars = temp1;
12667 masm.loadStringChars(string, inputChars, CharEncoding::Latin1);
12669 Register toLowerCaseTable = temp2;
12670 masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), toLowerCaseTable);
12672 // Single element strings can be directly retrieved from static strings cache.
12673 Label notSingleElementString;
12674 masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleElementString);
12676 Register current = temp4;
12678 masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
12679 masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
12680 current);
12681 masm.lookupStaticString(current, output, gen->runtime->staticStrings());
12683 masm.jump(ool->rejoin());
12685 masm.bind(&notSingleElementString);
12687 // Use the OOL-path when the string is too long. This prevents scanning long
12688 // strings which have upper case characters only near the end a second time in
12689 // the VM.
12690 constexpr int32_t MaxInlineLength = 64;
12691 masm.branch32(Assembler::Above, length, Imm32(MaxInlineLength), ool->entry());
12694 // Check if there are any characters which need to be converted.
12696 // This extra loop gives a small performance improvement for strings which
12697 // are already lower cased and lets us avoid calling into the runtime for
12698 // non-inline, all lower case strings. But more importantly it avoids
12699 // repeated inline allocation failures:
12700 // |AllocateThinOrFatInlineString| below takes the OOL-path and calls the
12701 // |js::StringToLowerCase| runtime function when the result string can't be
12702 // allocated inline. And |js::StringToLowerCase| directly returns the input
12703 // string when no characters need to be converted. That means it won't
12704 // trigger GC to clear up the free nursery space, so the next toLowerCase()
12705 // call will again fail to inline allocate the result string.
12706 Label hasUpper;
12708 Register checkInputChars = output;
12709 masm.movePtr(inputChars, checkInputChars);
12711 Register current = temp4;
12713 Label start;
12714 masm.bind(&start);
12715 masm.loadChar(Address(checkInputChars, 0), current, CharEncoding::Latin1);
12716 masm.branch8(Assembler::NotEqual,
12717 BaseIndex(toLowerCaseTable, current, TimesOne), current,
12718 &hasUpper);
12719 masm.addPtr(Imm32(sizeof(Latin1Char)), checkInputChars);
12720 masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
12722 // Input is already in lower case.
12723 masm.movePtr(string, output);
12724 masm.jump(ool->rejoin());
12726 masm.bind(&hasUpper);
12728 // |length| was clobbered above, reload.
12729 masm.loadStringLength(string, length);
12731 // Call into the runtime when we can't create an inline string.
12732 masm.branch32(Assembler::Above, length,
12733 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), ool->entry());
12735 AllocateThinOrFatInlineString(masm, output, length, temp4,
12736 initialStringHeap(), ool->entry(),
12737 CharEncoding::Latin1);
12739 if (temp3 == string) {
12740 masm.push(string);
12743 Register outputChars = temp3;
12744 masm.loadInlineStringCharsForStore(output, outputChars);
12747 Register current = temp4;
12749 Label start;
12750 masm.bind(&start);
12751 masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
12752 masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
12753 current);
12754 masm.storeChar(current, Address(outputChars, 0), CharEncoding::Latin1);
12755 masm.addPtr(Imm32(sizeof(Latin1Char)), inputChars);
12756 masm.addPtr(Imm32(sizeof(Latin1Char)), outputChars);
12757 masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
12760 if (temp3 == string) {
12761 masm.pop(string);
12765 masm.bind(ool->rejoin());
12768 void CodeGenerator::visitStringToUpperCase(LStringToUpperCase* lir) {
12769 pushArg(ToRegister(lir->string()));
12771 using Fn = JSString* (*)(JSContext*, HandleString);
12772 callVM<Fn, js::StringToUpperCase>(lir);
12775 void CodeGenerator::visitCharCodeToLowerCase(LCharCodeToLowerCase* lir) {
12776 Register code = ToRegister(lir->code());
12777 Register output = ToRegister(lir->output());
12778 Register temp = ToRegister(lir->temp0());
12780 using Fn = JSString* (*)(JSContext*, int32_t);
12781 auto* ool = oolCallVM<Fn, jit::CharCodeToLowerCase>(lir, ArgList(code),
12782 StoreRegisterTo(output));
12784 constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;
12786 // OOL path if code >= NonLatin1Min.
12787 masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());
12789 // Convert to lower case.
12790 masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), temp);
12791 masm.load8ZeroExtend(BaseIndex(temp, code, TimesOne), temp);
12793 // Load static string for lower case character.
12794 masm.lookupStaticString(temp, output, gen->runtime->staticStrings());
12796 masm.bind(ool->rejoin());
12799 void CodeGenerator::visitCharCodeToUpperCase(LCharCodeToUpperCase* lir) {
12800 Register code = ToRegister(lir->code());
12801 Register output = ToRegister(lir->output());
12802 Register temp = ToRegister(lir->temp0());
12804 using Fn = JSString* (*)(JSContext*, int32_t);
12805 auto* ool = oolCallVM<Fn, jit::CharCodeToUpperCase>(lir, ArgList(code),
12806 StoreRegisterTo(output));
12808 constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;
12810 // OOL path if code >= NonLatin1Min.
12811 masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());
12813 // Most one element Latin-1 strings can be directly retrieved from the
12814 // static strings cache, except the following three characters:
12816 // 1. ToUpper(U+00B5) = 0+039C
12817 // 2. ToUpper(U+00FF) = 0+0178
12818 // 3. ToUpper(U+00DF) = 0+0053 0+0053
12819 masm.branch32(Assembler::Equal, code, Imm32(unicode::MICRO_SIGN),
12820 ool->entry());
12821 masm.branch32(Assembler::Equal, code,
12822 Imm32(unicode::LATIN_SMALL_LETTER_Y_WITH_DIAERESIS),
12823 ool->entry());
12824 masm.branch32(Assembler::Equal, code,
12825 Imm32(unicode::LATIN_SMALL_LETTER_SHARP_S), ool->entry());
12827 // Inline unicode::ToUpperCase (without the special case for ASCII characters)
12829 constexpr size_t shift = unicode::CharInfoShift;
12831 // code >> shift
12832 masm.move32(code, temp);
12833 masm.rshift32(Imm32(shift), temp);
12835 // index = index1[code >> shift];
12836 masm.movePtr(ImmPtr(unicode::index1), output);
12837 masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);
12839 // (code & ((1 << shift) - 1)
12840 masm.move32(code, output);
12841 masm.and32(Imm32((1 << shift) - 1), output);
12843 // (index << shift) + (code & ((1 << shift) - 1))
12844 masm.lshift32(Imm32(shift), temp);
12845 masm.add32(output, temp);
12847 // index = index2[(index << shift) + (code & ((1 << shift) - 1))]
12848 masm.movePtr(ImmPtr(unicode::index2), output);
12849 masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);
12851 // Compute |index * 6| through |(index * 3) * TimesTwo|.
12852 static_assert(sizeof(unicode::CharacterInfo) == 6);
12853 masm.mulBy3(temp, temp);
12855 // upperCase = js_charinfo[index].upperCase
12856 masm.movePtr(ImmPtr(unicode::js_charinfo), output);
12857 masm.load16ZeroExtend(BaseIndex(output, temp, TimesTwo,
12858 offsetof(unicode::CharacterInfo, upperCase)),
12859 temp);
12861 // uint16_t(ch) + upperCase
12862 masm.add32(code, temp);
12864 // Clear any high bits added when performing the unsigned 16-bit addition
12865 // through a signed 32-bit addition.
12866 masm.move8ZeroExtend(temp, temp);
12868 // Load static string for upper case character.
12869 masm.lookupStaticString(temp, output, gen->runtime->staticStrings());
12871 masm.bind(ool->rejoin());
12874 void CodeGenerator::visitStringTrimStartIndex(LStringTrimStartIndex* lir) {
12875 Register string = ToRegister(lir->string());
12876 Register output = ToRegister(lir->output());
12878 auto volatileRegs = liveVolatileRegs(lir);
12879 volatileRegs.takeUnchecked(output);
12881 masm.PushRegsInMask(volatileRegs);
12883 using Fn = int32_t (*)(const JSString*);
12884 masm.setupAlignedABICall();
12885 masm.passABIArg(string);
12886 masm.callWithABI<Fn, jit::StringTrimStartIndex>();
12887 masm.storeCallInt32Result(output);
12889 masm.PopRegsInMask(volatileRegs);
12892 void CodeGenerator::visitStringTrimEndIndex(LStringTrimEndIndex* lir) {
12893 Register string = ToRegister(lir->string());
12894 Register start = ToRegister(lir->start());
12895 Register output = ToRegister(lir->output());
12897 auto volatileRegs = liveVolatileRegs(lir);
12898 volatileRegs.takeUnchecked(output);
12900 masm.PushRegsInMask(volatileRegs);
12902 using Fn = int32_t (*)(const JSString*, int32_t);
12903 masm.setupAlignedABICall();
12904 masm.passABIArg(string);
12905 masm.passABIArg(start);
12906 masm.callWithABI<Fn, jit::StringTrimEndIndex>();
12907 masm.storeCallInt32Result(output);
12909 masm.PopRegsInMask(volatileRegs);
12912 void CodeGenerator::visitStringSplit(LStringSplit* lir) {
12913 pushArg(Imm32(INT32_MAX));
12914 pushArg(ToRegister(lir->separator()));
12915 pushArg(ToRegister(lir->string()));
12917 using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
12918 callVM<Fn, js::StringSplitString>(lir);
12921 void CodeGenerator::visitInitializedLength(LInitializedLength* lir) {
12922 Address initLength(ToRegister(lir->elements()),
12923 ObjectElements::offsetOfInitializedLength());
12924 masm.load32(initLength, ToRegister(lir->output()));
12927 void CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir) {
12928 Address initLength(ToRegister(lir->elements()),
12929 ObjectElements::offsetOfInitializedLength());
12930 SetLengthFromIndex(masm, lir->index(), initLength);
12933 void CodeGenerator::visitNotBI(LNotBI* lir) {
12934 Register input = ToRegister(lir->input());
12935 Register output = ToRegister(lir->output());
12937 masm.cmp32Set(Assembler::Equal, Address(input, BigInt::offsetOfLength()),
12938 Imm32(0), output);
12941 void CodeGenerator::visitNotO(LNotO* lir) {
12942 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
12943 addOutOfLineCode(ool, lir->mir());
12945 Label* ifEmulatesUndefined = ool->label1();
12946 Label* ifDoesntEmulateUndefined = ool->label2();
12948 Register objreg = ToRegister(lir->input());
12949 Register output = ToRegister(lir->output());
12950 branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
12951 ifDoesntEmulateUndefined, output, ool);
12952 // fall through
12954 Label join;
12956 masm.move32(Imm32(0), output);
12957 masm.jump(&join);
12959 masm.bind(ifEmulatesUndefined);
12960 masm.move32(Imm32(1), output);
12962 masm.bind(&join);
12965 void CodeGenerator::visitNotV(LNotV* lir) {
12966 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
12967 addOutOfLineCode(ool, lir->mir());
12969 Label* ifTruthy = ool->label1();
12970 Label* ifFalsy = ool->label2();
12972 ValueOperand input = ToValue(lir, LNotV::InputIndex);
12973 Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
12974 FloatRegister floatTemp = ToFloatRegister(lir->temp0());
12975 Register output = ToRegister(lir->output());
12976 const TypeDataList& observedTypes = lir->mir()->observedTypes();
12978 testValueTruthy(input, tempToUnbox, output, floatTemp, observedTypes,
12979 ifTruthy, ifFalsy, ool);
12981 Label join;
12983 // Note that the testValueTruthy call above may choose to fall through
12984 // to ifTruthy instead of branching there.
12985 masm.bind(ifTruthy);
12986 masm.move32(Imm32(0), output);
12987 masm.jump(&join);
12989 masm.bind(ifFalsy);
12990 masm.move32(Imm32(1), output);
12992 // both branches meet here.
12993 masm.bind(&join);
12996 void CodeGenerator::visitBoundsCheck(LBoundsCheck* lir) {
12997 const LAllocation* index = lir->index();
12998 const LAllocation* length = lir->length();
12999 LSnapshot* snapshot = lir->snapshot();
13001 MIRType type = lir->mir()->type();
13003 auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
13004 if (type == MIRType::Int32) {
13005 bailoutCmp32(cond, lhs, rhs, snapshot);
13006 } else {
13007 MOZ_ASSERT(type == MIRType::IntPtr);
13008 bailoutCmpPtr(cond, lhs, rhs, snapshot);
13012 auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
13013 int32_t rhs) {
13014 if (type == MIRType::Int32) {
13015 bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
13016 } else {
13017 MOZ_ASSERT(type == MIRType::IntPtr);
13018 bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
13022 if (index->isConstant()) {
13023 // Use uint32 so that the comparison is unsigned.
13024 uint32_t idx = ToInt32(index);
13025 if (length->isConstant()) {
13026 uint32_t len = ToInt32(lir->length());
13027 if (idx < len) {
13028 return;
13030 bailout(snapshot);
13031 return;
13034 if (length->isRegister()) {
13035 bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), idx);
13036 } else {
13037 bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), idx);
13039 return;
13042 Register indexReg = ToRegister(index);
13043 if (length->isConstant()) {
13044 bailoutCmpConstant(Assembler::AboveOrEqual, indexReg, ToInt32(length));
13045 } else if (length->isRegister()) {
13046 bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), indexReg);
13047 } else {
13048 bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), indexReg);
13052 void CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir) {
13053 int32_t min = lir->mir()->minimum();
13054 int32_t max = lir->mir()->maximum();
13055 MOZ_ASSERT(max >= min);
13057 LSnapshot* snapshot = lir->snapshot();
13058 MIRType type = lir->mir()->type();
13060 const LAllocation* length = lir->length();
13061 Register temp = ToRegister(lir->getTemp(0));
13063 auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
13064 if (type == MIRType::Int32) {
13065 bailoutCmp32(cond, lhs, rhs, snapshot);
13066 } else {
13067 MOZ_ASSERT(type == MIRType::IntPtr);
13068 bailoutCmpPtr(cond, lhs, rhs, snapshot);
13072 auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
13073 int32_t rhs) {
13074 if (type == MIRType::Int32) {
13075 bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
13076 } else {
13077 MOZ_ASSERT(type == MIRType::IntPtr);
13078 bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
13082 if (lir->index()->isConstant()) {
13083 int32_t nmin, nmax;
13084 int32_t index = ToInt32(lir->index());
13085 if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
13086 if (length->isRegister()) {
13087 bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), nmax);
13088 } else {
13089 bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), nmax);
13091 return;
13093 masm.mov(ImmWord(index), temp);
13094 } else {
13095 masm.mov(ToRegister(lir->index()), temp);
13098 // If the minimum and maximum differ then do an underflow check first.
13099 // If the two are the same then doing an unsigned comparison on the
13100 // length will also catch a negative index.
13101 if (min != max) {
13102 if (min != 0) {
13103 Label bail;
13104 if (type == MIRType::Int32) {
13105 masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
13106 } else {
13107 masm.branchAddPtr(Assembler::Overflow, Imm32(min), temp, &bail);
13109 bailoutFrom(&bail, snapshot);
13112 bailoutCmpConstant(Assembler::LessThan, temp, 0);
13114 if (min != 0) {
13115 int32_t diff;
13116 if (SafeSub(max, min, &diff)) {
13117 max = diff;
13118 } else {
13119 if (type == MIRType::Int32) {
13120 masm.sub32(Imm32(min), temp);
13121 } else {
13122 masm.subPtr(Imm32(min), temp);
13128 // Compute the maximum possible index. No overflow check is needed when
13129 // max > 0. We can only wraparound to a negative number, which will test as
13130 // larger than all nonnegative numbers in the unsigned comparison, and the
13131 // length is required to be nonnegative (else testing a negative length
13132 // would succeed on any nonnegative index).
13133 if (max != 0) {
13134 if (max < 0) {
13135 Label bail;
13136 if (type == MIRType::Int32) {
13137 masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
13138 } else {
13139 masm.branchAddPtr(Assembler::Overflow, Imm32(max), temp, &bail);
13141 bailoutFrom(&bail, snapshot);
13142 } else {
13143 if (type == MIRType::Int32) {
13144 masm.add32(Imm32(max), temp);
13145 } else {
13146 masm.addPtr(Imm32(max), temp);
13151 if (length->isRegister()) {
13152 bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), temp);
13153 } else {
13154 bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), temp);
13158 void CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir) {
13159 int32_t min = lir->mir()->minimum();
13160 bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
13161 lir->snapshot());
13164 void CodeGenerator::visitSpectreMaskIndex(LSpectreMaskIndex* lir) {
13165 MOZ_ASSERT(JitOptions.spectreIndexMasking);
13167 const LAllocation* length = lir->length();
13168 Register index = ToRegister(lir->index());
13169 Register output = ToRegister(lir->output());
13171 if (lir->mir()->type() == MIRType::Int32) {
13172 if (length->isRegister()) {
13173 masm.spectreMaskIndex32(index, ToRegister(length), output);
13174 } else {
13175 masm.spectreMaskIndex32(index, ToAddress(length), output);
13177 } else {
13178 MOZ_ASSERT(lir->mir()->type() == MIRType::IntPtr);
13179 if (length->isRegister()) {
13180 masm.spectreMaskIndexPtr(index, ToRegister(length), output);
13181 } else {
13182 masm.spectreMaskIndexPtr(index, ToAddress(length), output);
13187 class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator> {
13188 LInstruction* ins_;
13190 public:
13191 explicit OutOfLineStoreElementHole(LInstruction* ins) : ins_(ins) {
13192 MOZ_ASSERT(ins->isStoreElementHoleV() || ins->isStoreElementHoleT());
13195 void accept(CodeGenerator* codegen) override {
13196 codegen->visitOutOfLineStoreElementHole(this);
13199 MStoreElementHole* mir() const {
13200 return ins_->isStoreElementHoleV() ? ins_->toStoreElementHoleV()->mir()
13201 : ins_->toStoreElementHoleT()->mir();
13203 LInstruction* ins() const { return ins_; }
13206 void CodeGenerator::emitStoreHoleCheck(Register elements,
13207 const LAllocation* index,
13208 LSnapshot* snapshot) {
13209 Label bail;
13210 if (index->isConstant()) {
13211 Address dest(elements, ToInt32(index) * sizeof(js::Value));
13212 masm.branchTestMagic(Assembler::Equal, dest, &bail);
13213 } else {
13214 BaseObjectElementIndex dest(elements, ToRegister(index));
13215 masm.branchTestMagic(Assembler::Equal, dest, &bail);
13217 bailoutFrom(&bail, snapshot);
// Store a typed (non-boxed-Value) |value| into |elements| at |index|,
// boxing it as required for |valueType|.
void CodeGenerator::emitStoreElementTyped(const LAllocation* value,
                                          MIRType valueType, Register elements,
                                          const LAllocation* index) {
  MOZ_ASSERT(valueType != MIRType::MagicHole);
  ConstantOrRegister v = ToConstantOrRegister(value, valueType);
  if (index->isConstant()) {
    // Constant index: address the slot directly.
    Address dest(elements, ToInt32(index) * sizeof(js::Value));
    masm.storeUnboxedValue(v, valueType, dest);
  } else {
    BaseObjectElementIndex dest(elements, ToRegister(index));
    masm.storeUnboxedValue(v, valueType, dest);
  }
}
// Store a typed element into a dense-elements slot (no hole-filling path).
void CodeGenerator::visitStoreElementT(LStoreElementT* store) {
  Register elements = ToRegister(store->elements());
  const LAllocation* index = store->index();

  // GC pre-barrier on the value being overwritten, if required.
  if (store->mir()->needsBarrier()) {
    emitPreBarrier(elements, index);
  }

  // Bail out rather than silently overwrite a hole.
  if (store->mir()->needsHoleCheck()) {
    emitStoreHoleCheck(elements, index, store->snapshot());
  }

  emitStoreElementTyped(store->value(), store->mir()->value()->type(), elements,
                        index);
}
// Store a boxed Value into a dense-elements slot (no hole-filling path).
void CodeGenerator::visitStoreElementV(LStoreElementV* lir) {
  const ValueOperand value = ToValue(lir, LStoreElementV::Value);
  Register elements = ToRegister(lir->elements());
  const LAllocation* index = lir->index();

  // GC pre-barrier on the value being overwritten, if required.
  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(elements, index);
  }

  // Bail out rather than silently overwrite a hole.
  if (lir->mir()->needsHoleCheck()) {
    emitStoreHoleCheck(elements, index, lir->snapshot());
  }

  if (lir->index()->isConstant()) {
    Address dest(elements, ToInt32(lir->index()) * sizeof(js::Value));
    masm.storeValue(value, dest);
  } else {
    BaseObjectElementIndex dest(elements, ToRegister(lir->index()));
    masm.storeValue(value, dest);
  }
}
// Store the magic hole value into elements[index]. The elements header is
// marked NON_PACKED first, before the hole becomes observable.
void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());

  // Mark the elements as containing holes.
  Address elementsFlags(elements, ObjectElements::offsetOfFlags());
  masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);

  BaseObjectElementIndex element(elements, index);
  masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
}
// Typed-value variant of the "store element, possibly appending" operation.
// Fast path: index < initializedLength. Otherwise the OOL path may append
// an element (growing the elements storage if needed) and rejoins here.
void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());

  // The OOL code depends on the condition flags set by this bounds check
  // (see visitOutOfLineStoreElementHole).
  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // The OOL path rejoins after the pre-barrier: appended memory is
  // uninitialized, so no pre-barrier is wanted for it.
  masm.bind(ool->rejoin());
  emitStoreElementTyped(lir->value(), lir->mir()->value()->type(), elements,
                        lir->index());

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    ConstantOrRegister val =
        ToConstantOrRegister(lir->value(), lir->mir()->value()->type());
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp, val);
  }
}
// Boxed-Value variant of the "store element, possibly appending" operation.
// Mirrors visitStoreElementHoleT.
void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  const ValueOperand value = ToValue(lir, LStoreElementHoleV::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  // The OOL code depends on the condition flags set by this bounds check
  // (see visitOutOfLineStoreElementHole).
  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // The OOL path rejoins after the pre-barrier: appended memory is
  // uninitialized, so no pre-barrier is wanted for it.
  masm.bind(ool->rejoin());
  masm.storeValue(value, BaseObjectElementIndex(elements, index));

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp,
                                ConstantOrRegister(value));
  }
}
// Out-of-line path for LStoreElementHole{V,T}: the index was not below the
// initialized length. Handles only the append case (index == initLength);
// anything larger bails out. May call into the VM to grow the elements
// storage, then bumps initializedLength (and length if needed) and jumps
// back to the inline store.
void CodeGenerator::visitOutOfLineStoreElementHole(
    OutOfLineStoreElementHole* ool) {
  Register object, elements, index;
  LInstruction* ins = ool->ins();
  mozilla::Maybe<ConstantOrRegister> value;
  Register temp;

  // Unpack operands from whichever LIR variant we were created for.
  if (ins->isStoreElementHoleV()) {
    LStoreElementHoleV* store = ins->toStoreElementHoleV();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    value.emplace(
        TypedOrValueRegister(ToValue(store, LStoreElementHoleV::ValueIndex)));
    temp = ToRegister(store->temp0());
  } else {
    LStoreElementHoleT* store = ins->toStoreElementHoleT();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    if (store->value()->isConstant()) {
      value.emplace(
          ConstantOrRegister(store->value()->toConstant()->toJSValue()));
    } else {
      MIRType valueType = store->mir()->value()->type();
      value.emplace(
          TypedOrValueRegister(valueType, ToAnyRegister(store->value())));
    }
    temp = ToRegister(store->temp0());
  }

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());

  // We're out-of-bounds. We only handle the index == initlength case.
  // If index > initializedLength, bail out. Note that this relies on the
  // condition flags sticking from the incoming branch.
  // Also note: this branch does not need Spectre mitigations, doing that for
  // the capacity check below is sufficient.
  Label allocElement, addNewElement;
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  // Had to reimplement for MIPS because there are no flags.
  bailoutCmp32(Assembler::NotEqual, initLength, index, ins->snapshot());
#else
  bailoutIf(Assembler::NotEqual, ins->snapshot());
#endif

  // If index < capacity, we can add a dense element inline. If not, we need
  // to allocate more elements first.
  masm.spectreBoundsCheck32(
      index, Address(elements, ObjectElements::offsetOfCapacity()), temp,
      &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  // Save all live volatile registers, except |temp|.
  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  liveRegs.takeUnchecked(temp);
  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject*);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  bailoutIfFalseBool(temp, ins->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);

  masm.bind(&addNewElement);

  // Increment initLength
  masm.add32(Imm32(1), initLength);

  // If length is now <= index, increment length too.
  Label skipIncrementLength;
  Address length(elements, ObjectElements::offsetOfLength());
  masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
  masm.add32(Imm32(1), length);
  masm.bind(&skipIncrementLength);

  // Jump to the inline path where we will store the value.
  // We rejoin after the prebarrier, because the memory is uninitialized.
  masm.jump(ool->rejoin());
}
// Inline Array.prototype.pop/shift on packed arrays; bails out when the
// MacroAssembler fast path cannot handle the array.
void CodeGenerator::visitArrayPopShift(LArrayPopShift* lir) {
  Register obj = ToRegister(lir->object());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  ValueOperand out = ToOutValue(lir);

  Label bail;
  if (lir->mir()->mode() == MArrayPopShift::Pop) {
    masm.packedArrayPop(obj, out, temp1, temp2, &bail);
  } else {
    MOZ_ASSERT(lir->mir()->mode() == MArrayPopShift::Shift);
    // Shift may need to call out (it moves elements), so pass the live
    // volatile registers for spilling.
    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    masm.packedArrayShift(obj, out, temp1, temp2, volatileRegs, &bail);
  }
  bailoutFrom(&bail, lir->snapshot());
}
// Out-of-line path for LArrayPush, entered when the array's elements
// storage has no spare capacity and must be grown via the VM.
class OutOfLineArrayPush : public OutOfLineCodeBase<CodeGenerator> {
  LArrayPush* ins_;

 public:
  explicit OutOfLineArrayPush(LArrayPush* ins) : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineArrayPush(this);
  }

  LArrayPush* ins() const { return ins_; }
};
// Inline Array.prototype.push of a single value. The new length is the
// instruction's output. Falls back to OOL code to grow the elements
// storage when capacity is exhausted.
void CodeGenerator::visitArrayPush(LArrayPush* lir) {
  Register obj = ToRegister(lir->object());
  Register elementsTemp = ToRegister(lir->temp0());
  Register length = ToRegister(lir->output());
  ValueOperand value = ToValue(lir, LArrayPush::ValueIndex);
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  auto* ool = new (alloc()) OutOfLineArrayPush(lir);
  addOutOfLineCode(ool, lir->mir());

  // Load obj->elements in elementsTemp.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);

  Address initLengthAddr(elementsTemp,
                         ObjectElements::offsetOfInitializedLength());
  Address lengthAddr(elementsTemp, ObjectElements::offsetOfLength());
  Address capacityAddr(elementsTemp, ObjectElements::offsetOfCapacity());

  // Bail out if length != initLength.
  masm.load32(lengthAddr, length);
  bailoutCmp32(Assembler::NotEqual, initLengthAddr, length, lir->snapshot());

  // If length < capacity, we can add a dense element inline. If not, we
  // need to allocate more elements.
  masm.spectreBoundsCheck32(length, capacityAddr, spectreTemp, ool->entry());
  masm.bind(ool->rejoin());

  // Store the value.
  masm.storeValue(value, BaseObjectElementIndex(elementsTemp, length));

  // Update length and initialized length.
  masm.add32(Imm32(1), length);
  masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
  masm.store32(length, Address(elementsTemp,
                               ObjectElements::offsetOfInitializedLength()));

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    regs.addUnchecked(length);
    // |length| was already incremented past the stored element's index,
    // hence the -1 index adjustment.
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->output()->output(),
                                elementsTemp, ConstantOrRegister(value),
                                /* indexDiff = */ -1);
  }
}
// OOL path for visitArrayPush: grow the elements storage by one via the
// pure VM helper, bail out on failure, reload the (possibly moved)
// elements pointer, then rejoin the inline store.
void CodeGenerator::visitOutOfLineArrayPush(OutOfLineArrayPush* ool) {
  LArrayPush* ins = ool->ins();

  Register object = ToRegister(ins->object());
  Register temp = ToRegister(ins->temp0());

  // Save live volatiles; keep |temp| free for the call, and make sure the
  // output register and the pushed value survive the call.
  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  liveRegs.takeUnchecked(temp);
  liveRegs.addUnchecked(ToRegister(ins->output()));
  liveRegs.addUnchecked(ToValue(ins, LArrayPush::ValueIndex));

  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject* obj);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  bailoutIfFalseBool(temp, ins->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  masm.jump(ool->rejoin());
}
// Array.prototype.slice on a packed array: try to pre-allocate the result
// object inline (passing nullptr to the VM on allocation failure), then
// call into the VM to copy the dense elements.
void CodeGenerator::visitArraySlice(LArraySlice* lir) {
  Register object = ToRegister(lir->object());
  Register begin = ToRegister(lir->begin());
  Register end = ToRegister(lir->end());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label call, fail;

  // The fast path only works on packed arrays; bail otherwise.
  Label bail;
  masm.branchArrayIsNotPacked(object, temp0, temp1, &bail);
  bailoutFrom(&bail, lir->snapshot());

  // Try to allocate an object.
  TemplateObject templateObject(lir->mir()->templateObj());
  masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
                      &fail);

  masm.jump(&call);

  // Allocation failed: pass nullptr so the VM allocates the result itself.
  masm.bind(&fail);
  masm.movePtr(ImmPtr(nullptr), temp0);

  masm.bind(&call);

  pushArg(temp0);
  pushArg(end);
  pushArg(begin);
  pushArg(object);

  using Fn =
      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
  callVM<Fn, ArraySliceDense>(lir);
}
// Slice of an arguments object; same shape as visitArraySlice (inline
// pre-allocation, nullptr on failure) but calls ArgumentsSliceDense.
void CodeGenerator::visitArgumentsSlice(LArgumentsSlice* lir) {
  Register object = ToRegister(lir->object());
  Register begin = ToRegister(lir->begin());
  Register end = ToRegister(lir->end());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label call, fail;

  // Try to allocate an object.
  TemplateObject templateObject(lir->mir()->templateObj());
  masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
                      &fail);

  masm.jump(&call);

  // Allocation failed: pass nullptr so the VM allocates the result itself.
  masm.bind(&fail);
  masm.movePtr(ImmPtr(nullptr), temp0);

  masm.bind(&call);

  pushArg(temp0);
  pushArg(end);
  pushArg(begin);
  pushArg(object);

  using Fn =
      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
  callVM<Fn, ArgumentsSliceDense>(lir);
}
#ifdef DEBUG
// Debug-only runtime assertions for an arguments slice: |begin| and |count|
// must be non-negative and [begin, begin + count) must lie within
// |numActualArgs|. |begin| and |count| are preserved; |numActualArgs| is
// clobbered (used as scratch for the begin+count check).
void CodeGenerator::emitAssertArgumentsSliceBounds(const RegisterOrInt32& begin,
                                                   const RegisterOrInt32& count,
                                                   Register numActualArgs) {
  // |begin| must be positive or zero.
  if (begin.is<Register>()) {
    Label beginOk;
    masm.branch32(Assembler::GreaterThanOrEqual, begin.as<Register>(), Imm32(0),
                  &beginOk);
    masm.assumeUnreachable("begin < 0");
    masm.bind(&beginOk);
  } else {
    MOZ_ASSERT(begin.as<int32_t>() >= 0);
  }

  // |count| must be positive or zero.
  if (count.is<Register>()) {
    Label countOk;
    masm.branch32(Assembler::GreaterThanOrEqual, count.as<Register>(), Imm32(0),
                  &countOk);
    masm.assumeUnreachable("count < 0");
    masm.bind(&countOk);
  } else {
    MOZ_ASSERT(count.as<int32_t>() >= 0);
  }

  // |begin| must be less-or-equal to |numActualArgs|.
  Label argsBeginOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginOk);
  }
  masm.assumeUnreachable("begin <= numActualArgs");
  masm.bind(&argsBeginOk);

  // |count| must be less-or-equal to |numActualArgs|.
  Label argsCountOk;
  if (count.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, count.as<Register>(),
                   &argsCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(count.as<int32_t>()), &argsCountOk);
  }
  masm.assumeUnreachable("count <= numActualArgs");
  masm.bind(&argsCountOk);

  // |begin| and |count| must be preserved, but |numActualArgs| can be changed.
  //
  // Pre-condition: |count| <= |numActualArgs|
  // Condition to test: |begin + count| <= |numActualArgs|
  // Transform to: |begin| <= |numActualArgs - count|
  if (count.is<Register>()) {
    masm.subPtr(count.as<Register>(), numActualArgs);
  } else {
    masm.subPtr(Imm32(count.as<int32_t>()), numActualArgs);
  }

  // |begin + count| must be less-or-equal to |numActualArgs|.
  Label argsBeginCountOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginCountOk);
  }
  masm.assumeUnreachable("begin + count <= numActualArgs");
  masm.bind(&argsBeginCountOk);
}
#endif
// Allocate the result array for an arguments slice with |count| elements
// (register or compile-time constant). Tries an inline allocation from the
// template object, calling into the VM when |count| exceeds the template's
// dense capacity or inline allocation fails. On success, length and
// initialized length are set to |count|.
template <class ArgumentsSlice>
void CodeGenerator::emitNewArray(ArgumentsSlice* lir,
                                 const RegisterOrInt32& count, Register output,
                                 Register temp) {
  using Fn = ArrayObject* (*)(JSContext*, int32_t);
  auto* ool = count.match(
      [&](Register count) {
        return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
            lir, ArgList(count), StoreRegisterTo(output));
      },
      [&](int32_t count) {
        return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
            lir, ArgList(Imm32(count)), StoreRegisterTo(output));
      });

  TemplateObject templateObject(lir->mir()->templateObj());
  MOZ_ASSERT(templateObject.isArrayObject());

  auto templateNativeObj = templateObject.asTemplateNativeObject();
  MOZ_ASSERT(templateNativeObj.getArrayLength() == 0);
  MOZ_ASSERT(templateNativeObj.getDenseInitializedLength() == 0);
  MOZ_ASSERT(!templateNativeObj.hasDynamicElements());

  // Check array capacity. Call into the VM if the template object's capacity
  // is too small.
  bool tryAllocate = count.match(
      [&](Register count) {
        masm.branch32(Assembler::Above, count,
                      Imm32(templateNativeObj.getDenseCapacity()),
                      ool->entry());
        return true;
      },
      [&](int32_t count) {
        MOZ_ASSERT(count >= 0);
        // Constant count: decide at compile time whether the inline
        // allocation is even possible.
        if (uint32_t(count) > templateNativeObj.getDenseCapacity()) {
          masm.jump(ool->entry());
          return false;
        }
        return true;
      });

  if (tryAllocate) {
    // Try to allocate an object.
    masm.createGCObject(output, temp, templateObject, lir->mir()->initialHeap(),
                        ool->entry());

    auto setInitializedLengthAndLength = [&](auto count) {
      const int elementsOffset = NativeObject::offsetOfFixedElements();

      // Update initialized length.
      Address initLength(
          output, elementsOffset + ObjectElements::offsetOfInitializedLength());
      masm.store32(count, initLength);

      // Update length.
      Address length(output, elementsOffset + ObjectElements::offsetOfLength());
      masm.store32(count, length);
    };

    // The array object was successfully created. Set the length and
    // initialized length and then proceed to fill the elements.
    count.match([&](Register count) { setInitializedLengthAndLength(count); },
                [&](int32_t count) {
                  if (count > 0) {
                    setInitializedLengthAndLength(Imm32(count));
                  }
                });
  }
  masm.bind(ool->rejoin());
}
// Slice of the frame's actual arguments: allocate the result array, then
// copy |count| Values starting at argument |begin| from the JS frame into
// the array's dense elements, emitting a post-write barrier if needed.
void CodeGenerator::visitFrameArgumentsSlice(LFrameArgumentsSlice* lir) {
  Register begin = ToRegister(lir->begin());
  Register count = ToRegister(lir->count());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  masm.loadNumActualArgs(FramePointer, temp);
  emitAssertArgumentsSliceBounds(RegisterOrInt32(begin), RegisterOrInt32(count),
                                 temp);
#endif

  emitNewArray(lir, RegisterOrInt32(count), output, temp);

  // Nothing to copy for an empty slice.
  Label done;
  masm.branch32(Assembler::Equal, count, Imm32(0), &done);

  // Pick a free Value register for the copy loop.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  allRegs.take(begin);
  allRegs.take(count);
  allRegs.take(temp);
  allRegs.take(output);

  ValueOperand value = allRegs.takeAnyValue();

  // |output|, |begin| and |value| are clobbered by the loop; save them.
  LiveRegisterSet liveRegs;
  liveRegs.add(output);
  liveRegs.add(begin);
  liveRegs.add(value);

  masm.PushRegsInMask(liveRegs);

  // Initialize all elements.

  Register elements = output;
  masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

  Register argIndex = begin;

  Register index = temp;
  masm.move32(Imm32(0), index);

  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
  BaseValueIndex argPtr(FramePointer, argIndex, argvOffset);

  Label loop;
  masm.bind(&loop);

  masm.loadValue(argPtr, value);

  // We don't need a pre-barrier, because the element at |index| is guaranteed
  // to be a non-GC thing (either uninitialized memory or the magic hole
  // value).
  masm.storeValue(value, BaseObjectElementIndex(elements, index));

  masm.add32(Imm32(1), index);
  masm.add32(Imm32(1), argIndex);

  masm.branch32(Assembler::LessThan, index, count, &loop);

  masm.PopRegsInMask(liveRegs);

  // Emit a post-write barrier if |output| is tenured.
  //
  // We expect that |output| is nursery allocated, so it isn't worth the
  // trouble to check if no frame argument is a nursery thing, which would
  // allow to omit the post-write barrier.
  masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);

  LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
  volatileRegs.takeUnchecked(temp);
  if (output.volatile_()) {
    volatileRegs.addUnchecked(output);
  }

  masm.PushRegsInMask(volatileRegs);
  emitPostWriteBarrier(output);
  masm.PopRegsInMask(volatileRegs);

  masm.bind(&done);
}
13828 CodeGenerator::RegisterOrInt32 CodeGenerator::ToRegisterOrInt32(
13829 const LAllocation* allocation) {
13830 if (allocation->isConstant()) {
13831 return RegisterOrInt32(allocation->toConstant()->toInt32());
13833 return RegisterOrInt32(ToRegister(allocation));
// Slice of inlined-call arguments, where each argument is an LIR operand
// rather than a frame slot. |begin| and |count| may each be a register or
// a compile-time constant; the copy strategy is specialized accordingly.
void CodeGenerator::visitInlineArgumentsSlice(LInlineArgumentsSlice* lir) {
  RegisterOrInt32 begin = ToRegisterOrInt32(lir->begin());
  RegisterOrInt32 count = ToRegisterOrInt32(lir->count());
  Register temp = ToRegister(lir->temp());
  Register output = ToRegister(lir->output());

  uint32_t numActuals = lir->mir()->numActuals();

#ifdef DEBUG
  masm.move32(Imm32(numActuals), temp);

  emitAssertArgumentsSliceBounds(begin, count, temp);
#endif

  emitNewArray(lir, count, output, temp);

  // We're done if there are no actual arguments.
  if (numActuals == 0) {
    return;
  }

  // Check if any arguments have to be copied.
  Label done;
  if (count.is<Register>()) {
    masm.branch32(Assembler::Equal, count.as<Register>(), Imm32(0), &done);
  } else if (count.as<int32_t>() == 0) {
    return;
  }

  // Fetch inlined argument |i| as a ConstantOrRegister.
  auto getArg = [&](uint32_t i) {
    return toConstantOrRegister(lir, LInlineArgumentsSlice::ArgIndex(i),
                                lir->mir()->getArg(i)->type());
  };

  auto storeArg = [&](uint32_t i, auto dest) {
    // We don't need a pre-barrier because the element at |index| is guaranteed
    // to be a non-GC thing (either uninitialized memory or the magic hole
    // value).
    masm.storeConstantOrRegister(getArg(i), dest);
  };

  // Initialize all elements.
  if (numActuals == 1) {
    // There's exactly one argument. We've checked that |count| is non-zero,
    // which implies that |begin| must be zero.
    MOZ_ASSERT_IF(begin.is<int32_t>(), begin.as<int32_t>() == 0);

    Register elements = temp;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    storeArg(0, Address(elements, 0));
  } else if (begin.is<Register>()) {
    // There is more than one argument and |begin| isn't a compile-time
    // constant. Iterate through 0..numActuals to search for |begin| and then
    // start copying |count| arguments from that index.

    LiveGeneralRegisterSet liveRegs;
    liveRegs.add(output);
    liveRegs.add(begin.as<Register>());

    masm.PushRegsInMask(liveRegs);

    Register elements = output;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    Register argIndex = begin.as<Register>();

    Register index = temp;
    masm.move32(Imm32(0), index);

    Label doneLoop;
    for (uint32_t i = 0; i < numActuals; ++i) {
      // Skip arguments before |begin|.
      Label next;
      masm.branch32(Assembler::NotEqual, argIndex, Imm32(i), &next);

      storeArg(i, BaseObjectElementIndex(elements, index));

      masm.add32(Imm32(1), index);
      masm.add32(Imm32(1), argIndex);

      // Stop once |count| elements have been copied.
      if (count.is<Register>()) {
        masm.branch32(Assembler::GreaterThanOrEqual, index,
                      count.as<Register>(), &doneLoop);
      } else {
        masm.branch32(Assembler::GreaterThanOrEqual, index,
                      Imm32(count.as<int32_t>()), &doneLoop);
      }

      masm.bind(&next);
    }
    masm.bind(&doneLoop);

    masm.PopRegsInMask(liveRegs);
  } else {
    // There is more than one argument and |begin| is a compile-time constant.

    Register elements = temp;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    int32_t argIndex = begin.as<int32_t>();

    int32_t index = 0;

    Label doneLoop;
    for (uint32_t i = argIndex; i < numActuals; ++i) {
      storeArg(i, Address(elements, index * sizeof(Value)));

      index += 1;

      // Stop once |count| elements have been copied. With a constant count
      // this is decided at compile time.
      if (count.is<Register>()) {
        masm.branch32(Assembler::LessThanOrEqual, count.as<Register>(),
                      Imm32(index), &doneLoop);
      } else {
        if (index >= count.as<int32_t>()) {
          break;
        }
      }
    }
    masm.bind(&doneLoop);
  }

  // Determine if we have to emit post-write barrier.
  //
  // If either |begin| or |count| is a constant, use their value directly.
  // Otherwise assume we copy all inline arguments from 0..numActuals.
  bool postWriteBarrier = false;
  uint32_t actualBegin = begin.match([](Register) { return 0; },
                                     [](int32_t value) { return value; });
  uint32_t actualCount =
      count.match([=](Register) { return numActuals; },
                  [](int32_t value) -> uint32_t { return value; });
  for (uint32_t i = 0; i < actualCount; ++i) {
    ConstantOrRegister arg = getArg(actualBegin + i);
    if (arg.constant()) {
      Value v = arg.value();
      if (v.isGCThing() && IsInsideNursery(v.toGCThing())) {
        postWriteBarrier = true;
      }
    } else {
      MIRType type = arg.reg().type();
      if (type == MIRType::Value || NeedsPostBarrier(type)) {
        postWriteBarrier = true;
      }
    }
  }

  // Emit a post-write barrier if |output| is tenured and we couldn't
  // determine at compile-time that no barrier is needed.
  if (postWriteBarrier) {
    masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(temp);
    if (output.volatile_()) {
      volatileRegs.addUnchecked(output);
    }

    masm.PushRegsInMask(volatileRegs);
    emitPostWriteBarrier(output);
    masm.PopRegsInMask(volatileRegs);
  }

  masm.bind(&done);
}
// Normalize a slice endpoint: negative values count back from |length|
// (clamped at 0), non-negative values are clamped to |length|.
void CodeGenerator::visitNormalizeSliceTerm(LNormalizeSliceTerm* lir) {
  Register value = ToRegister(lir->value());
  Register length = ToRegister(lir->length());
  Register output = ToRegister(lir->output());

  masm.move32(value, output);

  Label positive;
  masm.branch32(Assembler::GreaterThanOrEqual, value, Imm32(0), &positive);

  // Negative: add length, clamp below at 0.
  Label done;
  masm.add32(length, output);
  masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(0), &done);
  masm.move32(Imm32(0), output);
  masm.jump(&done);

  // Non-negative: clamp above at length.
  masm.bind(&positive);
  masm.cmp32Move32(Assembler::LessThan, length, value, length, output);

  masm.bind(&done);
}
// Array.prototype.join: inline fast paths for the empty array (returns the
// empty atom) and for a single-string element (returns it directly);
// everything else goes through the ArrayJoin VM function.
void CodeGenerator::visitArrayJoin(LArrayJoin* lir) {
  Label skipCall;

  Register output = ToRegister(lir->output());
  Register sep = ToRegister(lir->separator());
  Register array = ToRegister(lir->array());
  Register temp = ToRegister(lir->temp0());

  // Fast path for simple length <= 1 cases.
  masm.loadPtr(Address(array, NativeObject::offsetOfElements()), temp);
  Address length(temp, ObjectElements::offsetOfLength());
  Address initLength(temp, ObjectElements::offsetOfInitializedLength());

  // Check for length == 0
  Label notEmpty;
  masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmpty);
  const JSAtomState& names = gen->runtime->names();
  masm.movePtr(ImmGCPtr(names.empty_), output);
  masm.jump(&skipCall);

  masm.bind(&notEmpty);
  Label notSingleString;
  // Check for length == 1, initializedLength >= 1, arr[0].isString()
  masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleString);
  masm.branch32(Assembler::LessThan, initLength, Imm32(1), &notSingleString);

  Address elem0(temp, 0);
  masm.branchTestString(Assembler::NotEqual, elem0, &notSingleString);

  // At this point, 'output' can be used as a scratch register, since we're
  // guaranteed to succeed.
  masm.unboxString(elem0, output);
  masm.jump(&skipCall);
  masm.bind(&notSingleString);

  pushArg(sep);
  pushArg(array);

  using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
  callVM<Fn, jit::ArrayJoin>(lir);
  masm.bind(&skipCall);
}
// Object.keys(obj): always implemented as a VM call.
void CodeGenerator::visitObjectKeys(LObjectKeys* lir) {
  Register object = ToRegister(lir->object());

  pushArg(object);

  using Fn = JSObject* (*)(JSContext*, HandleObject);
  callVM<Fn, jit::ObjectKeys>(lir);
}
// Object.keys(obj).length without materializing the array: a VM call that
// returns the key count directly.
void CodeGenerator::visitObjectKeysLength(LObjectKeysLength* lir) {
  Register object = ToRegister(lir->object());

  pushArg(object);

  using Fn = bool (*)(JSContext*, HandleObject, int32_t*);
  callVM<Fn, jit::ObjectKeysLength>(lir);
}
// GetIterator via an inline cache (IonGetIteratorIC).
void CodeGenerator::visitGetIteratorCache(LGetIteratorCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  // The input may be a boxed Value or a typed operand, depending on MIR.
  TypedOrValueRegister val =
      toConstantOrRegister(lir, LGetIteratorCache::ValueIndex,
                           lir->mir()->value()->type())
          .reg();
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  IonGetIteratorIC ic(liveRegs, val, output, temp0, temp1);
  addIC(lir, allocateIC(ic));
}
// OptimizeSpreadCall via an inline cache (IonOptimizeSpreadCallIC).
void CodeGenerator::visitOptimizeSpreadCallCache(
    LOptimizeSpreadCallCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  ValueOperand val = ToValue(lir, LOptimizeSpreadCallCache::ValueIndex);
  ValueOperand output = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

  IonOptimizeSpreadCallIC ic(liveRegs, val, output, temp);
  addIC(lir, allocateIC(ic));
}
// CloseIter via an inline cache (IonCloseIterIC); the completion kind
// (normal/throw) comes from the MIR node.
void CodeGenerator::visitCloseIterCache(LCloseIterCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  Register iter = ToRegister(lir->iter());
  Register temp = ToRegister(lir->temp0());
  CompletionKind kind = CompletionKind(lir->mir()->completionKind());

  IonCloseIterIC ic(liveRegs, iter, temp, kind);
  addIC(lir, allocateIC(ic));
}
// OptimizeGetIterator via an inline cache (IonOptimizeGetIteratorIC).
void CodeGenerator::visitOptimizeGetIteratorCache(
    LOptimizeGetIteratorCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  ValueOperand val = ToValue(lir, LOptimizeGetIteratorCache::ValueIndex);
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  IonOptimizeGetIteratorIC ic(liveRegs, val, output, temp);
  addIC(lir, allocateIC(ic));
}
// Advance a native iterator: delegates to MacroAssembler::iteratorMore.
void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
  const Register obj = ToRegister(lir->iterator());
  const ValueOperand output = ToOutValue(lir);
  const Register temp = ToRegister(lir->temp0());

  masm.iteratorMore(obj, output, temp);
}
// Branch on whether the input is the magic "no more iteration" value.
void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
  ValueOperand input = ToValue(lir, LIsNoIterAndBranch::Input);
  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());

  masm.branchTestMagic(Assembler::Equal, input, ifTrue);

  // Fall through when the false successor is the next block.
  if (!isNextBlock(lir->ifFalse()->lir())) {
    masm.jump(ifFalse);
  }
}
// Close a native iterator: delegates to MacroAssembler::iteratorClose.
void CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) {
  const Register obj = ToRegister(lir->object());
  const Register temp0 = ToRegister(lir->temp0());
  const Register temp1 = ToRegister(lir->temp1());
  const Register temp2 = ToRegister(lir->temp2());

  masm.iteratorClose(obj, temp0, temp1, temp2);
}
14161 void CodeGenerator::visitArgumentsLength(LArgumentsLength* lir) {
14162 // read number of actual arguments from the JS frame.
14163 Register argc = ToRegister(lir->output());
14164 masm.loadNumActualArgs(FramePointer, argc);
14167 void CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir) {
14168 ValueOperand result = ToOutValue(lir);
14169 const LAllocation* index = lir->index();
14170 size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
14172 // This instruction is used to access actual arguments and formal arguments.
14173 // The number of Values on the stack is |max(numFormals, numActuals)|, so we
14174 // assert |index < numFormals || index < numActuals| in debug builds.
14175 DebugOnly<size_t> numFormals = gen->outerInfo().script()->function()->nargs();
14177 if (index->isConstant()) {
14178 int32_t i = index->toConstant()->toInt32();
14179 #ifdef DEBUG
14180 if (uint32_t(i) >= numFormals) {
14181 Label ok;
14182 Register argc = result.scratchReg();
14183 masm.loadNumActualArgs(FramePointer, argc);
14184 masm.branch32(Assembler::Above, argc, Imm32(i), &ok);
14185 masm.assumeUnreachable("Invalid argument index");
14186 masm.bind(&ok);
14188 #endif
14189 Address argPtr(FramePointer, sizeof(Value) * i + argvOffset);
14190 masm.loadValue(argPtr, result);
14191 } else {
14192 Register i = ToRegister(index);
14193 #ifdef DEBUG
14194 Label ok;
14195 Register argc = result.scratchReg();
14196 masm.branch32(Assembler::Below, i, Imm32(numFormals), &ok);
14197 masm.loadNumActualArgs(FramePointer, argc);
14198 masm.branch32(Assembler::Above, argc, i, &ok);
14199 masm.assumeUnreachable("Invalid argument index");
14200 masm.bind(&ok);
14201 #endif
14202 BaseValueIndex argPtr(FramePointer, i, argvOffset);
14203 masm.loadValue(argPtr, result);
14207 void CodeGenerator::visitGetFrameArgumentHole(LGetFrameArgumentHole* lir) {
14208 ValueOperand result = ToOutValue(lir);
14209 Register index = ToRegister(lir->index());
14210 Register length = ToRegister(lir->length());
14211 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
14212 size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
14214 Label outOfBounds, done;
14215 masm.spectreBoundsCheck32(index, length, spectreTemp, &outOfBounds);
14217 BaseValueIndex argPtr(FramePointer, index, argvOffset);
14218 masm.loadValue(argPtr, result);
14219 masm.jump(&done);
14221 masm.bind(&outOfBounds);
14222 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
14223 masm.moveValue(UndefinedValue(), result);
14225 masm.bind(&done);
14228 void CodeGenerator::visitRest(LRest* lir) {
// Materialize the rest-parameter array. When a template shape is available,
// try to inline-allocate a small empty array (falling back to nullptr on
// allocation failure); InitRestParameter then fills in or allocates the
// final result.
14229 Register numActuals = ToRegister(lir->numActuals());
14230 Register temp0 = ToRegister(lir->temp0());
14231 Register temp1 = ToRegister(lir->temp1());
14232 Register temp2 = ToRegister(lir->temp2());
14233 unsigned numFormals = lir->mir()->numFormals();
14235 if (Shape* shape = lir->mir()->shape()) {
// Inline-allocate an empty array with a fixed two-element capacity; the VM
// call below appends the rest arguments (growing the array if needed).
14236 uint32_t arrayLength = 0;
14237 uint32_t arrayCapacity = 2;
14238 gc::AllocKind allocKind = GuessArrayGCKind(arrayCapacity);
14239 MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
14240 allocKind = ForegroundToBackgroundAllocKind(allocKind);
14241 MOZ_ASSERT(GetGCKindSlots(allocKind) ==
14242 arrayCapacity + ObjectElements::VALUES_PER_HEADER);
14244 Label joinAlloc, failAlloc;
14245 masm.movePtr(ImmGCPtr(shape), temp0);
14246 masm.createArrayWithFixedElements(temp2, temp0, temp1, InvalidReg,
14247 arrayLength, arrayCapacity, 0, 0,
14248 allocKind, gc::Heap::Default, &failAlloc);
14249 masm.jump(&joinAlloc);
14251 masm.bind(&failAlloc);
// On inline-allocation failure, pass nullptr so InitRestParameter
// allocates the array itself.
14252 masm.movePtr(ImmPtr(nullptr), temp2);
14254 masm.bind(&joinAlloc);
14255 } else {
14256 masm.movePtr(ImmPtr(nullptr), temp2);
14259 // Set temp1 to the address of the first actual argument.
14260 size_t actualsOffset = JitFrameLayout::offsetOfActualArgs();
14261 masm.computeEffectiveAddress(Address(FramePointer, actualsOffset), temp1);
14263 // Compute array length: max(numActuals - numFormals, 0).
14264 Register lengthReg;
14265 if (numFormals) {
14266 lengthReg = temp0;
14267 Label emptyLength, joinLength;
14268 masm.branch32(Assembler::LessThanOrEqual, numActuals, Imm32(numFormals),
14269 &emptyLength);
14271 masm.move32(numActuals, lengthReg);
14272 masm.sub32(Imm32(numFormals), lengthReg);
14274 // Skip formal arguments.
14275 masm.addPtr(Imm32(sizeof(Value) * numFormals), temp1);
14277 masm.jump(&joinLength);
14279 masm.bind(&emptyLength);
14281 masm.move32(Imm32(0), lengthReg);
14283 // Leave temp1 pointed to the start of actuals() when the rest-array
14284 // length is zero. We don't use |actuals() + numFormals| because
14285 // |numFormals| can be any non-negative int32 value when this MRest was
14286 // created from scalar replacement optimizations. And it seems
14287 // questionable to compute a Value* pointer which points to who knows
14288 // where.
14290 masm.bind(&joinLength);
14291 } else {
14292 // Use numActuals directly when there are no formals.
14293 lengthReg = numActuals;
// VM-call arguments are pushed in reverse order:
// InitRestParameter(cx, length, rest, templateObj).
14296 pushArg(temp2);
14297 pushArg(temp1);
14298 pushArg(lengthReg);
14300 using Fn = JSObject* (*)(JSContext*, uint32_t, Value*, HandleObject);
14301 callVM<Fn, InitRestParameter>(lir);
14304 // Create a stackmap from the given safepoint, with the structure:
14306 // <reg dump, if any>
14307 // | ++ <body (general spill)>
14308 // | | ++ <space for Frame>
14309 // | | ++ <inbound args>
14310 // | | |
14311 // Lowest Addr Highest Addr
14312 // |
14313 // framePushedAtStackMapBase
14315 // The caller owns the resulting stackmap. This assumes a grow-down stack.
14317 // For non-debug builds, if the stackmap would contain no pointers, no
14318 // stackmap is created, and nullptr is returned. For a debug build, a
14319 // stackmap is always created and returned.
14321 // Depending on the type of safepoint, the stackmap may need to account for
14322 // spilled registers. WasmSafepointKind::LirCall corresponds to LIR nodes where
14323 // isCall() == true, for which the register allocator will spill/restore all
14324 // live registers at the LIR level - in this case, the LSafepoint sees only live
14325 // values on the stack, never in registers. WasmSafepointKind::CodegenCall, on
14326 // the other hand, is for LIR nodes which may manually spill/restore live
14327 // registers in codegen, in which case the stackmap must account for this. Traps
14328 // also require tracking of live registers, but spilling is handled by the trap
14329 // mechanism.
14330 static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
14331 const RegisterOffsets& trapExitLayout,
14332 size_t trapExitLayoutNumWords,
14333 size_t nInboundStackArgBytes,
14334 wasm::StackMap** result) {
14335 // Ensure this is defined on all return paths.
14336 *result = nullptr;
14338 // The size of the wasm::Frame itself.
14339 const size_t nFrameBytes = sizeof(wasm::Frame);
14341 // This is the number of bytes spilled for live registers, outside of a trap.
14342 // For traps, trapExitLayout and trapExitLayoutNumWords will be used.
14343 const size_t nRegisterDumpBytes =
14344 MacroAssembler::PushRegsInMaskSizeInBytes(safepoint.liveRegs());
14346 // As mentioned above, for WasmSafepointKind::LirCall, register spills and
14347 // restores are handled at the LIR level and there should therefore be no live
14348 // registers to handle here.
14349 MOZ_ASSERT_IF(safepoint.wasmSafepointKind() == WasmSafepointKind::LirCall,
14350 nRegisterDumpBytes == 0);
14351 MOZ_ASSERT(nRegisterDumpBytes % sizeof(void*) == 0);
14353 // This is the number of bytes in the general spill area, below the Frame.
14354 const size_t nBodyBytes = safepoint.framePushedAtStackMapBase();
14356 // This is the number of bytes in the general spill area, the Frame, and the
14357 // incoming args, but not including any register dump area.
14358 const size_t nNonRegisterBytes =
14359 nBodyBytes + nFrameBytes + nInboundStackArgBytes;
14360 MOZ_ASSERT(nNonRegisterBytes % sizeof(void*) == 0);
14362 // This is the number of bytes in the register dump area, if any, below the
14363 // general spill area.
14364 const size_t nRegisterBytes =
14365 (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap)
14366 ? (trapExitLayoutNumWords * sizeof(void*))
14367 : nRegisterDumpBytes;
14369 // This is the total number of bytes covered by the map.
14370 const DebugOnly<size_t> nTotalBytes = nNonRegisterBytes + nRegisterBytes;
14372 // Create the stackmap initially in this vector. Since most frames will
14373 // contain 128 or fewer words, heap allocation is avoided in the majority of
14374 // cases. vec[0] is for the lowest address in the map, vec[N-1] is for the
14375 // highest address in the map.
14376 wasm::StackMapBoolVector vec;
14378 // Keep track of whether we've actually seen any refs.
14379 bool hasRefs = false;
14381 // REG DUMP AREA, if any.
// Populate the register-dump portion of the map: for each register holding
// a wasm anyref, set the bit at its spilled location.
14382 const LiveGeneralRegisterSet wasmAnyRefRegs = safepoint.wasmAnyRefRegs();
14383 GeneralRegisterForwardIterator wasmAnyRefRegsIter(wasmAnyRefRegs);
14384 switch (safepoint.wasmSafepointKind()) {
14385 case WasmSafepointKind::LirCall:
14386 case WasmSafepointKind::CodegenCall: {
14387 size_t spilledNumWords = nRegisterDumpBytes / sizeof(void*);
14388 if (!vec.appendN(false, spilledNumWords)) {
14389 return false;
14392 for (; wasmAnyRefRegsIter.more(); ++wasmAnyRefRegsIter) {
14393 Register reg = *wasmAnyRefRegsIter;
14394 size_t offsetFromSpillBase =
14395 safepoint.liveRegs().gprs().offsetOfPushedRegister(reg) /
14396 sizeof(void*);
14397 MOZ_ASSERT(0 < offsetFromSpillBase &&
14398 offsetFromSpillBase <= spilledNumWords);
14399 size_t offsetInVector = spilledNumWords - offsetFromSpillBase;
14401 vec[offsetInVector] = true;
14402 hasRefs = true;
14405 // Float and vector registers do not have to be handled; they cannot
14406 // contain wasm anyrefs, and they are spilled after general-purpose
14407 // registers. Gprs are therefore closest to the spill base and thus their
14408 // offset calculation does not need to account for other spills.
14409 } break;
14410 case WasmSafepointKind::Trap: {
14411 if (!vec.appendN(false, trapExitLayoutNumWords)) {
14412 return false;
14414 for (; wasmAnyRefRegsIter.more(); ++wasmAnyRefRegsIter) {
14415 Register reg = *wasmAnyRefRegsIter;
14416 size_t offsetFromTop = trapExitLayout.getOffset(reg);
14418 // If this doesn't hold, the associated register wasn't saved by
14419 // the trap exit stub. Better to crash now than much later, in
14420 // some obscure place, and possibly with security consequences.
14421 MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);
14423 // offsetFromTop is an offset in words down from the highest
14424 // address in the exit stub save area. Switch it around to be an
14425 // offset up from the bottom of the (integer register) save area.
14426 size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;
14428 vec[offsetFromBottom] = true;
14429 hasRefs = true;
14431 } break;
14432 default:
14433 MOZ_CRASH("unreachable");
14436 // BODY (GENERAL SPILL) AREA and FRAME and INCOMING ARGS
14437 // Deal with roots on the stack.
14438 size_t wordsSoFar = vec.length();
14439 if (!vec.appendN(false, nNonRegisterBytes / sizeof(void*))) {
14440 return false;
14442 const LSafepoint::SlotList& wasmAnyRefSlots = safepoint.wasmAnyRefSlots();
14443 for (SafepointSlotEntry wasmAnyRefSlot : wasmAnyRefSlots) {
14444 // The following needs to correspond with JitFrameLayout::slotRef
14445 // wasmAnyRefSlot.stack == 0 means the slot is in the args area
14446 if (wasmAnyRefSlot.stack) {
14447 // It's a slot in the body allocation, so .slot is interpreted
14448 // as an index downwards from the Frame*
14449 MOZ_ASSERT(wasmAnyRefSlot.slot <= nBodyBytes);
14450 uint32_t offsetInBytes = nBodyBytes - wasmAnyRefSlot.slot;
14451 MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
14452 vec[wordsSoFar + offsetInBytes / sizeof(void*)] = true;
14453 } else {
14454 // It's an argument slot
14455 MOZ_ASSERT(wasmAnyRefSlot.slot < nInboundStackArgBytes);
14456 uint32_t offsetInBytes = nBodyBytes + nFrameBytes + wasmAnyRefSlot.slot;
14457 MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
14458 vec[wordsSoFar + offsetInBytes / sizeof(void*)] = true;
14460 hasRefs = true;
14463 #ifndef DEBUG
14464 // We saw no references, and this is a non-debug build, so don't bother
14465 // building the stackmap.
14466 if (!hasRefs) {
14467 return true;
14469 #endif
14471 // Convert vec into a wasm::StackMap.
// Sanity check: the bool vector covers exactly the byte span computed above.
14472 MOZ_ASSERT(vec.length() * sizeof(void*) == nTotalBytes);
14473 wasm::StackMap* stackMap =
14474 wasm::ConvertStackMapBoolVectorToStackMap(vec, hasRefs);
14475 if (!stackMap) {
14476 return false;
14478 if (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap) {
14479 stackMap->setExitStubWords(trapExitLayoutNumWords);
14482 // Record in the map, how far down from the highest address the Frame* is.
14483 // Take the opportunity to check that we haven't marked any part of the
14484 // Frame itself as a pointer.
14485 stackMap->setFrameOffsetFromTop((nInboundStackArgBytes + nFrameBytes) /
14486 sizeof(void*));
14487 #ifdef DEBUG
14488 for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
14489 MOZ_ASSERT(stackMap->getBit(stackMap->header.numMappedWords -
14490 stackMap->header.frameOffsetFromTop + i) == 0);
14492 #endif
14494 *result = stackMap;
14495 return true;
14498 bool CodeGenerator::generateWasm(
// Emit a complete wasm function: prologue, stack check (with its own entry
// stackmap), body, epilogue and out-of-line code, then convert all LIR
// safepoints into wasm stackmaps.
14499 wasm::CallIndirectId callIndirectId, wasm::BytecodeOffset trapOffset,
14500 const wasm::ArgTypeVector& argTypes, const RegisterOffsets& trapExitLayout,
14501 size_t trapExitLayoutNumWords, wasm::FuncOffsets* offsets,
14502 wasm::StackMaps* stackMaps, wasm::Decoder* decoder) {
14503 AutoCreatedBy acb(masm, "CodeGenerator::generateWasm");
14505 JitSpew(JitSpew_Codegen, "# Emitting wasm code");
14507 size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(argTypes);
14508 inboundStackArgBytes_ = nInboundStackArgBytes;
14510 wasm::GenerateFunctionPrologue(masm, callIndirectId, mozilla::Nothing(),
14511 offsets);
14513 MOZ_ASSERT(masm.framePushed() == 0);
14515 // Very large frames are implausible, probably an attack.
14516 if (frameSize() > wasm::MaxFrameSize) {
14517 return decoder->fail(decoder->beginOffset(), "stack frame is too large");
14520 if (omitOverRecursedCheck()) {
14521 masm.reserveStack(frameSize());
14522 } else {
// The stack check can trap, so it needs a stackmap of its own covering the
// function entry state.
14523 std::pair<CodeOffset, uint32_t> pair =
14524 masm.wasmReserveStackChecked(frameSize(), trapOffset);
14525 CodeOffset trapInsnOffset = pair.first;
14526 size_t nBytesReservedBeforeTrap = pair.second;
14528 wasm::StackMap* functionEntryStackMap = nullptr;
14529 if (!CreateStackMapForFunctionEntryTrap(
14530 argTypes, trapExitLayout, trapExitLayoutNumWords,
14531 nBytesReservedBeforeTrap, nInboundStackArgBytes,
14532 &functionEntryStackMap)) {
14533 return false;
14536 // In debug builds, we'll always have a stack map, even if there are no
14537 // refs to track.
14538 MOZ_ASSERT(functionEntryStackMap);
14540 if (functionEntryStackMap &&
14541 !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
14542 functionEntryStackMap)) {
14543 functionEntryStackMap->destroy();
14544 return false;
14548 MOZ_ASSERT(masm.framePushed() == frameSize());
14550 if (!generateBody()) {
14551 return false;
14554 masm.bind(&returnLabel_);
14555 wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);
14557 if (!generateOutOfLineCode()) {
14558 return false;
14561 masm.flush();
14562 if (masm.oom()) {
14563 return false;
14566 offsets->end = masm.currentOffset();
// Wasm compilation must not use any of the Ion-only side tables.
14568 MOZ_ASSERT(!masm.failureLabel()->used());
14569 MOZ_ASSERT(snapshots_.listSize() == 0);
14570 MOZ_ASSERT(snapshots_.RVATableSize() == 0);
14571 MOZ_ASSERT(recovers_.size() == 0);
14572 MOZ_ASSERT(graph.numConstants() == 0);
14573 MOZ_ASSERT(osiIndices_.empty());
14574 MOZ_ASSERT(icList_.empty());
14575 MOZ_ASSERT(safepoints_.size() == 0);
14576 MOZ_ASSERT(!scriptCounts_);
14578 // Convert the safepoints to stackmaps and add them to our running
14579 // collection thereof.
14580 for (CodegenSafepointIndex& index : safepointIndices_) {
14581 wasm::StackMap* stackMap = nullptr;
14582 if (!CreateStackMapFromLSafepoint(*index.safepoint(), trapExitLayout,
14583 trapExitLayoutNumWords,
14584 nInboundStackArgBytes, &stackMap)) {
14585 return false;
14588 // In debug builds, we'll always have a stack map.
14589 MOZ_ASSERT(stackMap);
14590 if (!stackMap) {
14591 continue;
14594 if (!stackMaps->add((uint8_t*)(uintptr_t)index.displacement(), stackMap)) {
14595 stackMap->destroy();
14596 return false;
14600 return true;
14603 bool CodeGenerator::generate() {
14604 AutoCreatedBy acb(masm, "CodeGenerator::generate");
14606 JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
14607 gen->outerInfo().script()->filename(),
14608 gen->outerInfo().script()->lineno(),
14609 gen->outerInfo().script()->column().oneOriginValue());
14611 // Initialize native code table with an entry to the start of
14612 // top-level script.
14613 InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
14614 jsbytecode* startPC = tree->script()->code();
14615 BytecodeSite* startSite = new (gen->alloc()) BytecodeSite(tree, startPC);
14616 if (!addNativeToBytecodeEntry(startSite)) {
14617 return false;
14620 if (!safepoints_.init(gen->alloc())) {
14621 return false;
14624 perfSpewer_.recordOffset(masm, "Prologue");
14625 if (!generatePrologue()) {
14626 return false;
14629 // Reset native => bytecode map table with top-level script and startPc.
14630 if (!addNativeToBytecodeEntry(startSite)) {
14631 return false;
14634 if (!generateBody()) {
14635 return false;
14638 // Reset native => bytecode map table with top-level script and startPc.
14639 if (!addNativeToBytecodeEntry(startSite)) {
14640 return false;
14643 perfSpewer_.recordOffset(masm, "Epilogue");
14644 if (!generateEpilogue()) {
14645 return false;
14648 // Reset native => bytecode map table with top-level script and startPc.
14649 if (!addNativeToBytecodeEntry(startSite)) {
14650 return false;
14653 perfSpewer_.recordOffset(masm, "InvalidateEpilogue");
14654 generateInvalidateEpilogue();
14656 // native => bytecode entries for OOL code will be added
14657 // by CodeGeneratorShared::generateOutOfLineCode
14658 perfSpewer_.recordOffset(masm, "OOLCode");
14659 if (!generateOutOfLineCode()) {
14660 return false;
14663 // Add terminal entry.
14664 if (!addNativeToBytecodeEntry(startSite)) {
14665 return false;
14668 // Dump Native to bytecode entries to spew.
14669 dumpNativeToBytecodeEntries();
14671 // We encode safepoints after the OSI-point offsets have been determined.
14672 if (!encodeSafepoints()) {
14673 return false;
14676 return !masm.oom();
14679 static bool AddInlinedCompilations(JSContext* cx, HandleScript script,
14680 IonCompilationId compilationId,
14681 const WarpSnapshot* snapshot,
14682 bool* isValid) {
14683 MOZ_ASSERT(!*isValid);
14684 RecompileInfo recompileInfo(script, compilationId);
14686 JitZone* jitZone = cx->zone()->jitZone();
14688 for (const auto* scriptSnapshot : snapshot->scripts()) {
14689 JSScript* inlinedScript = scriptSnapshot->script();
14690 if (inlinedScript == script) {
14691 continue;
14694 // TODO(post-Warp): This matches FinishCompilation and is necessary to
14695 // ensure in-progress compilations are canceled when an inlined functon
14696 // becomes a debuggee. See the breakpoint-14.js jit-test.
14697 // When TI is gone, try to clean this up by moving AddInlinedCompilations to
14698 // WarpOracle so that we can handle this as part of addPendingRecompile
14699 // instead of requiring this separate check.
14700 if (inlinedScript->isDebuggee()) {
14701 *isValid = false;
14702 return true;
14705 if (!jitZone->addInlinedCompilation(recompileInfo, inlinedScript)) {
14706 return false;
14710 *isValid = true;
14711 return true;
14714 bool CodeGenerator::link(JSContext* cx, const WarpSnapshot* snapshot) {
// Finalize the compilation on the main thread: allocate the IonScript,
// link the generated JitCode, patch embedded pointers/ICs, copy all side
// tables, and attach the result to the script's JitScript.
14715 AutoCreatedBy acb(masm, "CodeGenerator::link");
14717 // We cancel off-thread Ion compilations in a few places during GC, but if
14718 // this compilation was performed off-thread it will already have been
14719 // removed from the relevant lists by this point. Don't allow GC here.
14720 JS::AutoAssertNoGC nogc(cx);
14722 RootedScript script(cx, gen->outerInfo().script());
14723 MOZ_ASSERT(!script->hasIonScript());
14725 // Perform any read barriers which were skipped while compiling the
14726 // script, which may have happened off-thread.
14727 JitZone* jitZone = cx->zone()->jitZone();
14728 jitZone->performStubReadBarriers(zoneStubsToReadBarrier_);
14730 if (scriptCounts_ && !script->hasScriptCounts() &&
14731 !script->initScriptCounts(cx)) {
14732 return false;
14735 IonCompilationId compilationId =
14736 cx->runtime()->jitRuntime()->nextCompilationId();
14737 jitZone->currentCompilationIdRef().emplace(compilationId);
14738 auto resetCurrentId = mozilla::MakeScopeExit(
14739 [jitZone] { jitZone->currentCompilationIdRef().reset(); });
14741 // Record constraints. If an error occurred, returns false and potentially
14742 // prevents future compilations. Otherwise, if an invalidation occurred,
14743 // then skip the current compilation.
14744 bool isValid = false;
14746 // If an inlined script is invalidated (for example, by attaching
14747 // a debugger), we must also invalidate the parent IonScript.
14748 if (!AddInlinedCompilations(cx, script, compilationId, snapshot, &isValid)) {
14749 return false;
14751 if (!isValid) {
14752 return true;
14755 uint32_t argumentSlots = (gen->outerInfo().nargs() + 1) * sizeof(Value);
14757 size_t numNurseryObjects = snapshot->nurseryObjects().length();
14759 IonScript* ionScript = IonScript::New(
14760 cx, compilationId, graph.localSlotsSize(), argumentSlots, frameDepth_,
14761 snapshots_.listSize(), snapshots_.RVATableSize(), recovers_.size(),
14762 graph.numConstants(), numNurseryObjects, safepointIndices_.length(),
14763 osiIndices_.length(), icList_.length(), runtimeData_.length(),
14764 safepoints_.size());
14765 if (!ionScript) {
14766 return false;
14768 #ifdef DEBUG
14769 ionScript->setICHash(snapshot->icHash());
14770 #endif
14772 auto freeIonScript = mozilla::MakeScopeExit([&ionScript] {
14773 // Use js_free instead of IonScript::Destroy: the cache list is still
14774 // uninitialized.
14775 js_free(ionScript);
14778 Linker linker(masm);
14779 JitCode* code = linker.newCode(cx, CodeKind::Ion);
14780 if (!code) {
14781 return false;
14784 // Encode native to bytecode map if profiling is enabled.
14785 if (isProfilerInstrumentationEnabled()) {
14786 // Generate native-to-bytecode main table.
14787 IonEntry::ScriptList scriptList;
14788 if (!generateCompactNativeToBytecodeMap(cx, code, scriptList)) {
14789 return false;
14792 uint8_t* ionTableAddr =
14793 ((uint8_t*)nativeToBytecodeMap_.get()) + nativeToBytecodeTableOffset_;
14794 JitcodeIonTable* ionTable = (JitcodeIonTable*)ionTableAddr;
14796 // Construct the IonEntry that will go into the global table.
14797 auto entry = MakeJitcodeGlobalEntry<IonEntry>(
14798 cx, code, code->raw(), code->rawEnd(), std::move(scriptList), ionTable);
14799 if (!entry) {
14800 return false;
14802 (void)nativeToBytecodeMap_.release(); // Table is now owned by |entry|.
14804 // Add entry to the global table.
14805 JitcodeGlobalTable* globalTable =
14806 cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
14807 if (!globalTable->addEntry(std::move(entry))) {
14808 return false;
14811 // Mark the jitcode as having a bytecode map.
14812 code->setHasBytecodeMap();
14813 } else {
14814 // Add a dummy jitcodeGlobalTable entry.
14815 auto entry = MakeJitcodeGlobalEntry<DummyEntry>(cx, code, code->raw(),
14816 code->rawEnd());
14817 if (!entry) {
14818 return false;
14821 // Add entry to the global table.
14822 JitcodeGlobalTable* globalTable =
14823 cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
14824 if (!globalTable->addEntry(std::move(entry))) {
14825 return false;
14828 // Mark the jitcode as having a bytecode map.
14829 code->setHasBytecodeMap();
14832 ionScript->setMethod(code);
14834 // If the Gecko Profiler is enabled, mark IonScript as having been
14835 // instrumented accordingly.
14836 if (isProfilerInstrumentationEnabled()) {
14837 ionScript->setHasProfilingInstrumentation();
// Patch the invalidation epilogue and all recorded label/IC sites to point
// at the now-known IonScript and IC addresses.
14840 Assembler::PatchDataWithValueCheck(
14841 CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript),
14842 ImmPtr((void*)-1));
14844 for (CodeOffset offset : ionScriptLabels_) {
14845 Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, offset),
14846 ImmPtr(ionScript), ImmPtr((void*)-1));
14849 for (NurseryObjectLabel label : ionNurseryObjectLabels_) {
14850 void* entry = ionScript->addressOfNurseryObject(label.nurseryIndex);
14851 Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
14852 ImmPtr(entry), ImmPtr((void*)-1));
14855 // for generating inline caches during the execution.
14856 if (runtimeData_.length()) {
14857 ionScript->copyRuntimeData(&runtimeData_[0]);
14859 if (icList_.length()) {
14860 ionScript->copyICEntries(&icList_[0]);
14863 for (size_t i = 0; i < icInfo_.length(); i++) {
14864 IonIC& ic = ionScript->getICFromIndex(i);
14865 Assembler::PatchDataWithValueCheck(
14866 CodeLocationLabel(code, icInfo_[i].icOffsetForJump),
14867 ImmPtr(ic.codeRawPtr()), ImmPtr((void*)-1));
14868 Assembler::PatchDataWithValueCheck(
14869 CodeLocationLabel(code, icInfo_[i].icOffsetForPush), ImmPtr(&ic),
14870 ImmPtr((void*)-1));
14873 JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)", (void*)ionScript,
14874 (void*)code->raw());
14876 ionScript->setInvalidationEpilogueDataOffset(
14877 invalidateEpilogueData_.offset());
14878 if (jsbytecode* osrPc = gen->outerInfo().osrPc()) {
14879 ionScript->setOsrPc(osrPc);
14880 ionScript->setOsrEntryOffset(getOsrEntryOffset());
14882 ionScript->setInvalidationEpilogueOffset(invalidate_.offset());
14884 perfSpewer_.saveProfile(cx, script, code);
14886 #ifdef MOZ_VTUNE
14887 vtune::MarkScript(code, script, "ion");
14888 #endif
14890 // Set a Ion counter hint for this script.
14891 if (cx->runtime()->jitRuntime()->hasJitHintsMap()) {
14892 JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
14893 jitHints->recordIonCompilation(script);
14896 // for marking during GC.
14897 if (safepointIndices_.length()) {
14898 ionScript->copySafepointIndices(&safepointIndices_[0]);
14900 if (safepoints_.size()) {
14901 ionScript->copySafepoints(&safepoints_);
14904 // for recovering from an Ion Frame.
14905 if (osiIndices_.length()) {
14906 ionScript->copyOsiIndices(&osiIndices_[0]);
14908 if (snapshots_.listSize()) {
14909 ionScript->copySnapshots(&snapshots_);
14911 MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
14912 if (recovers_.size()) {
14913 ionScript->copyRecovers(&recovers_);
14915 if (graph.numConstants()) {
14916 const Value* vp = graph.constantPool();
14917 ionScript->copyConstants(vp);
// If any constant is a nursery-allocated GC thing, register the script in
// the store buffer once so the constants are traced on minor GC.
14918 for (size_t i = 0; i < graph.numConstants(); i++) {
14919 const Value& v = vp[i];
14920 if (v.isGCThing()) {
14921 if (gc::StoreBuffer* sb = v.toGCThing()->storeBuffer()) {
14922 sb->putWholeCell(script);
14923 break;
14929 // Attach any generated script counts to the script.
14930 if (IonScriptCounts* counts = extractScriptCounts()) {
14931 script->addIonCounts(counts);
14934 // WARNING: Code after this point must be infallible!
14936 // Copy the list of nursery objects. Note that the store buffer can add
14937 // HeapPtr edges that must be cleared in IonScript::Destroy. See the
14938 // infallibility warning above.
14939 const auto& nurseryObjects = snapshot->nurseryObjects();
14940 for (size_t i = 0; i < nurseryObjects.length(); i++) {
14941 ionScript->nurseryObjects()[i].init(nurseryObjects[i]);
14944 // Transfer ownership of the IonScript to the JitScript. At this point enough
14945 // of the IonScript must be initialized for IonScript::Destroy to work.
14946 freeIonScript.release();
14947 script->jitScript()->setIonScript(script, ionScript);
14949 return true;
14952 // An out-of-line path to convert a boxed int32 to either a float or double.
14953 class OutOfLineUnboxFloatingPoint : public OutOfLineCodeBase<CodeGenerator> {
14954 LUnboxFloatingPoint* unboxFloatingPoint_;
14956 public:
14957 explicit OutOfLineUnboxFloatingPoint(LUnboxFloatingPoint* unboxFloatingPoint)
14958 : unboxFloatingPoint_(unboxFloatingPoint) {}
14960 void accept(CodeGenerator* codegen) override {
14961 codegen->visitOutOfLineUnboxFloatingPoint(this);
14964 LUnboxFloatingPoint* unboxFloatingPoint() const {
14965 return unboxFloatingPoint_;
14969 void CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir) {
14970 const ValueOperand box = ToValue(lir, LUnboxFloatingPoint::Input);
14971 const LDefinition* result = lir->output();
14973 // Out-of-line path to convert int32 to double or bailout
14974 // if this instruction is fallible.
14975 OutOfLineUnboxFloatingPoint* ool =
14976 new (alloc()) OutOfLineUnboxFloatingPoint(lir);
14977 addOutOfLineCode(ool, lir->mir());
14979 FloatRegister resultReg = ToFloatRegister(result);
14980 masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
14981 masm.unboxDouble(box, resultReg);
14982 if (lir->type() == MIRType::Float32) {
14983 masm.convertDoubleToFloat32(resultReg, resultReg);
14985 masm.bind(ool->rejoin());
14988 void CodeGenerator::visitOutOfLineUnboxFloatingPoint(
14989 OutOfLineUnboxFloatingPoint* ool) {
14990 LUnboxFloatingPoint* ins = ool->unboxFloatingPoint();
14991 const ValueOperand value = ToValue(ins, LUnboxFloatingPoint::Input);
14993 if (ins->mir()->fallible()) {
14994 Label bail;
14995 masm.branchTestInt32(Assembler::NotEqual, value, &bail);
14996 bailoutFrom(&bail, ins->snapshot());
14998 masm.int32ValueToFloatingPoint(value, ToFloatRegister(ins->output()),
14999 ins->type());
15000 masm.jump(ool->rejoin());
15003 void CodeGenerator::visitCallBindVar(LCallBindVar* lir) {
15004 pushArg(ToRegister(lir->environmentChain()));
15006 using Fn = JSObject* (*)(JSContext*, JSObject*);
15007 callVM<Fn, BindVarOperation>(lir);
15010 void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
// Megamorphic obj[id] = value: try the megamorphic set-slot cache inline,
// and fall back to a VM call on a cache miss. The cache-hit path still needs
// a post-write barrier check below.
15011 Register obj = ToRegister(lir->getOperand(0));
15012 ValueOperand idVal = ToValue(lir, LMegamorphicSetElement::IndexIndex);
15013 ValueOperand value = ToValue(lir, LMegamorphicSetElement::ValueIndex);
15015 Register temp0 = ToRegister(lir->temp0());
15016 // See comment in LIROps.yaml (x86 is short on registers)
15017 #ifndef JS_CODEGEN_X86
15018 Register temp1 = ToRegister(lir->temp1());
15019 Register temp2 = ToRegister(lir->temp2());
15020 #endif
15022 Label cacheHit, done;
15023 #ifdef JS_CODEGEN_X86
15024 masm.emitMegamorphicCachedSetSlot(
15025 idVal, obj, temp0, value, &cacheHit,
15026 [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
15027 EmitPreBarrier(masm, addr, mirType);
15029 #else
15030 masm.emitMegamorphicCachedSetSlot(
15031 idVal, obj, temp0, temp1, temp2, value, &cacheHit,
15032 [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
15033 EmitPreBarrier(masm, addr, mirType);
15035 #endif
// Cache miss: perform the full SetElement via a VM call (arguments pushed
// in reverse order).
15037 pushArg(Imm32(lir->mir()->strict()));
15038 pushArg(ToValue(lir, LMegamorphicSetElement::ValueIndex));
15039 pushArg(ToValue(lir, LMegamorphicSetElement::IndexIndex));
15040 pushArg(obj);
15042 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
15043 callVM<Fn, js::jit::SetElementMegamorphic<true>>(lir);
15045 masm.jump(&done);
15046 masm.bind(&cacheHit);
// Cache hit: emit the post-write barrier when a tenured object now stores a
// nursery-allocated value.
15048 masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
15049 masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);
15051 saveVolatile(temp0);
15052 emitPostWriteBarrier(obj);
15053 restoreVolatile(temp0);
15055 masm.bind(&done);
15058 void CodeGenerator::visitLoadScriptedProxyHandler(
15059 LLoadScriptedProxyHandler* ins) {
15060 const Register obj = ToRegister(ins->getOperand(0));
15061 ValueOperand output = ToOutValue(ins);
15063 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
15064 output.scratchReg());
15065 masm.loadValue(
15066 Address(output.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
15067 ScriptedProxyHandler::HANDLER_EXTRA)),
15068 output);
15071 #ifdef JS_PUNBOX64
15072 void CodeGenerator::visitCheckScriptedProxyGetResult(
// Validate the result of a scripted proxy [[Get]] against the target's own
// property invariants, but only when the target object is flagged as
// needing proxy result validation; otherwise fall through.
15073 LCheckScriptedProxyGetResult* ins) {
15074 ValueOperand target = ToValue(ins, LCheckScriptedProxyGetResult::TargetIndex);
15075 ValueOperand value = ToValue(ins, LCheckScriptedProxyGetResult::ValueIndex);
15076 ValueOperand id = ToValue(ins, LCheckScriptedProxyGetResult::IdIndex);
15077 Register scratch = ToRegister(ins->temp0());
15078 Register scratch2 = ToRegister(ins->temp1());
15080 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
15081 MutableHandleValue);
15082 OutOfLineCode* ool = oolCallVM<Fn, CheckProxyGetByValueResult>(
15083 ins, ArgList(scratch, id, value), StoreValueTo(value));
15085 masm.unboxObject(target, scratch);
15086 masm.branchTestObjectNeedsProxyResultValidation(Assembler::NonZero, scratch,
15087 scratch2, ool->entry());
15088 masm.bind(ool->rejoin());
15090 #endif
// Converts a jsid-like Value to a string or symbol. Strings and symbols
// pass through unchanged; int32 ids are converted to strings (fast path via
// the static-strings table, slow path via Int32ToString in the VM); any
// other tag bails out.
15092 void CodeGenerator::visitIdToStringOrSymbol(LIdToStringOrSymbol* ins) {
15093 ValueOperand id = ToValue(ins, LIdToStringOrSymbol::IdIndex);
15094 ValueOperand output = ToOutValue(ins);
15095 Register scratch = ToRegister(ins->temp0());
15097 masm.moveValue(id, output);
15099 Label done, callVM;
15100 Label bail;
// Dispatch on the value's tag (ScratchTagScope scope; closing brace elided
// in this listing).
15102 ScratchTagScope tag(masm, output);
15103 masm.splitTagForTest(output, tag);
15104 masm.branchTestString(Assembler::Equal, tag, &done);
15105 masm.branchTestSymbol(Assembler::Equal, tag, &done);
15106 masm.branchTestInt32(Assembler::NotEqual, tag, &bail);
15109 masm.unboxInt32(output, scratch);
15111 using Fn = JSLinearString* (*)(JSContext*, int);
15112 OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
15113 ins, ArgList(scratch), StoreRegisterTo(output.scratchReg()));
// Fast path: small ints hit the runtime's static string table.
15115 masm.lookupStaticIntString(scratch, output.scratchReg(),
15116 gen->runtime->staticStrings(), ool->entry());
15118 masm.bind(ool->rejoin());
15119 masm.tagValue(JSVAL_TYPE_STRING, output.scratchReg(), output);
15120 masm.bind(&done);
15122 bailoutFrom(&bail, ins->snapshot());
// Loads a boxed Value from a native object's fixed slot into the output.
15125 void CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins) {
15126 const Register obj = ToRegister(ins->getOperand(0));
15127 size_t slot = ins->mir()->slot();
15128 ValueOperand result = ToOutValue(ins);
15130 masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
// Loads a fixed slot and unboxes it into a typed register; the slot is
// statically known (via MIR) to hold a value of |type|, so the unbox is
// infallible here.
15133 void CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins) {
15134 const Register obj = ToRegister(ins->getOperand(0));
15135 size_t slot = ins->mir()->slot();
15136 AnyRegister result = ToAnyRegister(ins->getDef(0));
15137 MIRType type = ins->mir()->type();
15139 masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)),
15140 type, result);
// Shared helper: load a Value from |src| and unbox it as |type| into |dest|.
// - MIRType::Double always goes through ensureDouble (accepts int32 or
//   double, jumps to |fail| otherwise) regardless of |fallible|.
// - Otherwise, when |fallible|, a type-checked fallibleUnbox* is used and a
//   tag mismatch jumps to |fail|; when infallible, the unchecked
//   loadUnboxedValue is used and |fail| is unused.
// |src| may be an Address or BaseIndex (templated).
15143 template <typename T>
15144 static void EmitLoadAndUnbox(MacroAssembler& masm, const T& src, MIRType type,
15145 bool fallible, AnyRegister dest, Label* fail) {
15146 if (type == MIRType::Double) {
15147 MOZ_ASSERT(dest.isFloat());
15148 masm.ensureDouble(src, dest.fpu(), fail);
15149 return;
15151 if (fallible) {
15152 switch (type) {
15153 case MIRType::Int32:
15154 masm.fallibleUnboxInt32(src, dest.gpr(), fail);
15155 break;
15156 case MIRType::Boolean:
15157 masm.fallibleUnboxBoolean(src, dest.gpr(), fail);
15158 break;
15159 case MIRType::Object:
15160 masm.fallibleUnboxObject(src, dest.gpr(), fail);
15161 break;
15162 case MIRType::String:
15163 masm.fallibleUnboxString(src, dest.gpr(), fail);
15164 break;
15165 case MIRType::Symbol:
15166 masm.fallibleUnboxSymbol(src, dest.gpr(), fail);
15167 break;
15168 case MIRType::BigInt:
15169 masm.fallibleUnboxBigInt(src, dest.gpr(), fail);
15170 break;
15171 default:
15172 MOZ_CRASH("Unexpected MIRType");
15174 return;
// Infallible case: type is guaranteed, plain unboxing load.
15176 masm.loadUnboxedValue(src, type, dest);
// Loads a fixed slot and unboxes it; bails out on a type mismatch when the
// MIR node is fallible.
15179 void CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins) {
15180 const MLoadFixedSlotAndUnbox* mir = ins->mir();
15181 MIRType type = mir->type();
15182 Register input = ToRegister(ins->object());
15183 AnyRegister result = ToAnyRegister(ins->output());
15184 size_t slot = mir->slot();
15186 Address address(input, NativeObject::getFixedSlotOffset(slot));
15188 Label bail;
15189 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
15190 if (mir->fallible()) {
15191 bailoutFrom(&bail, ins->snapshot());
// Same as visitLoadFixedSlotAndUnbox, but the input register already points
// at the object's dynamic-slots array, so the address is slot * sizeof(Value).
15195 void CodeGenerator::visitLoadDynamicSlotAndUnbox(
15196 LLoadDynamicSlotAndUnbox* ins) {
15197 const MLoadDynamicSlotAndUnbox* mir = ins->mir();
15198 MIRType type = mir->type();
15199 Register input = ToRegister(ins->slots());
15200 AnyRegister result = ToAnyRegister(ins->output());
15201 size_t slot = mir->slot();
15203 Address address(input, slot * sizeof(JS::Value));
15205 Label bail;
15206 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
15207 if (mir->fallible()) {
15208 bailoutFrom(&bail, ins->snapshot());
// Loads a dense element and unboxes it. The index may be a compile-time
// constant (plain Address) or a register (BaseObjectElementIndex); bails on
// type mismatch when fallible.
15212 void CodeGenerator::visitLoadElementAndUnbox(LLoadElementAndUnbox* ins) {
15213 const MLoadElementAndUnbox* mir = ins->mir();
15214 MIRType type = mir->type();
15215 Register elements = ToRegister(ins->elements());
15216 AnyRegister result = ToAnyRegister(ins->output());
15218 Label bail;
15219 if (ins->index()->isConstant()) {
// Static guarantee that index * sizeof(Value) cannot overflow int32.
15220 NativeObject::elementsSizeMustNotOverflow();
15221 int32_t offset = ToInt32(ins->index()) * sizeof(Value);
15222 Address address(elements, offset);
15223 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
15224 } else {
15225 BaseObjectElementIndex address(elements, ToRegister(ins->index()));
15226 EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
15229 if (mir->fallible()) {
15230 bailoutFrom(&bail, ins->snapshot());
// Out-of-line stub data for atomizing a string stored in an object slot.
// Captures the LIR node, the register holding the (non-atom) string, the
// slot's address (so the atomized result can be written back), and the
// destination (typed register or ValueOperand). Code is emitted by
// CodeGenerator::visitOutOfLineAtomizeSlot.
15234 class OutOfLineAtomizeSlot : public OutOfLineCodeBase<CodeGenerator> {
15235 LInstruction* lir_;
15236 Register stringReg_;
15237 Address slotAddr_;
15238 TypedOrValueRegister dest_;
15240 public:
15241 OutOfLineAtomizeSlot(LInstruction* lir, Register stringReg, Address slotAddr,
15242 TypedOrValueRegister dest)
15243 : lir_(lir), stringReg_(stringReg), slotAddr_(slotAddr), dest_(dest) {}
15245 void accept(CodeGenerator* codegen) final {
15246 codegen->visitOutOfLineAtomizeSlot(this);
// Accessors for the captured state.
15248 LInstruction* lir() const { return lir_; }
15249 Register stringReg() const { return stringReg_; }
15250 Address slotAddr() const { return slotAddr_; }
15251 TypedOrValueRegister dest() const { return dest_; }
// Emits the out-of-line path that calls js::AtomizeString, writes the atom
// back into the originating slot (with pre-barrier), and forwards it to the
// destination register/value. See the inline comment below for the register
// contract.
15254 void CodeGenerator::visitOutOfLineAtomizeSlot(OutOfLineAtomizeSlot* ool) {
15255 LInstruction* lir = ool->lir();
15256 Register stringReg = ool->stringReg();
15257 Address slotAddr = ool->slotAddr();
15258 TypedOrValueRegister dest = ool->dest();
15260 // This code is called with a non-atomic string in |stringReg|.
15261 // When it returns, |stringReg| contains an unboxed pointer to an
15262 // atomized version of that string, and |slotAddr| contains a
15263 // StringValue pointing to that atom. If |dest| is a ValueOperand,
15264 // it contains the same StringValue; otherwise we assert that |dest|
15265 // is |stringReg|.
15267 saveLive(lir);
15268 pushArg(stringReg);
15270 using Fn = JSAtom* (*)(JSContext*, JSString*);
15271 callVM<Fn, js::AtomizeString>(lir);
// Move the VM return value into stringReg, then restore live registers
// except the one we just clobbered with the result.
15272 StoreRegisterTo(stringReg).generate(this);
15273 restoreLiveIgnore(lir, StoreRegisterTo(stringReg).clobbered());
15275 if (dest.hasValue()) {
15276 masm.moveValue(
15277 TypedOrValueRegister(MIRType::String, AnyRegister(stringReg)),
15278 dest.valueReg());
15279 } else {
15280 MOZ_ASSERT(dest.typedReg().gpr() == stringReg);
// Overwrite the slot with the atomized string (pre-barrier the old value).
15283 emitPreBarrier(slotAddr);
15284 masm.storeTypedOrValue(dest, slotAddr);
15286 // We don't need a post-barrier because atoms aren't nursery-allocated.
15287 #ifdef DEBUG
15288 // We need a temp register for the nursery check. Spill something.
15289 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
15290 allRegs.take(stringReg);
15291 Register temp = allRegs.takeAny();
15292 masm.push(temp);
15294 Label tenured;
15295 masm.branchPtrInNurseryChunk(Assembler::NotEqual, stringReg, temp, &tenured);
15296 masm.assumeUnreachable("AtomizeString returned a nursery pointer");
15297 masm.bind(&tenured);
15299 masm.pop(temp);
15300 #endif
15302 masm.jump(ool->rejoin());
// Emits an inline check on the string's ATOM_BIT flag; if the string in
// |stringReg| is not already an atom, jumps to the OutOfLineAtomizeSlot stub
// which atomizes it and updates |slotAddr| / |dest|.
15305 void CodeGenerator::emitMaybeAtomizeSlot(LInstruction* ins, Register stringReg,
15306 Address slotAddr,
15307 TypedOrValueRegister dest) {
15308 OutOfLineAtomizeSlot* ool =
15309 new (alloc()) OutOfLineAtomizeSlot(ins, stringReg, slotAddr, dest);
15310 addOutOfLineCode(ool, ins->mirRaw()->toInstruction());
// ATOM_BIT clear => not an atom => take the OOL atomize path.
15311 masm.branchTest32(Assembler::Zero,
15312 Address(stringReg, JSString::offsetOfFlags()),
15313 Imm32(JSString::ATOM_BIT), ool->entry());
15314 masm.bind(ool->rejoin());
// Loads a fixed slot as a Value and, if it holds a string, ensures that
// string is atomized (updating both the slot and the output in place).
// Non-string values pass through untouched.
15317 void CodeGenerator::visitLoadFixedSlotAndAtomize(
15318 LLoadFixedSlotAndAtomize* ins) {
15319 Register obj = ToRegister(ins->getOperand(0));
15320 Register temp = ToRegister(ins->temp0());
15321 size_t slot = ins->mir()->slot();
15322 ValueOperand result = ToOutValue(ins);
15324 Address slotAddr(obj, NativeObject::getFixedSlotOffset(slot));
15325 masm.loadValue(slotAddr, result);
15327 Label notString;
15328 masm.branchTestString(Assembler::NotEqual, result, &notString);
15329 masm.unboxString(result, temp);
15330 emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
15331 masm.bind(&notString);
// Dynamic-slot variant of visitLoadFixedSlotAndAtomize: |input| points at
// the slots array, and string values are atomized in place.
15334 void CodeGenerator::visitLoadDynamicSlotAndAtomize(
15335 LLoadDynamicSlotAndAtomize* ins) {
15336 ValueOperand result = ToOutValue(ins);
15337 Register temp = ToRegister(ins->temp0());
15338 Register base = ToRegister(ins->input());
15339 int32_t offset = ins->mir()->slot() * sizeof(js::Value);
15341 Address slotAddr(base, offset);
15342 masm.loadValue(slotAddr, result);
15344 Label notString;
15345 masm.branchTestString(Assembler::NotEqual, result, &notString);
15346 masm.unboxString(result, temp);
15347 emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
15348 masm.bind(&notString);
// Loads a fixed slot known (or checked) to be a string, unboxes it into a
// GPR, and ensures the string is atomized. Bails out on a non-string value
// when fallible.
15351 void CodeGenerator::visitLoadFixedSlotUnboxAndAtomize(
15352 LLoadFixedSlotUnboxAndAtomize* ins) {
15353 const MLoadFixedSlotAndUnbox* mir = ins->mir();
15354 MOZ_ASSERT(mir->type() == MIRType::String);
15355 Register input = ToRegister(ins->object());
15356 AnyRegister result = ToAnyRegister(ins->output());
15357 size_t slot = mir->slot();
15359 Address slotAddr(input, NativeObject::getFixedSlotOffset(slot));
15361 Label bail;
15362 EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
15363 &bail);
15364 emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
15365 TypedOrValueRegister(MIRType::String, result));
15367 if (mir->fallible()) {
15368 bailoutFrom(&bail, ins->snapshot());
// Dynamic-slot variant of visitLoadFixedSlotUnboxAndAtomize: unboxes a
// string out of slots[slot] and atomizes it in place.
15372 void CodeGenerator::visitLoadDynamicSlotUnboxAndAtomize(
15373 LLoadDynamicSlotUnboxAndAtomize* ins) {
15374 const MLoadDynamicSlotAndUnbox* mir = ins->mir();
15375 MOZ_ASSERT(mir->type() == MIRType::String);
15376 Register input = ToRegister(ins->slots());
15377 AnyRegister result = ToAnyRegister(ins->output());
15378 size_t slot = mir->slot();
15380 Address slotAddr(input, slot * sizeof(JS::Value));
15382 Label bail;
15383 EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
15384 &bail);
15385 emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
15386 TypedOrValueRegister(MIRType::String, result));
15388 if (mir->fallible()) {
15389 bailoutFrom(&bail, ins->snapshot());
// Adds a property by updating the object's shape (with pre-barrier on the
// old shape) and storing the value into the new slot, which may be fixed
// (offset from the object) or dynamic (offset from the slots pointer loaded
// into |maybeTemp|).
15393 void CodeGenerator::visitAddAndStoreSlot(LAddAndStoreSlot* ins) {
15394 const Register obj = ToRegister(ins->getOperand(0));
15395 const ValueOperand value = ToValue(ins, LAddAndStoreSlot::ValueIndex);
15396 const Register maybeTemp = ToTempRegisterOrInvalid(ins->temp0());
15398 Shape* shape = ins->mir()->shape();
15399 masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
15400 EmitPreBarrier(masm, addr, MIRType::Shape);
15403 // Perform the store. No pre-barrier required since this is a new
15404 // initialization.
15406 uint32_t offset = ins->mir()->slotOffset();
15407 if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
15408 Address slot(obj, offset);
15409 masm.storeValue(value, slot);
15410 } else {
15411 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), maybeTemp);
15412 Address slot(maybeTemp, offset);
15413 masm.storeValue(value, slot);
// Adds a property that requires growing the dynamic-slots allocation:
// calls NativeObject::growSlotsPure via an aligned ABI call (preserving
// obj/value on the stack), bails out if the pure growth fails, then updates
// the shape and stores the value into the new dynamic slot.
15417 void CodeGenerator::visitAllocateAndStoreSlot(LAllocateAndStoreSlot* ins) {
15418 const Register obj = ToRegister(ins->getOperand(0));
15419 const ValueOperand value = ToValue(ins, LAllocateAndStoreSlot::ValueIndex);
15420 const Register temp0 = ToRegister(ins->temp0());
15421 const Register temp1 = ToRegister(ins->temp1());
// growSlotsPure can't GC but the ABI call clobbers registers; keep the
// operands safe on the stack across the call.
15423 masm.Push(obj);
15424 masm.Push(value);
15426 using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
15427 masm.setupAlignedABICall();
15428 masm.loadJSContext(temp0);
15429 masm.passABIArg(temp0);
15430 masm.passABIArg(obj);
15431 masm.move32(Imm32(ins->mir()->numNewSlots()), temp1);
15432 masm.passABIArg(temp1);
15433 masm.callWithABI<Fn, NativeObject::growSlotsPure>();
15434 masm.storeCallPointerResult(temp0);
15436 masm.Pop(value);
15437 masm.Pop(obj);
// growSlotsPure returning false => allocation failed; deopt to baseline.
15439 bailoutIfFalseBool(temp0, ins->snapshot());
15441 masm.storeObjShape(ins->mir()->shape(), obj,
15442 [](MacroAssembler& masm, const Address& addr) {
15443 EmitPreBarrier(masm, addr, MIRType::Shape);
15446 // Perform the store. No pre-barrier required since this is a new
15447 // initialization.
15448 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp0);
15449 Address slot(temp0, ins->mir()->slotOffset());
15450 masm.storeValue(value, slot);
// Slow path for adding a property on objects with an addProperty hook:
// pushes (obj, value, shape) and calls the AddSlotAndCallAddPropHook VM
// function.
15453 void CodeGenerator::visitAddSlotAndCallAddPropHook(
15454 LAddSlotAndCallAddPropHook* ins) {
15455 const Register obj = ToRegister(ins->object());
15456 const ValueOperand value =
15457 ToValue(ins, LAddSlotAndCallAddPropHook::ValueIndex);
// Args are pushed in reverse order of the VM function's signature.
15459 pushArg(ImmGCPtr(ins->mir()->shape()));
15460 pushArg(value);
15461 pushArg(obj);
15463 using Fn =
15464 bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
15465 callVM<Fn, AddSlotAndCallAddPropHook>(ins);
// Stores a boxed Value into a fixed slot, emitting a GC pre-barrier on the
// old slot contents when the MIR node requires one.
15468 void CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins) {
15469 const Register obj = ToRegister(ins->getOperand(0));
15470 size_t slot = ins->mir()->slot();
15472 const ValueOperand value = ToValue(ins, LStoreFixedSlotV::ValueIndex);
15474 Address address(obj, NativeObject::getFixedSlotOffset(slot));
15475 if (ins->mir()->needsBarrier()) {
15476 emitPreBarrier(address);
15479 masm.storeValue(value, address);
// Stores a typed (or constant) value into a fixed slot, boxing it as needed
// via storeConstantOrRegister; pre-barriers the old contents when required.
15482 void CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins) {
15483 const Register obj = ToRegister(ins->getOperand(0));
15484 size_t slot = ins->mir()->slot();
15486 const LAllocation* value = ins->value();
15487 MIRType valueType = ins->mir()->value()->type();
15489 Address address(obj, NativeObject::getFixedSlotOffset(slot));
15490 if (ins->mir()->needsBarrier()) {
15491 emitPreBarrier(address);
// Wrap either the compile-time constant or the typed register uniformly.
15494 ConstantOrRegister nvalue =
15495 value->isConstant()
15496 ? ConstantOrRegister(value->toConstant()->toJSValue())
15497 : TypedOrValueRegister(valueType, ToAnyRegister(value));
15498 masm.storeConstantOrRegister(nvalue, address);
// Emits an IonGetNameIC inline cache for a name lookup on the environment
// chain.
15501 void CodeGenerator::visitGetNameCache(LGetNameCache* ins) {
15502 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
15503 Register envChain = ToRegister(ins->envObj());
15504 ValueOperand output = ToOutValue(ins);
15505 Register temp = ToRegister(ins->temp0());
15507 IonGetNameIC ic(liveRegs, envChain, output, temp);
15508 addIC(ins, allocateIC(ic));
// Allocates an IonGetPropertyIC. Uses the GetProp cache kind when the id is
// a constant non-index atom (a plain property name); otherwise GetElem.
15511 void CodeGenerator::addGetPropertyCache(LInstruction* ins,
15512 LiveRegisterSet liveRegs,
15513 TypedOrValueRegister value,
15514 const ConstantOrRegister& id,
15515 ValueOperand output) {
15516 CacheKind kind = CacheKind::GetElem;
15517 if (id.constant() && id.value().isString()) {
15518 JSString* idString = id.value().toString();
15519 if (idString->isAtom() && !idString->asAtom().isIndex()) {
15520 kind = CacheKind::GetProp;
15523 IonGetPropertyIC cache(kind, liveRegs, value, id, output);
15524 addIC(ins, allocateIC(cache));
// Allocates an IonSetPropertyIC, mirroring addGetPropertyCache's kind
// selection: SetProp for constant non-index atom ids, else SetElem.
15527 void CodeGenerator::addSetPropertyCache(LInstruction* ins,
15528 LiveRegisterSet liveRegs,
15529 Register objReg, Register temp,
15530 const ConstantOrRegister& id,
15531 const ConstantOrRegister& value,
15532 bool strict) {
15533 CacheKind kind = CacheKind::SetElem;
15534 if (id.constant() && id.value().isString()) {
15535 JSString* idString = id.value().toString();
15536 if (idString->isAtom() && !idString->asAtom().isIndex()) {
15537 kind = CacheKind::SetProp;
15540 IonSetPropertyIC cache(kind, liveRegs, objReg, temp, id, value, strict);
15541 addIC(ins, allocateIC(cache));
// Converts LIR operand |n| of |lir| into a ConstantOrRegister: a boxed
// ValueOperand for MIRType::Value, a JS constant for constant allocations,
// or a typed register otherwise.
15544 ConstantOrRegister CodeGenerator::toConstantOrRegister(LInstruction* lir,
15545 size_t n, MIRType type) {
15546 if (type == MIRType::Value) {
15547 return TypedOrValueRegister(ToValue(lir, n));
15550 const LAllocation* value = lir->getOperand(n);
15551 if (value->isConstant()) {
15552 return ConstantOrRegister(value->toConstant()->toJSValue());
15555 return TypedOrValueRegister(type, ToAnyRegister(value));
// Emits a GetProp/GetElem IC for this instruction; operand conversion is
// shared with the other cache visitors via toConstantOrRegister.
15558 void CodeGenerator::visitGetPropertyCache(LGetPropertyCache* ins) {
15559 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
15560 TypedOrValueRegister value =
15561 toConstantOrRegister(ins, LGetPropertyCache::ValueIndex,
15562 ins->mir()->value()->type())
15563 .reg();
15564 ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCache::IdIndex,
15565 ins->mir()->idval()->type());
15566 ValueOperand output = ToOutValue(ins);
15567 addGetPropertyCache(ins, liveRegs, value, id, output);
// Emits a GetPropSuper/GetElemSuper IC (property access through |super|).
// Kind selection matches addGetPropertyCache: constant non-index atom ids
// use the named-property kind.
15570 void CodeGenerator::visitGetPropSuperCache(LGetPropSuperCache* ins) {
15571 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
15572 Register obj = ToRegister(ins->obj());
15573 TypedOrValueRegister receiver =
15574 toConstantOrRegister(ins, LGetPropSuperCache::ReceiverIndex,
15575 ins->mir()->receiver()->type())
15576 .reg();
15577 ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCache::IdIndex,
15578 ins->mir()->idval()->type());
15579 ValueOperand output = ToOutValue(ins);
15581 CacheKind kind = CacheKind::GetElemSuper;
15582 if (id.constant() && id.value().isString()) {
15583 JSString* idString = id.value().toString();
15584 if (idString->isAtom() && !idString->asAtom().isIndex()) {
15585 kind = CacheKind::GetPropSuper;
15589 IonGetPropSuperIC cache(kind, liveRegs, obj, receiver, id, output);
15590 addIC(ins, allocateIC(cache));
// Emits an IonBindNameIC for resolving the environment object that holds a
// name on the environment chain.
15593 void CodeGenerator::visitBindNameCache(LBindNameCache* ins) {
15594 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
15595 Register envChain = ToRegister(ins->environmentChain());
15596 Register output = ToRegister(ins->output());
15597 Register temp = ToRegister(ins->temp0());
15599 IonBindNameIC ic(liveRegs, envChain, output, temp);
15600 addIC(ins, allocateIC(ic));
// Emits an IonHasOwnIC for Object.hasOwnProperty-style checks; produces a
// boolean in |output|.
15603 void CodeGenerator::visitHasOwnCache(LHasOwnCache* ins) {
15604 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
15605 TypedOrValueRegister value =
15606 toConstantOrRegister(ins, LHasOwnCache::ValueIndex,
15607 ins->mir()->value()->type())
15608 .reg();
15609 TypedOrValueRegister id = toConstantOrRegister(ins, LHasOwnCache::IdIndex,
15610 ins->mir()->idval()->type())
15611 .reg();
15612 Register output = ToRegister(ins->output());
15614 IonHasOwnIC cache(liveRegs, value, id, output);
15615 addIC(ins, allocateIC(cache));
// Emits an IonCheckPrivateFieldIC for the `#field in obj` brand check;
// produces a boolean in |output|.
15618 void CodeGenerator::visitCheckPrivateFieldCache(LCheckPrivateFieldCache* ins) {
15619 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
15620 TypedOrValueRegister value =
15621 toConstantOrRegister(ins, LCheckPrivateFieldCache::ValueIndex,
15622 ins->mir()->value()->type())
15623 .reg();
15624 TypedOrValueRegister id =
15625 toConstantOrRegister(ins, LCheckPrivateFieldCache::IdIndex,
15626 ins->mir()->idval()->type())
15627 .reg();
15628 Register output = ToRegister(ins->output());
15630 IonCheckPrivateFieldIC cache(liveRegs, value, id, output);
15631 addIC(ins, allocateIC(cache));
// Creates a fresh private-name symbol for the given atom via the
// NewPrivateName VM function.
15634 void CodeGenerator::visitNewPrivateName(LNewPrivateName* ins) {
15635 pushArg(ImmGCPtr(ins->mir()->name()));
15637 using Fn = JS::Symbol* (*)(JSContext*, Handle<JSAtom*>);
15638 callVM<Fn, NewPrivateName>(ins);
// VM call for `delete obj.prop`, selecting the strict/non-strict template
// instantiation of DelPropOperation at compile time.
15641 void CodeGenerator::visitCallDeleteProperty(LCallDeleteProperty* lir) {
15642 pushArg(ImmGCPtr(lir->mir()->name()));
15643 pushArg(ToValue(lir, LCallDeleteProperty::ValueIndex));
15645 using Fn = bool (*)(JSContext*, HandleValue, Handle<PropertyName*>, bool*);
15646 if (lir->mir()->strict()) {
15647 callVM<Fn, DelPropOperation<true>>(lir);
15648 } else {
15649 callVM<Fn, DelPropOperation<false>>(lir);
// VM call for `delete obj[index]`, selecting the strict/non-strict
// DelElemOperation instantiation at compile time.
15653 void CodeGenerator::visitCallDeleteElement(LCallDeleteElement* lir) {
15654 pushArg(ToValue(lir, LCallDeleteElement::IndexIndex));
15655 pushArg(ToValue(lir, LCallDeleteElement::ValueIndex));
15657 using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
15658 if (lir->mir()->strict()) {
15659 callVM<Fn, DelElemOperation<true>>(lir);
15660 } else {
15661 callVM<Fn, DelElemOperation<false>>(lir);
// Implements for-in iteration setup: tries to reuse a cached
// PropertyIteratorObject via the shape-keyed iterator cache, falling back to
// a VM call (GetIterator / GetIteratorWithIndices depending on whether any
// consumer wants iterator indices). On success, marks the native iterator
// active, links it into the compartment's enumerators list, and emits a
// post-write barrier for storing |obj| into the iterator.
15665 void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
15666 Register obj = ToRegister(lir->object());
15667 Register iterObj = ToRegister(lir->output());
15668 Register temp = ToRegister(lir->temp0());
15669 Register temp2 = ToRegister(lir->temp1());
15670 Register temp3 = ToRegister(lir->temp2());
15672 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
15673 OutOfLineCode* ool = (lir->mir()->wantsIndices())
15674 ? oolCallVM<Fn, GetIteratorWithIndices>(
15675 lir, ArgList(obj), StoreRegisterTo(iterObj))
15676 : oolCallVM<Fn, GetIterator>(
15677 lir, ArgList(obj), StoreRegisterTo(iterObj));
// Fast path: look up a reusable cached iterator keyed on the object's shape.
15679 masm.maybeLoadIteratorFromShape(obj, iterObj, temp, temp2, temp3,
15680 ool->entry());
15682 Register nativeIter = temp;
15683 masm.loadPrivate(
15684 Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
15685 nativeIter);
15687 if (lir->mir()->wantsIndices()) {
15688 // At least one consumer of the output of this iterator has been optimized
15689 // to use iterator indices. If the cached iterator doesn't include indices,
15690 // but it was marked to indicate that we can create them if needed, then we
15691 // do a VM call to replace the cached iterator with a fresh iterator
15692 // including indices.
15693 masm.branchNativeIteratorIndices(Assembler::Equal, nativeIter, temp2,
15694 NativeIteratorIndices::AvailableOnRequest,
15695 ool->entry());
// Activate the cached iterator: record the iterated object and set the
// Active flag.
15698 Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
15699 masm.storePtr(
15700 obj, Address(nativeIter, NativeIterator::offsetOfObjectBeingIterated()));
15701 masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
15703 Register enumeratorsAddr = temp2;
15704 masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
15705 masm.registerIterator(enumeratorsAddr, nativeIter, temp3);
15707 // Generate post-write barrier for storing to |iterObj->objectBeingIterated_|.
15708 // We already know that |iterObj| is tenured, so we only have to check |obj|.
15709 Label skipBarrier;
15710 masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);
// Call the barrier with volatile registers saved, keeping our temps (and
// iterObj, if volatile) out of the save set appropriately.
15712 LiveRegisterSet save = liveVolatileRegs(lir);
15713 save.takeUnchecked(temp);
15714 save.takeUnchecked(temp2);
15715 save.takeUnchecked(temp3);
15716 if (iterObj.volatile_()) {
15717 save.addUnchecked(iterObj);
15720 masm.PushRegsInMask(save);
15721 emitPostWriteBarrier(iterObj);
15722 masm.PopRegsInMask(save);
15724 masm.bind(&skipBarrier);
15726 masm.bind(ool->rejoin());
// VM call: produce a PropertyIteratorObject for an arbitrary Value
// (for-in over a non-object operand).
15729 void CodeGenerator::visitValueToIterator(LValueToIterator* lir) {
15730 pushArg(ToValue(lir, LValueToIterator::ValueIndex));
15732 using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
15733 callVM<Fn, ValueToIterator>(lir);
// Branches to |ifTrue| when the iterator has valid property indices AND the
// iterated object's shape still matches the iterator's recorded first shape;
// otherwise branches to |ifFalse|. The ifTrue jump is elided when that block
// immediately follows.
15736 void CodeGenerator::visitIteratorHasIndicesAndBranch(
15737 LIteratorHasIndicesAndBranch* lir) {
15738 Register iterator = ToRegister(lir->iterator());
15739 Register object = ToRegister(lir->object());
15740 Register temp = ToRegister(lir->temp());
15741 Register temp2 = ToRegister(lir->temp2());
15742 Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
15743 Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
15745 // Check that the iterator has indices available.
15746 Address nativeIterAddr(iterator,
15747 PropertyIteratorObject::offsetOfIteratorSlot());
15748 masm.loadPrivate(nativeIterAddr, temp);
15749 masm.branchNativeIteratorIndices(Assembler::NotEqual, temp, temp2,
15750 NativeIteratorIndices::Valid, ifFalse);
15752 // Guard that the first shape stored in the iterator matches the current
15753 // shape of the iterated object.
15754 Address firstShapeAddr(temp, NativeIterator::offsetOfFirstShape());
15755 masm.loadPtr(firstShapeAddr, temp);
15756 masm.branchTestObjShape(Assembler::NotEqual, object, temp, temp2, object,
15757 ifFalse);
15759 if (!isNextBlock(lir->ifTrue()->lir())) {
15760 masm.jump(ifTrue);
// Loads the property Value named by the iterator's current PropertyIndex.
// The index encodes both a location kind (dynamic slot, fixed slot, or dense
// element) and an offset; each kind resolves to a different base address.
// Debug builds assert the kind and dense-element bounds.
15764 void CodeGenerator::visitLoadSlotByIteratorIndex(
15765 LLoadSlotByIteratorIndex* lir) {
15766 Register object = ToRegister(lir->object());
15767 Register iterator = ToRegister(lir->iterator());
15768 Register temp = ToRegister(lir->temp0());
15769 Register temp2 = ToRegister(lir->temp1());
15770 ValueOperand result = ToOutValue(lir);
// temp <- index, temp2 <- kind.
15772 masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);
15774 Label notDynamicSlot, notFixedSlot, done;
15775 masm.branch32(Assembler::NotEqual, temp2,
15776 Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
15777 &notDynamicSlot);
15778 masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
15779 masm.loadValue(BaseValueIndex(temp2, temp), result);
15780 masm.jump(&done);
15782 masm.bind(&notDynamicSlot);
15783 masm.branch32(Assembler::NotEqual, temp2,
15784 Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
15785 // Fixed slot
// Fixed slots start immediately after the NativeObject header.
15786 masm.loadValue(BaseValueIndex(object, temp, sizeof(NativeObject)), result);
15787 masm.jump(&done);
15788 masm.bind(&notFixedSlot);
15790 #ifdef DEBUG
15791 Label kindOkay;
15792 masm.branch32(Assembler::Equal, temp2,
15793 Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
15794 masm.assumeUnreachable("Invalid PropertyIndex::Kind");
15795 masm.bind(&kindOkay);
15796 #endif
15798 // Dense element
15799 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
15800 Label indexOkay;
15801 Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
15802 masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
15803 masm.assumeUnreachable("Dense element out of bounds");
15804 masm.bind(&indexOkay);
15806 masm.loadValue(BaseObjectElementIndex(temp2, temp), result);
15807 masm.bind(&done);
// Store counterpart of visitLoadSlotByIteratorIndex: computes the effective
// address for the iterator's current PropertyIndex (dynamic slot, fixed
// slot, or dense element), then performs one shared pre-barriered store.
// Finishes with a conditional post-write barrier when a nursery value is
// stored into a tenured object.
15810 void CodeGenerator::visitStoreSlotByIteratorIndex(
15811 LStoreSlotByIteratorIndex* lir) {
15812 Register object = ToRegister(lir->object());
15813 Register iterator = ToRegister(lir->iterator());
15814 ValueOperand value = ToValue(lir, LStoreSlotByIteratorIndex::ValueIndex);
15815 Register temp = ToRegister(lir->temp0());
15816 Register temp2 = ToRegister(lir->temp1());
// temp <- index, temp2 <- kind.
15818 masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);
15820 Label notDynamicSlot, notFixedSlot, done, doStore;
15821 masm.branch32(Assembler::NotEqual, temp2,
15822 Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
15823 &notDynamicSlot);
15824 masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
15825 masm.computeEffectiveAddress(BaseValueIndex(temp2, temp), temp);
15826 masm.jump(&doStore);
15828 masm.bind(&notDynamicSlot);
15829 masm.branch32(Assembler::NotEqual, temp2,
15830 Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
15831 // Fixed slot
15832 masm.computeEffectiveAddress(
15833 BaseValueIndex(object, temp, sizeof(NativeObject)), temp);
15834 masm.jump(&doStore);
15835 masm.bind(&notFixedSlot);
15837 #ifdef DEBUG
15838 Label kindOkay;
15839 masm.branch32(Assembler::Equal, temp2,
15840 Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
15841 masm.assumeUnreachable("Invalid PropertyIndex::Kind");
15842 masm.bind(&kindOkay);
15843 #endif
15845 // Dense element
15846 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
15847 Label indexOkay;
15848 Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
15849 masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
15850 masm.assumeUnreachable("Dense element out of bounds");
15851 masm.bind(&indexOkay);
15853 BaseObjectElementIndex elementAddress(temp2, temp);
15854 masm.computeEffectiveAddress(elementAddress, temp);
// All three paths arrive here with the target address in |temp|.
15856 masm.bind(&doStore);
15857 Address storeAddress(temp, 0);
15858 emitPreBarrier(storeAddress);
15859 masm.storeValue(value, storeAddress);
// Post-barrier only when |object| is tenured and |value| is a nursery cell.
15861 masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp2, &done);
15862 masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp2, &done);
15864 saveVolatile(temp2);
15865 emitPostWriteBarrier(object);
15866 restoreVolatile(temp2);
15868 masm.bind(&done);
// Emits a SetProp/SetElem IC; kind selection and IC allocation are handled
// by addSetPropertyCache.
15871 void CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins) {
15872 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
15873 Register objReg = ToRegister(ins->object());
15874 Register temp = ToRegister(ins->temp0());
15876 ConstantOrRegister id = toConstantOrRegister(ins, LSetPropertyCache::IdIndex,
15877 ins->mir()->idval()->type());
15878 ConstantOrRegister value = toConstantOrRegister(
15879 ins, LSetPropertyCache::ValueIndex, ins->mir()->value()->type());
15881 addSetPropertyCache(ins, liveRegs, objReg, temp, id, value,
15882 ins->mir()->strict());
// VM call implementing the `throw` statement.
15885 void CodeGenerator::visitThrow(LThrow* lir) {
15886 pushArg(ToValue(lir, LThrow::ValueIndex));
15888 using Fn = bool (*)(JSContext*, HandleValue);
15889 callVM<Fn, js::ThrowOperation>(lir);
// Out-of-line path for typeof on object-tagged values (callable objects and
// emulates-undefined objects need extra work). Code is emitted by
// CodeGenerator::visitOutOfLineTypeOfV.
15892 class OutOfLineTypeOfV : public OutOfLineCodeBase<CodeGenerator> {
15893 LTypeOfV* ins_;
15895 public:
15896 explicit OutOfLineTypeOfV(LTypeOfV* ins) : ins_(ins) {}
15898 void accept(CodeGenerator* codegen) override {
15899 codegen->visitOutOfLineTypeOfV(this);
15901 LTypeOfV* ins() const { return ins_; }
// Materializes the JSType constant corresponding to a JSValueType into
// |output|. Note: JSVAL_TYPE_NULL maps to JSTYPE_OBJECT (typeof null ===
// "object"), and both number representations map to JSTYPE_NUMBER.
15904 void CodeGenerator::emitTypeOfJSType(JSValueType type, Register output) {
15905 switch (type) {
15906 case JSVAL_TYPE_OBJECT:
15907 masm.move32(Imm32(JSTYPE_OBJECT), output);
15908 break;
15909 case JSVAL_TYPE_DOUBLE:
15910 case JSVAL_TYPE_INT32:
15911 masm.move32(Imm32(JSTYPE_NUMBER), output);
15912 break;
15913 case JSVAL_TYPE_BOOLEAN:
15914 masm.move32(Imm32(JSTYPE_BOOLEAN), output);
15915 break;
15916 case JSVAL_TYPE_UNDEFINED:
15917 masm.move32(Imm32(JSTYPE_UNDEFINED), output);
15918 break;
15919 case JSVAL_TYPE_NULL:
// typeof null is "object" per spec.
15920 masm.move32(Imm32(JSTYPE_OBJECT), output);
15921 break;
15922 case JSVAL_TYPE_STRING:
15923 masm.move32(Imm32(JSTYPE_STRING), output);
15924 break;
15925 case JSVAL_TYPE_SYMBOL:
15926 masm.move32(Imm32(JSTYPE_SYMBOL), output);
15927 break;
15928 case JSVAL_TYPE_BIGINT:
15929 masm.move32(Imm32(JSTYPE_BIGINT), output);
15930 break;
15931 default:
15932 MOZ_CRASH("Unsupported JSValueType");
// Emits one tag test for typeof: on a match, loads the JSType constant and
// jumps to |done|; otherwise falls through to the next check. Object-tagged
// values always branch to |oolObject| (callables / emulates-undefined need
// the slow path). Int32/Double are tested jointly as "number".
15936 void CodeGenerator::emitTypeOfCheck(JSValueType type, Register tag,
15937 Register output, Label* done,
15938 Label* oolObject) {
15939 Label notMatch;
15940 switch (type) {
15941 case JSVAL_TYPE_OBJECT:
15942 // The input may be a callable object (result is "function") or
15943 // may emulate undefined (result is "undefined"). Use an OOL path.
15944 masm.branchTestObject(Assembler::Equal, tag, oolObject);
15945 return;
15946 case JSVAL_TYPE_DOUBLE:
15947 case JSVAL_TYPE_INT32:
15948 masm.branchTestNumber(Assembler::NotEqual, tag, &notMatch);
15949 break;
15950 default:
15951 masm.branchTestType(Assembler::NotEqual, tag, type, &notMatch);
15952 break;
15955 emitTypeOfJSType(type, output);
15956 masm.jump(done);
15957 masm.bind(&notMatch);
// typeof for a boxed Value: emits a chain of tag checks, testing previously
// observed types (from baseline profiling, most frequent first) before the
// remaining types in a fixed default order. Object-tagged inputs defer to
// the OutOfLineTypeOfV path. The final check can be skipped (non-object
// only), since a tag must match one of the eight types; DEBUG builds still
// verify this with assumeUnreachable.
15960 void CodeGenerator::visitTypeOfV(LTypeOfV* lir) {
15961 const ValueOperand value = ToValue(lir, LTypeOfV::InputIndex);
15962 Register output = ToRegister(lir->output());
15963 Register tag = masm.extractTag(value, output);
15965 Label done;
15967 auto* ool = new (alloc()) OutOfLineTypeOfV(lir);
15968 addOutOfLineCode(ool, lir->mir());
15970 const std::initializer_list<JSValueType> defaultOrder = {
15971 JSVAL_TYPE_OBJECT, JSVAL_TYPE_DOUBLE, JSVAL_TYPE_UNDEFINED,
15972 JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN, JSVAL_TYPE_STRING,
15973 JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
15975 mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);
15977 // Generate checks for previously observed types first.
15978 // The TypeDataList is sorted by descending frequency.
15979 for (auto& observed : lir->mir()->observedTypes()) {
15980 JSValueType type = observed.type();
15982 // Unify number types.
15983 if (type == JSVAL_TYPE_INT32) {
15984 type = JSVAL_TYPE_DOUBLE;
15987 remaining -= type;
15989 emitTypeOfCheck(type, tag, output, &done, ool->entry());
15992 // Generate checks for remaining types.
15993 for (auto type : defaultOrder) {
15994 if (!remaining.contains(type)) {
15995 continue;
15997 remaining -= type;
15999 if (remaining.isEmpty() && type != JSVAL_TYPE_OBJECT) {
16000 // We can skip the check for the last remaining type, unless the type is
16001 // JSVAL_TYPE_OBJECT, which may have to go through the OOL path.
16002 #ifdef DEBUG
16003 emitTypeOfCheck(type, tag, output, &done, ool->entry());
16004 masm.assumeUnreachable("Unexpected Value type in visitTypeOfV");
16005 #else
16006 emitTypeOfJSType(type, output);
16007 #endif
16008 } else {
16009 emitTypeOfCheck(type, tag, output, &done, ool->entry());
16012 MOZ_ASSERT(remaining.isEmpty());
16014 masm.bind(&done);
16015 masm.bind(ool->rejoin());
// Compute the JSType of an object in |obj| into |output|, then fall through
// (or jump) to |done|. Fast paths classify callables ("function"),
// emulates-undefined objects ("undefined"), and plain objects ("object");
// anything ambiguous (e.g. proxies) falls back to a C++ call to
// js::TypeOfObject.
void CodeGenerator::emitTypeOfObject(Register obj, Register output,
                                     Label* done) {
  Label slowCheck, isObject, isCallable, isUndefined;
  masm.typeOfObject(obj, output, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.move32(Imm32(JSTYPE_FUNCTION), output);
  masm.jump(done);

  masm.bind(&isUndefined);
  masm.move32(Imm32(JSTYPE_UNDEFINED), output);
  masm.jump(done);

  masm.bind(&isObject);
  masm.move32(Imm32(JSTYPE_OBJECT), output);
  masm.jump(done);

  // Slow path: ask the VM. Volatile registers must be preserved around the
  // ABI call; |output| itself is excluded so the result survives.
  masm.bind(&slowCheck);

  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);
  // Falls through to |done| (bound by the caller).
}
// Out-of-line continuation for visitTypeOfV: handles the object case by
// unboxing the object and delegating to emitTypeOfObject.
void CodeGenerator::visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool) {
  LTypeOfV* ins = ool->ins();

  ValueOperand input = ToValue(ins, LTypeOfV::InputIndex);
  Register temp = ToTempUnboxRegister(ins->temp0());
  Register output = ToRegister(ins->output());

  Register obj = masm.extractObject(input, temp);
  emitTypeOfObject(obj, output, ool->rejoin());
  masm.jump(ool->rejoin());
}
// Codegen for typeof when the input is statically known to be an object.
void CodeGenerator::visitTypeOfO(LTypeOfO* lir) {
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  Label done;
  emitTypeOfObject(obj, output, &done);
  masm.bind(&done);
}
// Map a JSType index (in |input|) to its interned name atom, by indexing
// into the runtime's names table. Relies on the typeof names being laid out
// consecutively starting at |undefined|.
void CodeGenerator::visitTypeOfName(LTypeOfName* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::Below, input, Imm32(JSTYPE_LIMIT), &ok);
  masm.assumeUnreachable("bad JSType");
  masm.bind(&ok);
#endif

  // The table base is &names().undefined because JSTYPE_UNDEFINED is 0.
  static_assert(JSTYPE_UNDEFINED == 0);

  masm.movePtr(ImmPtr(&gen->runtime->names().undefined), output);
  masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
}
// Out-of-line stub holder for LTypeOfIsNonPrimitiveV: dispatches back into
// CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV.
class OutOfLineTypeOfIsNonPrimitiveV : public OutOfLineCodeBase<CodeGenerator> {
  LTypeOfIsNonPrimitiveV* ins_;

 public:
  explicit OutOfLineTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineTypeOfIsNonPrimitiveV(this);
  }

  auto* ins() const { return ins_; }
};
// Out-of-line stub holder for LTypeOfIsNonPrimitiveO: dispatches back into
// CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO.
class OutOfLineTypeOfIsNonPrimitiveO : public OutOfLineCodeBase<CodeGenerator> {
  LTypeOfIsNonPrimitiveO* ins_;

 public:
  explicit OutOfLineTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineTypeOfIsNonPrimitiveO(this);
  }

  auto* ins() const { return ins_; }
};
// Slow path for a fused "typeof x == <type>" comparison on an object:
// calls js::TypeOfObject, then compares the returned JSType against the
// expected type, storing the boolean result in |output|.
void CodeGenerator::emitTypeOfIsObjectOOL(MTypeOfIs* mir, Register obj,
                                          Register output) {
  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);

  // Convert the comparison op (==, !=, ===, !==) into a set-on-condition.
  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
  masm.cmp32Set(cond, output, Imm32(mir->jstype()), output);
}
// OOL continuation for the boxed-Value variant: unbox the object and run
// the slow typeof-comparison, then rejoin.
void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV(
    OutOfLineTypeOfIsNonPrimitiveV* ool) {
  auto* ins = ool->ins();
  ValueOperand input = ToValue(ins, LTypeOfIsNonPrimitiveV::InputIndex);
  Register output = ToRegister(ins->output());
  Register temp = ToTempUnboxRegister(ins->temp0());

  Register obj = masm.extractObject(input, temp);

  emitTypeOfIsObjectOOL(ins->mir(), obj, output);

  masm.jump(ool->rejoin());
}
// OOL continuation for the known-object variant: run the slow
// typeof-comparison directly on the input object, then rejoin.
void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO(
    OutOfLineTypeOfIsNonPrimitiveO* ool) {
  auto* ins = ool->ins();
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  emitTypeOfIsObjectOOL(ins->mir(), input, output);

  masm.jump(ool->rejoin());
}
// Inline fast path for "typeof obj <op> <type>" where <type> is one of
// "undefined"/"object"/"function". Classifies the object and routes each
// classification to |success| or |fail| depending on which type the
// comparison expects; ambiguous objects go to |slowCheck|. Finally converts
// success/fail into the boolean demanded by the comparison op.
void CodeGenerator::emitTypeOfIsObject(MTypeOfIs* mir, Register obj,
                                       Register output, Label* success,
                                       Label* fail, Label* slowCheck) {
  // By default every classification fails; the expected type's label is
  // redirected to |success| below.
  Label* isObject = fail;
  Label* isFunction = fail;
  Label* isUndefined = fail;

  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED:
      isUndefined = success;
      break;

    case JSTYPE_OBJECT:
      isObject = success;
      break;

    case JSTYPE_FUNCTION:
      isFunction = success;
      break;

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Primitive type");
  }

  masm.typeOfObject(obj, output, slowCheck, isObject, isFunction, isUndefined);

  auto op = mir->jsop();

  // Materialize the boolean: equality ops yield true on success, inequality
  // ops yield true on failure.
  Label done;
  masm.bind(fail);
  masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  masm.jump(&done);
  masm.bind(success);
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
  masm.bind(&done);
}
// Codegen for "typeof value <op> <non-primitive type>" on a boxed Value.
// First filters by value tag (undefined/null short-circuit without
// unboxing), then unboxes the object and uses emitTypeOfIsObject, with an
// OOL fallback for ambiguous objects.
void CodeGenerator::visitTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* lir) {
  ValueOperand input = ToValue(lir, LTypeOfIsNonPrimitiveV::InputIndex);
  Register output = ToRegister(lir->output());
  Register temp = ToTempUnboxRegister(lir->temp0());

  auto* mir = lir->mir();

  auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveV(lir);
  addOutOfLineCode(ool, mir);

  Label success, fail;

  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED: {
      // |undefined| matches directly; any non-object tag can't match
      // (objects may still emulate undefined, so fall through for them).
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      masm.branchTestUndefined(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_OBJECT: {
      // typeof null is "object", so null matches immediately.
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      masm.branchTestNull(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_FUNCTION: {
      masm.branchTestObject(Assembler::NotEqual, input, &fail);
      break;
    }

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Primitive type");
  }

  Register obj = masm.extractObject(input, temp);

  emitTypeOfIsObject(mir, obj, output, &success, &fail, ool->entry());

  masm.bind(ool->rejoin());
}
// Codegen for "typeof obj <op> <non-primitive type>" when the input is
// statically known to be an object; ambiguous objects take the OOL path.
void CodeGenerator::visitTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  auto* mir = lir->mir();

  auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveO(lir);
  addOutOfLineCode(ool, mir);

  Label success, fail;
  emitTypeOfIsObject(mir, input, output, &success, &fail, ool->entry());

  masm.bind(ool->rejoin());
}
// Codegen for "typeof value <op> <primitive type>": a single tag test on
// the boxed Value suffices, since primitive typeof results map one-to-one
// onto value tags.
void CodeGenerator::visitTypeOfIsPrimitive(LTypeOfIsPrimitive* lir) {
  ValueOperand input = ToValue(lir, LTypeOfIsPrimitive::InputIndex);
  Register output = ToRegister(lir->output());

  auto* mir = lir->mir();
  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);

  switch (mir->jstype()) {
    case JSTYPE_STRING:
      masm.testStringSet(cond, input, output);
      break;
    case JSTYPE_NUMBER:
      masm.testNumberSet(cond, input, output);
      break;
    case JSTYPE_BOOLEAN:
      masm.testBooleanSet(cond, input, output);
      break;
    case JSTYPE_SYMBOL:
      masm.testSymbolSet(cond, input, output);
      break;
    case JSTYPE_BIGINT:
      masm.testBigIntSet(cond, input, output);
      break;

    case JSTYPE_UNDEFINED:
    case JSTYPE_OBJECT:
    case JSTYPE_FUNCTION:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Non-primitive type");
  }
}
// Wrap a sync iterator into an async-from-sync iterator via a VM call.
void CodeGenerator::visitToAsyncIter(LToAsyncIter* lir) {
  // Arguments are pushed in reverse order of the callee's parameter list.
  pushArg(ToValue(lir, LToAsyncIter::NextMethodIndex));
  pushArg(ToRegister(lir->iterator()));

  using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
  callVM<Fn, js::CreateAsyncFromSyncIterator>(lir);
}
// ToPropertyKey via an inline cache (IC).
void CodeGenerator::visitToPropertyKeyCache(LToPropertyKeyCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  ValueOperand input = ToValue(lir, LToPropertyKeyCache::InputIndex);
  ValueOperand output = ToOutValue(lir);

  IonToPropertyKeyIC ic(liveRegs, input, output);
  addIC(lir, allocateIC(ic));
}
// Load a boxed Value from a native object's dense elements. Bails out if
// the slot holds the magic "hole" value.
void CodeGenerator::visitLoadElementV(LLoadElementV* load) {
  Register elements = ToRegister(load->elements());
  const ValueOperand out = ToOutValue(load);

  if (load->index()->isConstant()) {
    NativeObject::elementsSizeMustNotOverflow();
    int32_t offset = ToInt32(load->index()) * sizeof(Value);
    masm.loadValue(Address(elements, offset), out);
  } else {
    masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index())),
                   out);
  }

  // A magic value means a hole: bail out to handle it in the interpreter.
  Label testMagic;
  masm.branchTestMagic(Assembler::Equal, out, &testMagic);
  bailoutFrom(&testMagic, load->snapshot());
}
// Load from dense elements where holes and out-of-bounds indexes yield
// |undefined| instead of bailing. Negative indexes still bail when the MIR
// node requires the check.
void CodeGenerator::visitLoadElementHole(LLoadElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register initLength = ToRegister(lir->initLength());
  const ValueOperand out = ToOutValue(lir);

  const MLoadElementHole* mir = lir->mir();

  // If the index is out of bounds, load |undefined|. Otherwise, load the
  // value.
  Label outOfBounds, done;
  masm.spectreBoundsCheck32(index, initLength, out.scratchReg(), &outOfBounds);

  masm.loadValue(BaseObjectElementIndex(elements, index), out);

  // If the value wasn't a hole, we're done. Otherwise, we'll load undefined.
  masm.branchTestMagic(Assembler::NotEqual, out, &done);

  if (mir->needsNegativeIntCheck()) {
    Label loadUndefined;
    masm.jump(&loadUndefined);

    masm.bind(&outOfBounds);

    // Negative indexes may shadow named properties; bail out for those.
    bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

    masm.bind(&loadUndefined);
  } else {
    masm.bind(&outOfBounds);
  }
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
// Load an unboxed scalar (typed-array element) into a register. The load
// helper may request a bailout (e.g. a Uint32 value that doesn't fit in an
// Int32 output).
void CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToTempRegisterOrInvalid(lir->temp0());
  AnyRegister out = ToAnyRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  Label fail;
  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.loadFromTypedArray(storageType, source, out, temp, &fail);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.loadFromTypedArray(storageType, source, out, temp, &fail);
  }

  // Only emit the bailout if the load helper actually used the label.
  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }
}
// Load a 64-bit element from a BigInt64/BigUint64 typed array and box it
// into a freshly created BigInt object.
void CodeGenerator::visitLoadUnboxedBigInt(LLoadUnboxedBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToRegister(lir->temp());
  Register64 temp64 = ToRegister64(lir->temp64());
  Register out = ToRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.load64(source, temp64);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.load64(source, temp64);
  }

  emitCreateBigInt(lir, storageType, temp64, out, temp);
}
// Load an element from a DataView. Unlike typed arrays, DataView accesses
// are byte-addressed (unaligned) and carry an explicit endianness flag, so
// this may need an unaligned GPR load followed by a byte swap before moving
// the result into its final (possibly floating-point) register.
void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp = ToTempRegisterOrInvalid(lir->temp());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
  AnyRegister out = ToAnyRegister(lir->output());

  const MLoadDataViewElement* mir = lir->mir();
  Scalar::Type storageType = mir->storageType();

  // Byte-scaled index: DataView offsets are not element-scaled.
  BaseIndex source(elements, ToRegister(lir->index()), TimesOne);

  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly load if no byte swap is needed and the platform supports unaligned
  // accesses for the access. (Such support is assumed for integer types.)
  if (noSwap && (!Scalar::isFloatingType(storageType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    if (!Scalar::isBigIntType(storageType)) {
      Label fail;
      masm.loadFromTypedArray(storageType, source, out, temp, &fail);

      if (fail.used()) {
        bailoutFrom(&fail, lir->snapshot());
      }
    } else {
      masm.load64(source, temp64);

      emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
    }
    return;
  }

  // Load the value into a gpr register.
  switch (storageType) {
    case Scalar::Int16:
      masm.load16UnalignedSignExtend(source, out.gpr());
      break;
    case Scalar::Uint16:
      masm.load16UnalignedZeroExtend(source, out.gpr());
      break;
    case Scalar::Int32:
      masm.load32Unaligned(source, out.gpr());
      break;
    case Scalar::Uint32:
      // If the output is a double register, stage the bits in |temp|.
      masm.load32Unaligned(source, out.isFloat() ? temp : out.gpr());
      break;
    case Scalar::Float32:
      masm.load32Unaligned(source, temp);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.load64Unaligned(source, temp64);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Runtime endianness flag: skip the swap when the requested order
      // matches the native order.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (storageType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(out.gpr());
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(out.gpr());
        break;
      case Scalar::Int32:
        masm.byteSwap32(out.gpr());
        break;
      case Scalar::Uint32:
        masm.byteSwap32(out.isFloat() ? temp : out.gpr());
        break;
      case Scalar::Float32:
        masm.byteSwap32(temp);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Move the value into the output register.
  switch (storageType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      break;
    case Scalar::Uint32:
      if (out.isFloat()) {
        masm.convertUInt32ToDouble(temp, out.fpu());
      } else {
        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadDataViewElement to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        bailoutTest32(Assembler::Signed, out.gpr(), out.gpr(), lir->snapshot());
      }
      break;
    case Scalar::Float32:
      masm.moveGPRToFloat32(temp, out.fpu());
      masm.canonicalizeFloat(out.fpu());
      break;
    case Scalar::Float64:
      masm.moveGPR64ToDouble(temp64, out.fpu());
      masm.canonicalizeDouble(out.fpu());
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Load a typed-array element where out-of-bounds indexes yield |undefined|
// (the "hole" form). Uint32 values that don't fit an Int32 either bail or
// are forced to double, per the MIR node's forceDouble() flag.
void CodeGenerator::visitLoadTypedArrayElementHole(
    LLoadTypedArrayElementHole* lir) {
  Register object = ToRegister(lir->object());
  const ValueOperand out = ToOutValue(lir);

  // Load the length.
  Register scratch = out.scratchReg();
  Register scratch2 = ToRegister(lir->temp0());
  Register index = ToRegister(lir->index());
  masm.loadArrayBufferViewLengthIntPtr(object, scratch);

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, scratch, scratch2, &outOfBounds);

  // Load the elements vector.
  masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);

  Scalar::Type arrayType = lir->mir()->arrayType();
  Label fail;
  BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
  MacroAssembler::Uint32Mode uint32Mode =
      lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
                                : MacroAssembler::Uint32Mode::FailOnDouble;
  masm.loadFromTypedArray(arrayType, source, out, uint32Mode, out.scratchReg(),
                          &fail);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }

  masm.bind(&done);
}
// BigInt64/BigUint64 variant of the "hole" typed-array load: in-bounds
// elements are boxed into a new BigInt, out-of-bounds yields |undefined|.
void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
    LLoadTypedArrayElementHoleBigInt* lir) {
  Register object = ToRegister(lir->object());
  const ValueOperand out = ToOutValue(lir);

  // On x86 there are not enough registers. In that case reuse the output's
  // type register as temporary.
#ifdef JS_CODEGEN_X86
  MOZ_ASSERT(lir->temp()->isBogusTemp());
  Register temp = out.typeReg();
#else
  Register temp = ToRegister(lir->temp());
#endif
  Register64 temp64 = ToRegister64(lir->temp64());

  // Load the length.
  Register scratch = out.scratchReg();
  Register index = ToRegister(lir->index());
  masm.loadArrayBufferViewLengthIntPtr(object, scratch);

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, scratch, temp, &outOfBounds);

  // Load the elements vector.
  masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);

  Scalar::Type arrayType = lir->mir()->arrayType();
  BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
  masm.load64(source, temp64);

  // Allocate the BigInt into the payload register, then tag the result.
  Register bigInt = out.scratchReg();
  emitCreateBigInt(lir, arrayType, temp64, bigInt, temp);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
// Helper for emitting a jump table for a switch. The table is a sequence of
// code pointers (one per case) either emitted inline (ARM, pc-relative) or
// as out-of-line data; patching of the pointers happens in
// visitOutOfLineSwitch once all case labels are bound.
template <SwitchTableType tableType>
class OutOfLineSwitch : public OutOfLineCodeBase<CodeGenerator> {
  using LabelsVector = Vector<Label, 0, JitAllocPolicy>;
  using CodeLabelsVector = Vector<CodeLabel, 0, JitAllocPolicy>;
  // Labels bound at each case's code (targets of the table).
  LabelsVector labels_;
  // Code labels for the reserved table slots, patched to point at labels_.
  CodeLabelsVector codeLabels_;
  // Address of the start of the table itself.
  CodeLabel start_;
  // Set once codegen has moved to the out-of-line phase; gates whether
  // addTableEntry emits a slot (inline tables emit during the main phase).
  bool isOutOfLine_;

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineSwitch(this);
  }

 public:
  explicit OutOfLineSwitch(TempAllocator& alloc)
      : labels_(alloc), codeLabels_(alloc), isOutOfLine_(false) {}

  CodeLabel* start() { return &start_; }

  CodeLabelsVector& codeLabels() { return codeLabels_; }
  LabelsVector& labels() { return labels_; }

  // Emit the indirect jump: table[index] -> case code.
  void jumpToCodeEntries(MacroAssembler& masm, Register index, Register temp) {
    Register base;
    if (tableType == SwitchTableType::Inline) {
#if defined(JS_CODEGEN_ARM)
      base = ::js::jit::pc;
#else
      MOZ_CRASH("NYI: SwitchTableType::Inline");
#endif
    } else {
#if defined(JS_CODEGEN_ARM)
      MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
#else
      masm.mov(start(), temp);
      base = temp;
#endif
    }
    BaseIndex jumpTarget(base, index, ScalePointer);
    masm.branchToComputedAddress(jumpTarget);
  }

  // Register an entry in the switch table.
  void addTableEntry(MacroAssembler& masm) {
    if ((!isOutOfLine_ && tableType == SwitchTableType::Inline) ||
        (isOutOfLine_ && tableType == SwitchTableType::OutOfLine)) {
      CodeLabel cl;
      masm.writeCodePointer(&cl);
      masm.propagateOOM(codeLabels_.append(std::move(cl)));
    }
  }

  // Register the code, to which the table will jump to.
  void addCodeEntry(MacroAssembler& masm) {
    Label entry;
    masm.bind(&entry);
    masm.propagateOOM(labels_.append(std::move(entry)));
  }

  void setOutOfLine() { isOutOfLine_ = true; }
};
// Out-of-line phase for OutOfLineSwitch: emit the table data (out-of-line
// variant) and patch every reserved code pointer to its bound case label.
template <SwitchTableType tableType>
void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<tableType>* jumpTable) {
  jumpTable->setOutOfLine();
  auto& labels = jumpTable->labels();

  if (tableType == SwitchTableType::OutOfLine) {
#if defined(JS_CODEGEN_ARM)
    MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
#elif defined(JS_CODEGEN_NONE)
    MOZ_CRASH();
#else

#  if defined(JS_CODEGEN_ARM64)
    // Keep the table contiguous: no constant-pool or nop insertion while
    // the pointers are being written.
    AutoForbidPoolsAndNops afp(
        &masm,
        (labels.length() + 1) * (sizeof(void*) / vixl::kInstructionSize));
#  endif

    masm.haltingAlign(sizeof(void*));

    // Bind the address of the jump table and reserve the space for code
    // pointers to jump in the newly generated code.
    masm.bind(jumpTable->start());
    masm.addCodeLabel(*jumpTable->start());
    for (size_t i = 0, e = labels.length(); i < e; i++) {
      jumpTable->addTableEntry(masm);
    }
#endif
  }

  // Register all reserved pointers of the jump table to target labels. The
  // entries of the jump table need to be absolute addresses and thus must be
  // patched after codegen is finished.
  auto& codeLabels = jumpTable->codeLabels();
  for (size_t i = 0, e = codeLabels.length(); i < e; i++) {
    auto& cl = codeLabels[i];
    cl.target()->bind(labels[i].offset());
    masm.addCodeLabel(cl);
  }
}

// Explicit instantiations for the two table placements.
template void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<SwitchTableType::Inline>* jumpTable);
template void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<SwitchTableType::OutOfLine>* jumpTable);
// Store |value| (constant, GPR, or float register, per |writeType|) into a
// typed-array slot addressed by |dest| (Address or BaseIndex).
template <typename T>
static inline void StoreToTypedArray(MacroAssembler& masm,
                                     Scalar::Type writeType,
                                     const LAllocation* value, const T& dest) {
  if (writeType == Scalar::Float32 || writeType == Scalar::Float64) {
    masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
  } else {
    if (value->isConstant()) {
      masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
    } else {
      masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
    }
  }
}
// Store an unboxed scalar into a typed array's elements; the index may be a
// compile-time constant or a register.
void CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();

  const MStoreUnboxedScalar* mir = lir->mir();

  Scalar::Type writeType = mir->writeType();

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), writeType);
    StoreToTypedArray(masm, writeType, value, dest);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(writeType));
    StoreToTypedArray(masm, writeType, value, dest);
  }
}
// Store a BigInt value into a BigInt64/BigUint64 typed array: unbox the
// BigInt's 64-bit digits into |temp|, then store them.
void CodeGenerator::visitStoreUnboxedBigInt(LStoreUnboxedBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp = ToRegister64(lir->temp());

  Scalar::Type writeType = lir->mir()->writeType();

  masm.loadBigInt64(value, temp);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), writeType);
    masm.storeToTypedBigIntArray(writeType, temp, dest);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(writeType));
    masm.storeToTypedBigIntArray(writeType, temp, dest);
  }
}
// Store an element into a DataView. Mirrors visitLoadDataViewElement:
// byte-addressed (unaligned) destination plus an explicit endianness flag,
// so the value may need to be staged in a GPR and byte-swapped before an
// unaligned store.
void CodeGenerator::visitStoreDataViewElement(LStoreDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp = ToTempRegisterOrInvalid(lir->temp());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());

  const MStoreDataViewElement* mir = lir->mir();
  Scalar::Type writeType = mir->writeType();

  // Byte-scaled index: DataView offsets are not element-scaled.
  BaseIndex dest(elements, ToRegister(lir->index()), TimesOne);

  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly store if no byte swap is needed and the platform supports
  // unaligned accesses for the access. (Such support is assumed for integer
  // types.)
  if (noSwap && (!Scalar::isFloatingType(writeType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    if (!Scalar::isBigIntType(writeType)) {
      StoreToTypedArray(masm, writeType, value, dest);
    } else {
      masm.loadBigInt64(ToRegister(value), temp64);
      masm.storeToTypedBigIntArray(writeType, temp64, dest);
    }
    return;
  }

  // Load the value into a gpr register.
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      if (value->isConstant()) {
        masm.move32(Imm32(ToInt32(value)), temp);
      } else {
        masm.move32(ToRegister(value), temp);
      }
      break;
    case Scalar::Float32: {
      FloatRegister fvalue = ToFloatRegister(value);
      // Canonicalize NaNs before storing so the raw bits are deterministic.
      masm.canonicalizeFloatIfDeterministic(fvalue);
      masm.moveFloat32ToGPR(fvalue, temp);
      break;
    }
    case Scalar::Float64: {
      FloatRegister fvalue = ToFloatRegister(value);
      masm.canonicalizeDoubleIfDeterministic(fvalue);
      masm.moveDoubleToGPR64(fvalue, temp64);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.loadBigInt64(ToRegister(value), temp64);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Runtime endianness flag: skip the swap when the requested order
      // matches the native order.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (writeType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(temp);
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(temp);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
        masm.byteSwap32(temp);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Store the value into the destination.
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
      masm.store16Unaligned(temp, dest);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.store32Unaligned(temp, dest);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.store64Unaligned(temp64, dest);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Store into a typed array with out-of-bounds indexes silently ignored
// (spec behavior for typed-array element stores past the length).
void CodeGenerator::visitStoreTypedArrayElementHole(
    LStoreTypedArrayElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();

  Scalar::Type arrayType = lir->mir()->arrayType();

  Register index = ToRegister(lir->index());
  const LAllocation* length = lir->length();
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());

  // Skip the store entirely when the index is out of bounds.
  Label skip;
  if (length->isRegister()) {
    masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
  } else {
    masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
  }

  BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
  StoreToTypedArray(masm, arrayType, value, dest);

  masm.bind(&skip);
}
// BigInt64/BigUint64 variant of the "hole" typed-array store: out-of-bounds
// indexes are silently ignored; otherwise unbox the BigInt and store its
// 64-bit digits.
void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
    LStoreTypedArrayElementHoleBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp = ToRegister64(lir->temp());

  Scalar::Type arrayType = lir->mir()->arrayType();

  Register index = ToRegister(lir->index());
  const LAllocation* length = lir->length();
  // The 64-bit temp's scratch half doubles as the Spectre-mitigation temp;
  // it is only clobbered before the BigInt is unboxed into |temp|.
  Register spectreTemp = temp.scratchReg();

  Label skip;
  if (length->isRegister()) {
    masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
  } else {
    masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
  }

  masm.loadBigInt64(value, temp);

  BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
  masm.storeToTypedBigIntArray(arrayType, temp, dest);

  masm.bind(&skip);
}
// Atomics.isLockFree(size): delegate to the MacroAssembler helper.
void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
  Register value = ToRegister(lir->value());
  Register output = ToRegister(lir->output());

  masm.atomicIsLockFreeJS(value, output);
}
// Clamp an int32 to the [0, 255] range in place (Uint8Clamped semantics);
// input and output share a register.
void CodeGenerator::visitClampIToUint8(LClampIToUint8* lir) {
  Register output = ToRegister(lir->output());
  MOZ_ASSERT(output == ToRegister(lir->input()));
  masm.clampIntToUint8(output);
}
// Clamp a double to [0, 255] with round-to-even (Uint8Clamped semantics).
void CodeGenerator::visitClampDToUint8(LClampDToUint8* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());
  masm.clampDoubleToUint8(input, output);
}
// Clamp an arbitrary boxed Value to [0, 255]. Strings are converted to a
// number via an out-of-line StringToNumber VM call; unsupported inputs
// bail out.
void CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) {
  ValueOperand operand = ToValue(lir, LClampVToUint8::InputIndex);
  FloatRegister tempFloat = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  using Fn = bool (*)(JSContext*, JSString*, double*);
  OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
      lir, ArgList(output), StoreFloatRegisterTo(tempFloat));
  Label* stringEntry = oolString->entry();
  Label* stringRejoin = oolString->rejoin();

  Label fails;
  masm.clampValueToUint8(operand, stringEntry, stringRejoin, output, tempFloat,
                         output, &fails);

  bailoutFrom(&fails, lir->snapshot());
}
// Emit an inline cache for the |in| operator: tests whether |key| names a
// property of |object|, leaving a boolean in |output|.
17017 void CodeGenerator::visitInCache(LInCache* ins) {
17018 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
17020 ConstantOrRegister key =
17021 toConstantOrRegister(ins, LInCache::LhsIndex, ins->mir()->key()->type());
17022 Register object = ToRegister(ins->rhs());
17023 Register output = ToRegister(ins->output());
17024 Register temp = ToRegister(ins->temp0());
17026 IonInIC cache(liveRegs, key, object, output, temp);
17027 addIC(ins, allocateIC(cache));
// Emit |index in array| for dense elements: true iff the index is below the
// initialized length and the element slot does not hold the magic "hole"
// value. Negative indices either bail out (constant path) or are checked
// only when required by the MIR node.
17030 void CodeGenerator::visitInArray(LInArray* lir) {
17031 const MInArray* mir = lir->mir();
17032 Register elements = ToRegister(lir->elements());
17033 Register initLength = ToRegister(lir->initLength());
17034 Register output = ToRegister(lir->output());
17036 Label falseBranch, done, trueBranch;
17038 if (lir->index()->isConstant()) {
17039 int32_t index = ToInt32(lir->index());
// A known-negative constant index can never be present; bail out.
17041 if (index < 0) {
17042 MOZ_ASSERT(mir->needsNegativeIntCheck());
17043 bailout(lir->snapshot());
17044 return;
// index >= initLength (unsigned compare) => not present.
17047 masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index),
17048 &falseBranch);
// A hole element stores a magic value; treat it as absent.
17050 NativeObject::elementsSizeMustNotOverflow();
17051 Address address = Address(elements, index * sizeof(Value));
17052 masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
17053 } else {
17054 Register index = ToRegister(lir->index());
// With a dynamic index, the unsigned initLength comparison also catches
// negative indices; route those to the negative-int check when needed.
17056 Label negativeIntCheck;
17057 Label* failedInitLength = &falseBranch;
17058 if (mir->needsNegativeIntCheck()) {
17059 failedInitLength = &negativeIntCheck;
17062 masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);
17064 BaseObjectElementIndex address(elements, index);
17065 masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
// Bail out on an actually-negative index; otherwise it was simply out of
// bounds and the answer is false.
17067 if (mir->needsNegativeIntCheck()) {
17068 masm.jump(&trueBranch);
17069 masm.bind(&negativeIntCheck);
17071 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
17073 masm.jump(&falseBranch);
// Materialize the boolean result.
17077 masm.bind(&trueBranch);
17078 masm.move32(Imm32(1), output);
17079 masm.jump(&done);
17081 masm.bind(&falseBranch);
17082 masm.move32(Imm32(0), output);
17083 masm.bind(&done);
// Guard that the dense element at |index| is not a hole (magic value);
// bail out of the compiled code if it is.
17086 void CodeGenerator::visitGuardElementNotHole(LGuardElementNotHole* lir) {
17087 Register elements = ToRegister(lir->elements());
17088 const LAllocation* index = lir->index();
17090 Label testMagic;
17091 if (index->isConstant()) {
17092 Address address(elements, ToInt32(index) * sizeof(js::Value));
17093 masm.branchTestMagic(Assembler::Equal, address, &testMagic);
17094 } else {
17095 BaseObjectElementIndex address(elements, ToRegister(index));
17096 masm.branchTestMagic(Assembler::Equal, address, &testMagic);
17098 bailoutFrom(&testMagic, lir->snapshot());
// |instanceof| with an object-typed LHS; shares codegen with the Value case.
17101 void CodeGenerator::visitInstanceOfO(LInstanceOfO* ins) {
17102 Register protoReg = ToRegister(ins->rhs());
17103 emitInstanceOf(ins, protoReg);
// |instanceof| with a boxed-Value LHS; shares codegen with the object case.
17106 void CodeGenerator::visitInstanceOfV(LInstanceOfV* ins) {
17107 Register protoReg = ToRegister(ins->rhs());
17108 emitInstanceOf(ins, protoReg);
// Shared codegen for LInstanceOfO/LInstanceOfV. |protoReg| holds the known
// prototype object; the result boolean is produced in the instruction's
// first def register.
17111 void CodeGenerator::emitInstanceOf(LInstruction* ins, Register protoReg) {
17112 // This path implements fun_hasInstance when the function's prototype is
17113 // known to be the object in protoReg
17115 Label done;
17116 Register output = ToRegister(ins->getDef(0));
17118 // If the lhs is a primitive, the result is false.
17119 Register objReg;
17120 if (ins->isInstanceOfV()) {
17121 Label isObject;
17122 ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
17123 masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
17124 masm.mov(ImmWord(0), output);
17125 masm.jump(&done);
17126 masm.bind(&isObject);
// Note: unboxing may clobber |output|; objReg can alias it.
17127 objReg = masm.extractObject(lhsValue, output);
17128 } else {
17129 objReg = ToRegister(ins->toInstanceOfO()->lhs());
17132 // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
17133 // This follows the main loop of js::IsPrototypeOf, though additionally breaks
17134 // out of the loop on Proxy::LazyProto.
17136 // Load the lhs's prototype.
17137 masm.loadObjProto(objReg, output);
17139 Label testLazy;
17141 Label loopPrototypeChain;
17142 masm.bind(&loopPrototypeChain);
17144 // Test for the target prototype object.
17145 Label notPrototypeObject;
17146 masm.branchPtr(Assembler::NotEqual, output, protoReg, &notPrototypeObject);
17147 masm.mov(ImmWord(1), output);
17148 masm.jump(&done);
17149 masm.bind(&notPrototypeObject);
17151 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
17153 // Test for nullptr or Proxy::LazyProto
17154 masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);
17156 // Load the current object's prototype.
17157 masm.loadObjProto(output, output);
17159 masm.jump(&loopPrototypeChain);
17162 // Make a VM call if an object with a lazy proto was found on the prototype
17163 // chain. This currently occurs only for cross compartment wrappers, which
17164 // we do not expect to be compared with non-wrapper functions from this
17165 // compartment. Otherwise, we stopped on a nullptr prototype and the output
17166 // register is already correct.
17168 using Fn = bool (*)(JSContext*, HandleObject, JSObject*, bool*);
17169 auto* ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(protoReg, objReg),
17170 StoreRegisterTo(output));
17172 // Regenerate the original lhs object for the VM call.
// If the lhs object's register was clobbered (objReg aliases output), we
// must re-extract it before entering the OOL VM call.
17173 Label regenerate, *lazyEntry;
17174 if (objReg != output) {
17175 lazyEntry = ool->entry();
17176 } else {
17177 masm.bind(&regenerate);
17178 lazyEntry = &regenerate;
17179 if (ins->isInstanceOfV()) {
17180 ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
17181 objReg = masm.extractObject(lhsValue, output);
17182 } else {
17183 objReg = ToRegister(ins->toInstanceOfO()->lhs());
17185 MOZ_ASSERT(objReg == output);
17186 masm.jump(ool->entry());
// The loop exited with output == nullptr (result already 0/false) or
// output == LazyProto (take the VM path).
17189 masm.bind(&testLazy);
17190 masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);
17192 masm.bind(&done);
17193 masm.bind(ool->rejoin());
// Emit an inline cache for |instanceof| when the RHS prototype is not known
// at compile time.
17196 void CodeGenerator::visitInstanceOfCache(LInstanceOfCache* ins) {
17197 // The Lowering ensures that RHS is an object, and that LHS is a value.
17198 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
17199 TypedOrValueRegister lhs =
17200 TypedOrValueRegister(ToValue(ins, LInstanceOfCache::LHS));
17201 Register rhs = ToRegister(ins->rhs());
17202 Register output = ToRegister(ins->output());
17204 IonInstanceOfIC ic(liveRegs, lhs, rhs, output);
17205 addIC(ins, allocateIC(ic));
// Call a DOM property getter through the JSJitGetterOp ABI. Optionally
// reads the value directly from a reserved slot first; otherwise builds a
// fake exit frame and calls the C++ getter with an on-stack outparam.
17208 void CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins) {
17209 const Register JSContextReg = ToRegister(ins->getJSContextReg());
17210 const Register ObjectReg = ToRegister(ins->getObjectReg());
17211 const Register PrivateReg = ToRegister(ins->getPrivReg());
17212 const Register ValueReg = ToRegister(ins->getValueReg());
17214 Label haveValue;
17215 if (ins->mir()->valueMayBeInSlot()) {
17216 size_t slot = ins->mir()->domMemberSlotIndex();
17217 // It's a bit annoying to redo these slot calculations, which duplicate
17218 // LSlots and a few other things like that, but I'm not sure there's a
17219 // way to reuse those here.
17221 // If this ever gets fixed to work with proxies (by not assuming that
17222 // reserved slot indices, which is what domMemberSlotIndex() returns,
17223 // match fixed slot indices), we can reenable MGetDOMProperty for
17224 // proxies in IonBuilder.
17225 if (slot < NativeObject::MAX_FIXED_SLOTS) {
17226 masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
17227 JSReturnOperand);
17228 } else {
17229 // It's a dynamic slot.
17230 slot -= NativeObject::MAX_FIXED_SLOTS;
17231 // Use PrivateReg as a scratch register for the slots pointer.
17232 masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
17233 PrivateReg);
17234 masm.loadValue(Address(PrivateReg, slot * sizeof(js::Value)),
17235 JSReturnOperand);
// A slot value of undefined means "not cached"; fall through to the call.
17237 masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
17240 DebugOnly<uint32_t> initialStack = masm.framePushed();
17242 masm.checkStackAlignment();
17244 // Make space for the outparam. Pre-initialize it to UndefinedValue so we
17245 // can trace it at GC time.
17246 masm.Push(UndefinedValue());
17247 // We pass the pointer to our out param as an instance of
17248 // JSJitGetterCallArgs, since on the binary level it's the same thing.
17249 static_assert(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
17250 masm.moveStackPtrTo(ValueReg);
17252 masm.Push(ObjectReg);
17254 LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());
17256 // Rooting will happen at GC time.
17257 masm.moveStackPtrTo(ObjectReg);
// Getters may live in a different realm than the caller.
17259 Realm* getterRealm = ins->mir()->getterRealm();
17260 if (gen->realm->realmPtr() != getterRealm) {
17261 // We use JSContextReg as scratch register here.
17262 masm.switchToRealm(getterRealm, JSContextReg);
17265 uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
17266 masm.loadJSContext(JSContextReg);
17267 masm.enterFakeExitFrame(JSContextReg, JSContextReg,
17268 ExitFrameType::IonDOMGetter);
17270 markSafepointAt(safepointOffset, ins);
// ABI call: getter(cx, obj, priv, args).
17272 masm.setupAlignedABICall();
17273 masm.loadJSContext(JSContextReg);
17274 masm.passABIArg(JSContextReg);
17275 masm.passABIArg(ObjectReg);
17276 masm.passABIArg(PrivateReg);
17277 masm.passABIArg(ValueReg);
17278 ensureOsiSpace();
17279 masm.callWithABI(DynamicFunction<JSJitGetterOp>(ins->mir()->fun()),
17280 MoveOp::GENERAL,
17281 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
// Infallible getters skip the boolean-result check; fallible ones throw
// via the exception label on a false return.
17283 if (ins->mir()->isInfallible()) {
17284 masm.loadValue(Address(masm.getStackPointer(),
17285 IonDOMExitFrameLayout::offsetOfResult()),
17286 JSReturnOperand);
17287 } else {
17288 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
17290 masm.loadValue(Address(masm.getStackPointer(),
17291 IonDOMExitFrameLayout::offsetOfResult()),
17292 JSReturnOperand);
17295 // Switch back to the current realm if needed. Note: if the getter threw an
17296 // exception, the exception handler will do this.
17297 if (gen->realm->realmPtr() != getterRealm) {
17298 static_assert(!JSReturnOperand.aliases(ReturnReg),
17299 "Clobbering ReturnReg should not affect the return value");
17300 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
17303 // Until C++ code is instrumented against Spectre, prevent speculative
17304 // execution from returning any private data.
17305 if (JitOptions.spectreJitToCxxCalls && ins->mir()->hasLiveDefUses()) {
17306 masm.speculationBarrier();
17309 masm.adjustStack(IonDOMExitFrameLayout::Size());
17311 masm.bind(&haveValue);
17313 MOZ_ASSERT(masm.framePushed() == initialStack);
// Load a DOM member stored in a fixed slot as a boxed Value.
17316 void CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins) {
17317 // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
17318 // use an LLoadFixedSlotV or some subclass of it for this case: that would
17319 // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
17320 // we'd have to duplicate a bunch of stuff we now get for free from
17321 // MGetDOMProperty.
17323 // If this ever gets fixed to work with proxies (by not assuming that
17324 // reserved slot indices, which is what domMemberSlotIndex() returns,
17325 // match fixed slot indices), we can reenable MGetDOMMember for
17326 // proxies in IonBuilder.
17327 Register object = ToRegister(ins->object());
17328 size_t slot = ins->mir()->domMemberSlotIndex();
17329 ValueOperand result = ToOutValue(ins);
17331 masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
17332 result);
// Load a DOM member stored in a fixed slot, unboxed to a typed register.
17335 void CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins) {
17336 // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
17337 // use an LLoadFixedSlotT or some subclass of it for this case: that would
17338 // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
17339 // we'd have to duplicate a bunch of stuff we now get for free from
17340 // MGetDOMProperty.
17342 // If this ever gets fixed to work with proxies (by not assuming that
17343 // reserved slot indices, which is what domMemberSlotIndex() returns,
17344 // match fixed slot indices), we can reenable MGetDOMMember for
17345 // proxies in IonBuilder.
17346 Register object = ToRegister(ins->object());
17347 size_t slot = ins->mir()->domMemberSlotIndex();
17348 AnyRegister result = ToAnyRegister(ins->getDef(0));
17349 MIRType type = ins->mir()->type();
17351 masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
17352 type, result);
// Call a DOM property setter through the JSJitSetterOp ABI, mirroring the
// fake-exit-frame setup in visitGetDOMProperty.
17355 void CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins) {
17356 const Register JSContextReg = ToRegister(ins->getJSContextReg());
17357 const Register ObjectReg = ToRegister(ins->getObjectReg());
17358 const Register PrivateReg = ToRegister(ins->getPrivReg());
17359 const Register ValueReg = ToRegister(ins->getValueReg());
17361 DebugOnly<uint32_t> initialStack = masm.framePushed();
17363 masm.checkStackAlignment();
17365 // Push the argument. Rooting will happen at GC time.
17366 ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
17367 masm.Push(argVal);
17368 // We pass the pointer to our out param as an instance of
17369 // JSJitSetterCallArgs, since on the binary level it's the same thing.
17370 static_assert(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
17371 masm.moveStackPtrTo(ValueReg);
17373 masm.Push(ObjectReg);
17375 LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());
17377 // Rooting will happen at GC time.
17378 masm.moveStackPtrTo(ObjectReg);
// Setters may live in a different realm than the caller.
17380 Realm* setterRealm = ins->mir()->setterRealm();
17381 if (gen->realm->realmPtr() != setterRealm) {
17382 // We use JSContextReg as scratch register here.
17383 masm.switchToRealm(setterRealm, JSContextReg);
17386 uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
17387 masm.loadJSContext(JSContextReg);
17388 masm.enterFakeExitFrame(JSContextReg, JSContextReg,
17389 ExitFrameType::IonDOMSetter);
17391 markSafepointAt(safepointOffset, ins);
// ABI call: setter(cx, obj, priv, args).
17393 masm.setupAlignedABICall();
17394 masm.loadJSContext(JSContextReg);
17395 masm.passABIArg(JSContextReg);
17396 masm.passABIArg(ObjectReg);
17397 masm.passABIArg(PrivateReg);
17398 masm.passABIArg(ValueReg);
17399 ensureOsiSpace();
17400 masm.callWithABI(DynamicFunction<JSJitSetterOp>(ins->mir()->fun()),
17401 MoveOp::GENERAL,
17402 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
// A false return from the setter means an exception is pending.
17404 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
17406 // Switch back to the current realm if needed. Note: if the setter threw an
17407 // exception, the exception handler will do this.
17408 if (gen->realm->realmPtr() != setterRealm) {
17409 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
17412 masm.adjustStack(IonDOMExitFrameLayout::Size());
17414 MOZ_ASSERT(masm.framePushed() == initialStack);
// Load a DOM proxy's expando Value from its reserved-slots private slot.
17417 void CodeGenerator::visitLoadDOMExpandoValue(LLoadDOMExpandoValue* ins) {
17418 Register proxy = ToRegister(ins->proxy());
17419 ValueOperand out = ToOutValue(ins);
17421 masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
17422 out.scratchReg());
17423 masm.loadValue(Address(out.scratchReg(),
17424 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
17425 out);
// Load a DOM proxy's expando value while guarding that its
// ExpandoAndGeneration matches the expected generation; bail otherwise.
17428 void CodeGenerator::visitLoadDOMExpandoValueGuardGeneration(
17429 LLoadDOMExpandoValueGuardGeneration* ins) {
17430 Register proxy = ToRegister(ins->proxy());
17431 ValueOperand out = ToOutValue(ins);
17433 Label bail;
17434 masm.loadDOMExpandoValueGuardGeneration(proxy, out,
17435 ins->mir()->expandoAndGeneration(),
17436 ins->mir()->generation(), &bail);
17437 bailoutFrom(&bail, ins->snapshot());
// Load a DOM proxy's expando value through its ExpandoAndGeneration
// indirection, without checking the generation counter.
17440 void CodeGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
17441 LLoadDOMExpandoValueIgnoreGeneration* ins) {
17442 Register proxy = ToRegister(ins->proxy());
17443 ValueOperand out = ToOutValue(ins);
17445 masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
17446 out.scratchReg());
17448 // Load the ExpandoAndGeneration* from the PrivateValue.
17449 masm.loadPrivate(
17450 Address(out.scratchReg(),
17451 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
17452 out.scratchReg());
17454 // Load expandoAndGeneration->expando into the output Value register.
17455 masm.loadValue(
17456 Address(out.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), out);
// Guard that the expando Value is either undefined (no expando) or an
// object with the expected shape; otherwise bail out.
17459 void CodeGenerator::visitGuardDOMExpandoMissingOrGuardShape(
17460 LGuardDOMExpandoMissingOrGuardShape* ins) {
17461 Register temp = ToRegister(ins->temp0());
17462 ValueOperand input =
17463 ToValue(ins, LGuardDOMExpandoMissingOrGuardShape::InputIndex);
17465 Label done;
17466 masm.branchTestUndefined(Assembler::Equal, input, &done);
17468 masm.debugAssertIsObject(input);
17469 masm.unboxObject(input, temp);
17470 // The expando object is not used in this case, so we don't need Spectre
17471 // mitigations.
17472 Label bail;
17473 masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, temp,
17474 ins->mir()->shape(), &bail);
17475 bailoutFrom(&bail, ins->snapshot());
17477 masm.bind(&done);
// Out-of-line path for IsCallable: falls back to a C++ call
// (see visitOutOfLineIsCallable) when the inline test cannot decide.
17480 class OutOfLineIsCallable : public OutOfLineCodeBase<CodeGenerator> {
17481 Register object_;
17482 Register output_;
17484 public:
17485 OutOfLineIsCallable(Register object, Register output)
17486 : object_(object), output_(output) {}
17488 void accept(CodeGenerator* codegen) override {
17489 codegen->visitOutOfLineIsCallable(this);
17491 Register object() const { return object_; }
17492 Register output() const { return output_; }
// IsCallable on a known object: inline class test with an out-of-line
// fallback for ambiguous cases.
17495 void CodeGenerator::visitIsCallableO(LIsCallableO* ins) {
17496 Register object = ToRegister(ins->object());
17497 Register output = ToRegister(ins->output());
17499 OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(object, output);
17500 addOutOfLineCode(ool, ins->mir());
17502 masm.isCallable(object, output, ool->entry());
17504 masm.bind(ool->rejoin());
// IsCallable on a boxed Value: non-objects are trivially not callable;
// objects take the same inline-test-plus-OOL path as visitIsCallableO.
17507 void CodeGenerator::visitIsCallableV(LIsCallableV* ins) {
17508 ValueOperand val = ToValue(ins, LIsCallableV::ObjectIndex);
17509 Register output = ToRegister(ins->output());
17510 Register temp = ToRegister(ins->temp0());
17512 Label notObject;
17513 masm.fallibleUnboxObject(val, temp, &notObject);
17515 OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(temp, output);
17516 addOutOfLineCode(ool, ins->mir());
17518 masm.isCallable(temp, output, ool->entry());
17519 masm.jump(ool->rejoin());
17521 masm.bind(&notObject);
17522 masm.move32(Imm32(0), output);
17524 masm.bind(ool->rejoin());
// OOL fallback: call ObjectIsCallable via the C++ ABI, preserving volatile
// registers around the call.
17527 void CodeGenerator::visitOutOfLineIsCallable(OutOfLineIsCallable* ool) {
17528 Register object = ool->object();
17529 Register output = ool->output();
17531 saveVolatile(output);
17532 using Fn = bool (*)(JSObject* obj);
17533 masm.setupAlignedABICall();
17534 masm.passABIArg(object);
17535 masm.callWithABI<Fn, ObjectIsCallable>();
17536 masm.storeCallBoolResult(output);
17537 restoreVolatile(output);
17538 masm.jump(ool->rejoin());
// Out-of-line path for IsConstructor: falls back to a C++ call
// (see visitOutOfLineIsConstructor) when the inline test cannot decide.
17541 class OutOfLineIsConstructor : public OutOfLineCodeBase<CodeGenerator> {
17542 LIsConstructor* ins_;
17544 public:
17545 explicit OutOfLineIsConstructor(LIsConstructor* ins) : ins_(ins) {}
17547 void accept(CodeGenerator* codegen) override {
17548 codegen->visitOutOfLineIsConstructor(this);
17550 LIsConstructor* ins() const { return ins_; }
// IsConstructor: inline test with an out-of-line C++ fallback.
17553 void CodeGenerator::visitIsConstructor(LIsConstructor* ins) {
17554 Register object = ToRegister(ins->object());
17555 Register output = ToRegister(ins->output());
17557 OutOfLineIsConstructor* ool = new (alloc()) OutOfLineIsConstructor(ins);
17558 addOutOfLineCode(ool, ins->mir());
17560 masm.isConstructor(object, output, ool->entry());
17562 masm.bind(ool->rejoin());
// OOL fallback: call ObjectIsConstructor via the C++ ABI, preserving
// volatile registers around the call.
17565 void CodeGenerator::visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool) {
17566 LIsConstructor* ins = ool->ins();
17567 Register object = ToRegister(ins->object());
17568 Register output = ToRegister(ins->output());
17570 saveVolatile(output);
17571 using Fn = bool (*)(JSObject* obj);
17572 masm.setupAlignedABICall();
17573 masm.passABIArg(object);
17574 masm.callWithABI<Fn, ObjectIsConstructor>();
17575 masm.storeCallBoolResult(output);
17576 restoreVolatile(output);
17577 masm.jump(ool->rejoin());
// Set |output| to whether |object| is an Array constructor from another
// realm (delegates entirely to the MacroAssembler helper).
17580 void CodeGenerator::visitIsCrossRealmArrayConstructor(
17581 LIsCrossRealmArrayConstructor* ins) {
17582 Register object = ToRegister(ins->object());
17583 Register output = ToRegister(ins->output());
17585 masm.setIsCrossRealmArrayConstructor(object, output);
// Shared emitter for IsArray: checks the object's class inline. Plain
// ArrayObjects answer true, proxies go to the OOL VM call, everything else
// answers false. |notArray| optionally binds an extra entry into the false
// path (used by the Value variant for non-object inputs). Note: |output| is
// clobbered with the class pointer during the test.
17588 static void EmitObjectIsArray(MacroAssembler& masm, OutOfLineCode* ool,
17589 Register obj, Register output,
17590 Label* notArray = nullptr) {
17591 masm.loadObjClassUnsafe(obj, output);
17593 Label isArray;
17594 masm.branchPtr(Assembler::Equal, output, ImmPtr(&ArrayObject::class_),
17595 &isArray);
17597 // Branch to OOL path if it's a proxy.
17598 masm.branchTestClassIsProxy(true, output, ool->entry());
17600 if (notArray) {
17601 masm.bind(notArray);
17603 masm.move32(Imm32(0), output);
17604 masm.jump(ool->rejoin());
17606 masm.bind(&isArray);
17607 masm.move32(Imm32(1), output);
17609 masm.bind(ool->rejoin());
// IsArray on a known object; proxies fall back to js::IsArrayFromJit.
17612 void CodeGenerator::visitIsArrayO(LIsArrayO* lir) {
17613 Register object = ToRegister(lir->object());
17614 Register output = ToRegister(lir->output());
17616 using Fn = bool (*)(JSContext*, HandleObject, bool*);
17617 OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
17618 lir, ArgList(object), StoreRegisterTo(output));
17619 EmitObjectIsArray(masm, ool, object, output);
// IsArray on a boxed Value: non-objects are routed into the false path of
// EmitObjectIsArray; proxies fall back to js::IsArrayFromJit.
17622 void CodeGenerator::visitIsArrayV(LIsArrayV* lir) {
17623 ValueOperand val = ToValue(lir, LIsArrayV::ValueIndex);
17624 Register output = ToRegister(lir->output());
17625 Register temp = ToRegister(lir->temp0());
17627 Label notArray;
17628 masm.fallibleUnboxObject(val, temp, &notArray);
17630 using Fn = bool (*)(JSContext*, HandleObject, bool*);
17631 OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
17632 lir, ArgList(temp), StoreRegisterTo(output));
17633 EmitObjectIsArray(masm, ool, temp, output, &notArray);
// IsTypedArray: inline class-range test; when the object may be a wrapper,
// proxies take a VM call (IsPossiblyWrappedTypedArray) instead of answering
// false outright.
17636 void CodeGenerator::visitIsTypedArray(LIsTypedArray* lir) {
17637 Register object = ToRegister(lir->object());
17638 Register output = ToRegister(lir->output());
17640 OutOfLineCode* ool = nullptr;
17641 if (lir->mir()->isPossiblyWrapped()) {
17642 using Fn = bool (*)(JSContext*, JSObject*, bool*);
17643 ool = oolCallVM<Fn, jit::IsPossiblyWrappedTypedArray>(
17644 lir, ArgList(object), StoreRegisterTo(output));
17647 Label notTypedArray;
17648 Label done;
// |output| temporarily holds the class pointer for the test.
17650 masm.loadObjClassUnsafe(object, output);
17651 masm.branchIfClassIsNotTypedArray(output, &notTypedArray);
17653 masm.move32(Imm32(1), output);
17654 masm.jump(&done);
17655 masm.bind(&notTypedArray);
17656 if (ool) {
17657 masm.branchTestClassIsProxy(true, output, ool->entry());
17659 masm.move32(Imm32(0), output);
17660 masm.bind(&done);
17661 if (ool) {
17662 masm.bind(ool->rejoin());
// Set |output| to whether the Value holds an object.
17666 void CodeGenerator::visitIsObject(LIsObject* ins) {
17667 Register output = ToRegister(ins->output());
17668 ValueOperand value = ToValue(ins, LIsObject::ObjectIndex);
17669 masm.testObjectSet(Assembler::Equal, value, output);
// Branch form of IsObject: jump to ifTrue/ifFalse on the object test.
17672 void CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins) {
17673 ValueOperand value = ToValue(ins, LIsObjectAndBranch::Input);
17674 testObjectEmitBranch(Assembler::Equal, value, ins->ifTrue(), ins->ifFalse());
// Set |output| to whether the Value is null or undefined.
17677 void CodeGenerator::visitIsNullOrUndefined(LIsNullOrUndefined* ins) {
17678 Register output = ToRegister(ins->output());
17679 ValueOperand value = ToValue(ins, LIsNullOrUndefined::InputIndex);
17681 Label isNotNull, done;
17682 masm.branchTestNull(Assembler::NotEqual, value, &isNotNull);
17684 masm.move32(Imm32(1), output);
17685 masm.jump(&done);
// Not null: the answer is exactly "is it undefined?".
17687 masm.bind(&isNotNull);
17688 masm.testUndefinedSet(Assembler::Equal, value, output);
17690 masm.bind(&done);
// Branch form of IsNullOrUndefined: split the tag once, branch to ifTrue on
// either null or undefined, and fall through (or jump) to ifFalse.
17693 void CodeGenerator::visitIsNullOrUndefinedAndBranch(
17694 LIsNullOrUndefinedAndBranch* ins) {
17695 Label* ifTrue = getJumpLabelForBranch(ins->ifTrue());
17696 Label* ifFalse = getJumpLabelForBranch(ins->ifFalse());
17697 ValueOperand value = ToValue(ins, LIsNullOrUndefinedAndBranch::Input);
17699 ScratchTagScope tag(masm, value);
17700 masm.splitTagForTest(value, tag);
17702 masm.branchTestNull(Assembler::Equal, tag, ifTrue);
17703 masm.branchTestUndefined(Assembler::Equal, tag, ifTrue);
// Skip the jump when the false block immediately follows.
17705 if (!isNextBlock(ins->ifFalse()->lir())) {
17706 masm.jump(ifFalse);
// Materialize the outermost (top-level compiled) JSScript pointer in |reg|.
17710 void CodeGenerator::loadOutermostJSScript(Register reg) {
17711 // The "outermost" JSScript means the script that we are compiling
17712 // basically; this is not always the script associated with the
17713 // current basic block, which might be an inlined script.
17715 MIRGraph& graph = current->mir()->graph();
17716 MBasicBlock* entryBlock = graph.entryBlock();
17717 masm.movePtr(ImmGCPtr(entryBlock->info().script()), reg);
// Materialize the JSScript pointer for |block| (possibly an inlined script)
// in |reg|.
17720 void CodeGenerator::loadJSScriptForBlock(MBasicBlock* block, Register reg) {
17721 // The current JSScript means the script for the current
17722 // basic block. This may be an inlined script.
17724 JSScript* script = block->info().script();
17725 masm.movePtr(ImmGCPtr(script), reg);
// Set |output| to whether |lhs|'s class pointer equals the expected class.
// |output| is reused to hold the loaded class pointer before the compare.
17728 void CodeGenerator::visitHasClass(LHasClass* ins) {
17729 Register lhs = ToRegister(ins->lhs());
17730 Register output = ToRegister(ins->output());
17732 masm.loadObjClassUnsafe(lhs, output);
17733 masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()),
17734 output);
// Guard that |lhs| has the expected class; bail out otherwise.
17737 void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
17738 Register lhs = ToRegister(ins->lhs());
17739 Register temp = ToRegister(ins->temp0());
17741 // branchTestObjClass may zero the object register on speculative paths
17742 // (we should have a defineReuseInput allocation in this case).
17743 Register spectreRegToZero = lhs;
17745 Label notEqual;
17747 masm.branchTestObjClass(Assembler::NotEqual, lhs, ins->mir()->getClass(),
17748 temp, spectreRegToZero, &notEqual);
17750 // Can't return null-return here, so bail.
17751 bailoutFrom(&notEqual, ins->snapshot());
// Guard that |lhs| is a function object; bail out otherwise.
17754 void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
17755 Register lhs = ToRegister(ins->lhs());
17756 Register temp = ToRegister(ins->temp0());
17758 // branchTestObjClass may zero the object register on speculative paths
17759 // (we should have a defineReuseInput allocation in this case).
17760 Register spectreRegToZero = lhs;
17762 Label notEqual;
17764 masm.branchTestObjIsFunction(Assembler::NotEqual, lhs, temp, spectreRegToZero,
17765 &notEqual);
17767 // Can't return null-return here, so bail.
17768 bailoutFrom(&notEqual, ins->snapshot());
// Call js::ObjectClassToString(cx, obj) via the C++ ABI; a null return
// (ReturnReg == 0) triggers a bailout.
17771 void CodeGenerator::visitObjectClassToString(LObjectClassToString* lir) {
17772 Register obj = ToRegister(lir->lhs());
17773 Register temp = ToRegister(lir->temp0());
17775 using Fn = JSString* (*)(JSContext*, JSObject*);
17776 masm.setupAlignedABICall();
17777 masm.loadJSContext(temp);
17778 masm.passABIArg(temp);
17779 masm.passABIArg(obj);
17780 masm.callWithABI<Fn, js::ObjectClassToString>();
17782 bailoutCmpPtr(Assembler::Equal, ReturnReg, ImmWord(0), lir->snapshot());
// No code is emitted for a wasm parameter.
17785 void CodeGenerator::visitWasmParameter(LWasmParameter* lir) {}
// No code is emitted for a 64-bit wasm parameter.
17787 void CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir) {}
// Wasm return: jump to the shared return label unless we'd fall through.
17789 void CodeGenerator::visitWasmReturn(LWasmReturn* lir) {
17790 // Don't emit a jump to the return label if this is the last block.
17791 if (current->mir() != *gen->graph().poBegin()) {
17792 masm.jump(&returnLabel_);
// Wasm 64-bit return: jump to the shared return label unless we'd fall
// through.
17796 void CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir) {
17797 // Don't emit a jump to the return label if this is the last block.
17798 if (current->mir() != *gen->graph().poBegin()) {
17799 masm.jump(&returnLabel_);
// Wasm void return: jump to the shared return label unless we'd fall
// through.
17803 void CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir) {
17804 // Don't emit a jump to the return label if this is the last block.
17805 if (current->mir() != *gen->graph().poBegin()) {
17806 masm.jump(&returnLabel_);
// Debug-mode assertion: verify an integer register value lies within the
// bounds of the range-analysis result |r|, crashing (assumeUnreachable)
// on violation. |type| selects 32-bit vs pointer-width comparisons.
17810 void CodeGenerator::emitAssertRangeI(MIRType type, const Range* r,
17811 Register input) {
17812 // Check the lower bound.
17813 if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
17814 Label success;
17815 if (type == MIRType::Int32 || type == MIRType::Boolean) {
17816 masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
17817 &success);
17818 } else {
17819 MOZ_ASSERT(type == MIRType::IntPtr);
17820 masm.branchPtr(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
17821 &success);
17823 masm.assumeUnreachable(
17824 "Integer input should be equal or higher than Lowerbound.");
17825 masm.bind(&success);
17828 // Check the upper bound.
17829 if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
17830 Label success;
17831 if (type == MIRType::Int32 || type == MIRType::Boolean) {
17832 masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
17833 &success);
17834 } else {
17835 MOZ_ASSERT(type == MIRType::IntPtr);
17836 masm.branchPtr(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
17837 &success);
17839 masm.assumeUnreachable(
17840 "Integer input should be lower or equal than Upperbound.");
17841 masm.bind(&success);
17844 // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
17845 // r->exponent(), there's nothing to check, because if we ended up in the
17846 // integer range checking code, the value is already in an integer register
17847 // in the integer range.
// Debug-mode assertion: verify a double register value is consistent with
// the range-analysis result |r| (bounds, NaN-ness, negative zero, and
// exponent limits), crashing (assumeUnreachable) on violation.
17850 void CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input,
17851 FloatRegister temp) {
17852 // Check the lower bound.
17853 if (r->hasInt32LowerBound()) {
17854 Label success;
17855 masm.loadConstantDouble(r->lower(), temp);
// NaN compares unordered; allow it through when the range permits NaN.
17856 if (r->canBeNaN()) {
17857 masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
17859 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
17860 &success);
17861 masm.assumeUnreachable(
17862 "Double input should be equal or higher than Lowerbound.");
17863 masm.bind(&success);
17865 // Check the upper bound.
17866 if (r->hasInt32UpperBound()) {
17867 Label success;
17868 masm.loadConstantDouble(r->upper(), temp);
17869 if (r->canBeNaN()) {
17870 masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
17872 masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
17873 masm.assumeUnreachable(
17874 "Double input should be lower or equal than Upperbound.");
17875 masm.bind(&success);
17878 // This code does not yet check r->canHaveFractionalPart(). This would require
17879 // new assembler interfaces to make rounding instructions available.
17881 if (!r->canBeNegativeZero()) {
17882 Label success;
17884 // First, test for being equal to 0.0, which also includes -0.0.
17885 masm.loadConstantDouble(0.0, temp);
17886 masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp,
17887 &success);
17889 // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
17890 // -Infinity instead of Infinity.
17891 masm.loadConstantDouble(1.0, temp);
17892 masm.divDouble(input, temp);
17893 masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);
17895 masm.assumeUnreachable("Input shouldn't be negative zero.");
17897 masm.bind(&success);
17900 if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
17901 r->exponent() < FloatingPoint<double>::kExponentBias) {
17902 // Check the bounds implied by the maximum exponent.
17903 Label exponentLoOk;
17904 masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
17905 masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
17906 masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp,
17907 &exponentLoOk);
17908 masm.assumeUnreachable("Check for exponent failed.");
17909 masm.bind(&exponentLoOk);
17911 Label exponentHiOk;
17912 masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
17913 masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
17914 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
17915 &exponentHiOk);
17916 masm.assumeUnreachable("Check for exponent failed.");
17917 masm.bind(&exponentHiOk);
17918 } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
17919 // If we think the value can't be NaN, check that it isn't.
17920 Label notnan;
17921 masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
17922 masm.assumeUnreachable("Input shouldn't be NaN.");
17923 masm.bind(&notnan);
17925 // If we think the value also can't be an infinity, check that it isn't.
17926 if (!r->canBeInfiniteOrNaN()) {
17927 Label notposinf;
17928 masm.loadConstantDouble(PositiveInfinity<double>(), temp);
17929 masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
17930 masm.assumeUnreachable("Input shouldn't be +Inf.");
17931 masm.bind(&notposinf);
17933 Label notneginf;
17934 masm.loadConstantDouble(NegativeInfinity<double>(), temp);
17935 masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
17936 masm.assumeUnreachable("Input shouldn't be -Inf.");
17937 masm.bind(&notneginf);
// Debug assertion: crash (via assumeUnreachable) if |obj|'s class does not
// match the class recorded on the MIR node.
void CodeGenerator::visitAssertClass(LAssertClass* ins) {
  Register obj = ToRegister(ins->input());
  Register temp = ToRegister(ins->getTemp(0));

  Label success;
  if (ins->mir()->getClass() == &FunctionClass) {
    // Allow both possible function classes here.
    masm.branchTestObjIsFunctionNoSpectreMitigations(Assembler::Equal, obj,
                                                     temp, &success);
  } else {
    masm.branchTestObjClassNoSpectreMitigations(
        Assembler::Equal, obj, ins->mir()->getClass(), temp, &success);
  }
  masm.assumeUnreachable("Wrong KnownClass during run-time");
  masm.bind(&success);
}
// Debug assertion: crash if |obj|'s shape differs from the MIR's shape.
void CodeGenerator::visitAssertShape(LAssertShape* ins) {
  Register obj = ToRegister(ins->input());

  Label success;
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::Equal, obj,
                                              ins->mir()->shape(), &success);
  masm.assumeUnreachable("Wrong Shape during run-time");
  masm.bind(&success);
}
// Emit a debug range check for an int32 input against its inferred Range.
void CodeGenerator::visitAssertRangeI(LAssertRangeI* ins) {
  Register input = ToRegister(ins->input());
  const Range* r = ins->range();

  emitAssertRangeI(ins->mir()->input()->type(), r, input);
}
// Emit a debug range check for a double input against its inferred Range.
void CodeGenerator::visitAssertRangeD(LAssertRangeD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister temp = ToFloatRegister(ins->temp());
  const Range* r = ins->range();

  emitAssertRangeD(r, input, temp);
}
// Emit a debug range check for a float32 input: widen to double first so the
// double-based range assertion helper can be reused.
void CodeGenerator::visitAssertRangeF(LAssertRangeF* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister temp = ToFloatRegister(ins->temp());
  FloatRegister temp2 = ToFloatRegister(ins->temp2());

  const Range* r = ins->range();

  masm.convertFloat32ToDouble(input, temp);
  emitAssertRangeD(r, temp, temp2);
}
// Emit a debug range check for a boxed Value: dispatch on the tag, unbox, and
// reuse the int32/double range assertion helpers. Any other tag is a failure.
void CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) {
  const Range* r = ins->range();
  const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
  Label done;

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    {
      Label isNotInt32;
      masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
      {
        // Release the tag scratch while the payload registers are in use.
        ScratchTagScopeRelease _(&tag);
        Register unboxInt32 = ToTempUnboxRegister(ins->temp());
        Register input = masm.extractInt32(value, unboxInt32);
        emitAssertRangeI(MIRType::Int32, r, input);
        masm.jump(&done);
      }
      masm.bind(&isNotInt32);
    }

    {
      Label isNotDouble;
      masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
      {
        ScratchTagScopeRelease _(&tag);
        FloatRegister input = ToFloatRegister(ins->floatTemp1());
        FloatRegister temp = ToFloatRegister(ins->floatTemp2());
        masm.unboxDouble(value, input);
        emitAssertRangeD(r, input, temp);
        masm.jump(&done);
      }
      masm.bind(&isNotDouble);
    }
  }

  masm.assumeUnreachable("Incorrect range for Value.");
  masm.bind(&done);
}
// Poll the runtime's interrupt bits; call into the VM (out of line) when any
// bit is set so pending interrupts can be handled.
void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) {
  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, InterruptCheck>(lir, ArgList(), StoreNothing());

  const void* interruptAddr = gen->runtime->addressOfInterruptBits();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0),
                ool->entry());
  masm.bind(ool->rejoin());
}
// Out-of-line path for a resumable wasm trap: emit the trap, register a
// safepoint for it, and jump back to the inline code.
void CodeGenerator::visitOutOfLineResumableWasmTrap(
    OutOfLineResumableWasmTrap* ool) {
  LInstruction* lir = ool->lir();
  masm.wasmTrap(ool->trap(), ool->bytecodeOffset());

  markSafepointAt(masm.currentOffset(), lir);

  // Note that masm.framePushed() doesn't include the register dump area.
  // That will be taken into account when the StackMap is created from the
  // LSafepoint.
  lir->safepoint()->setFramePushedAtStackMapBase(ool->framePushed());
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::Trap);

  masm.jump(ool->rejoin());
}
// Out-of-line path for a non-resumable wasm trap: just emit the trap; no
// safepoint and no jump back, since execution does not resume here.
void CodeGenerator::visitOutOfLineAbortingWasmTrap(
    OutOfLineAbortingWasmTrap* ool) {
  masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
}
// Poll the wasm Instance's interrupt flag; take a resumable CheckInterrupt
// trap (out of line) when it is non-zero.
void CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  OutOfLineResumableWasmTrap* ool = new (alloc()) OutOfLineResumableWasmTrap(
      lir, masm.framePushed(), lir->mir()->bytecodeOffset(),
      wasm::Trap::CheckInterrupt);
  addOutOfLineCode(ool, lir->mir());
  masm.branch32(
      Assembler::NotEqual,
      Address(ToRegister(lir->instance()), wasm::Instance::offsetOfInterrupt()),
      Imm32(0), ool->entry());
  masm.bind(ool->rejoin());
}
// Unconditionally emit a wasm trap for this instruction's trap kind.
void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  const MWasmTrap* mir = lir->mir();

  masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
}
// Trap if the wasm anyref in |ref| is null; fall through otherwise.
void CodeGenerator::visitWasmTrapIfNull(LWasmTrapIfNull* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  const MWasmTrapIfNull* mir = lir->mir();
  Label nonNull;
  Register ref = ToRegister(lir->ref());

  masm.branchWasmAnyRefIsNull(false, ref, &nonNull);
  masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
  masm.bind(&nonNull);
}
// Branch to |label| when |ref| (statically of |sourceType|) is a subtype of
// |destType|, dispatching to the per-hierarchy MacroAssembler helper
// (any/func/extern). |superSTV| and the scratches are only needed by some
// hierarchies; callers may pass Register::Invalid() where unused.
static void BranchWasmRefIsSubtype(MacroAssembler& masm, Register ref,
                                   const wasm::RefType& sourceType,
                                   const wasm::RefType& destType, Label* label,
                                   Register superSTV, Register scratch1,
                                   Register scratch2) {
  if (destType.isAnyHierarchy()) {
    masm.branchWasmRefIsSubtypeAny(ref, sourceType, destType, label,
                                   /*onSuccess=*/true, superSTV, scratch1,
                                   scratch2);
  } else if (destType.isFuncHierarchy()) {
    masm.branchWasmRefIsSubtypeFunc(ref, sourceType, destType, label,
                                    /*onSuccess=*/true, superSTV, scratch1,
                                    scratch2);
  } else if (destType.isExternHierarchy()) {
    masm.branchWasmRefIsSubtypeExtern(ref, sourceType, destType, label,
                                      /*onSuccess=*/true);
  } else {
    MOZ_CRASH("could not generate casting code for unknown type hierarchy");
  }
}
// Materialize a boolean: is |ref| a subtype of an abstract (non-type-ref)
// destination type? 1 on success, 0 on failure.
void CodeGenerator::visitWasmRefIsSubtypeOfAbstract(
    LWasmRefIsSubtypeOfAbstract* ins) {
  MOZ_ASSERT(gen->compilingWasm());

  const MWasmRefIsSubtypeOfAbstract* mir = ins->mir();
  MOZ_ASSERT(!mir->destType().isTypeRef());

  Register ref = ToRegister(ins->ref());
  // Abstract checks need no supertype vector; only one scratch may be used.
  Register superSTV = Register::Invalid();
  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  Register scratch2 = Register::Invalid();
  Register result = ToRegister(ins->output());
  Label onSuccess;
  Label onFail;
  Label join;
  BranchWasmRefIsSubtype(masm, ref, mir->sourceType(), mir->destType(),
                         &onSuccess, superSTV, scratch1, scratch2);
  masm.bind(&onFail);
  masm.xor32(result, result);
  masm.jump(&join);
  masm.bind(&onSuccess);
  masm.move32(Imm32(1), result);
  masm.bind(&join);
}
// Materialize a boolean: is |ref| a subtype of a concrete (type-ref)
// destination type? Uses the destination's supertype vector (superSTV).
void CodeGenerator::visitWasmRefIsSubtypeOfConcrete(
    LWasmRefIsSubtypeOfConcrete* ins) {
  MOZ_ASSERT(gen->compilingWasm());

  const MWasmRefIsSubtypeOfConcrete* mir = ins->mir();
  MOZ_ASSERT(mir->destType().isTypeRef());

  Register ref = ToRegister(ins->ref());
  Register superSTV = ToRegister(ins->superSTV());
  Register scratch1 = ToRegister(ins->temp0());
  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  Register result = ToRegister(ins->output());
  Label onSuccess;
  Label join;
  BranchWasmRefIsSubtype(masm, ref, mir->sourceType(), mir->destType(),
                         &onSuccess, superSTV, scratch1, scratch2);
  masm.move32(Imm32(0), result);
  masm.jump(&join);
  masm.bind(&onSuccess);
  masm.move32(Imm32(1), result);
  masm.bind(&join);
}
// Fused test-and-branch form of the abstract subtype check: jump straight to
// the true/false successor blocks instead of producing a boolean.
void CodeGenerator::visitWasmRefIsSubtypeOfAbstractAndBranch(
    LWasmRefIsSubtypeOfAbstractAndBranch* ins) {
  MOZ_ASSERT(gen->compilingWasm());
  Register ref = ToRegister(ins->ref());
  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  BranchWasmRefIsSubtype(masm, ref, ins->sourceType(), ins->destType(),
                         onSuccess, Register::Invalid(), scratch1,
                         Register::Invalid());
  masm.jump(onFail);
}
// Fused test-and-branch form of the concrete subtype check.
void CodeGenerator::visitWasmRefIsSubtypeOfConcreteAndBranch(
    LWasmRefIsSubtypeOfConcreteAndBranch* ins) {
  MOZ_ASSERT(gen->compilingWasm());
  Register ref = ToRegister(ins->ref());
  Register superSTV = ToRegister(ins->superSTV());
  Register scratch1 = ToRegister(ins->temp0());
  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  BranchWasmRefIsSubtype(masm, ref, ins->sourceType(), ins->destType(),
                         onSuccess, superSTV, scratch1, scratch2);
  masm.jump(onFail);
}
// Call the wasm struct-allocation builtin |fun(instance, typeDefData)| with
// live registers saved, recording a codegen-call safepoint. The result object
// pointer is moved into |output|.
void CodeGenerator::callWasmStructAllocFun(LInstruction* lir,
                                           wasm::SymbolicAddress fun,
                                           Register typeDefData,
                                           Register output) {
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();
  saveLive(lir);

  masm.setupWasmABICall();
  masm.passABIArg(InstanceReg);
  masm.passABIArg(typeDefData);
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  CodeOffset offset =
      masm.callWithABI(wasm::BytecodeOffset(0), fun,
                       mozilla::Some(instanceOffset), MoveOp::GENERAL);
  masm.storeCallPointerResult(output);

  markSafepointAt(offset.offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);

  restoreLive(lir);
  masm.Pop(InstanceReg);
#if JS_CODEGEN_ARM64
  // Keep the pseudo stack pointer in sync with the real one after the Pop.
  masm.syncStackPtr();
#endif
}
// Out-of-line path to allocate wasm GC structs. Captures the allocation
// builtin to call plus the registers holding its argument and result.
class OutOfLineWasmNewStruct : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;
  wasm::SymbolicAddress fun_;
  Register typeDefData_;
  Register output_;

 public:
  OutOfLineWasmNewStruct(LInstruction* lir, wasm::SymbolicAddress fun,
                         Register typeDefData, Register output)
      : lir_(lir), fun_(fun), typeDefData_(typeDefData), output_(output) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineWasmNewStruct(this);
  }

  LInstruction* lir() const { return lir_; }
  wasm::SymbolicAddress fun() const { return fun_; }
  Register typeDefData() const { return typeDefData_; }
  Register output() const { return output_; }
};
// Out-of-line fallback: allocate the struct through the VM builtin and rejoin.
void CodeGenerator::visitOutOfLineWasmNewStruct(OutOfLineWasmNewStruct* ool) {
  callWasmStructAllocFun(ool->lir(), ool->fun(), ool->typeDefData(),
                         ool->output());
  masm.jump(ool->rejoin());
}
// Allocate a wasm GC struct. Outline (large) structs always go through the
// VM builtin; inline structs attempt a fast inline nursery allocation and
// fall back to the builtin out of line.
void CodeGenerator::visitWasmNewStructObject(LWasmNewStructObject* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  MWasmNewStructObject* mir = lir->mir();

  Register typeDefData = ToRegister(lir->typeDefData());
  Register output = ToRegister(lir->output());

  if (mir->isOutline()) {
    wasm::SymbolicAddress fun = mir->zeroFields()
                                    ? wasm::SymbolicAddress::StructNewOOL_true
                                    : wasm::SymbolicAddress::StructNewOOL_false;
    callWasmStructAllocFun(lir, fun, typeDefData, output);
  } else {
    wasm::SymbolicAddress fun = mir->zeroFields()
                                    ? wasm::SymbolicAddress::StructNewIL_true
                                    : wasm::SymbolicAddress::StructNewIL_false;

    Register instance = ToRegister(lir->instance());
    MOZ_ASSERT(instance == InstanceReg);

    auto ool =
        new (alloc()) OutOfLineWasmNewStruct(lir, fun, typeDefData, output);
    addOutOfLineCode(ool, lir->mir());

    Register temp1 = ToRegister(lir->temp0());
    Register temp2 = ToRegister(lir->temp1());
    masm.wasmNewStructObject(instance, output, typeDefData, temp1, temp2,
                             ool->entry(), mir->allocKind(), mir->zeroFields());

    masm.bind(ool->rejoin());
  }
}
// Copy the dedicated wasm heap base register to the output; only meaningful
// on platforms that reserve such a register.
void CodeGenerator::visitWasmHeapReg(LWasmHeapReg* ins) {
#ifdef WASM_HAS_HEAPREG
  masm.movePtr(HeapReg, ToRegister(ins->output()));
#else
  MOZ_CRASH();
#endif
}
// 32-bit wasm memory bounds check: trap on out-of-bounds access.
void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
  // When there are no spectre mitigations in place, branching out-of-line to
  // the trap is a big performance win, but with mitigations it's trickier. See
  // bug 1680243.
  if (JitOptions.spectreIndexMasking) {
    Label ok;
    masm.wasmBoundsCheck32(Assembler::Below, ptr, boundsCheckLimit, &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
    masm.bind(&ok);
  } else {
    OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
        mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
                           ool->entry());
  }
}
// 64-bit wasm memory bounds check; same structure as the 32-bit variant.
void CodeGenerator::visitWasmBoundsCheck64(LWasmBoundsCheck64* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
  Register64 boundsCheckLimit = ToRegister64(ins->boundsCheckLimit());
  // See above.
  if (JitOptions.spectreIndexMasking) {
    Label ok;
    masm.wasmBoundsCheck64(Assembler::Below, ptr, boundsCheckLimit, &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
    masm.bind(&ok);
  } else {
    OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
        mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck64(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
                           ool->entry());
  }
}
// Range-style bounds check: verify [index, index+length) fits under |limit|.
void CodeGenerator::visitWasmBoundsCheckRange32(LWasmBoundsCheckRange32* ins) {
  const MWasmBoundsCheckRange32* mir = ins->mir();
  Register index = ToRegister(ins->index());
  Register length = ToRegister(ins->length());
  Register limit = ToRegister(ins->limit());
  Register tmp = ToRegister(ins->temp0());

  masm.wasmBoundsCheckRange32(index, length, limit, tmp, mir->bytecodeOffset());
}
// Trap with UnalignedAccess when |ptr| is not aligned to the access size.
void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
      mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
  addOutOfLineCode(ool, mir);
  // byteSize is a power of two, so (byteSize - 1) masks the low bits.
  masm.branchTest32(Assembler::NonZero, ptr, Imm32(mir->byteSize() - 1),
                    ool->entry());
}
// 64-bit pointer alignment check; only the low word matters on 32-bit
// platforms since alignment is determined by the low bits.
void CodeGenerator::visitWasmAlignmentCheck64(LWasmAlignmentCheck64* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
#ifdef JS_64BIT
  Register r = ptr.reg;
#else
  Register r = ptr.low;
#endif
  OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
      mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
  addOutOfLineCode(ool, mir);
  masm.branchTestPtr(Assembler::NonZero, r, Imm32(mir->byteSize() - 1),
                     ool->entry());
}
// Load a pointer- or int32-sized field from the wasm Instance at the MIR's
// fixed offset.
void CodeGenerator::visitWasmLoadInstance(LWasmLoadInstance* ins) {
  switch (ins->mir()->type()) {
    case MIRType::WasmAnyRef:
    case MIRType::Pointer:
      masm.loadPtr(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                   ToRegister(ins->output()));
      break;
    case MIRType::Int32:
      masm.load32(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                  ToRegister(ins->output()));
      break;
    default:
      MOZ_CRASH("MIRType not supported in WasmLoadInstance");
  }
}
// Load an int64 field from the wasm Instance at the MIR's fixed offset.
void CodeGenerator::visitWasmLoadInstance64(LWasmLoadInstance64* ins) {
  MOZ_ASSERT(ins->mir()->type() == MIRType::Int64);
  masm.load64(Address(ToRegister(ins->instance()), ins->mir()->offset()),
              ToOutRegister64(ins));
}
// Emit a load/add/store increment of |script|'s warm-up counter at the given
// absolute address, using |tmp| as scratch.
void CodeGenerator::incrementWarmUpCounter(AbsoluteAddress warmUpCount,
                                           JSScript* script, Register tmp) {
  // The code depends on the JitScript* not being discarded without also
  // invalidating Ion code. Assert this.
#ifdef DEBUG
  Label ok;
  masm.movePtr(ImmGCPtr(script), tmp);
  masm.loadJitScript(tmp, tmp);
  masm.branchPtr(Assembler::Equal, tmp, ImmPtr(script->jitScript()), &ok);
  masm.assumeUnreachable("Didn't find JitScript?");
  masm.bind(&ok);
#endif

  masm.load32(warmUpCount, tmp);
  masm.add32(Imm32(1), tmp);
  masm.store32(tmp, warmUpCount);
}
// Increment the warm-up counter stored on this script's JitScript.
void CodeGenerator::visitIncrementWarmUpCounter(LIncrementWarmUpCounter* ins) {
  Register tmp = ToRegister(ins->temp0());

  AbsoluteAddress warmUpCount =
      AbsoluteAddress(ins->mir()->script()->jitScript())
          .offset(JitScript::offsetOfWarmUpCount());
  incrementWarmUpCounter(warmUpCount, ins->mir()->script(), tmp);
}
// Bail out of Ion code when the value is the uninitialized-lexical magic.
void CodeGenerator::visitLexicalCheck(LLexicalCheck* ins) {
  ValueOperand inputValue = ToValue(ins, LLexicalCheck::InputIndex);
  Label bail;
  masm.branchTestMagicValue(Assembler::Equal, inputValue,
                            JS_UNINITIALIZED_LEXICAL, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Call into the VM to throw the lexical error identified by the MIR's
// error number.
void CodeGenerator::visitThrowRuntimeLexicalError(
    LThrowRuntimeLexicalError* ins) {
  pushArg(Imm32(ins->mir()->errorNumber()));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, jit::ThrowRuntimeLexicalError>(ins);
}
// Call into the VM to throw the message indicated by the MIR's ThrowMsgKind.
void CodeGenerator::visitThrowMsg(LThrowMsg* ins) {
  pushArg(Imm32(static_cast<int32_t>(ins->mir()->throwMsgKind())));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, js::ThrowMsgOperation>(ins);
}
// Call into the VM to instantiate global declarations for this script,
// passing the script and the current bytecode pc.
void CodeGenerator::visitGlobalDeclInstantiation(
    LGlobalDeclInstantiation* ins) {
  pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
  pushArg(ImmGCPtr(ins->mir()->block()->info().script()));

  using Fn = bool (*)(JSContext*, HandleScript, const jsbytecode*);
  callVM<Fn, GlobalDeclInstantiationFromIon>(ins);
}
// Implement the |debugger| statement: ask the VM whether a live Debugger is
// observing onDebuggerStatement, and bail out of Ion code if so.
void CodeGenerator::visitDebugger(LDebugger* ins) {
  Register cx = ToRegister(ins->temp0());

  masm.loadJSContext(cx);
  using Fn = bool (*)(JSContext* cx);
  masm.setupAlignedABICall();
  masm.passABIArg(cx);
  masm.callWithABI<Fn, GlobalHasLiveOnDebuggerStatement>();

  Label bail;
  masm.branchIfTrueBool(ReturnReg, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Compute |new.target|: when constructing, it is stored in the argument
// vector just past max(numActualArgs, numFormalArgs); otherwise undefined.
void CodeGenerator::visitNewTarget(LNewTarget* ins) {
  ValueOperand output = ToOutValue(ins);

  // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
  Label notConstructing, done;
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.branchTestPtr(Assembler::Zero, calleeToken,
                     Imm32(CalleeToken_FunctionConstructing), &notConstructing);

  Register argvLen = output.scratchReg();
  masm.loadNumActualArgs(FramePointer, argvLen);

  Label useNFormals;

  size_t numFormalArgs = ins->mirRaw()->block()->info().nargs();
  masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs), &useNFormals);

  size_t argsOffset = JitFrameLayout::offsetOfActualArgs();
  {
    // More actuals than formals: new.target follows the actual args.
    BaseValueIndex newTarget(FramePointer, argvLen, argsOffset);
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  masm.bind(&useNFormals);

  {
    // Fewer actuals than formals: new.target follows the formal slots.
    Address newTarget(FramePointer,
                      argsOffset + (numFormalArgs * sizeof(Value)));
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  // else output = undefined
  masm.bind(&notConstructing);
  masm.moveValue(UndefinedValue(), output);
  masm.bind(&done);
}
// Derived-class constructor return check: an object return value is used
// as-is; a non-undefined non-object throws; otherwise |this| must be
// initialized (not magic) and becomes the result.
void CodeGenerator::visitCheckReturn(LCheckReturn* ins) {
  ValueOperand returnValue = ToValue(ins, LCheckReturn::ReturnValueIndex);
  ValueOperand thisValue = ToValue(ins, LCheckReturn::ThisValueIndex);
  ValueOperand output = ToOutValue(ins);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>(
      ins, ArgList(returnValue), StoreNothing());

  Label noChecks;
  masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
  masm.branchTestUndefined(Assembler::NotEqual, returnValue, ool->entry());
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.moveValue(thisValue, output);
  masm.jump(ool->rejoin());
  masm.bind(&noChecks);
  masm.moveValue(returnValue, output);
  masm.bind(ool->rejoin());
}
// Unbox the value as an object, or throw (out of line) when it isn't one.
void CodeGenerator::visitCheckIsObj(LCheckIsObj* ins) {
  ValueOperand value = ToValue(ins, LCheckIsObj::ValueIndex);
  Register output = ToRegister(ins->output());

  using Fn = bool (*)(JSContext*, CheckIsObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowCheckIsObject>(
      ins, ArgList(Imm32(ins->mir()->checkKind())), StoreNothing());

  masm.fallibleUnboxObject(value, output, ool->entry());
  masm.bind(ool->rejoin());
}
// Throw (out of line) when the value is null or undefined, i.e. not
// coercible to an object.
void CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins) {
  ValueOperand checkValue = ToValue(ins, LCheckObjCoercible::ValueIndex);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowObjectCoercible>(
      ins, ArgList(checkValue), StoreNothing());
  masm.branchTestNull(Assembler::Equal, checkValue, ool->entry());
  masm.branchTestUndefined(Assembler::Equal, checkValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Validate a class |extends| clause: null is allowed, otherwise the value
// must be a constructor object; anything else goes to the throwing VM call.
void CodeGenerator::visitCheckClassHeritage(LCheckClassHeritage* ins) {
  ValueOperand heritage = ToValue(ins, LCheckClassHeritage::HeritageIndex);
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckClassHeritageOperation>(
      ins, ArgList(heritage), StoreNothing());

  masm.branchTestNull(Assembler::Equal, heritage, ool->rejoin());
  masm.fallibleUnboxObject(heritage, temp0, ool->entry());

  masm.isConstructor(temp0, temp1, ool->entry());
  masm.branchTest32(Assembler::Zero, temp1, temp1, ool->entry());

  masm.bind(ool->rejoin());
}
// Throw (out of line) when |this| is still the uninitialized magic value.
void CodeGenerator::visitCheckThis(LCheckThis* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThis::ValueIndex);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowUninitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Throw (out of line) when |this| has already been initialized (i.e. it is
// no longer the magic value) — guards against double super() calls.
void CodeGenerator::visitCheckThisReinit(LCheckThisReinit* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThisReinit::ThisValueIndex);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowInitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::NotEqual, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Call into the VM to create the generator object for this frame.
void CodeGenerator::visitGenerator(LGenerator* lir) {
  Register callee = ToRegister(lir->callee());
  Register environmentChain = ToRegister(lir->environmentChain());
  Register argsObject = ToRegister(lir->argsObject());

  // Arguments are pushed in reverse order of the callee's signature.
  pushArg(argsObject);
  pushArg(environmentChain);
  pushArg(ImmGCPtr(current->mir()->info().script()));
  pushArg(callee);

  using Fn = JSObject* (*)(JSContext* cx, HandleFunction, HandleScript,
                           HandleObject, HandleObject);
  callVM<Fn, CreateGenerator>(lir);
}
// Resolve or reject an async function's promise via the VM, depending on the
// MIR's resolve kind.
void CodeGenerator::visitAsyncResolve(LAsyncResolve* lir) {
  Register generator = ToRegister(lir->generator());
  ValueOperand valueOrReason = ToValue(lir, LAsyncResolve::ValueOrReasonIndex);
  AsyncFunctionResolveKind resolveKind = lir->mir()->resolveKind();

  pushArg(Imm32(static_cast<int32_t>(resolveKind)));
  pushArg(valueOrReason);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
                           HandleValue, AsyncFunctionResolveKind);
  callVM<Fn, js::AsyncFunctionResolve>(lir);
}
// Perform the await operation for an async function via the VM.
void CodeGenerator::visitAsyncAwait(LAsyncAwait* lir) {
  ValueOperand value = ToValue(lir, LAsyncAwait::ValueIndex);
  Register generator = ToRegister(lir->generator());

  pushArg(value);
  pushArg(generator);

  using Fn =
      JSObject* (*)(JSContext* cx, Handle<AsyncFunctionGeneratorObject*> genObj,
                    HandleValue value);
  callVM<Fn, js::AsyncFunctionAwait>(lir);
}
// Ask the VM whether the awaited value allows skipping the await suspension.
void CodeGenerator::visitCanSkipAwait(LCanSkipAwait* lir) {
  ValueOperand value = ToValue(lir, LCanSkipAwait::ValueIndex);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
  callVM<Fn, js::CanSkipAwait>(lir);
}
// When the await can be skipped (per |canSkip|), extract the settled value
// through the VM; otherwise pass the original value through unchanged.
void CodeGenerator::visitMaybeExtractAwaitValue(LMaybeExtractAwaitValue* lir) {
  ValueOperand value = ToValue(lir, LMaybeExtractAwaitValue::ValueIndex);
  ValueOperand output = ToOutValue(lir);
  Register canSkip = ToRegister(lir->canSkip());

  Label cantExtract, finished;
  masm.branchIfFalseBool(canSkip, &cantExtract);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
  callVM<Fn, js::ExtractAwaitValue>(lir);
  masm.jump(&finished);
  masm.bind(&cantExtract);

  masm.moveValue(value, output);

  masm.bind(&finished);
}
// Debug-mode VM check that a value is a legitimate self-hosted value.
void CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins) {
  ValueOperand checkValue = ToValue(ins, LDebugCheckSelfHosted::ValueIndex);
  pushArg(checkValue);
  using Fn = bool (*)(JSContext*, HandleValue);
  callVM<Fn, js::Debug_CheckSelfHosted>(ins);
}
// Math.random(): generate a double from the realm's XorShift128+ RNG state.
void CodeGenerator::visitRandom(LRandom* ins) {
  using mozilla::non_crypto::XorShift128PlusRNG;

  FloatRegister output = ToFloatRegister(ins->output());
  Register rngReg = ToRegister(ins->temp0());

  Register64 temp1 = ToRegister64(ins->temp1());
  Register64 temp2 = ToRegister64(ins->temp2());

  const XorShift128PlusRNG* rng = gen->realm->addressOfRandomNumberGenerator();
  masm.movePtr(ImmPtr(rng), rngReg);

  masm.randomDouble(rngReg, output, temp1, temp2);
  if (js::SupportDifferentialTesting()) {
    // Deterministic output for differential testing builds.
    masm.loadConstantDouble(0.0, output);
  }
}
// Sign-extend the low 8 or 16 bits of |input| to a full int32.
void CodeGenerator::visitSignExtendInt32(LSignExtendInt32* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  switch (ins->mode()) {
    case MSignExtendInt32::Byte:
      masm.move8SignExtend(input, output);
      break;
    case MSignExtendInt32::Half:
      masm.move16SignExtend(input, output);
      break;
  }
}
// Emit a 32-bit rotate (left or right) by a constant or register count.
void CodeGenerator::visitRotate(LRotate* ins) {
  MRotate* mir = ins->mir();
  Register input = ToRegister(ins->input());
  Register dest = ToRegister(ins->output());

  const LAllocation* count = ins->count();
  if (count->isConstant()) {
    // Only the low five bits of the count are meaningful for 32-bit rotates.
    int32_t c = ToInt32(count) & 0x1F;
    if (mir->isLeftRotate()) {
      masm.rotateLeft(Imm32(c), input, dest);
    } else {
      masm.rotateRight(Imm32(c), input, dest);
    }
  } else {
    Register creg = ToRegister(count);
    if (mir->isLeftRotate()) {
      masm.rotateLeft(creg, input, dest);
    } else {
      masm.rotateRight(creg, input, dest);
    }
  }
}
// Out-of-line path for LNaNToZero: loads 0.0 into the output register.
class OutOfLineNaNToZero : public OutOfLineCodeBase<CodeGenerator> {
  LNaNToZero* lir_;

 public:
  explicit OutOfLineNaNToZero(LNaNToZero* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineNaNToZero(this);
  }
  LNaNToZero* lir() const { return lir_; }
};
// Out-of-line: replace the output with +0.0 and rejoin the inline path.
void CodeGenerator::visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool) {
  FloatRegister output = ToFloatRegister(ool->lir()->output());
  masm.loadConstantDouble(0.0, output);
  masm.jump(ool->rejoin());
}
// Map NaN (and, unless proven impossible, -0.0) to +0.0; other doubles pass
// through unchanged.
void CodeGenerator::visitNaNToZero(LNaNToZero* lir) {
  FloatRegister input = ToFloatRegister(lir->input());

  OutOfLineNaNToZero* ool = new (alloc()) OutOfLineNaNToZero(lir);
  addOutOfLineCode(ool, lir->mir());

  if (lir->mir()->operandIsNeverNegativeZero()) {
    // Only NaN needs normalizing: NaN compares unordered with itself.
    masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
  } else {
    // 0.0 == -0.0, so this branch catches both -0.0 and NaN.
    FloatRegister scratch = ToFloatRegister(lir->temp0());
    masm.loadConstantDouble(0.0, scratch);
    masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch,
                      ool->entry());
  }
  masm.bind(ool->rejoin());
}
// Set |output| to whether |obj| is a packed (dense, hole-free) array.
void CodeGenerator::visitIsPackedArray(LIsPackedArray* lir) {
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  masm.setIsPackedArray(obj, output, temp);
}
// Bail out of Ion code when the array is not packed.
void CodeGenerator::visitGuardArrayIsPacked(LGuardArrayIsPacked* lir) {
  Register array = ToRegister(lir->array());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label bail;
  masm.branchArrayIsNotPacked(array, temp0, temp1, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Object.getPrototypeOf fast path: load the proto slot directly; lazy protos
// (tagged as 1) go through the VM, null protos yield the null Value.
void CodeGenerator::visitGetPrototypeOf(LGetPrototypeOf* lir) {
  Register target = ToRegister(lir->target());
  ValueOperand out = ToOutValue(lir);
  Register scratch = out.scratchReg();

  using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, jit::GetPrototypeOf>(lir, ArgList(target),
                                                          StoreValueTo(out));

  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  masm.loadObjProto(target, scratch);

  Label hasProto;
  masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);

  // Call into the VM for lazy prototypes.
  masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), ool->entry());

  masm.moveValue(NullValue(), out);
  masm.jump(ool->rejoin());

  masm.bind(&hasProto);
  masm.tagValue(JSVAL_TYPE_OBJECT, scratch, out);

  masm.bind(ool->rejoin());
}
// Create a plain object with the given prototype Value via the VM.
void CodeGenerator::visitObjectWithProto(LObjectWithProto* lir) {
  pushArg(ToValue(lir, LObjectWithProto::PrototypeIndex));

  using Fn = PlainObject* (*)(JSContext*, HandleValue);
  callVM<Fn, js::ObjectWithProtoOperation>(lir);
}
// Load the object's prototype pointer; the MIR guarantees it is neither null
// nor lazy, which the DEBUG block asserts at runtime.
void CodeGenerator::visitObjectStaticProto(LObjectStaticProto* lir) {
  Register obj = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  masm.loadObjProto(obj, output);

#ifdef DEBUG
  // We shouldn't encounter a null or lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label done;
  masm.branchPtr(Assembler::Above, output, ImmWord(1), &done);
  masm.assumeUnreachable("Unexpected null or lazy proto in MObjectStaticProto");
  masm.bind(&done);
#endif
}
// Fetch a builtin object (identified by BuiltinObjectKind) via a VM call.
18822 void CodeGenerator::visitBuiltinObject(LBuiltinObject* lir) {
18823   pushArg(Imm32(static_cast<int32_t>(lir->mir()->builtinObjectKind())));
18825   using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
18826   callVM<Fn, js::BuiltinObjectOperation>(lir);
// Implement JSOp::SuperFun: the "super" function is the prototype of the
// current callee. Callee is asserted (debug-only) to be a JSFunction, so its
// proto is never lazy; a zero proto yields the null Value.
18829 void CodeGenerator::visitSuperFunction(LSuperFunction* lir) {
18830   Register callee = ToRegister(lir->callee());
18831   ValueOperand out = ToOutValue(lir);
18832   Register temp = ToRegister(lir->temp0());
18834 #ifdef DEBUG
18835   Label classCheckDone;
18836   masm.branchTestObjIsFunction(Assembler::Equal, callee, temp, callee,
18837                                &classCheckDone);
18838   masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
18839   masm.bind(&classCheckDone);
18840 #endif
18842   // Load prototype of callee
18843   masm.loadObjProto(callee, temp);
18845 #ifdef DEBUG
18846   // We won't encounter a lazy proto, because |callee| is guaranteed to be a
18847   // JSFunction and only proxy objects can have a lazy proto.
18848   MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
18850   Label proxyCheckDone;
18851   masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
18852   masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
18853   masm.bind(&proxyCheckDone);
18854 #endif
18856   Label nullProto, done;
18857   masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
18859   // Box prototype and return
18860   masm.tagValue(JSVAL_TYPE_OBJECT, temp, out);
18861   masm.jump(&done);
18863   masm.bind(&nullProto);
18864   masm.moveValue(NullValue(), out);
18866   masm.bind(&done);
// Store |homeObject| into the method's home-object slot of the extended
// function |func|. A pre-barrier protects the previous slot value for GC.
18869 void CodeGenerator::visitInitHomeObject(LInitHomeObject* lir) {
18870   Register func = ToRegister(lir->function());
18871   ValueOperand homeObject = ToValue(lir, LInitHomeObject::HomeObjectIndex);
18873   masm.assertFunctionIsExtended(func);
18875   Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
18877   emitPreBarrier(addr);
18878   masm.storeValue(homeObject, addr);
// Set |output| to whether |object| is definitely a TypedArray constructor.
18881 void CodeGenerator::visitIsTypedArrayConstructor(
18882     LIsTypedArrayConstructor* lir) {
18883   Register object = ToRegister(lir->object());
18884   Register output = ToRegister(lir->output());
18886   masm.setIsDefinitelyTypedArrayConstructor(object, output);
// Extract the type tag of a boxed Value into |output|. extractTag may already
// place the tag in |output|; only move when it chose a different register.
18889 void CodeGenerator::visitLoadValueTag(LLoadValueTag* lir) {
18890   ValueOperand value = ToValue(lir, LLoadValueTag::ValueIndex);
18891   Register output = ToRegister(lir->output());
18893   Register tag = masm.extractTag(value, output);
18894   if (tag != output) {
18895     masm.mov(tag, output);
// Guard that two Value tags differ. Bails when equal, and also bails when
// both tags are number tags, since int32 vs double tags can still represent
// equal numeric values.
18899 void CodeGenerator::visitGuardTagNotEqual(LGuardTagNotEqual* lir) {
18900   Register lhs = ToRegister(lir->lhs());
18901   Register rhs = ToRegister(lir->rhs());
18903   bailoutCmp32(Assembler::Equal, lhs, rhs, lir->snapshot());
18905   // If both lhs and rhs are numbers, can't use tag comparison to do inequality
18906   // comparison
18907   Label done;
18908   masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
18909   masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
18910   bailout(lir->snapshot());
18912   masm.bind(&done);
// Load the target object of a proxy wrapper: read the reserved-slots pointer,
// then unbox the object stored in the private slot.
18915 void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
18916   Register object = ToRegister(lir->object());
18917   Register output = ToRegister(lir->output());
18919   masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);
18920   masm.unboxObject(
18921       Address(output, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
18922       output);
// Guard that |object| still has the expected GetterSetter for the guarded
// property id. Calls the pure (non-GC) C++ helper through the ABI and bails
// out when it reports false.
18925 void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
18926   Register object = ToRegister(lir->object());
18927   Register temp0 = ToRegister(lir->temp0());
18928   Register temp1 = ToRegister(lir->temp1());
18929   Register temp2 = ToRegister(lir->temp2());
18931   masm.movePropertyKey(lir->mir()->propId(), temp1);
18932   masm.movePtr(ImmGCPtr(lir->mir()->getterSetter()), temp2);
18934   using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
18935                       GetterSetter* getterSetter);
18936   masm.setupAlignedABICall();
18937   masm.loadJSContext(temp0);
18938   masm.passABIArg(temp0);
18939   masm.passABIArg(object);
18940   masm.passABIArg(temp1);
18941   masm.passABIArg(temp2);
18942   masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
18944   bailoutIfFalseBool(ReturnReg, lir->snapshot());
// Bail out if |object| is not extensible.
18947 void CodeGenerator::visitGuardIsExtensible(LGuardIsExtensible* lir) {
18948   Register object = ToRegister(lir->object());
18949   Register temp = ToRegister(lir->temp0());
18951   Label bail;
18952   masm.branchIfObjectNotExtensible(object, temp, &bail);
18953   bailoutFrom(&bail, lir->snapshot());
// Bail out if the int32 |index| is negative.
18956 void CodeGenerator::visitGuardInt32IsNonNegative(
18957     LGuardInt32IsNonNegative* lir) {
18958   Register index = ToRegister(lir->index());
18960   bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
// Bail out unless minimum() <= input <= maximum() (inclusive range check).
18963 void CodeGenerator::visitGuardInt32Range(LGuardInt32Range* lir) {
18964   Register input = ToRegister(lir->input());
18966   bailoutCmp32(Assembler::LessThan, input, Imm32(lir->mir()->minimum()),
18967                lir->snapshot());
18968   bailoutCmp32(Assembler::GreaterThan, input, Imm32(lir->mir()->maximum()),
18969                lir->snapshot());
// Guard that |index| does NOT refer to an existing dense element of |object|:
// it must be >= the initialized length, or the slot must hold a hole (magic).
// Bails out when a real dense element is present.
18972 void CodeGenerator::visitGuardIndexIsNotDenseElement(
18973     LGuardIndexIsNotDenseElement* lir) {
18974   Register object = ToRegister(lir->object());
18975   Register index = ToRegister(lir->index());
18976   Register temp = ToRegister(lir->temp0());
18977   Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
18979   // Load obj->elements.
18980   masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
18982   // Ensure index >= initLength or the element is a hole.
18983   Label notDense;
18984   Address capacity(temp, ObjectElements::offsetOfInitializedLength());
18985   masm.spectreBoundsCheck32(index, capacity, spectreTemp, &notDense);
18987   BaseValueIndex element(temp, index);
18988   masm.branchTestMagic(Assembler::Equal, element, &notDense);
18990   bailout(lir->snapshot());
18992   masm.bind(&notDense);
// Guard that storing at |index| is a valid update or append: always fine if
// the array length is writable; otherwise |index| must be within the current
// length. Bails out when the bounds check fails.
18995 void CodeGenerator::visitGuardIndexIsValidUpdateOrAdd(
18996     LGuardIndexIsValidUpdateOrAdd* lir) {
18997   Register object = ToRegister(lir->object());
18998   Register index = ToRegister(lir->index());
18999   Register temp = ToRegister(lir->temp0());
19000   Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
19002   // Load obj->elements.
19003   masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
19005   Label success;
19007   // If length is writable, branch to &success.  All indices are writable.
19008   Address flags(temp, ObjectElements::offsetOfFlags());
19009   masm.branchTest32(Assembler::Zero, flags,
19010                     Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
19011                     &success);
19013   // Otherwise, ensure index is in bounds.
19014   Label bail;
19015   Address length(temp, ObjectElements::offsetOfLength());
19016   masm.spectreBoundsCheck32(index, length, spectreTemp, &bail);
19017   masm.bind(&success);
19019   bailoutFrom(&bail, lir->snapshot());
// VM call: add or update a sparse element on a native object.
// Args are pushed in reverse order (strict flag last pushed first consumed).
19022 void CodeGenerator::visitCallAddOrUpdateSparseElement(
19023     LCallAddOrUpdateSparseElement* lir) {
19024   Register object = ToRegister(lir->object());
19025   Register index = ToRegister(lir->index());
19026   ValueOperand value = ToValue(lir, LCallAddOrUpdateSparseElement::ValueIndex);
19028   pushArg(Imm32(lir->mir()->strict()));
19029   pushArg(value);
19030   pushArg(index);
19031   pushArg(object);
19033   using Fn =
19034       bool (*)(JSContext*, Handle<NativeObject*>, int32_t, HandleValue, bool);
19035   callVM<Fn, js::AddOrUpdateSparseElementHelper>(lir);
// VM call: read a sparse element from a native object into the output Value.
19038 void CodeGenerator::visitCallGetSparseElement(LCallGetSparseElement* lir) {
19039   Register object = ToRegister(lir->object());
19040   Register index = ToRegister(lir->index());
19042   pushArg(index);
19043   pushArg(object);
19045   using Fn =
19046       bool (*)(JSContext*, Handle<NativeObject*>, int32_t, MutableHandleValue);
19047   callVM<Fn, js::GetSparseElementHelper>(lir);
// VM call: native [[Get]] by index with the object itself as receiver
// (receiver is pushed as an object-typed Value).
19050 void CodeGenerator::visitCallNativeGetElement(LCallNativeGetElement* lir) {
19051   Register object = ToRegister(lir->object());
19052   Register index = ToRegister(lir->index());
19054   pushArg(index);
19055   pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
19056   pushArg(object);
19058   using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
19059                       MutableHandleValue);
19060   callVM<Fn, js::NativeGetElement>(lir);
// VM call: native [[Get]] by index with an explicit (super) receiver Value.
19063 void CodeGenerator::visitCallNativeGetElementSuper(
19064     LCallNativeGetElementSuper* lir) {
19065   Register object = ToRegister(lir->object());
19066   Register index = ToRegister(lir->index());
19067   ValueOperand receiver =
19068       ToValue(lir, LCallNativeGetElementSuper::ReceiverIndex);
19070   pushArg(index);
19071   pushArg(receiver);
19072   pushArg(object);
19074   using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
19075                       MutableHandleValue);
19076   callVM<Fn, js::NativeGetElement>(lir);
// Check (via the pure C++ helper) whether |object| has a sparse element at
// |index|. A Value-sized stack slot receives the boolean result; on helper
// failure the slot is popped and we bail out. framePushed bookkeeping is
// restored on the success path so the safepoint stays consistent.
19079 void CodeGenerator::visitCallObjectHasSparseElement(
19080     LCallObjectHasSparseElement* lir) {
19081   Register object = ToRegister(lir->object());
19082   Register index = ToRegister(lir->index());
19083   Register temp0 = ToRegister(lir->temp0());
19084   Register temp1 = ToRegister(lir->temp1());
19085   Register output = ToRegister(lir->output());
  // Out-param slot for the helper's result Value.
19087   masm.reserveStack(sizeof(Value));
19088   masm.moveStackPtrTo(temp1);
19090   using Fn = bool (*)(JSContext*, NativeObject*, int32_t, Value*);
19091   masm.setupAlignedABICall();
19092   masm.loadJSContext(temp0);
19093   masm.passABIArg(temp0);
19094   masm.passABIArg(object);
19095   masm.passABIArg(index);
19096   masm.passABIArg(temp1);
19097   masm.callWithABI<Fn, HasNativeElementPure>();
19098   masm.storeCallPointerResult(temp0);
19100   Label bail, ok;
19101   uint32_t framePushed = masm.framePushed();
19102   masm.branchIfTrueBool(temp0, &ok);
19103   masm.adjustStack(sizeof(Value));
19104   masm.jump(&bail);
19106   masm.bind(&ok);
19107   masm.setFramePushed(framePushed);
19108   masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
19109   masm.adjustStack(sizeof(Value));
19111   bailoutFrom(&bail, lir->snapshot());
// VM call implementing BigInt.asIntN(bits, input) for the general case.
19114 void CodeGenerator::visitBigIntAsIntN(LBigIntAsIntN* ins) {
19115   Register bits = ToRegister(ins->bits());
19116   Register input = ToRegister(ins->input());
19118   pushArg(bits);
19119   pushArg(input);
19121   using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
19122   callVM<Fn, jit::BigIntAsIntN>(ins);
// BigInt.asIntN(64, input): reuse |input| as the result when it already fits
// int64 with a matching sign; otherwise materialize a new BigInt from the
// truncated int64 value in |temp64|.
19125 void CodeGenerator::visitBigIntAsIntN64(LBigIntAsIntN64* ins) {
19126   Register input = ToRegister(ins->input());
19127   Register temp = ToRegister(ins->temp());
19128   Register64 temp64 = ToRegister64(ins->temp64());
19129   Register output = ToRegister(ins->output());
19131   Label done, create;
  // Optimistically reuse the input BigInt.
19133   masm.movePtr(input, output);
19135   // Load the BigInt value as an int64.
19136   masm.loadBigInt64(input, temp64);
19138   // Create a new BigInt when the input exceeds the int64 range.
19139   masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
19140                 Imm32(64 / BigInt::DigitBits), &create);
19142   // And create a new BigInt when the value and the BigInt have different signs.
19143   Label nonNegative;
19144   masm.branchIfBigIntIsNonNegative(input, &nonNegative);
19145   masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &create);
19146   masm.jump(&done);
19148   masm.bind(&nonNegative);
19149   masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &done);
19151   masm.bind(&create);
19152   emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
19154   masm.bind(&done);
// BigInt.asIntN(32, input): reuse |input| when its single digit fits int32;
// otherwise rebuild a BigInt from the sign-extended 32-bit value.
19157 void CodeGenerator::visitBigIntAsIntN32(LBigIntAsIntN32* ins) {
19158   Register input = ToRegister(ins->input());
19159   Register temp = ToRegister(ins->temp());
19160   Register64 temp64 = ToRegister64(ins->temp64());
19161   Register output = ToRegister(ins->output());
19163   Label done, create;
  // Optimistically reuse the input BigInt.
19165   masm.movePtr(input, output);
19167   // Load the absolute value of the first digit.
19168   masm.loadFirstBigIntDigitOrZero(input, temp);
19170   // If the absolute value exceeds the int32 range, create a new BigInt.
19171   masm.branchPtr(Assembler::Above, temp, Imm32(INT32_MAX), &create);
19173   // Also create a new BigInt if we have more than one digit.
19174   masm.branch32(Assembler::BelowOrEqual,
19175                 Address(input, BigInt::offsetOfLength()), Imm32(1), &done);
19177   masm.bind(&create);
19179   // |temp| stores the absolute value, negate it when the sign flag is set.
19180   Label nonNegative;
19181   masm.branchIfBigIntIsNonNegative(input, &nonNegative);
19182   masm.negPtr(temp);
19183   masm.bind(&nonNegative);
19185   masm.move32To64SignExtend(temp, temp64);
19186   emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
19188   masm.bind(&done);
// VM call implementing BigInt.asUintN(bits, input) for the general case.
19191 void CodeGenerator::visitBigIntAsUintN(LBigIntAsUintN* ins) {
19192   Register bits = ToRegister(ins->bits());
19193   Register input = ToRegister(ins->input());
19195   pushArg(bits);
19196   pushArg(input);
19198   using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
19199   callVM<Fn, jit::BigIntAsUintN>(ins);
// BigInt.asUintN(64, input): reuse |input| when it is non-negative and fits
// uint64; otherwise materialize a new unsigned BigInt from |temp64|.
19202 void CodeGenerator::visitBigIntAsUintN64(LBigIntAsUintN64* ins) {
19203   Register input = ToRegister(ins->input());
19204   Register temp = ToRegister(ins->temp());
19205   Register64 temp64 = ToRegister64(ins->temp64());
19206   Register output = ToRegister(ins->output());
19208   Label done, create;
  // Optimistically reuse the input BigInt.
19210   masm.movePtr(input, output);
19212   // Load the BigInt value as an uint64.
19213   masm.loadBigInt64(input, temp64);
19215   // Create a new BigInt when the input exceeds the uint64 range.
19216   masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
19217                 Imm32(64 / BigInt::DigitBits), &create);
19219   // And create a new BigInt when the input has the sign flag set.
19220   masm.branchIfBigIntIsNonNegative(input, &done);
19222   masm.bind(&create);
19223   emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
19225   masm.bind(&done);
// BigInt.asUintN(32, input): reuse |input| when it is a non-negative single
// digit within uint32; otherwise rebuild from the zero-extended 32-bit value.
19228 void CodeGenerator::visitBigIntAsUintN32(LBigIntAsUintN32* ins) {
19229   Register input = ToRegister(ins->input());
19230   Register temp = ToRegister(ins->temp());
19231   Register64 temp64 = ToRegister64(ins->temp64());
19232   Register output = ToRegister(ins->output());
19234   Label done, create;
  // Optimistically reuse the input BigInt.
19236   masm.movePtr(input, output);
19238   // Load the absolute value of the first digit.
19239   masm.loadFirstBigIntDigitOrZero(input, temp);
19241   // If the absolute value exceeds the uint32 range, create a new BigInt.
19242 #if JS_PUNBOX64
19243   masm.branchPtr(Assembler::Above, temp, ImmWord(UINT32_MAX), &create);
19244 #endif
19246   // Also create a new BigInt if we have more than one digit.
19247   masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
19248                 Imm32(1), &create);
19250   // And create a new BigInt when the input has the sign flag set.
19251   masm.branchIfBigIntIsNonNegative(input, &done);
19253   masm.bind(&create);
19255   // |temp| stores the absolute value, negate it when the sign flag is set.
19256   Label nonNegative;
19257   masm.branchIfBigIntIsNonNegative(input, &nonNegative);
19258   masm.negPtr(temp);
19259   masm.bind(&nonNegative);
19261   masm.move32To64ZeroExtend(temp, temp64);
19262   emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
19264   masm.bind(&done);
// Bail out if the input Value holds a GC thing (object/string/symbol/bigint).
19267 void CodeGenerator::visitGuardNonGCThing(LGuardNonGCThing* ins) {
19268   ValueOperand input = ToValue(ins, LGuardNonGCThing::InputIndex);
19270   Label bail;
19271   masm.branchTestGCThing(Assembler::Equal, input, &bail);
19272   bailoutFrom(&bail, ins->snapshot());
// Normalize a non-GC-thing Value into its hashable form (uses a float temp).
19275 void CodeGenerator::visitToHashableNonGCThing(LToHashableNonGCThing* ins) {
19276   ValueOperand input = ToValue(ins, LToHashableNonGCThing::InputIndex);
19277   FloatRegister tempFloat = ToFloatRegister(ins->temp0());
19278   ValueOperand output = ToOutValue(ins);
19280   masm.toHashableNonGCThing(input, output, tempFloat);
// Make a string hashable: atoms pass through unchanged; everything else is
// atomized via an out-of-line VM call.
19283 void CodeGenerator::visitToHashableString(LToHashableString* ins) {
19284   Register input = ToRegister(ins->input());
19285   Register output = ToRegister(ins->output());
19287   using Fn = JSAtom* (*)(JSContext*, JSString*);
19288   auto* ool = oolCallVM<Fn, js::AtomizeString>(ins, ArgList(input),
19289                                                StoreRegisterTo(output));
19291   masm.branchTest32(Assembler::Zero, Address(input, JSString::offsetOfFlags()),
19292                     Imm32(JSString::ATOM_BIT), ool->entry());
19293   masm.movePtr(input, output);
19294   masm.bind(ool->rejoin());
// Normalize an arbitrary Value into its hashable form; strings needing
// atomization take the out-of-line VM path (string passed via the scratch reg).
19297 void CodeGenerator::visitToHashableValue(LToHashableValue* ins) {
19298   ValueOperand input = ToValue(ins, LToHashableValue::InputIndex);
19299   FloatRegister tempFloat = ToFloatRegister(ins->temp0());
19300   ValueOperand output = ToOutValue(ins);
19302   Register str = output.scratchReg();
19304   using Fn = JSAtom* (*)(JSContext*, JSString*);
19305   auto* ool =
19306       oolCallVM<Fn, js::AtomizeString>(ins, ArgList(str), StoreRegisterTo(str));
19308   masm.toHashableValue(input, output, tempFloat, ool->entry(), ool->rejoin());
// Compute the hash of a non-GC-thing Value.
19311 void CodeGenerator::visitHashNonGCThing(LHashNonGCThing* ins) {
19312   ValueOperand input = ToValue(ins, LHashNonGCThing::InputIndex);
19313   Register temp = ToRegister(ins->temp0());
19314   Register output = ToRegister(ins->output());
19316   masm.prepareHashNonGCThing(input, output, temp);
// Compute the hash of a string.
19319 void CodeGenerator::visitHashString(LHashString* ins) {
19320   Register input = ToRegister(ins->input());
19321   Register temp = ToRegister(ins->temp0());
19322   Register output = ToRegister(ins->output());
19324   masm.prepareHashString(input, output, temp);
// Compute the hash of a symbol.
19327 void CodeGenerator::visitHashSymbol(LHashSymbol* ins) {
19328   Register input = ToRegister(ins->input());
19329   Register output = ToRegister(ins->output());
19331   masm.prepareHashSymbol(input, output);
// Compute the hash of a BigInt (needs three temps).
19334 void CodeGenerator::visitHashBigInt(LHashBigInt* ins) {
19335   Register input = ToRegister(ins->input());
19336   Register temp0 = ToRegister(ins->temp0());
19337   Register temp1 = ToRegister(ins->temp1());
19338   Register temp2 = ToRegister(ins->temp2());
19339   Register output = ToRegister(ins->output());
19341   masm.prepareHashBigInt(input, output, temp0, temp1, temp2);
// Compute the hash of an object Value relative to |setObj|'s hash state.
19344 void CodeGenerator::visitHashObject(LHashObject* ins) {
19345   Register setObj = ToRegister(ins->setObject());
19346   ValueOperand input = ToValue(ins, LHashObject::InputIndex);
19347   Register temp0 = ToRegister(ins->temp0());
19348   Register temp1 = ToRegister(ins->temp1());
19349   Register temp2 = ToRegister(ins->temp2());
19350   Register temp3 = ToRegister(ins->temp3());
19351   Register output = ToRegister(ins->output());
19353   masm.prepareHashObject(setObj, input, output, temp0, temp1, temp2, temp3);
// Compute the hash of an arbitrary Value relative to |setObj|'s hash state.
19356 void CodeGenerator::visitHashValue(LHashValue* ins) {
19357   Register setObj = ToRegister(ins->setObject());
19358   ValueOperand input = ToValue(ins, LHashValue::InputIndex);
19359   Register temp0 = ToRegister(ins->temp0());
19360   Register temp1 = ToRegister(ins->temp1());
19361   Register temp2 = ToRegister(ins->temp2());
19362   Register temp3 = ToRegister(ins->temp3());
19363   Register output = ToRegister(ins->output());
19365   masm.prepareHashValue(setObj, input, output, temp0, temp1, temp2, temp3);
// Set.prototype.has fast path for non-BigInt keys, using a precomputed hash.
19368 void CodeGenerator::visitSetObjectHasNonBigInt(LSetObjectHasNonBigInt* ins) {
19369   Register setObj = ToRegister(ins->setObject());
19370   ValueOperand input = ToValue(ins, LSetObjectHasNonBigInt::InputIndex);
19371   Register hash = ToRegister(ins->hash());
19372   Register temp0 = ToRegister(ins->temp0());
19373   Register temp1 = ToRegister(ins->temp1());
19374   Register output = ToRegister(ins->output());
19376   masm.setObjectHasNonBigInt(setObj, input, hash, output, temp0, temp1);
// Set.prototype.has fast path for BigInt keys (needs two extra temps).
19379 void CodeGenerator::visitSetObjectHasBigInt(LSetObjectHasBigInt* ins) {
19380   Register setObj = ToRegister(ins->setObject());
19381   ValueOperand input = ToValue(ins, LSetObjectHasBigInt::InputIndex);
19382   Register hash = ToRegister(ins->hash());
19383   Register temp0 = ToRegister(ins->temp0());
19384   Register temp1 = ToRegister(ins->temp1());
19385   Register temp2 = ToRegister(ins->temp2());
19386   Register temp3 = ToRegister(ins->temp3());
19387   Register output = ToRegister(ins->output());
19389   masm.setObjectHasBigInt(setObj, input, hash, output, temp0, temp1, temp2,
19390                           temp3);
// Set.prototype.has fast path for arbitrary Value keys.
19393 void CodeGenerator::visitSetObjectHasValue(LSetObjectHasValue* ins) {
19394   Register setObj = ToRegister(ins->setObject());
19395   ValueOperand input = ToValue(ins, LSetObjectHasValue::InputIndex);
19396   Register hash = ToRegister(ins->hash());
19397   Register temp0 = ToRegister(ins->temp0());
19398   Register temp1 = ToRegister(ins->temp1());
19399   Register temp2 = ToRegister(ins->temp2());
19400   Register temp3 = ToRegister(ins->temp3());
19401   Register output = ToRegister(ins->output());
19403   masm.setObjectHasValue(setObj, input, hash, output, temp0, temp1, temp2,
19404                          temp3);
// Set.prototype.has slow path: full VM call.
19407 void CodeGenerator::visitSetObjectHasValueVMCall(
19408     LSetObjectHasValueVMCall* ins) {
19409   pushArg(ToValue(ins, LSetObjectHasValueVMCall::InputIndex));
19410   pushArg(ToRegister(ins->setObject()));
19412   using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
19413   callVM<Fn, jit::SetObjectHas>(ins);
// Load the size of a Set object into |output|.
19416 void CodeGenerator::visitSetObjectSize(LSetObjectSize* ins) {
19417   Register setObj = ToRegister(ins->setObject());
19418   Register output = ToRegister(ins->output());
19420   masm.loadSetObjectSize(setObj, output);
// Map.prototype.has fast path for non-BigInt keys, using a precomputed hash.
19423 void CodeGenerator::visitMapObjectHasNonBigInt(LMapObjectHasNonBigInt* ins) {
19424   Register mapObj = ToRegister(ins->mapObject());
19425   ValueOperand input = ToValue(ins, LMapObjectHasNonBigInt::InputIndex);
19426   Register hash = ToRegister(ins->hash());
19427   Register temp0 = ToRegister(ins->temp0());
19428   Register temp1 = ToRegister(ins->temp1());
19429   Register output = ToRegister(ins->output());
19431   masm.mapObjectHasNonBigInt(mapObj, input, hash, output, temp0, temp1);
// Map.prototype.has fast path for BigInt keys.
19434 void CodeGenerator::visitMapObjectHasBigInt(LMapObjectHasBigInt* ins) {
19435   Register mapObj = ToRegister(ins->mapObject());
19436   ValueOperand input = ToValue(ins, LMapObjectHasBigInt::InputIndex);
19437   Register hash = ToRegister(ins->hash());
19438   Register temp0 = ToRegister(ins->temp0());
19439   Register temp1 = ToRegister(ins->temp1());
19440   Register temp2 = ToRegister(ins->temp2());
19441   Register temp3 = ToRegister(ins->temp3());
19442   Register output = ToRegister(ins->output());
19444   masm.mapObjectHasBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
19445                           temp3);
// Map.prototype.has fast path for arbitrary Value keys.
19448 void CodeGenerator::visitMapObjectHasValue(LMapObjectHasValue* ins) {
19449   Register mapObj = ToRegister(ins->mapObject());
19450   ValueOperand input = ToValue(ins, LMapObjectHasValue::InputIndex);
19451   Register hash = ToRegister(ins->hash());
19452   Register temp0 = ToRegister(ins->temp0());
19453   Register temp1 = ToRegister(ins->temp1());
19454   Register temp2 = ToRegister(ins->temp2());
19455   Register temp3 = ToRegister(ins->temp3());
19456   Register output = ToRegister(ins->output());
19458   masm.mapObjectHasValue(mapObj, input, hash, output, temp0, temp1, temp2,
19459                          temp3);
// Map.prototype.has slow path: full VM call.
19462 void CodeGenerator::visitMapObjectHasValueVMCall(
19463     LMapObjectHasValueVMCall* ins) {
19464   pushArg(ToValue(ins, LMapObjectHasValueVMCall::InputIndex));
19465   pushArg(ToRegister(ins->mapObject()));
19467   using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
19468   callVM<Fn, jit::MapObjectHas>(ins);
// Map.prototype.get fast path for non-BigInt keys; result Value goes to the
// output (its scratch register doubles as an extra temp).
19471 void CodeGenerator::visitMapObjectGetNonBigInt(LMapObjectGetNonBigInt* ins) {
19472   Register mapObj = ToRegister(ins->mapObject());
19473   ValueOperand input = ToValue(ins, LMapObjectGetNonBigInt::InputIndex);
19474   Register hash = ToRegister(ins->hash());
19475   Register temp0 = ToRegister(ins->temp0());
19476   Register temp1 = ToRegister(ins->temp1());
19477   ValueOperand output = ToOutValue(ins);
19479   masm.mapObjectGetNonBigInt(mapObj, input, hash, output, temp0, temp1,
19480                              output.scratchReg());
// Map.prototype.get fast path for BigInt keys.
19483 void CodeGenerator::visitMapObjectGetBigInt(LMapObjectGetBigInt* ins) {
19484   Register mapObj = ToRegister(ins->mapObject());
19485   ValueOperand input = ToValue(ins, LMapObjectGetBigInt::InputIndex);
19486   Register hash = ToRegister(ins->hash());
19487   Register temp0 = ToRegister(ins->temp0());
19488   Register temp1 = ToRegister(ins->temp1());
19489   Register temp2 = ToRegister(ins->temp2());
19490   Register temp3 = ToRegister(ins->temp3());
19491   ValueOperand output = ToOutValue(ins);
19493   masm.mapObjectGetBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
19494                           temp3, output.scratchReg());
// Map.prototype.get fast path for arbitrary Value keys.
19497 void CodeGenerator::visitMapObjectGetValue(LMapObjectGetValue* ins) {
19498   Register mapObj = ToRegister(ins->mapObject());
19499   ValueOperand input = ToValue(ins, LMapObjectGetValue::InputIndex);
19500   Register hash = ToRegister(ins->hash());
19501   Register temp0 = ToRegister(ins->temp0());
19502   Register temp1 = ToRegister(ins->temp1());
19503   Register temp2 = ToRegister(ins->temp2());
19504   Register temp3 = ToRegister(ins->temp3());
19505   ValueOperand output = ToOutValue(ins);
19507   masm.mapObjectGetValue(mapObj, input, hash, output, temp0, temp1, temp2,
19508                          temp3, output.scratchReg());
// Map.prototype.get slow path: full VM call.
19511 void CodeGenerator::visitMapObjectGetValueVMCall(
19512     LMapObjectGetValueVMCall* ins) {
19513   pushArg(ToValue(ins, LMapObjectGetValueVMCall::InputIndex));
19514   pushArg(ToRegister(ins->mapObject()));
19516   using Fn =
19517       bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
19518   callVM<Fn, jit::MapObjectGet>(ins);
// Load the size of a Map object into |output|.
19521 void CodeGenerator::visitMapObjectSize(LMapObjectSize* ins) {
19522   Register mapObj = ToRegister(ins->mapObject());
19523   Register output = ToRegister(ins->output());
19525   masm.loadMapObjectSize(mapObj, output);
// Emit a direct Ion -> wasm call: marshal LIR operands into the wasm ABI
// (register args are asserted already in place; stack args are recorded in
// |stackArgs|), debug-assert the result register matches the single wasm
// result type, then emit the call and keep the instance object alive via the
// IonScript constant pool.
19528 template <size_t NumDefs>
19529 void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
19530   wasm::JitCallStackArgVector stackArgs;
19531   masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
19532   if (masm.oom()) {
19533     return;
19536   MIonToWasmCall* mir = lir->mir();
19537   const wasm::FuncExport& funcExport = mir->funcExport();
19538   const wasm::FuncType& sig =
19539       mir->instance()->metadata().getFuncExportType(funcExport);
  // Walk the signature and classify each argument into a GPR/FPU slot
  // (already placed by register allocation) or a stack slot.
19541   WasmABIArgGenerator abi;
19542   for (size_t i = 0; i < lir->numOperands(); i++) {
19543     MIRType argMir;
19544     switch (sig.args()[i].kind()) {
19545       case wasm::ValType::I32:
19546       case wasm::ValType::I64:
19547       case wasm::ValType::F32:
19548       case wasm::ValType::F64:
19549         argMir = sig.args()[i].toMIRType();
19550         break;
19551       case wasm::ValType::V128:
19552         MOZ_CRASH("unexpected argument type when calling from ion to wasm");
19553       case wasm::ValType::Ref:
19554         // temporarilyUnsupportedReftypeForEntry() restricts args to externref
19555         MOZ_RELEASE_ASSERT(sig.args()[i].refType().isExtern());
19556         // Argument is boxed on the JS side to an anyref, so passed as a
19557         // pointer here.
19558         argMir = sig.args()[i].toMIRType();
19559         break;
19562     ABIArg arg = abi.next(argMir);
19563     switch (arg.kind()) {
19564       case ABIArg::GPR:
19565       case ABIArg::FPU: {
19566         MOZ_ASSERT(ToAnyRegister(lir->getOperand(i)) == arg.reg());
  // Placeholder entry: register args need no stack copy.
19567         stackArgs.infallibleEmplaceBack(wasm::JitCallStackArg());
19568         break;
19570       case ABIArg::Stack: {
19571         const LAllocation* larg = lir->getOperand(i);
19572         if (larg->isConstant()) {
19573           stackArgs.infallibleEmplaceBack(ToInt32(larg));
19574         } else if (larg->isGeneralReg()) {
19575           stackArgs.infallibleEmplaceBack(ToRegister(larg));
19576         } else if (larg->isFloatReg()) {
19577           stackArgs.infallibleEmplaceBack(ToFloatRegister(larg));
19578         } else {
19579           // Always use the stack pointer here because GenerateDirectCallFromJit
19580           // depends on this.
19581           Address addr = ToAddress<BaseRegForAddress::SP>(larg);
19582           stackArgs.infallibleEmplaceBack(addr);
19584         break;
19586 #ifdef JS_CODEGEN_REGISTER_PAIR
19587       case ABIArg::GPR_PAIR: {
19588         MOZ_CRASH(
19589             "no way to pass i64, and wasm uses hardfp for function calls");
19591 #endif
19592       case ABIArg::Uninitialized: {
19593         MOZ_CRASH("Uninitialized ABIArg kind");
  // Debug-only: check the LIR output register/type matches the wasm result
  // convention for the single supported result.
19598   const wasm::ValTypeVector& results = sig.results();
19599   if (results.length() == 0) {
19600     MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
19601   } else {
19602     MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
19603     switch (results[0].kind()) {
19604       case wasm::ValType::I32:
19605         MOZ_ASSERT(lir->mir()->type() == MIRType::Int32);
19606         MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
19607         break;
19608       case wasm::ValType::I64:
19609         MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
19610         MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
19611         break;
19612       case wasm::ValType::F32:
19613         MOZ_ASSERT(lir->mir()->type() == MIRType::Float32);
19614         MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnFloat32Reg);
19615         break;
19616       case wasm::ValType::F64:
19617         MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
19618         MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
19619         break;
19620       case wasm::ValType::V128:
19621         MOZ_CRASH("unexpected return type when calling from ion to wasm");
19622       case wasm::ValType::Ref:
19623         // The wasm stubs layer unboxes anything that needs to be unboxed
19624         // and leaves it in a Value.  A FuncRef/EqRef we could in principle
19625         // leave it as a raw object pointer but for now it complicates the
19626         // API to do so.
19627         MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
19628         break;
19632   WasmInstanceObject* instObj = lir->mir()->instanceObject();
19634   Register scratch = ToRegister(lir->temp());
19636   uint32_t callOffset;
19637   ensureOsiSpace();
19638   GenerateDirectCallFromJit(masm, funcExport, instObj->instance(), stackArgs,
19639                             scratch, &callOffset);
19641   // Add the instance object to the constant pool, so it is transferred to
19642   // the owning IonScript and so that it gets traced as long as the IonScript
19643   // lives.
19645   uint32_t unused;
19646   masm.propagateOOM(graph.addConstantToPool(ObjectValue(*instObj), &unused));
19648   markSafepointAt(callOffset, lir);
// Thin wrappers: all three Ion->wasm call flavors share emitIonToWasmCallBase.
19651 void CodeGenerator::visitIonToWasmCall(LIonToWasmCall* lir) {
19652   emitIonToWasmCallBase(lir);
19654 void CodeGenerator::visitIonToWasmCallV(LIonToWasmCallV* lir) {
19655   emitIonToWasmCallBase(lir);
19657 void CodeGenerator::visitIonToWasmCallI64(LIonToWasmCallI64* lir) {
19658   emitIonToWasmCallBase(lir);
// Produce a wasm null reference by zeroing the output register (xor with
// itself).
19661 void CodeGenerator::visitWasmNullConstant(LWasmNullConstant* lir) {
19662   masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
// Emit a full memory barrier for wasm (wasm-compilation only).
19665 void CodeGenerator::visitWasmFence(LWasmFence* lir) {
19666   MOZ_ASSERT(gen->compilingWasm());
19667   masm.memoryBarrier(MembarFull);
// Convert a JS Value to a wasm anyref; values that cannot be converted inline
// are boxed via the out-of-line VM call.
19670 void CodeGenerator::visitWasmAnyRefFromJSValue(LWasmAnyRefFromJSValue* lir) {
19671   ValueOperand input = ToValue(lir, LWasmAnyRefFromJSValue::InputIndex);
19672   Register output = ToRegister(lir->output());
19673   FloatRegister tempFloat = ToFloatRegister(lir->temp0());
19675   using Fn = JSObject* (*)(JSContext* cx, HandleValue value);
19676   OutOfLineCode* oolBoxValue = oolCallVM<Fn, wasm::AnyRef::boxValue>(
19677       lir, ArgList(input), StoreRegisterTo(output));
19678   masm.convertValueToWasmAnyRef(input, output, tempFloat, oolBoxValue->entry());
19679   masm.bind(oolBoxValue->rejoin());
// Convert a JS object pointer to a wasm anyref.
19682 void CodeGenerator::visitWasmAnyRefFromJSObject(LWasmAnyRefFromJSObject* lir) {
19683   Register input = ToRegister(lir->input());
19684   Register output = ToRegister(lir->output());
19685   masm.convertObjectToWasmAnyRef(input, output);
// Convert a JS string pointer to a wasm anyref.
19688 void CodeGenerator::visitWasmAnyRefFromJSString(LWasmAnyRefFromJSString* lir) {
19689   Register input = ToRegister(lir->input());
19690   Register output = ToRegister(lir->output());
19691   masm.convertStringToWasmAnyRef(input, output);
// Truncate an int32 to a wasm i31ref in |output|.
19694 void CodeGenerator::visitWasmNewI31Ref(LWasmNewI31Ref* lir) {
19695   Register value = ToRegister(lir->value());
19696   Register output = ToRegister(lir->output());
19697   masm.truncate32ToWasmI31Ref(value, output);
// Extract the 31-bit payload of an i31ref, sign- or zero-extending per the
// MIR's widening op.
19700 void CodeGenerator::visitWasmI31RefGet(LWasmI31RefGet* lir) {
19701   Register value = ToRegister(lir->value());
19702   Register output = ToRegister(lir->output());
19703   if (lir->mir()->wideningOp() == wasm::FieldWideningOp::Signed) {
19704     masm.convertWasmI31RefTo32Signed(value, output);
19705   } else {
19706     masm.convertWasmI31RefTo32Unsigned(value, output);
19710 #ifdef FUZZING_JS_FUZZILLI
// Fuzzilli instrumentation: fold a double's raw 64 bits into a 32-bit hash in
// |output| by adding its low and high 32-bit halves. Register pairing differs
// between PUNBOX64 and NUNBOX32 value layouts.
19711 void CodeGenerator::emitFuzzilliHashDouble(FloatRegister floatDouble,
19712                                            Register scratch, Register output) {
19713 #  ifdef JS_PUNBOX64
19714   Register64 reg64_1(scratch);
19715   Register64 reg64_2(output);
19716   masm.moveDoubleToGPR64(floatDouble, reg64_1);
19717   masm.move64(reg64_1, reg64_2);
19718   masm.rshift64(Imm32(32), reg64_2);
19719   masm.add32(scratch, output);
19720 #  else
19721   Register64 reg64(scratch, output);
19722   masm.moveDoubleToGPR64(floatDouble, reg64);
19723   masm.add32(scratch, output);
19724 #  endif
// Hash a JSObject* for Fuzzilli. Hashing always requires the VM, so this
// jumps unconditionally into the out-of-line FuzzilliHashObjectInl call and
// rejoins with the hash in |output|.
void CodeGenerator::emitFuzzilliHashObject(LInstruction* lir, Register obj,
                                           Register output) {
  using Fn = void (*)(JSContext* cx, JSObject* obj, uint32_t* out);
  OutOfLineCode* ool = oolCallVM<Fn, FuzzilliHashObjectInl>(
      lir, ArgList(obj), StoreRegisterTo(output));

  // No inline fast path: always take the VM call.
  masm.jump(ool->entry());
  masm.bind(ool->rejoin());
}
19737 void CodeGenerator::emitFuzzilliHashBigInt(Register bigInt, Register output) {
19738 LiveRegisterSet volatileRegs(GeneralRegisterSet::All(),
19739 FloatRegisterSet::All());
19740 volatileRegs.takeUnchecked(output);
19741 masm.PushRegsInMask(volatileRegs);
19743 using Fn = uint32_t (*)(BigInt* bigInt);
19744 masm.setupUnalignedABICall(output);
19745 masm.passABIArg(bigInt);
19746 masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
19747 masm.storeCallInt32Result(output);
19749 masm.PopRegsInMask(volatileRegs);
// Compute the Fuzzilli hash of a boxed Value by dispatching on its type tag:
//  - BigInt and Object are hashed via their dedicated helpers.
//  - Int32, Null (as 1), Undefined (as 2) and Boolean (as bool + 3) are
//    normalised to a double and hashed with emitFuzzilliHashDouble.
//  - Doubles are canonicalised first (deterministic NaN bits), then hashed.
//  - Any other tag (string, symbol, ...) hashes to 0.
void CodeGenerator::visitFuzzilliHashV(LFuzzilliHashV* ins) {
  MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Value);

  ValueOperand value = ToValue(ins, 0);

  Label isDouble, isObject, isBigInt, done;

  FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
  Register scratch = ToRegister(ins->getTemp(0));
  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

# ifdef JS_PUNBOX64
  // On 64-bit the tag is split into the scratch register. Each unbox below
  // clobbers scratch only on its matching (fallthrough) path, after the tag
  // has already been tested.
  Register tagReg = ToRegister(ins->getTemp(0));
  masm.splitTag(value, tagReg);
# else
  // On 32-bit the type tag already lives in its own register.
  Register tagReg = value.typeReg();
# endif

  // BigInt: unbox and hash via the ABI helper.
  Label noBigInt;
  masm.branchTestBigInt(Assembler::NotEqual, tagReg, &noBigInt);
  masm.unboxBigInt(value, scratch);
  masm.jump(&isBigInt);
  masm.bind(&noBigInt);

  // Object: unbox and hash via the VM helper.
  Label noObject;
  masm.branchTestObject(Assembler::NotEqual, tagReg, &noObject);
  masm.unboxObject(value, scratch);
  masm.jump(&isObject);
  masm.bind(&noObject);

  // Int32: hash its double representation.
  Label noInt32;
  masm.branchTestInt32(Assembler::NotEqual, tagReg, &noInt32);
  masm.unboxInt32(value, scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noInt32);

  // Null: hash as double(1).
  Label noNull;
  masm.branchTestNull(Assembler::NotEqual, tagReg, &noNull);
  masm.move32(Imm32(1), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noNull);

  // Undefined: hash as double(2).
  Label noUndefined;
  masm.branchTestUndefined(Assembler::NotEqual, tagReg, &noUndefined);
  masm.move32(Imm32(2), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noUndefined);

  // Boolean: hash as double(bool + 3).
  Label noBoolean;
  masm.branchTestBoolean(Assembler::NotEqual, tagReg, &noBoolean);
  masm.unboxBoolean(value, scratch);
  masm.add32(Imm32(3), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noBoolean);

  // Double: canonicalise so NaN bit patterns hash deterministically.
  Label noDouble;
  masm.branchTestDouble(Assembler::NotEqual, tagReg, &noDouble);
  masm.unboxDouble(value, scratchFloat);
  masm.canonicalizeDoubleIfDeterministic(scratchFloat);

  masm.jump(&isDouble);
  masm.bind(&noDouble);

  // Any remaining tag hashes to 0.
  masm.move32(Imm32(0), output);
  masm.jump(&done);

  masm.bind(&isBigInt);
  emitFuzzilliHashBigInt(scratch, output);
  masm.jump(&done);

  masm.bind(&isObject);
  emitFuzzilliHashObject(ins, scratch, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  emitFuzzilliHashDouble(scratchFloat, scratch, output);

  masm.bind(&done);
}
19836 void CodeGenerator::visitFuzzilliHashT(LFuzzilliHashT* ins) {
19837 const LAllocation* value = ins->value();
19838 MIRType mirType = ins->mir()->getOperand(0)->type();
19840 FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
19841 Register scratch = ToRegister(ins->getTemp(0));
19842 Register output = ToRegister(ins->output());
19843 MOZ_ASSERT(scratch != output);
19845 if (mirType == MIRType::Object) {
19846 MOZ_ASSERT(value->isGeneralReg());
19847 masm.mov(value->toGeneralReg()->reg(), scratch);
19848 emitFuzzilliHashObject(ins, scratch, output);
19849 } else if (mirType == MIRType::BigInt) {
19850 MOZ_ASSERT(value->isGeneralReg());
19851 masm.mov(value->toGeneralReg()->reg(), scratch);
19852 emitFuzzilliHashBigInt(scratch, output);
19853 } else if (mirType == MIRType::Double) {
19854 MOZ_ASSERT(value->isFloatReg());
19855 masm.moveDouble(value->toFloatReg()->reg(), scratchFloat);
19856 masm.canonicalizeDoubleIfDeterministic(scratchFloat);
19857 emitFuzzilliHashDouble(scratchFloat, scratch, output);
19858 } else if (mirType == MIRType::Float32) {
19859 MOZ_ASSERT(value->isFloatReg());
19860 masm.convertFloat32ToDouble(value->toFloatReg()->reg(), scratchFloat);
19861 masm.canonicalizeDoubleIfDeterministic(scratchFloat);
19862 emitFuzzilliHashDouble(scratchFloat, scratch, output);
19863 } else if (mirType == MIRType::Int32) {
19864 MOZ_ASSERT(value->isGeneralReg());
19865 masm.mov(value->toGeneralReg()->reg(), scratch);
19866 masm.convertInt32ToDouble(scratch, scratchFloat);
19867 emitFuzzilliHashDouble(scratchFloat, scratch, output);
19868 } else if (mirType == MIRType::Null) {
19869 MOZ_ASSERT(value->isBogus());
19870 masm.move32(Imm32(1), scratch);
19871 masm.convertInt32ToDouble(scratch, scratchFloat);
19872 emitFuzzilliHashDouble(scratchFloat, scratch, output);
19873 } else if (mirType == MIRType::Undefined) {
19874 MOZ_ASSERT(value->isBogus());
19875 masm.move32(Imm32(2), scratch);
19876 masm.convertInt32ToDouble(scratch, scratchFloat);
19877 emitFuzzilliHashDouble(scratchFloat, scratch, output);
19878 } else if (mirType == MIRType::Boolean) {
19879 MOZ_ASSERT(value->isGeneralReg());
19880 masm.mov(value->toGeneralReg()->reg(), scratch);
19881 masm.add32(Imm32(3), scratch);
19882 masm.convertInt32ToDouble(scratch, scratchFloat);
19883 emitFuzzilliHashDouble(scratchFloat, scratch, output);
19884 } else {
19885 MOZ_CRASH("unexpected type");
19889 void CodeGenerator::visitFuzzilliHashStore(LFuzzilliHashStore* ins) {
19890 const LAllocation* value = ins->value();
19891 MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Int32);
19892 MOZ_ASSERT(value->isGeneralReg());
19894 Register scratchJSContext = ToRegister(ins->getTemp(0));
19895 Register scratch = ToRegister(ins->getTemp(1));
19897 masm.loadJSContext(scratchJSContext);
19899 // stats
19900 Address addrExecHashInputs(scratchJSContext,
19901 offsetof(JSContext, executionHashInputs));
19902 masm.load32(addrExecHashInputs, scratch);
19903 masm.add32(Imm32(1), scratch);
19904 masm.store32(scratch, addrExecHashInputs);
19906 Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
19907 masm.load32(addrExecHash, scratch);
19908 masm.add32(value->toGeneralReg()->reg(), scratch);
19909 masm.rotateLeft(Imm32(1), scratch, scratch);
19910 masm.store32(scratch, addrExecHash);
19912 #endif
// Keep CodeGenerator free of virtual methods (no vtable): it is not used
// through dynamic dispatch, and this locks that invariant in at compile time.
static_assert(!std::is_polymorphic_v<CodeGenerator>,
              "CodeGenerator should not have any virtual methods");
19917 } // namespace jit
19918 } // namespace js